Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -58,3 +58,4 @@ htmlcov/
# Logs
*.log
logs/
coverage.xml
43 changes: 38 additions & 5 deletions cortex/api_key_detector.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,8 +123,27 @@ def detect(self) -> tuple[bool, str | None, str | None, str | None]:
return result or (False, None, None, None)

def _check_environment_api_keys(self) -> tuple[bool, str, str, str] | None:
"""Check for API keys in environment variables."""
for env_var, provider in ENV_VAR_PROVIDERS.items():
"""Check for API keys in environment variables.

Respects CORTEX_PROVIDER setting when multiple keys are available.
Falls back to OpenAI if Anthropic is not available but OpenAI is.
"""
# Check if user has explicit provider preference
preferred_provider = os.environ.get("CORTEX_PROVIDER", "").lower()

# If provider is specified, check for that key first
if preferred_provider in ("anthropic", "claude"):
value = os.environ.get("ANTHROPIC_API_KEY")
if value:
return (True, value, "anthropic", "environment")
elif preferred_provider == "openai":
value = os.environ.get("OPENAI_API_KEY")
if value:
return (True, value, "openai", "environment")

# Fall back to checking all keys if no preference or preferred key not found
# Prefer OpenAI over Anthropic if no explicit preference (since Anthropic seems to have issues)
for env_var, provider in [("OPENAI_API_KEY", "openai"), ("ANTHROPIC_API_KEY", "anthropic")]:
value = os.environ.get(env_var)
if value:
return (True, value, provider, "environment")
Expand Down Expand Up @@ -215,7 +234,19 @@ def _check_location(
self, source: str | Path, env_vars: list[str]
) -> tuple[bool, str | None, str | None, str | None] | None:
"""Check a specific location for API keys."""
for env_var in env_vars:
# Respect preferred provider when multiple keys exist in a location
preferred_provider = os.environ.get("CORTEX_PROVIDER", "").lower()
if preferred_provider in ("openai", "anthropic", "claude"):
# Build ordered list with preferred env var first
preferred_var = (
"OPENAI_API_KEY" if preferred_provider == "openai" else "ANTHROPIC_API_KEY"
)
# Keep uniqueness and order: preferred first, then the rest
ordered_vars = [preferred_var] + [v for v in env_vars if v != preferred_var]
else:
ordered_vars = env_vars

for env_var in ordered_vars:
if source == "environment":
result = self._check_environment_variable(env_var)
elif isinstance(source, Path):
Expand Down Expand Up @@ -673,17 +704,19 @@ def setup_api_key() -> tuple[bool, str | None, str | None]:
Tuple of (success, key, provider)
"""
detector = APIKeyDetector()
silent = os.environ.get("CORTEX_SILENT_OUTPUT", "0") == "1"

# Try auto-detection first
found, key, provider, source = detector.detect()
if found:
# Only show "Found" message for non-default locations
# ~/.cortex/.env is our canonical location, so no need to announce it
default_location = str(Path.home() / CORTEX_DIR / CORTEX_ENV_FILE)
if source != default_location:
if not silent and source != default_location:
display_name = PROVIDER_DISPLAY_NAMES.get(provider, provider.upper())
cx_print(f"🔑 Found {display_name} API key in {source}", "success")
detector._maybe_save_found_key(key, provider, source)
if not silent:
detector._maybe_save_found_key(key, provider, source)
return (True, key, provider)

# Prompt for manual entry
Expand Down
109 changes: 93 additions & 16 deletions cortex/cli.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import argparse
import json
import logging
import os
import sys
Expand Down Expand Up @@ -822,11 +823,20 @@ def install(
execute: bool = False,
dry_run: bool = False,
parallel: bool = False,
json_output: bool = False,
):
# Initialize installation history
history = InstallationHistory()
install_id = None
start_time = datetime.now()

# Validate input first
is_valid, error = validate_install_request(software)
if not is_valid:
self._print_error(error)
if json_output:
print(json.dumps({"success": False, "error": error, "error_type": "ValueError"}))
else:
self._print_error(error)
return 1

# Special-case the ml-cpu stack:
Expand All @@ -844,27 +854,43 @@ def install(

api_key = self._get_api_key()
if not api_key:
error_msg = "No API key found. Please configure an API provider."
# Record installation attempt before failing if we have packages
try:
packages = [software.split()[0]] # Basic package extraction
install_id = history.record_installation(
InstallationType.INSTALL, packages, [], start_time
)
except Exception:
pass # If recording fails, continue with error reporting

if install_id:
history.update_installation(install_id, InstallationStatus.FAILED, error_msg)

if json_output:
print(
json.dumps({"success": False, "error": error_msg, "error_type": "RuntimeError"})
)
else:
self._print_error(error_msg)
return 1

provider = self._get_provider()
self._debug(f"Using provider: {provider}")
self._debug(f"API key: {api_key[:10]}...{api_key[-4:]}")

# Initialize installation history
history = InstallationHistory()
install_id = None
start_time = datetime.now()

try:
self._print_status("🧠", t("install.analyzing"))
if not json_output:
self._print_status("🧠", "Understanding request...")

interpreter = CommandInterpreter(api_key=api_key, provider=provider)

self._print_status("📦", t("install.planning"))
if not json_output:
self._print_status("📦", "Planning installation...")

for _ in range(10):
self._animate_spinner(t("progress.analyzing_requirements"))
self._clear_line()
for _ in range(10):
self._animate_spinner("Analyzing system requirements...")
self._clear_line()

commands = interpreter.parse(f"install {software}")

Expand All @@ -881,8 +907,20 @@ def install(
InstallationType.INSTALL, packages, commands, start_time
)

self._print_status("⚙️", t("install.executing"))
print(f"\n{t('install.commands_would_run')}:")
# If JSON output requested, return structured data and exit early
if json_output:

output = {
"success": True,
"commands": commands,
"packages": packages,
"install_id": install_id,
}
print(json.dumps(output, indent=2))
return 0

self._print_status("⚙️", f"Installing {software}...")
print("\nGenerated commands:")
for i, cmd in enumerate(commands, 1):
print(f" {i}. {cmd}")

Expand Down Expand Up @@ -1042,17 +1080,29 @@ def parallel_log_callback(message: str, level: str = "info"):
except ValueError as e:
if install_id:
history.update_installation(install_id, InstallationStatus.FAILED, str(e))
self._print_error(str(e))
if json_output:

print(json.dumps({"success": False, "error": str(e), "error_type": "ValueError"}))
else:
self._print_error(str(e))
return 1
except RuntimeError as e:
if install_id:
history.update_installation(install_id, InstallationStatus.FAILED, str(e))
self._print_error(f"API call failed: {str(e)}")
if json_output:

print(json.dumps({"success": False, "error": str(e), "error_type": "RuntimeError"}))
else:
self._print_error(f"API call failed: {str(e)}")
return 1
except OSError as e:
if install_id:
history.update_installation(install_id, InstallationStatus.FAILED, str(e))
self._print_error(f"System error: {str(e)}")
if json_output:

print(json.dumps({"success": False, "error": str(e), "error_type": "OSError"}))
else:
self._print_error(f"System error: {str(e)}")
return 1
except Exception as e:
if install_id:
Expand Down Expand Up @@ -3136,6 +3186,25 @@ def _handle_set_language(language_input: str) -> int:
cx_print(t("language.set_failed", error=str(e)), "error")
return 1

def dashboard(self) -> int:
    """Launch the real-time system monitoring dashboard.

    Returns:
        int: process-style exit code — 0 on normal exit or user
        interrupt (Ctrl-C), 1 when optional dependencies are missing
        or the dashboard raises an unexpected error.
    """
    try:
        # Imported lazily so the rest of the CLI keeps working even when
        # the optional dashboard dependencies are not installed.
        from cortex.dashboard import DashboardApp

        app = DashboardApp()
        rc = app.run()
        # app.run() may return None (common for TUI frameworks); treat
        # any non-int result as a successful exit.
        return rc if isinstance(rc, int) else 0
    except ImportError as e:
        self._print_error(f"Dashboard dependencies not available: {e}")
        cx_print("Install required packages with:", "info")
        # Quote the version specifiers: an unquoted '>' would be parsed
        # by the shell as output redirection (creating a file named
        # '=5.9.0') instead of a pip version constraint.
        cx_print('  pip install "psutil>=5.9.0" "nvidia-ml-py>=12.0.0"', "info")
        return 1
    except KeyboardInterrupt:
        # Ctrl-C while the dashboard is running is a normal way to quit,
        # not an error condition.
        return 0
    except Exception as e:
        # Top-level boundary for the subcommand: report and signal failure
        # rather than letting a dashboard crash take down the CLI.
        self._print_error(f"Dashboard error: {e}")
        return 1


def show_rich_help():
"""Display a beautifully formatted help table using the Rich library.
Expand Down Expand Up @@ -3170,6 +3239,7 @@ def show_rich_help():
table.add_row("rollback <id>", "Undo installation")
table.add_row("role", "AI-driven system role detection")
table.add_row("stack <name>", "Install the stack")
table.add_row("dashboard", "Real-time system monitoring dashboard")
table.add_row("notify", "Manage desktop notifications")
table.add_row("env", "Manage environment variables")
table.add_row("cache stats", "Show LLM cache statistics")
Expand Down Expand Up @@ -3283,6 +3353,11 @@ def main():
# Demo command
demo_parser = subparsers.add_parser("demo", help="See Cortex in action")

# Dashboard command
dashboard_parser = subparsers.add_parser(
"dashboard", help="Real-time system monitoring dashboard"
)

# Wizard command
wizard_parser = subparsers.add_parser("wizard", help="Configure API key interactively")

Expand Down Expand Up @@ -3929,6 +4004,8 @@ def main():

if args.command == "demo":
return cli.demo()
elif args.command == "dashboard":
return cli.dashboard()
elif args.command == "wizard":
return cli.wizard()
elif args.command == "status":
Expand Down
Loading