diff --git a/app.py b/app.py
index e6793e6..7bb4e07 100644
--- a/app.py
+++ b/app.py
@@ -2,7 +2,7 @@
 from pathlib import Path
 import json
 
-from bespoken import chat
+from bespoken import Chat
 from bespoken.tools import FileTool, TodoTools
 from bespoken.prompts import marimo_prompt
 from bespoken import ui
@@ -26,15 +26,29 @@ def debug_reason():
     return out
 
 
-chat(
+# Define tools for different modes
+file_tool = FileTool("edit.py")
+todo_tools = TodoTools()
+
+Chat(
     model_name="anthropic/claude-3-5-sonnet-20240620",
-    tools=[FileTool("edit.py"), TodoTools()],
+    tools={
+        "development": [file_tool, todo_tools],  # Full development capabilities
+        "review": [file_tool],  # Code review mode - can read files but no todo management
+        "planning": [],  # Planning mode - no tools, pure discussion
+    },
+    mode_switch_messages={
+        "development": "You are now in development mode. You can edit files and manage todos. Focus on implementing features and fixing bugs.",
+        "review": "You are now in review mode. You can read files to understand the codebase but cannot make changes. Focus on analyzing code and providing feedback.",
+        "planning": "You are now in planning mode. You cannot access files or tools. Focus on high-level discussion, architecture planning, and strategic thinking.",
+    },
     system_prompt=marimo_prompt,
     debug=True,
+    initial_mode="development",
     slash_commands={
         "/thinking": "Let me think through this step by step:",
         "/role": set_role,
         "/debug_prompt": debug_reason,
     },
     history_callback=lambda x: srsly.write_jsonl(Path("logs.json"), x, append=True, append_new_line=False)
-)
+).run()
diff --git a/src/bespoken/__init__.py b/src/bespoken/__init__.py
index 1e833c1..a08205d 100644
--- a/src/bespoken/__init__.py
+++ b/src/bespoken/__init__.py
@@ -2,7 +2,7 @@
 import importlib.metadata
 
-from .__main__ import chat
+from .__main__ import chat, Chat
 
 # Get version dynamically from package metadata
 try:
@@ -10,4 +10,4 @@
 except:
     __version__ = "unknown"
 
-__all__ = ["chat", "__version__"]
\ No newline at end of file
+__all__ = ["chat", "Chat", "__version__"]
\ No newline at end of file
diff --git a/src/bespoken/__main__.py b/src/bespoken/__main__.py
index c18488a..6119a2d 100644
--- a/src/bespoken/__main__.py
+++ b/src/bespoken/__main__.py
@@ -1,5 +1,5 @@
 from pathlib import Path
-from typing import Optional, Callable
+from typing import Optional, Callable, Union
 import json
 import uuid
@@ -126,118 +126,430 @@ def dispatch_slash_command(command, user_commands, model, tools, conversation):
     return COMMAND_HANDLED, conversation
 
 
-def chat(
-    debug: bool = typer.Option(False, "--debug", "-d", help="Enable debug mode to see LLM interactions"),
-    model_name: str = typer.Option("anthropic/claude-3-5-sonnet-20240620", "--model", "-m", help="LLM model to use"),
-    system_prompt: Optional[str] = typer.Option(None, "--system", "-s", help="System prompt for the assistant"),
-    tools: list = None,
-    slash_commands: dict = None,
-    history_callback: Optional[Callable] = None,
-    first_message: Optional[str] = None,
-    show_banner: bool = True,
-):
-    """Run the bespoken chat assistant."""
-    # Set debug mode globally
-    config.DEBUG_MODE = debug
+class Chat:
+    """Chat session manager with support for configurable modes."""
 
-    # Initialize user slash commands
-    user_commands = slash_commands or {}
+    def __init__(
+        self,
+        debug: bool = False,
+        model_name: str = "anthropic/claude-3-5-sonnet-20240620",
+        system_prompt: Optional[str] = None,
+        tools: Union[list, dict] = None,
+        mode_switch_messages: Optional[dict] = None,
+        slash_commands: dict = None,
+        history_callback: Optional[Callable] = None,
+        first_message: Optional[str] = None,
+        show_banner: bool = True,
+        initial_mode: Optional[str] = None,
+    ):
+        """Initialize chat session.
+
+        Args:
+            debug: Enable debug mode
+            model_name: LLM model to use
+            system_prompt: System prompt for the assistant
+            tools: Either list of tools or dict of {mode: [tools]}
+            mode_switch_messages: Dict of {mode: message} sent when switching modes
+            slash_commands: User-defined slash commands
+            history_callback: Callback for conversation history
+            first_message: Initial message to display
+            show_banner: Whether to show banner
+            initial_mode: Initial mode (None = no modes)
+        """
+        self.debug = debug
+        self.model_name = model_name
+        self.system_prompt = system_prompt
+        self.slash_commands = slash_commands or {}
+        self.history_callback = history_callback
+        self.first_message = first_message
+        self.show_banner = show_banner
+        self.current_mode = initial_mode
+        self.mode_switch_messages = mode_switch_messages or {}
+        self.conversation_history = []
+
+        # Parse tools configuration
+        if isinstance(tools, dict):
+            self.mode_tools = tools
+            self.available_modes = list(tools.keys())
+        else:
+            self.mode_tools = {"default": tools or []}
+            self.available_modes = []
+
+        # Initialize model and conversation
+        self.model = None
+        self.conversation = None
+        self._initialize_model()
 
-    console = Console()
-
-    # Show the banner
-    if show_banner:
-        ui.show_banner()
+    def _initialize_model(self):
+        """Initialize the LLM model and conversation."""
+        try:
+            self.model = llm.get_model(self.model_name)
+        except Exception as e:
+            ui.print(f"[red]Error loading model '{self.model_name}': {e}[/red]")
+            raise typer.Exit(1)
+
+        current_tools = self._get_current_tools()
+        self.conversation = self.model.conversation(tools=current_tools)
 
-    if first_message:
-        ui.print(first_message)
-        ui.print("")
+    def _get_current_tools(self):
+        """Get tools for current mode."""
+        if self.current_mode is None:
+            return self.mode_tools.get("default", [])
+        return self.mode_tools.get(self.current_mode, [])
 
-    if debug:
-        ui.print("[magenta]Debug mode enabled[/magenta]")
-        ui.print("")
+    def _is_modes_enabled(self) -> bool:
+        """Check if modes are configured."""
+        return self.current_mode is not None
 
+    def get_available_modes(self) -> list:
+        """Return list of available modes."""
+        return self.available_modes.copy() if self._is_modes_enabled() else []
 
-    try:
-        model = llm.get_model(model_name)
-    except Exception as e:
-        ui.print(f"[red]Error loading model '{model_name}': {e}[/red]")
-        raise typer.Exit(1)
+    def switch_to_next_mode(self) -> str:
+        """Switch to the next mode in the list (for keyboard shortcut)."""
+        if not self._is_modes_enabled() or len(self.available_modes) <= 1:
+            return None
+
+        current_index = self.available_modes.index(self.current_mode)
+        next_index = (current_index + 1) % len(self.available_modes)
+        next_mode = self.available_modes[next_index]
+
+        # Save conversation history before switching
+        self.conversation_history = [msg.response_json for msg in self.conversation.responses]
+
+        # Switch mode silently (no UI feedback here, handled by input function)
+        old_mode = self.current_mode
+        self.current_mode = next_mode
+        self._initialize_model()
+
+        # Send mode switch message if configured
+        if next_mode in self.mode_switch_messages:
+            switch_message = self.mode_switch_messages[next_mode]
+            for _ in self.conversation.chain(switch_message, system=self.system_prompt):
+                pass  # Consume the response silently
+
+        # Replay conversation history
+        for msg in self.conversation_history:
+            if msg.get("role") == "user":
+                content = msg.get("content", [])
+                if content and isinstance(content, list) and content[0].get("type") == "text":
+                    text = content[0].get("text", "")
+                    if text:  # Only replay non-empty user messages
+                        for _ in self.conversation.chain(text, system=self.system_prompt):
+                            pass  # Consume responses silently
+
+        return next_mode
 
-    conversation = model.conversation(tools=tools)
-    history = []
-    try:
-        while True:
-            # Define available commands for completion (builtin + user commands)
-            builtin_commands = ["/quit", "/help", "/tools", "/debug"]
-            user_command_names = list(user_commands.keys())
-            completions = builtin_commands + user_command_names
+    def switch_mode(self, new_mode: str):
+        """Switch to a different mode."""
+        if not self._is_modes_enabled():
+            ui.print("[red]Modes are not configured for this session[/red]")
+            return False
 
-            # Show completion hint on first prompt
-            if not hasattr(chat, '_shown_completion_hint'):
-                ui.print("[dim]Tips: TAB for completions • @file.py for file paths • ↑/↓ for history • Ctrl+U to clear[/dim]")
-                chat._shown_completion_hint = True
-
-            out = ui.input("> ", completions=completions).strip()
-
-            # Handle slash commands (only if it's a known command)
-            if out.startswith("/"):
-                # Check if it's a known command
+        if new_mode not in self.available_modes:
+            ui.print(f"[red]Unknown mode: {new_mode}[/red]")
+            ui.print(f"[dim]Available modes: {', '.join(self.available_modes)}[/dim]")
+            return False
+
+        if new_mode == self.current_mode:
+            ui.print(f"[dim]Already in {new_mode} mode[/dim]")
+            return True
+
+        # Save conversation history
+        self.conversation_history = [msg.response_json for msg in self.conversation.responses]
+
+        # Switch mode and reinitialize
+        old_mode = self.current_mode
+        self.current_mode = new_mode
+        self._initialize_model()
+
+        # Send mode switch message if configured
+        if new_mode in self.mode_switch_messages:
+            switch_message = self.mode_switch_messages[new_mode]
+            # Send the switch message to establish new mode context
+            for _ in self.conversation.chain(switch_message, system=self.system_prompt):
+                pass  # Consume the response silently
+
+        # Replay conversation history
+        for msg in self.conversation_history:
+            if msg.get("role") == "user":
+                content = msg.get("content", [])
+                if content and isinstance(content, list) and content[0].get("type") == "text":
+                    text = content[0].get("text", "")
+                    if text:  # Only replay non-empty user messages
+                        for _ in self.conversation.chain(text, system=self.system_prompt):
+                            pass  # Consume responses silently
+
+        ui.print(f"[green]Switched from {old_mode} to {new_mode} mode[/green]")
+        ui.print("")
+        return True
+
+    def run(self):
+        """Main chat loop."""
+        # Set debug mode globally
+        config.DEBUG_MODE = self.debug
+
+        # Initialize user slash commands
+        user_commands = self.slash_commands.copy()
+
+        console = Console()
+
+        # Show the banner
+        if self.show_banner:
+            ui.show_banner()
+
+        if self.first_message:
+            ui.print(self.first_message)
+            ui.print("")
+
+        if self.debug:
+            ui.print("[magenta]Debug mode enabled[/magenta]")
+            ui.print("")
+
+        history = []
+        try:
+            while True:
+                # Define available commands for completion (builtin + user commands + mode commands)
                 builtin_commands = ["/quit", "/help", "/tools", "/debug"]
+                if self._is_modes_enabled():
+                    builtin_commands.extend(["/mode", "/modes"])
+
+                user_command_names = list(user_commands.keys())
+                completions = builtin_commands + user_command_names
+
+                # Show completion hint on first prompt
+                if not hasattr(self, '_shown_completion_hint'):
+                    tip_text = "[dim]Tips: TAB for completions • @file.py for file paths • ↑/↓ for history • Ctrl+U to clear"
+                    if self._is_modes_enabled() and len(self.available_modes) > 1:
+                        tip_text += " • Shift+TAB to switch modes"
+                    tip_text += "[/dim]"
+                    ui.print(tip_text)
+                    self._shown_completion_hint = True
+
+                # Create mode-aware prompt
+                if self._is_modes_enabled():
+                    prompt = f"[{self.current_mode}] > "
+                else:
+                    prompt = "> "
+
+                # Prepare mode switching for keyboard shortcut
+                mode_switcher = self.switch_to_next_mode if self._is_modes_enabled() else None
+                available_modes = self.get_available_modes() if self._is_modes_enabled() else None
+
+                out = ui.input(
+                    prompt,
+                    completions=completions,
+                    mode_switcher_callback=mode_switcher,
+                    available_modes=available_modes
+                ).strip()
+
+                # Handle slash commands (only if it's a known command)
+                if out.startswith("/"):
+                    # Parse command and arguments
+                    parts = out.split(None, 1)  # Split at most once to preserve spaces in args
+                    command = parts[0] if parts else out
+                    args = parts[1] if len(parts) > 1 else ""
 
-                    if result == COMMAND_QUIT:
-                        break
-                    elif result == COMMAND_HANDLED:
-                        continue
-                    else:
-                        # Command returned text for LLM
-                        out = result
-            # If it starts with / but isn't a known command, treat as regular text
-
-            # Skip empty input
-            if not out.strip():
-                continue
-
-            # Show spinner while getting initial response
-            # Create a padded spinner
-            spinner_text = Text("Thinking...", style="dim")
-            padded_spinner = Columns([Text(" " * ui.LEFT_PADDING), Spinner("dots"), spinner_text], expand=False)
-            response_started = False
-
-            with Live(padded_spinner, console=console, refresh_per_second=10, transient=True) as live:
-                if history_callback:
-                    new_id = str(uuid.uuid4()).replace("-", "")[:24]
-                    history_callback([{"id": f"msg_{new_id}", "role": "user", "content": [{"text": out, "type": "text"}]}])
-                for chunk in conversation.chain(out, system=system_prompt):
-                    if not response_started:
-                        # First chunk received, clear and stop the spinner so it disappears
-                        try:
-                            live.update(Text(""), refresh=True)
-                        except Exception:
-                            pass
-                        live.stop()
-                        response_started = True
-                        # Initialize streaming state
-                        ui.start_streaming(ui.LEFT_PADDING)
+                    # Check if it's a known command
+                    if command in builtin_commands or command in user_commands:
+                        result, self.conversation = self._dispatch_slash_command(command, args, user_commands)
+
+                        if result == COMMAND_QUIT:
+                            break
+                        elif result == COMMAND_HANDLED:
+                            continue
+                        else:
+                            # Command returned text for LLM
+                            out = result
+                # If it starts with / but isn't a known command, treat as regular text
+
+                # Skip empty input
+                if not out.strip():
+                    continue
+
+                # Show spinner while getting initial response
+                # Create a padded spinner
+                spinner_text = Text("Thinking...", style="dim")
+                padded_spinner = Columns([Text(" " * ui.LEFT_PADDING), Spinner("dots"), spinner_text], expand=False)
+                response_started = False
+
+                with Live(padded_spinner, console=console, refresh_per_second=10, transient=True) as live:
+                    if self.history_callback:
+                        new_id = str(uuid.uuid4()).replace("-", "")[:24]
+                        self.history_callback([{"id": f"msg_{new_id}", "role": "user", "content": [{"text": out, "type": "text"}]}])
+                    for chunk in self.conversation.chain(out, system=self.system_prompt):
+                        if not response_started:
+                            # First chunk received, clear and stop the spinner so it disappears
+                            try:
+                                live.update(Text(""), refresh=True)
+                            except Exception:
+                                pass
+                            live.stop()
+                            response_started = True
+                            # Initialize streaming state
+                            ui.start_streaming(ui.LEFT_PADDING)
+
+                        # Stream each chunk as it arrives
+                        ui.stream_chunk(chunk, ui.LEFT_PADDING)
 
-                    # Stream each chunk as it arrives
-                    ui.stream_chunk(chunk, ui.LEFT_PADDING)
+                # Finish streaming and print any remaining text
+                if response_started:
+                    ui.end_streaming(ui.LEFT_PADDING)
+                ids = set([e["id"] for e in history])
+                new_responses = [e for e in self.conversation.responses if e.response_json["id"] not in ids]
+                if self.history_callback:
+                    self.history_callback([e.response_json for e in new_responses])
+
+                ui.print("")  # Add extra newline after bot response
+        except KeyboardInterrupt:
+            ui.print("")  # Add newlines
+            ui.print("[cyan]Thanks for using Bespoken. Goodbye![/cyan]")
+            ui.print("")  # Add final newline
+
+    def _dispatch_slash_command(self, command, args, user_commands):
+        """Dispatch slash command to appropriate handler."""
+        if command == "/quit":
+            return handle_quit(), self.conversation
+        elif command == "/help":
+            return self._handle_help(user_commands), self.conversation
+        elif command == "/tools":
+            return self._handle_tools(), self.conversation
+        elif command == "/debug":
+            return toggle_debug(), self.conversation
+        elif command == "/mode":
+            return self._handle_mode_command(args), self.conversation
+        elif command == "/modes":
+            return self._handle_modes_command(), self.conversation
+        elif command in user_commands:
+            return handle_user_command(command, user_commands[command]), self.conversation
+        else:
+            ui.print(f"[red]Unknown command: {command}[/red]")
+            ui.print("[dim]Type /help for available commands[/dim]")
+            ui.print("")
+            return COMMAND_HANDLED, self.conversation
+
+    def _handle_help(self, user_commands):
+        """Handle /help command with mode awareness."""
+        ui.print("[cyan]Built-in commands:[/cyan]")
+        ui.print("  /quit  - Exit the application")
+        ui.print("  /help  - Show this help message")
+        ui.print("  /tools - Show available tools")
+        ui.print("  /debug - Toggle debug mode")
+
+        if self._is_modes_enabled():
+            ui.print("  /mode  - Switch mode interactively or /mode ")
+            ui.print("  /modes - List available modes")
+            if len(self.available_modes) > 1:
+                ui.print("  [dim]Shift+TAB - Quick switch to next mode[/dim]")
+
+        if user_commands:
+            ui.print("")
+            ui.print("[cyan]Custom commands:[/cyan]")
+            for cmd_name, cmd_handler in user_commands.items():
+                if callable(cmd_handler):
+                    desc = cmd_handler.__doc__ or "Custom function"
+                    ui.print(f"  {cmd_name} - {desc}")
+                else:
+                    preview = str(cmd_handler)[:50] + "..." if len(str(cmd_handler)) > 50 else str(cmd_handler)
+                    ui.print(f"  {cmd_name} - {preview}")
+
+        ui.print("")
+        return COMMAND_HANDLED
+
+    def _handle_tools(self):
+        """Handle /tools command with mode awareness."""
+        current_tools = self._get_current_tools()
+
+        if self._is_modes_enabled():
+            ui.print(f"[cyan]Available tools in {self.current_mode} mode:[/cyan]")
+        else:
+            ui.print("[cyan]Available tools:[/cyan]")
+
+        if current_tools:
+            for tool in current_tools:
+                tool_name = getattr(tool, 'tool_name', type(tool).__name__)
+                ui.print(f"  {tool_name}")
+        else:
+            ui.print("[dim]No tools configured[/dim]")
+        ui.print("")
+        return COMMAND_HANDLED
+
+    def _handle_mode_command(self, args=""):
+        """Handle /mode command."""
+        if not self._is_modes_enabled():
+            ui.print("[red]Modes are not configured for this session[/red]")
+            ui.print("")
+            return COMMAND_HANDLED
+
+        if not args.strip():
+            # No mode specified, show interactive picker
+            try:
+                # Create choices with current mode indicator
+                choices = []
+                for mode in self.available_modes:
+                    if mode == self.current_mode:
+                        choices.append(f"{mode} (current)")
+                    else:
+                        choices.append(mode)
 
-            # Finish streaming and print any remaining text
-            if response_started:
-                ui.end_streaming(ui.LEFT_PADDING)
-            ids = set([e["id"] for e in history])
-            new_responses = [e for e in conversation.responses if e.response_json["id"] not in ids]
-            if history_callback:
-                history_callback([e.response_json for e in new_responses])
-
-            ui.print("")  # Add extra newline after bot response
-    except KeyboardInterrupt:
-        ui.print("")  # Add newlines
-        ui.print("[cyan]Thanks for using Bespoken. Goodbye![/cyan]")
-        ui.print("")  # Add final newline
+                selected = ui.choice("Select mode:", choices)
+                if selected:
+                    # Extract mode name (remove " (current)" if present)
+                    target_mode = selected.replace(" (current)", "")
+                    if target_mode != self.current_mode:
+                        self.switch_mode(target_mode)
+                    else:
+                        ui.print(f"[dim]Already in {target_mode} mode[/dim]")
+                        ui.print("")
+            except KeyboardInterrupt:
+                ui.print("[dim]Mode selection cancelled[/dim]")
+                ui.print("")
+            return COMMAND_HANDLED
+
+        # Switch to the specified mode
+        target_mode = args.strip()
+        self.switch_mode(target_mode)
+        return COMMAND_HANDLED
+
+    def _handle_modes_command(self):
+        """Handle /modes command."""
+        if not self._is_modes_enabled():
+            ui.print("[red]Modes are not configured for this session[/red]")
+            ui.print("")
+            return COMMAND_HANDLED
+
+        ui.print("[cyan]Available modes:[/cyan]")
+        for mode in self.available_modes:
+            if mode == self.current_mode:
+                ui.print(f"  {mode} [green](current)[/green]")
+            else:
+                ui.print(f"  {mode}")
+        ui.print("")
+        return COMMAND_HANDLED
+
+
+def chat(
+    debug: bool = typer.Option(False, "--debug", "-d", help="Enable debug mode to see LLM interactions"),
+    model_name: str = typer.Option("anthropic/claude-3-5-sonnet-20240620", "--model", "-m", help="LLM model to use"),
+    system_prompt: Optional[str] = typer.Option(None, "--system", "-s", help="System prompt for the assistant"),
+    tools: list = None,
+    slash_commands: dict = None,
+    history_callback: Optional[Callable] = None,
+    first_message: Optional[str] = None,
+    show_banner: bool = True,
+):
+    """Run the bespoken chat assistant."""
+    chat_instance = Chat(
+        debug=debug,
+        model_name=model_name,
+        system_prompt=system_prompt,
+        tools=tools,
+        slash_commands=slash_commands,
+        history_callback=history_callback,
+        first_message=first_message,
+        show_banner=show_banner,
+    )
+    chat_instance.run()
 
 
 def main():
diff --git a/src/bespoken/ui.py b/src/bespoken/ui.py
index 8bf5c43..af1c77b 100644
--- a/src/bespoken/ui.py
+++ b/src/bespoken/ui.py
@@ -10,6 +10,8 @@
 from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
 from prompt_toolkit.history import InMemoryHistory
 from prompt_toolkit.formatted_text import HTML
+from prompt_toolkit.key_binding import KeyBindings
+from prompt_toolkit.keys import Keys
 import questionary
 
 from .file_completer import create_completer
@@ -242,13 +244,29 @@ def stream(chunks, indent: int = LEFT_PADDING, wrap: bool = True) -> None:
             _console.print(f"[dim]{word_buffer}[/dim]", end="", highlight=False)
 
 
-def input(prompt_text: str, indent: int = LEFT_PADDING, completions: Optional[List[str]] = None) -> str:
+def input(prompt_text: str, indent: int = LEFT_PADDING, completions: Optional[List[str]] = None,
+          mode_switcher_callback = None, available_modes: Optional[List[str]] = None) -> str:
     """Get input with left padding and optional completions."""
     padded_prompt = " " * indent + prompt_text
 
     # Use combined completer for commands and file paths
     completer = create_completer(completions) if completions else None
 
+    # Create key bindings for mode switching
+    bindings = KeyBindings()
+
+    if mode_switcher_callback and available_modes:
+        @bindings.add('s-tab')  # Shift+Tab
+        def _(event):
+            """Switch to next mode on Shift+Tab"""
+            try:
+                next_mode = mode_switcher_callback()
+                if next_mode:
+                    # Show mode switch feedback
+                    _console.print(f"{' ' * indent}[dim]→ Switched to {next_mode} mode[/dim]")
+            except Exception:
+                pass  # Ignore errors in mode switching
+
     # Create a style with auto-suggestion preview in gray
     style = Style.from_dict({
         # Default text style
@@ -270,6 +288,7 @@ def input(prompt_text: str, indent: int = LEFT_PADDING, completions: Optional[Li
             auto_suggest=AutoSuggestFromHistory(),  # Suggest from history
             history=_command_history,  # Enable history with up/down arrows
             enable_history_search=False,  # Disable Ctrl+R search
+            key_bindings=bindings,  # Add custom key bindings
         )
         return result
     except (KeyboardInterrupt, EOFError):
diff --git a/teacher.py b/teacher.py
index efe4d2d..6ef0ffc 100644
--- a/teacher.py
+++ b/teacher.py
@@ -1,4 +1,4 @@
-from bespoken import chat
+from bespoken import Chat
 from bespoken.prompts import socratic_prompt
 from bespoken import ui
 
@@ -10,7 +10,7 @@ def set_voice():
     return f"You are now acting as a {role}. Please respond in character but stick to the topic of teaching."
 
 
-chat(
+Chat(
     model_name="anthropic/claude-3-5-sonnet-20240620",
     tools=[],
     system_prompt=socratic_prompt,
@@ -20,4 +20,4 @@ def set_voice():
     },
     first_message="I can teach you anything about a technical topic. What would you like to learn?",
     show_banner=False
-)
+).run()
diff --git a/test_app_import.py b/test_app_import.py
new file mode 100644
index 0000000..2bde0e7
--- /dev/null
+++ b/test_app_import.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python3
+"""Test that app.py can import and create the Chat instance without errors."""
+
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
+
+from unittest.mock import patch, MagicMock
+
+def test_app_import():
+    """Test that the app.py configuration works."""
+    print("Testing app.py configuration...")
+
+    # Mock the tools that app.py imports
+    with patch('bespoken.tools.FileTool') as mock_file_tool, \
+         patch('bespoken.tools.TodoTools') as mock_todo_tools, \
+         patch('bespoken.prompts.marimo_prompt') as mock_prompt, \
+         patch('llm.get_model') as mock_get_model:
+
+        # Set up mocks
+        mock_file_tool.return_value = MagicMock()
+        mock_todo_tools.return_value = MagicMock()
+        mock_prompt = "Mock marimo prompt"
+
+        mock_model = MagicMock()
+        mock_conversation = MagicMock()
+        mock_conversation.responses = []
+        mock_conversation.chain.return_value = iter([])
+        mock_model.conversation.return_value = mock_conversation
+        mock_get_model.return_value = mock_model
+
+        # Now try to create the Chat instance like app.py does
+        from bespoken import Chat
+
+        # Define tools for different modes like in app.py
+        file_tool = mock_file_tool("edit.py")
+        todo_tools = mock_todo_tools()
+
+        chat = Chat(
+            model_name="anthropic/claude-3-5-sonnet-20240620",
+            tools={
+                "development": [file_tool, todo_tools],
+                "review": [file_tool],
+                "planning": [],
+            },
+            mode_switch_messages={
+                "development": "You are now in development mode. You can edit files and manage todos. Focus on implementing features and fixing bugs.",
+                "review": "You are now in review mode. You can read files to understand the codebase but cannot make changes. Focus on analyzing code and providing feedback.",
+                "planning": "You are now in planning mode. You cannot access files or tools. Focus on high-level discussion, architecture planning, and strategic thinking.",
+            },
+            system_prompt=mock_prompt,
+            debug=True,
+            initial_mode="development",
+            slash_commands={
+                "/thinking": "Let me think through this step by step:",
+            },
+            show_banner=False  # Avoid banner for testing
+        )
+
+        print(f"✓ Chat instance created successfully")
+        print(f"✓ Initial mode: {chat.current_mode}")
+        print(f"✓ Available modes: {chat.get_available_modes()}")
+        print(f"✓ Mode switching enabled: {chat._is_modes_enabled()}")
+        print(f"✓ Can switch to next mode: {chat.switch_to_next_mode()}")
+
+        print("✓ App.py configuration works correctly!")
+
+if __name__ == "__main__":
+    test_app_import()
+    print("\n✅ All app.py import tests passed!")
\ No newline at end of file
diff --git a/test_chat_class.py b/test_chat_class.py
new file mode 100644
index 0000000..feebee3
--- /dev/null
+++ b/test_chat_class.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python3
+"""Test script for the new Chat class functionality."""
+
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
+
+from bespoken import Chat
+
+def test_no_modes():
+    """Test Chat class without modes (backward compatibility)."""
+    print("Testing Chat class without modes...")
+    tools = []  # Empty tools list for testing
+
+    # Mock the model initialization to avoid API calls
+    from unittest.mock import patch, MagicMock
+
+    with patch('llm.get_model') as mock_get_model:
+        mock_model = MagicMock()
+        mock_conversation = MagicMock()
+        mock_model.conversation.return_value = mock_conversation
+        mock_get_model.return_value = mock_model
+
+        chat = Chat(
+            tools=tools,
+            show_banner=False,
+            first_message="Testing Chat class without modes. Type /quit to exit."
+        )
+
+        print("✓ Chat instance created successfully")
+        print(f"✓ Modes enabled: {chat._is_modes_enabled()}")  # Should be False
+        print(f"✓ Available modes: {chat.get_available_modes()}")  # Should be empty
+        print(f"✓ Current tools: {len(chat._get_current_tools())}")
+
+        # Test would run chat.run() here but that's interactive
+
+
+def test_with_modes():
+    """Test Chat class with modes."""
+    print("\nTesting Chat class with modes...")
+
+    from unittest.mock import patch, MagicMock
+
+    # Mock tools for different modes
+    read_tool = MagicMock()
+    read_tool.tool_name = "read"
+    write_tool = MagicMock()
+    write_tool.tool_name = "write"
+    debug_tool = MagicMock()
+    debug_tool.tool_name = "debug"
+
+    tools = {
+        "normal": [read_tool, write_tool],
+        "plan": [read_tool],  # Read-only mode
+        "debug": [read_tool, write_tool, debug_tool]
+    }
+
+    mode_switch_messages = {
+        "plan": "You are now in plan mode. You can read files but cannot write them.",
+        "normal": "You are now in normal mode with full capabilities.",
+        "debug": "You are now in debug mode with additional debugging tools."
+    }
+
+    with patch('llm.get_model') as mock_get_model:
+        mock_model = MagicMock()
+        mock_conversation = MagicMock()
+        mock_conversation.responses = []
+        mock_conversation.chain.return_value = iter([])  # Empty iterator
+        mock_model.conversation.return_value = mock_conversation
+        mock_get_model.return_value = mock_model
+
+        chat = Chat(
+            tools=tools,
+            mode_switch_messages=mode_switch_messages,
+            initial_mode="normal",
+            show_banner=False,
+            first_message="Testing Chat class with modes. Type /modes to see available modes, /quit to exit."
+        )
+
+        print("✓ Chat instance with modes created successfully")
+        print(f"✓ Modes enabled: {chat._is_modes_enabled()}")  # Should be True
+        print(f"✓ Current mode: {chat.current_mode}")  # Should be 'normal'
+        print(f"✓ Available modes: {chat.get_available_modes()}")
+        print(f"✓ Current tools: {[t.tool_name for t in chat._get_current_tools()]}")
+
+        # Test mode switching
+        print("\nTesting mode switching...")
+        success = chat.switch_mode("plan")
+        print(f"✓ Switch to plan mode: {success}")
+        print(f"✓ Current mode: {chat.current_mode}")
+        print(f"✓ Current tools: {[t.tool_name for t in chat._get_current_tools()]}")
+
+        # Test switching to invalid mode
+        success = chat.switch_mode("invalid_mode")
+        print(f"✓ Switch to invalid mode (should fail): {success}")
+        print(f"✓ Current mode after failed switch: {chat.current_mode}")
+
+
+if __name__ == "__main__":
+    test_no_modes()
+    test_with_modes()
+    print("\n✅ All tests passed!")
\ No newline at end of file
diff --git a/test_keyboard_shortcut.py b/test_keyboard_shortcut.py
new file mode 100644
index 0000000..413cc96
--- /dev/null
+++ b/test_keyboard_shortcut.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+"""Test the keyboard shortcut functionality."""
+
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
+
+from unittest.mock import patch, MagicMock
+from bespoken import Chat
+
+def test_keyboard_shortcut():
+    """Test that the keyboard shortcut method works correctly."""
+    print("Testing keyboard shortcut for mode switching...")
+
+    # Mock tools for different modes
+    read_tool = MagicMock()
+    read_tool.tool_name = "read"
+    write_tool = MagicMock()
+    write_tool.tool_name = "write"
+
+    tools = {
+        "development": [read_tool, write_tool],
+        "review": [read_tool],
+        "planning": []
+    }
+
+    with patch('llm.get_model') as mock_get_model:
+        mock_model = MagicMock()
+        mock_conversation = MagicMock()
+        mock_conversation.responses = []
+        mock_conversation.chain.return_value = iter([])
+        mock_model.conversation.return_value = mock_conversation
+        mock_get_model.return_value = mock_model
+
+        chat = Chat(
+            tools=tools,
+            initial_mode="development",
+            show_banner=False,
+        )
+
+        print(f"✓ Initial mode: {chat.current_mode}")
+        print(f"✓ Available modes: {chat.get_available_modes()}")
+
+        # Test switching to next mode
+        next_mode = chat.switch_to_next_mode()
+        print(f"✓ After first switch: {chat.current_mode} (returned: {next_mode})")
+
+        # Test switching again (should cycle through all modes)
+        next_mode = chat.switch_to_next_mode()
+        print(f"✓ After second switch: {chat.current_mode} (returned: {next_mode})")
+
+        # Test switching again (should cycle back to first)
+        next_mode = chat.switch_to_next_mode()
+        print(f"✓ After third switch: {chat.current_mode} (returned: {next_mode})")
+
+        # Test with single mode (should return None)
+        single_mode_chat = Chat(
+            tools={"only": [read_tool]},
+            initial_mode="only",
+            show_banner=False,
+        )
+        result = single_mode_chat.switch_to_next_mode()
+        print(f"✓ Single mode switch result: {result} (should be None)")
+
+        print("✓ Keyboard shortcut functionality works correctly!")
+
+if __name__ == "__main__":
+    test_keyboard_shortcut()
+    print("\n✅ All keyboard shortcut tests passed!")
\ No newline at end of file
diff --git a/test_mode_ui.py b/test_mode_ui.py
new file mode 100644
index 0000000..c5e8b37
--- /dev/null
+++ b/test_mode_ui.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+"""Test the mode UI picker functionality."""
+
+import sys
+import os
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
+
+from unittest.mock import patch, MagicMock
+from bespoken import Chat
+
+def test_mode_ui_picker():
+    """Test that the mode picker UI works correctly."""
+    print("Testing mode UI picker...")
+
+    # Mock tools for different modes
+    read_tool = MagicMock()
+    read_tool.tool_name = "read"
+    write_tool = MagicMock()
+    write_tool.tool_name = "write"
+
+    tools = {
+        "development": [read_tool, write_tool],
+        "review": [read_tool],
+        "planning": []
+    }
+
+    with patch('llm.get_model') as mock_get_model:
+        mock_model = MagicMock()
+        mock_conversation = MagicMock()
+        mock_conversation.responses = []
+        mock_conversation.chain.return_value = iter([])
+        mock_model.conversation.return_value = mock_conversation
+        mock_get_model.return_value = mock_model
+
+        chat = Chat(
+            tools=tools,
+            initial_mode="development",
+            show_banner=False,
+        )
+
+        print(f"✓ Initial mode: {chat.current_mode}")
+
+        # Test the mode command handler with mock choice
+        # (patch the module as imported, i.e. 'bespoken.ui', so the handler sees the mock)
+        with patch('bespoken.ui.choice') as mock_choice:
+            # Test selecting a different mode
+            mock_choice.return_value = "review"
+            result = chat._handle_mode_command("")
+            print(f"✓ Mode command handler returned: {result}")
+
+            # Test the choices that would be presented
+            expected_choices = ["development (current)", "review", "planning"]
+            mock_choice.assert_called_with("Select mode:", expected_choices)
+            print(f"✓ Choice UI called with correct options")
+
+        print("✓ Mode UI picker functionality works correctly!")
+
+if __name__ == "__main__":
+    test_mode_ui_picker()
+    print("\n✅ All mode UI tests passed!")
\ No newline at end of file