diff --git a/.aic/graph.db b/.aic/graph.db
new file mode 100644
index 00000000..cc58f813
Binary files /dev/null and b/.aic/graph.db differ
diff --git a/README.md b/README.md
index e6a57dc6..fa3228cb 100644
--- a/README.md
+++ b/README.md
@@ -15,6 +15,7 @@ The philosophy behind Conductor is simple: control your code. By treating context
 - **Iterate safely**: Review plans before code is written, keeping you firmly in the loop.
 - **Work as a team**: Set project-level context for your product, tech stack, and workflow preferences that become a shared foundation for your team.
 - **Build on existing projects**: Intelligent initialization for both new (Greenfield) and existing (Brownfield) projects.
+- **Semantic Awareness (AIC)**: Automatically indexes your codebase into "Rich Skeletons" using the AI Compiler (AIC). This functionality is powered by a local **Model Context Protocol (MCP)** server that exposes tools for semantic indexing and context retrieval (`aic_index`, `aic_get_file_context`) directly to the Gemini agent.
 - **Smart revert**: A git-aware revert command that understands logical units of work (tracks, phases, tasks) rather than just commit hashes.
 
 ## Installation
@@ -112,8 +113,21 @@ During implementation, you can also:
 | `/conductor:status` | Displays the current progress of the tracks file and active tracks. | Reads `conductor/tracks.md` |
 | `/conductor:revert` | Reverts a track, phase, or task by analyzing git history. | Reverts git history |
 
+## Architecture
+
+Conductor leverages the **Model Context Protocol (MCP)** to provide deep, local integration with your codebase.
+
+- **Client**: The Gemini CLI acts as the MCP client.
+- **Server**: The `aic` package runs as a local MCP server (`python3 -m aic.server`).
+- **Tools**: The server exposes the following tools to the agent:
+  - `aic_index`: Builds/updates the semantic dependency graph.
+  - `aic_get_file_context`: Retrieves token-optimized skeletons for files and their dependencies.
+  - `aic_list_directory`: Provides filesystem visibility.
+  - `aic_run_shell_command`: Allows safe execution of setup and maintenance commands.
+
 ## Resources
 
+- [AI Compiler Patent](https://www.tdcommons.org/dpubs_series/8241/): Semantic Dependency Graph for AI Agents
 - [Gemini CLI extensions](https://geminicli.com/docs/extensions/): Documentation about using extensions in Gemini CLI
 - [GitHub issues](https://github.com/gemini-cli-extensions/conductor/issues): Report bugs or request features
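To make the "Rich Skeleton" format concrete, here is a small, hypothetical example of what the skeletonizer (introduced in `aic/skeleton.py` below) produces for a Python file. The effects comment comes from a static scan of `return`/`raise`/call sites, so the exact `CALLS` ordering may vary:

```python
# Input: users.py (hypothetical)
def load_user(user_id: int) -> dict:
    """Fetch a user record."""
    if user_id < 0:
        raise ValueError("bad id")
    return db.fetch(user_id)

# Resulting Rich Skeleton (roughly):
# def load_user(user_id: int) -> dict:
#     """Fetch a user record."""
#     # RETURNS: db.fetch(user_id) | RAISES: ValueError('bad id') | CALLS: db.fetch | ValueError
#     ...
```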
diff --git a/aic/__init__.py b/aic/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/aic/cli.py b/aic/cli.py
new file mode 100644
index 00000000..55061b2d
--- /dev/null
+++ b/aic/cli.py
@@ -0,0 +1,99 @@
+import argparse
+import os
+from aic.db import init_db, upsert_node, get_node, get_dependencies, update_edges, mark_dirty
+from aic.skeleton import UniversalSkeletonizer
+from aic.utils import calculate_hash, resolve_dep_to_path, get_ignore_patterns, should_ignore
+
+def index_repo(root_dir="."):
+    init_db()
+    skeletonizer = UniversalSkeletonizer()
+    ignore_patterns = get_ignore_patterns(root_dir)
+
+    indexed_count = 0
+
+    for root, dirs, files in os.walk(root_dir):
+        # Exclusions
+        dirs[:] = [d for d in dirs if not should_ignore(d, ignore_patterns)]
+
+        for file in files:
+            if should_ignore(file, ignore_patterns):
+                continue
+
+            file_path = os.path.join(root, file)
+            rel_path = os.path.relpath(file_path, root_dir)
+
+            # Skip non-text files: attempt a strict UTF-8 read and
+            # treat any decode failure as "binary".
+            try:
+                with open(file_path, 'r', encoding='utf-8', errors='strict') as f:
+                    content = f.read()
+            except UnicodeDecodeError:
+                continue
+            except Exception as e:
+                print(f"Skipping {rel_path}: {e}")
+                continue
+
+            current_hash = calculate_hash(content)
+            existing = get_node(rel_path)
+
+            if existing and existing['hash'] == current_hash:
+                continue
+
+            print(f"Indexing: {rel_path}")
+            skeleton, dependencies = skeletonizer.skeletonize(content, rel_path)
+            upsert_node(rel_path, current_hash, skeleton)
+            mark_dirty(rel_path)
+
+            # Resolve dependencies to file paths
+            resolved_deps = []
+            for dep in dependencies:
+                resolved = resolve_dep_to_path(dep, rel_path, root_dir)
+                if resolved:
+                    resolved_deps.append(resolved)
+
+            update_edges(rel_path, resolved_deps)
+            indexed_count += 1
+
+    print(f"Finished indexing. Processed {indexed_count} files.")
+
+def get_context(file_path):
+    node = get_node(file_path)
+    if not node:
+        return f"# Error: {file_path} not indexed."
+
+    output = [f"# Context for {file_path}", node['skeleton'], ""]
+
+    deps = get_dependencies(file_path)
+    if deps:
+        output.append("## Dependencies")
+        for dep in deps:
+            dep_node = get_node(dep)
+            if dep_node:
+                output.append(f"### {dep}")
+                output.append(dep_node['skeleton'])
+                output.append("")
+
+    return "\n".join(output)
+
+def main():
+    parser = argparse.ArgumentParser(description="AIC: AI Compiler")
+    subparsers = parser.add_subparsers(dest="command")
+
+    subparsers.add_parser("index")
+
+    context_parser = subparsers.add_parser("context")
+    context_parser.add_argument("file")
+
+    args = parser.parse_args()
+
+    if args.command == "index":
+        index_repo()
+    elif args.command == "context":
+        print(get_context(args.file))
+    else:
+        parser.print_help()
+
+if __name__ == "__main__":
+    main()
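Because `index_repo` skips files whose content hash is unchanged, a second run over the same tree is nearly free. A minimal smoke test of that behavior (a sketch; it assumes the `aic` package is importable and runs from a scratch directory, since `DB_PATH` is resolved relative to the CWD):

```python
import os
import tempfile
from aic.cli import index_repo

orig_cwd = os.getcwd()
with tempfile.TemporaryDirectory() as tmp:
    os.chdir(tmp)
    with open("hello.py", "w") as f:
        f.write('def hi():\n    return "hi"\n')

    index_repo(".")  # first run: prints "Indexing: hello.py" plus a summary
    index_repo(".")  # second run: hash unchanged, so "Processed 0 files."
    os.chdir(orig_cwd)
```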
+ + output = [f"# Context for {file_path}", node['skeleton'], ""] + + deps = get_dependencies(file_path) + if deps: + output.append("## Dependencies") + for dep in deps: + dep_node = get_node(dep) + if dep_node: + output.append(f"### {dep}") + output.append(dep_node['skeleton']) + output.append("") + + return "\n".join(output) + +def main(): + parser = argparse.ArgumentParser(description="AIC: AI Compiler") + subparsers = parser.add_subparsers(dest="command") + + subparsers.add_parser("index") + + context_parser = subparsers.add_parser("context") + context_parser.add_argument("file") + + args = parser.parse_args() + + if args.command == "index": + index_repo() + print("Finished indexing.") + elif args.command == "context": + print(get_context(args.file)) + else: + parser.print_help() + +if __name__ == "__main__": + main() diff --git a/aic/db.py b/aic/db.py new file mode 100644 index 00000000..ccd8058e --- /dev/null +++ b/aic/db.py @@ -0,0 +1,65 @@ +import sqlite3 +import os + +DB_PATH = ".aic/graph.db" + +def get_connection(): + os.makedirs(os.path.dirname(DB_PATH), exist_ok=True) + conn = sqlite3.connect(DB_PATH) + conn.row_factory = sqlite3.Row + return conn + +def init_db(): + with get_connection() as conn: + conn.execute(""" + CREATE TABLE IF NOT EXISTS nodes ( + path TEXT PRIMARY KEY, + hash TEXT, + skeleton TEXT, + status TEXT DEFAULT 'CLEAN' + ) + """) + conn.execute(""" + CREATE TABLE IF NOT EXISTS edges ( + source TEXT, + target TEXT, + PRIMARY KEY (source, target), + FOREIGN KEY(source) REFERENCES nodes(path) + ) + """) + +def upsert_node(path, hash_val, skeleton): + with get_connection() as conn: + conn.execute(""" + INSERT INTO nodes (path, hash, skeleton, status) + VALUES (?, ?, ?, 'CLEAN') + ON CONFLICT(path) DO UPDATE SET + hash = excluded.hash, + skeleton = excluded.skeleton, + status = 'CLEAN' + """, (path, hash_val, skeleton)) + +def mark_dirty(path): + """Mark all nodes that depend on this path as DIRTY.""" + with get_connection() as conn: + conn.execute(""" + UPDATE nodes + SET status = 'DIRTY' + WHERE path IN ( + SELECT source FROM edges WHERE target = ? + ) + """, (path,)) + +def update_edges(source_path, target_paths): + with get_connection() as conn: + conn.execute("DELETE FROM edges WHERE source = ?", (source_path,)) + for target in target_paths: + conn.execute("INSERT OR IGNORE INTO edges (source, target) VALUES (?, ?)", (source_path, target)) + +def get_node(path): + with get_connection() as conn: + return conn.execute("SELECT * FROM nodes WHERE path = ?", (path,)).fetchone() + +def get_dependencies(path): + with get_connection() as conn: + return [row['target'] for row in conn.execute("SELECT target FROM edges WHERE source = ?", (path,)).fetchall()] diff --git a/aic/server.py b/aic/server.py new file mode 100644 index 00000000..7d1b3f56 --- /dev/null +++ b/aic/server.py @@ -0,0 +1,123 @@ +import asyncio +import os +import sys +import logging +from mcp.server.fastmcp import FastMCP + +from aic.db import init_db, upsert_node, update_edges, mark_dirty +from aic.skeleton import UniversalSkeletonizer +from aic.utils import calculate_hash, resolve_dep_to_path, get_ignore_patterns, should_ignore +from aic.cli import get_context + +# Initialize the server +mcp = FastMCP("aic") + +@mcp.tool() +async def aic_index(root_dir: str = ".") -> str: + """ + Indexes the repository to build a semantic dependency graph. + Scans for Python, TypeScript/JavaScript, and Go files, generates skeletons, and updates the SQLite database. 
+ """ + init_db() + skeletonizer = UniversalSkeletonizer() + indexed_count = 0 + + # Ensure we use absolute path for walking + abs_root_dir = os.path.abspath(root_dir) + ignore_patterns = get_ignore_patterns(abs_root_dir) + + for root, dirs, files in os.walk(abs_root_dir): + # Exclusions + dirs[:] = [d for d in dirs if not should_ignore(d, ignore_patterns)] + + for file in files: + if should_ignore(file, ignore_patterns): + continue + + file_path = os.path.join(root, file) + rel_path = os.path.relpath(file_path, abs_root_dir) + + # Skip non-text files to avoid reading binaries + try: + with open(file_path, 'r', encoding='utf-8', errors='strict') as f: + content = f.read() + except UnicodeDecodeError: + continue + except Exception as e: + print(f"Skipping {rel_path}: {e}") + continue + + current_hash = calculate_hash(content) + + skeleton, dependencies = skeletonizer.skeletonize(content, rel_path) + upsert_node(rel_path, current_hash, skeleton) + mark_dirty(rel_path) + + # Resolve dependencies to file paths + resolved_deps = [] + for dep in dependencies: + resolved = resolve_dep_to_path(dep, rel_path, abs_root_dir) + if resolved: + resolved_deps.append(resolved) + + update_edges(rel_path, resolved_deps) + indexed_count += 1 + + return f"Successfully indexed {indexed_count} files in {abs_root_dir}" + +@mcp.tool() +async def aic_get_file_context(file_path: str) -> str: + """ + Retrieves the extensive context for a file, including its skeleton and its direct dependencies' skeletons. + """ + try: + return get_context(file_path) + except Exception as e: + return f"Error retrieving context for {file_path}: {str(e)}" + +@mcp.tool() +async def aic_list_directory(path: str = ".") -> str: + """ + Lists the files and directories in the specified path. + """ + try: + abs_path = os.path.abspath(path) + if not os.path.exists(abs_path): + return f"Error: Path '{path}' not found." + + items = [] + for name in os.listdir(abs_path): + full_path = os.path.join(abs_path, name) + is_dir = os.path.isdir(full_path) + items.append(f"{name}{'/' if is_dir else ''}") + + return "\n".join(sorted(items)) + except Exception as e: + return f"Error listing directory '{path}': {str(e)}" + +@mcp.tool() +async def aic_run_shell_command(command: str, cwd: str = ".") -> str: + """ + Executes a shell command. + """ + try: + process = await asyncio.create_subprocess_shell( + command, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + cwd=cwd + ) + stdout, stderr = await process.communicate() + + output = f"Exit Code: {process.returncode}\n" + if stdout: + output += f"\nStandard Output:\n{stdout.decode().strip()}" + if stderr: + output += f"\nStandard Error:\n{stderr.decode().strip()}" + + return output + except Exception as e: + return f"Error executing command: {str(e)}" + +if __name__ == "__main__": + mcp.run() diff --git a/aic/skeleton.py b/aic/skeleton.py new file mode 100644 index 00000000..0828b357 --- /dev/null +++ b/aic/skeleton.py @@ -0,0 +1,211 @@ +import ast +import os +import re + +class PythonSkeletonizer(ast.NodeVisitor): +# ... 
diff --git a/aic/skeleton.py b/aic/skeleton.py
new file mode 100644
index 00000000..0828b357
--- /dev/null
+++ b/aic/skeleton.py
@@ -0,0 +1,211 @@
+import ast
+import os
+import re
+
+class PythonSkeletonizer(ast.NodeVisitor):
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        self.skeleton = []
+        self.dependencies = set()
+        self.imports = []
+
+    def skeletonize(self, source_code, path):
+        self.reset()
+        try:
+            tree = ast.parse(source_code)
+        except Exception as e:
+            return f"# ERROR: Failed to parse {path}: {str(e)}", set()
+
+        self.visit(tree)
+        return "\n".join(self.skeleton), self.dependencies
+
+    def visit_Import(self, node):
+        for alias in node.names:
+            self.dependencies.add(alias.name)
+            self.imports.append(f"import {alias.name}")
+
+    def visit_ImportFrom(self, node):
+        module = node.module or ""
+        level = node.level
+        # Preserve the leading dots of relative imports
+        prefix = "." * level if level > 0 else ""
+        full_module = prefix + module
+
+        for alias in node.names:
+            self.dependencies.add(full_module)
+            self.imports.append(f"from {full_module} import {alias.name}")
+
+    def visit_ClassDef(self, node):
+        # Extract class signature
+        self.skeleton.append(f"class {node.name}:")
+        docstring = ast.get_docstring(node)
+        if docstring:
+            self.skeleton.append(f'    """{docstring}"""')
+
+        # Visit the class body into a temporary buffer, then indent it
+        old_skeleton = self.skeleton
+        self.skeleton = []
+        self.generic_visit(node)
+        inner = self.skeleton
+        self.skeleton = old_skeleton
+        for line in inner:
+            self.skeleton.append(f"    {line}")
+        self.skeleton.append("")  # Spacer
+
+    def visit_FunctionDef(self, node):
+        self._skeletonize_func(node)
+
+    def visit_AsyncFunctionDef(self, node):
+        self._skeletonize_func(node, is_async=True)
+
+    def _skeletonize_func(self, node, is_async=False):
+        prefix = "async " if is_async else ""
+        args = ast.unparse(node.args) if hasattr(ast, 'unparse') else "..."
+        returns = f" -> {ast.unparse(node.returns)}" if hasattr(ast, 'unparse') and node.returns else ""
+
+        signature = f"{prefix}def {node.name}({args}){returns}:"
+        self.skeleton.append(signature)
+
+        docstring = ast.get_docstring(node)
+        if docstring:
+            self.skeleton.append(f'    """{docstring}"""')
+
+        # Effects analysis
+        effects = self._analyze_effects(node)
+        if effects:
+            self.skeleton.append(f"    # {effects}")
+
+        self.skeleton.append("    ...")
+        self.skeleton.append("")  # Spacer
+
+    def _analyze_effects(self, node):
+        returns = []
+        raises = []
+        calls = []
+
+        for child in ast.walk(node):
+            if isinstance(child, ast.Return):
+                if child.value:
+                    try:
+                        returns.append(ast.unparse(child.value))
+                    except Exception:
+                        returns.append("some_value")
+            elif isinstance(child, ast.Raise):
+                if child.exc:
+                    try:
+                        raises.append(ast.unparse(child.exc))
+                    except Exception:
+                        raises.append("Exception")
+            elif isinstance(child, ast.Call):
+                try:
+                    calls.append(ast.unparse(child.func))
+                except Exception:
+                    pass
+
+        res = []
+        if returns: res.append(f"RETURNS: {' | '.join(list(set(returns))[:3])}")
+        if raises: res.append(f"RAISES: {' | '.join(list(set(raises))[:3])}")
+        if calls: res.append(f"CALLS: {' | '.join(list(set(calls))[:5])}")
+
+        return " | ".join(res)
+
+class TypeScriptSkeletonizer:
+    def skeletonize(self, source_code, path):
+        skeleton = []
+        dependencies = set()
+
+        # Simple regex for imports
+        import_matches = re.findall(r'import\s+.*?\s+from\s+[\'"](.*?)[\'"]', source_code)
+        for dep in import_matches:
+            dependencies.add(dep)
+
+        # Walk the lines, keeping imports, declarations, and JSDoc comments
+        lines = source_code.splitlines()
+        i = 0
+        while i < len(lines):
+            line = lines[i]
+            # Capture imports
+            if line.strip().startswith('import '):
+                skeleton.append(line)
+            # Capture class/interface/function signatures
+            elif re.search(r'\b(class|interface|function|enum|type|const|let|async)\b', line):
+                if '{' in line:
+                    # The declaration opens a block: keep only the signature.
+                    # Naive, but works for single-line declaration headers.
+                    sig = line.split('{')[0].strip()
+                    if sig:
+                        skeleton.append(sig + " { ... }")
+                else:
+                    skeleton.append(line)
+            # Capture JSDoc
+            elif line.strip().startswith('/**'):
+                doc = [line]
+                while i + 1 < len(lines) and not lines[i].strip().endswith('*/'):
+                    i += 1
+                    doc.append(lines[i])
+                skeleton.append("\n".join(doc))
+            i += 1
+
+        return "\n".join(skeleton), dependencies
+
+class GoSkeletonizer:
+    def skeletonize(self, source_code, path):
+        skeleton = []
+        dependencies = set()
+
+        # Imports come in two shapes:
+        #   single line: import "fmt"
+        #   block:       import ( "fmt" "os" )
+        import_block = re.search(r'import\s*\((.*?)\)', source_code, re.DOTALL)
+        if import_block:
+            deps = re.findall(r'[\'"](.*?)[\'"]', import_block.group(1))
+            dependencies.update(deps)
+
+        single_imports = re.findall(r'import\s+[\'"](.*?)[\'"]', source_code)
+        dependencies.update(single_imports)
+
+        lines = source_code.splitlines()
+        i = 0
+        while i < len(lines):
+            line = lines[i]
+            # Capture package
+            if line.strip().startswith('package '):
+                skeleton.append(line)
+            # Capture func/type/struct/interface
+            elif re.search(r'\b(func|type|struct|interface|const|var)\b', line):
+                if '{' in line:
+                    sig = line.split('{')[0].strip()
+                    skeleton.append(sig + " { ... }")
+                else:
+                    skeleton.append(line)
+            # Capture comments
+            elif line.strip().startswith('//'):
+                skeleton.append(line)
+            i += 1
+
+        return "\n".join(skeleton), dependencies
+
+class UniversalSkeletonizer:
+    def __init__(self):
+        self.py_skeletonizer = PythonSkeletonizer()
+        self.ts_skeletonizer = TypeScriptSkeletonizer()
+        self.go_skeletonizer = GoSkeletonizer()
+
+    def skeletonize(self, source_code, path):
+        if path.endswith('.py'):
+            return self.py_skeletonizer.skeletonize(source_code, path)
+        elif path.endswith(('.ts', '.tsx', '.js', '.jsx')):
+            return self.ts_skeletonizer.skeletonize(source_code, path)
+        elif path.endswith('.go'):
+            return self.go_skeletonizer.skeletonize(source_code, path)
+        else:
+            # For unsupported file types, treat the content itself as the skeleton.
+            # Limit size to avoid DB bloat (e.g. 100KB).
+            if len(source_code) > 100 * 1024:
+                return f"# Content truncated (size: {len(source_code)} bytes)\n" + source_code[:100*1024] + "...", set()
+            return source_code, set()
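The dispatcher picks a language by file extension and falls back to raw content for anything else. A quick check of both paths (file names hypothetical; output shown roughly, since effects ordering can vary):

```python
from aic.skeleton import UniversalSkeletonizer

u = UniversalSkeletonizer()

skeleton, deps = u.skeletonize("def f():\n    return 1\n", "a.py")  # AST-based path
print(skeleton)
# def f():
#     # RETURNS: 1
#     ...

skeleton, deps = u.skeletonize("# just docs", "notes.md")  # fallback path
print(skeleton)  # the raw content is stored as the skeleton
```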
diff --git a/aic/utils.py b/aic/utils.py
new file mode 100644
index 00000000..f2e4f6da
--- /dev/null
+++ b/aic/utils.py
@@ -0,0 +1,130 @@
+import hashlib
+import os
+import fnmatch
+import re
+
+def calculate_hash(content):
+    if isinstance(content, str):
+        content = content.encode('utf-8')
+    return hashlib.sha256(content).hexdigest()
+
+def get_ignore_patterns(root_dir):
+    """
+    Loads ignore patterns from .geminiignore and .gitignore, plus defaults.
+    """
+    # Defaults
+    patterns = {'.git', '.aic', '__pycache__', 'node_modules', '.DS_Store', 'venv', '.venv', 'env', '.env', 'dist', 'build'}
+
+    for filename in ['.geminiignore', '.gitignore']:
+        path = os.path.join(root_dir, filename)
+        if os.path.exists(path):
+            try:
+                with open(path, 'r') as f:
+                    for line in f:
+                        line = line.strip()
+                        if not line or line.startswith('#'):
+                            continue
+                        # Normalize pattern: strip trailing slashes for simple name matching.
+                        # This is a naive implementation; full gitignore semantics are complex.
+                        clean_line = line.rstrip('/')
+                        if clean_line:
+                            patterns.add(clean_line)
+            except Exception:
+                pass  # Fail silently on read errors
+
+    return list(patterns)
+
+def should_ignore(name, patterns):
+    """
+    Checks if a name matches any of the ignore patterns.
+    """
+    for pattern in patterns:
+        if fnmatch.fnmatch(name, pattern):
+            return True
+    return False
+
+def resolve_dep_to_path(dep_name, current_file, root_dir):
+    """Heuristically resolve a module name to a file path inside the repo."""
+    if not dep_name:
+        return None
+
+    # Ensure absolute paths for processing
+    abs_root = os.path.abspath(root_dir)
+    # current_file might be relative to root_dir
+    abs_current_file = os.path.abspath(os.path.join(abs_root, current_file))
+    curr_dir = os.path.dirname(abs_current_file)
+
+    # Determine language from current_file
+    ext = os.path.splitext(current_file)[1]
+
+    # 1. Handle relative imports. The leading dots give the number of levels
+    #    to walk up: ".module" stays in the current directory, "..module"
+    #    resolves against the parent, and "./ts" (JS/TS style) resolves
+    #    alongside the importing file.
+    if dep_name.startswith('.'):
+        m = re.match(r'^(\.+)(.*)$', dep_name)
+        dots = m.group(1)
+        rel_path = m.group(2).lstrip('/\\').replace('.', os.sep)
+        levels = len(dots)
+
+        target_dir = curr_dir
+        for _ in range(levels - 1):
+            target_dir = os.path.dirname(target_dir)
+
+        if len(target_dir) < len(abs_root):
+            target_dir = abs_root
+
+        base_path = os.path.join(target_dir, rel_path)
+    else:
+        # 2. Handle absolute/package imports by resolving relative to root_dir
+        base_path = os.path.join(abs_root, dep_name.replace('.', os.sep))
+
+    # 3. Language-specific candidates
+    candidates = []
+    if ext == '.py':
+        candidates = [
+            base_path + ".py",
+            os.path.join(base_path, "__init__.py")
+        ]
+    elif ext in ('.ts', '.tsx', '.js', '.jsx'):
+        candidates = [
+            base_path + ".ts",
+            base_path + ".tsx",
+            base_path + ".js",
+            base_path + ".jsx",
+            os.path.join(base_path, "index.ts"),
+            os.path.join(base_path, "index.js")
+        ]
+    elif ext == '.go':
+        candidates = [
+            base_path + ".go",
+            os.path.join(base_path, "main.go")
+        ]
+        # A Go import names a package directory; fall back to the first
+        # .go file found inside it.
+        if os.path.isdir(base_path):
+            try:
+                for f in os.listdir(base_path):
+                    if f.endswith('.go'):
+                        candidates.append(os.path.join(base_path, f))
+                        break
+            except Exception:
+                pass
+    else:
+        candidates = [base_path, base_path + ext]
+
+    for cand in candidates:
+        if os.path.exists(cand):
+            return os.path.relpath(cand, abs_root)
+
+    return None
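In practice the resolver behaves as follows (these cases mirror `tests/test_resolution.py` further down; it returns `None` when no candidate exists on disk):

```python
from aic.utils import resolve_dep_to_path

resolve_dep_to_path("..pkg2.mod2", "pkg1/mod1.py", ".")  # -> "pkg2/mod2.py"
resolve_dep_to_path("pkg2.mod2", "pkg1/mod1.py", ".")    # -> "pkg2/mod2.py"
resolve_dep_to_path("./ts", "main.ts", ".")              # -> "ts/index.ts"
resolve_dep_to_path("go", "main.go", ".")                # -> first .go file in go/
```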
+ """ + # Defaults + patterns = {'.git', '.aic', '__pycache__', 'node_modules', '.DS_Store', 'venv', '.venv', 'env', '.env', 'dist', 'build'} + + for filename in ['.geminiignore', '.gitignore']: + path = os.path.join(root_dir, filename) + if os.path.exists(path): + try: + with open(path, 'r') as f: + for line in f: + line = line.strip() + if not line or line.startswith('#'): + continue + # Normalize pattern: remove leading/trailing slashes for simple matching + # This is a naive implementation; proper gitignore handling is complex + clean_line = line.rstrip('/') + if clean_line: + patterns.add(clean_line) + except Exception: + pass # Fail silently on read errors + + return list(patterns) + +def should_ignore(name, patterns): + """ + Checks if a name matches any of the ignore patterns. + """ + for pattern in patterns: + if fnmatch.fnmatch(name, pattern): + return True + return False + +def resolve_dep_to_path(dep_name, current_file, root_dir): + """Refined heuristic to resolve module name to file path.""" + if not dep_name: + return None + + # Ensure absolute paths for processing + abs_root = os.path.abspath(root_dir) + # current_file might be relative to root_dir + abs_current_file = os.path.abspath(os.path.join(abs_root, current_file)) + curr_dir = os.path.dirname(abs_current_file) + + # Determine language from current_file + ext = os.path.splitext(current_file)[1] + + # 1. Handle Relative Imports + if dep_name.startswith('.'): + levels = 0 + temp_dep = dep_name + while temp_dep.startswith('.'): + levels += 1 + temp_dep = temp_dep[1:] + + # .module (levels=1) -> same dir + # ..module (levels=2) -> parent dir + # ./ts (levels=1, temp_dep='/ts' or levels=2, temp_dep='ts'?) + # Actually: + # ./ts starts with '.', levels=1, temp_dep='ts' if we handle it right. + # But wait: './ts' -> starts with '.', one dot. Then '/ts'. + # Let's be more robust: + m = re.match(r'^(\.+)(.*)$', dep_name) + dots = m.group(1) + rel_path = m.group(2).lstrip('/\\').replace('.', os.sep) + levels = len(dots) + + target_dir = curr_dir + for _ in range(levels - 1): + target_dir = os.path.dirname(target_dir) + + if len(target_dir) < len(abs_root): + target_dir = abs_root + + base_path = os.path.join(target_dir, rel_path) + else: + # 2. Handle Absolute/Package Imports + # Try resolving relative to root_dir + base_path = os.path.join(abs_root, dep_name.replace('.', os.sep)) + + # 3. Language specific candidates + candidates = [] + if ext == '.py': + candidates = [ + base_path + ".py", + os.path.join(base_path, "__init__.py") + ] + elif ext in ('.ts', '.tsx', '.js', '.jsx'): + candidates = [ + base_path + ".ts", + base_path + ".tsx", + base_path + ".js", + base_path + ".jsx", + os.path.join(base_path, "index.ts"), + os.path.join(base_path, "index.js") + ] + elif ext == '.go': + candidates = [ + base_path + ".go", + os.path.join(base_path, "main.go") + ] + if os.path.isdir(base_path): + try: + for f in os.listdir(base_path): + if f.endswith('.go'): + candidates.append(os.path.join(base_path, f)) + break + except Exception: + pass + else: + candidates = [base_path, base_path + ext] + + for cand in candidates: + if os.path.exists(cand): + return os.path.relpath(cand, abs_root) + + return None diff --git a/commands/conductor/implement.toml b/commands/conductor/implement.toml index 9988a6c8..3603f265 100644 --- a/commands/conductor/implement.toml +++ b/commands/conductor/implement.toml @@ -66,6 +66,9 @@ CRITICAL: You must validate the success of every tool call. 
diff --git a/commands/conductor/newTrack.toml b/commands/conductor/newTrack.toml
index aab88e8b..c3919793 100644
--- a/commands/conductor/newTrack.toml
+++ b/commands/conductor/newTrack.toml
@@ -72,6 +72,9 @@ CRITICAL: You must validate the success of every tool call. If any tool call fails
     * Examples include reproduction steps for bugs, specific scope for chores, or success criteria.
     * Tailor the questions to the specific request.
+    * **Semantic Context (Optional):** If the track involves complex interactions with existing code, you SHOULD call the `aic_get_file_context` tool for relevant files to better understand the API contracts and dependencies. This will help you generate a more accurate specification and plan.
+
 3. **Draft `spec.md`:** Once sufficient information is gathered, draft the content for the track's `spec.md` file, including sections like Overview, Functional Requirements, Non-Functional Requirements (if any), Acceptance Criteria, and Out of Scope.
 
 4. **User Confirmation:** Present the drafted `spec.md` content to the user for review and approval.
diff --git a/commands/conductor/setup.toml b/commands/conductor/setup.toml
index 2f6850c3..a4611bd3 100644
--- a/commands/conductor/setup.toml
+++ b/commands/conductor/setup.toml
@@ -343,6 +343,10 @@ CRITICAL: When determining model complexity, ALWAYS select the "flash" model, regardless
 2. **Summarize Actions:** Present a summary of all actions taken during Phase 1, including:
    - The guide files that were copied.
    - The workflow file that was copied.
+
+2. **Semantic Indexing (AIC):**
+   - **Announce Action:** Inform the user: "Finally, I will perform a semantic indexing of your project using AIC (AI Compiler). This creates a lightweight dependency graph to help me understand your code better."
+   - **Action:** Call the `aic_index` tool to index the repository.
 
 3. **Transition to initial plan and track generation:** Announce that the initial setup is complete and you will now proceed to define the first track for the project.
 ---
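A note on wiring: the manifest change below is what makes these tools available at all — the Gemini CLI reads `mcpServers` and spawns `python3 -m aic.server` over stdio, so the `aic` package (and its `mcp` dependency, declared in the new `pyproject.toml`) must be installed in whatever interpreter `python3` resolves to.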
diff --git a/gemini-extension.json b/gemini-extension.json
index fe6539e7..1b1dfe29 100644
--- a/gemini-extension.json
+++ b/gemini-extension.json
@@ -1,5 +1,17 @@
 {
   "name": "conductor",
   "version": "0.2.0",
-  "contextFileName": "GEMINI.md"
+  "contextFileName": "GEMINI.md",
+  "mcpServers": {
+    "aic": {
+      "command": "python3",
+      "args": [
+        "-m",
+        "aic.server"
+      ],
+      "env": {
+        "PYTHONUNBUFFERED": "1"
+      }
+    }
+  }
 }
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..457cc61a
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,23 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "conductor-aic"
+version = "0.1.3"
+description = "Conductor Extension for Gemini CLI with AI Compiler (AIC)"
+readme = "README.md"
+requires-python = ">=3.8"
+license = "Apache-2.0"
+authors = [
+    { name = "Google DeepMind" },
+]
+dependencies = [
+    "mcp"
+]
+
+[project.scripts]
+aic = "aic.cli:main"
+
+[tool.hatch.build.targets.wheel]
+packages = ["aic"]
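Once the package is installed (e.g. `pip install -e .`), the `[project.scripts]` entry also exposes the indexer as a standalone CLI: `aic index` builds the graph and `aic context <file>` prints the same context block the MCP tool returns — convenient for sanity-checking the index outside the agent.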
}", skeleton) + self.assertIn("// Greeter interface", skeleton) + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_resolution.py b/tests/test_resolution.py new file mode 100644 index 00000000..dc6d3802 --- /dev/null +++ b/tests/test_resolution.py @@ -0,0 +1,49 @@ +import unittest +import os +import shutil +import tempfile +from aic.utils import resolve_dep_to_path + +class TestResolution(unittest.TestCase): + def setUp(self): + self.test_dir = tempfile.mkdtemp() + # Create a mock project structure + # /pkg1/__init__.py + # /pkg1/mod1.py + # /pkg2/mod2.py + # /ts/index.ts + # /go/main.go + os.makedirs(os.path.join(self.test_dir, "pkg1")) + os.makedirs(os.path.join(self.test_dir, "pkg2")) + os.makedirs(os.path.join(self.test_dir, "ts")) + os.makedirs(os.path.join(self.test_dir, "go")) + + with open(os.path.join(self.test_dir, "pkg1", "__init__.py"), "w") as f: f.write("") + with open(os.path.join(self.test_dir, "pkg1", "mod1.py"), "w") as f: f.write("") + with open(os.path.join(self.test_dir, "pkg2", "mod2.py"), "w") as f: f.write("") + with open(os.path.join(self.test_dir, "ts", "index.ts"), "w") as f: f.write("") + with open(os.path.join(self.test_dir, "go", "utils.go"), "w") as f: f.write("") + + def tearDown(self): + shutil.rmtree(self.test_dir) + + def test_python_relative(self): + # From pkg1/mod1.py, import ..pkg2.mod2 + res = resolve_dep_to_path("..pkg2.mod2", "pkg1/mod1.py", self.test_dir) + self.assertEqual(res, "pkg2/mod2.py") + + def test_python_absolute(self): + # From pkg1/mod1.py, import pkg2.mod2 + res = resolve_dep_to_path("pkg2.mod2", "pkg1/mod1.py", self.test_dir) + self.assertEqual(res, "pkg2/mod2.py") + + def test_typescript_index(self): + res = resolve_dep_to_path("./ts", "main.ts", self.test_dir) + self.assertEqual(res, "ts/index.ts") + + def test_go_package(self): + res = resolve_dep_to_path("go", "main.go", self.test_dir) + self.assertEqual(res, "go/utils.go") + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_skeleton.py b/tests/test_skeleton.py new file mode 100644 index 00000000..24d9e193 --- /dev/null +++ b/tests/test_skeleton.py @@ -0,0 +1,38 @@ +import unittest +from aic.skeleton import PythonSkeletonizer + +class TestSkeletonizer(unittest.TestCase): + def setUp(self): + self.skeletonizer = PythonSkeletonizer() + + def test_basic_function(self): + source = ''' +def hello(name: str) -> str: + """Greets the user.""" + return f"Hello, {name}" +''' + skeleton, deps = self.skeletonizer.skeletonize(source, "test.py") + self.assertIn("def hello(name: str) -> str:", skeleton) + self.assertIn('"""Greets the user."""', skeleton) + self.assertIn("RETURNS: f'Hello, {name}'", skeleton) + self.assertIn("...", skeleton) + + def test_class_skeleton(self): + source = ''' +class MyClass: + """A test class.""" + def __init__(self, value): + self.value = value + + def get_value(self): + return self.value +''' + skeleton, deps = self.skeletonizer.skeletonize(source, "test.py") + self.assertIn("class MyClass:", skeleton) + self.assertIn('"""A test class."""', skeleton) + self.assertIn("def __init__(self, value):", skeleton) + self.assertIn("def get_value(self):", skeleton) + +if __name__ == "__main__": + unittest.main() +