diff --git a/.gitignore b/.gitignore
index bd15bf3..3dacaa0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,5 +8,5 @@ __pycache__/
 build/
 dist/
 *.egg-info/
-
+*.md
 env.sh
diff --git a/laser/main.py b/laser/main.py
index 8f58d4b..0de68b1 100644
--- a/laser/main.py
+++ b/laser/main.py
@@ -26,6 +26,13 @@
     "google": (GoogleGenAI, {}),
     "openai": (OpenAIResponses, {}),
     "chat": (OpenAILegacy, {}),
+    "litellm": (
+        OpenAILegacy,
+        {
+            "base_url": os.getenv("LITELLM_PROXY_API_BASE", "http://localhost:4000"),
+            "api_key": os.getenv("LITELLM_PROXY_API_KEY"),
+        },
+    ),
 }
 
 SYSTEM_PROMPT = """You are Laser, a coding agent for this repository.
@@ -73,7 +80,7 @@ def parse_args():
     parser.add_argument(
         "--model",
         default="openai/gpt-5.2",
-        help="Model identifier to use in the form <provider>/<model>",
+        help="Model identifier to use in the form <provider>/<model>. Providers: anthropic, google, openai, chat, litellm",
     )
     parser.add_argument(
         "--max-tokens",
@@ -217,6 +224,10 @@ def build_provider(model: str, max_tokens: int) -> ChatProvider:
     provider_cls, provider_kwargs = PROVIDER_CONFIGS[provider]
     if provider_cls is Anthropic:
         provider_kwargs = {**provider_kwargs, "default_max_tokens": max_tokens}
+    if provider == "litellm":
+        env_model = os.getenv("LITELLM_MODEL")
+        if env_model:
+            model_name = env_model
     return provider_cls(model=model_name, **provider_kwargs)
 
 
diff --git a/run.sh b/run.sh
new file mode 100755
index 0000000..13d47fc
--- /dev/null
+++ b/run.sh
@@ -0,0 +1,14 @@
+export LITELLM_MODEL=glm-4.7
+export LITELLM_PROXY_API_KEY=sk-1234
+export LITELLM_PROXY_API_BASE=http://192.168.43.179:4001
+uv run laser --model litellm/glm-4.7 "$@"
+
+exit
+
+# To use:
+
+# $: uv tool install git+https://github.com/ExpressGradient/laser
+# $: laser
+
+# or
+# $: uvx --from git+https://github.com/ExpressGradient/laser laser
\ No newline at end of file