From 0f070087331a8efc5ee8b55ec46dd03c08996727 Mon Sep 17 00:00:00 2001 From: tanzilahmed0 Date: Thu, 24 Jul 2025 20:34:06 -0700 Subject: [PATCH] Completed Task B-15 --- backend/api/chat.py | 23 ++++++++++++++++++++--- backend/requirements.txt | 6 +++--- backend/services/__init__.py | 1 + backend/services/llm_service.py | 28 ++++++++++++++++++++++++++++ 4 files changed, 52 insertions(+), 6 deletions(-) create mode 100644 backend/services/llm_service.py diff --git a/backend/api/chat.py b/backend/api/chat.py index fc8c0da..f07bade 100644 --- a/backend/api/chat.py +++ b/backend/api/chat.py @@ -17,6 +17,7 @@ SendMessageResponse, ) from services.project_service import get_project_service +from services.llm_service import llm_service router = APIRouter(prefix="/chat", tags=["chat"]) project_service = get_project_service() @@ -262,8 +263,24 @@ async def send_message( created_at=datetime.utcnow().isoformat() + "Z", ) - # Generate mock query result - query_result = generate_mock_query_result(request.message, project_id) + # Use LLMService for AI response, fallback to mock if not configured + try: + ai_content = llm_service.run(request.message) + # For now, just echo the LLM response as the AI message content + query_result = QueryResult( + id=str(uuid.uuid4()), + query=request.message, + sql_query="", # To be filled by future agent logic + result_type="summary", + data=[], + execution_time=0.0, + row_count=0, + chart_config=None, + ) + except Exception as e: + # Fallback to mock logic if LLM not available + ai_content = f"[MOCK] Here are the results for your query: '{request.message}'" + query_result = generate_mock_query_result(request.message, project_id) # Store message in mock database if project_id not in MOCK_CHAT_MESSAGES: @@ -275,7 +292,7 @@ async def send_message( id=str(uuid.uuid4()), project_id=project_id, user_id="assistant", - content=f"Here are the results for your query: '{request.message}'", + content=ai_content, role="assistant", 
created_at=datetime.utcnow().isoformat() + "Z", metadata={"query_result_id": query_result.id}, diff --git a/backend/requirements.txt b/backend/requirements.txt index 358c60e..14a7222 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -35,6 +35,6 @@ google-auth==2.25.2 email-validator==2.1.0 # Future dependencies (commented for now, will be added in later tasks) -# langchain==0.1.0 -# openai==1.3.0 -# duckdb==0.9.2 \ No newline at end of file +langchain==0.1.0 +openai==1.3.0 +duckdb==0.9.2 \ No newline at end of file diff --git a/backend/services/__init__.py b/backend/services/__init__.py index cb5a1bb..2e4908c 100644 --- a/backend/services/__init__.py +++ b/backend/services/__init__.py @@ -1 +1,2 @@ # Services package for SmartQuery backend +# This file intentionally left blank for service package initialization. diff --git a/backend/services/llm_service.py b/backend/services/llm_service.py new file mode 100644 index 0000000..42f8ebb --- /dev/null +++ b/backend/services/llm_service.py @@ -0,0 +1,28 @@ +import os +from langchain.llms import OpenAI +from langchain.agents import initialize_agent, Tool +from langchain.agents import AgentType + +class LLMService: + """Service for managing LangChain LLM agent for query processing.""" + + def __init__(self): + self.openai_api_key = os.getenv("OPENAI_API_KEY") + if not self.openai_api_key: + raise ValueError("OPENAI_API_KEY environment variable not set.") + self.llm = OpenAI(openai_api_key=self.openai_api_key, temperature=0) + # Placeholder: Add tools as needed for agent + self.tools = [] + self.agent = initialize_agent( + self.tools, + self.llm, + agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, + verbose=False, + ) + + def run(self, prompt: str) -> str: + """Run the agent with a given prompt and return the response.""" + return self.agent.run(prompt) + +# Singleton instance for import +llm_service = LLMService() \ No newline at end of file