23 changes: 20 additions & 3 deletions backend/api/chat.py
@@ -17,6 +17,7 @@
     SendMessageResponse,
 )
 from services.project_service import get_project_service
+from services.llm_service import llm_service
 
 router = APIRouter(prefix="/chat", tags=["chat"])
 project_service = get_project_service()
@@ -262,8 +263,24 @@ async def send_message(
         created_at=datetime.utcnow().isoformat() + "Z",
     )
 
-    # Generate mock query result
-    query_result = generate_mock_query_result(request.message, project_id)
+    # Use LLMService for AI response, fallback to mock if not configured
+    try:
+        ai_content = llm_service.run(request.message)
+        # For now, just echo the LLM response as the AI message content
+        query_result = QueryResult(
+            id=str(uuid.uuid4()),
+            query=request.message,
+            sql_query="",  # To be filled by future agent logic
+            result_type="summary",
+            data=[],
+            execution_time=0.0,
+            row_count=0,
+            chart_config=None,
+        )
+    except Exception as e:
+        # Fallback to mock logic if LLM not available
+        ai_content = f"[MOCK] Here are the results for your query: '{request.message}'"
+        query_result = generate_mock_query_result(request.message, project_id)
 
     # Store message in mock database
     if project_id not in MOCK_CHAT_MESSAGES:
@@ -275,7 +292,7 @@
         id=str(uuid.uuid4()),
         project_id=project_id,
         user_id="assistant",
-        content=f"Here are the results for your query: '{request.message}'",
+        content=ai_content,
         role="assistant",
         created_at=datetime.utcnow().isoformat() + "Z",
         metadata={"query_result_id": query_result.id},
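Note: the route decorator for send_message falls outside the hunks above, so the exact path is not visible in this diff. Assuming a conventional POST /chat/{project_id}/messages route and a JSON body with a "message" field (both hypothetical, not confirmed by the diff), the new behavior could be exercised like this:

# Hypothetical usage sketch: the route path and request schema are assumed,
# since the decorator is outside the hunks shown above.
import httpx

resp = httpx.post(
    "http://localhost:8000/chat/demo-project/messages",
    json={"message": "How many rows are in the sales table?"},
)
# With OPENAI_API_KEY configured, the assistant content is the raw LLM reply;
# otherwise the handler falls back to the "[MOCK] ..." string and mock result.
print(resp.json())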
6 changes: 3 additions & 3 deletions backend/requirements.txt
@@ -35,6 +35,6 @@ google-auth==2.25.2
 email-validator==2.1.0
 
 # Future dependencies (commented for now, will be added in later tasks)
-# langchain==0.1.0
-# openai==1.3.0
-# duckdb==0.9.2
+langchain==0.1.0
+openai==1.3.0
+duckdb==0.9.2
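A compatibility note on these pins: in langchain 0.1.x the provider integrations moved into the separate langchain-community package (installed as a dependency of langchain 0.1.0), and the from langchain.llms import OpenAI path used in llm_service.py below is kept as a backwards-compatibility shim that emits a deprecation warning. If the warning becomes a problem, the equivalent import would be:

# Deprecation-safe variant of the OpenAI import under langchain 0.1.x
# (assumes langchain-community, which langchain 0.1.0 installs as a dependency).
from langchain_community.llms import OpenAI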
1 change: 1 addition & 0 deletions backend/services/__init__.py
@@ -1 +1,2 @@
 # Services package for SmartQuery backend
+# This file intentionally left blank for service package initialization.
28 changes: 28 additions & 0 deletions backend/services/llm_service.py
@@ -0,0 +1,28 @@
+import os
+from langchain.llms import OpenAI
+from langchain.agents import initialize_agent, Tool
+from langchain.agents import AgentType
+
+class LLMService:
+    """Service for managing LangChain LLM agent for query processing."""
+
+    def __init__(self):
+        self.openai_api_key = os.getenv("OPENAI_API_KEY")
+        if not self.openai_api_key:
+            raise ValueError("OPENAI_API_KEY environment variable not set.")
+        self.llm = OpenAI(openai_api_key=self.openai_api_key, temperature=0)
+        # Placeholder: Add tools as needed for agent
+        self.tools = []
+        self.agent = initialize_agent(
+            self.tools,
+            self.llm,
+            agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
+            verbose=False,
+        )
+
+    def run(self, prompt: str) -> str:
+        """Run the agent with a given prompt and return the response."""
+        return self.agent.run(prompt)
+
+# Singleton instance for import
+llm_service = LLMService()
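One caveat worth flagging: llm_service is constructed at import time, so a missing OPENAI_API_KEY raises ValueError as soon as chat.py imports this module, before the try/except around llm_service.run() in send_message can engage its mock fallback. A minimal lazy-initialization sketch (illustrative only, not part of this PR):

# Illustrative only (not in this PR): defer construction so a missing
# OPENAI_API_KEY surfaces on first use, inside chat.py's try block,
# where the except branch can fall back to the mock path.
_llm_service = None

def get_llm_service() -> LLMService:
    global _llm_service
    if _llm_service is None:
        _llm_service = LLMService()  # raises ValueError here if the key is unset
    return _llm_service

chat.py would then call get_llm_service().run(request.message) inside the try block instead of importing the eagerly built singleton.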