From 644975160d924a927569570533c2ff120dc402ce Mon Sep 17 00:00:00 2001
From: nuwangeek
Date: Tue, 14 Oct 2025 12:01:54 +0530
Subject: [PATCH 1/2] added test endpoint for orchestrate service

---
 src/llm_orchestration_service_api.py | 80 ++++++++++++++++++++++++++++
 src/models/request_models.py         | 28 ++++++++++
 vault/agent-out/pidfile              |  1 -
 3 files changed, 108 insertions(+), 1 deletion(-)

diff --git a/src/llm_orchestration_service_api.py b/src/llm_orchestration_service_api.py
index dd97fa9..60a15f9 100644
--- a/src/llm_orchestration_service_api.py
+++ b/src/llm_orchestration_service_api.py
@@ -11,6 +11,8 @@
 from models.request_models import (
     OrchestrationRequest,
     OrchestrationResponse,
+    TestOrchestrationRequest,
+    TestOrchestrationResponse,
     EmbeddingRequest,
     EmbeddingResponse,
     ContextGenerationRequest,
@@ -124,6 +126,84 @@ def orchestrate_llm_request(
         )
 
 
+@app.post(
+    "/orchestrate/test",
+    response_model=TestOrchestrationResponse,
+    status_code=status.HTTP_200_OK,
+    summary="Process test LLM orchestration request",
+    description="Processes a simplified test message through the LLM orchestration pipeline",
+)
+def test_orchestrate_llm_request(
+    http_request: Request,
+    request: TestOrchestrationRequest,
+) -> TestOrchestrationResponse:
+    """
+    Process test LLM orchestration request with simplified input.
+
+    Args:
+        http_request: FastAPI Request object for accessing app state
+        request: TestOrchestrationRequest containing only message, environment, and connection_id
+
+    Returns:
+        TestOrchestrationResponse: Response with LLM output and status flags (without chatId)
+
+    Raises:
+        HTTPException: For processing errors
+    """
+    try:
+        logger.info(f"Received test orchestration request for environment: {request.environment}")
+
+        # Get the orchestration service from app state
+        if not hasattr(http_request.app.state, "orchestration_service"):
+            logger.error("Orchestration service not found in app state")
+            raise HTTPException(
+                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+                detail="Service not initialized",
+            )
+
+        orchestration_service = http_request.app.state.orchestration_service
+        if orchestration_service is None:
+            logger.error("Orchestration service is None")
+            raise HTTPException(
+                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+                detail="Service not initialized",
+            )
+
+        # Map TestOrchestrationRequest to OrchestrationRequest with defaults
+        full_request = OrchestrationRequest(
+            chatId="test-session",
+            message=request.message,
+            authorId="test-user", 
+            conversationHistory=[],
+            url="test-context",
+            environment=request.environment,
+            connection_id=request.connection_id,
+        )
+
+        # Process the request using the same logic
+        response = orchestration_service.process_orchestration_request(full_request)
+
+        # Convert to TestOrchestrationResponse (exclude chatId)
+        test_response = TestOrchestrationResponse(
+            llmServiceActive=response.llmServiceActive,
+            questionOutOfLLMScope=response.questionOutOfLLMScope,
+            inputGuardFailed=response.inputGuardFailed,
+            content=response.content,
+        )
+
+        logger.info(f"Successfully processed test request for environment: {request.environment}")
+        return test_response
+
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error(f"Unexpected error processing test request: {str(e)}")
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail="Internal server error occurred",
+        )
+
+
 @app.post(
     "/embeddings",
     response_model=EmbeddingResponse,
diff --git a/src/models/request_models.py b/src/models/request_models.py
index 27152db..c6b9b50 100644
--- a/src/models/request_models.py
+++ b/src/models/request_models.py
@@ -129,3 +129,31 @@ class EmbeddingErrorResponse(BaseModel):
     error: str = Field(..., description="Error message")
     failed_texts: List[str] = Field(..., description="Texts that failed to embed")
     retry_after: Optional[int] = Field(None, description="Retry after seconds")
+
+
+# Test endpoint models
+
+
+class TestOrchestrationRequest(BaseModel):
+    """Model for simplified test orchestration request."""
+
+    message: str = Field(..., description="User's message/query")
+    environment: Literal["production", "test", "development"] = Field(
+        ..., description="Environment context"
+    )
+    connection_id: Optional[str] = Field(
+        None, description="Optional connection identifier"
+    )
+
+
+class TestOrchestrationResponse(BaseModel):
+    """Model for test orchestration response (without chatId)."""
+
+    llmServiceActive: bool = Field(..., description="Whether LLM service is active")
+    questionOutOfLLMScope: bool = Field(
+        ..., description="Whether question is out of LLM scope"
+    )
+    inputGuardFailed: bool = Field(
+        ..., description="Whether input guard validation failed"
+    )
+    content: str = Field(..., description="Response content with citations")
diff --git a/vault/agent-out/pidfile b/vault/agent-out/pidfile
index c793025..e69de29 100644
--- a/vault/agent-out/pidfile
+++ b/vault/agent-out/pidfile
@@ -1 +0,0 @@
-7
\ No newline at end of file

From d93ebfbaa0351b6ee4c3c4975f6dde1fcbb27d2f Mon Sep 17 00:00:00 2001
From: nuwangeek
Date: Tue, 14 Oct 2025 12:02:37 +0530
Subject: [PATCH 2/2] fixed ruff linting issue

---
 src/llm_orchestration_service_api.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/src/llm_orchestration_service_api.py b/src/llm_orchestration_service_api.py
index 60a15f9..4dfd295 100644
--- a/src/llm_orchestration_service_api.py
+++ b/src/llm_orchestration_service_api.py
@@ -151,7 +151,9 @@ def test_orchestrate_llm_request(
         HTTPException: For processing errors
     """
     try:
-        logger.info(f"Received test orchestration request for environment: {request.environment}")
+        logger.info(
+            f"Received test orchestration request for environment: {request.environment}"
+        )
 
         # Get the orchestration service from app state
         if not hasattr(http_request.app.state, "orchestration_service"):
@@ -173,7 +175,7 @@ def test_orchestrate_llm_request(
         full_request = OrchestrationRequest(
             chatId="test-session",
             message=request.message,
-            authorId="test-user", 
+            authorId="test-user",
             conversationHistory=[],
             url="test-context",
             environment=request.environment,
@@ -191,7 +193,9 @@ def test_orchestrate_llm_request(
             content=response.content,
         )
 
-        logger.info(f"Successfully processed test request for environment: {request.environment}")
+        logger.info(
+            f"Successfully processed test request for environment: {request.environment}"
+        )
         return test_response
 
     except HTTPException:
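
Below is a minimal usage sketch, not part of the patches above, showing how the new /orchestrate/test endpoint could be smoke-tested once these changes are applied. It assumes the FastAPI app object is importable from llm_orchestration_service_api (with src/ on PYTHONPATH) and that app.state.orchestration_service is populated during application startup; the sample message is illustrative only, while the payload and response fields mirror TestOrchestrationRequest and TestOrchestrationResponse from these patches.

# Sketch only: exercises the /orchestrate/test endpoint added in PATCH 1/2.
# Assumes the app is importable as below and that app.state.orchestration_service
# is attached during startup (e.g. by a lifespan/startup hook).
from fastapi.testclient import TestClient

from llm_orchestration_service_api import app  # assumed import path (src/ on PYTHONPATH)


def test_orchestrate_test_endpoint_smoke() -> None:
    # The context manager runs startup/shutdown events so app.state gets initialized.
    with TestClient(app) as client:
        payload = {
            "message": "Hello, is the orchestration service reachable?",  # illustrative message
            "environment": "test",  # must be one of: production, test, development
            "connection_id": None,  # optional field in TestOrchestrationRequest
        }
        response = client.post("/orchestrate/test", json=payload)

        assert response.status_code == 200
        body = response.json()
        # TestOrchestrationResponse exposes exactly these four fields (no chatId).
        assert set(body) == {
            "llmServiceActive",
            "questionOutOfLLMScope",
            "inputGuardFailed",
            "content",
        }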