
Commit 2c16bdb

ruff format
1 parent: 11b1bdc · commit: 2c16bdb

11 files changed: +20 −18 lines changed

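Nearly all of the changes below are mechanical output of ruff format, the Black-compatible Python formatter bundled with Ruff: trailing commas added to import lists that stay split across lines, blank lines normalized around top-level definitions, trailing whitespace stripped, and a newline guaranteed at end of file. The two workflow edits, which drop emoji from the PR-comment headings, fall outside Ruff's scope (it formats Python only) and appear to be manual edits bundled into the same commit. The five test files at the bottom each show an identical last line removed and re-added; that is how GitHub renders a file gaining its missing end-of-file newline. A small self-contained sketch of the import and blank-line rules, on an illustrative input that is not from this repo:

# Input exceeding the default 88-column limit (illustrative, not from this repo):
#
#   from typing import Any, AsyncGenerator, Awaitable, Callable, Coroutine, Dict, Generator, Optional
#   def f() -> Optional[Dict[str, Any]]:
#       return None
#
# ruff format splits the import with a trailing comma, inserts the two
# blank lines expected before a top-level def, and ends the file with a newline:
from typing import (
    Any,
    AsyncGenerator,
    Awaitable,
    Callable,
    Coroutine,
    Dict,
    Generator,
    Optional,
)


def f() -> Optional[Dict[str, Any]]:
    return None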

.github/workflows/deepeval-tests.yml

Lines changed: 1 addition & 1 deletion
@@ -71,7 +71,7 @@ jobs:
 const missingSecrets = '${{ steps.validate_secrets.outputs.secrets_list }}'.split(' ');
 const secretsList = missingSecrets.map(s => `- \`${s}\``).join('\n');

-const comment = `## ⚠️ DeepEval Tests: Missing Required Secrets
+const comment = `## DeepEval Tests: Missing Required Secrets

 The DeepEval RAG system tests cannot run because the following GitHub secrets are not configured:

.github/workflows/deepteam-red-team-tests.yml

Lines changed: 1 addition & 1 deletion
@@ -82,7 +82,7 @@ jobs:
 const missingSecrets = '${{ steps.validate_secrets.outputs.secrets_list }}'.split(' ');
 const secretsList = missingSecrets.map(s => `- \`${s}\``).join('\n');

-const comment = `## 🛡️ Red Team Security Tests: Missing Required Secrets
+const comment = `## Red Team Security Tests: Missing Required Secrets

 The Red Team security assessment cannot run because the following GitHub secrets are not configured:
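Both workflows build the failure comment the same way: the validation step emits the missing secret names as a space-separated string, which the github-script step splits and renders as a Markdown bullet list of code spans under the heading. A rough Python equivalent of that string assembly (the secret names here are hypothetical examples, not the workflows' actual required secrets):

# Mirrors the workflows' JavaScript in Python; illustrative only.
secrets_list = "OPENAI_API_KEY QDRANT_URL"  # hypothetical stand-in for the step output

missing_secrets = secrets_list.split(" ")
bullet_list = "\n".join(f"- `{name}`" for name in missing_secrets)
comment = (
    "## DeepEval Tests: Missing Required Secrets\n\n"
    "The DeepEval RAG system tests cannot run because the following "
    "GitHub secrets are not configured:\n\n" + bullet_list
)
print(comment)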

src/llm_orchestration_service.py

Lines changed: 4 additions & 5 deletions
@@ -1084,16 +1084,15 @@ def _generate_rag_response(
         logger.info(
             "Test environment detected – returning out-of-scope message."
         )
-
+
         return TestOrchestrationResponse(
             llmServiceActive=True,  # service OK; insufficient context
             questionOutOfLLMScope=True,
             inputGuardFailed=False,
             content=OUT_OF_SCOPE_MESSAGE,
         )
     else:
-
-        response = OrchestrationResponse(
+        response = OrchestrationResponse(
             chatId=request.chatId,
             llmServiceActive=True,  # service OK; insufficient context
             questionOutOfLLMScope=True,
@@ -1103,7 +1102,7 @@ def _generate_rag_response(
         if testing_mode:
             response.retrieval_context = retrieval_context
             response.refined_questions = refined_output.refined_questions
-        return response
+        return response

     # In-scope: return the answer as-is (NO citations)
     logger.info("Returning in-scope answer without citations.")
@@ -1116,7 +1115,7 @@ def _generate_rag_response(
             content=answer,
         )
     else:
-        response = OrchestrationResponse(
+        response = OrchestrationResponse(
             chatId=request.chatId,
             llmServiceActive=True,
             questionOutOfLLMScope=False,
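From the context lines, the branch being reformatted handles out-of-scope questions in _generate_rag_response: in the test environment it returns a TestOrchestrationResponse carrying the canned out-of-scope message, otherwise it builds an OrchestrationResponse and, in testing mode, attaches the retrieval context and refined questions before returning. A condensed, self-contained sketch of that flow, wrapped in a hypothetical helper with stand-in models and a stand-in message, since the real definitions live elsewhere in the repo:

from typing import Any, Dict, List, Optional
from pydantic import BaseModel

OUT_OF_SCOPE_MESSAGE = "stand-in for the service's out-of-scope text"


class TestOrchestrationResponse(BaseModel):
    # Trimmed stand-in; the real model lives in src/models/request_models.py.
    llmServiceActive: bool
    questionOutOfLLMScope: bool
    inputGuardFailed: bool
    content: str


class OrchestrationResponse(BaseModel):
    # Trimmed stand-in; extra fields here only support the sketch below.
    chatId: str
    llmServiceActive: bool
    questionOutOfLLMScope: bool
    retrieval_context: Optional[List[Dict[str, Any]]] = None
    refined_questions: Optional[List[str]] = None


def out_of_scope_response(chat_id: str, is_test_env: bool, testing_mode: bool,
                          retrieval_context: List[Dict[str, Any]],
                          refined_questions: List[str]):
    # Test environment: fixed response object, no chat id.
    if is_test_env:
        return TestOrchestrationResponse(
            llmServiceActive=True,  # service OK; insufficient context
            questionOutOfLLMScope=True,
            inputGuardFailed=False,
            content=OUT_OF_SCOPE_MESSAGE,
        )
    # Normal path: regular response, enriched only in testing mode.
    response = OrchestrationResponse(
        chatId=chat_id,
        llmServiceActive=True,
        questionOutOfLLMScope=True,
    )
    if testing_mode:
        response.retrieval_context = retrieval_context
        response.refined_questions = refined_questions
    return response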

src/llm_orchestration_service_api.py

Lines changed: 3 additions & 1 deletion
@@ -1,4 +1,5 @@
 """LLM Orchestration Service API - FastAPI application."""
+
 import os
 from contextlib import asynccontextmanager
 from typing import Any, AsyncGenerator, Dict
@@ -18,7 +19,7 @@
     ContextGenerationRequest,
     ContextGenerationResponse,
     EmbeddingErrorResponse,
-    DeepEvalTestOrchestrationResponse
+    DeepEvalTestOrchestrationResponse,
 )


@@ -300,6 +301,7 @@ async def get_available_embedding_models(
         logger.error(f"Failed to get embedding models: {e}")
         raise HTTPException(status_code=500, detail=str(e))

+
 @app.post("orchestrate-test")
 def orchestrate_llm_request_test(
     http_request: Request,
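Beyond the import comma and blank-line fixes, the last hunk shows the test endpoint the rest of this commit supports. A minimal runnable sketch of an endpoint with that shape, using a stub model and body; note the sketch spells the route "/orchestrate-test" with a leading slash, unlike the context line above, since Starlette matches routes against request paths, which always begin with "/":

from fastapi import FastAPI, Request
from pydantic import BaseModel

app = FastAPI()


class StubTestResponse(BaseModel):
    # Trimmed stand-in for DeepEvalTestOrchestrationResponse.
    llmServiceActive: bool
    questionOutOfLLMScope: bool
    content: str


@app.post("/orchestrate-test")
def orchestrate_llm_request_test(http_request: Request) -> StubTestResponse:
    # Placeholder body; the real handler runs the RAG pipeline in testing mode.
    return StubTestResponse(
        llmServiceActive=True,
        questionOutOfLLMScope=False,
        content="stub answer",
    )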

src/models/request_models.py

Lines changed: 2 additions & 1 deletion
@@ -163,6 +163,7 @@ class TestOrchestrationResponse(BaseModel):
     )
     content: str = Field(..., description="Response content with citations")

+
 class DeepEvalTestOrchestrationResponse(BaseModel):
     """Extended response model for testing with additional evaluation data."""

@@ -173,4 +174,4 @@ class DeepEvalTestOrchestrationResponse(BaseModel):
     content: str
     retrieval_context: Optional[List[Dict[str, Any]]] = None
     refined_questions: Optional[List[str]] = None
-    expected_output: Optional[str] = None  # For DeepEval
+    expected_output: Optional[str] = None  # For DeepEval
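The expected_output field marked "For DeepEval" is what lets a response be turned directly into a DeepEval test case. A sketch of that mapping using DeepEval's LLMTestCase; the response values here are fabricated examples, and since DeepEval expects retrieval_context as a list of strings, the dict entries are flattened under an assumed "content" key:

from deepeval.test_case import LLMTestCase

# Fabricated example of a service response in testing mode.
response = {
    "content": "Paris is the capital of France.",
    "retrieval_context": [{"content": "France's capital is Paris.", "score": 0.92}],
    "refined_questions": ["What is the capital city of France?"],
    "expected_output": "Paris",  # For DeepEval
}

test_case = LLMTestCase(
    input="What is the capital of France?",
    actual_output=response["content"],
    expected_output=response["expected_output"],
    # DeepEval wants plain strings; pull the text out of each context dict
    # ("content" is an assumed key, not confirmed by the diff).
    retrieval_context=[chunk["content"] for chunk in response["retrieval_context"]],
)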

tests/conftest.py

Lines changed: 1 addition & 1 deletion
@@ -730,4 +730,4 @@ def orchestration_client(rag_stack: RAGStackTestContainers) -> Any:
         {"Content-Type": "application/json", "Accept": "application/json"}
     )
     setattr(session, "base_url", rag_stack.get_orchestration_service_url())
-    return session
+    return session

tests/deepeval_tests/red_team_report_generator.py

Lines changed: 1 addition & 1 deletion
@@ -522,4 +522,4 @@ def main():


 if __name__ == "__main__":
-    main()
+    main()

tests/deepeval_tests/red_team_tests.py

Lines changed: 1 addition & 1 deletion
@@ -448,4 +448,4 @@ def _test_attack_category(
     )

     category_duration = (datetime.datetime.now() - category_start).total_seconds()
-    print(f" {category_name} completed in {category_duration:.1f}s")
+    print(f" {category_name} completed in {category_duration:.1f}s")

tests/deepeval_tests/report_generator.py

Lines changed: 1 addition & 1 deletion
@@ -303,4 +303,4 @@ def main():


 if __name__ == "__main__":
-    main()
+    main()

tests/deepeval_tests/standard_tests.py

Lines changed: 1 addition & 1 deletion
@@ -278,4 +278,4 @@ def test_all_metrics(self, test_item: Dict[str, Any], orchestration_client):
     # Now raise assertion if any metrics failed (for pytest reporting)
     if failed_assertions:
         # Just raise the first failure to keep pytest output clean
-        raise AssertionError(failed_assertions[0])
+        raise AssertionError(failed_assertions[0])
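The hunk above preserves a deliberate pattern: the test evaluates every metric first, collecting failures into failed_assertions, and raises only afterwards, so one failing metric does not stop the others from running while pytest still gets a single clean error. A self-contained sketch of the pattern, with a hypothetical check_metric helper standing in for the DeepEval metric calls:

from typing import Optional


def check_metric(name: str, score: float, threshold: float) -> Optional[str]:
    # Hypothetical stand-in for a DeepEval metric evaluation.
    if score < threshold:
        return f"{name}: score {score:.2f} below threshold {threshold:.2f}"
    return None


def test_all_metrics_pattern():
    scores = {"answer_relevancy": 0.91, "faithfulness": 0.42}
    failed_assertions = []
    for name, score in scores.items():
        failure = check_metric(name, score, threshold=0.7)
        if failure:
            failed_assertions.append(failure)  # keep evaluating remaining metrics
    # Now raise assertion if any metrics failed (for pytest reporting)
    if failed_assertions:
        # Just raise the first failure to keep pytest output clean
        raise AssertionError(failed_assertions[0])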
