tests/e2e/configuration/lightspeed-stack-invalid-feedback-storage.yaml
@@ -0,0 +1,25 @@
name: Lightspeed Core Service (LCS)
service:
  host: 0.0.0.0
  port: 8080
  auth_enabled: false
  workers: 1
  color_log: true
  access_log: true
llama_stack:
  # Uses a remote llama-stack service
  # The instance would have already been started with a llama-stack-run.yaml file
  use_as_library_client: false
  # Alternative for "as library use"
  # use_as_library_client: true
  # library_client_config_path: <path-to-llama-stack-run.yaml-file>
  url: http://llama-stack:8321
  api_key: xyzzy
user_data_collection:
  feedback_enabled: true
  feedback_storage: "/invalid"
  transcripts_enabled: true
  transcripts_storage: "/tmp/data/transcripts"

authentication:
  module: "noop-with-token"
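The point of this config is the feedback_storage value: "/invalid" is a path the service cannot create or write to, which makes the file useful for negative tests. As a minimal sketch (assuming PyYAML is available in the test environment; this check is illustrative and not part of the suite), the field can be read back like so:

import os
import yaml  # PyYAML, assumed available in the test environment

with open(
    "tests/e2e/configuration/lightspeed-stack-invalid-feedback-storage.yaml"
) as f:
    cfg = yaml.safe_load(f)

storage = cfg["user_data_collection"]["feedback_storage"]
# "/invalid" does not exist, so any attempt to persist feedback there
# should fail and exercise the service's error handling.
print(storage, os.path.exists(storage))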
45 changes: 34 additions & 11 deletions tests/e2e/features/environment.py
@@ -7,12 +7,18 @@
 4. after_scenario
 """

+import requests
 import subprocess
 import time
 from behave.model import Scenario, Feature
 from behave.runner import Context

-from tests.e2e.utils.utils import switch_config_and_restart
+from tests.e2e.utils.utils import (
+    switch_config,
+    restart_container,
+    remove_config_backup,
+    create_config_backup,
+)

 try:
     import os  # noqa: F401
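The four helpers imported above live in tests/e2e/utils/utils.py, which is not part of this diff. A plausible sketch of their shape (assumptions: the active config lives at lightspeed-stack.yaml in the working directory and the service runs under docker; the repository's real implementations may differ):

import os
import shutil
import subprocess

ACTIVE_CONFIG = "lightspeed-stack.yaml"  # assumed active config location


def create_config_backup(config_path: str) -> str:
    """Copy the active config aside and return the backup path."""
    backup_path = config_path + ".bak"
    shutil.copy(config_path, backup_path)
    return backup_path


def switch_config(new_config_path: str) -> None:
    """Overwrite the active config with the given file."""
    shutil.copy(new_config_path, ACTIVE_CONFIG)


def restart_container(name: str) -> None:
    """Restart the service container so it picks up the new config."""
    subprocess.run(["docker", "restart", name], check=True)


def remove_config_backup(backup_path: str) -> None:
    """Delete a backup created by create_config_backup."""
    os.remove(backup_path)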
@@ -32,10 +38,18 @@ def before_scenario(context: Context, scenario: Scenario) -> None:
if "local" in scenario.effective_tags and not context.local:
scenario.skip("Marked with @local")
return
if "InvalidFeedbackStorageConfig" in scenario.effective_tags:
context.scenario_config = (
"tests/e2e/configuration/lightspeed-stack-invalid-feedback-storage.yaml"
)


def after_scenario(context: Context, scenario: Scenario) -> None:
"""Run after each scenario is run."""
if "InvalidFeedbackStorageConfig" in scenario.effective_tags:
switch_config(context.feature_config)
restart_container("lightspeed-stack")

# Restore Llama Stack connection if it was disrupted
if hasattr(context, "llama_stack_was_running") and context.llama_stack_was_running:
try:
@@ -87,19 +101,28 @@ def after_scenario(context: Context, scenario: Scenario) -> None:
 def before_feature(context: Context, feature: Feature) -> None:
     """Run before each feature file is exercised."""
     if "Authorized" in feature.tags:
-        context.backup_file = switch_config_and_restart(
-            "lightspeed-stack.yaml",
-            "tests/e2e/configuration/lightspeed-stack-auth-noop-token.yaml",
-            "lightspeed-stack",
+        context.feature_config = (
+            "tests/e2e/configuration/lightspeed-stack-auth-noop-token.yaml"
         )
+        context.default_config_backup = create_config_backup("lightspeed-stack.yaml")
+        switch_config(context.feature_config)
+        restart_container("lightspeed-stack")

+    if "Feedback" in feature.tags:
+        context.feedback_conversations = []


 def after_feature(context: Context, feature: Feature) -> None:
     """Run after each feature file is exercised."""
     if "Authorized" in feature.tags:
-        switch_config_and_restart(
-            "lightspeed-stack.yaml",
-            context.backup_file,
-            "lightspeed-stack",
-            cleanup=True,
-        )
+        switch_config(context.default_config_backup)
+        restart_container("lightspeed-stack")
+        remove_config_backup(context.default_config_backup)

+    if "Feedback" in feature.tags:
+        print(context.feedback_conversations)
+        for conversation_id in context.feedback_conversations:
+            url = f"http://localhost:8080/v1/conversations/{conversation_id}"
+            headers = context.auth_headers if hasattr(context, "auth_headers") else {}
+            response = requests.delete(url, headers=headers)
+            assert response.status_code == 200, url
Comment on lines +123 to +128
⚠️ Potential issue | 🟠 Major

Add timeouts to cleanup DELETE calls

This cleanup DELETE runs without a timeout, so if the service stalls we hang the entire suite. Please add a shared timeout (mirroring the other HTTP helpers) before asserting the response.

 import requests
+
+DEFAULT_TIMEOUT = 10
@@
-            response = requests.delete(url, headers=headers)
+            response = requests.delete(
+                url, headers=headers, timeout=DEFAULT_TIMEOUT
+            )
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
--- a/tests/e2e/features/environment.py
+++ b/tests/e2e/features/environment.py
@@
 import requests
+
+DEFAULT_TIMEOUT = 10
@@
         print(context.feedback_conversations)
         for conversation_id in context.feedback_conversations:
             url = f"http://localhost:8080/v1/conversations/{conversation_id}"
             headers = context.auth_headers if hasattr(context, "auth_headers") else {}
-            response = requests.delete(url, headers=headers)
+            response = requests.delete(
+                url,
+                headers=headers,
+                timeout=DEFAULT_TIMEOUT
+            )
             assert response.status_code == 200, url
🤖 Prompt for AI Agents
In tests/e2e/features/environment.py around lines 123 to 128, the cleanup DELETE
loop calls requests.delete without any timeout which can hang the entire suite
if the service stalls; update the call to include the shared timeout used by
other HTTP helpers (e.g. use context.http_timeout or a module-level
HTTP_TIMEOUT) by passing it as the timeout parameter to requests.delete, then
assert the response.status_code as before.
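One detail worth keeping in mind when applying the suggestion: in requests, timeout bounds the connection attempt and each individual socket read, not the total call duration, so a shared constant caps per-operation waits rather than wall-clock time. A small wrapper (hypothetical, not part of this PR) makes the default impossible to forget:

import requests

DEFAULT_TIMEOUT = 10  # seconds, matching the reviewer's suggestion


def delete_with_timeout(url: str, **kwargs) -> requests.Response:
    """DELETE with a default timeout so cleanup can never hang the suite."""
    kwargs.setdefault("timeout", DEFAULT_TIMEOUT)
    return requests.delete(url, **kwargs)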
