diff --git a/.github/workflows/e2e_tests.yaml b/.github/workflows/e2e_tests.yaml
index e34b0cf5d..fe5190692 100644
--- a/.github/workflows/e2e_tests.yaml
+++ b/.github/workflows/e2e_tests.yaml
@@ -1,4 +1,4 @@
-# .github/workflows/e2e_test.yml
+# .github/workflows/e2e_tests.yaml
 name: E2E Tests
 
 on: [push, pull_request_target]
@@ -9,13 +9,18 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        environment: [ "ci", "azure"]
+        mode: ["server", "library"]
+        environment: ["ci", "azure"]
+
+    name: "E2E: ${{ matrix.mode }} mode / ${{ matrix.environment }}"
+
     env:
       OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
       E2E_OPENAI_MODEL: ${{ vars.E2E_OPENAI_MODEL }}
       CLIENT_SECRET: ${{ secrets.CLIENT_SECRET }}
       CLIENT_ID: ${{ secrets.CLIENT_ID }}
       TENANT_ID: ${{ secrets.TENANT_ID }}
+      E2E_DEPLOYMENT_MODE: ${{ matrix.mode }}
 
     steps:
       - uses: actions/checkout@v4
@@ -41,42 +46,24 @@ jobs:
           echo "Current commit: $(git rev-parse HEAD)"
           echo "Current commit message: $(git log -1 --oneline)"
           echo ""
-          echo "=== Recent commits (should show setup-metrics commits) ==="
+          echo "=== Recent commits ==="
           git log --oneline -5
 
-      - uses: 1arp/create-a-file-action@0.4.5
-        env:
-          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-        with:
-          path: '.'
-          isAbsolutePath: false
-          file: 'lightspeed-stack.yaml'
-          content: |
-            name: Lightspeed Core Service (LCS)
-            service:
-              host: 0.0.0.0
-              port: 8080
-              auth_enabled: false
-              workers: 1
-              color_log: true
-              access_log: true
-            llama_stack:
-              # Uses a remote llama-stack service
-              # The instance would have already been started with a llama-stack-run.yaml file
-              use_as_library_client: false
-              # Alternative for "as library use"
-              # use_as_library_client: true
-              # library_client_config_path:
-              url: http://llama-stack:8321
-              api_key: xyzzy
-            user_data_collection:
-              feedback_enabled: true
-              feedback_storage: "/tmp/data/feedback"
-              transcripts_enabled: true
-              transcripts_storage: "/tmp/data/transcripts"
-
-            authentication:
-              module: "noop"
+      - name: Load lightspeed-stack.yaml configuration
+        run: |
+          MODE="${{ matrix.mode }}"
+          CONFIG_FILE="tests/e2e/configuration/lightspeed-stack-${MODE}-mode.yaml"
+
+          echo "Loading configuration for ${MODE} mode"
+          echo "Source: ${CONFIG_FILE}"
+
+          if [ ! -f "${CONFIG_FILE}" ]; then
-f "${CONFIG_FILE}" ]; then + echo "❌ Configuration file not found: ${CONFIG_FILE}" + exit 1 + fi + + cp "${CONFIG_FILE}" lightspeed-stack.yaml + echo "✅ Configuration loaded successfully" - name: Get Azure API key (access token) if: matrix.environment == 'azure' @@ -106,7 +93,7 @@ jobs: - name: Select and configure run.yaml env: - CONFIG_ENVIRONMENT: ${{ matrix.environment || 'ci' }} + CONFIG_ENVIRONMENT: ${{ matrix.environment }} run: | CONFIGS_DIR="tests/e2e/configs" ENVIRONMENT="$CONFIG_ENVIRONMENT" @@ -128,12 +115,12 @@ jobs: echo "Looking for: $CONFIG_FILE" if [ -f "$CONFIG_FILE" ]; then - echo "Found config for environment: $ENVIRONMENT" + echo "Found config for $ENVIRONMENT environment" cp "$CONFIG_FILE" run.yaml else echo "Configuration file not found: $CONFIG_FILE" - echo "Available files in $CONFIGS_DIR:" - ls -la "$CONFIGS_DIR/" + echo "Available files:" + find "$CONFIGS_DIR" -name "*.yaml" exit 1 fi @@ -141,24 +128,29 @@ jobs: sed -i 's|db_path: \.llama/distributions|db_path: /app-root/.llama/distributions|g' run.yaml sed -i 's|db_path: tmp/|db_path: /app-root/.llama/distributions/|g' run.yaml - echo "Successfully configured for environment: $ENVIRONMENT" + echo "Successfully configured for $ENVIRONMENT environment" echo "Using configuration: $(basename "$CONFIG_FILE")" - name: Show final configuration run: | echo "=== Configuration Summary ===" - echo "Source config: tests/e2e/configs/run-ci.yaml" + echo "Deployment mode: ${{ matrix.mode }}" + echo "Environment: ${{ matrix.environment }}" + echo "Source config: tests/e2e/configs/run-${{ matrix.environment }}.yaml" echo "Final file: run.yaml" - echo "Container mount: /app-root/run.yaml" echo "" - echo "=== Final Configuration Preview ===" + echo "=== Configuration Preview ===" echo "Providers: $(grep -c "provider_id:" run.yaml)" echo "Models: $(grep -c "model_id:" run.yaml)" echo "" + echo "=== lightspeed-stack.yaml ===" + grep -A 3 "llama_stack:" lightspeed-stack.yaml - - name: Run service manually + - name: Run services (Server Mode) + if: matrix.mode == 'server' env: - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + AZURE_API_KEY: ${{ env.AZURE_API_KEY }} run: | # Debug: Check if environment variable is available for docker-compose echo "OPENAI_API_KEY is set: $([ -n "$OPENAI_API_KEY" ] && echo 'YES' || echo 'NO')" @@ -176,17 +168,38 @@ jobs: echo "All services started successfully" fi + - name: Run services (Library Mode) + if: matrix.mode == 'library' + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + AZURE_API_KEY: ${{ env.AZURE_API_KEY }} + run: | + echo "Starting service in library mode (1 container)" + docker compose -f docker-compose-library.yaml up -d + + if docker compose -f docker-compose-library.yaml ps | grep -E 'Exit|exited|stopped'; then + echo "Service failed to start - showing logs:" + docker compose -f docker-compose-library.yaml logs + exit 1 + else + echo "Service started successfully" + fi + - name: Wait for services run: | echo "Waiting for services to be healthy..." - sleep 20 # adjust depending on boot time + sleep 20 - name: Quick connectivity test run: | echo "Testing basic connectivity before full test suite..." 
          curl -f http://localhost:8080/v1/models || {
-            echo "❌ Basic connectivity failed - showing logs before running full tests"
-            docker compose logs --tail=30
+            echo "❌ Basic connectivity failed - showing logs"
+            if [ "${{ matrix.mode }}" == "server" ]; then
+              docker compose logs --tail=30
+            else
+              docker compose -f docker-compose-library.yaml logs --tail=30
+            fi
             exit 1
           }
 
@@ -194,6 +207,7 @@ jobs:
         env:
           TERM: xterm-256color
           FORCE_COLOR: 1
+          E2E_DEPLOYMENT_MODE: ${{ matrix.mode }}
         run: |
           echo "Installing test dependencies..."
           pip install uv
@@ -206,9 +220,14 @@ jobs:
         if: failure()
         run: |
           echo "=== Test failure logs ==="
-          echo "=== llama-stack logs ==="
-          docker compose logs llama-stack
-          echo ""
-          echo "=== lightspeed-stack logs ==="
-          docker compose logs lightspeed-stack
+          if [ "${{ matrix.mode }}" == "server" ]; then
+            echo "=== llama-stack logs ==="
+            docker compose logs llama-stack
+            echo ""
+            echo "=== lightspeed-stack logs ==="
+            docker compose logs lightspeed-stack
+          else
+            echo "=== lightspeed-stack (library mode) logs ==="
+            docker compose -f docker-compose-library.yaml logs lightspeed-stack
+          fi
diff --git a/Containerfile b/Containerfile
index c393027d5..90a0dbf13 100644
--- a/Containerfile
+++ b/Containerfile
@@ -73,6 +73,10 @@ USER root
 # Additional tools for derived images
 RUN microdnf install -y --nodocs --setopt=keepcache=0 --setopt=tsflags=nodocs jq patch
 
+# Create llama-stack directories for library mode
+RUN mkdir -p /opt/app-root/src/.llama/distributions/ollama /opt/app-root/src/.llama/providers.d && \
+    chown -R 1001:1001 /opt/app-root/src/.llama
+
 # Add executables from .venv to system PATH
 ENV PATH="/app-root/.venv/bin:$PATH"
diff --git a/Makefile b/Makefile
index d84dfc5ce..5b1e27c64 100644
--- a/Makefile
+++ b/Makefile
@@ -24,6 +24,10 @@ test-integration: ## Run integration tests tests
 test-e2e: ## Run end to end tests for the service
 	script -q -e -c "uv run behave --color --format pretty --tags=-skip -D dump_errors=true @tests/e2e/test_list.txt"
 
+test-e2e-local: ## Run end to end tests for the service locally
+	uv run behave --color --format pretty --tags=-skip -D dump_errors=true @tests/e2e/test_list.txt
+
+
 check-types: ## Checks type hints in sources
 	uv run mypy --explicit-package-bases --disallow-untyped-calls --disallow-untyped-defs --disallow-incomplete-defs --ignore-missing-imports --disable-error-code attr-defined src/ tests/unit tests/integration tests/e2e/
diff --git a/docker-compose-library.yaml b/docker-compose-library.yaml
new file mode 100644
index 000000000..e61eda9d9
--- /dev/null
+++ b/docker-compose-library.yaml
@@ -0,0 +1,37 @@
+services:
+  # Lightspeed Stack with embedded llama-stack (library mode)
+  lightspeed-stack:
+    build:
+      context: .
+      dockerfile: Containerfile
+    platform: linux/amd64
+    container_name: lightspeed-stack
+    ports:
+      - "8080:8080"
+    volumes:
+      # Mount both config files - lightspeed-stack.yaml should have library mode enabled
+      - ./lightspeed-stack.yaml:/app-root/lightspeed-stack.yaml:Z
+      - ./run.yaml:/app-root/run.yaml:Z
+    environment:
+      # LLM Provider API Keys
+      - OPENAI_API_KEY=${OPENAI_API_KEY}
+      - E2E_OPENAI_MODEL=${E2E_OPENAI_MODEL:-gpt-4-turbo}
+      - AZURE_API_KEY=${AZURE_API_KEY:-}
+      - BRAVE_SEARCH_API_KEY=${BRAVE_SEARCH_API_KEY:-}
+      - TAVILY_SEARCH_API_KEY=${TAVILY_SEARCH_API_KEY:-}
+      - RHAIIS_URL=${RHAIIS_URL:-}
+      - RHAIIS_API_KEY=${RHAIIS_API_KEY:-}
+      - RHAIIS_MODEL=${RHAIIS_MODEL:-}
+      - RHEL_AI_URL=${RHEL_AI_URL:-}
+      - RHEL_AI_PORT=${RHEL_AI_PORT:-}
+      - RHEL_AI_API_KEY=${RHEL_AI_API_KEY:-}
+      - RHEL_AI_MODEL=${RHEL_AI_MODEL:-}
+      # Enable debug logging if needed
+      - LLAMA_STACK_LOGGING=${LLAMA_STACK_LOGGING:-}
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8080/liveness"]
+      interval: 10s      # how often to run the check
+      timeout: 5s        # how long to wait before considering it failed
+      retries: 3         # how many times to retry before marking as unhealthy
+      start_period: 15s  # time to wait before starting checks (increased for library initialization)
+
diff --git a/run.yaml b/run.yaml
index d77492af4..945449bee 100644
--- a/run.yaml
+++ b/run.yaml
@@ -16,7 +16,7 @@ apis:
 benchmarks: []
 container_image: null
 datasets: []
-external_providers_dir: null
+external_providers_dir: /opt/app-root/src/.llama/providers.d
 inference_store:
   db_path: .llama/distributions/ollama/inference_store.db
   type: sqlite
diff --git a/test.containerfile b/test.containerfile
index e0c0d7875..4cc99456d 100644
--- a/test.containerfile
+++ b/test.containerfile
@@ -1,12 +1,14 @@
 # Custom Red Hat llama-stack image with missing dependencies
 FROM quay.io/opendatahub/llama-stack:rhoai-v2.25-latest
 
-# Install missing dependencies
+# Install missing dependencies and create required directories
 USER root
 RUN pip install faiss-cpu==1.11.0 && \
     mkdir -p /app-root && \
     chown -R 1001:0 /app-root && \
-    chmod -R 775 /app-root
+    chmod -R 775 /app-root && \
+    mkdir -p /opt/app-root/src/.llama/distributions/ollama /opt/app-root/src/.llama/providers.d && \
+    chown -R 1001:0 /opt/app-root/src/.llama
 
 # Switch back to the original user
 USER 1001
diff --git a/tests/e2e/configs/run-azure.yaml b/tests/e2e/configs/run-azure.yaml
index fd8a8c79d..533ad057d 100644
--- a/tests/e2e/configs/run-azure.yaml
+++ b/tests/e2e/configs/run-azure.yaml
@@ -16,7 +16,7 @@ apis:
 benchmarks: []
 container_image: null
 datasets: []
-external_providers_dir: null
+external_providers_dir: /opt/app-root/src/.llama/providers.d
 inference_store:
   db_path: .llama/distributions/ollama/inference_store.db
   type: sqlite
diff --git a/tests/e2e/configs/run-ci.yaml b/tests/e2e/configs/run-ci.yaml
index d532c50ea..30135ffaa 100644
--- a/tests/e2e/configs/run-ci.yaml
+++ b/tests/e2e/configs/run-ci.yaml
@@ -16,7 +16,7 @@ apis:
 benchmarks: []
 container_image: null
 datasets: []
-external_providers_dir: null
+external_providers_dir: /opt/app-root/src/.llama/providers.d
 inference_store:
   db_path: .llama/distributions/ollama/inference_store.db
   type: sqlite
diff --git a/tests/e2e/configs/run-rhelai.yaml b/tests/e2e/configs/run-rhelai.yaml
index be455d924..e4a7d6494 100644
--- a/tests/e2e/configs/run-rhelai.yaml
+++ b/tests/e2e/configs/run-rhelai.yaml
@@ -16,7 +16,7 @@ apis:
 benchmarks: []
 container_image: null
 datasets: []
-external_providers_dir: null
+external_providers_dir: /opt/app-root/src/.llama/providers.d
 inference_store:
   db_path: .llama/distributions/ollama/inference_store.db
   type: sqlite
diff --git a/tests/e2e/configuration/library-mode/lightspeed-stack-auth-noop-token.yaml b/tests/e2e/configuration/library-mode/lightspeed-stack-auth-noop-token.yaml
new file mode 100644
index 000000000..c4f53338a
--- /dev/null
+++ b/tests/e2e/configuration/library-mode/lightspeed-stack-auth-noop-token.yaml
@@ -0,0 +1,20 @@
+name: Lightspeed Core Service (LCS)
+service:
+  host: 0.0.0.0
+  port: 8080
+  auth_enabled: false
+  workers: 1
+  color_log: true
+  access_log: true
+llama_stack:
+  use_as_library_client: true
+  library_client_config_path: run.yaml
+user_data_collection:
+  feedback_enabled: true
+  feedback_storage: "/tmp/data/feedback"
+  transcripts_enabled: true
+  transcripts_storage: "/tmp/data/transcripts"
+
+authentication:
+  module: "noop-with-token"
+
diff --git a/tests/e2e/configuration/library-mode/lightspeed-stack-invalid-feedback-storage.yaml b/tests/e2e/configuration/library-mode/lightspeed-stack-invalid-feedback-storage.yaml
new file mode 100644
index 000000000..1a39ad1ec
--- /dev/null
+++ b/tests/e2e/configuration/library-mode/lightspeed-stack-invalid-feedback-storage.yaml
@@ -0,0 +1,20 @@
+name: Lightspeed Core Service (LCS)
+service:
+  host: 0.0.0.0
+  port: 8080
+  auth_enabled: false
+  workers: 1
+  color_log: true
+  access_log: true
+llama_stack:
+  use_as_library_client: true
+  library_client_config_path: run.yaml
+user_data_collection:
+  feedback_enabled: true
+  feedback_storage: "/invalid"
+  transcripts_enabled: true
+  transcripts_storage: "/tmp/data/transcripts"
+
+authentication:
+  module: "noop-with-token"
+
diff --git a/tests/e2e/configuration/lightspeed-stack-library-mode.yaml b/tests/e2e/configuration/lightspeed-stack-library-mode.yaml
new file mode 100644
index 000000000..034201317
--- /dev/null
+++ b/tests/e2e/configuration/lightspeed-stack-library-mode.yaml
@@ -0,0 +1,20 @@
+name: Lightspeed Core Service (LCS)
+service:
+  host: 0.0.0.0
+  port: 8080
+  auth_enabled: false
+  workers: 1
+  color_log: true
+  access_log: true
+llama_stack:
+  # Library mode - embeds llama-stack as library
+  use_as_library_client: true
+  library_client_config_path: run.yaml
+user_data_collection:
+  feedback_enabled: true
+  feedback_storage: "/tmp/data/feedback"
+  transcripts_enabled: true
+  transcripts_storage: "/tmp/data/transcripts"
+authentication:
+  module: "noop"
+
diff --git a/tests/e2e/configuration/lightspeed-stack-server-mode.yaml b/tests/e2e/configuration/lightspeed-stack-server-mode.yaml
new file mode 100644
index 000000000..6acf6e9eb
--- /dev/null
+++ b/tests/e2e/configuration/lightspeed-stack-server-mode.yaml
@@ -0,0 +1,21 @@
+name: Lightspeed Core Service (LCS)
+service:
+  host: 0.0.0.0
+  port: 8080
+  auth_enabled: false
+  workers: 1
+  color_log: true
+  access_log: true
+llama_stack:
+  # Server mode - connects to separate llama-stack service
+  use_as_library_client: false
+  url: http://llama-stack:8321
+  api_key: xyzzy
+user_data_collection:
+  feedback_enabled: true
+  feedback_storage: "/tmp/data/feedback"
+  transcripts_enabled: true
+  transcripts_storage: "/tmp/data/transcripts"
+authentication:
+  module: "noop"
+
diff --git a/tests/e2e/configuration/lightspeed-stack-auth-noop-token.yaml b/tests/e2e/configuration/server-mode/lightspeed-stack-auth-noop-token.yaml
similarity index 100%
rename from tests/e2e/configuration/lightspeed-stack-auth-noop-token.yaml
rename to tests/e2e/configuration/server-mode/lightspeed-stack-auth-noop-token.yaml
diff --git a/tests/e2e/configuration/lightspeed-stack-invalid-feedback-storage.yaml b/tests/e2e/configuration/server-mode/lightspeed-stack-invalid-feedback-storage.yaml
similarity index 100%
rename from tests/e2e/configuration/lightspeed-stack-invalid-feedback-storage.yaml
rename to tests/e2e/configuration/server-mode/lightspeed-stack-invalid-feedback-storage.yaml
diff --git a/tests/e2e/features/conversation_cache_v2.feature b/tests/e2e/features/conversation_cache_v2.feature
index 5ef36cfd4..3e9d53a5b 100644
--- a/tests/e2e/features/conversation_cache_v2.feature
+++ b/tests/e2e/features/conversation_cache_v2.feature
@@ -188,7 +188,7 @@ Feature: Conversation Cache V2 API tests
     Then The status code of the response is 404
     And The body of the response contains Conversation not found
 
-
+  @skip-in-library-mode
   Scenario: Check conversations/{conversation_id} works when llama-stack is down
     Given REST API service prefix is /v1
     And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva
@@ -283,7 +283,7 @@ Feature: Conversation Cache V2 API tests
     Then The status code of the response is 404
     And The body of the response contains Conversation not found
 
-
+  @skip-in-library-mode
   Scenario: V2 conversations DELETE endpoint works even when llama-stack is down
     Given REST API service prefix is /v1
    And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva
diff --git a/tests/e2e/features/conversations.feature b/tests/e2e/features/conversations.feature
index f847d6718..9a82f9fbc 100644
--- a/tests/e2e/features/conversations.feature
+++ b/tests/e2e/features/conversations.feature
@@ -129,6 +129,7 @@ Feature: conversations endpoint API tests
     }
     """
 
+  @skip-in-library-mode
   Scenario: Check if conversations/{conversation_id} GET endpoint fails when llama-stack is unavailable
     Given The system is in default state
     And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva
@@ -177,6 +178,7 @@ Feature: conversations endpoint API tests
     Then The status code of the response is 404
     And The body of the response contains Conversation not found
 
+  @skip-in-library-mode
   Scenario: Check if conversations/{conversation_id} DELETE endpoint fails when llama-stack is unavailable
     Given The system is in default state
     And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva
diff --git a/tests/e2e/features/environment.py b/tests/e2e/features/environment.py
index 9d2fc8972..987c4e73e 100644
--- a/tests/e2e/features/environment.py
+++ b/tests/e2e/features/environment.py
@@ -51,6 +51,12 @@ def _fetch_models_from_service() -> dict:
 
 def before_all(context: Context) -> None:
     """Run before and after the whole shooting match."""
+    # Detect deployment mode from environment variable
+    context.deployment_mode = os.getenv("E2E_DEPLOYMENT_MODE", "server").lower()
+    context.is_library_mode = context.deployment_mode == "library"
+
+    print(f"Running tests in {context.deployment_mode} mode")
+
     # Get first LLM model from running service
     llm_model = _fetch_models_from_service()
 
@@ -75,13 +81,19 @@ def before_scenario(context: Context, scenario: Scenario) -> None:
     if "local" in scenario.effective_tags and not context.local:
         scenario.skip("Marked with @local")
         return
+
+    # Skip scenarios that require a separate llama-stack container in library mode
+    if context.is_library_mode and "skip-in-library-mode" in scenario.effective_tags:
+        scenario.skip("Skipped in library mode (no separate llama-stack container)")
+        return
+
+    mode_dir = "library-mode" if context.is_library_mode else "server-mode"
+
     if "InvalidFeedbackStorageConfig" in scenario.effective_tags:
-        context.scenario_config = (
-            "tests/e2e/configuration/lightspeed-stack-invalid-feedback-storage.yaml"
-        )
+        context.scenario_config = f"tests/e2e/configuration/{mode_dir}/lightspeed-stack-invalid-feedback-storage.yaml"
     if "NoCacheConfig" in scenario.effective_tags:
         context.scenario_config = (
-            "tests/e2e/configuration/lightspeed-stack-no-cache.yaml"
+            f"tests/e2e/configuration/{mode_dir}/lightspeed-stack-no-cache.yaml"
         )
     # Switch config and restart immediately
     switch_config(
@@ -99,8 +111,12 @@ def after_scenario(context: Context, scenario: Scenario) -> None:
         switch_config(context.feature_config)
         restart_container("lightspeed-stack")
 
-    # Restore Llama Stack connection if it was disrupted
-    if hasattr(context, "llama_stack_was_running") and context.llama_stack_was_running:
+    # Restore Llama Stack connection if it was disrupted (only in server mode)
+    if (
+        not context.is_library_mode
+        and hasattr(context, "llama_stack_was_running")
+        and context.llama_stack_was_running
+    ):
         try:
             # Start the llama-stack container again
             subprocess.run(
@@ -150,8 +166,9 @@ def before_feature(context: Context, feature: Feature) -> None:
     """Run before each feature file is exercised."""
     if "Authorized" in feature.tags:
+        mode_dir = "library-mode" if context.is_library_mode else "server-mode"
         context.feature_config = (
-            "tests/e2e/configuration/lightspeed-stack-auth-noop-token.yaml"
+            f"tests/e2e/configuration/{mode_dir}/lightspeed-stack-auth-noop-token.yaml"
         )
         context.default_config_backup = create_config_backup("lightspeed-stack.yaml")
         switch_config(context.feature_config)
diff --git a/tests/e2e/features/health.feature b/tests/e2e/features/health.feature
index 5d47c0bef..bd5f14721 100644
--- a/tests/e2e/features/health.feature
+++ b/tests/e2e/features/health.feature
@@ -40,6 +40,7 @@ Feature: REST API tests
     """
 
+  @skip-in-library-mode
   Scenario: Check if service report proper readiness state when llama stack is not available
     Given The system is in default state
     And The llama-stack connection is disrupted
@@ -51,6 +52,7 @@ Feature: REST API tests
     """
 
+  @skip-in-library-mode
   Scenario: Check if service report proper liveness state even when llama stack is not available
     Given The system is in default state
     And The llama-stack connection is disrupted
diff --git a/tests/e2e/features/info.feature b/tests/e2e/features/info.feature
index 241907e03..1a45153a3 100644
--- a/tests/e2e/features/info.feature
+++ b/tests/e2e/features/info.feature
@@ -18,6 +18,7 @@ Feature: Info tests
     And The body of the response has proper name Lightspeed Core Service (LCS) and version 0.3.0
     And The body of the response has llama-stack version 0.2.22
 
+  @skip-in-library-mode
   Scenario: Check if info endpoint reports error when llama-stack connection is not working
     Given The system is in default state
     And The llama-stack connection is disrupted
@@ -35,6 +36,7 @@ Feature: Info tests
     And The body of the response has proper model structure
 
+  @skip-in-library-mode
   Scenario: Check if models endpoint reports error when llama-stack in unreachable
     Given The system is in default state
     And The llama-stack connection is disrupted
@@ -52,6 +54,7 @@ Feature: Info tests
     And The body of the response has proper shield structure
 
+  @skip-in-library-mode
   Scenario: Check if shields endpoint reports error when llama-stack in unreachable
     Given The system is in default state
     And The llama-stack connection is disrupted
@@ -108,6 +111,7 @@ Feature: Info tests
     """
 
+  @skip-in-library-mode
   Scenario: Check if tools endpoint reports error when llama-stack in unreachable
     Given The system is in default state
     And The llama-stack connection is disrupted
diff --git a/tests/e2e/features/query.feature b/tests/e2e/features/query.feature
index 171714965..5491afa55 100644
--- a/tests/e2e/features/query.feature
+++ b/tests/e2e/features/query.feature
@@ -107,6 +107,7 @@ Scenario: Check if LLM responds for query request with error for missing query
     Then The status code of the response is 422
     And The body of the response contains Value error, Provider must be specified if model is specified
 
+  @skip-in-library-mode
   Scenario: Check if LLM responds for query request with error for missing provider
     Given The system is in default state
     And The llama-stack connection is disrupted