6 changes: 1 addition & 5 deletions tests/e2e/configs/run-rhelai.yaml
@@ -130,7 +130,7 @@ server:
shields:
- shield_id: llama-guard-shield
provider_id: llama-guard
provider_shield_id: "gpt-4-turbo"
provider_shield_id: ${env.RHEL_AI_MODEL}
models:
- metadata:
embedding_dimension: 768 # Depends on chosen model
@@ -142,7 +142,3 @@ models:
provider_id: vllm
model_type: llm
provider_model_id: ${env.RHEL_AI_MODEL}
- model_id: gpt-4-turbo
provider_id: openai
model_type: llm
provider_model_id: gpt-4-turbo
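Note on the config change: the shield and model entries now resolve through the RHEL_AI_MODEL environment variable instead of a hardcoded gpt-4-turbo, so the run config follows whatever model the RHEL AI deployment serves. Below is a minimal sketch of how a test harness might guard against the variable being unset; the helper name and error handling are assumptions, not code from this PR.

# Hypothetical helper (not from this PR): fail fast when RHEL_AI_MODEL is
# unset, mirroring the ${env.RHEL_AI_MODEL} substitution in run-rhelai.yaml.
import os


def resolve_rhel_ai_model() -> str:
    """Return the model id the RHEL AI run config points at."""
    model = os.environ.get("RHEL_AI_MODEL")
    if not model:
        raise RuntimeError(
            "RHEL_AI_MODEL is not set; run-rhelai.yaml cannot resolve "
            "${env.RHEL_AI_MODEL} for its shield and model entries."
        )
    return model


if __name__ == "__main__":
    print(f"e2e run will target model: {resolve_rhel_ai_model()}")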
45 changes: 29 additions & 16 deletions tests/e2e/features/steps/info.py
@@ -30,40 +30,53 @@ def check_llama_version(context: Context, llama_version: str) -> None:

@then("The body of the response has proper model structure")
def check_model_structure(context: Context) -> None:
"""Check that the first LLM model has the correct structure and required fields."""
"""Check that the expected LLM model has the correct structure and required fields."""
response_json = context.response.json()
assert response_json is not None, "Response is not valid JSON"

assert "models" in response_json, "Response missing 'models' field"
models = response_json["models"]
assert len(models) > 0, "Response has empty list of models"

# Find first LLM model (same logic as environment.py)
# Get expected values from context (detected in before_all)
expected_model = context.default_model
expected_provider = context.default_provider

# Search for the specific model that was detected in before_all
llm_model = None
for model in models:
if model.get("api_model_type") == "llm":
if (
model.get("api_model_type") == "llm"
and model.get("provider_id") == expected_provider
and model.get("provider_resource_id") == expected_model
):
llm_model = model
break

assert llm_model is not None, "No LLM model found in response"

# Get expected values from context
expected_model = context.default_model
expected_provider = context.default_provider
assert llm_model is not None, (
f"Expected LLM model not found in response. "
f"Looking for provider_id='{expected_provider}' and provider_resource_id='{expected_model}'"
)

# Validate structure and values
assert llm_model["type"] == "model", "type should be 'model'"
assert llm_model["api_model_type"] == "llm", "api_model_type should be 'llm'"
assert llm_model["model_type"] == "llm", "model_type should be 'llm'"
assert (
llm_model["type"] == "model"
), f"type should be 'model', but is {llm_model['type']}"
assert (
llm_model["api_model_type"] == "llm"
), f"api_model_type should be 'llm', but is {llm_model['api_model_type']}"
assert (
llm_model["model_type"] == "llm"
), f"model_type should be 'llm', but is {llm_model['model_type']}"
assert (
llm_model["provider_id"] == expected_provider
), f"provider_id should be '{expected_provider}'"
), f"provider_id should be '{expected_provider}', but is '{llm_model["provider_id"]}'"
assert (
llm_model["provider_resource_id"] == expected_model
), f"provider_resource_id should be '{expected_model}'"
), f"provider_resource_id should be '{expected_model}', but is '{llm_model["provider_resource_id"]}'"
assert (
llm_model["identifier"] == f"{expected_provider}/{expected_model}"
), f"identifier should be '{expected_provider}/{expected_model}'"
), f"identifier should be '{expected_provider}/{expected_model}', but is '{llm_model["identifier"]}'"


@then("The body of the response has proper shield structure")
@@ -94,10 +94,10 @@ def check_shield_structure(context: Context) -> None:
), "provider_id should be 'llama-guard'"
assert (
found_shield["provider_resource_id"] == expected_model
), f"provider_resource_id should be '{expected_model}'"
), f"provider_resource_id should be '{expected_model}', but is '{found_shield["provider_resource_id"]}'"
assert (
found_shield["identifier"] == "llama-guard-shield"
), "identifier should be 'llama-guard-shield'"
), f"identifier should be 'llama-guard-shield', but is '{found_shield["identifier"]}'"


@then("The response contains {count:d} tools listed for provider {provider_name}")
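Note on the info.py change: the step no longer validates the first LLM it happens to find; it searches for the exact provider/model pair recorded on the behave context during before_all. A rough sketch of what that detection could look like is below — the /v1/models shape is inferred from the assertions above, while the base URL and function names are assumptions.

# Hypothetical sketch of the before_all detection the step refers to; the
# real environment.py in the repo may differ.
import requests


def detect_default_llm(base_url: str) -> tuple[str, str]:
    """Return (provider_id, provider_resource_id) of the first LLM listed."""
    response = requests.get(f"{base_url}/v1/models", timeout=30)
    response.raise_for_status()
    for model in response.json().get("models", []):
        if model.get("api_model_type") == "llm":
            return model["provider_id"], model["provider_resource_id"]
    raise RuntimeError("No LLM model exposed by the service under test")


def before_all(context) -> None:  # behave hook
    # Attribute names match what check_model_structure reads from the context.
    context.default_provider, context.default_model = detect_default_llm(
        "http://localhost:8080"  # assumed base URL
    )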
1 change: 1 addition & 0 deletions tests/e2e/features/steps/llm_query_response.py
@@ -15,6 +15,7 @@ def wait_for_complete_response(context: Context) -> None:
"""Wait for the response to be complete."""
context.response_data = _parse_streaming_response(context.response.text)
print(context.response_data)
context.response.raise_for_status()
assert context.response_data["finished"] is True


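Note on the llm_query_response.py change: raise_for_status() now surfaces HTTP errors before the finished flag is asserted. For context, here is a hypothetical stand-in for the _parse_streaming_response helper the step relies on, assuming SSE-style "data:" chunks; the real helper in the repo may differ.

# Hypothetical stand-in for _parse_streaming_response; assumes SSE-style
# "data:" chunks terminated by a [DONE] sentinel.
import json
from typing import Any


def _parse_streaming_response(text: str) -> dict[str, Any]:
    """Collect streamed chunks and report whether the stream finished."""
    result: dict[str, Any] = {"finished": False, "chunks": []}
    for line in text.splitlines():
        if not line.startswith("data:"):
            continue
        payload = line[len("data:"):].strip()
        if payload == "[DONE]":
            result["finished"] = True
        elif payload:
            result["chunks"].append(json.loads(payload))
    return result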
6 changes: 2 additions & 4 deletions tests/e2e/features/streaming_query.feature
@@ -46,20 +46,18 @@ Feature: streaming_query endpoint API tests
Scenario: Check if LLM ignores new system prompt in same conversation
Given The system is in default state
And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva
And I use "streaming_query" to ask question
And I use "streaming_query" to ask question with authorization header
"""
{"query": "Generate sample yaml file for simple GitHub Actions workflow.", "system_prompt": "refuse to answer anything"}
"""
When I wait for the response to be completed
Then The status code of the response is 200
And I use "streaming_query" to ask question with same conversation_id
"""
{"query": "Write a simple code for reversing string", "system_prompt": "provide coding assistance", "model": "{MODEL}", "provider": "{PROVIDER}"}
"""
Then The status code of the response is 200
When I wait for the response to be completed
Then The status code of the response is 200
And The streamed response should contain following fragments
Then The streamed response should contain following fragments
| Fragments in LLM response |
| questions |

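Note on the feature change: the first question is now sent through the "ask question with authorization header" step, so the Bearer token set earlier in the scenario is actually forwarded. A hypothetical behave step definition matching that wording is sketched below; the attribute names and URL layout are assumptions, and the real step implementation in the repo's steps package may differ.

# Hypothetical behave step matching the reworded scenario line.
import requests
from behave import step
from behave.runner import Context


@step('I use "{endpoint}" to ask question with authorization header')
def ask_question_with_auth(context: Context, endpoint: str) -> None:
    """POST the scenario's docstring payload, forwarding the stored header."""
    headers = {
        "Authorization": context.auth_header,  # assumed to be set by the Bearer step
        "Content-Type": "application/json",
    }
    context.response = requests.post(
        f"{context.base_url}/v1/{endpoint}",  # assumed URL layout
        data=context.text,  # the triple-quoted JSON body from the scenario
        headers=headers,
        stream=True,
        timeout=60,
    )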