6 changes: 3 additions & 3 deletions Makefile
@@ -109,11 +109,11 @@ upload-distribution-archives: ## Upload distribution archives into Python registry
 	uv run python -m twine upload --repository ${PYTHON_REGISTRY} dist/*
 
 konflux-requirements: ## generate hermetic requirements.*.txt file for konflux build
-	uv pip compile pyproject.toml -o requirements.x86_64.txt --generate-hashes --group llslibdev --python-platform x86_64-unknown-linux-gnu --torch-backend cpu --python-version 3.12
-	uv pip compile pyproject.toml -o requirements.aarch64.txt --generate-hashes --group llslibdev --python-platform aarch64-unknown-linux-gnu --torch-backend cpu --python-version 3.12
+	uv pip compile pyproject.toml -o requirements.x86_64.txt --generate-hashes --group llslibdev --python-platform x86_64-unknown-linux-gnu --torch-backend cpu --python-version 3.12 --refresh
+	uv pip compile pyproject.toml -o requirements.aarch64.txt --generate-hashes --group llslibdev --python-platform aarch64-unknown-linux-gnu --torch-backend cpu --python-version 3.12 --refresh
 	./scripts/remove_torch_deps.sh requirements.x86_64.txt
 	./scripts/remove_torch_deps.sh requirements.aarch64.txt
-	echo "torch==${TORCH_VERSION}" | uv pip compile - -o requirements.torch.txt --generate-hashes --python-version 3.12 --torch-backend cpu --emit-index-url --no-deps --index-url https://download.pytorch.org/whl/cpu
+	echo "torch==${TORCH_VERSION}" | uv pip compile - -o requirements.torch.txt --generate-hashes --python-version 3.12 --torch-backend cpu --emit-index-url --no-deps --index-url https://download.pytorch.org/whl/cpu --refresh
 
 help: ## Show this help screen
 	@echo 'Usage: make <OPTIONS> ... <TARGETS>'
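The --refresh flag added to each uv pip compile invocation tells uv to bypass its locally cached package metadata and re-resolve against the indexes, so regenerating the hermetic requirements files picks up freshly published releases such as llama-stack 0.3.5 instead of stale cached versions. The torch pin is still compiled separately against the PyTorch CPU index, with torch entries stripped from the per-platform files by scripts/remove_torch_deps.sh.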
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -28,8 +28,8 @@ dependencies = [
     # Used by authentication/k8s integration
     "kubernetes>=30.1.0",
     # Used to call Llama Stack APIs
-    "llama-stack==0.3.4",
-    "llama-stack-client==0.3.4",
+    "llama-stack==0.3.5",
+    "llama-stack-client==0.3.5",
     # Used by Logger
     "rich>=14.0.0",
     # Used by JWK token auth handler
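Both pins are exact (==), keeping llama-stack and llama-stack-client in lockstep; the matching bump to MAXIMAL_SUPPORTED_LLAMA_STACK_VERSION in src/constants.py below keeps the declared support range consistent with the pinned dependency.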
14 changes: 7 additions & 7 deletions requirements.aarch64.txt
@@ -1,5 +1,5 @@
 # This file was autogenerated by uv via the following command:
-#    uv pip compile pyproject.toml -o requirements.aarch64.txt --generate-hashes --group llslibdev --python-platform aarch64-unknown-linux-gnu --torch-backend cpu --python-version 3.12
+#    uv pip compile pyproject.toml -o requirements.aarch64.txt --generate-hashes --group llslibdev --python-platform aarch64-unknown-linux-gnu --torch-backend cpu --python-version 3.12 --refresh
 accelerate==1.12.0 \
     --hash=sha256:3e2091cd341423207e2f084a6654b1efcd250dc326f2a37d6dde446e07cabb11 \
     --hash=sha256:70988c352feb481887077d2ab845125024b2a137a5090d6d7a32b57d03a45df6
@@ -1393,13 +1393,13 @@ litellm==1.80.10 \
     --hash=sha256:4a4aff7558945c2f7e5c6523e67c1b5525a46b10b0e1ad6b8f847cb13b16779e \
     --hash=sha256:9b3e561efaba0eb1291cb1555d3dcb7283cf7f3cb65aadbcdb42e2a8765898c8
     # via lightspeed-stack (pyproject.toml)
-llama-stack==0.3.4 \
-    --hash=sha256:3e302db1efb2ed6c974526b8c6b04b9e54891f3959d0d83c004f77e1c21f6147 \
-    --hash=sha256:bdb489e4341559465d604c9eba554460ab0d17c5dc005ee2d40aa892b94e2e9b
+llama-stack==0.3.5 \
+    --hash=sha256:4a0ce8014b17d14a06858251736f1170f12580fafc519daf75ee1df6c4fbf64b \
+    --hash=sha256:93097409c65108e429fc3dda2f246ef4e8d0b07314a32865e941680e537ec366
     # via lightspeed-stack (pyproject.toml)
-llama-stack-client==0.3.4 \
-    --hash=sha256:6afbd10b152911a044e8d038e58981425ce0a34510da3e31cdd3103516e27688 \
-    --hash=sha256:949c0a6c9a1c925a2b0d930d85b6485bb8d264ba68d02f36aca3c2539cb7b893
+llama-stack-client==0.3.5 \
+    --hash=sha256:2d954429347e920038709ae3e026c06f336ce570bd41245fc4e1e54c78879485 \
+    --hash=sha256:b98acdc660d60839da8b71d5ae59531ba7f059e3e9656ca5ca20edca70f7d6a2
     # via
     #   lightspeed-stack (pyproject.toml)
     #   llama-stack
2 changes: 1 addition & 1 deletion requirements.torch.txt
@@ -1,5 +1,5 @@
 # This file was autogenerated by uv via the following command:
-#    uv pip compile - -o requirements.torch.txt --generate-hashes --python-version 3.12 --torch-backend cpu --emit-index-url --no-deps --index-url https://download.pytorch.org/whl/cpu
+#    uv pip compile - -o requirements.torch.txt --generate-hashes --python-version 3.12 --torch-backend cpu --emit-index-url --no-deps --index-url https://download.pytorch.org/whl/cpu --refresh
 --index-url https://download.pytorch.org/whl/cpu
 
 torch==2.7.1+cpu \
14 changes: 7 additions & 7 deletions requirements.x86_64.txt
@@ -1,5 +1,5 @@
 # This file was autogenerated by uv via the following command:
-#    uv pip compile pyproject.toml -o requirements.x86_64.txt --generate-hashes --group llslibdev --python-platform x86_64-unknown-linux-gnu --torch-backend cpu --python-version 3.12
+#    uv pip compile pyproject.toml -o requirements.x86_64.txt --generate-hashes --group llslibdev --python-platform x86_64-unknown-linux-gnu --torch-backend cpu --python-version 3.12 --refresh
 accelerate==1.12.0 \
     --hash=sha256:3e2091cd341423207e2f084a6654b1efcd250dc326f2a37d6dde446e07cabb11 \
     --hash=sha256:70988c352feb481887077d2ab845125024b2a137a5090d6d7a32b57d03a45df6
@@ -1393,13 +1393,13 @@ litellm==1.80.10 \
     --hash=sha256:4a4aff7558945c2f7e5c6523e67c1b5525a46b10b0e1ad6b8f847cb13b16779e \
     --hash=sha256:9b3e561efaba0eb1291cb1555d3dcb7283cf7f3cb65aadbcdb42e2a8765898c8
     # via lightspeed-stack (pyproject.toml)
-llama-stack==0.3.4 \
-    --hash=sha256:3e302db1efb2ed6c974526b8c6b04b9e54891f3959d0d83c004f77e1c21f6147 \
-    --hash=sha256:bdb489e4341559465d604c9eba554460ab0d17c5dc005ee2d40aa892b94e2e9b
+llama-stack==0.3.5 \
+    --hash=sha256:4a0ce8014b17d14a06858251736f1170f12580fafc519daf75ee1df6c4fbf64b \
+    --hash=sha256:93097409c65108e429fc3dda2f246ef4e8d0b07314a32865e941680e537ec366
     # via lightspeed-stack (pyproject.toml)
-llama-stack-client==0.3.4 \
-    --hash=sha256:6afbd10b152911a044e8d038e58981425ce0a34510da3e31cdd3103516e27688 \
-    --hash=sha256:949c0a6c9a1c925a2b0d930d85b6485bb8d264ba68d02f36aca3c2539cb7b893
+llama-stack-client==0.3.5 \
+    --hash=sha256:2d954429347e920038709ae3e026c06f336ce570bd41245fc4e1e54c78879485 \
+    --hash=sha256:b98acdc660d60839da8b71d5ae59531ba7f059e3e9656ca5ca20edca70f7d6a2
     # via
     #   lightspeed-stack (pyproject.toml)
     #   llama-stack
2 changes: 1 addition & 1 deletion src/constants.py
@@ -2,7 +2,7 @@
 
 # Minimal and maximal supported Llama Stack version
 MINIMAL_SUPPORTED_LLAMA_STACK_VERSION = "0.2.17"
-MAXIMAL_SUPPORTED_LLAMA_STACK_VERSION = "0.3.4"
+MAXIMAL_SUPPORTED_LLAMA_STACK_VERSION = "0.3.5"
 
 UNABLE_TO_PROCESS_RESPONSE = "Unable to process this request"
 
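As a minimal sketch of how a reported llama-stack version could be validated against these bounds (using the packaging library; is_supported_version is a hypothetical helper for illustration, not the service's actual check):

# Illustrative sketch only; the real validation in lightspeed-stack may differ.
from packaging.version import Version

MINIMAL_SUPPORTED_LLAMA_STACK_VERSION = "0.2.17"
MAXIMAL_SUPPORTED_LLAMA_STACK_VERSION = "0.3.5"

def is_supported_version(reported: str) -> bool:
    """Return True when the reported version falls inside the supported range."""
    version = Version(reported)
    return (
        Version(MINIMAL_SUPPORTED_LLAMA_STACK_VERSION)
        <= version
        <= Version(MAXIMAL_SUPPORTED_LLAMA_STACK_VERSION)
    )

print(is_supported_version("0.3.5"))  # True after this change
print(is_supported_version("0.4.0"))  # False until the bound moves again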
19 changes: 19 additions & 0 deletions tests/e2e/configuration/lightspeed-stack-library-mode.yaml
@@ -0,0 +1,19 @@
+name: Lightspeed Core Service (LCS)
+service:
+  host: 0.0.0.0
+  port: 8080
+  auth_enabled: false
+  workers: 1
+  color_log: true
+  access_log: true
+llama_stack:
+  # Library mode - embeds llama-stack as library
+  use_as_library_client: true
+  library_client_config_path: run.yaml
+user_data_collection:
+  feedback_enabled: true
+  feedback_storage: "/tmp/data/feedback"
+  transcripts_enabled: true
+  transcripts_storage: "/tmp/data/transcripts"
+authentication:
+  module: "noop"
20 changes: 20 additions & 0 deletions tests/e2e/configuration/lightspeed-stack-server-mode.yaml
@@ -0,0 +1,20 @@
+name: Lightspeed Core Service (LCS)
+service:
+  host: 0.0.0.0
+  port: 8080
+  auth_enabled: false
+  workers: 1
+  color_log: true
+  access_log: true
+llama_stack:
+  # Server mode - connects to separate llama-stack service
+  use_as_library_client: false
+  url: http://llama-stack:8321
+  api_key: xyzzy
+user_data_collection:
+  feedback_enabled: true
+  feedback_storage: "/tmp/data/feedback"
+  transcripts_enabled: true
+  transcripts_storage: "/tmp/data/transcripts"
+authentication:
+  module: "noop"
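The two new configuration files exercise the two ways LCS can reach llama-stack. A rough sketch of the difference from Python follows; the library client's import path and initialization call are assumptions based on llama-stack / llama-stack-client 0.3.x, not taken from this repository.

# Hedged sketch; module paths may differ in the pinned 0.3.5 releases.
from llama_stack_client import LlamaStackClient

# Server mode: talk to a separately running llama-stack service over HTTP,
# matching the url/api_key fields in lightspeed-stack-server-mode.yaml.
server_client = LlamaStackClient(base_url="http://llama-stack:8321", api_key="xyzzy")

# Library mode: run llama-stack in-process, configured by run.yaml, matching
# library_client_config_path in lightspeed-stack-library-mode.yaml.
# LlamaStackAsLibraryClient and its import path are an assumption here.
from llama_stack.core.library_client import LlamaStackAsLibraryClient

library_client = LlamaStackAsLibraryClient("run.yaml")
library_client.initialize()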
2 changes: 1 addition & 1 deletion tests/e2e/features/info.feature
@@ -16,7 +16,7 @@ Feature: Info tests
     When I access REST API endpoint "info" using HTTP GET method
     Then The status code of the response is 200
     And The body of the response has proper name Lightspeed Core Service (LCS) and version 0.3.1
-    And The body of the response has llama-stack version 0.3.4
+    And The body of the response has llama-stack version 0.3.5
 
   @skip-in-library-mode
   Scenario: Check if info endpoint reports error when llama-stack connection is not working
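The updated step asserts that the service now reports llama-stack 0.3.5. A rough Python equivalent of that check (the response field name llama_stack_version is an assumption, not taken from this repository):

import requests

# Assumes LCS is listening on localhost:8080, as in the e2e configuration files.
response = requests.get("http://localhost:8080/info", timeout=10)
assert response.status_code == 200

body = response.json()
# "llama_stack_version" is an assumed field name for the reported version.
assert body.get("llama_stack_version") == "0.3.5"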
18 changes: 9 additions & 9 deletions uv.lock

Some generated files are not rendered by default.