From 23d51c0392b72f006c912e4ab4b675d781ae5ccb Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Tue, 20 May 2025 13:56:15 +0000
Subject: [PATCH 1/3] Release 0.8.39
---
poetry.lock | 485 ++++++++----------
pyproject.toml | 2 +-
reference.md | 103 +---
src/humanloop/agents/client.py | 40 +-
src/humanloop/agents/raw_client.py | 40 +-
src/humanloop/core/client_wrapper.py | 4 +-
src/humanloop/evaluators/client.py | 11 -
src/humanloop/evaluators/raw_client.py | 11 -
src/humanloop/flows/client.py | 14 +-
src/humanloop/flows/raw_client.py | 14 +-
src/humanloop/prompts/client.py | 47 +-
src/humanloop/prompts/raw_client.py | 47 +-
src/humanloop/requests/agent_log_response.py | 6 -
.../requests/create_agent_log_response.py | 2 +-
.../requests/create_flow_log_response.py | 2 +-
.../requests/evaluator_log_response.py | 6 -
src/humanloop/requests/flow_log_response.py | 2 +-
.../requests/prompt_call_response.py | 6 -
src/humanloop/requests/prompt_log_response.py | 6 -
src/humanloop/requests/provider_api_keys.py | 3 +-
src/humanloop/requests/tool_call_response.py | 6 -
src/humanloop/requests/tool_log_response.py | 6 -
src/humanloop/tools/client.py | 31 --
src/humanloop/tools/raw_client.py | 31 --
src/humanloop/types/agent_log_response.py | 6 -
.../types/create_agent_log_response.py | 2 +-
.../types/create_flow_log_response.py | 2 +-
src/humanloop/types/evaluator_log_response.py | 6 -
src/humanloop/types/event_type.py | 2 +-
src/humanloop/types/flow_log_response.py | 2 +-
src/humanloop/types/prompt_call_response.py | 6 -
src/humanloop/types/prompt_log_response.py | 6 -
src/humanloop/types/provider_api_keys.py | 4 +-
src/humanloop/types/tool_call_response.py | 6 -
src/humanloop/types/tool_log_response.py | 6 -
35 files changed, 258 insertions(+), 715 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index acbc4633..1aed4e24 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -6,7 +6,6 @@ version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev"]
files = [
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
@@ -18,7 +17,6 @@ version = "0.51.0"
description = "The official Python library for the anthropic API"
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev"]
files = [
{file = "anthropic-0.51.0-py3-none-any.whl", hash = "sha256:b8b47d482c9aa1f81b923555cebb687c2730309a20d01be554730c8302e0f62a"},
{file = "anthropic-0.51.0.tar.gz", hash = "sha256:6f824451277992af079554430d5b2c8ff5bc059cc2c968cdc3f06824437da201"},
@@ -43,7 +41,6 @@ version = "4.9.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.9"
-groups = ["main", "dev"]
files = [
{file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"},
{file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"},
@@ -57,7 +54,7 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""}
[package.extras]
doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"]
-test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""]
+test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"]
trio = ["trio (>=0.26.1)"]
[[package]]
@@ -66,19 +63,18 @@ version = "25.3.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"},
{file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"},
]
[package.extras]
-benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
-cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
-dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"]
-tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
-tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""]
+tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
[[package]]
name = "certifi"
@@ -86,7 +82,6 @@ version = "2025.4.26"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
-groups = ["main", "dev"]
files = [
{file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"},
{file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"},
@@ -98,7 +93,6 @@ version = "3.4.2"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
-groups = ["main", "dev"]
files = [
{file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"},
{file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"},
@@ -200,7 +194,6 @@ version = "8.1.8"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.7"
-groups = ["main"]
files = [
{file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"},
{file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"},
@@ -215,7 +208,6 @@ version = "5.15.0"
description = ""
optional = false
python-versions = "<4.0,>=3.9"
-groups = ["dev"]
files = [
{file = "cohere-5.15.0-py3-none-any.whl", hash = "sha256:22ff867c2a6f2fc2b585360c6072f584f11f275ef6d9242bac24e0fa2df1dfb5"},
{file = "cohere-5.15.0.tar.gz", hash = "sha256:e802d4718ddb0bb655654382ebbce002756a3800faac30296cde7f1bdc6ff2cc"},
@@ -238,12 +230,10 @@ version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
-groups = ["main", "dev"]
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
-markers = {main = "platform_system == \"Windows\"", dev = "platform_system == \"Windows\" or sys_platform == \"win32\""}
[[package]]
name = "deepdiff"
@@ -251,7 +241,6 @@ version = "8.5.0"
description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other."
optional = false
python-versions = ">=3.9"
-groups = ["main"]
files = [
{file = "deepdiff-8.5.0-py3-none-any.whl", hash = "sha256:d4599db637f36a1c285f5fdfc2cd8d38bde8d8be8636b65ab5e425b67c54df26"},
{file = "deepdiff-8.5.0.tar.gz", hash = "sha256:a4dd3529fa8d4cd5b9cbb6e3ea9c95997eaa919ba37dac3966c1b8f872dc1cd1"},
@@ -263,7 +252,7 @@ orderly-set = ">=5.4.1,<6"
[package.extras]
cli = ["click (>=8.1.0,<8.2.0)", "pyyaml (>=6.0.0,<6.1.0)"]
coverage = ["coverage (>=7.6.0,<7.7.0)"]
-dev = ["bump2version (>=1.0.0,<1.1.0)", "ipdb (>=0.13.0,<0.14.0)", "jsonpickle (>=4.0.0,<4.1.0)", "nox (==2025.5.1)", "numpy (>=2.0,<3.0) ; python_version < \"3.10\"", "numpy (>=2.2.0,<2.3.0) ; python_version >= \"3.10\"", "orjson (>=3.10.0,<3.11.0)", "pandas (>=2.2.0,<2.3.0)", "polars (>=1.21.0,<1.22.0)", "python-dateutil (>=2.9.0,<2.10.0)", "tomli (>=2.2.0,<2.3.0)", "tomli-w (>=1.2.0,<1.3.0)"]
+dev = ["bump2version (>=1.0.0,<1.1.0)", "ipdb (>=0.13.0,<0.14.0)", "jsonpickle (>=4.0.0,<4.1.0)", "nox (==2025.5.1)", "numpy (>=2.0,<3.0)", "numpy (>=2.2.0,<2.3.0)", "orjson (>=3.10.0,<3.11.0)", "pandas (>=2.2.0,<2.3.0)", "polars (>=1.21.0,<1.22.0)", "python-dateutil (>=2.9.0,<2.10.0)", "tomli (>=2.2.0,<2.3.0)", "tomli-w (>=1.2.0,<1.3.0)"]
docs = ["Sphinx (>=6.2.0,<6.3.0)", "sphinx-sitemap (>=2.6.0,<2.7.0)", "sphinxemoji (>=0.3.0,<0.4.0)"]
optimize = ["orjson"]
static = ["flake8 (>=7.1.0,<7.2.0)", "flake8-pyproject (>=1.2.3,<1.3.0)", "pydantic (>=2.10.0,<2.11.0)"]
@@ -275,7 +264,6 @@ version = "1.2.18"
description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
-groups = ["main"]
files = [
{file = "Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec"},
{file = "deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d"},
@@ -285,7 +273,7 @@ files = [
wrapt = ">=1.10,<2"
[package.extras]
-dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"]
+dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools", "tox"]
[[package]]
name = "distro"
@@ -293,7 +281,6 @@ version = "1.9.0"
description = "Distro - an OS platform information API"
optional = false
python-versions = ">=3.6"
-groups = ["main", "dev"]
files = [
{file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"},
{file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
@@ -305,8 +292,6 @@ version = "1.3.0"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
-groups = ["main", "dev"]
-markers = "python_version < \"3.11\""
files = [
{file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"},
{file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"},
@@ -324,7 +309,6 @@ version = "2.1.1"
description = "execnet: rapid multi-Python deployment"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"},
{file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"},
@@ -335,43 +319,47 @@ testing = ["hatch", "pre-commit", "pytest", "tox"]
[[package]]
name = "fastavro"
-version = "1.10.0"
+version = "1.11.1"
description = "Fast read/write of AVRO files"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
-files = [
- {file = "fastavro-1.10.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1a9fe0672d2caf0fe54e3be659b13de3cad25a267f2073d6f4b9f8862acc31eb"},
- {file = "fastavro-1.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86dd0410770e0c99363788f0584523709d85e57bb457372ec5c285a482c17fe6"},
- {file = "fastavro-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:190e80dc7d77d03a6a8597a026146b32a0bbe45e3487ab4904dc8c1bebecb26d"},
- {file = "fastavro-1.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bf570d63be9155c3fdc415f60a49c171548334b70fff0679a184b69c29b6bc61"},
- {file = "fastavro-1.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e07abb6798e95dccecaec316265e35a018b523d1f3944ad396d0a93cb95e0a08"},
- {file = "fastavro-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:37203097ed11d0b8fd3c004904748777d730cafd26e278167ea602eebdef8eb2"},
- {file = "fastavro-1.10.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d183c075f527ab695a27ae75f210d4a86bce660cda2f85ae84d5606efc15ef50"},
- {file = "fastavro-1.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7a95a2c0639bffd7c079b59e9a796bfc3a9acd78acff7088f7c54ade24e4a77"},
- {file = "fastavro-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a678153b5da1b024a32ec3f611b2e7afd24deac588cb51dd1b0019935191a6d"},
- {file = "fastavro-1.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:67a597a5cfea4dddcf8b49eaf8c2b5ffee7fda15b578849185bc690ec0cd0d8f"},
- {file = "fastavro-1.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1fd689724760b17f69565d8a4e7785ed79becd451d1c99263c40cb2d6491f1d4"},
- {file = "fastavro-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:4f949d463f9ac4221128a51e4e34e2562f401e5925adcadfd28637a73df6c2d8"},
- {file = "fastavro-1.10.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:cfe57cb0d72f304bd0dcc5a3208ca6a7363a9ae76f3073307d095c9d053b29d4"},
- {file = "fastavro-1.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74e517440c824cb65fb29d3e3903a9406f4d7c75490cef47e55c4c82cdc66270"},
- {file = "fastavro-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:203c17d44cadde76e8eecb30f2d1b4f33eb478877552d71f049265dc6f2ecd10"},
- {file = "fastavro-1.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6575be7f2b5f94023b5a4e766b0251924945ad55e9a96672dc523656d17fe251"},
- {file = "fastavro-1.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fe471deb675ed2f01ee2aac958fbf8ebb13ea00fa4ce7f87e57710a0bc592208"},
- {file = "fastavro-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:567ff515f2a5d26d9674b31c95477f3e6022ec206124c62169bc2ffaf0889089"},
- {file = "fastavro-1.10.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:82263af0adfddb39c85f9517d736e1e940fe506dfcc35bc9ab9f85e0fa9236d8"},
- {file = "fastavro-1.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:566c193109ff0ff84f1072a165b7106c4f96050078a4e6ac7391f81ca1ef3efa"},
- {file = "fastavro-1.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e400d2e55d068404d9fea7c5021f8b999c6f9d9afa1d1f3652ec92c105ffcbdd"},
- {file = "fastavro-1.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9b8227497f71565270f9249fc9af32a93644ca683a0167cfe66d203845c3a038"},
- {file = "fastavro-1.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8e62d04c65461b30ac6d314e4197ad666371e97ae8cb2c16f971d802f6c7f514"},
- {file = "fastavro-1.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:86baf8c9740ab570d0d4d18517da71626fe9be4d1142bea684db52bd5adb078f"},
- {file = "fastavro-1.10.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5bccbb6f8e9e5b834cca964f0e6ebc27ebe65319d3940b0b397751a470f45612"},
- {file = "fastavro-1.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0132f6b0b53f61a0a508a577f64beb5de1a5e068a9b4c0e1df6e3b66568eec4"},
- {file = "fastavro-1.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca37a363b711202c6071a6d4787e68e15fa3ab108261058c4aae853c582339af"},
- {file = "fastavro-1.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:cf38cecdd67ca9bd92e6e9ba34a30db6343e7a3bedf171753ee78f8bd9f8a670"},
- {file = "fastavro-1.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f4dd10e0ed42982122d20cdf1a88aa50ee09e5a9cd9b39abdffb1aa4f5b76435"},
- {file = "fastavro-1.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:aaef147dc14dd2d7823246178fd06fc5e477460e070dc6d9e07dd8193a6bc93c"},
- {file = "fastavro-1.10.0.tar.gz", hash = "sha256:47bf41ac6d52cdfe4a3da88c75a802321321b37b663a900d12765101a5d6886f"},
+files = [
+ {file = "fastavro-1.11.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:603aa1c1d1be21fb4bcb63e1efb0711a9ddb337de81391c32dac95c6e0dacfcc"},
+ {file = "fastavro-1.11.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45653b312d4ce297e2bd802ea3ffd17ecbe718e5e8b6e2ae04cd72cb50bb99d5"},
+ {file = "fastavro-1.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:998a53fc552e6bee9acda32af258f02557313c85fb5b48becba5b71ec82f421e"},
+ {file = "fastavro-1.11.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9f878c9ad819467120cb066f1c73496c42eb24ecdd7c992ec996f465ef4cedad"},
+ {file = "fastavro-1.11.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:da9e4c231ac4951092c2230ca423d8a3f2966718f072ac1e2c5d2d44c70b2a50"},
+ {file = "fastavro-1.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:7423bfad3199567eeee7ad6816402c7c0ee1658b959e8c10540cfbc60ce96c2a"},
+ {file = "fastavro-1.11.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3573340e4564e8962e22f814ac937ffe0d4be5eabbd2250f77738dc47e3c8fe9"},
+ {file = "fastavro-1.11.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7291cf47735b8bd6ff5d9b33120e6e0974f52fd5dff90cd24151b22018e7fd29"},
+ {file = "fastavro-1.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf3bb065d657d5bac8b2cb39945194aa086a9b3354f2da7f89c30e4dc20e08e2"},
+ {file = "fastavro-1.11.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8758317c85296b848698132efb13bc44a4fbd6017431cc0f26eaeb0d6fa13d35"},
+ {file = "fastavro-1.11.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ad99d57228f83bf3e2214d183fbf6e2fda97fd649b2bdaf8e9110c36cbb02624"},
+ {file = "fastavro-1.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:9134090178bdbf9eefd467717ced3dc151e27a7e7bfc728260ce512697efe5a4"},
+ {file = "fastavro-1.11.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e8bc238f2637cd5d15238adbe8fb8c58d2e6f1870e0fb28d89508584670bae4b"},
+ {file = "fastavro-1.11.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b403933081c83fc4d8a012ee64b86e560a024b1280e3711ee74f2abc904886e8"},
+ {file = "fastavro-1.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f6ecb4b5f77aa756d973b7dd1c2fb4e4c95b4832a3c98b059aa96c61870c709"},
+ {file = "fastavro-1.11.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:059893df63ef823b0231b485c9d43016c7e32850cae7bf69f4e9d46dd41c28f2"},
+ {file = "fastavro-1.11.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5120ffc9a200699218e01777e695a2f08afb3547ba818184198c757dc39417bd"},
+ {file = "fastavro-1.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:7bb9d0d2233f33a52908b6ea9b376fe0baf1144bdfdfb3c6ad326e200a8b56b0"},
+ {file = "fastavro-1.11.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f963b8ddaf179660e814ab420850c1b4ea33e2ad2de8011549d958b21f77f20a"},
+ {file = "fastavro-1.11.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0253e5b6a3c9b62fae9fc3abd8184c5b64a833322b6af7d666d3db266ad879b5"},
+ {file = "fastavro-1.11.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca637b150e1f4c0e8e564fad40a16bd922bcb7ffd1a6e4836e6084f2c4f4e8db"},
+ {file = "fastavro-1.11.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:76af1709031621828ca6ce7f027f7711fa33ac23e8269e7a5733996ff8d318da"},
+ {file = "fastavro-1.11.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8224e6d8d9864d4e55dafbe88920d6a1b8c19cc3006acfac6aa4f494a6af3450"},
+ {file = "fastavro-1.11.1-cp313-cp313-win_amd64.whl", hash = "sha256:cde7ed91b52ff21f0f9f157329760ba7251508ca3e9618af3ffdac986d9faaa2"},
+ {file = "fastavro-1.11.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:e5ed1325c1c414dd954e7a2c5074daefe1eceb672b8c727aa030ba327aa00693"},
+ {file = "fastavro-1.11.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cd3c95baeec37188899824faf44a5ee94dfc4d8667b05b2f867070c7eb174c4"},
+ {file = "fastavro-1.11.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e0babcd81acceb4c60110af9efa25d890dbb68f7de880f806dadeb1e70fe413"},
+ {file = "fastavro-1.11.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b2c0cb8063c7208b53b6867983dc6ae7cc80b91116b51d435d2610a5db2fc52f"},
+ {file = "fastavro-1.11.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1bc2824e9969c04ab6263d269a1e0e5d40b9bd16ade6b70c29d6ffbc4f3cc102"},
+ {file = "fastavro-1.11.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8b579bab31ff87fcb5ef9f6f13baaf99f189b92ed287af60348777583628c327"},
+ {file = "fastavro-1.11.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c646f07c7827fea7425b6936a27f67f356a2a80ac19e6100ed6d3bb0610cc3d"},
+ {file = "fastavro-1.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2915324e1edb0e06f0be0c18279c60f4cff49f6fe01626594707eb75cd9952fc"},
+ {file = "fastavro-1.11.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8fd87ee1e9101b45172fb3cff21b56ce08270d9474eec1d436393677daa95938"},
+ {file = "fastavro-1.11.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:88876568ef387996fbfc6b193a5b9830de3c0497af7d07e5c839a70b86bb47e7"},
+ {file = "fastavro-1.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:cebb7433b860d9b13090d0e53f6db075e4e2042aeb2c577f515e73d2b9c98075"},
+ {file = "fastavro-1.11.1.tar.gz", hash = "sha256:bf6acde5ee633a29fb8dfd6dfea13b164722bc3adc05a0e055df080549c1c2f8"},
]
[package.extras]
@@ -386,7 +374,6 @@ version = "3.18.0"
description = "A platform independent file lock."
optional = false
python-versions = ">=3.9"
-groups = ["main", "dev"]
files = [
{file = "filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de"},
{file = "filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2"},
@@ -395,7 +382,7 @@ files = [
[package.extras]
docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"]
testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"]
-typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""]
+typing = ["typing-extensions (>=4.12.2)"]
[[package]]
name = "fsspec"
@@ -403,7 +390,6 @@ version = "2025.3.2"
description = "File-system specification"
optional = false
python-versions = ">=3.9"
-groups = ["main", "dev"]
files = [
{file = "fsspec-2025.3.2-py3-none-any.whl", hash = "sha256:2daf8dc3d1dfa65b6aa37748d112773a7a08416f6c70d96b264c96476ecaf711"},
{file = "fsspec-2025.3.2.tar.gz", hash = "sha256:e52c77ef398680bbd6a98c0e628fbc469491282981209907bbc8aea76a04fdc6"},
@@ -439,14 +425,13 @@ tqdm = ["tqdm"]
[[package]]
name = "groq"
-version = "0.24.0"
+version = "0.25.0"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
- {file = "groq-0.24.0-py3-none-any.whl", hash = "sha256:0020e6b0b2b267263c9eb7c318deef13c12f399c6525734200b11d777b00088e"},
- {file = "groq-0.24.0.tar.gz", hash = "sha256:e821559de8a77fb81d2585b3faec80ff923d6d64fd52339b33f6c94997d6f7f5"},
+ {file = "groq-0.25.0-py3-none-any.whl", hash = "sha256:aadc78b40b1809cdb196b1aa8c7f7293108767df1508cafa3e0d5045d9328e7a"},
+ {file = "groq-0.25.0.tar.gz", hash = "sha256:6e1c7466b0da0130498187b825bd239f86fb77bf7551eacfbfa561d75048746a"},
]
[package.dependencies]
@@ -463,7 +448,6 @@ version = "0.16.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev"]
files = [
{file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"},
{file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
@@ -475,7 +459,6 @@ version = "1.0.9"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev"]
files = [
{file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"},
{file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"},
@@ -497,7 +480,6 @@ version = "0.28.1"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev"]
files = [
{file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"},
{file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"},
@@ -510,7 +492,7 @@ httpcore = "==1.*"
idna = "*"
[package.extras]
-brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""]
+brotli = ["brotli", "brotlicffi"]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
@@ -522,7 +504,6 @@ version = "0.4.0"
description = "Consume Server-Sent Event (SSE) messages with HTTPX."
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev"]
files = [
{file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"},
{file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"},
@@ -530,14 +511,13 @@ files = [
[[package]]
name = "huggingface-hub"
-version = "0.31.2"
+version = "0.31.4"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.8.0"
-groups = ["main", "dev"]
files = [
- {file = "huggingface_hub-0.31.2-py3-none-any.whl", hash = "sha256:8138cd52aa2326b4429bb00a4a1ba8538346b7b8a808cdce30acb6f1f1bdaeec"},
- {file = "huggingface_hub-0.31.2.tar.gz", hash = "sha256:7053561376ed7f6ffdaecf09cc54d70dc784ac6315fa4bb9b93e19662b029675"},
+ {file = "huggingface_hub-0.31.4-py3-none-any.whl", hash = "sha256:4f70704760296cc69b612916056e9845f5490a33782b924fc531767967acc15d"},
+ {file = "huggingface_hub-0.31.4.tar.gz", hash = "sha256:5a7bc710b9f9c028aee5b1476867b4ec5c1b92f043cb364d5fdc54354757e4ce"},
]
[package.dependencies]
@@ -570,7 +550,6 @@ version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
-groups = ["main", "dev"]
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
@@ -585,7 +564,6 @@ version = "8.6.1"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.9"
-groups = ["main"]
files = [
{file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"},
{file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"},
@@ -595,12 +573,12 @@ files = [
zipp = ">=3.20"
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
perf = ["ipython"]
-test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
+test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
type = ["pytest-mypy"]
[[package]]
@@ -609,7 +587,6 @@ version = "2.1.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"},
{file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"},
@@ -617,88 +594,88 @@ files = [
[[package]]
name = "jiter"
-version = "0.9.0"
+version = "0.10.0"
description = "Fast iterable JSON parser."
optional = false
-python-versions = ">=3.8"
-groups = ["main", "dev"]
-files = [
- {file = "jiter-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:816ec9b60fdfd1fec87da1d7ed46c66c44ffec37ab2ef7de5b147b2fce3fd5ad"},
- {file = "jiter-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b1d3086f8a3ee0194ecf2008cf81286a5c3e540d977fa038ff23576c023c0ea"},
- {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1339f839b91ae30b37c409bf16ccd3dc453e8b8c3ed4bd1d6a567193651a4a51"},
- {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ffba79584b3b670fefae66ceb3a28822365d25b7bf811e030609a3d5b876f538"},
- {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cfc7d0a8e899089d11f065e289cb5b2daf3d82fbe028f49b20d7b809193958d"},
- {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e00a1a2bbfaaf237e13c3d1592356eab3e9015d7efd59359ac8b51eb56390a12"},
- {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1d9870561eb26b11448854dce0ff27a9a27cb616b632468cafc938de25e9e51"},
- {file = "jiter-0.9.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9872aeff3f21e437651df378cb75aeb7043e5297261222b6441a620218b58708"},
- {file = "jiter-0.9.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1fd19112d1049bdd47f17bfbb44a2c0001061312dcf0e72765bfa8abd4aa30e5"},
- {file = "jiter-0.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6ef5da104664e526836070e4a23b5f68dec1cc673b60bf1edb1bfbe8a55d0678"},
- {file = "jiter-0.9.0-cp310-cp310-win32.whl", hash = "sha256:cb12e6d65ebbefe5518de819f3eda53b73187b7089040b2d17f5b39001ff31c4"},
- {file = "jiter-0.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:c43ca669493626d8672be3b645dbb406ef25af3f4b6384cfd306da7eb2e70322"},
- {file = "jiter-0.9.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6c4d99c71508912a7e556d631768dcdef43648a93660670986916b297f1c54af"},
- {file = "jiter-0.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8f60fb8ce7df529812bf6c625635a19d27f30806885139e367af93f6e734ef58"},
- {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51c4e1a4f8ea84d98b7b98912aa4290ac3d1eabfde8e3c34541fae30e9d1f08b"},
- {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f4c677c424dc76684fea3e7285a7a2a7493424bea89ac441045e6a1fb1d7b3b"},
- {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2221176dfec87f3470b21e6abca056e6b04ce9bff72315cb0b243ca9e835a4b5"},
- {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3c7adb66f899ffa25e3c92bfcb593391ee1947dbdd6a9a970e0d7e713237d572"},
- {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c98d27330fdfb77913c1097a7aab07f38ff2259048949f499c9901700789ac15"},
- {file = "jiter-0.9.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eda3f8cc74df66892b1d06b5d41a71670c22d95a1ca2cbab73654745ce9d0419"},
- {file = "jiter-0.9.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dd5ab5ddc11418dce28343123644a100f487eaccf1de27a459ab36d6cca31043"},
- {file = "jiter-0.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:42f8a68a69f047b310319ef8e2f52fdb2e7976fb3313ef27df495cf77bcad965"},
- {file = "jiter-0.9.0-cp311-cp311-win32.whl", hash = "sha256:a25519efb78a42254d59326ee417d6f5161b06f5da827d94cf521fed961b1ff2"},
- {file = "jiter-0.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:923b54afdd697dfd00d368b7ccad008cccfeb1efb4e621f32860c75e9f25edbd"},
- {file = "jiter-0.9.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7b46249cfd6c48da28f89eb0be3f52d6fdb40ab88e2c66804f546674e539ec11"},
- {file = "jiter-0.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:609cf3c78852f1189894383cf0b0b977665f54cb38788e3e6b941fa6d982c00e"},
- {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d726a3890a54561e55a9c5faea1f7655eda7f105bd165067575ace6e65f80bb2"},
- {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2e89dc075c1fef8fa9be219e249f14040270dbc507df4215c324a1839522ea75"},
- {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04e8ffa3c353b1bc4134f96f167a2082494351e42888dfcf06e944f2729cbe1d"},
- {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:203f28a72a05ae0e129b3ed1f75f56bc419d5f91dfacd057519a8bd137b00c42"},
- {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fca1a02ad60ec30bb230f65bc01f611c8608b02d269f998bc29cca8619a919dc"},
- {file = "jiter-0.9.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:237e5cee4d5d2659aaf91bbf8ec45052cc217d9446070699441a91b386ae27dc"},
- {file = "jiter-0.9.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:528b6b71745e7326eed73c53d4aa57e2a522242320b6f7d65b9c5af83cf49b6e"},
- {file = "jiter-0.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9f48e86b57bc711eb5acdfd12b6cb580a59cc9a993f6e7dcb6d8b50522dcd50d"},
- {file = "jiter-0.9.0-cp312-cp312-win32.whl", hash = "sha256:699edfde481e191d81f9cf6d2211debbfe4bd92f06410e7637dffb8dd5dfde06"},
- {file = "jiter-0.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:099500d07b43f61d8bd780466d429c45a7b25411b334c60ca875fa775f68ccb0"},
- {file = "jiter-0.9.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:2764891d3f3e8b18dce2cff24949153ee30c9239da7c00f032511091ba688ff7"},
- {file = "jiter-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:387b22fbfd7a62418d5212b4638026d01723761c75c1c8232a8b8c37c2f1003b"},
- {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d8da8629ccae3606c61d9184970423655fb4e33d03330bcdfe52d234d32f69"},
- {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1be73d8982bdc278b7b9377426a4b44ceb5c7952073dd7488e4ae96b88e1103"},
- {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2228eaaaa111ec54b9e89f7481bffb3972e9059301a878d085b2b449fbbde635"},
- {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:11509bfecbc319459647d4ac3fd391d26fdf530dad00c13c4dadabf5b81f01a4"},
- {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f22238da568be8bbd8e0650e12feeb2cfea15eda4f9fc271d3b362a4fa0604d"},
- {file = "jiter-0.9.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17f5d55eb856597607562257c8e36c42bc87f16bef52ef7129b7da11afc779f3"},
- {file = "jiter-0.9.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:6a99bed9fbb02f5bed416d137944419a69aa4c423e44189bc49718859ea83bc5"},
- {file = "jiter-0.9.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e057adb0cd1bd39606100be0eafe742de2de88c79df632955b9ab53a086b3c8d"},
- {file = "jiter-0.9.0-cp313-cp313-win32.whl", hash = "sha256:f7e6850991f3940f62d387ccfa54d1a92bd4bb9f89690b53aea36b4364bcab53"},
- {file = "jiter-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:c8ae3bf27cd1ac5e6e8b7a27487bf3ab5f82318211ec2e1346a5b058756361f7"},
- {file = "jiter-0.9.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f0b2827fb88dda2cbecbbc3e596ef08d69bda06c6f57930aec8e79505dc17001"},
- {file = "jiter-0.9.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062b756ceb1d40b0b28f326cba26cfd575a4918415b036464a52f08632731e5a"},
- {file = "jiter-0.9.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6f7838bc467ab7e8ef9f387bd6de195c43bad82a569c1699cb822f6609dd4cdf"},
- {file = "jiter-0.9.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4a2d16360d0642cd68236f931b85fe50288834c383492e4279d9f1792e309571"},
- {file = "jiter-0.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e84ed1c9c9ec10bbb8c37f450077cbe3c0d4e8c2b19f0a49a60ac7ace73c7452"},
- {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f3c848209ccd1bfa344a1240763975ca917de753c7875c77ec3034f4151d06c"},
- {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7825f46e50646bee937e0f849d14ef3a417910966136f59cd1eb848b8b5bb3e4"},
- {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d82a811928b26d1a6311a886b2566f68ccf2b23cf3bfed042e18686f1f22c2d7"},
- {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c058ecb51763a67f019ae423b1cbe3fa90f7ee6280c31a1baa6ccc0c0e2d06e"},
- {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9897115ad716c48f0120c1f0c4efae348ec47037319a6c63b2d7838bb53aaef4"},
- {file = "jiter-0.9.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:351f4c90a24c4fb8c87c6a73af2944c440494ed2bea2094feecacb75c50398ae"},
- {file = "jiter-0.9.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d45807b0f236c485e1e525e2ce3a854807dfe28ccf0d013dd4a563395e28008a"},
- {file = "jiter-0.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1537a890724ba00fdba21787010ac6f24dad47f763410e9e1093277913592784"},
- {file = "jiter-0.9.0-cp38-cp38-win32.whl", hash = "sha256:e3630ec20cbeaddd4b65513fa3857e1b7c4190d4481ef07fb63d0fad59033321"},
- {file = "jiter-0.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:2685f44bf80e95f8910553bf2d33b9c87bf25fceae6e9f0c1355f75d2922b0ee"},
- {file = "jiter-0.9.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9ef340fae98065071ccd5805fe81c99c8f80484e820e40043689cf97fb66b3e2"},
- {file = "jiter-0.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:efb767d92c63b2cd9ec9f24feeb48f49574a713870ec87e9ba0c2c6e9329c3e2"},
- {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:113f30f87fb1f412510c6d7ed13e91422cfd329436364a690c34c8b8bd880c42"},
- {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8793b6df019b988526f5a633fdc7456ea75e4a79bd8396a3373c371fc59f5c9b"},
- {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7a9aaa5102dba4e079bb728076fadd5a2dca94c05c04ce68004cfd96f128ea34"},
- {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d838650f6ebaf4ccadfb04522463e74a4c378d7e667e0eb1865cfe3990bfac49"},
- {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0194f813efdf4b8865ad5f5c5f50f8566df7d770a82c51ef593d09e0b347020"},
- {file = "jiter-0.9.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a7954a401d0a8a0b8bc669199db78af435aae1e3569187c2939c477c53cb6a0a"},
- {file = "jiter-0.9.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4feafe787eb8a8d98168ab15637ca2577f6ddf77ac6c8c66242c2d028aa5420e"},
- {file = "jiter-0.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:27cd1f2e8bb377f31d3190b34e4328d280325ad7ef55c6ac9abde72f79e84d2e"},
- {file = "jiter-0.9.0-cp39-cp39-win32.whl", hash = "sha256:161d461dcbe658cf0bd0aa375b30a968b087cdddc624fc585f3867c63c6eca95"},
- {file = "jiter-0.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:e8b36d8a16a61993be33e75126ad3d8aa29cf450b09576f3c427d27647fcb4aa"},
- {file = "jiter-0.9.0.tar.gz", hash = "sha256:aadba0964deb424daa24492abc3d229c60c4a31bfee205aedbf1acc7639d7893"},
+python-versions = ">=3.9"
+files = [
+ {file = "jiter-0.10.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:cd2fb72b02478f06a900a5782de2ef47e0396b3e1f7d5aba30daeb1fce66f303"},
+ {file = "jiter-0.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:32bb468e3af278f095d3fa5b90314728a6916d89ba3d0ffb726dd9bf7367285e"},
+ {file = "jiter-0.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa8b3e0068c26ddedc7abc6fac37da2d0af16b921e288a5a613f4b86f050354f"},
+ {file = "jiter-0.10.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:286299b74cc49e25cd42eea19b72aa82c515d2f2ee12d11392c56d8701f52224"},
+ {file = "jiter-0.10.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6ed5649ceeaeffc28d87fb012d25a4cd356dcd53eff5acff1f0466b831dda2a7"},
+ {file = "jiter-0.10.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2ab0051160cb758a70716448908ef14ad476c3774bd03ddce075f3c1f90a3d6"},
+ {file = "jiter-0.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03997d2f37f6b67d2f5c475da4412be584e1cec273c1cfc03d642c46db43f8cf"},
+ {file = "jiter-0.10.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c404a99352d839fed80d6afd6c1d66071f3bacaaa5c4268983fc10f769112e90"},
+ {file = "jiter-0.10.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66e989410b6666d3ddb27a74c7e50d0829704ede652fd4c858e91f8d64b403d0"},
+ {file = "jiter-0.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b532d3af9ef4f6374609a3bcb5e05a1951d3bf6190dc6b176fdb277c9bbf15ee"},
+ {file = "jiter-0.10.0-cp310-cp310-win32.whl", hash = "sha256:da9be20b333970e28b72edc4dff63d4fec3398e05770fb3205f7fb460eb48dd4"},
+ {file = "jiter-0.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:f59e533afed0c5b0ac3eba20d2548c4a550336d8282ee69eb07b37ea526ee4e5"},
+ {file = "jiter-0.10.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3bebe0c558e19902c96e99217e0b8e8b17d570906e72ed8a87170bc290b1e978"},
+ {file = "jiter-0.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:558cc7e44fd8e507a236bee6a02fa17199ba752874400a0ca6cd6e2196cdb7dc"},
+ {file = "jiter-0.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d613e4b379a07d7c8453c5712ce7014e86c6ac93d990a0b8e7377e18505e98d"},
+ {file = "jiter-0.10.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f62cf8ba0618eda841b9bf61797f21c5ebd15a7a1e19daab76e4e4b498d515b2"},
+ {file = "jiter-0.10.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:919d139cdfa8ae8945112398511cb7fca58a77382617d279556b344867a37e61"},
+ {file = "jiter-0.10.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13ddbc6ae311175a3b03bd8994881bc4635c923754932918e18da841632349db"},
+ {file = "jiter-0.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c440ea003ad10927a30521a9062ce10b5479592e8a70da27f21eeb457b4a9c5"},
+ {file = "jiter-0.10.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc347c87944983481e138dea467c0551080c86b9d21de6ea9306efb12ca8f606"},
+ {file = "jiter-0.10.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:13252b58c1f4d8c5b63ab103c03d909e8e1e7842d302473f482915d95fefd605"},
+ {file = "jiter-0.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7d1bbf3c465de4a24ab12fb7766a0003f6f9bce48b8b6a886158c4d569452dc5"},
+ {file = "jiter-0.10.0-cp311-cp311-win32.whl", hash = "sha256:db16e4848b7e826edca4ccdd5b145939758dadf0dc06e7007ad0e9cfb5928ae7"},
+ {file = "jiter-0.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c9c1d5f10e18909e993f9641f12fe1c77b3e9b533ee94ffa970acc14ded3812"},
+ {file = "jiter-0.10.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1e274728e4a5345a6dde2d343c8da018b9d4bd4350f5a472fa91f66fda44911b"},
+ {file = "jiter-0.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7202ae396446c988cb2a5feb33a543ab2165b786ac97f53b59aafb803fef0744"},
+ {file = "jiter-0.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23ba7722d6748b6920ed02a8f1726fb4b33e0fd2f3f621816a8b486c66410ab2"},
+ {file = "jiter-0.10.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:371eab43c0a288537d30e1f0b193bc4eca90439fc08a022dd83e5e07500ed026"},
+ {file = "jiter-0.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c675736059020365cebc845a820214765162728b51ab1e03a1b7b3abb70f74c"},
+ {file = "jiter-0.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c5867d40ab716e4684858e4887489685968a47e3ba222e44cde6e4a2154f959"},
+ {file = "jiter-0.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:395bb9a26111b60141757d874d27fdea01b17e8fac958b91c20128ba8f4acc8a"},
+ {file = "jiter-0.10.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6842184aed5cdb07e0c7e20e5bdcfafe33515ee1741a6835353bb45fe5d1bd95"},
+ {file = "jiter-0.10.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:62755d1bcea9876770d4df713d82606c8c1a3dca88ff39046b85a048566d56ea"},
+ {file = "jiter-0.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:533efbce2cacec78d5ba73a41756beff8431dfa1694b6346ce7af3a12c42202b"},
+ {file = "jiter-0.10.0-cp312-cp312-win32.whl", hash = "sha256:8be921f0cadd245e981b964dfbcd6fd4bc4e254cdc069490416dd7a2632ecc01"},
+ {file = "jiter-0.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:a7c7d785ae9dda68c2678532a5a1581347e9c15362ae9f6e68f3fdbfb64f2e49"},
+ {file = "jiter-0.10.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e0588107ec8e11b6f5ef0e0d656fb2803ac6cf94a96b2b9fc675c0e3ab5e8644"},
+ {file = "jiter-0.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cafc4628b616dc32530c20ee53d71589816cf385dd9449633e910d596b1f5c8a"},
+ {file = "jiter-0.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:520ef6d981172693786a49ff5b09eda72a42e539f14788124a07530f785c3ad6"},
+ {file = "jiter-0.10.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:554dedfd05937f8fc45d17ebdf298fe7e0c77458232bcb73d9fbbf4c6455f5b3"},
+ {file = "jiter-0.10.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5bc299da7789deacf95f64052d97f75c16d4fc8c4c214a22bf8d859a4288a1c2"},
+ {file = "jiter-0.10.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5161e201172de298a8a1baad95eb85db4fb90e902353b1f6a41d64ea64644e25"},
+ {file = "jiter-0.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e2227db6ba93cb3e2bf67c87e594adde0609f146344e8207e8730364db27041"},
+ {file = "jiter-0.10.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15acb267ea5e2c64515574b06a8bf393fbfee6a50eb1673614aa45f4613c0cca"},
+ {file = "jiter-0.10.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:901b92f2e2947dc6dfcb52fd624453862e16665ea909a08398dde19c0731b7f4"},
+ {file = "jiter-0.10.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d0cb9a125d5a3ec971a094a845eadde2db0de85b33c9f13eb94a0c63d463879e"},
+ {file = "jiter-0.10.0-cp313-cp313-win32.whl", hash = "sha256:48a403277ad1ee208fb930bdf91745e4d2d6e47253eedc96e2559d1e6527006d"},
+ {file = "jiter-0.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:75f9eb72ecb640619c29bf714e78c9c46c9c4eaafd644bf78577ede459f330d4"},
+ {file = "jiter-0.10.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:28ed2a4c05a1f32ef0e1d24c2611330219fed727dae01789f4a335617634b1ca"},
+ {file = "jiter-0.10.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14a4c418b1ec86a195f1ca69da8b23e8926c752b685af665ce30777233dfe070"},
+ {file = "jiter-0.10.0-cp313-cp313t-win_amd64.whl", hash = "sha256:d7bfed2fe1fe0e4dda6ef682cee888ba444b21e7a6553e03252e4feb6cf0adca"},
+ {file = "jiter-0.10.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:5e9251a5e83fab8d87799d3e1a46cb4b7f2919b895c6f4483629ed2446f66522"},
+ {file = "jiter-0.10.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:023aa0204126fe5b87ccbcd75c8a0d0261b9abdbbf46d55e7ae9f8e22424eeb8"},
+ {file = "jiter-0.10.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c189c4f1779c05f75fc17c0c1267594ed918996a231593a21a5ca5438445216"},
+ {file = "jiter-0.10.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15720084d90d1098ca0229352607cd68256c76991f6b374af96f36920eae13c4"},
+ {file = "jiter-0.10.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4f2fb68e5f1cfee30e2b2a09549a00683e0fde4c6a2ab88c94072fc33cb7426"},
+ {file = "jiter-0.10.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce541693355fc6da424c08b7edf39a2895f58d6ea17d92cc2b168d20907dee12"},
+ {file = "jiter-0.10.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31c50c40272e189d50006ad5c73883caabb73d4e9748a688b216e85a9a9ca3b9"},
+ {file = "jiter-0.10.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fa3402a2ff9815960e0372a47b75c76979d74402448509ccd49a275fa983ef8a"},
+ {file = "jiter-0.10.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:1956f934dca32d7bb647ea21d06d93ca40868b505c228556d3373cbd255ce853"},
+ {file = "jiter-0.10.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:fcedb049bdfc555e261d6f65a6abe1d5ad68825b7202ccb9692636c70fcced86"},
+ {file = "jiter-0.10.0-cp314-cp314-win32.whl", hash = "sha256:ac509f7eccca54b2a29daeb516fb95b6f0bd0d0d8084efaf8ed5dfc7b9f0b357"},
+ {file = "jiter-0.10.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5ed975b83a2b8639356151cef5c0d597c68376fc4922b45d0eb384ac058cfa00"},
+ {file = "jiter-0.10.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3aa96f2abba33dc77f79b4cf791840230375f9534e5fac927ccceb58c5e604a5"},
+ {file = "jiter-0.10.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bd6292a43c0fc09ce7c154ec0fa646a536b877d1e8f2f96c19707f65355b5a4d"},
+ {file = "jiter-0.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:39de429dcaeb6808d75ffe9effefe96a4903c6a4b376b2f6d08d77c1aaee2f18"},
+ {file = "jiter-0.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52ce124f13a7a616fad3bb723f2bfb537d78239d1f7f219566dc52b6f2a9e48d"},
+ {file = "jiter-0.10.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:166f3606f11920f9a1746b2eea84fa2c0a5d50fd313c38bdea4edc072000b0af"},
+ {file = "jiter-0.10.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28dcecbb4ba402916034fc14eba7709f250c4d24b0c43fc94d187ee0580af181"},
+ {file = "jiter-0.10.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86c5aa6910f9bebcc7bc4f8bc461aff68504388b43bfe5e5c0bd21efa33b52f4"},
+ {file = "jiter-0.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ceeb52d242b315d7f1f74b441b6a167f78cea801ad7c11c36da77ff2d42e8a28"},
+ {file = "jiter-0.10.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ff76d8887c8c8ee1e772274fcf8cc1071c2c58590d13e33bd12d02dc9a560397"},
+ {file = "jiter-0.10.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a9be4d0fa2b79f7222a88aa488bd89e2ae0a0a5b189462a12def6ece2faa45f1"},
+ {file = "jiter-0.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9ab7fd8738094139b6c1ab1822d6f2000ebe41515c537235fd45dabe13ec9324"},
+ {file = "jiter-0.10.0-cp39-cp39-win32.whl", hash = "sha256:5f51e048540dd27f204ff4a87f5d79294ea0aa3aa552aca34934588cf27023cf"},
+ {file = "jiter-0.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:1b28302349dc65703a9e4ead16f163b1c339efffbe1049c30a44b001a2a4fff9"},
+ {file = "jiter-0.10.0.tar.gz", hash = "sha256:07a7142c38aacc85194391108dc91b5b57093c978a9932bd86a36862759d9500"},
]
[[package]]
@@ -707,7 +684,6 @@ version = "4.23.0"
description = "An implementation of JSON Schema validation for Python"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"},
{file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"},
@@ -729,7 +705,6 @@ version = "2025.4.1"
description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af"},
{file = "jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608"},
@@ -744,7 +719,6 @@ version = "5.1.0"
description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions."
optional = false
python-versions = ">=3.9"
-groups = ["main"]
files = [
{file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:eaf4ac5c6ee18ca9232238364d7f2a213278ae5ca97897cafaa123fcc7bb8bec"},
{file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:48f9aa8ccb9ad1d577a16104834ac44ff640d8de8c0caed09a2300df7ce8460a"},
@@ -843,7 +817,6 @@ version = "1.13.0"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"},
{file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"},
@@ -897,7 +870,6 @@ version = "1.1.0"
description = "Type system extensions for programs checked with the mypy type checker."
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"},
{file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"},
@@ -909,7 +881,6 @@ version = "1.26.4"
description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"},
{file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"},
@@ -951,14 +922,13 @@ files = [
[[package]]
name = "openai"
-version = "1.78.1"
+version = "1.79.0"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
- {file = "openai-1.78.1-py3-none-any.whl", hash = "sha256:7368bf147ca499804cc408fe68cdb6866a060f38dec961bbc97b04f9d917907e"},
- {file = "openai-1.78.1.tar.gz", hash = "sha256:8b26b364531b100df1b961d03560042e5f5be11301d7d49a6cd1a2b9af824dca"},
+ {file = "openai-1.79.0-py3-none-any.whl", hash = "sha256:d5050b92d5ef83f869cb8dcd0aca0b2291c3413412500eec40c66981b3966992"},
+ {file = "openai-1.79.0.tar.gz", hash = "sha256:e3b627aa82858d3e42d16616edc22aa9f7477ee5eb3e6819e9f44a961d899a4c"},
]
[package.dependencies]
@@ -978,14 +948,13 @@ voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"]
[[package]]
name = "opentelemetry-api"
-version = "1.33.0"
+version = "1.33.1"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.8"
-groups = ["main"]
files = [
- {file = "opentelemetry_api-1.33.0-py3-none-any.whl", hash = "sha256:158df154f628e6615b65fdf6e59f99afabea7213e72c5809dd4adf06c0d997cd"},
- {file = "opentelemetry_api-1.33.0.tar.gz", hash = "sha256:cc4380fd2e6da7dcb52a828ea81844ed1f4f2eb638ca3c816775109d93d58ced"},
+ {file = "opentelemetry_api-1.33.1-py3-none-any.whl", hash = "sha256:4db83ebcf7ea93e64637ec6ee6fabee45c5cbe4abd9cf3da95c43828ddb50b83"},
+ {file = "opentelemetry_api-1.33.1.tar.gz", hash = "sha256:1c6055fc0a2d3f23a50c7e17e16ef75ad489345fd3df1f8b8af7c0bbf8a109e8"},
]
[package.dependencies]
@@ -994,50 +963,47 @@ importlib-metadata = ">=6.0,<8.7.0"
[[package]]
name = "opentelemetry-instrumentation"
-version = "0.54b0"
+version = "0.54b1"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.8"
-groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation-0.54b0-py3-none-any.whl", hash = "sha256:1a502238f8af65625ad48800d268d467653e319d959e1732d3b3248916d21327"},
- {file = "opentelemetry_instrumentation-0.54b0.tar.gz", hash = "sha256:2949d0bbf2316eb5d928a5ef610d0a8a2c261ba80167d878abf6016e1c4ae7bb"},
+ {file = "opentelemetry_instrumentation-0.54b1-py3-none-any.whl", hash = "sha256:a4ae45f4a90c78d7006c51524f57cd5aa1231aef031eae905ee34d5423f5b198"},
+ {file = "opentelemetry_instrumentation-0.54b1.tar.gz", hash = "sha256:7658bf2ff914b02f246ec14779b66671508125c0e4227361e56b5ebf6cef0aec"},
]
[package.dependencies]
opentelemetry-api = ">=1.4,<2.0"
-opentelemetry-semantic-conventions = "0.54b0"
+opentelemetry-semantic-conventions = "0.54b1"
packaging = ">=18.0"
wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-anthropic"
-version = "0.40.5"
+version = "0.40.6"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
-groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_anthropic-0.40.5-py3-none-any.whl", hash = "sha256:d3b203b0ee8ee06149711d7acfa0085ad44f1841d709fd4d639934b9d8aa87b4"},
- {file = "opentelemetry_instrumentation_anthropic-0.40.5.tar.gz", hash = "sha256:5a7e9b3852cd8cfc43e50450d83d40b1a15d12b295fb18d2c814576072fcc23f"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.6-py3-none-any.whl", hash = "sha256:85601ce11f4a09f241aa35d91da423ffe72fc0abee6706812f638ffa5aa88bb7"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.6.tar.gz", hash = "sha256:4f116b6d9a3c6494e8de3f8d7e2e5a05b02b304935d6be39116a68fc0220a8e1"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.8"
+opentelemetry-semantic-conventions-ai = "0.4.9"
[[package]]
name = "opentelemetry-instrumentation-bedrock"
-version = "0.40.5"
+version = "0.40.6"
description = "OpenTelemetry Bedrock instrumentation"
optional = false
python-versions = "<4,>=3.9"
-groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_bedrock-0.40.5-py3-none-any.whl", hash = "sha256:5f3cf77c03dfa4ab04fc202bf91a98688053ecb7d1bc64b2eaf42c866fb77c69"},
- {file = "opentelemetry_instrumentation_bedrock-0.40.5.tar.gz", hash = "sha256:0ab690501101a67cff3c4037fa6bdfeb65d25d9ec365e97880ebe118a0a6dd30"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.6-py3-none-any.whl", hash = "sha256:e9ac4326489ba420cb64e2f8317149377d321cf89839c2bd57773a5935c4810e"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.6.tar.gz", hash = "sha256:901a07589c094f5f7eca8600961b66180e268f841360efbfe9a3ed1a0ce983a7"},
]
[package.dependencies]
@@ -1045,92 +1011,87 @@ anthropic = ">=0.17.0"
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.8"
+opentelemetry-semantic-conventions-ai = "0.4.9"
tokenizers = ">=0.13.0"
[[package]]
name = "opentelemetry-instrumentation-cohere"
-version = "0.40.5"
+version = "0.40.6"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
-groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_cohere-0.40.5-py3-none-any.whl", hash = "sha256:ff73b4eed87f1d79e737351aabec85fde3aaa2f793c87e508c20498ccbea3d48"},
- {file = "opentelemetry_instrumentation_cohere-0.40.5.tar.gz", hash = "sha256:98332b9bea8b9c84222682a57ebb431e693ab1d548a4a3f4301a9b1dfc3a6cbc"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.6-py3-none-any.whl", hash = "sha256:495b59c6277697220d1fbdf8336ec361a7673004e48c31d18915403c557ca4a8"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.6.tar.gz", hash = "sha256:4f33b41dda4b2a483b101138c005d76d42e86909aa64be38d9e48f35432c8cbc"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.8"
+opentelemetry-semantic-conventions-ai = "0.4.9"
[[package]]
name = "opentelemetry-instrumentation-groq"
-version = "0.40.5"
+version = "0.40.6"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
-groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_groq-0.40.5-py3-none-any.whl", hash = "sha256:dec20e7f50f648068b2cfb730da0e2297dbdc9a93840cbbc595f652cd0e9e94b"},
- {file = "opentelemetry_instrumentation_groq-0.40.5.tar.gz", hash = "sha256:036bda3c9317a3d34c7538479864d215e0f4e147b5fe475be4f1cd4402b2ae30"},
+ {file = "opentelemetry_instrumentation_groq-0.40.6-py3-none-any.whl", hash = "sha256:f8423faa0951e4d6e865f51cfbef1c5605b843bbcdc9e6594f4001fc9b2079ec"},
+ {file = "opentelemetry_instrumentation_groq-0.40.6.tar.gz", hash = "sha256:17169aa923c648f080cc142896f936908d11ca9b9d4708e287bad1b9061295dd"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.8"
+opentelemetry-semantic-conventions-ai = "0.4.9"
[[package]]
name = "opentelemetry-instrumentation-openai"
-version = "0.40.5"
+version = "0.40.6"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
-groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_openai-0.40.5-py3-none-any.whl", hash = "sha256:533cbfc00a6d629998c5adb49e37a2165559a5d26bb6bb6a61f768bf23e96cf9"},
- {file = "opentelemetry_instrumentation_openai-0.40.5.tar.gz", hash = "sha256:691d9e7bca55b5a21538c86127ee5af05b033385212aaeb64eab2dd383cb815b"},
+ {file = "opentelemetry_instrumentation_openai-0.40.6-py3-none-any.whl", hash = "sha256:daa46b72e4bb335680029c60f5e0b126022d4340c3aa14d3b15075e9f21fcf4a"},
+ {file = "opentelemetry_instrumentation_openai-0.40.6.tar.gz", hash = "sha256:177ab51c833c399eb7a5e2d4bca68f9ef56229084372b6d855f0032565cf3b31"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.8"
+opentelemetry-semantic-conventions-ai = "0.4.9"
tiktoken = ">=0.6.0,<1"
[[package]]
name = "opentelemetry-instrumentation-replicate"
-version = "0.40.5"
+version = "0.40.6"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
-groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_replicate-0.40.5-py3-none-any.whl", hash = "sha256:948ecea48de37639433a64cc36bbb5f61f1de24122cafcf3101d5c4c113b7a82"},
- {file = "opentelemetry_instrumentation_replicate-0.40.5.tar.gz", hash = "sha256:d5d70375619ed286c80f25631ac1ab1cbe58146dcf90efa92203c7c93b8d5b6c"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.6-py3-none-any.whl", hash = "sha256:1762ac25820b15f1b8a0a17c8bb76f2342eda463a8ea0cf9eba63cffcb59fbf4"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.6.tar.gz", hash = "sha256:b7b489117cd94a794e555c5868ddf954865fe584c10ff9a082487c9634524d44"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.8"
+opentelemetry-semantic-conventions-ai = "0.4.9"
[[package]]
name = "opentelemetry-proto"
-version = "1.33.0"
+version = "1.33.1"
description = "OpenTelemetry Python Proto"
optional = false
python-versions = ">=3.8"
-groups = ["main"]
files = [
- {file = "opentelemetry_proto-1.33.0-py3-none-any.whl", hash = "sha256:84a1d7daacac4aa0f24a5b1190a3e0619011dbff56f945fc2b6fc0a18f48b942"},
- {file = "opentelemetry_proto-1.33.0.tar.gz", hash = "sha256:ec5aa35486c990207ead2512a8d616d1b324928562c91dbc7e0cb9aa48c60b7b"},
+ {file = "opentelemetry_proto-1.33.1-py3-none-any.whl", hash = "sha256:243d285d9f29663fc7ea91a7171fcc1ccbbfff43b48df0774fd64a37d98eda70"},
+ {file = "opentelemetry_proto-1.33.1.tar.gz", hash = "sha256:9627b0a5c90753bf3920c398908307063e4458b287bb890e5c1d6fa11ad50b68"},
]
[package.dependencies]
@@ -1138,47 +1099,44 @@ protobuf = ">=5.0,<6.0"
[[package]]
name = "opentelemetry-sdk"
-version = "1.33.0"
+version = "1.33.1"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.8"
-groups = ["main"]
files = [
- {file = "opentelemetry_sdk-1.33.0-py3-none-any.whl", hash = "sha256:bed376b6d37fbf00688bb65edfee817dd01d48b8559212831437529a6066049a"},
- {file = "opentelemetry_sdk-1.33.0.tar.gz", hash = "sha256:a7fc56d1e07b218fcc316b24d21b59d3f1967b2ca22c217b05da3a26b797cc68"},
+ {file = "opentelemetry_sdk-1.33.1-py3-none-any.whl", hash = "sha256:19ea73d9a01be29cacaa5d6c8ce0adc0b7f7b4d58cc52f923e4413609f670112"},
+ {file = "opentelemetry_sdk-1.33.1.tar.gz", hash = "sha256:85b9fcf7c3d23506fbc9692fd210b8b025a1920535feec50bd54ce203d57a531"},
]
[package.dependencies]
-opentelemetry-api = "1.33.0"
-opentelemetry-semantic-conventions = "0.54b0"
+opentelemetry-api = "1.33.1"
+opentelemetry-semantic-conventions = "0.54b1"
typing-extensions = ">=3.7.4"
[[package]]
name = "opentelemetry-semantic-conventions"
-version = "0.54b0"
+version = "0.54b1"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.8"
-groups = ["main"]
files = [
- {file = "opentelemetry_semantic_conventions-0.54b0-py3-none-any.whl", hash = "sha256:fad7c1cf8908fd449eb5cf9fbbeefb301acf4bc995101f85277899cec125d823"},
- {file = "opentelemetry_semantic_conventions-0.54b0.tar.gz", hash = "sha256:467b739977bdcb079af1af69f73632535cdb51099d5e3c5709a35d10fe02a9c9"},
+ {file = "opentelemetry_semantic_conventions-0.54b1-py3-none-any.whl", hash = "sha256:29dab644a7e435b58d3a3918b58c333c92686236b30f7891d5e51f02933ca60d"},
+ {file = "opentelemetry_semantic_conventions-0.54b1.tar.gz", hash = "sha256:d1cecedae15d19bdaafca1e56b29a66aa286f50b5d08f036a145c7f3e9ef9cee"},
]
[package.dependencies]
deprecated = ">=1.2.6"
-opentelemetry-api = "1.33.0"
+opentelemetry-api = "1.33.1"
[[package]]
name = "opentelemetry-semantic-conventions-ai"
-version = "0.4.8"
+version = "0.4.9"
description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
optional = false
python-versions = "<4,>=3.9"
-groups = ["main"]
files = [
- {file = "opentelemetry_semantic_conventions_ai-0.4.8-py3-none-any.whl", hash = "sha256:02f7d5876a21e4376d5f4e7cd1157677e3e186f60bb3b6375a1205518a5f0908"},
- {file = "opentelemetry_semantic_conventions_ai-0.4.8.tar.gz", hash = "sha256:b4663403315aa08e83c4651c13dbc1fbe3e518229b4df87ca32e24895dd04007"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.9-py3-none-any.whl", hash = "sha256:71149e46a72554ae17de46bca6c11ba540c19c89904bd4cc3111aac6edf10315"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.9.tar.gz", hash = "sha256:54a0b901959e2de5124384925846bac2ea0a6dab3de7e501ba6aecf5e293fe04"},
]
[[package]]
@@ -1187,7 +1145,6 @@ version = "5.4.1"
description = "Orderly set"
optional = false
python-versions = ">=3.8"
-groups = ["main"]
files = [
{file = "orderly_set-5.4.1-py3-none-any.whl", hash = "sha256:b5e21d21680bd9ef456885db800c5cb4f76a03879880c0175e1b077fb166fd83"},
{file = "orderly_set-5.4.1.tar.gz", hash = "sha256:a1fb5a4fdc5e234e9e8d8e5c1bbdbc4540f4dfe50d12bf17c8bc5dbf1c9c878d"},
@@ -1199,7 +1156,6 @@ version = "25.0"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev"]
files = [
{file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"},
{file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"},
@@ -1211,7 +1167,6 @@ version = "2.2.3"
description = "Powerful data structures for data analysis, time series, and statistics"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"},
{file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"},
@@ -1298,7 +1253,6 @@ version = "1.20.2"
description = "parse() is the opposite of format()"
optional = false
python-versions = "*"
-groups = ["main", "dev"]
files = [
{file = "parse-1.20.2-py2.py3-none-any.whl", hash = "sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558"},
{file = "parse-1.20.2.tar.gz", hash = "sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce"},
@@ -1310,7 +1264,6 @@ version = "0.6.4"
description = "Simplifies to build parse types based on the parse module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,>=2.7"
-groups = ["dev"]
files = [
{file = "parse_type-0.6.4-py2.py3-none-any.whl", hash = "sha256:83d41144a82d6b8541127bf212dd76c7f01baff680b498ce8a4d052a7a5bce4c"},
{file = "parse_type-0.6.4.tar.gz", hash = "sha256:5e1ec10440b000c3f818006033372939e693a9ec0176f446d9303e4db88489a6"},
@@ -1321,9 +1274,9 @@ parse = {version = ">=1.18.0", markers = "python_version >= \"3.0\""}
six = ">=1.15"
[package.extras]
-develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-cov", "pytest-html (>=1.19.0)", "ruff ; python_version >= \"3.7\"", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0) ; python_version <= \"3.6\"", "virtualenv (>=20.0.0) ; python_version > \"3.6\"", "wheel"]
+develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0)", "pytest (>=5.0)", "pytest-cov", "pytest-html (>=1.19.0)", "ruff", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0)", "virtualenv (>=20.0.0)", "wheel"]
docs = ["Sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6.0)"]
-testing = ["pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-html (>=1.19.0)"]
+testing = ["pytest (<5.0)", "pytest (>=5.0)", "pytest-html (>=1.19.0)"]
[[package]]
name = "pluggy"
@@ -1331,7 +1284,6 @@ version = "1.6.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"},
{file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"},
@@ -1347,7 +1299,6 @@ version = "5.29.4"
description = ""
optional = false
python-versions = ">=3.8"
-groups = ["main"]
files = [
{file = "protobuf-5.29.4-cp310-abi3-win32.whl", hash = "sha256:13eb236f8eb9ec34e63fc8b1d6efd2777d062fa6aaa68268fb67cf77f6839ad7"},
{file = "protobuf-5.29.4-cp310-abi3-win_amd64.whl", hash = "sha256:bcefcdf3976233f8a502d265eb65ea740c989bacc6c30a58290ed0e519eb4b8d"},
@@ -1368,7 +1319,6 @@ version = "19.0.1"
description = "Python library for Apache Arrow"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:fc28912a2dc924dddc2087679cc8b7263accc71b9ff025a1362b004711661a69"},
{file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:fca15aabbe9b8355800d923cc2e82c8ef514af321e18b437c3d782aa884eaeec"},
@@ -1423,7 +1373,6 @@ version = "2.11.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.9"
-groups = ["main", "dev"]
files = [
{file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"},
{file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"},
@@ -1437,7 +1386,7 @@ typing-inspection = ">=0.4.0"
[package.extras]
email = ["email-validator (>=2.0.0)"]
-timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""]
+timezone = ["tzdata"]
[[package]]
name = "pydantic-core"
@@ -1445,7 +1394,6 @@ version = "2.33.2"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.9"
-groups = ["main", "dev"]
files = [
{file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"},
{file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"},
@@ -1557,7 +1505,6 @@ version = "7.4.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
-groups = ["dev"]
files = [
{file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
{file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
@@ -1580,7 +1527,6 @@ version = "0.23.8"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"},
{file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"},
@@ -1599,7 +1545,6 @@ version = "1.7.0"
description = "Adds the ability to retry flaky tests in CI environments"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "pytest_retry-1.7.0-py3-none-any.whl", hash = "sha256:a2dac85b79a4e2375943f1429479c65beb6c69553e7dae6b8332be47a60954f4"},
{file = "pytest_retry-1.7.0.tar.gz", hash = "sha256:f8d52339f01e949df47c11ba9ee8d5b362f5824dff580d3870ec9ae0057df80f"},
@@ -1617,7 +1562,6 @@ version = "3.6.1"
description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"},
{file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"},
@@ -1638,7 +1582,6 @@ version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
-groups = ["dev"]
files = [
{file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
{file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
@@ -1653,7 +1596,6 @@ version = "1.1.0"
description = "Read key-value pairs from a .env file and set them as environment variables"
optional = false
python-versions = ">=3.9"
-groups = ["main"]
files = [
{file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"},
{file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"},
@@ -1668,7 +1610,6 @@ version = "2025.2"
description = "World timezone definitions, modern and historical"
optional = false
python-versions = "*"
-groups = ["dev"]
files = [
{file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"},
{file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"},
@@ -1680,7 +1621,6 @@ version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev"]
files = [
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
@@ -1743,7 +1683,6 @@ version = "0.36.2"
description = "JSON Referencing + Python"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"},
{file = "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"},
@@ -1760,7 +1699,6 @@ version = "2024.11.6"
description = "Alternative regular expression module, to replace re."
optional = false
python-versions = ">=3.8"
-groups = ["main"]
files = [
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"},
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"},
@@ -1864,7 +1802,6 @@ version = "1.0.6"
description = "Python client for Replicate"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
files = [
{file = "replicate-1.0.6-py3-none-any.whl", hash = "sha256:d544f837dc7e9dc3b3c1df60a145c7d6f362d6719b719793a44a4be28837103d"},
{file = "replicate-1.0.6.tar.gz", hash = "sha256:b8a0f1649ed4146c3d624e22a418b8c6decce9346cffc110c90fde5995c46e60"},
@@ -1882,7 +1819,6 @@ version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev"]
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
@@ -1904,7 +1840,6 @@ version = "0.25.0"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "rpds_py-0.25.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c146a24a8f0dc4a7846fb4640b88b3a68986585b8ce8397af15e66b7c5817439"},
{file = "rpds_py-0.25.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:77814c7a4e1dc43fba73aeb4c1ef0fe37d901f3aa869a4823de5ea843a283fd0"},
@@ -2028,7 +1963,6 @@ version = "0.11.5"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
-groups = ["dev"]
files = [
{file = "ruff-0.11.5-py3-none-linux_armv6l.whl", hash = "sha256:2561294e108eb648e50f210671cc56aee590fb6167b594144401532138c66c7b"},
{file = "ruff-0.11.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ac12884b9e005c12d0bd121f56ccf8033e1614f736f766c118ad60780882a077"},
@@ -2056,7 +1990,6 @@ version = "1.17.0"
description = "Python 2 and 3 compatibility utilities"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
-groups = ["dev"]
files = [
{file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"},
{file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"},
@@ -2068,7 +2001,6 @@ version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
-groups = ["main", "dev"]
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
@@ -2080,7 +2012,6 @@ version = "0.9.0"
description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
optional = false
python-versions = ">=3.9"
-groups = ["main"]
files = [
{file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"},
{file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"},
@@ -2128,7 +2059,6 @@ version = "0.21.1"
description = ""
optional = false
python-versions = ">=3.9"
-groups = ["main", "dev"]
files = [
{file = "tokenizers-0.21.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:e78e413e9e668ad790a29456e677d9d3aa50a9ad311a40905d6861ba7692cf41"},
{file = "tokenizers-0.21.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:cd51cd0a91ecc801633829fcd1fda9cf8682ed3477c6243b9a095539de4aecf3"},
@@ -2161,8 +2091,6 @@ version = "2.2.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
-groups = ["dev"]
-markers = "python_version < \"3.11\""
files = [
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
@@ -2204,7 +2132,6 @@ version = "4.67.1"
description = "Fast, Extensible Progress Meter"
optional = false
python-versions = ">=3.7"
-groups = ["main", "dev"]
files = [
{file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"},
{file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"},
@@ -2222,14 +2149,13 @@ telegram = ["requests"]
[[package]]
name = "types-jsonschema"
-version = "4.23.0.20241208"
+version = "4.23.0.20250516"
description = "Typing stubs for jsonschema"
optional = false
-python-versions = ">=3.8"
-groups = ["dev"]
+python-versions = ">=3.9"
files = [
- {file = "types_jsonschema-4.23.0.20241208-py3-none-any.whl", hash = "sha256:87934bd9231c99d8eff94cacfc06ba668f7973577a9bd9e1f9de957c5737313e"},
- {file = "types_jsonschema-4.23.0.20241208.tar.gz", hash = "sha256:e8b15ad01f290ecf6aea53f93fbdf7d4730e4600313e89e8a7f95622f7e87b7c"},
+ {file = "types_jsonschema-4.23.0.20250516-py3-none-any.whl", hash = "sha256:e7d0dd7db7e59e63c26e3230e26ffc64c4704cc5170dc21270b366a35ead1618"},
+ {file = "types_jsonschema-4.23.0.20250516.tar.gz", hash = "sha256:9ace09d9d35c4390a7251ccd7d833b92ccc189d24d1b347f26212afce361117e"},
]
[package.dependencies]
@@ -2241,7 +2167,6 @@ version = "5.29.1.20250403"
description = "Typing stubs for protobuf"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "types_protobuf-5.29.1.20250403-py3-none-any.whl", hash = "sha256:c71de04106a2d54e5b2173d0a422058fae0ef2d058d70cf369fb797bf61ffa59"},
{file = "types_protobuf-5.29.1.20250403.tar.gz", hash = "sha256:7ff44f15022119c9d7558ce16e78b2d485bf7040b4fadced4dd069bb5faf77a2"},
@@ -2249,14 +2174,13 @@ files = [
[[package]]
name = "types-python-dateutil"
-version = "2.9.0.20241206"
+version = "2.9.0.20250516"
description = "Typing stubs for python-dateutil"
optional = false
-python-versions = ">=3.8"
-groups = ["dev"]
+python-versions = ">=3.9"
files = [
- {file = "types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = "sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53"},
- {file = "types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb"},
+ {file = "types_python_dateutil-2.9.0.20250516-py3-none-any.whl", hash = "sha256:2b2b3f57f9c6a61fba26a9c0ffb9ea5681c9b83e69cd897c6b5f668d9c0cab93"},
+ {file = "types_python_dateutil-2.9.0.20250516.tar.gz", hash = "sha256:13e80d6c9c47df23ad773d54b2826bd52dbbb41be87c3f339381c1700ad21ee5"},
]
[[package]]
@@ -2265,7 +2189,6 @@ version = "2.32.0.20250515"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
files = [
{file = "types_requests-2.32.0.20250515-py3-none-any.whl", hash = "sha256:f8eba93b3a892beee32643ff836993f15a785816acca21ea0ffa006f05ef0fb2"},
{file = "types_requests-2.32.0.20250515.tar.gz", hash = "sha256:09c8b63c11318cb2460813871aaa48b671002e59fda67ca909e9883777787581"},
@@ -2280,7 +2203,6 @@ version = "4.13.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
-groups = ["main", "dev"]
files = [
{file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"},
{file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"},
@@ -2292,7 +2214,6 @@ version = "0.4.0"
description = "Runtime typing introspection tools"
optional = false
python-versions = ">=3.9"
-groups = ["main", "dev"]
files = [
{file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"},
{file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"},
@@ -2307,7 +2228,6 @@ version = "2025.2"
description = "Provider of IANA time zone data"
optional = false
python-versions = ">=2"
-groups = ["dev"]
files = [
{file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"},
{file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"},
@@ -2319,14 +2239,13 @@ version = "2.4.0"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.9"
-groups = ["main", "dev"]
files = [
{file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"},
{file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"},
]
[package.extras]
-brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
@@ -2337,7 +2256,6 @@ version = "1.17.2"
description = "Module for decorators, wrappers and monkey patching."
optional = false
python-versions = ">=3.8"
-groups = ["main"]
files = [
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"},
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"},
@@ -2426,21 +2344,20 @@ version = "3.21.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.9"
-groups = ["main"]
files = [
{file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"},
{file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"},
]
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
-test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
+test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
type = ["pytest-mypy"]
[metadata]
-lock-version = "2.1"
+lock-version = "2.0"
python-versions = ">=3.9,<4"
content-hash = "b13d162d3fbca536d5eabbb7baec8155cc8eaeffc3c26227397a572bddde5385"
diff --git a/pyproject.toml b/pyproject.toml
index cf857320..60bcf14e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "humanloop"
[tool.poetry]
name = "humanloop"
-version = "0.8.40b3"
+version = "0.8.39"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 242b5db9..069f352d 100644
--- a/reference.md
+++ b/reference.md
@@ -178,6 +178,7 @@ Controls how the model uses tools. The following options are supported:
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
+
A new Prompt version will be created if the provided details do not match any existing version.
@@ -282,14 +283,6 @@ A new Prompt version will be created if the provided details do not match any ex
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -587,14 +580,6 @@ Controls how the model uses tools. The following options are supported:
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -724,6 +709,7 @@ Controls how the model uses tools. The following options are supported:
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
+
A new Prompt version will be created if the provided details do not match any existing version.
@@ -772,14 +758,6 @@ A new Prompt version will be created if the provided details do not match any ex
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -996,6 +974,7 @@ Controls how the model uses tools. The following options are supported:
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
+
A new Prompt version will be created if the provided details do not match any existing version.
@@ -1044,14 +1023,6 @@ A new Prompt version will be created if the provided details do not match any ex
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2690,14 +2661,6 @@ client.tools.call()
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2952,14 +2915,6 @@ client.tools.log(path='math-tool', tool={'function': {'name': 'multiply', 'descr
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -3179,14 +3134,6 @@ client.tools.update(id='id', log_id='log_id', )
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5779,14 +5726,6 @@ client.evaluators.log(parent_id='parent_id', )
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -6883,9 +6822,6 @@ Log to a Flow.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Flow. Otherwise, the default deployed version will be chosen.
-
-If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
-in order to trigger Evaluators.
@@ -7074,7 +7010,7 @@ client.flows.log(id='fl_6o701g4jmcanPVHxdqD0O', flow={'attributes': {'prompt': {
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, no more Logs can be added to it.
@@ -7249,7 +7185,7 @@ client.flows.update_log(log_id='medqa_experiment_0001', inputs={'question': 'Pat
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
@@ -8447,6 +8383,7 @@ Controls how the model uses tools. The following options are supported:
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
+
A new Agent version will be created if the provided details do not match any existing version.
@@ -8551,14 +8488,6 @@ A new Agent version will be created if the provided details do not match any exi
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -8722,7 +8651,7 @@ client.agents.update_log(id='ag_1234567890', log_id='log_1234567890', messages=[
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
@@ -8862,6 +8791,7 @@ Controls how the model uses tools. The following options are supported:
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
+
A new Agent version will be created if the provided details do not match any existing version.
@@ -8910,14 +8840,6 @@ A new Agent version will be created if the provided details do not match any exi
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -9120,6 +9042,7 @@ Controls how the model uses tools. The following options are supported:
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
+
A new Agent version will be created if the provided details do not match any existing version.
@@ -9168,14 +9091,6 @@ A new Agent version will be created if the provided details do not match any exi
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py
index ab7b887c..c03ac58a 100644
--- a/src/humanloop/agents/client.py
+++ b/src/humanloop/agents/client.py
@@ -92,7 +92,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -162,6 +161,7 @@ def log(
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
+
A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
@@ -200,9 +200,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -267,7 +264,6 @@ def log(
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -320,7 +316,7 @@ def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -364,7 +360,6 @@ def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -421,6 +416,7 @@ def call_stream(
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
+
A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -438,9 +434,6 @@ def call_stream(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -497,7 +490,6 @@ def call_stream(
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -526,7 +518,6 @@ def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -583,6 +574,7 @@ def call(
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
+
A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -600,9 +592,6 @@ def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -657,7 +646,6 @@ def call(
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -1509,7 +1497,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1579,6 +1566,7 @@ async def log(
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
+
A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
@@ -1617,9 +1605,6 @@ async def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1687,7 +1672,6 @@ async def main() -> None:
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -1740,7 +1724,7 @@ async def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1787,7 +1771,6 @@ async def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1844,6 +1827,7 @@ async def call_stream(
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
+
A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -1861,9 +1845,6 @@ async def call_stream(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1923,7 +1904,6 @@ async def main() -> None:
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -1953,7 +1933,6 @@ async def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -2010,6 +1989,7 @@ async def call(
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
+
A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -2027,9 +2007,6 @@ async def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2087,7 +2064,6 @@ async def main() -> None:
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py
index e577f8c2..8863cc7f 100644
--- a/src/humanloop/agents/raw_client.py
+++ b/src/humanloop/agents/raw_client.py
@@ -92,7 +92,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -162,6 +161,7 @@ def log(
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
+
A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
@@ -200,9 +200,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -270,7 +267,6 @@ def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -352,7 +348,7 @@ def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -425,7 +421,6 @@ def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -482,6 +477,7 @@ def call_stream(
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
+
A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -499,9 +495,6 @@ def call_stream(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -561,7 +554,6 @@ def call_stream(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -640,7 +632,6 @@ def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -697,6 +688,7 @@ def call(
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
+
A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -714,9 +706,6 @@ def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -776,7 +765,6 @@ def call(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -2071,7 +2059,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -2141,6 +2128,7 @@ async def log(
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
+
A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
@@ -2179,9 +2167,6 @@ async def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2249,7 +2234,6 @@ async def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -2331,7 +2315,7 @@ async def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -2404,7 +2388,6 @@ async def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -2461,6 +2444,7 @@ async def call_stream(
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
+
A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -2478,9 +2462,6 @@ async def call_stream(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2540,7 +2521,6 @@ async def call_stream(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -2619,7 +2599,6 @@ async def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -2676,6 +2655,7 @@ async def call(
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
+
A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -2693,9 +2673,6 @@ async def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2755,7 +2732,6 @@ async def call(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py
index 7b49ac92..a71ca52c 100644
--- a/src/humanloop/core/client_wrapper.py
+++ b/src/humanloop/core/client_wrapper.py
@@ -14,10 +14,10 @@ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[floa
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "humanloop/0.8.40b3",
+ "User-Agent": "humanloop/0.8.39",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "humanloop",
- "X-Fern-SDK-Version": "0.8.40b3",
+ "X-Fern-SDK-Version": "0.8.39",
}
headers["X-API-KEY"] = self.api_key
return headers
diff --git a/src/humanloop/evaluators/client.py b/src/humanloop/evaluators/client.py
index 69fff10c..78d0f826 100644
--- a/src/humanloop/evaluators/client.py
+++ b/src/humanloop/evaluators/client.py
@@ -18,7 +18,6 @@
from ..types.file_environment_response import FileEnvironmentResponse
from ..types.file_sort_by import FileSortBy
from ..types.list_evaluators import ListEvaluators
-from ..types.log_status import LogStatus
from ..types.sort_order import SortOrder
from .raw_client import AsyncRawEvaluatorsClient, RawEvaluatorsClient
from .requests.create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams
@@ -64,7 +63,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -135,9 +133,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -199,7 +194,6 @@ def log(
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -735,7 +729,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -806,9 +799,6 @@ async def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -873,7 +863,6 @@ async def main() -> None:
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
diff --git a/src/humanloop/evaluators/raw_client.py b/src/humanloop/evaluators/raw_client.py
index 8aeb32bc..5a64aea3 100644
--- a/src/humanloop/evaluators/raw_client.py
+++ b/src/humanloop/evaluators/raw_client.py
@@ -26,7 +26,6 @@
from ..types.file_sort_by import FileSortBy
from ..types.http_validation_error import HttpValidationError
from ..types.list_evaluators import ListEvaluators
-from ..types.log_status import LogStatus
from ..types.paginated_data_evaluator_response import PaginatedDataEvaluatorResponse
from ..types.sort_order import SortOrder
from .requests.create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams
@@ -61,7 +60,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -132,9 +130,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -194,7 +189,6 @@ def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"parent_id": parent_id,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
@@ -1047,7 +1041,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1118,9 +1111,6 @@ async def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1180,7 +1170,6 @@ async def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"parent_id": parent_id,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index 8fae2360..f4c5b239 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -81,9 +81,6 @@ def log(
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Flow. Otherwise, the default deployed version will be chosen.
- If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
- in order to trigger Evaluators.
-
Parameters
----------
version_id : typing.Optional[str]
@@ -144,7 +141,7 @@ def log(
Any additional metadata to record.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+ Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, no more Logs can be added to it.
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -258,7 +255,7 @@ def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -837,9 +834,6 @@ async def log(
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Flow. Otherwise, the default deployed version will be chosen.
- If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
- in order to trigger Evaluators.
-
Parameters
----------
version_id : typing.Optional[str]
@@ -900,7 +894,7 @@ async def log(
Any additional metadata to record.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+ Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, no more Logs can be added to it.
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1017,7 +1011,7 @@ async def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
diff --git a/src/humanloop/flows/raw_client.py b/src/humanloop/flows/raw_client.py
index e3954572..8a827149 100644
--- a/src/humanloop/flows/raw_client.py
+++ b/src/humanloop/flows/raw_client.py
@@ -78,9 +78,6 @@ def log(
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Flow. Otherwise, the default deployed version will be chosen.
- If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
- in order to trigger Evaluators.
-
Parameters
----------
version_id : typing.Optional[str]
@@ -141,7 +138,7 @@ def log(
Any additional metadata to record.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+ Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, no more Logs can be added to it.
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -285,7 +282,7 @@ def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1166,9 +1163,6 @@ async def log(
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Flow. Otherwise, the default deployed version will be chosen.
- If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
- in order to trigger Evaluators.
-
Parameters
----------
version_id : typing.Optional[str]
@@ -1229,7 +1223,7 @@ async def log(
Any additional metadata to record.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+ Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, no more Logs can be added to it.
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1373,7 +1367,7 @@ async def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py
index cd772a17..e223f66e 100644
--- a/src/humanloop/prompts/client.py
+++ b/src/humanloop/prompts/client.py
@@ -21,7 +21,6 @@
from ..types.file_sort_by import FileSortBy
from ..types.list_prompts import ListPrompts
from ..types.log_response import LogResponse
-from ..types.log_status import LogStatus
from ..types.model_endpoints import ModelEndpoints
from ..types.model_providers import ModelProviders
from ..types.populate_template_response import PopulateTemplateResponse
@@ -92,7 +91,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -164,6 +162,7 @@ def log(
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
+
A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
@@ -202,9 +201,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -267,7 +263,6 @@ def log(
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -304,7 +299,6 @@ def update_log(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> LogResponse:
"""
@@ -387,9 +381,6 @@ def update_log(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -428,7 +419,6 @@ def update_log(
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
request_options=request_options,
)
return _response.data
@@ -448,7 +438,6 @@ def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -504,6 +493,7 @@ def call_stream(
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
+
A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -521,9 +511,6 @@ def call_stream(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -586,7 +573,6 @@ def call_stream(
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -617,7 +603,6 @@ def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -673,6 +658,7 @@ def call(
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
+
A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -690,9 +676,6 @@ def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -756,7 +739,6 @@ def call(
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -1534,7 +1516,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1606,6 +1587,7 @@ async def log(
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
+
A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
@@ -1644,9 +1626,6 @@ async def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1712,7 +1691,6 @@ async def main() -> None:
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -1749,7 +1727,6 @@ async def update_log(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> LogResponse:
"""
@@ -1832,9 +1809,6 @@ async def update_log(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1876,7 +1850,6 @@ async def main() -> None:
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
request_options=request_options,
)
return _response.data
@@ -1896,7 +1869,6 @@ async def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1952,6 +1924,7 @@ async def call_stream(
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
+
A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -1969,9 +1942,6 @@ async def call_stream(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2037,7 +2007,6 @@ async def main() -> None:
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -2069,7 +2038,6 @@ async def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -2125,6 +2093,7 @@ async def call(
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
+
A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -2142,9 +2111,6 @@ async def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2211,7 +2177,6 @@ async def main() -> None:
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
index 5d12b08e..8a7eb05e 100644
--- a/src/humanloop/prompts/raw_client.py
+++ b/src/humanloop/prompts/raw_client.py
@@ -32,7 +32,6 @@
from ..types.http_validation_error import HttpValidationError
from ..types.list_prompts import ListPrompts
from ..types.log_response import LogResponse
-from ..types.log_status import LogStatus
from ..types.model_endpoints import ModelEndpoints
from ..types.model_providers import ModelProviders
from ..types.paginated_data_prompt_response import PaginatedDataPromptResponse
@@ -92,7 +91,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -164,6 +162,7 @@ def log(
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
+
A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
@@ -202,9 +201,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -272,7 +268,6 @@ def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -338,7 +333,6 @@ def update_log(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[LogResponse]:
"""
@@ -421,9 +415,6 @@ def update_log(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -463,7 +454,6 @@ def update_log(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
},
headers={
"content-type": "application/json",
@@ -513,7 +503,6 @@ def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -569,6 +558,7 @@ def call_stream(
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
+
A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -586,9 +576,6 @@ def call_stream(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -654,7 +641,6 @@ def call_stream(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -735,7 +721,6 @@ def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -791,6 +776,7 @@ def call(
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
+
A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -808,9 +794,6 @@ def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -876,7 +859,6 @@ def call(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -2049,7 +2031,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -2121,6 +2102,7 @@ async def log(
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
+
A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
@@ -2159,9 +2141,6 @@ async def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2229,7 +2208,6 @@ async def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -2295,7 +2273,6 @@ async def update_log(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[LogResponse]:
"""
@@ -2378,9 +2355,6 @@ async def update_log(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -2420,7 +2394,6 @@ async def update_log(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
},
headers={
"content-type": "application/json",
@@ -2470,7 +2443,6 @@ async def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -2526,6 +2498,7 @@ async def call_stream(
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
+
A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -2543,9 +2516,6 @@ async def call_stream(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2611,7 +2581,6 @@ async def call_stream(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -2692,7 +2661,6 @@ async def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -2748,6 +2716,7 @@ async def call(
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
+
A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
@@ -2765,9 +2734,6 @@ async def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2833,7 +2799,6 @@ async def call(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
diff --git a/src/humanloop/requests/agent_log_response.py b/src/humanloop/requests/agent_log_response.py
index 940f348f..65ff3d4a 100644
--- a/src/humanloop/requests/agent_log_response.py
+++ b/src/humanloop/requests/agent_log_response.py
@@ -6,7 +6,6 @@
import typing
import typing_extensions
-from ..types.log_status import LogStatus
from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams
from .agent_response import AgentResponseParams
from .chat_message import ChatMessageParams
@@ -135,11 +134,6 @@ class AgentLogResponseParams(typing_extensions.TypedDict):
Any additional metadata to record.
"""
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing_extensions.NotRequired[str]
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/requests/create_agent_log_response.py b/src/humanloop/requests/create_agent_log_response.py
index f68f2e96..fe654f52 100644
--- a/src/humanloop/requests/create_agent_log_response.py
+++ b/src/humanloop/requests/create_agent_log_response.py
@@ -26,5 +26,5 @@ class CreateAgentLogResponseParams(typing_extensions.TypedDict):
log_status: typing_extensions.NotRequired[LogStatus]
"""
- Status of the Agent Log. When a Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+ Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it.
"""
diff --git a/src/humanloop/requests/create_flow_log_response.py b/src/humanloop/requests/create_flow_log_response.py
index 6f490ba3..5be10c8c 100644
--- a/src/humanloop/requests/create_flow_log_response.py
+++ b/src/humanloop/requests/create_flow_log_response.py
@@ -26,5 +26,5 @@ class CreateFlowLogResponseParams(typing_extensions.TypedDict):
log_status: typing_extensions.NotRequired[LogStatus]
"""
- Status of the Flow Log. When a Flow Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is marked as `complete`, no more Logs can be added to it.
"""
diff --git a/src/humanloop/requests/evaluator_log_response.py b/src/humanloop/requests/evaluator_log_response.py
index c434280e..a2860c41 100644
--- a/src/humanloop/requests/evaluator_log_response.py
+++ b/src/humanloop/requests/evaluator_log_response.py
@@ -6,7 +6,6 @@
import typing
import typing_extensions
-from ..types.log_status import LogStatus
from .chat_message import ChatMessageParams
from .evaluator_log_response_judgment import EvaluatorLogResponseJudgmentParams
from .evaluator_response import EvaluatorResponseParams
@@ -80,11 +79,6 @@ class EvaluatorLogResponseParams(typing_extensions.TypedDict):
Any additional metadata to record.
"""
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
parent_id: typing_extensions.NotRequired[str]
"""
Identifier of the evaluated Log. The newly created Log will have this one set as parent.
diff --git a/src/humanloop/requests/flow_log_response.py b/src/humanloop/requests/flow_log_response.py
index 661fc301..f930c376 100644
--- a/src/humanloop/requests/flow_log_response.py
+++ b/src/humanloop/requests/flow_log_response.py
@@ -92,7 +92,7 @@ class FlowLogResponseParams(typing_extensions.TypedDict):
log_status: typing_extensions.NotRequired[LogStatus]
"""
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
"""
source_datapoint_id: typing_extensions.NotRequired[str]
diff --git a/src/humanloop/requests/prompt_call_response.py b/src/humanloop/requests/prompt_call_response.py
index 14ff4609..cbbbfec9 100644
--- a/src/humanloop/requests/prompt_call_response.py
+++ b/src/humanloop/requests/prompt_call_response.py
@@ -4,7 +4,6 @@
import typing
import typing_extensions
-from ..types.log_status import LogStatus
from .chat_message import ChatMessageParams
from .prompt_call_log_response import PromptCallLogResponseParams
from .prompt_call_response_tool_choice import PromptCallResponseToolChoiceParams
@@ -60,11 +59,6 @@ class PromptCallResponseParams(typing_extensions.TypedDict):
Any additional metadata to record.
"""
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing_extensions.NotRequired[str]
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/requests/prompt_log_response.py b/src/humanloop/requests/prompt_log_response.py
index 6147adec..c91d331f 100644
--- a/src/humanloop/requests/prompt_log_response.py
+++ b/src/humanloop/requests/prompt_log_response.py
@@ -6,7 +6,6 @@
import typing
import typing_extensions
-from ..types.log_status import LogStatus
from .chat_message import ChatMessageParams
from .prompt_log_response_tool_choice import PromptLogResponseToolChoiceParams
from .prompt_response import PromptResponseParams
@@ -135,11 +134,6 @@ class PromptLogResponseParams(typing_extensions.TypedDict):
Any additional metadata to record.
"""
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing_extensions.NotRequired[str]
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/requests/provider_api_keys.py b/src/humanloop/requests/provider_api_keys.py
index c37649ea..e7baf031 100644
--- a/src/humanloop/requests/provider_api_keys.py
+++ b/src/humanloop/requests/provider_api_keys.py
@@ -1,12 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-from ..core.serialization import FieldMetadata
class ProviderApiKeysParams(typing_extensions.TypedDict):
openai: typing_extensions.NotRequired[str]
- ai_21: typing_extensions.NotRequired[typing_extensions.Annotated[str, FieldMetadata(alias="ai21")]]
mock: typing_extensions.NotRequired[str]
anthropic: typing_extensions.NotRequired[str]
deepseek: typing_extensions.NotRequired[str]
@@ -14,3 +12,4 @@ class ProviderApiKeysParams(typing_extensions.TypedDict):
cohere: typing_extensions.NotRequired[str]
openai_azure: typing_extensions.NotRequired[str]
openai_azure_endpoint: typing_extensions.NotRequired[str]
+ google: typing_extensions.NotRequired[str]
diff --git a/src/humanloop/requests/tool_call_response.py b/src/humanloop/requests/tool_call_response.py
index e00069de..92d6a59a 100644
--- a/src/humanloop/requests/tool_call_response.py
+++ b/src/humanloop/requests/tool_call_response.py
@@ -4,7 +4,6 @@
import typing
import typing_extensions
-from ..types.log_status import LogStatus
from .evaluator_log_response import EvaluatorLogResponseParams
from .log_response import LogResponseParams
from .tool_response import ToolResponseParams
@@ -80,11 +79,6 @@ class ToolCallResponseParams(typing_extensions.TypedDict):
Any additional metadata to record.
"""
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing_extensions.NotRequired[str]
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/requests/tool_log_response.py b/src/humanloop/requests/tool_log_response.py
index f4be5ad0..06835c1d 100644
--- a/src/humanloop/requests/tool_log_response.py
+++ b/src/humanloop/requests/tool_log_response.py
@@ -6,7 +6,6 @@
import typing
import typing_extensions
-from ..types.log_status import LogStatus
from .chat_message import ChatMessageParams
from .tool_response import ToolResponseParams
@@ -80,11 +79,6 @@ class ToolLogResponseParams(typing_extensions.TypedDict):
Any additional metadata to record.
"""
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing_extensions.NotRequired[str]
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py
index d8449a7c..eec589d4 100644
--- a/src/humanloop/tools/client.py
+++ b/src/humanloop/tools/client.py
@@ -22,7 +22,6 @@
from ..types.files_tool_type import FilesToolType
from ..types.list_tools import ListTools
from ..types.log_response import LogResponse
-from ..types.log_status import LogStatus
from ..types.sort_order import SortOrder
from ..types.tool_call_response import ToolCallResponse
from ..types.tool_response import ToolResponse
@@ -60,7 +59,6 @@ def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -114,9 +112,6 @@ def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -160,7 +155,6 @@ def call(
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -191,7 +185,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -264,9 +257,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -322,7 +312,6 @@ def log(
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -350,7 +339,6 @@ def update(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> LogResponse:
"""
@@ -402,9 +390,6 @@ def update(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -434,7 +419,6 @@ def update(
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
request_options=request_options,
)
return _response.data
@@ -1066,7 +1050,6 @@ async def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1120,9 +1103,6 @@ async def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1169,7 +1149,6 @@ async def main() -> None:
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -1200,7 +1179,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1273,9 +1251,6 @@ async def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1334,7 +1309,6 @@ async def main() -> None:
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -1362,7 +1336,6 @@ async def update(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> LogResponse:
"""
@@ -1414,9 +1387,6 @@ async def update(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1449,7 +1419,6 @@ async def main() -> None:
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
request_options=request_options,
)
return _response.data
diff --git a/src/humanloop/tools/raw_client.py b/src/humanloop/tools/raw_client.py
index 85bbef9e..90657969 100644
--- a/src/humanloop/tools/raw_client.py
+++ b/src/humanloop/tools/raw_client.py
@@ -30,7 +30,6 @@
from ..types.http_validation_error import HttpValidationError
from ..types.list_tools import ListTools
from ..types.log_response import LogResponse
-from ..types.log_status import LogStatus
from ..types.paginated_data_tool_response import PaginatedDataToolResponse
from ..types.sort_order import SortOrder
from ..types.tool_call_response import ToolCallResponse
@@ -57,7 +56,6 @@ def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -111,9 +109,6 @@ def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -158,7 +153,6 @@ def call(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -218,7 +212,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -291,9 +284,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -345,7 +335,6 @@ def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -402,7 +391,6 @@ def update(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[LogResponse]:
"""
@@ -454,9 +442,6 @@ def update(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -481,7 +466,6 @@ def update(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
},
headers={
"content-type": "application/json",
@@ -1493,7 +1477,6 @@ async def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1547,9 +1530,6 @@ async def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1594,7 +1574,6 @@ async def call(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -1654,7 +1633,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1727,9 +1705,6 @@ async def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1781,7 +1756,6 @@ async def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -1838,7 +1812,6 @@ async def update(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[LogResponse]:
"""
@@ -1890,9 +1863,6 @@ async def update(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1917,7 +1887,6 @@ async def update(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
},
headers={
"content-type": "application/json",
diff --git a/src/humanloop/types/agent_log_response.py b/src/humanloop/types/agent_log_response.py
index 634ad4d0..0128b225 100644
--- a/src/humanloop/types/agent_log_response.py
+++ b/src/humanloop/types/agent_log_response.py
@@ -10,7 +10,6 @@
from ..core.unchecked_base_model import UncheckedBaseModel
from .agent_log_response_tool_choice import AgentLogResponseToolChoice
from .chat_message import ChatMessage
-from .log_status import LogStatus
class AgentLogResponse(UncheckedBaseModel):
@@ -132,11 +131,6 @@ class AgentLogResponse(UncheckedBaseModel):
Any additional metadata to record.
"""
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/types/create_agent_log_response.py b/src/humanloop/types/create_agent_log_response.py
index 2fe74aa4..a890ee41 100644
--- a/src/humanloop/types/create_agent_log_response.py
+++ b/src/humanloop/types/create_agent_log_response.py
@@ -30,7 +30,7 @@ class CreateAgentLogResponse(UncheckedBaseModel):
log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
"""
- Status of the Agent Log. When a Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+ Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it.
"""
if IS_PYDANTIC_V2:
diff --git a/src/humanloop/types/create_flow_log_response.py b/src/humanloop/types/create_flow_log_response.py
index ae296a6f..28e276df 100644
--- a/src/humanloop/types/create_flow_log_response.py
+++ b/src/humanloop/types/create_flow_log_response.py
@@ -30,7 +30,7 @@ class CreateFlowLogResponse(UncheckedBaseModel):
log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
"""
- Status of the Flow Log. When a Flow Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is marked as `complete`, no more Logs can be added to it.
"""
if IS_PYDANTIC_V2:
diff --git a/src/humanloop/types/evaluator_log_response.py b/src/humanloop/types/evaluator_log_response.py
index e006e7a2..c3dafce9 100644
--- a/src/humanloop/types/evaluator_log_response.py
+++ b/src/humanloop/types/evaluator_log_response.py
@@ -10,7 +10,6 @@
from ..core.unchecked_base_model import UncheckedBaseModel
from .chat_message import ChatMessage
from .evaluator_log_response_judgment import EvaluatorLogResponseJudgment
-from .log_status import LogStatus
class EvaluatorLogResponse(UncheckedBaseModel):
@@ -78,11 +77,6 @@ class EvaluatorLogResponse(UncheckedBaseModel):
Any additional metadata to record.
"""
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
parent_id: typing.Optional[str] = pydantic.Field(default=None)
"""
Identifier of the evaluated Log. The newly created Log will have this one set as parent.
diff --git a/src/humanloop/types/event_type.py b/src/humanloop/types/event_type.py
index 128eed92..232e1a68 100644
--- a/src/humanloop/types/event_type.py
+++ b/src/humanloop/types/event_type.py
@@ -8,6 +8,7 @@
"agent_turn_suspend",
"agent_turn_continue",
"agent_turn_end",
+ "agent_turn_error",
"agent_start",
"agent_update",
"agent_end",
@@ -15,7 +16,6 @@
"tool_update",
"tool_end",
"error",
- "agent_generation_error",
],
typing.Any,
]
diff --git a/src/humanloop/types/flow_log_response.py b/src/humanloop/types/flow_log_response.py
index 188c1fdf..9472a5b8 100644
--- a/src/humanloop/types/flow_log_response.py
+++ b/src/humanloop/types/flow_log_response.py
@@ -89,7 +89,7 @@ class FlowLogResponse(UncheckedBaseModel):
log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
"""
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
"""
source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
diff --git a/src/humanloop/types/prompt_call_response.py b/src/humanloop/types/prompt_call_response.py
index f20ce5f6..7c4b5e1c 100644
--- a/src/humanloop/types/prompt_call_response.py
+++ b/src/humanloop/types/prompt_call_response.py
@@ -9,7 +9,6 @@
from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
from ..core.unchecked_base_model import UncheckedBaseModel
from .chat_message import ChatMessage
-from .log_status import LogStatus
from .prompt_call_log_response import PromptCallLogResponse
from .prompt_call_response_tool_choice import PromptCallResponseToolChoice
@@ -63,11 +62,6 @@ class PromptCallResponse(UncheckedBaseModel):
Any additional metadata to record.
"""
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/types/prompt_log_response.py b/src/humanloop/types/prompt_log_response.py
index 8bea9781..64982bb9 100644
--- a/src/humanloop/types/prompt_log_response.py
+++ b/src/humanloop/types/prompt_log_response.py
@@ -9,7 +9,6 @@
from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
from ..core.unchecked_base_model import UncheckedBaseModel
from .chat_message import ChatMessage
-from .log_status import LogStatus
from .prompt_log_response_tool_choice import PromptLogResponseToolChoice
@@ -132,11 +131,6 @@ class PromptLogResponse(UncheckedBaseModel):
Any additional metadata to record.
"""
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/types/provider_api_keys.py b/src/humanloop/types/provider_api_keys.py
index 49bf8731..540f62c7 100644
--- a/src/humanloop/types/provider_api_keys.py
+++ b/src/humanloop/types/provider_api_keys.py
@@ -3,15 +3,12 @@
import typing
import pydantic
-import typing_extensions
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.serialization import FieldMetadata
from ..core.unchecked_base_model import UncheckedBaseModel
class ProviderApiKeys(UncheckedBaseModel):
openai: typing.Optional[str] = None
- ai_21: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="ai21")] = None
mock: typing.Optional[str] = None
anthropic: typing.Optional[str] = None
deepseek: typing.Optional[str] = None
@@ -19,6 +16,7 @@ class ProviderApiKeys(UncheckedBaseModel):
cohere: typing.Optional[str] = None
openai_azure: typing.Optional[str] = None
openai_azure_endpoint: typing.Optional[str] = None
+ google: typing.Optional[str] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
diff --git a/src/humanloop/types/tool_call_response.py b/src/humanloop/types/tool_call_response.py
index d3b660e1..b2301cc0 100644
--- a/src/humanloop/types/tool_call_response.py
+++ b/src/humanloop/types/tool_call_response.py
@@ -8,7 +8,6 @@
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
from ..core.unchecked_base_model import UncheckedBaseModel
-from .log_status import LogStatus
class ToolCallResponse(UncheckedBaseModel):
@@ -81,11 +80,6 @@ class ToolCallResponse(UncheckedBaseModel):
Any additional metadata to record.
"""
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/types/tool_log_response.py b/src/humanloop/types/tool_log_response.py
index 2524eb5b..abc308d5 100644
--- a/src/humanloop/types/tool_log_response.py
+++ b/src/humanloop/types/tool_log_response.py
@@ -9,7 +9,6 @@
from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
from ..core.unchecked_base_model import UncheckedBaseModel
from .chat_message import ChatMessage
-from .log_status import LogStatus
class ToolLogResponse(UncheckedBaseModel):
@@ -77,11 +76,6 @@ class ToolLogResponse(UncheckedBaseModel):
Any additional metadata to record.
"""
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
From 196fc3991539587eaddbedddc961dff0d7df500c Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 20 May 2025 13:02:50 +0100
Subject: [PATCH 2/3] Merge branch 'master' into flow-complete-dx
---
poetry.lock | 485 +++++++++-------
pyproject.toml | 2 +-
reference.md | 6 -
src/humanloop/agents/client.py | 808 +++++++++++++-------------
src/humanloop/agents/raw_client.py | 724 +++++++++++------------
src/humanloop/core/client_wrapper.py | 4 +-
src/humanloop/overload.py | 19 +-
src/humanloop/prompts/client.py | 836 ++++++++++++++-------------
src/humanloop/prompts/raw_client.py | 744 ++++++++++++------------
9 files changed, 1885 insertions(+), 1743 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 1aed4e24..acbc4633 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -6,6 +6,7 @@ version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
@@ -17,6 +18,7 @@ version = "0.51.0"
description = "The official Python library for the anthropic API"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "anthropic-0.51.0-py3-none-any.whl", hash = "sha256:b8b47d482c9aa1f81b923555cebb687c2730309a20d01be554730c8302e0f62a"},
{file = "anthropic-0.51.0.tar.gz", hash = "sha256:6f824451277992af079554430d5b2c8ff5bc059cc2c968cdc3f06824437da201"},
@@ -41,6 +43,7 @@ version = "4.9.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"},
{file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"},
@@ -54,7 +57,7 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""}
[package.extras]
doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"]
-test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"]
+test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""]
trio = ["trio (>=0.26.1)"]
[[package]]
@@ -63,18 +66,19 @@ version = "25.3.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"},
{file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"},
]
[package.extras]
-benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"]
-tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
+tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""]
[[package]]
name = "certifi"
@@ -82,6 +86,7 @@ version = "2025.4.26"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
+groups = ["main", "dev"]
files = [
{file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"},
{file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"},
@@ -93,6 +98,7 @@ version = "3.4.2"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
files = [
{file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"},
{file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"},
@@ -194,6 +200,7 @@ version = "8.1.8"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.7"
+groups = ["main"]
files = [
{file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"},
{file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"},
@@ -208,6 +215,7 @@ version = "5.15.0"
description = ""
optional = false
python-versions = "<4.0,>=3.9"
+groups = ["dev"]
files = [
{file = "cohere-5.15.0-py3-none-any.whl", hash = "sha256:22ff867c2a6f2fc2b585360c6072f584f11f275ef6d9242bac24e0fa2df1dfb5"},
{file = "cohere-5.15.0.tar.gz", hash = "sha256:e802d4718ddb0bb655654382ebbce002756a3800faac30296cde7f1bdc6ff2cc"},
@@ -230,10 +238,12 @@ version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+groups = ["main", "dev"]
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
+markers = {main = "platform_system == \"Windows\"", dev = "platform_system == \"Windows\" or sys_platform == \"win32\""}
[[package]]
name = "deepdiff"
@@ -241,6 +251,7 @@ version = "8.5.0"
description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other."
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "deepdiff-8.5.0-py3-none-any.whl", hash = "sha256:d4599db637f36a1c285f5fdfc2cd8d38bde8d8be8636b65ab5e425b67c54df26"},
{file = "deepdiff-8.5.0.tar.gz", hash = "sha256:a4dd3529fa8d4cd5b9cbb6e3ea9c95997eaa919ba37dac3966c1b8f872dc1cd1"},
@@ -252,7 +263,7 @@ orderly-set = ">=5.4.1,<6"
[package.extras]
cli = ["click (>=8.1.0,<8.2.0)", "pyyaml (>=6.0.0,<6.1.0)"]
coverage = ["coverage (>=7.6.0,<7.7.0)"]
-dev = ["bump2version (>=1.0.0,<1.1.0)", "ipdb (>=0.13.0,<0.14.0)", "jsonpickle (>=4.0.0,<4.1.0)", "nox (==2025.5.1)", "numpy (>=2.0,<3.0)", "numpy (>=2.2.0,<2.3.0)", "orjson (>=3.10.0,<3.11.0)", "pandas (>=2.2.0,<2.3.0)", "polars (>=1.21.0,<1.22.0)", "python-dateutil (>=2.9.0,<2.10.0)", "tomli (>=2.2.0,<2.3.0)", "tomli-w (>=1.2.0,<1.3.0)"]
+dev = ["bump2version (>=1.0.0,<1.1.0)", "ipdb (>=0.13.0,<0.14.0)", "jsonpickle (>=4.0.0,<4.1.0)", "nox (==2025.5.1)", "numpy (>=2.0,<3.0) ; python_version < \"3.10\"", "numpy (>=2.2.0,<2.3.0) ; python_version >= \"3.10\"", "orjson (>=3.10.0,<3.11.0)", "pandas (>=2.2.0,<2.3.0)", "polars (>=1.21.0,<1.22.0)", "python-dateutil (>=2.9.0,<2.10.0)", "tomli (>=2.2.0,<2.3.0)", "tomli-w (>=1.2.0,<1.3.0)"]
docs = ["Sphinx (>=6.2.0,<6.3.0)", "sphinx-sitemap (>=2.6.0,<2.7.0)", "sphinxemoji (>=0.3.0,<0.4.0)"]
optimize = ["orjson"]
static = ["flake8 (>=7.1.0,<7.2.0)", "flake8-pyproject (>=1.2.3,<1.3.0)", "pydantic (>=2.10.0,<2.11.0)"]
@@ -264,6 +275,7 @@ version = "1.2.18"
description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
+groups = ["main"]
files = [
{file = "Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec"},
{file = "deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d"},
@@ -273,7 +285,7 @@ files = [
wrapt = ">=1.10,<2"
[package.extras]
-dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools", "tox"]
+dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"]
[[package]]
name = "distro"
@@ -281,6 +293,7 @@ version = "1.9.0"
description = "Distro - an OS platform information API"
optional = false
python-versions = ">=3.6"
+groups = ["main", "dev"]
files = [
{file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"},
{file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
@@ -292,6 +305,8 @@ version = "1.3.0"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
+markers = "python_version < \"3.11\""
files = [
{file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"},
{file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"},
@@ -309,6 +324,7 @@ version = "2.1.1"
description = "execnet: rapid multi-Python deployment"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"},
{file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"},
@@ -319,47 +335,43 @@ testing = ["hatch", "pre-commit", "pytest", "tox"]
[[package]]
name = "fastavro"
-version = "1.11.1"
+version = "1.10.0"
description = "Fast read/write of AVRO files"
optional = false
python-versions = ">=3.9"
-files = [
- {file = "fastavro-1.11.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:603aa1c1d1be21fb4bcb63e1efb0711a9ddb337de81391c32dac95c6e0dacfcc"},
- {file = "fastavro-1.11.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45653b312d4ce297e2bd802ea3ffd17ecbe718e5e8b6e2ae04cd72cb50bb99d5"},
- {file = "fastavro-1.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:998a53fc552e6bee9acda32af258f02557313c85fb5b48becba5b71ec82f421e"},
- {file = "fastavro-1.11.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9f878c9ad819467120cb066f1c73496c42eb24ecdd7c992ec996f465ef4cedad"},
- {file = "fastavro-1.11.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:da9e4c231ac4951092c2230ca423d8a3f2966718f072ac1e2c5d2d44c70b2a50"},
- {file = "fastavro-1.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:7423bfad3199567eeee7ad6816402c7c0ee1658b959e8c10540cfbc60ce96c2a"},
- {file = "fastavro-1.11.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3573340e4564e8962e22f814ac937ffe0d4be5eabbd2250f77738dc47e3c8fe9"},
- {file = "fastavro-1.11.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7291cf47735b8bd6ff5d9b33120e6e0974f52fd5dff90cd24151b22018e7fd29"},
- {file = "fastavro-1.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf3bb065d657d5bac8b2cb39945194aa086a9b3354f2da7f89c30e4dc20e08e2"},
- {file = "fastavro-1.11.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8758317c85296b848698132efb13bc44a4fbd6017431cc0f26eaeb0d6fa13d35"},
- {file = "fastavro-1.11.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ad99d57228f83bf3e2214d183fbf6e2fda97fd649b2bdaf8e9110c36cbb02624"},
- {file = "fastavro-1.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:9134090178bdbf9eefd467717ced3dc151e27a7e7bfc728260ce512697efe5a4"},
- {file = "fastavro-1.11.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e8bc238f2637cd5d15238adbe8fb8c58d2e6f1870e0fb28d89508584670bae4b"},
- {file = "fastavro-1.11.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b403933081c83fc4d8a012ee64b86e560a024b1280e3711ee74f2abc904886e8"},
- {file = "fastavro-1.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f6ecb4b5f77aa756d973b7dd1c2fb4e4c95b4832a3c98b059aa96c61870c709"},
- {file = "fastavro-1.11.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:059893df63ef823b0231b485c9d43016c7e32850cae7bf69f4e9d46dd41c28f2"},
- {file = "fastavro-1.11.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5120ffc9a200699218e01777e695a2f08afb3547ba818184198c757dc39417bd"},
- {file = "fastavro-1.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:7bb9d0d2233f33a52908b6ea9b376fe0baf1144bdfdfb3c6ad326e200a8b56b0"},
- {file = "fastavro-1.11.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f963b8ddaf179660e814ab420850c1b4ea33e2ad2de8011549d958b21f77f20a"},
- {file = "fastavro-1.11.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0253e5b6a3c9b62fae9fc3abd8184c5b64a833322b6af7d666d3db266ad879b5"},
- {file = "fastavro-1.11.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca637b150e1f4c0e8e564fad40a16bd922bcb7ffd1a6e4836e6084f2c4f4e8db"},
- {file = "fastavro-1.11.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:76af1709031621828ca6ce7f027f7711fa33ac23e8269e7a5733996ff8d318da"},
- {file = "fastavro-1.11.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8224e6d8d9864d4e55dafbe88920d6a1b8c19cc3006acfac6aa4f494a6af3450"},
- {file = "fastavro-1.11.1-cp313-cp313-win_amd64.whl", hash = "sha256:cde7ed91b52ff21f0f9f157329760ba7251508ca3e9618af3ffdac986d9faaa2"},
- {file = "fastavro-1.11.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:e5ed1325c1c414dd954e7a2c5074daefe1eceb672b8c727aa030ba327aa00693"},
- {file = "fastavro-1.11.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cd3c95baeec37188899824faf44a5ee94dfc4d8667b05b2f867070c7eb174c4"},
- {file = "fastavro-1.11.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e0babcd81acceb4c60110af9efa25d890dbb68f7de880f806dadeb1e70fe413"},
- {file = "fastavro-1.11.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b2c0cb8063c7208b53b6867983dc6ae7cc80b91116b51d435d2610a5db2fc52f"},
- {file = "fastavro-1.11.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1bc2824e9969c04ab6263d269a1e0e5d40b9bd16ade6b70c29d6ffbc4f3cc102"},
- {file = "fastavro-1.11.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8b579bab31ff87fcb5ef9f6f13baaf99f189b92ed287af60348777583628c327"},
- {file = "fastavro-1.11.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c646f07c7827fea7425b6936a27f67f356a2a80ac19e6100ed6d3bb0610cc3d"},
- {file = "fastavro-1.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2915324e1edb0e06f0be0c18279c60f4cff49f6fe01626594707eb75cd9952fc"},
- {file = "fastavro-1.11.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8fd87ee1e9101b45172fb3cff21b56ce08270d9474eec1d436393677daa95938"},
- {file = "fastavro-1.11.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:88876568ef387996fbfc6b193a5b9830de3c0497af7d07e5c839a70b86bb47e7"},
- {file = "fastavro-1.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:cebb7433b860d9b13090d0e53f6db075e4e2042aeb2c577f515e73d2b9c98075"},
- {file = "fastavro-1.11.1.tar.gz", hash = "sha256:bf6acde5ee633a29fb8dfd6dfea13b164722bc3adc05a0e055df080549c1c2f8"},
+groups = ["dev"]
+files = [
+ {file = "fastavro-1.10.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1a9fe0672d2caf0fe54e3be659b13de3cad25a267f2073d6f4b9f8862acc31eb"},
+ {file = "fastavro-1.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86dd0410770e0c99363788f0584523709d85e57bb457372ec5c285a482c17fe6"},
+ {file = "fastavro-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:190e80dc7d77d03a6a8597a026146b32a0bbe45e3487ab4904dc8c1bebecb26d"},
+ {file = "fastavro-1.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bf570d63be9155c3fdc415f60a49c171548334b70fff0679a184b69c29b6bc61"},
+ {file = "fastavro-1.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e07abb6798e95dccecaec316265e35a018b523d1f3944ad396d0a93cb95e0a08"},
+ {file = "fastavro-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:37203097ed11d0b8fd3c004904748777d730cafd26e278167ea602eebdef8eb2"},
+ {file = "fastavro-1.10.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d183c075f527ab695a27ae75f210d4a86bce660cda2f85ae84d5606efc15ef50"},
+ {file = "fastavro-1.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7a95a2c0639bffd7c079b59e9a796bfc3a9acd78acff7088f7c54ade24e4a77"},
+ {file = "fastavro-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a678153b5da1b024a32ec3f611b2e7afd24deac588cb51dd1b0019935191a6d"},
+ {file = "fastavro-1.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:67a597a5cfea4dddcf8b49eaf8c2b5ffee7fda15b578849185bc690ec0cd0d8f"},
+ {file = "fastavro-1.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1fd689724760b17f69565d8a4e7785ed79becd451d1c99263c40cb2d6491f1d4"},
+ {file = "fastavro-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:4f949d463f9ac4221128a51e4e34e2562f401e5925adcadfd28637a73df6c2d8"},
+ {file = "fastavro-1.10.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:cfe57cb0d72f304bd0dcc5a3208ca6a7363a9ae76f3073307d095c9d053b29d4"},
+ {file = "fastavro-1.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74e517440c824cb65fb29d3e3903a9406f4d7c75490cef47e55c4c82cdc66270"},
+ {file = "fastavro-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:203c17d44cadde76e8eecb30f2d1b4f33eb478877552d71f049265dc6f2ecd10"},
+ {file = "fastavro-1.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6575be7f2b5f94023b5a4e766b0251924945ad55e9a96672dc523656d17fe251"},
+ {file = "fastavro-1.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fe471deb675ed2f01ee2aac958fbf8ebb13ea00fa4ce7f87e57710a0bc592208"},
+ {file = "fastavro-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:567ff515f2a5d26d9674b31c95477f3e6022ec206124c62169bc2ffaf0889089"},
+ {file = "fastavro-1.10.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:82263af0adfddb39c85f9517d736e1e940fe506dfcc35bc9ab9f85e0fa9236d8"},
+ {file = "fastavro-1.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:566c193109ff0ff84f1072a165b7106c4f96050078a4e6ac7391f81ca1ef3efa"},
+ {file = "fastavro-1.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e400d2e55d068404d9fea7c5021f8b999c6f9d9afa1d1f3652ec92c105ffcbdd"},
+ {file = "fastavro-1.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9b8227497f71565270f9249fc9af32a93644ca683a0167cfe66d203845c3a038"},
+ {file = "fastavro-1.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8e62d04c65461b30ac6d314e4197ad666371e97ae8cb2c16f971d802f6c7f514"},
+ {file = "fastavro-1.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:86baf8c9740ab570d0d4d18517da71626fe9be4d1142bea684db52bd5adb078f"},
+ {file = "fastavro-1.10.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5bccbb6f8e9e5b834cca964f0e6ebc27ebe65319d3940b0b397751a470f45612"},
+ {file = "fastavro-1.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0132f6b0b53f61a0a508a577f64beb5de1a5e068a9b4c0e1df6e3b66568eec4"},
+ {file = "fastavro-1.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca37a363b711202c6071a6d4787e68e15fa3ab108261058c4aae853c582339af"},
+ {file = "fastavro-1.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:cf38cecdd67ca9bd92e6e9ba34a30db6343e7a3bedf171753ee78f8bd9f8a670"},
+ {file = "fastavro-1.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f4dd10e0ed42982122d20cdf1a88aa50ee09e5a9cd9b39abdffb1aa4f5b76435"},
+ {file = "fastavro-1.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:aaef147dc14dd2d7823246178fd06fc5e477460e070dc6d9e07dd8193a6bc93c"},
+ {file = "fastavro-1.10.0.tar.gz", hash = "sha256:47bf41ac6d52cdfe4a3da88c75a802321321b37b663a900d12765101a5d6886f"},
]
[package.extras]
@@ -374,6 +386,7 @@ version = "3.18.0"
description = "A platform independent file lock."
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de"},
{file = "filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2"},
@@ -382,7 +395,7 @@ files = [
[package.extras]
docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"]
testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"]
-typing = ["typing-extensions (>=4.12.2)"]
+typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""]
[[package]]
name = "fsspec"
@@ -390,6 +403,7 @@ version = "2025.3.2"
description = "File-system specification"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "fsspec-2025.3.2-py3-none-any.whl", hash = "sha256:2daf8dc3d1dfa65b6aa37748d112773a7a08416f6c70d96b264c96476ecaf711"},
{file = "fsspec-2025.3.2.tar.gz", hash = "sha256:e52c77ef398680bbd6a98c0e628fbc469491282981209907bbc8aea76a04fdc6"},
@@ -425,13 +439,14 @@ tqdm = ["tqdm"]
[[package]]
name = "groq"
-version = "0.25.0"
+version = "0.24.0"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
- {file = "groq-0.25.0-py3-none-any.whl", hash = "sha256:aadc78b40b1809cdb196b1aa8c7f7293108767df1508cafa3e0d5045d9328e7a"},
- {file = "groq-0.25.0.tar.gz", hash = "sha256:6e1c7466b0da0130498187b825bd239f86fb77bf7551eacfbfa561d75048746a"},
+ {file = "groq-0.24.0-py3-none-any.whl", hash = "sha256:0020e6b0b2b267263c9eb7c318deef13c12f399c6525734200b11d777b00088e"},
+ {file = "groq-0.24.0.tar.gz", hash = "sha256:e821559de8a77fb81d2585b3faec80ff923d6d64fd52339b33f6c94997d6f7f5"},
]
[package.dependencies]
@@ -448,6 +463,7 @@ version = "0.16.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"},
{file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
@@ -459,6 +475,7 @@ version = "1.0.9"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"},
{file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"},
@@ -480,6 +497,7 @@ version = "0.28.1"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"},
{file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"},
@@ -492,7 +510,7 @@ httpcore = "==1.*"
idna = "*"
[package.extras]
-brotli = ["brotli", "brotlicffi"]
+brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
@@ -504,6 +522,7 @@ version = "0.4.0"
description = "Consume Server-Sent Event (SSE) messages with HTTPX."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"},
{file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"},
@@ -511,13 +530,14 @@ files = [
[[package]]
name = "huggingface-hub"
-version = "0.31.4"
+version = "0.31.2"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.8.0"
+groups = ["main", "dev"]
files = [
- {file = "huggingface_hub-0.31.4-py3-none-any.whl", hash = "sha256:4f70704760296cc69b612916056e9845f5490a33782b924fc531767967acc15d"},
- {file = "huggingface_hub-0.31.4.tar.gz", hash = "sha256:5a7bc710b9f9c028aee5b1476867b4ec5c1b92f043cb364d5fdc54354757e4ce"},
+ {file = "huggingface_hub-0.31.2-py3-none-any.whl", hash = "sha256:8138cd52aa2326b4429bb00a4a1ba8538346b7b8a808cdce30acb6f1f1bdaeec"},
+ {file = "huggingface_hub-0.31.2.tar.gz", hash = "sha256:7053561376ed7f6ffdaecf09cc54d70dc784ac6315fa4bb9b93e19662b029675"},
]
[package.dependencies]
@@ -550,6 +570,7 @@ version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
+groups = ["main", "dev"]
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
@@ -564,6 +585,7 @@ version = "8.6.1"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"},
{file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"},
@@ -573,12 +595,12 @@ files = [
zipp = ">=3.20"
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
perf = ["ipython"]
-test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
+test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
type = ["pytest-mypy"]
[[package]]
@@ -587,6 +609,7 @@ version = "2.1.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"},
{file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"},
@@ -594,88 +617,88 @@ files = [
[[package]]
name = "jiter"
-version = "0.10.0"
+version = "0.9.0"
description = "Fast iterable JSON parser."
optional = false
-python-versions = ">=3.9"
-files = [
- {file = "jiter-0.10.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:cd2fb72b02478f06a900a5782de2ef47e0396b3e1f7d5aba30daeb1fce66f303"},
- {file = "jiter-0.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:32bb468e3af278f095d3fa5b90314728a6916d89ba3d0ffb726dd9bf7367285e"},
- {file = "jiter-0.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa8b3e0068c26ddedc7abc6fac37da2d0af16b921e288a5a613f4b86f050354f"},
- {file = "jiter-0.10.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:286299b74cc49e25cd42eea19b72aa82c515d2f2ee12d11392c56d8701f52224"},
- {file = "jiter-0.10.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6ed5649ceeaeffc28d87fb012d25a4cd356dcd53eff5acff1f0466b831dda2a7"},
- {file = "jiter-0.10.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2ab0051160cb758a70716448908ef14ad476c3774bd03ddce075f3c1f90a3d6"},
- {file = "jiter-0.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03997d2f37f6b67d2f5c475da4412be584e1cec273c1cfc03d642c46db43f8cf"},
- {file = "jiter-0.10.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c404a99352d839fed80d6afd6c1d66071f3bacaaa5c4268983fc10f769112e90"},
- {file = "jiter-0.10.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66e989410b6666d3ddb27a74c7e50d0829704ede652fd4c858e91f8d64b403d0"},
- {file = "jiter-0.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b532d3af9ef4f6374609a3bcb5e05a1951d3bf6190dc6b176fdb277c9bbf15ee"},
- {file = "jiter-0.10.0-cp310-cp310-win32.whl", hash = "sha256:da9be20b333970e28b72edc4dff63d4fec3398e05770fb3205f7fb460eb48dd4"},
- {file = "jiter-0.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:f59e533afed0c5b0ac3eba20d2548c4a550336d8282ee69eb07b37ea526ee4e5"},
- {file = "jiter-0.10.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3bebe0c558e19902c96e99217e0b8e8b17d570906e72ed8a87170bc290b1e978"},
- {file = "jiter-0.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:558cc7e44fd8e507a236bee6a02fa17199ba752874400a0ca6cd6e2196cdb7dc"},
- {file = "jiter-0.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d613e4b379a07d7c8453c5712ce7014e86c6ac93d990a0b8e7377e18505e98d"},
- {file = "jiter-0.10.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f62cf8ba0618eda841b9bf61797f21c5ebd15a7a1e19daab76e4e4b498d515b2"},
- {file = "jiter-0.10.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:919d139cdfa8ae8945112398511cb7fca58a77382617d279556b344867a37e61"},
- {file = "jiter-0.10.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13ddbc6ae311175a3b03bd8994881bc4635c923754932918e18da841632349db"},
- {file = "jiter-0.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c440ea003ad10927a30521a9062ce10b5479592e8a70da27f21eeb457b4a9c5"},
- {file = "jiter-0.10.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc347c87944983481e138dea467c0551080c86b9d21de6ea9306efb12ca8f606"},
- {file = "jiter-0.10.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:13252b58c1f4d8c5b63ab103c03d909e8e1e7842d302473f482915d95fefd605"},
- {file = "jiter-0.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7d1bbf3c465de4a24ab12fb7766a0003f6f9bce48b8b6a886158c4d569452dc5"},
- {file = "jiter-0.10.0-cp311-cp311-win32.whl", hash = "sha256:db16e4848b7e826edca4ccdd5b145939758dadf0dc06e7007ad0e9cfb5928ae7"},
- {file = "jiter-0.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c9c1d5f10e18909e993f9641f12fe1c77b3e9b533ee94ffa970acc14ded3812"},
- {file = "jiter-0.10.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1e274728e4a5345a6dde2d343c8da018b9d4bd4350f5a472fa91f66fda44911b"},
- {file = "jiter-0.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7202ae396446c988cb2a5feb33a543ab2165b786ac97f53b59aafb803fef0744"},
- {file = "jiter-0.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23ba7722d6748b6920ed02a8f1726fb4b33e0fd2f3f621816a8b486c66410ab2"},
- {file = "jiter-0.10.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:371eab43c0a288537d30e1f0b193bc4eca90439fc08a022dd83e5e07500ed026"},
- {file = "jiter-0.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c675736059020365cebc845a820214765162728b51ab1e03a1b7b3abb70f74c"},
- {file = "jiter-0.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c5867d40ab716e4684858e4887489685968a47e3ba222e44cde6e4a2154f959"},
- {file = "jiter-0.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:395bb9a26111b60141757d874d27fdea01b17e8fac958b91c20128ba8f4acc8a"},
- {file = "jiter-0.10.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6842184aed5cdb07e0c7e20e5bdcfafe33515ee1741a6835353bb45fe5d1bd95"},
- {file = "jiter-0.10.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:62755d1bcea9876770d4df713d82606c8c1a3dca88ff39046b85a048566d56ea"},
- {file = "jiter-0.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:533efbce2cacec78d5ba73a41756beff8431dfa1694b6346ce7af3a12c42202b"},
- {file = "jiter-0.10.0-cp312-cp312-win32.whl", hash = "sha256:8be921f0cadd245e981b964dfbcd6fd4bc4e254cdc069490416dd7a2632ecc01"},
- {file = "jiter-0.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:a7c7d785ae9dda68c2678532a5a1581347e9c15362ae9f6e68f3fdbfb64f2e49"},
- {file = "jiter-0.10.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e0588107ec8e11b6f5ef0e0d656fb2803ac6cf94a96b2b9fc675c0e3ab5e8644"},
- {file = "jiter-0.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cafc4628b616dc32530c20ee53d71589816cf385dd9449633e910d596b1f5c8a"},
- {file = "jiter-0.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:520ef6d981172693786a49ff5b09eda72a42e539f14788124a07530f785c3ad6"},
- {file = "jiter-0.10.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:554dedfd05937f8fc45d17ebdf298fe7e0c77458232bcb73d9fbbf4c6455f5b3"},
- {file = "jiter-0.10.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5bc299da7789deacf95f64052d97f75c16d4fc8c4c214a22bf8d859a4288a1c2"},
- {file = "jiter-0.10.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5161e201172de298a8a1baad95eb85db4fb90e902353b1f6a41d64ea64644e25"},
- {file = "jiter-0.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e2227db6ba93cb3e2bf67c87e594adde0609f146344e8207e8730364db27041"},
- {file = "jiter-0.10.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15acb267ea5e2c64515574b06a8bf393fbfee6a50eb1673614aa45f4613c0cca"},
- {file = "jiter-0.10.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:901b92f2e2947dc6dfcb52fd624453862e16665ea909a08398dde19c0731b7f4"},
- {file = "jiter-0.10.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d0cb9a125d5a3ec971a094a845eadde2db0de85b33c9f13eb94a0c63d463879e"},
- {file = "jiter-0.10.0-cp313-cp313-win32.whl", hash = "sha256:48a403277ad1ee208fb930bdf91745e4d2d6e47253eedc96e2559d1e6527006d"},
- {file = "jiter-0.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:75f9eb72ecb640619c29bf714e78c9c46c9c4eaafd644bf78577ede459f330d4"},
- {file = "jiter-0.10.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:28ed2a4c05a1f32ef0e1d24c2611330219fed727dae01789f4a335617634b1ca"},
- {file = "jiter-0.10.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14a4c418b1ec86a195f1ca69da8b23e8926c752b685af665ce30777233dfe070"},
- {file = "jiter-0.10.0-cp313-cp313t-win_amd64.whl", hash = "sha256:d7bfed2fe1fe0e4dda6ef682cee888ba444b21e7a6553e03252e4feb6cf0adca"},
- {file = "jiter-0.10.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:5e9251a5e83fab8d87799d3e1a46cb4b7f2919b895c6f4483629ed2446f66522"},
- {file = "jiter-0.10.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:023aa0204126fe5b87ccbcd75c8a0d0261b9abdbbf46d55e7ae9f8e22424eeb8"},
- {file = "jiter-0.10.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c189c4f1779c05f75fc17c0c1267594ed918996a231593a21a5ca5438445216"},
- {file = "jiter-0.10.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15720084d90d1098ca0229352607cd68256c76991f6b374af96f36920eae13c4"},
- {file = "jiter-0.10.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4f2fb68e5f1cfee30e2b2a09549a00683e0fde4c6a2ab88c94072fc33cb7426"},
- {file = "jiter-0.10.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce541693355fc6da424c08b7edf39a2895f58d6ea17d92cc2b168d20907dee12"},
- {file = "jiter-0.10.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31c50c40272e189d50006ad5c73883caabb73d4e9748a688b216e85a9a9ca3b9"},
- {file = "jiter-0.10.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fa3402a2ff9815960e0372a47b75c76979d74402448509ccd49a275fa983ef8a"},
- {file = "jiter-0.10.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:1956f934dca32d7bb647ea21d06d93ca40868b505c228556d3373cbd255ce853"},
- {file = "jiter-0.10.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:fcedb049bdfc555e261d6f65a6abe1d5ad68825b7202ccb9692636c70fcced86"},
- {file = "jiter-0.10.0-cp314-cp314-win32.whl", hash = "sha256:ac509f7eccca54b2a29daeb516fb95b6f0bd0d0d8084efaf8ed5dfc7b9f0b357"},
- {file = "jiter-0.10.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5ed975b83a2b8639356151cef5c0d597c68376fc4922b45d0eb384ac058cfa00"},
- {file = "jiter-0.10.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3aa96f2abba33dc77f79b4cf791840230375f9534e5fac927ccceb58c5e604a5"},
- {file = "jiter-0.10.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bd6292a43c0fc09ce7c154ec0fa646a536b877d1e8f2f96c19707f65355b5a4d"},
- {file = "jiter-0.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:39de429dcaeb6808d75ffe9effefe96a4903c6a4b376b2f6d08d77c1aaee2f18"},
- {file = "jiter-0.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52ce124f13a7a616fad3bb723f2bfb537d78239d1f7f219566dc52b6f2a9e48d"},
- {file = "jiter-0.10.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:166f3606f11920f9a1746b2eea84fa2c0a5d50fd313c38bdea4edc072000b0af"},
- {file = "jiter-0.10.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28dcecbb4ba402916034fc14eba7709f250c4d24b0c43fc94d187ee0580af181"},
- {file = "jiter-0.10.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86c5aa6910f9bebcc7bc4f8bc461aff68504388b43bfe5e5c0bd21efa33b52f4"},
- {file = "jiter-0.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ceeb52d242b315d7f1f74b441b6a167f78cea801ad7c11c36da77ff2d42e8a28"},
- {file = "jiter-0.10.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ff76d8887c8c8ee1e772274fcf8cc1071c2c58590d13e33bd12d02dc9a560397"},
- {file = "jiter-0.10.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a9be4d0fa2b79f7222a88aa488bd89e2ae0a0a5b189462a12def6ece2faa45f1"},
- {file = "jiter-0.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9ab7fd8738094139b6c1ab1822d6f2000ebe41515c537235fd45dabe13ec9324"},
- {file = "jiter-0.10.0-cp39-cp39-win32.whl", hash = "sha256:5f51e048540dd27f204ff4a87f5d79294ea0aa3aa552aca34934588cf27023cf"},
- {file = "jiter-0.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:1b28302349dc65703a9e4ead16f163b1c339efffbe1049c30a44b001a2a4fff9"},
- {file = "jiter-0.10.0.tar.gz", hash = "sha256:07a7142c38aacc85194391108dc91b5b57093c978a9932bd86a36862759d9500"},
+python-versions = ">=3.8"
+groups = ["main", "dev"]
+files = [
+ {file = "jiter-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:816ec9b60fdfd1fec87da1d7ed46c66c44ffec37ab2ef7de5b147b2fce3fd5ad"},
+ {file = "jiter-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b1d3086f8a3ee0194ecf2008cf81286a5c3e540d977fa038ff23576c023c0ea"},
+ {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1339f839b91ae30b37c409bf16ccd3dc453e8b8c3ed4bd1d6a567193651a4a51"},
+ {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ffba79584b3b670fefae66ceb3a28822365d25b7bf811e030609a3d5b876f538"},
+ {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cfc7d0a8e899089d11f065e289cb5b2daf3d82fbe028f49b20d7b809193958d"},
+ {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e00a1a2bbfaaf237e13c3d1592356eab3e9015d7efd59359ac8b51eb56390a12"},
+ {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1d9870561eb26b11448854dce0ff27a9a27cb616b632468cafc938de25e9e51"},
+ {file = "jiter-0.9.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9872aeff3f21e437651df378cb75aeb7043e5297261222b6441a620218b58708"},
+ {file = "jiter-0.9.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1fd19112d1049bdd47f17bfbb44a2c0001061312dcf0e72765bfa8abd4aa30e5"},
+ {file = "jiter-0.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6ef5da104664e526836070e4a23b5f68dec1cc673b60bf1edb1bfbe8a55d0678"},
+ {file = "jiter-0.9.0-cp310-cp310-win32.whl", hash = "sha256:cb12e6d65ebbefe5518de819f3eda53b73187b7089040b2d17f5b39001ff31c4"},
+ {file = "jiter-0.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:c43ca669493626d8672be3b645dbb406ef25af3f4b6384cfd306da7eb2e70322"},
+ {file = "jiter-0.9.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6c4d99c71508912a7e556d631768dcdef43648a93660670986916b297f1c54af"},
+ {file = "jiter-0.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8f60fb8ce7df529812bf6c625635a19d27f30806885139e367af93f6e734ef58"},
+ {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51c4e1a4f8ea84d98b7b98912aa4290ac3d1eabfde8e3c34541fae30e9d1f08b"},
+ {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f4c677c424dc76684fea3e7285a7a2a7493424bea89ac441045e6a1fb1d7b3b"},
+ {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2221176dfec87f3470b21e6abca056e6b04ce9bff72315cb0b243ca9e835a4b5"},
+ {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3c7adb66f899ffa25e3c92bfcb593391ee1947dbdd6a9a970e0d7e713237d572"},
+ {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c98d27330fdfb77913c1097a7aab07f38ff2259048949f499c9901700789ac15"},
+ {file = "jiter-0.9.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eda3f8cc74df66892b1d06b5d41a71670c22d95a1ca2cbab73654745ce9d0419"},
+ {file = "jiter-0.9.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dd5ab5ddc11418dce28343123644a100f487eaccf1de27a459ab36d6cca31043"},
+ {file = "jiter-0.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:42f8a68a69f047b310319ef8e2f52fdb2e7976fb3313ef27df495cf77bcad965"},
+ {file = "jiter-0.9.0-cp311-cp311-win32.whl", hash = "sha256:a25519efb78a42254d59326ee417d6f5161b06f5da827d94cf521fed961b1ff2"},
+ {file = "jiter-0.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:923b54afdd697dfd00d368b7ccad008cccfeb1efb4e621f32860c75e9f25edbd"},
+ {file = "jiter-0.9.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7b46249cfd6c48da28f89eb0be3f52d6fdb40ab88e2c66804f546674e539ec11"},
+ {file = "jiter-0.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:609cf3c78852f1189894383cf0b0b977665f54cb38788e3e6b941fa6d982c00e"},
+ {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d726a3890a54561e55a9c5faea1f7655eda7f105bd165067575ace6e65f80bb2"},
+ {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2e89dc075c1fef8fa9be219e249f14040270dbc507df4215c324a1839522ea75"},
+ {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04e8ffa3c353b1bc4134f96f167a2082494351e42888dfcf06e944f2729cbe1d"},
+ {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:203f28a72a05ae0e129b3ed1f75f56bc419d5f91dfacd057519a8bd137b00c42"},
+ {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fca1a02ad60ec30bb230f65bc01f611c8608b02d269f998bc29cca8619a919dc"},
+ {file = "jiter-0.9.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:237e5cee4d5d2659aaf91bbf8ec45052cc217d9446070699441a91b386ae27dc"},
+ {file = "jiter-0.9.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:528b6b71745e7326eed73c53d4aa57e2a522242320b6f7d65b9c5af83cf49b6e"},
+ {file = "jiter-0.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9f48e86b57bc711eb5acdfd12b6cb580a59cc9a993f6e7dcb6d8b50522dcd50d"},
+ {file = "jiter-0.9.0-cp312-cp312-win32.whl", hash = "sha256:699edfde481e191d81f9cf6d2211debbfe4bd92f06410e7637dffb8dd5dfde06"},
+ {file = "jiter-0.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:099500d07b43f61d8bd780466d429c45a7b25411b334c60ca875fa775f68ccb0"},
+ {file = "jiter-0.9.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:2764891d3f3e8b18dce2cff24949153ee30c9239da7c00f032511091ba688ff7"},
+ {file = "jiter-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:387b22fbfd7a62418d5212b4638026d01723761c75c1c8232a8b8c37c2f1003b"},
+ {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d8da8629ccae3606c61d9184970423655fb4e33d03330bcdfe52d234d32f69"},
+ {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1be73d8982bdc278b7b9377426a4b44ceb5c7952073dd7488e4ae96b88e1103"},
+ {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2228eaaaa111ec54b9e89f7481bffb3972e9059301a878d085b2b449fbbde635"},
+ {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:11509bfecbc319459647d4ac3fd391d26fdf530dad00c13c4dadabf5b81f01a4"},
+ {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f22238da568be8bbd8e0650e12feeb2cfea15eda4f9fc271d3b362a4fa0604d"},
+ {file = "jiter-0.9.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17f5d55eb856597607562257c8e36c42bc87f16bef52ef7129b7da11afc779f3"},
+ {file = "jiter-0.9.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:6a99bed9fbb02f5bed416d137944419a69aa4c423e44189bc49718859ea83bc5"},
+ {file = "jiter-0.9.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e057adb0cd1bd39606100be0eafe742de2de88c79df632955b9ab53a086b3c8d"},
+ {file = "jiter-0.9.0-cp313-cp313-win32.whl", hash = "sha256:f7e6850991f3940f62d387ccfa54d1a92bd4bb9f89690b53aea36b4364bcab53"},
+ {file = "jiter-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:c8ae3bf27cd1ac5e6e8b7a27487bf3ab5f82318211ec2e1346a5b058756361f7"},
+ {file = "jiter-0.9.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f0b2827fb88dda2cbecbbc3e596ef08d69bda06c6f57930aec8e79505dc17001"},
+ {file = "jiter-0.9.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062b756ceb1d40b0b28f326cba26cfd575a4918415b036464a52f08632731e5a"},
+ {file = "jiter-0.9.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6f7838bc467ab7e8ef9f387bd6de195c43bad82a569c1699cb822f6609dd4cdf"},
+ {file = "jiter-0.9.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4a2d16360d0642cd68236f931b85fe50288834c383492e4279d9f1792e309571"},
+ {file = "jiter-0.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e84ed1c9c9ec10bbb8c37f450077cbe3c0d4e8c2b19f0a49a60ac7ace73c7452"},
+ {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f3c848209ccd1bfa344a1240763975ca917de753c7875c77ec3034f4151d06c"},
+ {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7825f46e50646bee937e0f849d14ef3a417910966136f59cd1eb848b8b5bb3e4"},
+ {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d82a811928b26d1a6311a886b2566f68ccf2b23cf3bfed042e18686f1f22c2d7"},
+ {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c058ecb51763a67f019ae423b1cbe3fa90f7ee6280c31a1baa6ccc0c0e2d06e"},
+ {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9897115ad716c48f0120c1f0c4efae348ec47037319a6c63b2d7838bb53aaef4"},
+ {file = "jiter-0.9.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:351f4c90a24c4fb8c87c6a73af2944c440494ed2bea2094feecacb75c50398ae"},
+ {file = "jiter-0.9.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d45807b0f236c485e1e525e2ce3a854807dfe28ccf0d013dd4a563395e28008a"},
+ {file = "jiter-0.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1537a890724ba00fdba21787010ac6f24dad47f763410e9e1093277913592784"},
+ {file = "jiter-0.9.0-cp38-cp38-win32.whl", hash = "sha256:e3630ec20cbeaddd4b65513fa3857e1b7c4190d4481ef07fb63d0fad59033321"},
+ {file = "jiter-0.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:2685f44bf80e95f8910553bf2d33b9c87bf25fceae6e9f0c1355f75d2922b0ee"},
+ {file = "jiter-0.9.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9ef340fae98065071ccd5805fe81c99c8f80484e820e40043689cf97fb66b3e2"},
+ {file = "jiter-0.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:efb767d92c63b2cd9ec9f24feeb48f49574a713870ec87e9ba0c2c6e9329c3e2"},
+ {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:113f30f87fb1f412510c6d7ed13e91422cfd329436364a690c34c8b8bd880c42"},
+ {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8793b6df019b988526f5a633fdc7456ea75e4a79bd8396a3373c371fc59f5c9b"},
+ {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7a9aaa5102dba4e079bb728076fadd5a2dca94c05c04ce68004cfd96f128ea34"},
+ {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d838650f6ebaf4ccadfb04522463e74a4c378d7e667e0eb1865cfe3990bfac49"},
+ {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0194f813efdf4b8865ad5f5c5f50f8566df7d770a82c51ef593d09e0b347020"},
+ {file = "jiter-0.9.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a7954a401d0a8a0b8bc669199db78af435aae1e3569187c2939c477c53cb6a0a"},
+ {file = "jiter-0.9.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4feafe787eb8a8d98168ab15637ca2577f6ddf77ac6c8c66242c2d028aa5420e"},
+ {file = "jiter-0.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:27cd1f2e8bb377f31d3190b34e4328d280325ad7ef55c6ac9abde72f79e84d2e"},
+ {file = "jiter-0.9.0-cp39-cp39-win32.whl", hash = "sha256:161d461dcbe658cf0bd0aa375b30a968b087cdddc624fc585f3867c63c6eca95"},
+ {file = "jiter-0.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:e8b36d8a16a61993be33e75126ad3d8aa29cf450b09576f3c427d27647fcb4aa"},
+ {file = "jiter-0.9.0.tar.gz", hash = "sha256:aadba0964deb424daa24492abc3d229c60c4a31bfee205aedbf1acc7639d7893"},
]
[[package]]
@@ -684,6 +707,7 @@ version = "4.23.0"
description = "An implementation of JSON Schema validation for Python"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"},
{file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"},
@@ -705,6 +729,7 @@ version = "2025.4.1"
description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af"},
{file = "jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608"},
@@ -719,6 +744,7 @@ version = "5.1.0"
description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions."
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:eaf4ac5c6ee18ca9232238364d7f2a213278ae5ca97897cafaa123fcc7bb8bec"},
{file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:48f9aa8ccb9ad1d577a16104834ac44ff640d8de8c0caed09a2300df7ce8460a"},
@@ -817,6 +843,7 @@ version = "1.13.0"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"},
{file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"},
@@ -870,6 +897,7 @@ version = "1.1.0"
description = "Type system extensions for programs checked with the mypy type checker."
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"},
{file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"},
@@ -881,6 +909,7 @@ version = "1.26.4"
description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"},
{file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"},
@@ -922,13 +951,14 @@ files = [
[[package]]
name = "openai"
-version = "1.79.0"
+version = "1.78.1"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
- {file = "openai-1.79.0-py3-none-any.whl", hash = "sha256:d5050b92d5ef83f869cb8dcd0aca0b2291c3413412500eec40c66981b3966992"},
- {file = "openai-1.79.0.tar.gz", hash = "sha256:e3b627aa82858d3e42d16616edc22aa9f7477ee5eb3e6819e9f44a961d899a4c"},
+ {file = "openai-1.78.1-py3-none-any.whl", hash = "sha256:7368bf147ca499804cc408fe68cdb6866a060f38dec961bbc97b04f9d917907e"},
+ {file = "openai-1.78.1.tar.gz", hash = "sha256:8b26b364531b100df1b961d03560042e5f5be11301d7d49a6cd1a2b9af824dca"},
]
[package.dependencies]
@@ -948,13 +978,14 @@ voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"]
[[package]]
name = "opentelemetry-api"
-version = "1.33.1"
+version = "1.33.0"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
- {file = "opentelemetry_api-1.33.1-py3-none-any.whl", hash = "sha256:4db83ebcf7ea93e64637ec6ee6fabee45c5cbe4abd9cf3da95c43828ddb50b83"},
- {file = "opentelemetry_api-1.33.1.tar.gz", hash = "sha256:1c6055fc0a2d3f23a50c7e17e16ef75ad489345fd3df1f8b8af7c0bbf8a109e8"},
+ {file = "opentelemetry_api-1.33.0-py3-none-any.whl", hash = "sha256:158df154f628e6615b65fdf6e59f99afabea7213e72c5809dd4adf06c0d997cd"},
+ {file = "opentelemetry_api-1.33.0.tar.gz", hash = "sha256:cc4380fd2e6da7dcb52a828ea81844ed1f4f2eb638ca3c816775109d93d58ced"},
]
[package.dependencies]
@@ -963,47 +994,50 @@ importlib-metadata = ">=6.0,<8.7.0"
[[package]]
name = "opentelemetry-instrumentation"
-version = "0.54b1"
+version = "0.54b0"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation-0.54b1-py3-none-any.whl", hash = "sha256:a4ae45f4a90c78d7006c51524f57cd5aa1231aef031eae905ee34d5423f5b198"},
- {file = "opentelemetry_instrumentation-0.54b1.tar.gz", hash = "sha256:7658bf2ff914b02f246ec14779b66671508125c0e4227361e56b5ebf6cef0aec"},
+ {file = "opentelemetry_instrumentation-0.54b0-py3-none-any.whl", hash = "sha256:1a502238f8af65625ad48800d268d467653e319d959e1732d3b3248916d21327"},
+ {file = "opentelemetry_instrumentation-0.54b0.tar.gz", hash = "sha256:2949d0bbf2316eb5d928a5ef610d0a8a2c261ba80167d878abf6016e1c4ae7bb"},
]
[package.dependencies]
opentelemetry-api = ">=1.4,<2.0"
-opentelemetry-semantic-conventions = "0.54b1"
+opentelemetry-semantic-conventions = "0.54b0"
packaging = ">=18.0"
wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-anthropic"
-version = "0.40.6"
+version = "0.40.5"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_anthropic-0.40.6-py3-none-any.whl", hash = "sha256:85601ce11f4a09f241aa35d91da423ffe72fc0abee6706812f638ffa5aa88bb7"},
- {file = "opentelemetry_instrumentation_anthropic-0.40.6.tar.gz", hash = "sha256:4f116b6d9a3c6494e8de3f8d7e2e5a05b02b304935d6be39116a68fc0220a8e1"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.5-py3-none-any.whl", hash = "sha256:d3b203b0ee8ee06149711d7acfa0085ad44f1841d709fd4d639934b9d8aa87b4"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.5.tar.gz", hash = "sha256:5a7e9b3852cd8cfc43e50450d83d40b1a15d12b295fb18d2c814576072fcc23f"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.9"
+opentelemetry-semantic-conventions-ai = "0.4.8"
[[package]]
name = "opentelemetry-instrumentation-bedrock"
-version = "0.40.6"
+version = "0.40.5"
description = "OpenTelemetry Bedrock instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_bedrock-0.40.6-py3-none-any.whl", hash = "sha256:e9ac4326489ba420cb64e2f8317149377d321cf89839c2bd57773a5935c4810e"},
- {file = "opentelemetry_instrumentation_bedrock-0.40.6.tar.gz", hash = "sha256:901a07589c094f5f7eca8600961b66180e268f841360efbfe9a3ed1a0ce983a7"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.5-py3-none-any.whl", hash = "sha256:5f3cf77c03dfa4ab04fc202bf91a98688053ecb7d1bc64b2eaf42c866fb77c69"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.5.tar.gz", hash = "sha256:0ab690501101a67cff3c4037fa6bdfeb65d25d9ec365e97880ebe118a0a6dd30"},
]
[package.dependencies]
@@ -1011,87 +1045,92 @@ anthropic = ">=0.17.0"
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.9"
+opentelemetry-semantic-conventions-ai = "0.4.8"
tokenizers = ">=0.13.0"
[[package]]
name = "opentelemetry-instrumentation-cohere"
-version = "0.40.6"
+version = "0.40.5"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_cohere-0.40.6-py3-none-any.whl", hash = "sha256:495b59c6277697220d1fbdf8336ec361a7673004e48c31d18915403c557ca4a8"},
- {file = "opentelemetry_instrumentation_cohere-0.40.6.tar.gz", hash = "sha256:4f33b41dda4b2a483b101138c005d76d42e86909aa64be38d9e48f35432c8cbc"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.5-py3-none-any.whl", hash = "sha256:ff73b4eed87f1d79e737351aabec85fde3aaa2f793c87e508c20498ccbea3d48"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.5.tar.gz", hash = "sha256:98332b9bea8b9c84222682a57ebb431e693ab1d548a4a3f4301a9b1dfc3a6cbc"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.9"
+opentelemetry-semantic-conventions-ai = "0.4.8"
[[package]]
name = "opentelemetry-instrumentation-groq"
-version = "0.40.6"
+version = "0.40.5"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_groq-0.40.6-py3-none-any.whl", hash = "sha256:f8423faa0951e4d6e865f51cfbef1c5605b843bbcdc9e6594f4001fc9b2079ec"},
- {file = "opentelemetry_instrumentation_groq-0.40.6.tar.gz", hash = "sha256:17169aa923c648f080cc142896f936908d11ca9b9d4708e287bad1b9061295dd"},
+ {file = "opentelemetry_instrumentation_groq-0.40.5-py3-none-any.whl", hash = "sha256:dec20e7f50f648068b2cfb730da0e2297dbdc9a93840cbbc595f652cd0e9e94b"},
+ {file = "opentelemetry_instrumentation_groq-0.40.5.tar.gz", hash = "sha256:036bda3c9317a3d34c7538479864d215e0f4e147b5fe475be4f1cd4402b2ae30"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.9"
+opentelemetry-semantic-conventions-ai = "0.4.8"
[[package]]
name = "opentelemetry-instrumentation-openai"
-version = "0.40.6"
+version = "0.40.5"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_openai-0.40.6-py3-none-any.whl", hash = "sha256:daa46b72e4bb335680029c60f5e0b126022d4340c3aa14d3b15075e9f21fcf4a"},
- {file = "opentelemetry_instrumentation_openai-0.40.6.tar.gz", hash = "sha256:177ab51c833c399eb7a5e2d4bca68f9ef56229084372b6d855f0032565cf3b31"},
+ {file = "opentelemetry_instrumentation_openai-0.40.5-py3-none-any.whl", hash = "sha256:533cbfc00a6d629998c5adb49e37a2165559a5d26bb6bb6a61f768bf23e96cf9"},
+ {file = "opentelemetry_instrumentation_openai-0.40.5.tar.gz", hash = "sha256:691d9e7bca55b5a21538c86127ee5af05b033385212aaeb64eab2dd383cb815b"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.9"
+opentelemetry-semantic-conventions-ai = "0.4.8"
tiktoken = ">=0.6.0,<1"
[[package]]
name = "opentelemetry-instrumentation-replicate"
-version = "0.40.6"
+version = "0.40.5"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_replicate-0.40.6-py3-none-any.whl", hash = "sha256:1762ac25820b15f1b8a0a17c8bb76f2342eda463a8ea0cf9eba63cffcb59fbf4"},
- {file = "opentelemetry_instrumentation_replicate-0.40.6.tar.gz", hash = "sha256:b7b489117cd94a794e555c5868ddf954865fe584c10ff9a082487c9634524d44"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.5-py3-none-any.whl", hash = "sha256:948ecea48de37639433a64cc36bbb5f61f1de24122cafcf3101d5c4c113b7a82"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.5.tar.gz", hash = "sha256:d5d70375619ed286c80f25631ac1ab1cbe58146dcf90efa92203c7c93b8d5b6c"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.9"
+opentelemetry-semantic-conventions-ai = "0.4.8"
[[package]]
name = "opentelemetry-proto"
-version = "1.33.1"
+version = "1.33.0"
description = "OpenTelemetry Python Proto"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
- {file = "opentelemetry_proto-1.33.1-py3-none-any.whl", hash = "sha256:243d285d9f29663fc7ea91a7171fcc1ccbbfff43b48df0774fd64a37d98eda70"},
- {file = "opentelemetry_proto-1.33.1.tar.gz", hash = "sha256:9627b0a5c90753bf3920c398908307063e4458b287bb890e5c1d6fa11ad50b68"},
+ {file = "opentelemetry_proto-1.33.0-py3-none-any.whl", hash = "sha256:84a1d7daacac4aa0f24a5b1190a3e0619011dbff56f945fc2b6fc0a18f48b942"},
+ {file = "opentelemetry_proto-1.33.0.tar.gz", hash = "sha256:ec5aa35486c990207ead2512a8d616d1b324928562c91dbc7e0cb9aa48c60b7b"},
]
[package.dependencies]
@@ -1099,44 +1138,47 @@ protobuf = ">=5.0,<6.0"
[[package]]
name = "opentelemetry-sdk"
-version = "1.33.1"
+version = "1.33.0"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
- {file = "opentelemetry_sdk-1.33.1-py3-none-any.whl", hash = "sha256:19ea73d9a01be29cacaa5d6c8ce0adc0b7f7b4d58cc52f923e4413609f670112"},
- {file = "opentelemetry_sdk-1.33.1.tar.gz", hash = "sha256:85b9fcf7c3d23506fbc9692fd210b8b025a1920535feec50bd54ce203d57a531"},
+ {file = "opentelemetry_sdk-1.33.0-py3-none-any.whl", hash = "sha256:bed376b6d37fbf00688bb65edfee817dd01d48b8559212831437529a6066049a"},
+ {file = "opentelemetry_sdk-1.33.0.tar.gz", hash = "sha256:a7fc56d1e07b218fcc316b24d21b59d3f1967b2ca22c217b05da3a26b797cc68"},
]
[package.dependencies]
-opentelemetry-api = "1.33.1"
-opentelemetry-semantic-conventions = "0.54b1"
+opentelemetry-api = "1.33.0"
+opentelemetry-semantic-conventions = "0.54b0"
typing-extensions = ">=3.7.4"
[[package]]
name = "opentelemetry-semantic-conventions"
-version = "0.54b1"
+version = "0.54b0"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
- {file = "opentelemetry_semantic_conventions-0.54b1-py3-none-any.whl", hash = "sha256:29dab644a7e435b58d3a3918b58c333c92686236b30f7891d5e51f02933ca60d"},
- {file = "opentelemetry_semantic_conventions-0.54b1.tar.gz", hash = "sha256:d1cecedae15d19bdaafca1e56b29a66aa286f50b5d08f036a145c7f3e9ef9cee"},
+ {file = "opentelemetry_semantic_conventions-0.54b0-py3-none-any.whl", hash = "sha256:fad7c1cf8908fd449eb5cf9fbbeefb301acf4bc995101f85277899cec125d823"},
+ {file = "opentelemetry_semantic_conventions-0.54b0.tar.gz", hash = "sha256:467b739977bdcb079af1af69f73632535cdb51099d5e3c5709a35d10fe02a9c9"},
]
[package.dependencies]
deprecated = ">=1.2.6"
-opentelemetry-api = "1.33.1"
+opentelemetry-api = "1.33.0"
[[package]]
name = "opentelemetry-semantic-conventions-ai"
-version = "0.4.9"
+version = "0.4.8"
description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
- {file = "opentelemetry_semantic_conventions_ai-0.4.9-py3-none-any.whl", hash = "sha256:71149e46a72554ae17de46bca6c11ba540c19c89904bd4cc3111aac6edf10315"},
- {file = "opentelemetry_semantic_conventions_ai-0.4.9.tar.gz", hash = "sha256:54a0b901959e2de5124384925846bac2ea0a6dab3de7e501ba6aecf5e293fe04"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.8-py3-none-any.whl", hash = "sha256:02f7d5876a21e4376d5f4e7cd1157677e3e186f60bb3b6375a1205518a5f0908"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.8.tar.gz", hash = "sha256:b4663403315aa08e83c4651c13dbc1fbe3e518229b4df87ca32e24895dd04007"},
]
[[package]]
@@ -1145,6 +1187,7 @@ version = "5.4.1"
description = "Orderly set"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "orderly_set-5.4.1-py3-none-any.whl", hash = "sha256:b5e21d21680bd9ef456885db800c5cb4f76a03879880c0175e1b077fb166fd83"},
{file = "orderly_set-5.4.1.tar.gz", hash = "sha256:a1fb5a4fdc5e234e9e8d8e5c1bbdbc4540f4dfe50d12bf17c8bc5dbf1c9c878d"},
@@ -1156,6 +1199,7 @@ version = "25.0"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"},
{file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"},
@@ -1167,6 +1211,7 @@ version = "2.2.3"
description = "Powerful data structures for data analysis, time series, and statistics"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"},
{file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"},
@@ -1253,6 +1298,7 @@ version = "1.20.2"
description = "parse() is the opposite of format()"
optional = false
python-versions = "*"
+groups = ["main", "dev"]
files = [
{file = "parse-1.20.2-py2.py3-none-any.whl", hash = "sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558"},
{file = "parse-1.20.2.tar.gz", hash = "sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce"},
@@ -1264,6 +1310,7 @@ version = "0.6.4"
description = "Simplifies to build parse types based on the parse module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,>=2.7"
+groups = ["dev"]
files = [
{file = "parse_type-0.6.4-py2.py3-none-any.whl", hash = "sha256:83d41144a82d6b8541127bf212dd76c7f01baff680b498ce8a4d052a7a5bce4c"},
{file = "parse_type-0.6.4.tar.gz", hash = "sha256:5e1ec10440b000c3f818006033372939e693a9ec0176f446d9303e4db88489a6"},
@@ -1274,9 +1321,9 @@ parse = {version = ">=1.18.0", markers = "python_version >= \"3.0\""}
six = ">=1.15"
[package.extras]
-develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0)", "pytest (>=5.0)", "pytest-cov", "pytest-html (>=1.19.0)", "ruff", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0)", "virtualenv (>=20.0.0)", "wheel"]
+develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-cov", "pytest-html (>=1.19.0)", "ruff ; python_version >= \"3.7\"", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0) ; python_version <= \"3.6\"", "virtualenv (>=20.0.0) ; python_version > \"3.6\"", "wheel"]
docs = ["Sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6.0)"]
-testing = ["pytest (<5.0)", "pytest (>=5.0)", "pytest-html (>=1.19.0)"]
+testing = ["pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-html (>=1.19.0)"]
[[package]]
name = "pluggy"
@@ -1284,6 +1331,7 @@ version = "1.6.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"},
{file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"},
@@ -1299,6 +1347,7 @@ version = "5.29.4"
description = ""
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "protobuf-5.29.4-cp310-abi3-win32.whl", hash = "sha256:13eb236f8eb9ec34e63fc8b1d6efd2777d062fa6aaa68268fb67cf77f6839ad7"},
{file = "protobuf-5.29.4-cp310-abi3-win_amd64.whl", hash = "sha256:bcefcdf3976233f8a502d265eb65ea740c989bacc6c30a58290ed0e519eb4b8d"},
@@ -1319,6 +1368,7 @@ version = "19.0.1"
description = "Python library for Apache Arrow"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:fc28912a2dc924dddc2087679cc8b7263accc71b9ff025a1362b004711661a69"},
{file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:fca15aabbe9b8355800d923cc2e82c8ef514af321e18b437c3d782aa884eaeec"},
@@ -1373,6 +1423,7 @@ version = "2.11.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"},
{file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"},
@@ -1386,7 +1437,7 @@ typing-inspection = ">=0.4.0"
[package.extras]
email = ["email-validator (>=2.0.0)"]
-timezone = ["tzdata"]
+timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""]
[[package]]
name = "pydantic-core"
@@ -1394,6 +1445,7 @@ version = "2.33.2"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"},
{file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"},
@@ -1505,6 +1557,7 @@ version = "7.4.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
+groups = ["dev"]
files = [
{file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
{file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
@@ -1527,6 +1580,7 @@ version = "0.23.8"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"},
{file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"},
@@ -1545,6 +1599,7 @@ version = "1.7.0"
description = "Adds the ability to retry flaky tests in CI environments"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "pytest_retry-1.7.0-py3-none-any.whl", hash = "sha256:a2dac85b79a4e2375943f1429479c65beb6c69553e7dae6b8332be47a60954f4"},
{file = "pytest_retry-1.7.0.tar.gz", hash = "sha256:f8d52339f01e949df47c11ba9ee8d5b362f5824dff580d3870ec9ae0057df80f"},
@@ -1562,6 +1617,7 @@ version = "3.6.1"
description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"},
{file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"},
@@ -1582,6 +1638,7 @@ version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["dev"]
files = [
{file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
{file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
@@ -1596,6 +1653,7 @@ version = "1.1.0"
description = "Read key-value pairs from a .env file and set them as environment variables"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"},
{file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"},
@@ -1610,6 +1668,7 @@ version = "2025.2"
description = "World timezone definitions, modern and historical"
optional = false
python-versions = "*"
+groups = ["dev"]
files = [
{file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"},
{file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"},
@@ -1621,6 +1680,7 @@ version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
@@ -1683,6 +1743,7 @@ version = "0.36.2"
description = "JSON Referencing + Python"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"},
{file = "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"},
@@ -1699,6 +1760,7 @@ version = "2024.11.6"
description = "Alternative regular expression module, to replace re."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"},
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"},
@@ -1802,6 +1864,7 @@ version = "1.0.6"
description = "Python client for Replicate"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "replicate-1.0.6-py3-none-any.whl", hash = "sha256:d544f837dc7e9dc3b3c1df60a145c7d6f362d6719b719793a44a4be28837103d"},
{file = "replicate-1.0.6.tar.gz", hash = "sha256:b8a0f1649ed4146c3d624e22a418b8c6decce9346cffc110c90fde5995c46e60"},
@@ -1819,6 +1882,7 @@ version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
@@ -1840,6 +1904,7 @@ version = "0.25.0"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "rpds_py-0.25.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c146a24a8f0dc4a7846fb4640b88b3a68986585b8ce8397af15e66b7c5817439"},
{file = "rpds_py-0.25.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:77814c7a4e1dc43fba73aeb4c1ef0fe37d901f3aa869a4823de5ea843a283fd0"},
@@ -1963,6 +2028,7 @@ version = "0.11.5"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
+groups = ["dev"]
files = [
{file = "ruff-0.11.5-py3-none-linux_armv6l.whl", hash = "sha256:2561294e108eb648e50f210671cc56aee590fb6167b594144401532138c66c7b"},
{file = "ruff-0.11.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ac12884b9e005c12d0bd121f56ccf8033e1614f736f766c118ad60780882a077"},
@@ -1990,6 +2056,7 @@ version = "1.17.0"
description = "Python 2 and 3 compatibility utilities"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["dev"]
files = [
{file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"},
{file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"},
@@ -2001,6 +2068,7 @@ version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
@@ -2012,6 +2080,7 @@ version = "0.9.0"
description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"},
{file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"},
@@ -2059,6 +2128,7 @@ version = "0.21.1"
description = ""
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "tokenizers-0.21.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:e78e413e9e668ad790a29456e677d9d3aa50a9ad311a40905d6861ba7692cf41"},
{file = "tokenizers-0.21.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:cd51cd0a91ecc801633829fcd1fda9cf8682ed3477c6243b9a095539de4aecf3"},
@@ -2091,6 +2161,8 @@ version = "2.2.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
+markers = "python_version < \"3.11\""
files = [
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
@@ -2132,6 +2204,7 @@ version = "4.67.1"
description = "Fast, Extensible Progress Meter"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
files = [
{file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"},
{file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"},
@@ -2149,13 +2222,14 @@ telegram = ["requests"]
[[package]]
name = "types-jsonschema"
-version = "4.23.0.20250516"
+version = "4.23.0.20241208"
description = "Typing stubs for jsonschema"
optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.8"
+groups = ["dev"]
files = [
- {file = "types_jsonschema-4.23.0.20250516-py3-none-any.whl", hash = "sha256:e7d0dd7db7e59e63c26e3230e26ffc64c4704cc5170dc21270b366a35ead1618"},
- {file = "types_jsonschema-4.23.0.20250516.tar.gz", hash = "sha256:9ace09d9d35c4390a7251ccd7d833b92ccc189d24d1b347f26212afce361117e"},
+ {file = "types_jsonschema-4.23.0.20241208-py3-none-any.whl", hash = "sha256:87934bd9231c99d8eff94cacfc06ba668f7973577a9bd9e1f9de957c5737313e"},
+ {file = "types_jsonschema-4.23.0.20241208.tar.gz", hash = "sha256:e8b15ad01f290ecf6aea53f93fbdf7d4730e4600313e89e8a7f95622f7e87b7c"},
]
[package.dependencies]
@@ -2167,6 +2241,7 @@ version = "5.29.1.20250403"
description = "Typing stubs for protobuf"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "types_protobuf-5.29.1.20250403-py3-none-any.whl", hash = "sha256:c71de04106a2d54e5b2173d0a422058fae0ef2d058d70cf369fb797bf61ffa59"},
{file = "types_protobuf-5.29.1.20250403.tar.gz", hash = "sha256:7ff44f15022119c9d7558ce16e78b2d485bf7040b4fadced4dd069bb5faf77a2"},
@@ -2174,13 +2249,14 @@ files = [
[[package]]
name = "types-python-dateutil"
-version = "2.9.0.20250516"
+version = "2.9.0.20241206"
description = "Typing stubs for python-dateutil"
optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.8"
+groups = ["dev"]
files = [
- {file = "types_python_dateutil-2.9.0.20250516-py3-none-any.whl", hash = "sha256:2b2b3f57f9c6a61fba26a9c0ffb9ea5681c9b83e69cd897c6b5f668d9c0cab93"},
- {file = "types_python_dateutil-2.9.0.20250516.tar.gz", hash = "sha256:13e80d6c9c47df23ad773d54b2826bd52dbbb41be87c3f339381c1700ad21ee5"},
+ {file = "types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = "sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53"},
+ {file = "types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb"},
]
[[package]]
@@ -2189,6 +2265,7 @@ version = "2.32.0.20250515"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "types_requests-2.32.0.20250515-py3-none-any.whl", hash = "sha256:f8eba93b3a892beee32643ff836993f15a785816acca21ea0ffa006f05ef0fb2"},
{file = "types_requests-2.32.0.20250515.tar.gz", hash = "sha256:09c8b63c11318cb2460813871aaa48b671002e59fda67ca909e9883777787581"},
@@ -2203,6 +2280,7 @@ version = "4.13.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"},
{file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"},
@@ -2214,6 +2292,7 @@ version = "0.4.0"
description = "Runtime typing introspection tools"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"},
{file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"},
@@ -2228,6 +2307,7 @@ version = "2025.2"
description = "Provider of IANA time zone data"
optional = false
python-versions = ">=2"
+groups = ["dev"]
files = [
{file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"},
{file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"},
@@ -2239,13 +2319,14 @@ version = "2.4.0"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"},
{file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"},
]
[package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
@@ -2256,6 +2337,7 @@ version = "1.17.2"
description = "Module for decorators, wrappers and monkey patching."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"},
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"},
@@ -2344,20 +2426,21 @@ version = "3.21.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"},
{file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"},
]
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
-test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
+test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
type = ["pytest-mypy"]
[metadata]
-lock-version = "2.0"
+lock-version = "2.1"
python-versions = ">=3.9,<4"
content-hash = "b13d162d3fbca536d5eabbb7baec8155cc8eaeffc3c26227397a572bddde5385"
diff --git a/pyproject.toml b/pyproject.toml
index 60bcf14e..33f32109 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "humanloop"
[tool.poetry]
name = "humanloop"
-version = "0.8.39"
+version = "0.8.40b5"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 069f352d..07525c3a 100644
--- a/reference.md
+++ b/reference.md
@@ -178,7 +178,6 @@ Controls how the model uses tools. The following options are supported:
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
-
A new Prompt version will be created if the provided details do not match any existing version.
@@ -709,7 +708,6 @@ Controls how the model uses tools. The following options are supported:
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
-
A new Prompt version will be created if the provided details do not match any existing version.
@@ -974,7 +972,6 @@ Controls how the model uses tools. The following options are supported:
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
-
A new Prompt version will be created if the provided details do not match any existing version.
@@ -8383,7 +8380,6 @@ Controls how the model uses tools. The following options are supported:
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
-
A new Agent version will be created if the provided details do not match any existing version.
@@ -8791,7 +8787,6 @@ Controls how the model uses tools. The following options are supported:
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
-
A new Agent version will be created if the provided details do not match any existing version.
@@ -9042,7 +9037,6 @@ Controls how the model uses tools. The following options are supported:
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
-
A new Agent version will be created if the provided details do not match any existing version.
diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py
index c03ac58a..49772dae 100644
--- a/src/humanloop/agents/client.py
+++ b/src/humanloop/agents/client.py
@@ -161,7 +161,6 @@ def log(
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
-
A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
@@ -372,110 +371,113 @@ def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[AgentCallStreamResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Agent. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Agent details in the request body. A new version is created if it does not match
+ any existing ones. This is helpful in the case where you are storing or deriving
+ your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the to provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallStreamRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ A new Agent version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.Iterator[AgentCallStreamResponse]
+ Yields
+ ------
+ typing.Iterator[AgentCallStreamResponse]
- Examples
- --------
- from humanloop import Humanloop
- client = Humanloop(api_key="YOUR_API_KEY", )
- response = client.agents.call_stream()
- for chunk in response:
- yield chunk
+ Examples
+ --------
+ from humanloop import Humanloop
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ response = client.agents.call_stream()
+ for chunk in response:
+ yield chunk
"""
with self._raw_client.call_stream(
version_id=version_id,
@@ -530,108 +532,111 @@ def call(
request_options: typing.Optional[RequestOptions] = None,
) -> AgentCallResponse:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Agent. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Agent details in the request body. A new version is created if it does not match
+ any existing ones. This is helpful in the case where you are storing or deriving
+ your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ A new Agent version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- AgentCallResponse
+ Returns
+ -------
+ AgentCallResponse
- Examples
- --------
- from humanloop import Humanloop
- client = Humanloop(api_key="YOUR_API_KEY", )
- client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': "I'd like to deposit $1000 to my savings account from my checking account."}], )
+ Examples
+ --------
+ from humanloop import Humanloop
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': "I'd like to deposit $1000 to my savings account from my checking account."}], )
"""
_response = self._raw_client.call(
version_id=version_id,
@@ -1506,143 +1511,146 @@ async def log(
request_options: typing.Optional[RequestOptions] = None,
) -> CreateAgentLogResponse:
"""
- Create an Agent Log.
+ Create an Agent Log.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
- If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
- in order to trigger Evaluators.
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- run_id : typing.Optional[str]
- Unique identifier for the Run to associate the Log to.
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- output_message : typing.Optional[ChatMessageParams]
- The message returned by the provider.
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
- prompt_tokens : typing.Optional[int]
- Number of tokens in the prompt used to generate the output.
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
- reasoning_tokens : typing.Optional[int]
- Number of reasoning tokens used to generate the output.
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
- output_tokens : typing.Optional[int]
- Number of tokens in the output generated by the model.
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
- prompt_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the prompt.
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the prompt.
- output_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the output.
+ output_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the output.
- finish_reason : typing.Optional[str]
- Reason the generation finished.
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentLogRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ A new Agent version will be created if the provided details do not match any existing version.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- output : typing.Optional[str]
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- created_at : typing.Optional[dt.datetime]
- User defined timestamp for when the log was created.
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
- error : typing.Optional[str]
- Error message if the log is an error.
+ error : typing.Optional[str]
+ Error message if the log is an error.
- provider_latency : typing.Optional[float]
- Duration of the logged event in seconds.
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
- stdout : typing.Optional[str]
- Captured log and debug statements.
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
- provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw request sent to provider.
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
- provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw response received the provider.
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received the provider.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agent_log_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- CreateAgentLogResponse
- Successful Response
+ Returns
+ -------
+ CreateAgentLogResponse
+ Successful Response
- Examples
- --------
- from humanloop import AsyncHumanloop
- import asyncio
- client = AsyncHumanloop(api_key="YOUR_API_KEY", )
- async def main() -> None:
- await client.agents.log(path='Banking/Teller Agent', agent={'provider': "anthropic", 'endpoint': "chat", 'model': 'claude-3-7-sonnet-latest', 'reasoning_effort': 1024, 'template': [{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], 'max_iterations': 3, 'tools': [{'type': 'file', 'link': {'file_id': 'pr_1234567890', 'version_id': 'prv_1234567890'}, 'on_agent_call': "continue"}, {'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {'type': 'object'
- , 'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}}
- , 'additionalProperties': False
- , 'required': ['output']
- }, 'strict': True}, 'on_agent_call': "stop"}]}, )
- asyncio.run(main())
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+ async def main() -> None:
+ await client.agents.log(path='Banking/Teller Agent', agent={'provider': "anthropic", 'endpoint': "chat", 'model': 'claude-3-7-sonnet-latest', 'reasoning_effort': 1024, 'template': [{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], 'max_iterations': 3, 'tools': [{'type': 'file', 'link': {'file_id': 'pr_1234567890', 'version_id': 'prv_1234567890'}, 'on_agent_call': "continue"}, {'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {'type': 'object'
+ , 'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}}
+ , 'additionalProperties': False
+ , 'required': ['output']
+ }, 'strict': True}, 'on_agent_call': "stop"}]}, )
+ asyncio.run(main())
"""
_response = await self._raw_client.log(
version_id=version_id,
@@ -1783,113 +1791,116 @@ async def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[AgentCallStreamResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Agent. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Agent details in the request body. A new version is created if it does not match
+ any existing ones. This is helpful in the case where you are storing or deriving
+ your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallStreamRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ A new Agent version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.AsyncIterator[AgentCallStreamResponse]
+ Yields
+ ------
+ typing.AsyncIterator[AgentCallStreamResponse]
- Examples
- --------
- from humanloop import AsyncHumanloop
- import asyncio
- client = AsyncHumanloop(api_key="YOUR_API_KEY", )
- async def main() -> None:
- response = await client.agents.call_stream()
- async for chunk in response:
- yield chunk
- asyncio.run(main())
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+ async def main() -> None:
+ response = await client.agents.call_stream()
+ async for chunk in response:
+ yield chunk
+ asyncio.run(main())
"""
async with self._raw_client.call_stream(
version_id=version_id,
@@ -1945,111 +1956,114 @@ async def call(
request_options: typing.Optional[RequestOptions] = None,
) -> AgentCallResponse:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+    If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Agent. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Agent details in the request body. A new version is created if it does not match
+ any existing ones. This is helpful in the case where you are storing or deriving
+ your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+        The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+        - `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
-        A new Agent version will be created if the provided details do not match any existing version.
+        A new Agent version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- AgentCallResponse
+ Returns
+ -------
+ AgentCallResponse
- Examples
- --------
- from humanloop import AsyncHumanloop
- import asyncio
- client = AsyncHumanloop(api_key="YOUR_API_KEY", )
- async def main() -> None:
- await client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': "I'd like to deposit $1000 to my savings account from my checking account."}], )
- asyncio.run(main())
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+ async def main() -> None:
+ await client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': "I'd like to deposit $1000 to my savings account from my checking account."}], )
+ asyncio.run(main())
"""
_response = await self._raw_client.call(
version_id=version_id,
diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py
index 8863cc7f..5b91f958 100644
--- a/src/humanloop/agents/raw_client.py
+++ b/src/humanloop/agents/raw_client.py
@@ -161,7 +161,6 @@ def log(
The Agent configuration to use. Two formats are supported:
- An object representing the details of the Agent configuration
- A string representing the raw contents of a .agent file
-
A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
@@ -433,101 +432,104 @@ def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+    If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Agent. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Agent details in the request body. A new version is created if it does not match
+ any existing ones. This is helpful in the case where you are storing or deriving
+ your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+        The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+        - `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallStreamRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
-        A new Agent version will be created if the provided details do not match any existing version.
+        A new Agent version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]
+ Yields
+ ------
+ typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]
"""
with self._client_wrapper.httpx_client.stream(
@@ -644,101 +646,104 @@ def call(
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[AgentCallResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+    If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Agent. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Agent details in the request body. A new version is created if it does not match
+ any existing ones. This is helpful in the case where you are storing or deriving
+ your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+        The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+        - `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
-        A new Agent version will be created if the provided details do not match any existing version.
+        A new Agent version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- HttpResponse[AgentCallResponse]
+ Returns
+ -------
+ HttpResponse[AgentCallResponse]
"""
_response = self._client_wrapper.httpx_client.request(
@@ -2068,130 +2073,133 @@ async def log(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[CreateAgentLogResponse]:
"""
- Create an Agent Log.
+ Create an Agent Log.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
- If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
- in order to trigger Evaluators.
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- run_id : typing.Optional[str]
- Unique identifier for the Run to associate the Log to.
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- output_message : typing.Optional[ChatMessageParams]
- The message returned by the provider.
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
- prompt_tokens : typing.Optional[int]
- Number of tokens in the prompt used to generate the output.
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
- reasoning_tokens : typing.Optional[int]
- Number of reasoning tokens used to generate the output.
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
- output_tokens : typing.Optional[int]
- Number of tokens in the output generated by the model.
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
- prompt_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the prompt.
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the prompt.
- output_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the output.
+ output_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the output.
- finish_reason : typing.Optional[str]
- Reason the generation finished.
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the to provider chat endpoint.
- tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentLogRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ A new Agent version will be created if the provided details do not match any existing version.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- output : typing.Optional[str]
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- created_at : typing.Optional[dt.datetime]
- User defined timestamp for when the log was created.
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
- error : typing.Optional[str]
- Error message if the log is an error.
+ error : typing.Optional[str]
+ Error message if the log is an error.
- provider_latency : typing.Optional[float]
- Duration of the logged event in seconds.
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
- stdout : typing.Optional[str]
- Captured log and debug statements.
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
- provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw request sent to provider.
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
- provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw response received the provider.
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received the provider.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agent_log_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- AsyncHttpResponse[CreateAgentLogResponse]
- Successful Response
+ Returns
+ -------
+ AsyncHttpResponse[CreateAgentLogResponse]
+ Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
"agents/log",
@@ -2400,101 +2408,104 @@ async def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Agent. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Agent details in the request body. A new version is created if it does not match
+ any existing ones. This is helpful in the case where you are storing or deriving
+ your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the to provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallStreamRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ A new Agent version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]
+ Yields
+ ------
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]
"""
async with self._client_wrapper.httpx_client.stream(
@@ -2611,101 +2622,104 @@ async def call(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[AgentCallResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Agent. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Agent details in the request body. A new version is created if it does not match
+ any existing ones. This is helpful in the case where you are storing or deriving
+ your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the to provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ A new Agent version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- AsyncHttpResponse[AgentCallResponse]
+ Returns
+ -------
+ AsyncHttpResponse[AgentCallResponse]
"""
_response = await self._client_wrapper.httpx_client.request(
diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py
index a71ca52c..5edb2911 100644
--- a/src/humanloop/core/client_wrapper.py
+++ b/src/humanloop/core/client_wrapper.py
@@ -14,10 +14,10 @@ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[floa
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "humanloop/0.8.39",
+ "User-Agent": "humanloop/0.8.40b5",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "humanloop",
- "X-Fern-SDK-Version": "0.8.39",
+ "X-Fern-SDK-Version": "0.8.40b5",
}
headers["X-API-KEY"] = self.api_key
return headers
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index f05a6a5b..f22f6913 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -238,6 +238,17 @@ def _overload_call(self: T, file_syncer: Optional[FileSyncer], use_local_files:
raise HumanloopRuntimeError from e
+ClientTemplateType = TypeVar(
+ "ClientTemplateType",
+ bound=Union[
+ FlowsClient,
+ PromptsClient,
+ AgentsClient,
+ ToolsClient,
+ ],
+)
+
+
def overload_client(
client: T,
file_syncer: Optional[FileSyncer] = None,
@@ -253,8 +264,7 @@ def overload_client(
def log_wrapper(self: T, **kwargs) -> LogResponseType:
return _overload_log(self, file_syncer, use_local_files, **kwargs)
- # Replace the log method with type ignore
- client.log = types.MethodType(log_wrapper, client) # type: ignore
+ client.log = types.MethodType(log_wrapper, client)
# Overload call method for Prompt and Agent clients
if _get_file_type_from_client(client) in FileSyncer.SERIALIZABLE_FILE_TYPES:
@@ -262,14 +272,13 @@ def log_wrapper(self: T, **kwargs) -> LogResponseType:
logger.error("file_syncer is None but client has call method and use_local_files=%s", use_local_files)
raise HumanloopRuntimeError("file_syncer is required for clients that support call operations")
if hasattr(client, "call") and not hasattr(client, "_call"):
- # Store original method with type ignore
- client._call = client.call # type: ignore
+ client._call = client.call
# Create a closure to capture file_syncer and use_local_files
def call_wrapper(self: T, **kwargs) -> CallResponseType:
return _overload_call(self, file_syncer, use_local_files, **kwargs)
# Replace the call method with type ignore
- client.call = types.MethodType(call_wrapper, client) # type: ignore
+ client.call = types.MethodType(call_wrapper, client)
return client
diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py
index e223f66e..a99ba8e3 100644
--- a/src/humanloop/prompts/client.py
+++ b/src/humanloop/prompts/client.py
@@ -162,7 +162,6 @@ def log(
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
-
A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
@@ -452,113 +451,113 @@ def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[PromptCallStreamResponse]:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the to provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ A new Prompt version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.Iterator[PromptCallStreamResponse]
+ Yields
+ ------
+ typing.Iterator[PromptCallStreamResponse]
- Examples
- --------
- from humanloop import Humanloop
- client = Humanloop(api_key="YOUR_API_KEY", )
- response = client.prompts.call_stream()
- for chunk in response:
- yield chunk
+ Examples
+ --------
+ from humanloop import Humanloop
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ response = client.prompts.call_stream()
+ for chunk in response:
+ yield chunk
"""
with self._raw_client.call_stream(
version_id=version_id,
@@ -617,114 +616,114 @@ def call(
request_options: typing.Optional[RequestOptions] = None,
) -> PromptCallResponse:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the to provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ A new Prompt version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- PromptCallResponse
+ Returns
+ -------
+ PromptCallResponse
- Examples
- --------
- from humanloop import Humanloop
- client = Humanloop(api_key="YOUR_API_KEY", )
- client.prompts.call(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are stockbot. Return latest prices.'}], 'tools': [{'name': 'get_stock_price', 'description': 'Get current stock price', 'parameters': {'type': 'object'
- , 'properties': {'ticker_symbol': {'type': 'string', 'name': 'Ticker Symbol', 'description': 'Ticker symbol of the stock'}}
- , 'required': []
- }}]}, messages=[{'role': "user", 'content': 'latest apple'}], )
+ Examples
+ --------
+ from humanloop import Humanloop
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.call(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are stockbot. Return latest prices.'}], 'tools': [{'name': 'get_stock_price', 'description': 'Get current stock price', 'parameters': {'type': 'object'
+ , 'properties': {'ticker_symbol': {'type': 'string', 'name': 'Ticker Symbol', 'description': 'Ticker symbol of the stock'}}
+ , 'required': []
+ }}]}, messages=[{'role': "user", 'content': 'latest apple'}], )
"""
_response = self._raw_client.call(
version_id=version_id,
@@ -1525,143 +1524,143 @@ async def log(
request_options: typing.Optional[RequestOptions] = None,
) -> CreatePromptLogResponse:
"""
- Log to a Prompt.
+ Log to a Prompt.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- run_id : typing.Optional[str]
- Unique identifier for the Run to associate the Log to.
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- output_message : typing.Optional[ChatMessageParams]
- The message returned by the provider.
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
- prompt_tokens : typing.Optional[int]
- Number of tokens in the prompt used to generate the output.
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
- reasoning_tokens : typing.Optional[int]
- Number of reasoning tokens used to generate the output.
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
- output_tokens : typing.Optional[int]
- Number of tokens in the output generated by the model.
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
- prompt_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the prompt.
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the prompt.
- output_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the output.
+ output_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the output.
- finish_reason : typing.Optional[str]
- Reason the generation finished.
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the to provider chat endpoint.
- tool_choice : typing.Optional[PromptLogRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptLogRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ A new Prompt version will be created if the provided details do not match any existing version.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- output : typing.Optional[str]
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- created_at : typing.Optional[dt.datetime]
- User defined timestamp for when the log was created.
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
- error : typing.Optional[str]
- Error message if the log is an error.
+ error : typing.Optional[str]
+ Error message if the log is an error.
- provider_latency : typing.Optional[float]
- Duration of the logged event in seconds.
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
- stdout : typing.Optional[str]
- Captured log and debug statements.
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
- provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw request sent to provider.
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
- provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw response received the provider.
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received the provider.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompt_log_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompt_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- CreatePromptLogResponse
- Successful Response
+ Returns
+ -------
+ CreatePromptLogResponse
+ Successful Response
- Examples
- --------
- from humanloop import AsyncHumanloop
- import datetime
- import asyncio
- client = AsyncHumanloop(api_key="YOUR_API_KEY", )
- async def main() -> None:
- await client.prompts.log(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are {{person}}. Answer questions as this person. Do not break character.'}]}, messages=[{'role': "user", 'content': 'What really happened at Roswell?'}], inputs={'person': 'Trump'
- }, created_at=datetime.datetime.fromisoformat("2024-07-18 23:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={'content': "Well, you know, there is so much secrecy involved in government, folks, it's unbelievable. They don't want to tell you everything. They don't tell me everything! But about Roswell, it's a very popular question. I know, I just know, that something very, very peculiar happened there. Was it a weather balloon? Maybe. Was it something extraterrestrial? Could be. I'd love to go down and open up all the classified documents, believe me, I would. But they don't let that happen. The Deep State, folks, the Deep State. They're unbelievable. They want to keep everything a secret. But whatever the truth is, I can tell you this: it's something big, very very big. Tremendous, in fact.", 'role': "assistant"}, prompt_tokens=100, output_tokens=220, prompt_cost=1e-05, output_cost=0.0002, finish_reason='stop', )
- asyncio.run(main())
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import datetime
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+ async def main() -> None:
+ await client.prompts.log(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are {{person}}. Answer questions as this person. Do not break character.'}]}, messages=[{'role': "user", 'content': 'What really happened at Roswell?'}], inputs={'person': 'Trump'
+ }, created_at=datetime.datetime.fromisoformat("2024-07-18 23:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={'content': "Well, you know, there is so much secrecy involved in government, folks, it's unbelievable. They don't want to tell you everything. They don't tell me everything! But about Roswell, it's a very popular question. I know, I just know, that something very, very peculiar happened there. Was it a weather balloon? Maybe. Was it something extraterrestrial? Could be. I'd love to go down and open up all the classified documents, believe me, I would. But they don't let that happen. The Deep State, folks, the Deep State. They're unbelievable. They want to keep everything a secret. But whatever the truth is, I can tell you this: it's something big, very very big. Tremendous, in fact.", 'role': "assistant"}, prompt_tokens=100, output_tokens=220, prompt_cost=1e-05, output_cost=0.0002, finish_reason='stop', )
+ asyncio.run(main())
"""
_response = await self._raw_client.log(
version_id=version_id,
@@ -1883,116 +1891,116 @@ async def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[PromptCallStreamResponse]:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the to provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
-            A new Prompt version will be created if the provided details do not match any existing version.
+            A new Prompt version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.AsyncIterator[PromptCallStreamResponse]
+ Yields
+ ------
+ typing.AsyncIterator[PromptCallStreamResponse]
- Examples
- --------
- from humanloop import AsyncHumanloop
- import asyncio
- client = AsyncHumanloop(api_key="YOUR_API_KEY", )
- async def main() -> None:
- response = await client.prompts.call_stream()
- async for chunk in response:
- yield chunk
- asyncio.run(main())
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+ async def main() -> None:
+ response = await client.prompts.call_stream()
+ async for chunk in response:
+ yield chunk
+ asyncio.run(main())
"""
async with self._raw_client.call_stream(
version_id=version_id,
@@ -2052,117 +2063,117 @@ async def call(
request_options: typing.Optional[RequestOptions] = None,
) -> PromptCallResponse:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the to provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
-            A new Prompt version will be created if the provided details do not match any existing version.
+            A new Prompt version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- PromptCallResponse
+ Returns
+ -------
+ PromptCallResponse
- Examples
- --------
- from humanloop import AsyncHumanloop
- import asyncio
- client = AsyncHumanloop(api_key="YOUR_API_KEY", )
- async def main() -> None:
- await client.prompts.call(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are stockbot. Return latest prices.'}], 'tools': [{'name': 'get_stock_price', 'description': 'Get current stock price', 'parameters': {'type': 'object'
- , 'properties': {'ticker_symbol': {'type': 'string', 'name': 'Ticker Symbol', 'description': 'Ticker symbol of the stock'}}
- , 'required': []
- }}]}, messages=[{'role': "user", 'content': 'latest apple'}], )
- asyncio.run(main())
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+ async def main() -> None:
+ await client.prompts.call(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are stockbot. Return latest prices.'}], 'tools': [{'name': 'get_stock_price', 'description': 'Get current stock price', 'parameters': {'type': 'object'
+ , 'properties': {'ticker_symbol': {'type': 'string', 'name': 'Ticker Symbol', 'description': 'Ticker symbol of the stock'}}
+ , 'required': []
+ }}]}, messages=[{'role': "user", 'content': 'latest apple'}], )
+ asyncio.run(main())
"""
_response = await self._raw_client.call(
version_id=version_id,
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
index 8a7eb05e..eee67107 100644
--- a/src/humanloop/prompts/raw_client.py
+++ b/src/humanloop/prompts/raw_client.py
@@ -162,7 +162,6 @@ def log(
The Prompt configuration to use. Two formats are supported:
- An object representing the details of the Prompt configuration
- A string representing the raw contents of a .prompt file
-
A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
@@ -517,104 +516,104 @@ def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[HttpResponse[typing.Iterator[PromptCallStreamResponse]]]:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the to provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
-            A new Prompt version will be created if the provided details do not match any existing version.
+            A new Prompt version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.Iterator[HttpResponse[typing.Iterator[PromptCallStreamResponse]]]
+ Yields
+ ------
+ typing.Iterator[HttpResponse[typing.Iterator[PromptCallStreamResponse]]]
"""
with self._client_wrapper.httpx_client.stream(
@@ -735,104 +737,104 @@ def call(
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[PromptCallResponse]:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the to provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ A new Prompt version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- HttpResponse[PromptCallResponse]
+ Returns
+ -------
+ HttpResponse[PromptCallResponse]
"""
_response = self._client_wrapper.httpx_client.request(
@@ -2040,132 +2045,132 @@ async def log(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[CreatePromptLogResponse]:
"""
- Log to a Prompt.
+ Log to a Prompt.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- run_id : typing.Optional[str]
- Unique identifier for the Run to associate the Log to.
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- output_message : typing.Optional[ChatMessageParams]
- The message returned by the provider.
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
- prompt_tokens : typing.Optional[int]
- Number of tokens in the prompt used to generate the output.
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
- reasoning_tokens : typing.Optional[int]
- Number of reasoning tokens used to generate the output.
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
- output_tokens : typing.Optional[int]
- Number of tokens in the output generated by the model.
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
- prompt_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the prompt.
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the prompt.
- output_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the output.
+ output_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the output.
- finish_reason : typing.Optional[str]
- Reason the generation finished.
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the to provider chat endpoint.
- tool_choice : typing.Optional[PromptLogRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptLogRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ A new Prompt version will be created if the provided details do not match any existing version.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- output : typing.Optional[str]
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- created_at : typing.Optional[dt.datetime]
- User defined timestamp for when the log was created.
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
- error : typing.Optional[str]
- Error message if the log is an error.
+ error : typing.Optional[str]
+ Error message if the log is an error.
- provider_latency : typing.Optional[float]
- Duration of the logged event in seconds.
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
- stdout : typing.Optional[str]
- Captured log and debug statements.
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
- provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw request sent to provider.
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
- provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw response received the provider.
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received the provider.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompt_log_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompt_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- AsyncHttpResponse[CreatePromptLogResponse]
- Successful Response
+ Returns
+ -------
+ AsyncHttpResponse[CreatePromptLogResponse]
+ Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
"prompts/log",
@@ -2457,104 +2465,104 @@ async def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[PromptCallStreamResponse]]]:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the to provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ A new Prompt version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[PromptCallStreamResponse]]]
+ Yields
+ ------
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[PromptCallStreamResponse]]]
"""
async with self._client_wrapper.httpx_client.stream(
@@ -2675,104 +2686,107 @@ async def call(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[PromptCallResponse]:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ A new Prompt version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- AsyncHttpResponse[PromptCallResponse]
+ Returns
+ -------
+ AsyncHttpResponse[PromptCallResponse]
"""
_response = await self._client_wrapper.httpx_client.request(
From 32fa6e345ae5c4cd77ee166eabe641046a02fdee Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 20 May 2025 15:01:18 +0100
Subject: [PATCH 3/3] typing issues
---
src/humanloop/overload.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index f22f6913..b682e722 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -264,7 +264,7 @@ def overload_client(
def log_wrapper(self: T, **kwargs) -> LogResponseType:
return _overload_log(self, file_syncer, use_local_files, **kwargs)
- client.log = types.MethodType(log_wrapper, client)
+ client.log = types.MethodType(log_wrapper, client) # type: ignore [method-assign, union-attr]
# Overload call method for Prompt and Agent clients
if _get_file_type_from_client(client) in FileSyncer.SERIALIZABLE_FILE_TYPES:
@@ -272,13 +272,13 @@ def log_wrapper(self: T, **kwargs) -> LogResponseType:
logger.error("file_syncer is None but client has call method and use_local_files=%s", use_local_files)
raise HumanloopRuntimeError("file_syncer is required for clients that support call operations")
if hasattr(client, "call") and not hasattr(client, "_call"):
- client._call = client.call
+ client._call = client.call # type: ignore [method-assign, union-attr]
# Create a closure to capture file_syncer and use_local_files
def call_wrapper(self: T, **kwargs) -> CallResponseType:
return _overload_call(self, file_syncer, use_local_files, **kwargs)
# Replace the call method with type ignore
- client.call = types.MethodType(call_wrapper, client)
+ client.call = types.MethodType(call_wrapper, client) # type: ignore [method-assign]
return client