diff --git a/.gitignore b/.gitignore
index 706c607..7f54c28 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,3 +9,6 @@ datasets
 logs/
 data_sets
 vault/agent-out
+
+# Snyk Security Extension - AI Rules (auto-generated)
+.github/instructions/snyk_rules.instructions.md
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 8fccd09..a7a1de1 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -178,6 +178,202 @@ git commit -m "added package-name dependency"
 #### 5. Open a PR
 
 CI will validate that the lockfile and environment are consistent. If you forgot to update the lockfile, the PR will fail with a clear error.
+
+---
+
+## Type Safety Practices
+
+Python is a dynamically typed language. This flexibility makes Python productive and expressive, but it also increases the risk of subtle bugs caused by incorrect function calls, unexpected None values, or inconsistent data structures. To balance flexibility with long-term maintainability, we use [Pyright](https://microsoft.github.io/pyright) for CI-level type checking.
+
+We run Pyright in `standard` mode. This mode provides strong type-correctness guarantees without requiring the full strictness and annotation overhead of `strict` mode.
+
+You can find the exact checks enforced in `standard` mode in the `Diagnostic Settings Defaults` section of the [Pyright documentation](https://microsoft.github.io/pyright/#/configuration?id=diagnostic-settings-defaults).
+
+We chose `standard` mode because it enforces the following principles:
+
+- **Catch real bugs early** - It prevents incorrect function calls, invalid attribute access, misuse of Optional values, inconsistent overloads, and a wide range of type errors that would otherwise only appear at runtime.
+
+- **Maintain clarity without excessive annotation burden** - Developers are not expected to annotate every variable or build fully typed signatures for every function. Pyright uses inference aggressively, and `standard` mode focuses on correctness where types are known or inferred.
+
+- **Work seamlessly with third-party libraries** - Many Python libraries ship without type stubs. In `standard` mode, these imports are treated as `Any`, allowing us to use them without blocking type checks while still preserving type safety inside our own code.
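+
+As a concrete illustration, consider the following snippet (hypothetical code, not taken from this repository). Pyright in `standard` mode flags the unguarded attribute access because the value may be `None`:
+
+```python
+def find_username(user_id: int) -> str | None:
+    """Return the username for user_id, or None if the user does not exist."""
+    return None  # placeholder for a real lookup
+
+
+# Flagged by Pyright in standard mode: calling .upper() here is unsafe because
+# find_username() may return None and the None case is never narrowed away.
+greeting = f"Hello, {find_username(42).upper()}"
+
+# Narrowing the Optional first satisfies the type checker.
+username = find_username(42)
+if username is not None:
+    greeting = f"Hello, {username.upper()}"
+```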
+
+### Runtime Type Safety at System Boundaries
+
+While Pyright provides excellent static type checking during development, **system boundaries** require additional runtime validation. These are the points where our Python code interfaces with external systems, user input, or network requests, and where data types cannot be guaranteed at compile time.
+
+In this project, we use **Pydantic** for rigorous runtime type checking at these critical handover points:
+
+#### FastAPI Endpoints
+All FastAPI route handlers use Pydantic models for request/response validation:
+- Request bodies are validated against Pydantic schemas
+- Query parameters and path parameters are type-checked at runtime
+- Response models ensure consistent API contract enforcement
+```python
+# Example: API endpoint with Pydantic validation
+from pydantic import BaseModel
+from fastapi import FastAPI
+
+app = FastAPI()
+
+class UserRequest(BaseModel):
+    name: str
+    age: int
+
+@app.post("/users")
+async def create_user(user: UserRequest):
+    # Pydantic validates that name is a string and age is an int.
+    # Invalid data raises a 422 error before reaching this code.
+    return {"id": 1, "name": user.name}
+```
+
+This combination of **static type checking with Pyright** and **runtime validation with Pydantic** ensures both development-time correctness and production-time reliability at system boundaries where type safety cannot be statically guaranteed.
+
+**Note: Type checks are run only on core source code, not on test code.**
+
+## Linter Rules
+
+Consistent linting is essential for maintaining a reliable and scalable codebase. By adhering to a well-defined linter configuration, we ensure the code remains readable, secure, and predictable even as the project evolves.
+
+The following rule categories are enabled in this repository. Linter rules are enforced automatically through the CI pipeline and must pass before merging changes into the `wip`, `dev`, or `main` branches.
+
+Each category is summarized with a short description and a link to the Ruff documentation for the rules it contains.
+
+### Selected Linter Rule Categories
+
+#### E4, E7, E9 — Pycodestyle Error Rules
+
+These check for fundamental correctness issues such as import placement, malformed statements and comparisons, and syntax errors that would otherwise cause runtime failures.
+
+- **E4**: Import placement and formatting rules
+  (https://docs.astral.sh/ruff/rules/#pycodestyle-e4)
+
+- **E7**: Statement-level issues such as comparisons to `None` or `True`, bare `except` clauses, and lambda assignments
+  (https://docs.astral.sh/ruff/rules/#pycodestyle-e7)
+
+- **E9**: Syntax errors and other conditions that prevent code from being parsed or executed
+  (https://docs.astral.sh/ruff/rules/#pycodestyle-e9)
+
+#### F — Pyflakes
+
+Static analysis rules that detect real bug patterns such as unused variables, unused imports, undefined names, duplicate definitions, and similar logical mistakes.
+
+(https://docs.astral.sh/ruff/rules/#pyflakes-f)
+
+#### B — Flake8-Bugbear
+
+A set of high-value checks for common Python pitfalls: mutable default arguments, improper exception handling, unsafe patterns, redundant checks, and subtle bugs that affect correctness and security.
+
+(https://docs.astral.sh/ruff/rules/#flake8-bugbear-b)
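+
+For example, one of the most common pitfalls in this category is the mutable default argument (rule B006). A minimal, hypothetical illustration:
+
+```python
+# Flagged by flake8-bugbear (B006): the default list is created once at
+# function definition time and shared across every call.
+def record_event(event: str, history: list[str] = []) -> list[str]:
+    history.append(event)
+    return history
+
+
+# Preferred pattern: use None as the sentinel and build the list per call.
+def record_event_safely(event: str, history: list[str] | None = None) -> list[str]:
+    if history is None:
+        history = []
+    history.append(event)
+    return history
+```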
+
+#### T20 — Flake8-Print
+
+Flags any usage of `print()` or `pprint()` in production code to prevent leaking sensitive information, mixing debug output into logs, or introducing uncontrolled console output.
+
+(https://docs.astral.sh/ruff/rules/#flake8-print-t20)
+
+#### N — PEP8-Naming
+
+Ensures consistent and conventional naming across classes, functions, variables, and modules. This helps maintain readability across the engineering team and reinforces clarity in code reviews.
+
+(https://docs.astral.sh/ruff/rules/#pep8-naming-n)
+
+#### ANN — Flake8-Annotations
+
+Enforces type annotation discipline across functions, methods, and class structures. Because Pyright relies on this information for type checking, these rules ensure that type information remains explicit and complete.
+
+(https://docs.astral.sh/ruff/rules/#flake8-annotations-ann)
+
+#### ERA — Eradicate
+
+Flags commented-out code fragments. Commented-out code tends to accumulate over time and reduces clarity; the goal is to keep the repository clean and avoid carrying dead code in version control.
+
+(https://docs.astral.sh/ruff/rules/#eradicate-era)
+
+#### PERF — Perflint
+
+Performance-oriented rules that highlight inefficient constructs, slow loops, unnecessary list or dict operations, and patterns that degrade runtime efficiency.
+
+(https://docs.astral.sh/ruff/rules/#perflint-perf)
+
+### Fixing Linting Issues
+
+Linting issues should always be resolved manually.
+We **strongly discourage** relying on autofixes via `ruff check --fix` in this repository.
+
+Unlike `ruff format`, which performs safe and predictable code formatting, the linter's autofix mode can alter control flow, refactor logic, or rewrite expressions in ways that introduce unintended bugs.
+
+Every linter error is reported with a **rule code**, for example `ANN204`. You can run
+
+```bash
+ruff rule ANN204   # replace ANN204 with the reported rule code
+```
+
+to get an explanation of the rule, why the flagged pattern is a problem, and how to fix it.
+
+Human oversight is essential to ensure that any corrective changes maintain the intended behavior of the application. Contributors should review each reported linting issue, understand why it is flagged, and apply the appropriate fix by hand.
+
+---
+
+## Formatting Rules
+
+This repository uses the **Ruff Formatter** for code formatting. Its behavior is deterministic, safe, and aligned with the [Black Code Style](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html).
+
+Formatting is enforced automatically through the CI pipeline and must pass before merging changes into the `wip`, `dev`, or `main` branches.
+
+### Selected Formatting Behaviors
+
+#### String Quote Style
+
+All string literals are formatted using **double quotes**.
+This preserves consistency across the codebase and avoids unnecessary formatting churn.
+
+(https://docs.astral.sh/ruff/formatter/#quote-style)
+
+#### Indentation Style
+
+Indentation always uses **spaces, not tabs**.
+This mirrors the formatting style adopted by Black and avoids ambiguity across editors and environments.
+
+(https://docs.astral.sh/ruff/formatter/#indent-style)
+
+#### Magic Trailing Commas
+
+The formatter respects magic trailing commas, meaning:
+
+- **Adding a trailing comma** in lists, dicts, tuples, or function calls will trigger multi-line formatting.
+- **Removing a trailing comma** results in a more compact single-line layout where appropriate.
+
+This produces stable diffs and predictable wrapping behavior (a short before/after sketch follows at the end of this section).
+
+(https://docs.astral.sh/ruff/formatter/#skip-magic-trailing-comma)
+
+#### Automatic Line Ending Detection
+
+Ruff automatically detects and preserves the correct line-ending style (LF or CRLF) based on the existing file.
+This prevents accidental line-ending changes when multiple developers work on different systems.
+
+(https://docs.astral.sh/ruff/formatter/#line-ending)
+
+#### Docstring Code Blocks
+
+The formatter **does not reformat** code blocks inside docstrings.
+This ensures that examples, snippets, API usage patterns, and documentation content remain exactly as written, preventing unintended modifications to teaching material or markdown-style fenced blocks.
+
+(https://docs.astral.sh/ruff/formatter/#docstring-code-format)
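+
+To make the trailing-comma behavior concrete, here is a small before/after sketch (illustrative only; the function and argument names are made up, and actual wrapping also depends on the 88-character line length):
+
+```python
+# With a magic trailing comma, the formatter keeps the call expanded:
+result = process_batch(
+    first_argument,
+    second_argument,
+)
+
+# Without the trailing comma, the same call collapses onto a single line
+# as long as it fits within the line-length limit:
+result = process_batch(first_argument, second_argument)
+```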
+
+### Applying Formatting
+
+Unlike lint autofixes, **formatting changes are safe by design**.
+The formatter never changes logical behavior, control flow, or semantics. It only standardizes layout.
+
+You can run formatting locally using:
+
+```bash
+uv run ruff format
+```
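+
+If you only want to verify formatting without modifying files (for example, before pushing), the formatter also supports a check mode, and the linter can be run the same way:
+
+```bash
+# Report files that would be reformatted, without changing them
+uv run ruff format --check
+
+# Run the linter with the repository's configured rule set
+uv run ruff check
+```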
+
+All formatting issues must be resolved before creating a pull request or merging into protected branches.
+
+
 ---
 
 ### Important Notes
diff --git a/pyproject.toml b/pyproject.toml
index 774f8af..a2692fc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,7 +11,7 @@ dependencies = [
     "openai>=1.106.1",
     "numpy>=2.3.2",
     "pre-commit>=4.3.0",
-    "pyright>=1.1.404",
+    "pyright>=1.1.407",
    "pytest>=8.4.1",
     "pyyaml>=6.0.2",
     "ruff>=0.12.12",
@@ -37,6 +37,76 @@ dependencies = [
     "langfuse>=3.8.1",
 ]
 
+[tool.ruff]
+# Exclude a variety of commonly ignored directories.
+exclude = [
+    ".bzr",
+    ".direnv",
+    ".eggs",
+    ".git",
+    ".git-rewrite",
+    ".hg",
+    ".ipynb_checkpoints",
+    ".mypy_cache",
+    ".nox",
+    ".pants.d",
+    ".pyenv",
+    ".pytest_cache",
+    ".pytype",
+    ".ruff_cache",
+    ".svn",
+    ".tox",
+    ".venv",
+    ".vscode",
+    "__pypackages__",
+    "_build",
+    "buck-out",
+    "build",
+    "dist",
+    "node_modules",
+    "site-packages",
+    "venv",
+]
+
+# Same as Black Formatter.
+line-length = 88
+indent-width = 4
+
+# Set Python Version - 3.12
+target-version = "py312"
+
+fix = false
+
+[tool.ruff.lint]
+select = ["E4", "E7", "E9", "F", "B", "T20", "N", "ANN", "ERA", "PERF"]
+ignore = []
+
+# Allow fixes for all enabled rules (when `--fix` is provided).
+fixable = ["ALL"]
+unfixable = []
+
+[tool.ruff.format]
+# Like Black, use double quotes for strings.
+quote-style = "double"
+
+# Like Black, indent with spaces, rather than tabs.
+indent-style = "space"
+
+# Like Black, respect magic trailing commas.
+skip-magic-trailing-comma = false
+
+# Like Black, automatically detect the appropriate line ending.
+line-ending = "auto"
+
+docstring-code-format = false
+docstring-code-line-length = "dynamic"
+
 [tool.pyright]
 # --- Environment & discovery ---
 pythonVersion = "3.12.10"   # Target Python semantics (pattern matching, typing features, stdlib types).
@@ -44,34 +114,11 @@
 venvPath = "."              # Where virtual envs live relative to repo root
 venv = ".venv"              # The specific env name uv manages (uv sync creates .venv).
 
 # --- What to analyze ---
-include = ["src", "tests"]  # Top-level packages & tests to check.
+include = ["*"]             # Analyze the whole repository (tests are excluded below).
 exclude = [
     "**/.venv", "**/__pycache__", "build", "dist", ".git",
-    ".ruff_cache", ".mypy_cache"
+    ".ruff_cache", ".mypy_cache", "tests/", "**/tests/"
 ]
 
 # --- Global strictness ---
-typeCheckingMode = "strict"        # Enforce full strict mode repo-wide (see notes below).
-useLibraryCodeForTypes = true      # If a lib lacks stubs, inspect its code to infer types where possible.
-
-# Make the most common "loose" mistakes fail fast in strict mode.
-# You can tune these individually if you need a temporary carve-out.
-reportMissingTypeStubs = "error"             # Untyped third-party libs must have type info (stubs or inline).
-reportUnknownVariableType = "error"          # Vars with unknown/implicit Any are not allowed.
-reportUnknownMemberType = "error"            # Members on unknowns are not allowed.
-reportUnknownArgumentType = "error"          # Call arguments can't be unknown.
-reportUnknownLambdaType = "error"            # Lambda params must be typed in strict contexts.
-reportImplicitOptional = "error"             # T | None must be explicit; no silent Optional.
-reportMissingTypeArgument = "error"          # Generic types must specify their parameters.
-reportIncompatibleVariableOverride = "error" # Subclass fields must type-refine correctly.
-reportInvalidTypeVarUse = "error"            # Catch misuse of TypeVar/variance.
-reportUntypedFunctionDecorator = "error"     # Decorators must be typed (prevents Any leakage).
-reportUnusedVariable = "error"               # Ditto; promote to "error" if you want hard hygiene.
-reportUnusedImport = "warning"               # Hygiene: warn, but don't fail builds.
-
-
-# Tests often deserialize lots of data and patch frameworks; keep them strict,
-# but relax "missing stubs" so untyped test-only libs don't block you.
-[[tool.pyright.overrides]]
-module = "tests/**"
-reportMissingTypeStubs = "warning"
+typeCheckingMode = "standard"      # Standard typechecking mode
\ No newline at end of file
diff --git a/src/llm_orchestration_service.py b/src/llm_orchestration_service.py
index a7de4c6..2de809a 100644
--- a/src/llm_orchestration_service.py
+++ b/src/llm_orchestration_service.py
@@ -23,6 +23,7 @@
 from prompt_refine_manager.prompt_refiner import PromptRefinerAgent
 from src.response_generator.response_generate import ResponseGeneratorAgent
 from src.response_generator.response_generate import stream_response_native
+from src.vector_indexer.constants import ResponseGenerationConstants
 from src.llm_orchestrator_config.llm_ochestrator_constants import (
     OUT_OF_SCOPE_MESSAGE,
     TECHNICAL_ISSUE_MESSAGE,
@@ -343,7 +344,7 @@ async def stream_orchestration_response(
         ].check_scope_quick(
             question=refined_output.original_question,
             chunks=relevant_chunks,
-            max_blocks=10,
+            max_blocks=ResponseGenerationConstants.DEFAULT_MAX_BLOCKS,
         )
 
         timing_dict["scope_check"] = time.time() - start_time
@@ -382,7 +383,7 @@ async def bot_response_generator() -> AsyncIterator[str]:
             agent=components["response_generator"],
             question=refined_output.original_question,
             chunks=relevant_chunks,
-            max_blocks=10,
+            max_blocks=ResponseGenerationConstants.DEFAULT_MAX_BLOCKS,
         ):
             yield token
@@ -1619,13 +1620,17 @@ def _format_chunks_for_test_response(
         relevant_chunks: List of retrieved chunks with metadata
 
     Returns:
-        List of ChunkInfo objects with rank and content, or None if no chunks
+        List of ChunkInfo objects with rank and content (limited to top 5), or None if no chunks
     """
     if not relevant_chunks:
         return None
 
+    # Limit to top-k chunks that are actually used in response generation
+    max_blocks = ResponseGenerationConstants.DEFAULT_MAX_BLOCKS
+    limited_chunks = relevant_chunks[:max_blocks]
+
     formatted_chunks = []
-    for rank, chunk in enumerate(relevant_chunks, start=1):
+    for rank, chunk in enumerate(limited_chunks, start=1):
         # Extract text content - prefer "text" key, fallback to "content"
         chunk_text = chunk.get("text", chunk.get("content", ""))
         if isinstance(chunk_text, str) and chunk_text.strip():
@@ -1682,7 +1687,7 @@ def _generate_rag_response(
     generator_result = response_generator.forward(
         question=refined_output.original_question,
         chunks=relevant_chunks or [],
-        max_blocks=10,
+        max_blocks=ResponseGenerationConstants.DEFAULT_MAX_BLOCKS,
     )
 
     answer = (generator_result.get("answer") or "").strip()
diff --git a/src/response_generator/response_generate.py b/src/response_generator/response_generate.py
index 395597e..f8338f8 100644
--- a/src/response_generator/response_generate.py
+++ b/src/response_generator/response_generate.py
@@ -10,6 +10,7 @@
 from src.llm_orchestrator_config.llm_ochestrator_constants import OUT_OF_SCOPE_MESSAGE
 from src.utils.cost_utils import get_lm_usage_since
 from src.optimization.optimized_module_loader import get_module_loader
+from src.vector_indexer.constants import ResponseGenerationConstants
 
 # Configure logging
 logging.basicConfig(
@@ -53,12 +54,14 @@ class ScopeChecker(dspy.Signature):
 
 def build_context_and_citations(
-    chunks: List[Dict[str, Any]], use_top_k: int = 10
+    chunks: List[Dict[str, Any]], use_top_k: Optional[int] = None
 ) -> Tuple[List[str], List[str], bool]:
     """
     Turn retriever chunks -> numbered context blocks and source labels.
     Returns (blocks, labels, has_real_context).
     """
+    if use_top_k is None:
+        use_top_k = ResponseGenerationConstants.DEFAULT_MAX_BLOCKS
     logger.info(f"Building context from {len(chunks)} chunks (top_k={use_top_k}).")
     blocks: List[str] = []
     labels: List[str] = []
@@ -202,7 +205,7 @@
     async def stream_response(
         self,
         question: str,
         chunks: List[Dict[str, Any]],
-        max_blocks: int = 10,
+        max_blocks: Optional[int] = None,
     ) -> AsyncIterator[str]:
         """
         Stream response tokens directly from LLM using DSPy's native streaming.
@@ -210,11 +213,14 @@
         Args:
             question: User's question
             chunks: Retrieved context chunks
-            max_blocks: Maximum number of context blocks
+            max_blocks: Maximum number of context blocks (default: ResponseGenerationConstants.DEFAULT_MAX_BLOCKS)
 
         Yields:
             Token strings as they arrive from the LLM
         """
+        if max_blocks is None:
+            max_blocks = ResponseGenerationConstants.DEFAULT_MAX_BLOCKS
+
         logger.info(
             f"Starting NATIVE DSPy streaming for question with {len(chunks)} chunks"
         )
@@ -289,7 +295,10 @@ async def stream_response(
                 logger.debug(f"Error during cleanup (aclose): {cleanup_error}")
 
     async def check_scope_quick(
-        self, question: str, chunks: List[Dict[str, Any]], max_blocks: int = 10
+        self,
+        question: str,
+        chunks: List[Dict[str, Any]],
+        max_blocks: Optional[int] = None,
     ) -> bool:
         """
         Quick async check if question is out of scope.
@@ -297,11 +306,13 @@
         Args:
             question: User's question
             chunks: Retrieved context chunks
-            max_blocks: Maximum context blocks to use
+            max_blocks: Maximum context blocks to use (default: ResponseGenerationConstants.DEFAULT_MAX_BLOCKS)
 
         Returns:
             True if out of scope, False if in scope
         """
+        if max_blocks is None:
+            max_blocks = ResponseGenerationConstants.DEFAULT_MAX_BLOCKS
         try:
             context_blocks, _, has_real_context = build_context_and_citations(
                 chunks, use_top_k=max_blocks
             )
@@ -356,9 +367,15 @@ def _validate_prediction(self, pred: dspy.Prediction) -> bool:
         return False
 
     def forward(
-        self, question: str, chunks: List[Dict[str, Any]], max_blocks: int = 10
+        self,
+        question: str,
+        chunks: List[Dict[str, Any]],
+        max_blocks: Optional[int] = None,
     ) -> Dict[str, Any]:
         """Non-streaming forward pass for backward compatibility."""
+        if max_blocks is None:
+            max_blocks = ResponseGenerationConstants.DEFAULT_MAX_BLOCKS
+
         logger.info(f"Generating response for question: '{question}'")
 
         lm = dspy.settings.lm
diff --git a/src/vector_indexer/constants.py b/src/vector_indexer/constants.py
index d8ea9ba..c4f3810 100644
--- a/src/vector_indexer/constants.py
+++ b/src/vector_indexer/constants.py
@@ -97,6 +97,16 @@ class ProcessingConstants:
     MAX_REPETITION_RATIO = 0.5  # Maximum allowed repetition in content
 
 
+class ResponseGenerationConstants:
+    """Constants for response generation and context retrieval."""
+
+    # Top-K blocks for response generation
+    # This controls how many of the retrieved chunks are used
+    # for generating the final response
+    DEFAULT_MAX_BLOCKS = 5  # Maximum context blocks to use in response generation
+    MIN_BLOCKS_REQUIRED = 3  # Minimum blocks required for valid response
+
+
 class LoggingConstants:
     """Constants for logging configuration."""
 
diff --git a/uv.lock b/uv.lock
index 5f79bf1..f662ff5 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1,5 +1,5 @@
 version = 1
-revision = 3
+revision = 2
 requires-python = "==3.12.10"
 
 [[package]]
@@ -718,6 +718,8 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/19/0d/6660d55f7373b2ff8152401a83e02084956da23ae58cddbfb0b330978fe9/greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0", size = 607586, upload-time = "2025-08-07T13:18:28.544Z" },
     { url = "https://files.pythonhosted.org/packages/8e/1a/c953fdedd22d81ee4629afbb38d2f9d71e37d23caace44775a3a969147d4/greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0", size = 1123281, upload-time = "2025-08-07T13:42:39.858Z" },
     { url = "https://files.pythonhosted.org/packages/3f/c7/12381b18e21aef2c6bd3a636da1088b888b97b7a0362fac2e4de92405f97/greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f", size = 1151142, upload-time = "2025-08-07T13:18:22.981Z" },
+    { url = "https://files.pythonhosted.org/packages/27/45/80935968b53cfd3f33cf99ea5f08227f2646e044568c9b1555b58ffd61c2/greenlet-3.2.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee7a6ec486883397d70eec05059353b8e83eca9168b9f3f9a361971e77e0bcd0", size = 1564846, upload-time = "2025-11-04T12:42:15.191Z" },
+    { url = "https://files.pythonhosted.org/packages/69/02/b7c30e5e04752cb4db6202a3858b149c0710e5453b71a3b2aec5d78a1aab/greenlet-3.2.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:326d234cbf337c9c3def0676412eb7040a35a768efc92504b947b3e9cfc7543d", size = 1633814, upload-time = "2025-11-04T12:42:17.175Z" },
     { url = "https://files.pythonhosted.org/packages/e9/08/b0814846b79399e585f974bbeebf5580fbe59e258ea7be64d9dfb253c84f/greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02", size = 299899, upload-time = "2025-08-07T13:38:53.448Z" },
 ]
 
@@ -1930,15 +1932,15 @@ wheels = [
 [[package]]
 name = "pyright"
-version = "1.1.406"
+version = "1.1.407"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "nodeenv" },
     { name = "typing-extensions" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/f7/16/6b4fbdd1fef59a0292cbb99f790b44983e390321eccbc5921b4d161da5d1/pyright-1.1.406.tar.gz", hash = "sha256:c4872bc58c9643dac09e8a2e74d472c62036910b3bd37a32813989ef7576ea2c", size = 4113151, upload-time = "2025-10-02T01:04:45.488Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/a6/1b/0aa08ee42948b61745ac5b5b5ccaec4669e8884b53d31c8ec20b2fcd6b6f/pyright-1.1.407.tar.gz", hash = "sha256:099674dba5c10489832d4a4b2d302636152a9a42d317986c38474c76fe562262", size = 4122872, upload-time = "2025-10-24T23:17:15.145Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/f6/a2/e309afbb459f50507103793aaef85ca4348b66814c86bc73908bdeb66d12/pyright-1.1.406-py3-none-any.whl", hash = "sha256:1d81fb43c2407bf566e97e57abb01c811973fdb21b2df8df59f870f688bdca71", size = 5980982, upload-time = "2025-10-02T01:04:43.137Z" },
+    { url = "https://files.pythonhosted.org/packages/dc/93/b69052907d032b00c40cb656d21438ec00b3a471733de137a3f65a49a0a0/pyright-1.1.407-py3-none-any.whl", hash = "sha256:6dd419f54fcc13f03b52285796d65e639786373f433e243f8b94cf93a7444d21", size = 5997008, upload-time = "2025-10-24T23:17:13.159Z" },
 ]
 
 [[package]]
@@ -2161,7 +2163,7 @@ requires-dist = [
     { name = "openai", specifier = ">=1.106.1" },
     { name = "pre-commit", specifier = ">=4.3.0" },
     { name = "pydantic", specifier = ">=2.11.7" },
-    { name = "pyright", specifier = ">=1.1.404" },
+    { name = "pyright", specifier = ">=1.1.407" },
     { name = "pytest", specifier = ">=8.4.1" },
     { name = "pytest-json-report", specifier = ">=1.5.0" },
     { name = "python-dotenv", specifier = ">=1.1.1" },