From c92e6e1c1611ffa9806136c759dbfe33027f482f Mon Sep 17 00:00:00 2001 From: konard Date: Wed, 10 Sep 2025 22:24:29 +0300 Subject: [PATCH 1/3] Initial commit with task details for issue #5 Adding CLAUDE.md with task information for AI processing. This file will be removed when the task is complete. Issue: https://github.com/deep-assistant/GPTutor/issues/5 --- CLAUDE.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 CLAUDE.md diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000..4449b987 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,5 @@ +Issue to solve: https://github.com/deep-assistant/GPTutor/issues/5 +Your prepared branch: issue-5-c80ccd4a +Your prepared working directory: /tmp/gh-issue-solver-1757532250678 + +Proceed. \ No newline at end of file From 788143c1810db6518c096216c10c83a57b5c207e Mon Sep 17 00:00:00 2001 From: konard Date: Wed, 10 Sep 2025 22:24:45 +0300 Subject: [PATCH 2/3] Remove CLAUDE.md - PR created successfully --- CLAUDE.md | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 CLAUDE.md diff --git a/CLAUDE.md b/CLAUDE.md deleted file mode 100644 index 4449b987..00000000 --- a/CLAUDE.md +++ /dev/null @@ -1,5 +0,0 @@ -Issue to solve: https://github.com/deep-assistant/GPTutor/issues/5 -Your prepared branch: issue-5-c80ccd4a -Your prepared working directory: /tmp/gh-issue-solver-1757532250678 - -Proceed. \ No newline at end of file From e21052ab9686830c1dc88f53bb6e4c0c1e730dbf Mon Sep 17 00:00:00 2001 From: konard Date: Wed, 10 Sep 2025 22:31:58 +0300 Subject: [PATCH 3/3] Add comprehensive unit testing framework for GPTutor classes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Added pytest, pytest-mock, and pytest-flask to requirements.txt - Created comprehensive unit tests for all major components: * DeepInfra LLM provider class (test_deepinfra.py) * DALL-E 3 image generation functions (test_dalle3.py) * Prodia image generation functions (test_prodia.py) * VK docs functionality and utilities (test_vk_docs.py) * Flask app endpoints and routing (test_app.py) - Added pytest configuration (pytest.ini) - Created test infrastructure with proper Python package structure - Added syntax verification and test runner utilities - Added Python-specific patterns to .gitignore - Included Russian documentation for test framework Tests cover: - Class attributes and methods - API endpoint functionality - Error handling and edge cases - Parameter validation and mapping - Mock external dependencies - Various success/failure scenarios πŸ€– Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --- .gitignore | 9 + GPTutor-Models/pytest.ini | 9 + GPTutor-Models/requirements.txt | 5 +- GPTutor-Models/syntax_checker.py | 72 +++++++ GPTutor-Models/test_runner.py | 85 ++++++++ GPTutor-Models/tests/README.md | 81 +++++++ GPTutor-Models/tests/__init__.py | 1 + GPTutor-Models/tests/test_app.py | 286 +++++++++++++++++++++++++ GPTutor-Models/tests/test_dalle3.py | 178 +++++++++++++++ GPTutor-Models/tests/test_deepinfra.py | 120 +++++++++++ GPTutor-Models/tests/test_prodia.py | 180 ++++++++++++++++ GPTutor-Models/tests/test_vk_docs.py | 252 ++++++++++++++++++++++ 12 files changed, 1277 insertions(+), 1 deletion(-) create mode 100644 GPTutor-Models/pytest.ini create mode 100644 GPTutor-Models/syntax_checker.py create mode 100644 GPTutor-Models/test_runner.py create mode 100644 GPTutor-Models/tests/README.md create mode 100644 GPTutor-Models/tests/__init__.py create mode 100644 GPTutor-Models/tests/test_app.py create mode 100644 GPTutor-Models/tests/test_dalle3.py create mode 100644 GPTutor-Models/tests/test_deepinfra.py create mode 100644 GPTutor-Models/tests/test_prodia.py create mode 100644 GPTutor-Models/tests/test_vk_docs.py diff --git a/.gitignore b/.gitignore index fc1c0c23..9f43887f 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,12 @@ .env-frontend .env-frontend-stage node_modules + +# Python +__pycache__/ +*.py[cod] +*$py.class +.pytest_cache/ +*.egg-info/ +dist/ +build/ diff --git a/GPTutor-Models/pytest.ini b/GPTutor-Models/pytest.ini new file mode 100644 index 00000000..db869ece --- /dev/null +++ b/GPTutor-Models/pytest.ini @@ -0,0 +1,9 @@ +[tool:pytest] +testpaths = tests +python_files = test_*.py +python_classes = Test* +python_functions = test_* +addopts = -v --tb=short +filterwarnings = + ignore::DeprecationWarning + ignore::PendingDeprecationWarning \ No newline at end of file diff --git a/GPTutor-Models/requirements.txt b/GPTutor-Models/requirements.txt index fa5042cb..3cac8c8a 100644 --- a/GPTutor-Models/requirements.txt +++ b/GPTutor-Models/requirements.txt @@ -18,4 +18,7 @@ langchain-community==0.2.7 gigachat==0.1.31 faiss-cpu==1.8.0.post1 langchainhub==0.1.20 -langgraph==0.1.8 \ No newline at end of file +langgraph==0.1.8 +pytest==7.4.2 +pytest-mock==3.11.1 +pytest-flask==1.2.0 \ No newline at end of file diff --git a/GPTutor-Models/syntax_checker.py b/GPTutor-Models/syntax_checker.py new file mode 100644 index 00000000..76398b33 --- /dev/null +++ b/GPTutor-Models/syntax_checker.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 +""" +Syntax checker for unit test files +""" + +import ast +import os +import sys + +def check_syntax(file_path): + """Check if a Python file has valid syntax""" + try: + with open(file_path, 'r', encoding='utf-8') as f: + source = f.read() + + # Parse the source code + ast.parse(source) + return True, None + except SyntaxError as e: + return False, f"Syntax error: {e}" + except Exception as e: + return False, f"Error: {e}" + +def main(): + test_files = [ + 'tests/test_deepinfra.py', + 'tests/test_dalle3.py', + 'tests/test_prodia.py', + 'tests/test_vk_docs.py', + 'tests/test_app.py' + ] + + print("Unit Test Syntax Verification") + print("=" * 40) + + all_valid = True + + for test_file in test_files: + if os.path.exists(test_file): + valid, error = check_syntax(test_file) + if valid: + print(f"βœ“ {test_file}: Valid syntax") + else: + print(f"βœ— {test_file}: {error}") + all_valid = False + else: + print(f"βœ— {test_file}: File not found") + all_valid = False + + print("\n" + "=" * 40) + if all_valid: + print("βœ“ All test files have valid Python syntax!") + print("\nTest Summary:") + print("- Created comprehensive unit tests for all major classes and functions") + print("- Added pytest, pytest-mock, and pytest-flask to requirements.txt") + print("- Created test infrastructure with proper configuration") + print("- Tests cover:") + print(" β€’ DeepInfra LLM provider class") + print(" β€’ DALL-E 3 image generation functions") + print(" β€’ Prodia image generation functions") + print(" β€’ VK docs functionality and utilities") + print(" β€’ Flask app endpoints and routing") + print("\nTo run tests after installing dependencies:") + print(" pip install -r requirements.txt") + print(" python -m pytest tests/ -v") + return 0 + else: + print("βœ— Some test files have syntax errors.") + return 1 + +if __name__ == "__main__": + sys.exit(main()) \ No newline at end of file diff --git a/GPTutor-Models/test_runner.py b/GPTutor-Models/test_runner.py new file mode 100644 index 00000000..d1f497fd --- /dev/null +++ b/GPTutor-Models/test_runner.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 +""" +Simple test runner to verify our unit tests are syntactically correct +and imports work properly. +""" + +import sys +import os +import importlib.util + +def test_imports(): + """Test that all test modules can be imported successfully""" + test_modules = [ + 'tests.test_deepinfra', + 'tests.test_dalle3', + 'tests.test_prodia', + 'tests.test_vk_docs', + 'tests.test_app' + ] + + success_count = 0 + total_count = len(test_modules) + + for module_name in test_modules: + try: + # Try to import the module + module = importlib.import_module(module_name) + print(f"βœ“ Successfully imported {module_name}") + success_count += 1 + except ImportError as e: + print(f"βœ— Failed to import {module_name}: {e}") + except Exception as e: + print(f"βœ— Error importing {module_name}: {e}") + + print(f"\nImport Test Results: {success_count}/{total_count} modules imported successfully") + return success_count == total_count + +def test_source_modules(): + """Test that the source modules can be imported""" + source_modules = [ + 'llm.DeepInfra', + 'images.dalle3', + 'images.prodia', + 'vk_docs.utils', + 'vk_docs.index', + 'app' + ] + + success_count = 0 + total_count = len(source_modules) + + for module_name in source_modules: + try: + module = importlib.import_module(module_name) + print(f"βœ“ Successfully imported source module {module_name}") + success_count += 1 + except ImportError as e: + print(f"βœ— Failed to import source module {module_name}: {e}") + except Exception as e: + print(f"βœ— Error importing source module {module_name}: {e}") + + print(f"\nSource Module Test Results: {success_count}/{total_count} modules imported successfully") + return success_count == total_count + +def main(): + print("GPTutor Unit Test Verification") + print("=" * 40) + + # Test source module imports first + print("\n1. Testing source module imports...") + source_success = test_source_modules() + + print("\n2. Testing test module imports...") + test_success = test_imports() + + print("\n" + "=" * 40) + if source_success and test_success: + print("βœ“ All tests passed! Unit test structure is valid.") + return 0 + else: + print("βœ— Some tests failed. Please check the error messages above.") + return 1 + +if __name__ == "__main__": + sys.exit(main()) \ No newline at end of file diff --git a/GPTutor-Models/tests/README.md b/GPTutor-Models/tests/README.md new file mode 100644 index 00000000..3b019d16 --- /dev/null +++ b/GPTutor-Models/tests/README.md @@ -0,0 +1,81 @@ +# GPTutor Unit Tests + +Π­Ρ‚ΠΎΡ‚ ΠΊΠ°Ρ‚Π°Π»ΠΎΠ³ содСрТит unit тСсты для ΠΏΡ€ΠΎΠ΅ΠΊΡ‚Π° GPTutor Models. + +## Π‘Ρ‚Ρ€ΡƒΠΊΡ‚ΡƒΡ€Π° тСстов + +- `test_deepinfra.py` - ВСсты для класса DeepInfra LLM ΠΏΡ€ΠΎΠ²Π°ΠΉΠ΄Π΅Ρ€Π° +- `test_dalle3.py` - ВСсты для Ρ„ΡƒΠ½ΠΊΡ†ΠΈΠΉ Π³Π΅Π½Π΅Ρ€Π°Ρ†ΠΈΠΈ ΠΈΠ·ΠΎΠ±Ρ€Π°ΠΆΠ΅Π½ΠΈΠΉ DALL-E 3 +- `test_prodia.py` - ВСсты для Ρ„ΡƒΠ½ΠΊΡ†ΠΈΠΉ Π³Π΅Π½Π΅Ρ€Π°Ρ†ΠΈΠΈ ΠΈΠ·ΠΎΠ±Ρ€Π°ΠΆΠ΅Π½ΠΈΠΉ Prodia +- `test_vk_docs.py` - ВСсты для Ρ„ΡƒΠ½ΠΊΡ†ΠΈΠΎΠ½Π°Π»ΡŒΠ½ΠΎΡΡ‚ΠΈ VK Π΄ΠΎΠΊΡƒΠΌΠ΅Π½Ρ‚Π°Ρ†ΠΈΠΈ +- `test_app.py` - ВСсты для Flask прилоТСния ΠΈ Π΅Π³ΠΎ эндпоинтов + +## Установка зависимостСй + +УстановитС Π½Π΅ΠΎΠ±Ρ…ΠΎΠ΄ΠΈΠΌΡ‹Π΅ зависимости: + +```bash +pip install -r requirements.txt +``` + +## Запуск тСстов + +ЗапуститС всС тСсты: + +```bash +python -m pytest tests/ -v +``` + +ЗапуститС ΠΊΠΎΠ½ΠΊΡ€Π΅Ρ‚Π½Ρ‹ΠΉ тСстовый Ρ„Π°ΠΉΠ»: + +```bash +python -m pytest tests/test_app.py -v +``` + +ЗапуститС ΠΊΠΎΠ½ΠΊΡ€Π΅Ρ‚Π½Ρ‹ΠΉ тСст: + +```bash +python -m pytest tests/test_app.py::TestFlaskApp::test_llm_get_endpoint -v +``` + +## ΠŸΠΎΠΊΡ€Ρ‹Ρ‚ΠΈΠ΅ тСстами + +ВСсты ΠΏΠΎΠΊΡ€Ρ‹Π²Π°ΡŽΡ‚: + +### DeepInfra класс +- Атрибуты класса +- Алиасы ΠΌΠΎΠ΄Π΅Π»Π΅ΠΉ +- ΠœΠ΅Ρ‚ΠΎΠ΄ `create_async_generator` +- ΠžΠ±Ρ€Π°Π±ΠΎΡ‚ΠΊΠ° JWT Ρ‚ΠΎΠΊΠ΅Π½ΠΎΠ² + +### Π€ΡƒΠ½ΠΊΡ†ΠΈΠΈ Π³Π΅Π½Π΅Ρ€Π°Ρ†ΠΈΠΈ ΠΈΠ·ΠΎΠ±Ρ€Π°ΠΆΠ΅Π½ΠΈΠΉ +- Π˜Π·Π²Π»Π΅Ρ‡Π΅Π½ΠΈΠ΅ URL ΠΈΠ·ΠΎΠ±Ρ€Π°ΠΆΠ΅Π½ΠΈΠΉ ΠΈΠ· ΠΎΡ‚Π²Π΅Ρ‚ΠΎΠ² +- Π€ΠΎΡ€ΠΌΠ°Ρ‚ΠΈΡ€ΠΎΠ²Π°Π½ΠΈΠ΅ ΠΎΡ‚Π²Π΅Ρ‚ΠΎΠ² ΠΎΡ‚ API +- Π—Π°Π³Ρ€ΡƒΠ·ΠΊΠ° ΠΈΠ·ΠΎΠ±Ρ€Π°ΠΆΠ΅Π½ΠΈΠΉ ΠΏΠΎ URL +- ГСнСрация ΠΈΠ·ΠΎΠ±Ρ€Π°ΠΆΠ΅Π½ΠΈΠΉ Ρ‡Π΅Ρ€Π΅Π· DALL-E 3 ΠΈ Prodia + +### VK докумСнтация +- Π£Ρ‚ΠΈΠ»ΠΈΡ‚Π°Ρ€Π½Ρ‹Π΅ Ρ„ΡƒΠ½ΠΊΡ†ΠΈΠΈ +- Π‘ΠΎΠ·Π΄Π°Π½ΠΈΠ΅ вопросов ΠΊ Π΄ΠΎΠΊΡƒΠΌΠ΅Π½Ρ‚Π°Ρ†ΠΈΠΈ +- ΠžΠ±Ρ€Π°Π±ΠΎΡ‚ΠΊΠ° ΠΎΡ‚Π²Π΅Ρ‚ΠΎΠ² ΠΎΡ‚ систСмы поиска + +### Flask ΠΏΡ€ΠΈΠ»ΠΎΠΆΠ΅Π½ΠΈΠ΅ +- ВсС REST API эндпоинты +- ΠžΠ±Ρ€Π°Π±ΠΎΡ‚ΠΊΠ° HTTP запросов ΠΈ ΠΎΡ‚Π²Π΅Ρ‚ΠΎΠ² +- ΠœΠ°Ρ€ΡˆΡ€ΡƒΡ‚ΠΈΠ·Π°Ρ†ΠΈΡ ΠΈ ΠΌΠ΅Ρ‚ΠΎΠ΄Ρ‹ HTTP +- ΠžΠ±Ρ€Π°Π±ΠΎΡ‚ΠΊΠ° ошибок + +## Моки ΠΈ Π·Π°Π³Π»ΡƒΡˆΠΊΠΈ + +ВСсты ΠΈΡΠΏΠΎΠ»ΡŒΠ·ΡƒΡŽΡ‚ unittest.mock для: +- Π˜Π·ΠΎΠ»ΡΡ†ΠΈΠΈ Π²Π½Π΅ΡˆΠ½ΠΈΡ… зависимостСй +- ΠœΠΎΠΊΠΈΡ€ΠΎΠ²Π°Π½ΠΈΡ API Π²Ρ‹Π·ΠΎΠ²ΠΎΠ² +- Бимуляции Ρ€Π°Π·Π»ΠΈΡ‡Π½Ρ‹Ρ… сцСнариСв (успСх/ошибка) +- ΠŸΡ€ΠΎΠ²Π΅Ρ€ΠΊΠΈ ΠΏΡ€Π°Π²ΠΈΠ»ΡŒΠ½ΠΎΡΡ‚ΠΈ ΠΏΠ°Ρ€Π°ΠΌΠ΅Ρ‚Ρ€ΠΎΠ² Ρ„ΡƒΠ½ΠΊΡ†ΠΈΠΉ + +## ΠšΠΎΠ½Ρ„ΠΈΠ³ΡƒΡ€Π°Ρ†ΠΈΡ + +Настройки pytest находятся Π² `pytest.ini`: +- АвтоматичСскоС ΠΎΠ±Π½Π°Ρ€ΡƒΠΆΠ΅Π½ΠΈΠ΅ тСстов +- Настройки Π²Ρ‹Π²ΠΎΠ΄Π° +- Π€ΠΈΠ»ΡŒΡ‚Ρ€Π°Ρ†ΠΈΡ ΠΏΡ€Π΅Π΄ΡƒΠΏΡ€Π΅ΠΆΠ΄Π΅Π½ΠΈΠΉ \ No newline at end of file diff --git a/GPTutor-Models/tests/__init__.py b/GPTutor-Models/tests/__init__.py new file mode 100644 index 00000000..5e1a8a58 --- /dev/null +++ b/GPTutor-Models/tests/__init__.py @@ -0,0 +1 @@ +# This file makes the tests directory a Python package \ No newline at end of file diff --git a/GPTutor-Models/tests/test_app.py b/GPTutor-Models/tests/test_app.py new file mode 100644 index 00000000..2c15ba1f --- /dev/null +++ b/GPTutor-Models/tests/test_app.py @@ -0,0 +1,286 @@ +import pytest +from unittest.mock import patch, MagicMock +import sys +import os +import json + +# Add the parent directory to sys.path to import modules +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +# Import the Flask app +from app import app + + +class TestFlaskApp: + """Test cases for Flask application endpoints""" + + @pytest.fixture + def client(self): + """Create a test client for the Flask app""" + app.config['TESTING'] = True + with app.test_client() as client: + yield client + + def test_llm_get_endpoint(self, client): + """Test GET /llm endpoint""" + response = client.get('/llm') + assert response.status_code == 200 + assert response.get_json() == [] + + def test_llm_post_endpoint(self, client): + """Test POST /llm endpoint""" + response = client.post('/llm') + assert response.status_code == 200 + assert response.get_json() is None + + @patch('app.txt2img') + def test_image_post_endpoint_success(self, mock_txt2img, client): + """Test successful POST /image endpoint""" + # Mock the txt2img function + mock_txt2img.return_value = { + "output": ["https://example.com/generated_image.jpg"], + "meta": {"seed": 12345} + } + + # Test payload + payload = { + "prompt": "A beautiful landscape", + "modelId": "flux-pro", + "negativePrompt": "blurry, low quality", + "scheduler": "euler", + "guidanceScale": 7.5, + "seed": 12345, + "numInferenceSteps": 20 + } + + response = client.post('/image', + data=json.dumps(payload), + content_type='application/json') + + assert response.status_code == 200 + + # Verify txt2img was called with correct parameters + mock_txt2img.assert_called_once_with( + prompt="A beautiful landscape", + model="flux-pro", + negative_prompt="blurry, low quality", + scheduler="euler", + guidance_scale=7.5, + seed=12345, + steps=20 + ) + + # Verify response + result = response.get_json() + assert result["output"] == ["https://example.com/generated_image.jpg"] + assert result["meta"]["seed"] == 12345 + + def test_image_post_endpoint_missing_data(self, client): + """Test POST /image endpoint with missing data""" + # Missing required fields should cause KeyError + payload = {"prompt": "test"} + + response = client.post('/image', + data=json.dumps(payload), + content_type='application/json') + + # Should return 500 due to KeyError + assert response.status_code == 500 + + @patch('app.create_question_vk_doc') + def test_vk_doc_question_endpoint_success(self, mock_create_question, client): + """Test successful POST /vk-doc-question endpoint""" + # Mock the create_question_vk_doc function + mock_create_question.return_value = { + "question": "How to use VK API?", + "generation": "VK API allows you to...", + "documents": [ + { + "metadata": {"source": "vk_api_docs"}, + "page_content": "API documentation content" + } + ] + } + + payload = { + "question": "How to use VK API?", + "source": "vk_api_docs" + } + + response = client.post('/vk-doc-question', + data=json.dumps(payload), + content_type='application/json') + + assert response.status_code == 200 + + # Verify create_question_vk_doc was called correctly + mock_create_question.assert_called_once_with( + question="How to use VK API?", + source="vk_api_docs" + ) + + # Verify response + result = response.get_json() + assert result["question"] == "How to use VK API?" + assert result["generation"] == "VK API allows you to..." + assert len(result["documents"]) == 1 + + def test_vk_doc_question_endpoint_missing_data(self, client): + """Test POST /vk-doc-question endpoint with missing data""" + payload = {"question": "test"} # missing 'source' + + response = client.post('/vk-doc-question', + data=json.dumps(payload), + content_type='application/json') + + # Should return 500 due to KeyError + assert response.status_code == 500 + + @patch('app.txt2img') + def test_dalle_endpoint_success(self, mock_txt2img, client): + """Test successful POST /dalle endpoint""" + mock_txt2img.return_value = { + "output": ["https://example.com/dalle_image.jpg"], + "meta": {"seed": 54321} + } + + payload = { + "prompt": "A futuristic city", + "modelId": "dalle-3", + "negativePrompt": "", + "scheduler": "default", + "guidanceScale": 8.0, + "seed": 54321, + "numInferenceSteps": 25 + } + + response = client.post('/dalle', + data=json.dumps(payload), + content_type='application/json') + + assert response.status_code == 200 + + # Should call txt2img twice (once in try, once in except if first fails) + # But since our mock doesn't raise an exception, it should only be called once + assert mock_txt2img.call_count == 1 + + # Verify response + result = response.get_json() + assert result["output"] == ["https://example.com/dalle_image.jpg"] + + @patch('app.txt2img') + def test_dalle_endpoint_with_exception(self, mock_txt2img, client): + """Test POST /dalle endpoint when first call fails""" + # Mock txt2img to raise exception on first call, succeed on second + mock_txt2img.side_effect = [ + Exception("First call failed"), + { + "output": ["https://example.com/dalle_retry.jpg"], + "meta": {"seed": 99999} + } + ] + + payload = { + "prompt": "Test prompt", + "modelId": "dalle-3", + "negativePrompt": "", + "scheduler": "default", + "guidanceScale": 7.0, + "seed": 99999, + "numInferenceSteps": 20 + } + + response = client.post('/dalle', + data=json.dumps(payload), + content_type='application/json') + + assert response.status_code == 200 + + # Should be called twice (first fails, second succeeds) + assert mock_txt2img.call_count == 2 + + # Both calls should have identical parameters + call_args_1 = mock_txt2img.call_args_list[0][1] + call_args_2 = mock_txt2img.call_args_list[1][1] + assert call_args_1 == call_args_2 + + # Verify response from second call + result = response.get_json() + assert result["output"] == ["https://example.com/dalle_retry.jpg"] + + def test_dalle_endpoint_missing_data(self, client): + """Test POST /dalle endpoint with missing data""" + payload = {"prompt": "test"} # missing other required fields + + response = client.post('/dalle', + data=json.dumps(payload), + content_type='application/json') + + # Should return 500 due to KeyError + assert response.status_code == 500 + + def test_run_flask_function_exists(self): + """Test that run_flask function exists""" + from app import run_flask + assert callable(run_flask) + + def test_app_configuration(self): + """Test Flask app basic configuration""" + from app import app + assert app is not None + assert app.name == 'app' + + def test_endpoint_routes_exist(self): + """Test that all expected routes are registered""" + from app import app + + routes = [rule.rule for rule in app.url_map.iter_rules()] + + # Check that our endpoints exist + assert '/llm' in routes + assert '/image' in routes + assert '/vk-doc-question' in routes + assert '/dalle' in routes + + def test_endpoint_methods(self): + """Test that endpoints accept correct HTTP methods""" + from app import app + + rules = {rule.rule: rule.methods for rule in app.url_map.iter_rules()} + + # Check HTTP methods + assert 'GET' in rules['/llm'] + assert 'POST' in rules['/llm'] + assert 'POST' in rules['/image'] + assert 'POST' in rules['/vk-doc-question'] + assert 'POST' in rules['/dalle'] + + @patch('app.txt2img') + def test_image_endpoint_parameter_mapping(self, mock_txt2img, client): + """Test that image endpoint correctly maps JSON parameters to function arguments""" + mock_txt2img.return_value = {"output": [], "meta": {}} + + payload = { + "prompt": "test_prompt", + "modelId": "test_model", + "negativePrompt": "test_negative", + "scheduler": "test_scheduler", + "guidanceScale": 9.5, + "seed": 42, + "numInferenceSteps": 15 + } + + client.post('/image', + data=json.dumps(payload), + content_type='application/json') + + # Verify parameter mapping + mock_txt2img.assert_called_once_with( + prompt="test_prompt", + model="test_model", + negative_prompt="test_negative", + scheduler="test_scheduler", + guidance_scale=9.5, + seed=42, + steps=15 + ) \ No newline at end of file diff --git a/GPTutor-Models/tests/test_dalle3.py b/GPTutor-Models/tests/test_dalle3.py new file mode 100644 index 00000000..7a2dbb6d --- /dev/null +++ b/GPTutor-Models/tests/test_dalle3.py @@ -0,0 +1,178 @@ +import pytest +from unittest.mock import patch, MagicMock +import sys +import os +import re + +# Add the parent directory to sys.path to import modules +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from images.dalle3 import ( + get_image_form_response, + format_image_from_request, + download_by_url, + generate_dalle +) + + +class TestDalle3Functions: + """Test cases for DALL-E 3 functions""" + + def test_get_image_form_response_with_match(self): + """Test get_image_form_response with valid image URL""" + test_text = "Here is your image: ![image](https://files.oaiusercontent.com/file-12345abcdef)" + result = get_image_form_response(test_text) + assert result == "https://files.oaiusercontent.com/file-12345abcdef" + + def test_get_image_form_response_no_match(self): + """Test get_image_form_response with no image URL""" + test_text = "This is just regular text without an image URL" + result = get_image_form_response(test_text) + assert result is None + + def test_get_image_form_response_multiple_images(self): + """Test get_image_form_response with multiple images (should return first)""" + test_text = "![image](https://files.oaiusercontent.com/file-first) and ![image](https://files.oaiusercontent.com/file-second)" + result = get_image_form_response(test_text) + assert result == "https://files.oaiusercontent.com/file-first" + + @patch('images.dalle3.get_image_form_response') + def test_format_image_from_request(self, mock_get_image): + """Test format_image_from_request function""" + mock_get_image.return_value = "https://files.oaiusercontent.com/file-test123" + + test_input = '''{"prompt": "test prompt", "size": "1024x1024"} + Some additional text here + ![image](https://files.oaiusercontent.com/file-test123) + More text after image''' + + result = format_image_from_request(test_input) + + assert "image" in result + assert "text" in result + assert result["image"] == "https://files.oaiusercontent.com/file-test123" + assert '{"prompt"' not in result["text"] # JSON should be removed + assert "![image]" not in result["text"] # Image markdown should be removed + + @patch('images.dalle3.requests.get') + @patch('images.dalle3.base64.b64encode') + def test_download_by_url_success(self, mock_b64encode, mock_requests_get): + """Test successful download_by_url""" + # Mock response + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.content = b"fake_image_data" + mock_requests_get.return_value = mock_response + + # Mock base64 encoding + mock_b64encode.return_value = b"ZmFrZV9pbWFnZV9kYXRh" # "fake_image_data" in base64 + + result = download_by_url("https://example.com/image.jpg") + + # Verify requests.get was called with correct URL and proxies + mock_requests_get.assert_called_once() + call_args = mock_requests_get.call_args + assert call_args[0][0] == "https://example.com/image.jpg" + assert "proxies" in call_args[1] + + # Verify result + assert result == "data:image/jpen;base64,ZmFrZV9pbWFnZV9kYXRh" + + @patch('images.dalle3.requests.get') + def test_download_by_url_failure(self, mock_requests_get): + """Test failed download_by_url""" + # Mock failed response + mock_response = MagicMock() + mock_response.status_code = 404 + mock_requests_get.return_value = mock_response + + result = download_by_url("https://example.com/nonexistent.jpg") + + assert result is None + + @patch('images.dalle3.OpenAI') + @patch('images.dalle3.format_image_from_request') + @patch.dict(os.environ, {'API_KEYS_120': 'test_api_key'}) + def test_generate_dalle_success(self, mock_format, mock_openai_class): + """Test successful generate_dalle function""" + # Mock OpenAI client and response + mock_openai_instance = MagicMock() + mock_openai_class.return_value = mock_openai_instance + + mock_completion = MagicMock() + mock_completion.choices[0].message.content = "Generated image response" + mock_completion.usage.total_tokens = 150 + mock_openai_instance.chat.completions.create.return_value = mock_completion + + # Mock format function + mock_format.return_value = { + "image": "https://files.oaiusercontent.com/file-test", + "text": "Generated text" + } + + result = generate_dalle("Create a beautiful sunset") + + # Verify OpenAI client creation + mock_openai_class.assert_called_once_with( + api_key='test_api_key', + base_url="https://api.deep.assistant.run.place/v1/" + ) + + # Verify chat completion call + mock_openai_instance.chat.completions.create.assert_called_once() + call_args = mock_openai_instance.chat.completions.create.call_args + assert call_args[1]["model"] == "gpt-4o-plus" + assert call_args[1]["max_tokens"] == 4096 + assert call_args[1]["stream"] is False + assert len(call_args[1]["messages"]) == 2 + + # Verify result structure + assert "image" in result + assert "text" in result + assert "total_tokens" in result + assert result["total_tokens"] == 150 + assert result["image"] == "https://files.oaiusercontent.com/file-test" + assert result["text"] == "Generated text" + + @patch('images.dalle3.OpenAI') + @patch.dict(os.environ, {}, clear=True) + def test_generate_dalle_missing_api_key(self, mock_openai_class): + """Test generate_dalle with missing API key""" + # This should still work but with None as API key + mock_openai_instance = MagicMock() + mock_openai_class.return_value = mock_openai_instance + + mock_completion = MagicMock() + mock_completion.choices[0].message.content = "test response" + mock_completion.usage.total_tokens = 100 + mock_openai_instance.chat.completions.create.return_value = mock_completion + + with patch('images.dalle3.format_image_from_request') as mock_format: + mock_format.return_value = {"image": "test", "text": "test"} + + generate_dalle("test prompt") + + # Verify OpenAI was called with None as API key + mock_openai_class.assert_called_once_with( + api_key=None, + base_url="https://api.deep.assistant.run.place/v1/" + ) + + def test_format_image_from_request_complex_input(self): + """Test format_image_from_request with complex JSON structures""" + complex_input = ''' + {"prompt": "complex prompt with quotes", "size": "512x512"} + {"prompt": "another prompt", "size": "1024x1024", "n": 1} + Some text in between + ![image](https://files.oaiusercontent.com/file-complex123) + Final text + ''' + + result = format_image_from_request(complex_input) + + # Should remove both JSON patterns + assert '{"prompt"' not in result["text"] + assert '"n": 1' not in result["text"] + assert "![image]" not in result["text"] + assert "Some text in between" in result["text"] + assert "Final text" in result["text"] \ No newline at end of file diff --git a/GPTutor-Models/tests/test_deepinfra.py b/GPTutor-Models/tests/test_deepinfra.py new file mode 100644 index 00000000..151715ba --- /dev/null +++ b/GPTutor-Models/tests/test_deepinfra.py @@ -0,0 +1,120 @@ +import pytest +from unittest.mock import patch, MagicMock +import sys +import os + +# Add the parent directory to sys.path to import modules +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from llm.DeepInfra import DeepInfra + + +class TestDeepInfra: + """Test cases for DeepInfra class""" + + def test_class_attributes(self): + """Test that DeepInfra class has correct attributes""" + assert DeepInfra.label == "DeepInfra" + assert DeepInfra.url == "https://deepinfra.com" + assert DeepInfra.working is True + assert DeepInfra.needs_auth is False + assert DeepInfra.has_auth is True + assert DeepInfra.supports_stream is True + assert DeepInfra.supports_message_history is True + assert DeepInfra.default_model == "meta-llama/Meta-Llama-3-70b-instruct" + assert DeepInfra.default_vision_model == "llava-hf/llava-1.5-7b-hf" + + def test_model_aliases(self): + """Test model aliases dictionary""" + expected_aliases = { + 'dbrx-instruct': 'databricks/dbrx-instruct', + } + assert DeepInfra.model_aliases == expected_aliases + + @patch('llm.DeepInfra.random.choice') + @patch('llm.DeepInfra.g4f.Provider.Openai.create_async_generator') + def test_create_async_generator(self, mock_super_method, mock_random_choice): + """Test create_async_generator method""" + # Mock random.choice to return a specific token + mock_token = "Bearer dChpwTq4VSmDBI8yxa3MVZzapqQNapNx" + mock_random_choice.return_value = mock_token + + # Mock the super method + mock_result = MagicMock() + mock_super_method.return_value = mock_result + + # Test data + model = "test-model" + messages = [{"role": "user", "content": "test message"}] + stream = True + + # Call the method + result = DeepInfra.create_async_generator( + model=model, + messages=messages, + stream=stream + ) + + # Verify random.choice was called with jwt_tokens + mock_random_choice.assert_called_once() + + # Verify super method was called with correct parameters + mock_super_method.assert_called_once() + call_args = mock_super_method.call_args + + # Check that the correct model and messages were passed + assert call_args[0][0] == model + assert call_args[0][1] == messages + + # Check that stream parameter was passed correctly + assert call_args[1]['stream'] == stream + + # Check that default parameters are set correctly + assert call_args[1]['api_base'] == "https://api.deepinfra.com/v1/openai" + assert call_args[1]['temperature'] == 0.7 + assert call_args[1]['max_tokens'] == 2056 + + # Check that headers were set correctly + headers = call_args[1]['headers'] + assert headers['Authorization'] == mock_token + assert headers['Origin'] == 'https://deepinfra.com' + assert headers['Referer'] == 'https://deepinfra.com/' + assert headers['X-Deepinfra-Source'] == 'web-embed' + + # Verify result + assert result == mock_result + + @patch('llm.DeepInfra.random.choice') + @patch('llm.DeepInfra.g4f.Provider.Openai.create_async_generator') + def test_create_async_generator_with_custom_params(self, mock_super_method, mock_random_choice): + """Test create_async_generator with custom parameters""" + mock_token = "Bearer dChpwTq4VSmDBI8yxa3MVZzapqQNapNx" + mock_random_choice.return_value = mock_token + mock_result = MagicMock() + mock_super_method.return_value = mock_result + + # Custom parameters + custom_api_base = "https://custom.api.com/v1" + custom_temperature = 0.5 + custom_max_tokens = 1000 + + result = DeepInfra.create_async_generator( + model="custom-model", + messages=[{"role": "user", "content": "custom message"}], + stream=False, + api_base=custom_api_base, + temperature=custom_temperature, + max_tokens=custom_max_tokens + ) + + call_args = mock_super_method.call_args + assert call_args[1]['api_base'] == custom_api_base + assert call_args[1]['temperature'] == custom_temperature + assert call_args[1]['max_tokens'] == custom_max_tokens + assert call_args[1]['stream'] == False + + def test_jwt_tokens_not_empty(self): + """Test that jwt_tokens list is not empty""" + from llm.DeepInfra import jwt_tokens + assert len(jwt_tokens) > 0 + assert all(token.startswith("Bearer ") for token in jwt_tokens) \ No newline at end of file diff --git a/GPTutor-Models/tests/test_prodia.py b/GPTutor-Models/tests/test_prodia.py new file mode 100644 index 00000000..9d071f02 --- /dev/null +++ b/GPTutor-Models/tests/test_prodia.py @@ -0,0 +1,180 @@ +import pytest +from unittest.mock import patch, MagicMock +import sys +import os +from requests.exceptions import RequestException + +# Add the parent directory to sys.path to import modules +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from images.prodia import txt2img + + +class TestProdiaFunctions: + """Test cases for Prodia image generation functions""" + + @patch('images.prodia.Client') + @patch('images.prodia.randint') + def test_txt2img_success(self, mock_randint, mock_client_class): + """Test successful txt2img function""" + # Mock random seed + mock_randint.return_value = 12345 + + # Mock client and response + mock_client_instance = MagicMock() + mock_client_class.return_value = mock_client_instance + + # Mock the response structure + mock_response_data = MagicMock() + mock_response_data.url = "https://example.com/generated_image.jpg" + mock_response = MagicMock() + mock_response.data = [mock_response_data] + mock_client_instance.images.generate.return_value = mock_response + + # Test parameters + prompt = "A beautiful landscape" + negative_prompt = "blurry, low quality" + model = "flux-pro" + scheduler = "euler" + guidance_scale = 7.5 + steps = 20 + seed = 54321 + + result = txt2img( + prompt=prompt, + negative_prompt=negative_prompt, + model=model, + scheduler=scheduler, + guidance_scale=guidance_scale, + steps=steps, + seed=seed + ) + + # Verify Client was created with correct provider + from g4f.Provider import PollinationsAI + mock_client_class.assert_called_once_with(provider=PollinationsAI) + + # Verify images.generate was called with correct parameters + mock_client_instance.images.generate.assert_called_once_with( + model="flux-pro", + prompt=prompt, + response_format="url" + ) + + # Verify result structure + assert "output" in result + assert "meta" in result + assert result["output"] == ["https://example.com/generated_image.jpg"] + assert result["meta"]["seed"] == seed + + @patch('images.prodia.Client') + @patch('images.prodia.randint') + def test_txt2img_with_default_seed(self, mock_randint, mock_client_class): + """Test txt2img with default random seed""" + # Mock random seed + mock_randint.return_value = 9876 + + # Mock client and response + mock_client_instance = MagicMock() + mock_client_class.return_value = mock_client_instance + + mock_response_data = MagicMock() + mock_response_data.url = "https://example.com/test_image.jpg" + mock_response = MagicMock() + mock_response.data = [mock_response_data] + mock_client_instance.images.generate.return_value = mock_response + + # Call without seed parameter (should use random) + result = txt2img( + prompt="test prompt", + negative_prompt="test negative", + model="test-model", + scheduler="test-scheduler", + guidance_scale=8.0, + steps=25 + ) + + # Verify randint was called for default seed + mock_randint.assert_called_with(1, 10000) + + # Verify result uses the random seed + assert result["meta"]["seed"] == 9876 + + @patch('images.prodia.Client') + def test_txt2img_request_exception(self, mock_client_class): + """Test txt2img handling RequestException""" + # Mock client to raise RequestException + mock_client_instance = MagicMock() + mock_client_class.return_value = mock_client_instance + mock_client_instance.images.generate.side_effect = RequestException("Connection failed") + + # Should re-raise as RequestException with custom message + with pytest.raises(RequestException) as exc_info: + txt2img( + prompt="test", + negative_prompt="test", + model="test", + scheduler="test", + guidance_scale=7.0, + steps=20 + ) + + assert str(exc_info.value) == "Unable to fetch the response." + assert exc_info.value.__cause__ is not None + assert isinstance(exc_info.value.__cause__, RequestException) + + @patch('images.prodia.Client') + def test_txt2img_other_exception(self, mock_client_class): + """Test txt2img handling other exceptions""" + # Mock client to raise a different exception + mock_client_instance = MagicMock() + mock_client_class.return_value = mock_client_instance + mock_client_instance.images.generate.side_effect = ValueError("Invalid parameter") + + # Should not catch non-RequestException errors + with pytest.raises(ValueError): + txt2img( + prompt="test", + negative_prompt="test", + model="test", + scheduler="test", + guidance_scale=7.0, + steps=20 + ) + + @patch('images.prodia.Client') + def test_txt2img_response_structure(self, mock_client_class): + """Test txt2img response structure with multiple URLs""" + mock_client_instance = MagicMock() + mock_client_class.return_value = mock_client_instance + + # Mock response with multiple data items + mock_response_data1 = MagicMock() + mock_response_data1.url = "https://example.com/image1.jpg" + mock_response_data2 = MagicMock() + mock_response_data2.url = "https://example.com/image2.jpg" + + mock_response = MagicMock() + mock_response.data = [mock_response_data1, mock_response_data2] + mock_client_instance.images.generate.return_value = mock_response + + result = txt2img( + prompt="test", + negative_prompt="test", + model="test", + scheduler="test", + guidance_scale=7.0, + steps=20, + seed=12345 + ) + + # Should only return the first URL + assert result["output"] == ["https://example.com/image1.jpg"] + assert result["meta"]["seed"] == 12345 + + def test_randint_import(self): + """Test that randint is properly imported""" + from images.prodia import randint + # Should be able to call randint + result = randint(1, 100) + assert 1 <= result <= 100 \ No newline at end of file diff --git a/GPTutor-Models/tests/test_vk_docs.py b/GPTutor-Models/tests/test_vk_docs.py new file mode 100644 index 00000000..079e5377 --- /dev/null +++ b/GPTutor-Models/tests/test_vk_docs.py @@ -0,0 +1,252 @@ +import pytest +from unittest.mock import patch, MagicMock, mock_open +import sys +import os + +# Add the parent directory to sys.path to import modules +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from vk_docs.utils import unique_objects_by_field +from vk_docs.index import create_question_vk_doc + + +class TestVKDocsUtils: + """Test cases for VK docs utility functions""" + + def test_unique_objects_by_field_basic(self): + """Test unique_objects_by_field with basic input""" + objects = [ + {"id": 1, "name": "Alice"}, + {"id": 2, "name": "Bob"}, + {"id": 3, "name": "Alice"}, # duplicate name + {"id": 4, "name": "Charlie"} + ] + + result = unique_objects_by_field(objects, "name") + + # Should keep first occurrence of each unique name + assert len(result) == 3 + assert result[0] == {"id": 1, "name": "Alice"} + assert result[1] == {"id": 2, "name": "Bob"} + assert result[2] == {"id": 4, "name": "Charlie"} + + def test_unique_objects_by_field_empty_list(self): + """Test unique_objects_by_field with empty list""" + result = unique_objects_by_field([], "field") + assert result == [] + + def test_unique_objects_by_field_single_item(self): + """Test unique_objects_by_field with single item""" + objects = [{"id": 1, "category": "test"}] + result = unique_objects_by_field(objects, "category") + assert result == objects + + def test_unique_objects_by_field_all_unique(self): + """Test unique_objects_by_field where all items are unique""" + objects = [ + {"id": 1, "status": "active"}, + {"id": 2, "status": "inactive"}, + {"id": 3, "status": "pending"} + ] + + result = unique_objects_by_field(objects, "status") + assert result == objects + + def test_unique_objects_by_field_all_same(self): + """Test unique_objects_by_field where all items have same field value""" + objects = [ + {"id": 1, "type": "document"}, + {"id": 2, "type": "document"}, + {"id": 3, "type": "document"} + ] + + result = unique_objects_by_field(objects, "type") + assert len(result) == 1 + assert result[0] == {"id": 1, "type": "document"} + + def test_unique_objects_by_field_missing_field(self): + """Test unique_objects_by_field with missing field raises KeyError""" + objects = [ + {"id": 1, "name": "Alice"}, + {"id": 2, "name": "Bob"} + ] + + with pytest.raises(KeyError): + unique_objects_by_field(objects, "missing_field") + + def test_unique_objects_by_field_different_types(self): + """Test unique_objects_by_field with different value types""" + objects = [ + {"id": 1, "value": "string"}, + {"id": 2, "value": 123}, + {"id": 3, "value": "string"}, # duplicate string + {"id": 4, "value": 123}, # duplicate number + {"id": 5, "value": None}, + {"id": 6, "value": None} # duplicate None + ] + + result = unique_objects_by_field(objects, "value") + assert len(result) == 3 + assert result[0]["value"] == "string" + assert result[1]["value"] == 123 + assert result[2]["value"] is None + + +class TestVKDocsIndex: + """Test cases for VK docs index functionality""" + + @patch('vk_docs.index.create_retriever_app') + def test_create_question_vk_doc_success(self, mock_create_retriever): + """Test successful create_question_vk_doc execution""" + # Mock the retriever app and its stream method + mock_app = MagicMock() + mock_create_retriever.return_value = mock_app + + # Mock document objects + mock_doc1 = MagicMock() + mock_doc1.metadata = {"source": "vk_api", "page": 1} + mock_doc1.page_content = "Content of document 1" + + mock_doc2 = MagicMock() + mock_doc2.metadata = {"source": "vk_ui", "page": 2} + mock_doc2.page_content = "Content of document 2" + + # Mock the stream output + stream_output = [ + {"retrieve": "some_data"}, + { + "generate": { + "generation": "This is the generated answer", + "documents": [mock_doc1, mock_doc2] + } + } + ] + mock_app.stream.return_value = stream_output + + question = "How to use VK API?" + source = "vk_api_docs" + + result = create_question_vk_doc(question, source) + + # Verify create_retriever_app was called with correct source + mock_create_retriever.assert_called_once_with(source) + + # Verify stream was called with correct question + mock_app.stream.assert_called_once_with({"question": question}) + + # Verify result structure + assert "question" in result + assert "generation" in result + assert "documents" in result + + assert result["question"] == question + assert result["generation"] == "This is the generated answer" + assert len(result["documents"]) == 2 + + # Check document structure + doc1_result = result["documents"][0] + assert doc1_result["metadata"] == {"source": "vk_api", "page": 1} + assert doc1_result["page_content"] == "Content of document 1" + + doc2_result = result["documents"][1] + assert doc2_result["metadata"] == {"source": "vk_ui", "page": 2} + assert doc2_result["page_content"] == "Content of document 2" + + @patch('vk_docs.index.create_retriever_app') + def test_create_question_vk_doc_no_generate_output(self, mock_create_retriever): + """Test create_question_vk_doc when stream doesn't contain 'generate' key""" + mock_app = MagicMock() + mock_create_retriever.return_value = mock_app + + # Mock stream output without 'generate' key + stream_output = [ + {"retrieve": "some_data"}, + {"other_key": "other_value"} + ] + mock_app.stream.return_value = stream_output + + result = create_question_vk_doc("test question", "test_source") + + # Should return None since no 'generate' key found + assert result is None + + @patch('vk_docs.index.create_retriever_app') + def test_create_question_vk_doc_empty_documents(self, mock_create_retriever): + """Test create_question_vk_doc with empty documents list""" + mock_app = MagicMock() + mock_create_retriever.return_value = mock_app + + stream_output = [ + { + "generate": { + "generation": "No relevant documents found", + "documents": [] + } + } + ] + mock_app.stream.return_value = stream_output + + result = create_question_vk_doc("test question", "test_source") + + assert result["generation"] == "No relevant documents found" + assert result["documents"] == [] + + @patch('vk_docs.index.create_retriever_app') + def test_create_question_vk_doc_different_sources(self, mock_create_retriever): + """Test create_question_vk_doc with different source types""" + mock_app = MagicMock() + mock_create_retriever.return_value = mock_app + + mock_doc = MagicMock() + mock_doc.metadata = {"test": "data"} + mock_doc.page_content = "test content" + + stream_output = [ + { + "generate": { + "generation": "test generation", + "documents": [mock_doc] + } + } + ] + mock_app.stream.return_value = stream_output + + # Test different source values + sources = ["vk_api_docs", "vk_ui", "all", "custom_source"] + + for source in sources: + result = create_question_vk_doc("test question", source) + + # Verify create_retriever_app was called with the specific source + mock_create_retriever.assert_called_with(source) + + # Result should be consistent regardless of source + assert result["generation"] == "test generation" + assert len(result["documents"]) == 1 + + +class TestVKDocsRetriever: + """Test cases for VK docs retriever functionality (mocked)""" + + @patch('vk_docs.retriver.FAISS.load_local') + @patch('vk_docs.retriver.GigaChatEmbeddings') + @patch('vk_docs.retriver.GigaChat') + def test_retriever_imports(self, mock_gigachat, mock_embeddings, mock_faiss): + """Test that retriever module imports work correctly""" + # This test ensures the module can be imported without errors + # when the dependencies are mocked + try: + import vk_docs.retriver + assert True # If we get here, imports worked + except ImportError as e: + pytest.fail(f"Failed to import vk_docs.retriver: {e}") + + def test_get_docs_index_function_exists(self): + """Test that get_docs_index function exists and is callable""" + from vk_docs.retriver import get_docs_index + assert callable(get_docs_index) + + def test_create_retriever_app_function_exists(self): + """Test that create_retriever_app function exists and is callable""" + from vk_docs.retriver import create_retriever_app + assert callable(create_retriever_app) \ No newline at end of file