diff --git a/src/contextual_retrieval/contextual_retrieval_api_client.py b/src/contextual_retrieval/contextual_retrieval_api_client.py
index 1777857..3b82e1c 100644
--- a/src/contextual_retrieval/contextual_retrieval_api_client.py
+++ b/src/contextual_retrieval/contextual_retrieval_api_client.py
@@ -319,32 +319,6 @@ def client_stats(self) -> Dict[str, Any]:
                 "is_closed": self._client.is_closed,
             }
 
-            # Try to get connection pool statistics safely
-            # Note: Accessing internal attributes for monitoring only
-            try:
-                transport = getattr(self._client, "_transport", None)
-                if transport and hasattr(transport, "_pool"):
-                    pool = getattr(transport, "_pool", None)
-                    if pool:
-                        # Use getattr with defaults to safely access pool statistics
-                        connections = getattr(pool, "_connections", [])
-                        keepalive_connections = getattr(
-                            pool, "_keepalive_connections", []
-                        )
-                        stats.update(
-                            {
-                                "pool_connections": len(connections)
-                                if connections
-                                else 0,
-                                "keepalive_connections": len(keepalive_connections)
-                                if keepalive_connections
-                                else 0,
-                            }
-                        )
-            except (AttributeError, TypeError):
-                # If we can't access pool stats, just continue without them
-                pass
-
             return stats
 
         except Exception as e:
diff --git a/src/contextual_retrieval/qdrant_search.py b/src/contextual_retrieval/qdrant_search.py
index c8ebe44..47c2199 100644
--- a/src/contextual_retrieval/qdrant_search.py
+++ b/src/contextual_retrieval/qdrant_search.py
@@ -256,48 +256,6 @@ async def _search_single_collection(
             )
             return []
 
-    def get_embedding_for_query(
-        self,
-        query: str,
-        environment: str = "production",
-        connection_id: Optional[str] = None,
-    ) -> Optional[List[float]]:
-        """
-        Get embedding for query using existing LLMOrchestrationService infrastructure.
-
-        Args:
-            query: Text to embed
-            environment: Environment for model resolution
-            connection_id: Optional connection ID
-
-        Returns:
-            Query embedding vector or None if failed
-        """
-        try:
-            # Import here to avoid circular dependencies
-            from src.llm_orchestration_service import LLMOrchestrationService
-
-            llm_service = LLMOrchestrationService()
-
-            # Use existing embedding creation method
-            embedding_result = llm_service.create_embeddings_for_indexer(
-                texts=[query],
-                environment=environment,
-                connection_id=connection_id,
-                batch_size=self._config.performance.batch_size,
-            )
-
-            embeddings = embedding_result.get("embeddings", [])
-            if embeddings and len(embeddings) > 0:
-                return embeddings[0]
-            else:
-                logger.error("No embedding returned for query")
-                return None
-
-        except Exception as e:
-            logger.error(f"Failed to get query embedding: {e}")
-            return None
-
     def get_embedding_for_query_with_service(
         self,
         query: str,
diff --git a/src/llm_orchestrator_config/config/loader.py b/src/llm_orchestrator_config/config/loader.py
index 9398777..9612210 100644
--- a/src/llm_orchestrator_config/config/loader.py
+++ b/src/llm_orchestrator_config/config/loader.py
@@ -435,14 +435,10 @@ def _update_default_provider(self, config: Dict[str, Any]) -> None:
        Args:
            config: Configuration dictionary to update
        """
 
-        if "providers" not in config:
+        if "providers" not in config or not config["providers"]:
             return
 
-        available_providers = config["providers"]
-        if not available_providers:
-            return
-
         # Auto-set default provider if not specified
         if "default_provider" not in config:
-            new_default = next(iter(available_providers.keys()))
+            new_default = next(iter(config["providers"].keys()))
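
Note on the qdrant_search.py hunk: the deleted get_embedding_for_query built a fresh LLMOrchestrationService on every call, with a function-local import to dodge a circular dependency; the surviving get_embedding_for_query_with_service takes the service from the caller instead. A hedged caller-side sketch of that pattern follows, where `searcher` and the `llm_service` parameter name are assumptions, since the hunk truncates the surviving signature after `query`:

    from src.llm_orchestration_service import LLMOrchestrationService

    # Build the service once at the call site rather than per query.
    llm_service = LLMOrchestrationService()

    # `searcher` is a hypothetical QdrantSearch-style instance; the
    # `llm_service` parameter name is assumed, not confirmed by the diff.
    embedding = searcher.get_embedding_for_query_with_service(
        query="example query",
        llm_service=llm_service,
    )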
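
Note on the loader.py hunk: the two-step guard (key check, then emptiness check) collapses into a single test, and the first configured provider still becomes the default when none is set. A minimal standalone sketch of the resulting behavior, written as a plain function rather than the method shown in the diff, and assuming the function ends by assigning new_default to config["default_provider"] (that assignment sits past the end of the hunk):

    from typing import Any, Dict

    def _update_default_provider(config: Dict[str, Any]) -> None:
        # One guard covers both failure modes: key absent or empty mapping.
        if "providers" not in config or not config["providers"]:
            return
        # Auto-set default provider if not specified; dict insertion order
        # makes this the first provider listed in the config.
        if "default_provider" not in config:
            config["default_provider"] = next(iter(config["providers"]))

    cfg: Dict[str, Any] = {"providers": {"openai": {}, "anthropic": {}}}
    _update_default_provider(cfg)
    assert cfg["default_provider"] == "openai"
    _update_default_provider({})  # no providers key: returns unchanged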