26 changes: 0 additions & 26 deletions src/contextual_retrieval/contextual_retrieval_api_client.py
@@ -319,32 +319,6 @@ def client_stats(self) -> Dict[str, Any]:
                 "is_closed": self._client.is_closed,
             }
 
-            # Try to get connection pool statistics safely
-            # Note: Accessing internal attributes for monitoring only
-            try:
-                transport = getattr(self._client, "_transport", None)
-                if transport and hasattr(transport, "_pool"):
-                    pool = getattr(transport, "_pool", None)
-                    if pool:
-                        # Use getattr with defaults to safely access pool statistics
-                        connections = getattr(pool, "_connections", [])
-                        keepalive_connections = getattr(
-                            pool, "_keepalive_connections", []
-                        )
-                        stats.update(
-                            {
-                                "pool_connections": len(connections)
-                                if connections
-                                else 0,
-                                "keepalive_connections": len(keepalive_connections)
-                                if keepalive_connections
-                                else 0,
-                            }
-                        )
-            except (AttributeError, TypeError):
-                # If we can't access pool stats, just continue without them
-                pass
-
             return stats
 
         except Exception as e:
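The deleted block reached through httpx's private attributes (`_transport`, `_pool`, `_connections`) purely for monitoring; those names are internal to httpx and can change between releases. A minimal sketch of the shape `client_stats()` plausibly keeps after this change, assuming the surrounding method builds `stats` as the remaining context suggests (the class name here is hypothetical, not code from this PR):

    # Hypothetical sketch; only "is_closed" and the outer try/except are
    # visible in this diff, the rest of the real method is collapsed above.
    from typing import Any, Dict

    import httpx


    class ClientSketch:
        def __init__(self) -> None:
            self._client = httpx.Client()

        def client_stats(self) -> Dict[str, Any]:
            try:
                # Public httpx surface only; the deleted code probed private
                # pool internals, which are not a stable API.
                return {"is_closed": self._client.is_closed}
            except Exception:
                return {}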
42 changes: 0 additions & 42 deletions src/contextual_retrieval/qdrant_search.py
@@ -256,48 +256,6 @@ async def _search_single_collection(
         )
         return []
 
-    def get_embedding_for_query(
-        self,
-        query: str,
-        environment: str = "production",
-        connection_id: Optional[str] = None,
-    ) -> Optional[List[float]]:
-        """
-        Get embedding for query using existing LLMOrchestrationService infrastructure.
-
-        Args:
-            query: Text to embed
-            environment: Environment for model resolution
-            connection_id: Optional connection ID
-
-        Returns:
-            Query embedding vector or None if failed
-        """
-        try:
-            # Import here to avoid circular dependencies
-            from src.llm_orchestration_service import LLMOrchestrationService
-
-            llm_service = LLMOrchestrationService()
-
-            # Use existing embedding creation method
-            embedding_result = llm_service.create_embeddings_for_indexer(
-                texts=[query],
-                environment=environment,
-                connection_id=connection_id,
-                batch_size=self._config.performance.batch_size,
-            )
-
-            embeddings = embedding_result.get("embeddings", [])
-            if embeddings and len(embeddings) > 0:
-                return embeddings[0]
-            else:
-                logger.error("No embedding returned for query")
-                return None
-
-        except Exception as e:
-            logger.error(f"Failed to get query embedding: {e}")
-            return None
-
     def get_embedding_for_query_with_service(
         self,
         query: str,
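The deleted method constructed its own LLMOrchestrationService behind a function-local import to dodge a circular dependency; the sibling that survives, get_embedding_for_query_with_service, points toward dependency injection instead. A hedged sketch of that calling pattern; the position and name of the service parameter are assumptions, since only `query: str` is visible in the remaining signature:

    # Hedged sketch, not the repository's actual API: it assumes the
    # surviving method accepts an injected service instance.
    from typing import Any, List, Optional


    def embed_query(searcher: Any, llm_service: Any, query: str) -> Optional[List[float]]:
        # The caller builds the service once and passes it in, so
        # qdrant_search no longer needs the function-local import of
        # LLMOrchestrationService that the deleted code used.
        return searcher.get_embedding_for_query_with_service(query, llm_service)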
6 changes: 1 addition & 5 deletions src/llm_orchestrator_config/config/loader.py
@@ -435,14 +435,10 @@ def _update_default_provider(self, config: Dict[str, Any]) -> None:
         Args:
             config: Configuration dictionary to update
         """
-        if "providers" not in config:
+        if "providers" not in config or not config["providers"]:
             return
 
-        available_providers = config["providers"]
-
-        if not available_providers:
-            return
 
         # Auto-set default provider if not specified
         if "default_provider" not in config:
             new_default = next(iter(available_providers.keys()))
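The two early returns collapse into a single guard: a missing "providers" key and an empty providers mapping both mean there is nothing to default to, so the `available_providers` temporary and its separate emptiness check become redundant. A small standalone illustration of the equivalence (has_providers is a hypothetical helper, not code from this PR):

    # Mirrors the new single check in _update_default_provider.
    from typing import Any, Dict


    def has_providers(config: Dict[str, Any]) -> bool:
        # Negation of the consolidated early-return condition:
        #   if "providers" not in config or not config["providers"]: return
        return not ("providers" not in config or not config["providers"])


    assert not has_providers({})                         # key absent
    assert not has_providers({"providers": {}})          # key present but empty
    assert has_providers({"providers": {"openai": {}}})  # at least one provider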