-
Notifications
You must be signed in to change notification settings - Fork 20
feat: support 'same-as-agent' model option for legacy evaluators #1048
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
Merged
Changes from all commits
Commits
Show all changes
6 commits
Select commit
Hold shift + click to select a range
66acfc2
feat: support 'same-as-agent' model option for legacy evaluators
2e41f5c
feat: add LLMAgentFactoryProtocol for model resolution
a3fde44
refactor: move get_agent_model from factory to runtime protocol
16f04cc
refactor: optimize metadata loading to use single temporary runtime
3b74169
test: add unit tests for eval runtime metadata loading
49b0f7c
chore: bump version to 2.3.1
File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -5,7 +5,16 @@ | |
| from contextlib import contextmanager | ||
| from pathlib import Path | ||
| from time import time | ||
| from typing import Any, Awaitable, Iterable, Iterator, Sequence, Tuple | ||
| from typing import ( | ||
| Any, | ||
| Awaitable, | ||
| Iterable, | ||
| Iterator, | ||
| Protocol, | ||
| Sequence, | ||
| Tuple, | ||
| runtime_checkable, | ||
| ) | ||
|
|
||
| import coverage | ||
| from opentelemetry import context as context_api | ||
|
|
@@ -67,6 +76,27 @@ | |
| set_execution_context, | ||
| ) | ||
|
|
||
| logger = logging.getLogger(__name__) | ||
|
|
||
|
|
||
| @runtime_checkable | ||
| class LLMAgentRuntimeProtocol(Protocol): | ||
| """Protocol for runtimes that can provide agent model information. | ||
|
|
||
| Runtimes that implement this protocol can be queried for | ||
| the agent's configured LLM model, enabling features like 'same-as-agent' | ||
| model resolution for evaluators. | ||
| """ | ||
|
|
||
| def get_agent_model(self) -> str | None: | ||
| """Return the agent's configured LLM model name. | ||
|
|
||
| Returns: | ||
| The model name from agent settings (e.g., 'gpt-4o-2024-11-20'), | ||
| or None if no model is configured. | ||
| """ | ||
| ... | ||
|
|
||
|
|
||
| class ExecutionSpanExporter(SpanExporter): | ||
| """Custom exporter that stores spans grouped by execution ids.""" | ||
|
|
@@ -180,6 +210,8 @@ def __init__( | |
| self.logs_exporter: ExecutionLogsExporter = ExecutionLogsExporter() | ||
| self.execution_id = str(uuid.uuid4()) | ||
| self.schema: UiPathRuntimeSchema | None = None | ||
| self._agent_model: str | None = None | ||
| self._metadata_loaded: bool = False | ||
| self.coverage = coverage.Coverage(branch=True) | ||
|
|
||
| async def __aenter__(self) -> "UiPathEvalRuntime": | ||
|
|
@@ -192,14 +224,33 @@ async def __aexit__(self, *args: Any) -> None: | |
| self.coverage.stop() | ||
| self.coverage.report(include=["./*"], show_missing=True) | ||
|
|
||
| async def get_schema(self) -> UiPathRuntimeSchema: | ||
| if not self.schema: | ||
| temp_runtime = await self.factory.new_runtime( | ||
| entrypoint=self.context.entrypoint or "", | ||
| runtime_id="default", | ||
| ) | ||
| async def _ensure_metadata_loaded(self) -> None: | ||
| """Load metadata (schema, agent model) from a single temporary runtime. | ||
|
|
||
| This method creates one temporary runtime to fetch both schema and agent | ||
| model, avoiding the overhead of creating multiple runtimes for metadata | ||
| queries. Results are cached for subsequent access. | ||
| """ | ||
| if self._metadata_loaded: | ||
| return | ||
|
|
||
| temp_runtime = await self.factory.new_runtime( | ||
| entrypoint=self.context.entrypoint or "", | ||
| runtime_id="metadata-query", | ||
| ) | ||
| try: | ||
| self.schema = await temp_runtime.get_schema() | ||
| self._agent_model = self._find_agent_model_in_runtime(temp_runtime) | ||
| if self._agent_model: | ||
| logger.debug(f"Got agent model from runtime: {self._agent_model}") | ||
| self._metadata_loaded = True | ||
| finally: | ||
| await temp_runtime.dispose() | ||
|
|
||
| async def get_schema(self) -> UiPathRuntimeSchema: | ||
| await self._ensure_metadata_loaded() | ||
| if self.schema is None: | ||
| raise ValueError("Schema could not be loaded") | ||
| return self.schema | ||
|
|
||
| @contextmanager | ||
|
|
@@ -232,7 +283,7 @@ async def initiate_evaluation( | |
| evaluation_set, _ = EvalHelpers.load_eval_set( | ||
| self.context.eval_set, self.context.eval_ids | ||
| ) | ||
| evaluators = self._load_evaluators(evaluation_set) | ||
| evaluators = await self._load_evaluators(evaluation_set) | ||
|
|
||
| await self.event_bus.publish( | ||
| EvaluationEvents.CREATE_EVAL_SET_RUN, | ||
|
|
@@ -601,7 +652,48 @@ async def run_evaluator( | |
|
|
||
| return result | ||
|
|
||
| def _load_evaluators( | ||
| async def _get_agent_model(self) -> str | None: | ||
| """Get agent model from the runtime. | ||
|
|
||
| Uses the cached metadata from _ensure_metadata_loaded(), which creates | ||
| a single temporary runtime to fetch both schema and agent model. | ||
|
|
||
| Returns: | ||
| The model name from agent settings, or None if not found. | ||
| """ | ||
| try: | ||
| await self._ensure_metadata_loaded() | ||
| return self._agent_model | ||
| except Exception: | ||
| return None | ||
|
|
||
| def _find_agent_model_in_runtime(self, runtime: Any) -> str | None: | ||
| """Recursively search for get_agent_model in runtime and its delegates. | ||
|
|
||
| Runtimes may be wrapped (e.g., ResumableRuntime wraps TelemetryWrapper | ||
| which wraps the base runtime). This method traverses the wrapper chain | ||
| to find a runtime that implements LLMAgentRuntimeProtocol. | ||
|
|
||
| Args: | ||
| runtime: The runtime to check (may be a wrapper) | ||
|
|
||
| Returns: | ||
| The model name if found, None otherwise. | ||
| """ | ||
| # Check if this runtime implements the protocol | ||
| if isinstance(runtime, LLMAgentRuntimeProtocol): | ||
| return runtime.get_agent_model() | ||
|
|
||
| # Check for delegate property (used by UiPathResumableRuntime, TelemetryRuntimeWrapper) | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I'd prefer adding this method in UiPathResumableRuntime and TelemetryRuntimeWrapper, but we can do this later. |
||
| delegate = getattr(runtime, "delegate", None) or getattr( | ||
| runtime, "_delegate", None | ||
| ) | ||
| if delegate is not None: | ||
| return self._find_agent_model_in_runtime(delegate) | ||
|
|
||
| return None | ||
|
|
||
| async def _load_evaluators( | ||
| self, evaluation_set: EvaluationSet | ||
| ) -> list[BaseEvaluator[Any, Any, Any]]: | ||
| """Load evaluators referenced by the evaluation set.""" | ||
|
|
@@ -611,6 +703,9 @@ def _load_evaluators( | |
| raise ValueError("eval_set cannot be None") | ||
| evaluators_dir = Path(eval_set).parent.parent / "evaluators" | ||
|
|
||
| # Load agent model for 'same-as-agent' resolution in legacy evaluators | ||
| agent_model = await self._get_agent_model() | ||
|
|
||
| # If evaluatorConfigs is specified, use that (new field with weights) | ||
| # Otherwise, fall back to evaluatorRefs (old field without weights) | ||
| if ( | ||
|
|
@@ -638,7 +733,9 @@ def _load_evaluators( | |
| try: | ||
| evaluator_id = data.get("id") | ||
| if evaluator_id in evaluator_ref_ids: | ||
| evaluator = EvaluatorFactory.create_evaluator(data, evaluators_dir) | ||
| evaluator = EvaluatorFactory.create_evaluator( | ||
| data, evaluators_dir, agent_model=agent_model | ||
| ) | ||
| evaluators.append(evaluator) | ||
| found_evaluator_ids.add(evaluator_id) | ||
| except Exception as e: | ||
|
|
||
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.