diff --git a/backend/add_users_to_db.py b/backend/add_users_to_db.py
index 1efbe80..82235e3 100644
--- a/backend/add_users_to_db.py
+++ b/backend/add_users_to_db.py
@@ -1,8 +1,11 @@
 import asyncio
 import os
 from datetime import datetime, timezone
+from typing import Union
 
 from redis import asyncio as aioredis
+from sqlalchemy import select
+from sqlalchemy.orm import Session
 
 from app.config import REDIS_HOST
 from app.database import get_session
@@ -33,7 +36,7 @@ async def async_redis_operations(key: str, value: int | None) -> None:
     await redis.aclose()
 
 
-def run_redis_async_tasks(key: str, value: int | str) -> None:
+def run_redis_async_tasks(key: str, value: Union[int, str]) -> None:
     """
     Run asynchronous Redis operations to set the remaining API calls for a user.
     """
@@ -43,6 +46,103 @@ def run_redis_async_tasks(key: str, value: int | str) -> None:
     loop.run_until_complete(async_redis_operations(key, value_int))
 
 
+def ensure_default_workspace(db_session: Session, user_db: UserDB) -> None:
+    """
+    Ensure that a user has a default workspace.
+
+    Parameters
+    ----------
+    db_session
+        The database session.
+    user_db
+        The user DB record.
+    """
+    # Check if user already has a workspace
+    stmt = select(UserWorkspaceDB).where(UserWorkspaceDB.user_id == user_db.user_id)
+    result = db_session.execute(stmt)
+    existing_workspace = result.scalar_one_or_none()
+
+    if existing_workspace:
+        logger.info(
+            f"User {user_db.username} already has workspace relationship: "
+            f"{existing_workspace.workspace_id}"
+        )
+        # Check if any workspace is set as default
+        stmt = select(UserWorkspaceDB).where(
+            UserWorkspaceDB.user_id == user_db.user_id,
+            UserWorkspaceDB.default_workspace,
+        )
+        result = db_session.execute(stmt)
+        default_workspace = result.scalar_one_or_none()
+
+        if default_workspace:
+            logger.info(
+                f"User {user_db.username} already has default workspace: "
+                f"{default_workspace.workspace_id}"
+            )
+            return
+        else:
+            # Set first workspace as default
+            existing_workspace.default_workspace = True
+            db_session.add(existing_workspace)
+            db_session.commit()
+            logger.info(
+                f"Set workspace {existing_workspace.workspace_id} as default for "
+                f"{user_db.username}"
+            )
+            return
+
+    # Create a default workspace for the user
+    workspace_name = f"{user_db.username}'s Workspace"
+
+    # Check if workspace with this name already exists
+    stmt = select(WorkspaceDB).where(WorkspaceDB.workspace_name == workspace_name)
+    result = db_session.execute(stmt)
+    existing_workspace_db = result.scalar_one_or_none()
+
+    if existing_workspace_db:
+        workspace_db = existing_workspace_db
+        logger.info(
+            f"Workspace '{workspace_name}' already exists with ID "
+            f"{workspace_db.workspace_id}"
+        )
+    else:
+        # Create new workspace
+        workspace_db = WorkspaceDB(
+            workspace_name=workspace_name,
+            api_daily_quota=100,
+            content_quota=10,
+            created_datetime_utc=datetime.now(timezone.utc),
+            updated_datetime_utc=datetime.now(timezone.utc),
+            is_default=True,
+            hashed_api_key=get_key_hash("workspace-api-key-" + workspace_name),
+            api_key_first_characters="works",
+            api_key_updated_datetime_utc=datetime.now(timezone.utc),
+            api_key_rotated_by_user_id=user_db.user_id,
+        )
+        db_session.add(workspace_db)
+        db_session.commit()
+        logger.info(
+            f"Created workspace '{workspace_name}' with ID {workspace_db.workspace_id}"
+        )
+
+    # Create user-workspace relationship
+    user_workspace = UserWorkspaceDB(
+        user_id=user_db.user_id,
+        workspace_id=workspace_db.workspace_id,
+        user_role=UserRoles.ADMIN,
+        default_workspace=True,
+        created_datetime_utc=datetime.now(timezone.utc),
+        updated_datetime_utc=datetime.now(timezone.utc),
+    )
+    db_session.add(user_workspace)
+    db_session.commit()
+    logger.info(
+        f"Created workspace relationship for user {user_db.username} with workspace "
+        f"{workspace_db.workspace_id}"
+    )
+
+
 if __name__ == "__main__":
     db_session = next(get_session())
 
diff --git a/backend/app/__init__.py b/backend/app/__init__.py
index 37790a8..88e9dfd 100644
--- a/backend/app/__init__.py
+++ b/backend/app/__init__.py
@@ -5,8 +5,9 @@
 from fastapi.middleware.cors import CORSMiddleware
 from redis import asyncio as aioredis
 
-from . import auth, bayes_ab, contextual_mab, mab, messages
+from . import auth, messages
 from .config import BACKEND_ROOT_PATH, DOMAIN, REDIS_HOST
+from .experiments.routers import router as experiments_router
 from .users.routers import (
     router as users_router,
 )  # to avoid circular imports
@@ -56,9 +57,7 @@ def create_app() -> FastAPI:
         expose_headers=["*"],
     )
 
-    app.include_router(mab.router)
-    app.include_router(contextual_mab.router)
-    app.include_router(bayes_ab.router)
+    app.include_router(experiments_router)
     app.include_router(auth.router)
     app.include_router(users_router)
     app.include_router(messages.router)
diff --git a/backend/app/bayes_ab/__init__.py b/backend/app/bayes_ab/__init__.py
deleted file mode 100644
index fa07d07..0000000
--- a/backend/app/bayes_ab/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .routers import router  # noqa: F401
diff --git a/backend/app/bayes_ab/models.py b/backend/app/bayes_ab/models.py
deleted file mode 100644
index 8caee04..0000000
--- a/backend/app/bayes_ab/models.py
+++ /dev/null
@@ -1,433 +0,0 @@
-from datetime import datetime, timezone
-from typing import Sequence
-
-from sqlalchemy import (
-    Boolean,
-    Float,
-    ForeignKey,
-    and_,
-    delete,
-    select,
-)
-from sqlalchemy.ext.asyncio import AsyncSession
-from sqlalchemy.orm import Mapped, mapped_column, relationship
-
-from ..models import (
-    ArmBaseDB,
-    DrawsBaseDB,
-    ExperimentBaseDB,
-    NotificationsDB,
-)
-from ..schemas import ObservationType
-from .schemas import BayesianAB
-
-
-class BayesianABDB(ExperimentBaseDB):
-    """
-    ORM for managing experiments.
-    """
-
-    __tablename__ = "bayes_ab_experiments"
-
-    experiment_id: Mapped[int] = mapped_column(
-        ForeignKey("experiments_base.experiment_id", ondelete="CASCADE"),
-        primary_key=True,
-        nullable=False,
-    )
-
-    arms: Mapped[list["BayesianABArmDB"]] = relationship(
-        "BayesianABArmDB", back_populates="experiment", lazy="selectin"
-    )
-
-    draws: Mapped[list["BayesianABDrawDB"]] = relationship(
-        "BayesianABDrawDB", back_populates="experiment", lazy="joined"
-    )
-
-    __mapper_args__ = {"polymorphic_identity": "bayes_ab_experiments"}
-
-    def to_dict(self) -> dict:
-        """
-        Convert the ORM object to a dictionary.
-        """
-        return {
-            "experiment_id": self.experiment_id,
-            "user_id": self.user_id,
-            "workspace_id": self.workspace_id,
-            "name": self.name,
-            "description": self.description,
-            "sticky_assignment": self.sticky_assignment,
-            "auto_fail": self.auto_fail,
-            "auto_fail_value": self.auto_fail_value,
-            "auto_fail_unit": self.auto_fail_unit,
-            "created_datetime_utc": self.created_datetime_utc,
-            "is_active": self.is_active,
-            "n_trials": self.n_trials,
-            "arms": [arm.to_dict() for arm in self.arms],
-            "prior_type": self.prior_type,
-            "reward_type": self.reward_type,
-        }
-
-
-class BayesianABArmDB(ArmBaseDB):
-    """
-    ORM for managing arms.
- """ - - __tablename__ = "bayes_ab_arms" - - arm_id: Mapped[int] = mapped_column( - ForeignKey("arms_base.arm_id", ondelete="CASCADE"), - primary_key=True, - nullable=False, - ) - - # prior variables for AB arms - mu_init: Mapped[float] = mapped_column(Float, nullable=False) - sigma_init: Mapped[float] = mapped_column(Float, nullable=False) - mu: Mapped[float] = mapped_column(Float, nullable=False) - sigma: Mapped[float] = mapped_column(Float, nullable=False) - is_treatment_arm: Mapped[bool] = mapped_column( - Boolean, nullable=False, default=False - ) - - experiment: Mapped[BayesianABDB] = relationship( - "BayesianABDB", back_populates="arms", lazy="joined" - ) - draws: Mapped[list["BayesianABDrawDB"]] = relationship( - "BayesianABDrawDB", back_populates="arm", lazy="joined" - ) - - __mapper_args__ = {"polymorphic_identity": "bayes_ab_arms"} - - def to_dict(self) -> dict: - """ - Convert the ORM object to a dictionary. - """ - return { - "arm_id": self.arm_id, - "name": self.name, - "description": self.description, - "mu_init": self.mu_init, - "sigma_init": self.sigma_init, - "mu": self.mu, - "sigma": self.sigma, - "is_treatment_arm": self.is_treatment_arm, - "draws": [draw.to_dict() for draw in self.draws], - } - - -class BayesianABDrawDB(DrawsBaseDB): - """ - ORM for managing draws of AB experiment. - """ - - __tablename__ = "bayes_ab_draws" - - draw_id: Mapped[str] = mapped_column( # Changed from int to str - ForeignKey("draws_base.draw_id", ondelete="CASCADE"), - primary_key=True, - nullable=False, - ) - - arm: Mapped[BayesianABArmDB] = relationship( - "BayesianABArmDB", back_populates="draws", lazy="joined" - ) - experiment: Mapped[BayesianABDB] = relationship( - "BayesianABDB", back_populates="draws", lazy="joined" - ) - - __mapper_args__ = {"polymorphic_identity": "bayes_ab_draws"} - - def to_dict(self) -> dict: - """ - Convert the ORM object to a dictionary. - """ - return { - "draw_id": self.draw_id, - "client_id": self.client_id, - "draw_datetime_utc": self.draw_datetime_utc, - "arm_id": self.arm_id, - "experiment_id": self.experiment_id, - "user_id": self.user_id, - "reward": self.reward, - "observation_type": self.observation_type, - "observed_datetime_utc": self.observed_datetime_utc, - } - - -async def save_bayes_ab_to_db( - ab_experiment: BayesianAB, - user_id: int, - workspace_id: int, - asession: AsyncSession, -) -> BayesianABDB: - """ - Save the A/B experiment to the database. 
- """ - arms = [ - BayesianABArmDB( - name=arm.name, - description=arm.description, - mu_init=arm.mu_init, - sigma_init=arm.sigma_init, - n_outcomes=arm.n_outcomes, - is_treatment_arm=arm.is_treatment_arm, - mu=arm.mu_init, - sigma=arm.sigma_init, - user_id=user_id, - ) - for arm in ab_experiment.arms - ] - - bayes_ab_db = BayesianABDB( - name=ab_experiment.name, - description=ab_experiment.description, - user_id=user_id, - workspace_id=workspace_id, - is_active=ab_experiment.is_active, - created_datetime_utc=datetime.now(timezone.utc), - n_trials=0, - arms=arms, - sticky_assignment=ab_experiment.sticky_assignment, - auto_fail=ab_experiment.auto_fail, - auto_fail_value=ab_experiment.auto_fail_value, - auto_fail_unit=ab_experiment.auto_fail_unit, - prior_type=ab_experiment.prior_type.value, - reward_type=ab_experiment.reward_type.value, - ) - - asession.add(bayes_ab_db) - await asession.commit() - await asession.refresh(bayes_ab_db) - - return bayes_ab_db - - -async def get_all_bayes_ab_experiments( - workspace_id: int, - asession: AsyncSession, -) -> Sequence[BayesianABDB]: - """ - Get all the A/B experiments from the database for a specific workspace. - """ - stmt = ( - select(BayesianABDB) - .where(BayesianABDB.workspace_id == workspace_id) - .order_by(BayesianABDB.experiment_id) - ) - result = await asession.execute(stmt) - return result.unique().scalars().all() - - -async def get_bayes_ab_experiment_by_id( - experiment_id: int, - workspace_id: int, - asession: AsyncSession, -) -> BayesianABDB | None: - """ - Get the A/B experiment by id from a specific workspace. - """ - conditions = [ - BayesianABDB.workspace_id == workspace_id, - BayesianABDB.experiment_id == experiment_id, - ] - - stmt = select(BayesianABDB).where(and_(*conditions)) - result = await asession.execute(stmt) - return result.unique().scalar_one_or_none() - - -async def delete_bayes_ab_experiment_by_id( - experiment_id: int, - workspace_id: int, - asession: AsyncSession, -) -> None: - """ - Delete the A/B experiment by id from a specific workspace. - """ - stmt = delete(BayesianABDB).where( - and_( - BayesianABDB.workspace_id == workspace_id, - BayesianABDB.experiment_id == experiment_id, - BayesianABDB.experiment_id == ExperimentBaseDB.experiment_id, - ) - ) - await asession.execute(stmt) - - stmt = delete(NotificationsDB).where( - NotificationsDB.experiment_id == experiment_id, - ) - await asession.execute(stmt) - - stmt = delete(BayesianABDrawDB).where( - and_( - BayesianABDrawDB.draw_id == DrawsBaseDB.draw_id, - BayesianABDrawDB.experiment_id == experiment_id, - ) - ) - await asession.execute(stmt) - - stmt = delete(BayesianABArmDB).where( - and_( - BayesianABArmDB.arm_id == ArmBaseDB.arm_id, - BayesianABArmDB.experiment_id == experiment_id, - ) - ) - await asession.execute(stmt) - - await asession.commit() - return None - - -async def save_bayes_ab_observation_to_db( - draw: BayesianABDrawDB, - reward: float, - asession: AsyncSession, - observation_type: ObservationType = ObservationType.AUTO, -) -> BayesianABDrawDB: - """ - Save the A/B observation to the database. 
- """ - draw.reward = reward - draw.observed_datetime_utc = datetime.now(timezone.utc) - draw.observation_type = observation_type - - await asession.commit() - await asession.refresh(draw) - - return draw - - -async def save_bayes_ab_draw_to_db( - experiment_id: int, - arm_id: int, - draw_id: str, - client_id: str | None, - user_id: int | None, - asession: AsyncSession, - workspace_id: int | None, -) -> BayesianABDrawDB: - """ - Save a draw to the database - """ - # If user_id is not provided but needed, get it from the experiment - if user_id is None and workspace_id is not None: - experiment = await get_bayes_ab_experiment_by_id( - experiment_id=experiment_id, - workspace_id=workspace_id, - asession=asession, - ) - if experiment: - user_id = experiment.user_id - else: - raise ValueError(f"Experiment with id {experiment_id} not found") - - if user_id is None: - raise ValueError("User ID must be provided or derivable from experiment") - - draw_datetime_utc: datetime = datetime.now(timezone.utc) - - draw = BayesianABDrawDB( - draw_id=draw_id, - client_id=client_id, - experiment_id=experiment_id, - user_id=user_id, - arm_id=arm_id, - draw_datetime_utc=draw_datetime_utc, - ) - - asession.add(draw) - await asession.commit() - await asession.refresh(draw) - - return draw - - -async def get_bayes_ab_obs_by_experiment_arm_id( - experiment_id: int, - arm_id: int, - asession: AsyncSession, -) -> Sequence[BayesianABDrawDB]: - """ - Get the observations of a specific arm in an A/B experiment. - """ - stmt = ( - select(BayesianABDrawDB) - .where( - and_( - BayesianABDrawDB.experiment_id == experiment_id, - BayesianABDrawDB.arm_id == arm_id, - BayesianABDrawDB.reward.is_not(None), - ) - ) - .order_by(BayesianABDrawDB.observed_datetime_utc) - ) - - result = await asession.execute(stmt) - return result.unique().scalars().all() - - -async def get_bayes_ab_obs_by_experiment_id( - experiment_id: int, - workspace_id: int, - asession: AsyncSession, -) -> Sequence[BayesianABDrawDB]: - """ - Get the observations of the A/B experiment. - Verified to belong to the specified workspace. - """ - # First, verify experiment belongs to the workspace - experiment = await get_bayes_ab_experiment_by_id( - experiment_id=experiment_id, - workspace_id=workspace_id, - asession=asession, - ) - - if experiment is None: - # Return empty list if experiment doesn't exist or doesn't belong to workspace - return [] - - # Get observations for this experiment - stmt = ( - select(BayesianABDrawDB) - .where( - and_( - BayesianABDrawDB.experiment_id == experiment_id, - BayesianABDrawDB.reward.is_not(None), - ) - ) - .order_by(BayesianABDrawDB.observed_datetime_utc) - ) - - result = await asession.execute(stmt) - return result.unique().scalars().all() - - -async def get_bayes_ab_draw_by_id( - draw_id: str, asession: AsyncSession -) -> BayesianABDrawDB | None: - """ - Get a draw by its ID - """ - statement = select(BayesianABDrawDB).where(BayesianABDrawDB.draw_id == draw_id) - result = await asession.execute(statement) - - return result.unique().scalar_one_or_none() - - -async def get_bayes_ab_draw_by_client_id( - client_id: str, experiment_id: int, asession: AsyncSession -) -> BayesianABDrawDB | None: - """ - Get a draw by its client ID for a specific experiment. 
- """ - statement = select(BayesianABDrawDB).where( - and_( - BayesianABDrawDB.client_id == client_id, - BayesianABDrawDB.client_id.is_not(None), - BayesianABDrawDB.experiment_id == experiment_id, - ) - ) - result = await asession.execute(statement) - - return result.unique().scalars().first() diff --git a/backend/app/bayes_ab/observation.py b/backend/app/bayes_ab/observation.py deleted file mode 100644 index 212dc91..0000000 --- a/backend/app/bayes_ab/observation.py +++ /dev/null @@ -1,75 +0,0 @@ -from datetime import datetime, timezone - -from fastapi.exceptions import HTTPException -from sqlalchemy.ext.asyncio import AsyncSession - -from ..schemas import ObservationType, Outcome, RewardLikelihood -from .models import ( - BayesianABArmDB, - BayesianABDB, - BayesianABDrawDB, - save_bayes_ab_observation_to_db, -) -from .schemas import ( - BayesABArmResponse, - BayesianABSample, -) - - -async def update_based_on_outcome( - experiment: BayesianABDB, - draw: BayesianABDrawDB, - outcome: float, - asession: AsyncSession, - observation: ObservationType, -) -> BayesABArmResponse: - """ - Update the arm parameters based on the outcome. - - This is a helper function to allow `auto_fail` job to call - it as well. - """ - update_experiment_metadata(experiment) - - arm = get_arm_from_experiment(experiment, draw.arm_id) - arm.n_outcomes += 1 - - experiment_data = BayesianABSample.model_validate(experiment) - if experiment_data.reward_type == RewardLikelihood.BERNOULLI: - Outcome(outcome) # Check if reward is 0 or 1 - - await save_updated_data(arm, draw, outcome, asession) - - return BayesABArmResponse.model_validate(arm) - - -def update_experiment_metadata(experiment: BayesianABDB) -> None: - """ - Update the experiment metadata with new information. - """ - experiment.n_trials += 1 - experiment.last_trial_datetime_utc = datetime.now(tz=timezone.utc) - - -def get_arm_from_experiment(experiment: BayesianABDB, arm_id: int) -> BayesianABArmDB: - """ - Get and validate the arm from the experiment. - """ - arms = [a for a in experiment.arms if a.arm_id == arm_id] - if not arms: - raise HTTPException(status_code=404, detail=f"Arm with id {arm_id} not found") - return arms[0] - - -async def save_updated_data( - arm: BayesianABArmDB, - draw: BayesianABDrawDB, - outcome: float, - asession: AsyncSession, -) -> None: - """ - Save the updated data to the database. 
- """ - asession.add(arm) - await asession.commit() - await save_bayes_ab_observation_to_db(draw, outcome, asession) diff --git a/backend/app/bayes_ab/routers.py b/backend/app/bayes_ab/routers.py deleted file mode 100644 index c1041a2..0000000 --- a/backend/app/bayes_ab/routers.py +++ /dev/null @@ -1,524 +0,0 @@ -from typing import Annotated, Optional -from uuid import uuid4 - -from fastapi import APIRouter, Depends -from fastapi.exceptions import HTTPException -from sqlalchemy.ext.asyncio import AsyncSession - -from ..auth.dependencies import ( - authenticate_workspace_key, - get_verified_user, - require_admin_role, -) -from ..database import get_async_session -from ..models import get_notifications_from_db, save_notifications_to_db -from ..schemas import NotificationsResponse, ObservationType -from ..users.models import UserDB -from ..workspaces.models import ( - WorkspaceDB, - get_user_default_workspace, -) -from .models import ( - BayesianABDB, - BayesianABDrawDB, - delete_bayes_ab_experiment_by_id, - get_all_bayes_ab_experiments, - get_bayes_ab_draw_by_client_id, - get_bayes_ab_draw_by_id, - get_bayes_ab_experiment_by_id, - get_bayes_ab_obs_by_experiment_id, - save_bayes_ab_draw_to_db, - save_bayes_ab_to_db, -) -from .observation import update_based_on_outcome -from .sampling_utils import choose_arm, update_arm_params -from .schemas import ( - BayesABArmResponse, - BayesianAB, - BayesianABDrawResponse, - BayesianABObservationResponse, - BayesianABResponse, - BayesianABSample, -) - -router = APIRouter(prefix="/bayes_ab", tags=["Bayesian A/B Testing"]) - - -@router.post("/", response_model=BayesianABResponse) -async def create_ab_experiment( - experiment: BayesianAB, - user_db: Annotated[UserDB, Depends(require_admin_role)], - asession: AsyncSession = Depends(get_async_session), -) -> BayesianABResponse: - """ - Create a new experiment in the user's current workspace. - """ - workspace_db = await get_user_default_workspace(asession=asession, user_db=user_db) - - if workspace_db is None: - raise HTTPException( - status_code=404, - detail="Workspace not found for the user.", - ) - - bayes_ab = await save_bayes_ab_to_db( - experiment, user_db.user_id, workspace_db.workspace_id, asession - ) - - notifications = await save_notifications_to_db( - experiment_id=bayes_ab.experiment_id, - user_id=user_db.user_id, - notifications=experiment.notifications, - asession=asession, - ) - - bayes_ab_dict = bayes_ab.to_dict() - bayes_ab_dict["notifications"] = [n.to_dict() for n in notifications] - - return BayesianABResponse.model_validate(bayes_ab_dict) - - -@router.get("/", response_model=list[BayesianABResponse]) -async def get_bayes_abs( - user_db: Annotated[UserDB, Depends(get_verified_user)], - asession: AsyncSession = Depends(get_async_session), -) -> list[BayesianABResponse]: - """ - Get details of all experiments in the user's current workspace. 
- """ - workspace_db = await get_user_default_workspace(asession=asession, user_db=user_db) - - if workspace_db is None: - raise HTTPException( - status_code=404, - detail="Workspace not found for the user.", - ) - - experiments = await get_all_bayes_ab_experiments( - workspace_db.workspace_id, asession - ) - - all_experiments = [] - for exp in experiments: - exp_dict = exp.to_dict() - exp_dict["notifications"] = [ - n.to_dict() - for n in await get_notifications_from_db( - exp.experiment_id, exp.user_id, asession - ) - ] - all_experiments.append( - BayesianABResponse.model_validate( - { - **exp_dict, - "notifications": [ - NotificationsResponse(**n) for n in exp_dict["notifications"] - ], - } - ) - ) - return all_experiments - - -@router.get("/{experiment_id}", response_model=BayesianABResponse) -async def get_bayes_ab( - experiment_id: int, - user_db: Annotated[UserDB, Depends(get_verified_user)], - asession: AsyncSession = Depends(get_async_session), -) -> BayesianABResponse: - """ - Get details of experiment with the provided `experiment_id`. - """ - workspace_db = await get_user_default_workspace(asession=asession, user_db=user_db) - - if workspace_db is None: - raise HTTPException( - status_code=404, - detail="Workspace not found for the user.", - ) - - experiment = await get_bayes_ab_experiment_by_id( - experiment_id, workspace_db.workspace_id, asession - ) - - if experiment is None: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - - experiment_dict = experiment.to_dict() - experiment_dict["notifications"] = [ - n.to_dict() - for n in await get_notifications_from_db( - experiment.experiment_id, experiment.user_id, asession - ) - ] - - return BayesianABResponse.model_validate(experiment_dict) - - -@router.delete("/{experiment_id}", response_model=dict) -async def delete_bayes_ab( - experiment_id: int, - user_db: Annotated[UserDB, Depends(require_admin_role)], - asession: AsyncSession = Depends(get_async_session), -) -> dict: - """ - Delete the experiment with the provided `experiment_id`. - """ - try: - workspace_db = await get_user_default_workspace( - asession=asession, user_db=user_db - ) - - if workspace_db is None: - raise HTTPException( - status_code=404, - detail="Workspace not found for the user.", - ) - - experiment = await get_bayes_ab_experiment_by_id( - experiment_id, workspace_db.workspace_id, asession - ) - if experiment is None: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - - await delete_bayes_ab_experiment_by_id( - experiment_id, workspace_db.workspace_id, asession - ) - - return {"message": f"Experiment with id {experiment_id} deleted successfully."} - except Exception as e: - raise HTTPException(status_code=500, detail=f"Error: {e}") from e - - -@router.get("/{experiment_id}/draw", response_model=BayesianABDrawResponse) -async def draw_arm( - experiment_id: int, - draw_id: Optional[str] = None, - client_id: Optional[str] = None, - workspace_db: WorkspaceDB = Depends(authenticate_workspace_key), - asession: AsyncSession = Depends(get_async_session), -) -> BayesianABDrawResponse: - """ - Get which arm to pull next for provided experiment. 
- """ - # Get workspace from user context - workspace_id = workspace_db.workspace_id - - experiment = await get_bayes_ab_experiment_by_id( - experiment_id, workspace_id, asession - ) - - if experiment is None: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - - if experiment.sticky_assignment and not client_id: - raise HTTPException( - status_code=400, - detail="Client ID is required for sticky assignment.", - ) - - experiment_data = BayesianABSample.model_validate(experiment) - chosen_arm = choose_arm(experiment=experiment_data) - chosen_arm_id = experiment.arms[chosen_arm].arm_id - if experiment.sticky_assignment and client_id: - # Check if the client_id is already assigned to an arm - previous_draw = await get_bayes_ab_draw_by_client_id( - client_id=client_id, - experiment_id=experiment_id, - asession=asession, - ) - if previous_draw: - chosen_arm_id = previous_draw.arm_id - - # Check for existing draws - if draw_id is None: - draw_id = str(uuid4()) - - existing_draw = await get_bayes_ab_draw_by_id(draw_id=draw_id, asession=asession) - if existing_draw: - raise HTTPException( - status_code=400, - detail=f"Draw with id {draw_id} already exists for \ - experiment {experiment_id}", - ) - - try: - await save_bayes_ab_draw_to_db( - experiment_id=experiment.experiment_id, - arm_id=chosen_arm_id, - draw_id=draw_id, - client_id=client_id, - user_id=None, - asession=asession, - workspace_id=workspace_id, - ) - except Exception as e: - raise HTTPException( - status_code=500, - detail=f"Error saving draw to database: {e}", - ) from e - - return BayesianABDrawResponse.model_validate( - { - "draw_id": draw_id, - "client_id": client_id, - "arm": BayesABArmResponse.model_validate( - [arm for arm in experiment.arms if arm.arm_id == chosen_arm_id][0], - ), - } - ) - - -@router.put("/{experiment_id}/{draw_id}/{outcome}", response_model=BayesABArmResponse) -async def save_observation_for_arm( - experiment_id: int, - draw_id: str, - outcome: float, - workspace_db: WorkspaceDB = Depends(authenticate_workspace_key), - asession: AsyncSession = Depends(get_async_session), -) -> BayesABArmResponse: - """ - Update the arm with the provided `arm_id` for the given - `experiment_id` based on the `outcome`. - """ - # Get workspace from user context - workspace_id = workspace_db.workspace_id - - # Get and validate experiment - experiment, draw = await validate_experiment_and_draw( - experiment_id=experiment_id, - draw_id=draw_id, - workspace_id=workspace_id, - asession=asession, - ) - - return await update_based_on_outcome( - experiment=experiment, - draw=draw, - outcome=outcome, - asession=asession, - observation=ObservationType.USER, - ) - - -@router.get( - "/{experiment_id}/outcomes", - response_model=list[BayesianABObservationResponse], -) -async def get_outcomes( - experiment_id: int, - workspace_db: WorkspaceDB = Depends(authenticate_workspace_key), - asession: AsyncSession = Depends(get_async_session), -) -> list[BayesianABObservationResponse]: - """ - Get the outcomes for the experiment. 
- """ - # Get workspace from user context - workspace_id = workspace_db.workspace_id - - experiment = await get_bayes_ab_experiment_by_id( - experiment_id, workspace_id, asession - ) - if not experiment: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - - rewards = await get_bayes_ab_obs_by_experiment_id( - experiment_id=experiment.experiment_id, - workspace_id=workspace_id, - asession=asession, - ) - - return [BayesianABObservationResponse.model_validate(reward) for reward in rewards] - - -@router.get( - "/{experiment_id}/arms", - response_model=list[BayesABArmResponse], -) -async def update_arms( - experiment_id: int, - workspace_db: WorkspaceDB = Depends(authenticate_workspace_key), - asession: AsyncSession = Depends(get_async_session), -) -> list[BayesABArmResponse]: - """ - Get the outcomes for the experiment. - """ - # Get workspace from user context - workspace_id = workspace_db.workspace_id - - # Check experiment params - experiment = await get_bayes_ab_experiment_by_id( - experiment_id, workspace_id, asession - ) - if not experiment: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - - # Prepare data for arms update - ( - rewards, - treatments, - treatment_mu, - treatment_sigma, - control_mu, - control_sigma, - ) = await prepare_data_for_arms_update( - experiment=experiment, - workspace_id=workspace_id, - asession=asession, - ) - - # Make updates - arms_data = await make_updates_to_arms( - experiment=experiment, - treatment_mu=treatment_mu, - treatment_sigma=treatment_sigma, - control_mu=control_mu, - control_sigma=control_sigma, - rewards=rewards, - treatments=treatments, - asession=asession, - ) - - return arms_data - - -# ---- Helper functions ---- - - -async def validate_experiment_and_draw( - experiment_id: int, - draw_id: str, - workspace_id: int, - asession: AsyncSession, -) -> tuple[BayesianABDB, BayesianABDrawDB]: - """Validate the experiment and draw""" - experiment = await get_bayes_ab_experiment_by_id( - experiment_id, workspace_id, asession - ) - if experiment is None: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - - draw = await get_bayes_ab_draw_by_id(draw_id=draw_id, asession=asession) - if draw is None: - raise HTTPException(status_code=404, detail=f"Draw with id {draw_id} not found") - - if draw.experiment_id != experiment_id: - raise HTTPException( - status_code=400, - detail=( - f"Draw with id {draw_id} does not belong " - f"to experiment with id {experiment_id}", - ), - ) - - if draw.reward is not None: - raise HTTPException( - status_code=400, - detail=f"Draw with id {draw_id} already has an outcome.", - ) - - return experiment, draw - - -async def prepare_data_for_arms_update( - experiment: BayesianABDB, - workspace_id: int, - asession: AsyncSession, -) -> tuple[list[float], list[float], float, float, float, float]: - """ - Prepare the data for arm update. 
- """ - # Get observations - observations = await get_bayes_ab_obs_by_experiment_id( - experiment_id=experiment.experiment_id, - workspace_id=workspace_id, - asession=asession, - ) - - if not observations: - raise HTTPException( - status_code=404, - detail=f"No observations found for experiment {experiment.experiment_id}", - ) - - rewards = [obs.reward for obs in observations] - - # Get treatment and control arms - arms_dict = { - arm.arm_id: 1.0 if arm.is_treatment_arm else 0.0 for arm in experiment.arms - } - - # Get params - treatment_mu, treatment_sigma = [ - (arm.mu_init, arm.sigma_init) for arm in experiment.arms if arm.is_treatment_arm - ][0] - control_mu, control_sigma = [ - (arm.mu_init, arm.sigma_init) - for arm in experiment.arms - if not arm.is_treatment_arm - ][0] - - treatments = [arms_dict[obs.arm_id] for obs in observations] - - return ( - rewards, - treatments, - treatment_mu, - treatment_sigma, - control_mu, - control_sigma, - ) - - -async def make_updates_to_arms( - experiment: BayesianABDB, - treatment_mu: float, - treatment_sigma: float, - control_mu: float, - control_sigma: float, - rewards: list[float], - treatments: list[float], - asession: AsyncSession, -) -> list[BayesABArmResponse]: - """ - Make updates to the arms of the experiment. - """ - # Make updates - experiment_data = BayesianABSample.model_validate(experiment) - new_means, new_sigmas = update_arm_params( - experiment=experiment_data, - mus=[treatment_mu, control_mu], - sigmas=[treatment_sigma, control_sigma], - rewards=rewards, - treatments=treatments, - ) - - arms_data = [] - for arm in experiment.arms: - if arm.is_treatment_arm: - arm.mu = new_means[0] - arm.sigma = new_sigmas[0] - else: - arm.mu = new_means[1] - arm.sigma = new_sigmas[1] - - asession.add(arm) - arms_data.append(BayesABArmResponse.model_validate(arm)) - - asession.add(experiment) - - await asession.commit() - - return arms_data diff --git a/backend/app/bayes_ab/sampling_utils.py b/backend/app/bayes_ab/sampling_utils.py deleted file mode 100644 index 0416f64..0000000 --- a/backend/app/bayes_ab/sampling_utils.py +++ /dev/null @@ -1,126 +0,0 @@ -import numpy as np -from scipy.optimize import minimize - -from ..schemas import ArmPriors, ContextLinkFunctions, RewardLikelihood -from .schemas import BayesianABSample - - -def _update_arms( - mus: np.ndarray, - sigmas: np.ndarray, - rewards: np.ndarray, - treatments: np.ndarray, - link_function: ContextLinkFunctions, - reward_likelihood: RewardLikelihood, - prior_type: ArmPriors, -) -> tuple[list, list]: - """ - Get arm posteriors. - - Parameters - ---------- - mu : np.ndarray - The mean of the Normal distribution. - sigma : np.ndarray - The standard deviation of the Normal distribution. - rewards : np.ndarray - The rewards. - treatments : np.ndarray - The treatments (binary-valued). - link_function : ContextLinkFunctions - The link function for parameters to rewards. - reward_likelihood : RewardLikelihood - The likelihood function of the reward. - prior_type : ArmPriors - The prior type of the arm. - """ - - # TODO we explicitly assume that there is only 1 treatment arm - def objective(treatment_effect_arms_bias: np.ndarray) -> float: - """ - Objective function for arm to outcome. - - Parameters - ---------- - treatment_effect : float - The treatment effect. 
- """ - treatment, control, bias = treatment_effect_arms_bias - - # log prior - log_prior = prior_type( - np.array([treatment, control]), mu=mus, covariance=np.diag(sigmas) - ) - - # log likelihood - log_likelihood = reward_likelihood( - rewards, - link_function(treatment * treatments + control * (1 - treatments) + bias), - ) - return -(log_prior + log_likelihood) - - result = minimize(objective, x0=np.zeros(3), method="L-BFGS-B", hess="2-point") - new_treatment_mean, new_control_mean, _ = result.x - new_treatment_sigma, new_control_sigma, _ = np.sqrt( - np.diag(result.hess_inv.todense()) # type: ignore - ) - return [new_treatment_mean, new_control_mean], [ - new_treatment_sigma, - new_control_sigma, - ] - - -def choose_arm(experiment: BayesianABSample) -> int: - """ - Choose arm based on posterior - - Parameters - ---------- - experiment : BayesianABSample - The experiment data containing priors and rewards for each arm. - """ - index = np.random.choice(len(experiment.arms), size=1) - return int(index[0]) - - -def update_arm_params( - experiment: BayesianABSample, - mus: list[float], - sigmas: list[float], - rewards: list[float], - treatments: list[float], -) -> tuple[list, list]: - """ - Update the arm parameters based on the reward type. - - Parameters - ---------- - experiment : BayesianABSample - The experiment data containing arms, prior type and reward - type information. - mus : list[float] - The means of the arms. - sigmas : list[float] - The standard deviations of the arms. - rewards : list[float] - The rewards. - treatments : list[float] - Which arm was applied corresponding to the reward. - """ - link_function = None - if experiment.reward_type == RewardLikelihood.NORMAL: - link_function = ContextLinkFunctions.NONE - elif experiment.reward_type == RewardLikelihood.BERNOULLI: - link_function = ContextLinkFunctions.LOGISTIC - else: - raise ValueError("Invalid reward type") - - return _update_arms( - mus=np.array(mus), - sigmas=np.array(sigmas), - rewards=np.array(rewards), - treatments=np.array(treatments), - link_function=link_function, - reward_likelihood=experiment.reward_type, - prior_type=experiment.prior_type, - ) diff --git a/backend/app/bayes_ab/schemas.py b/backend/app/bayes_ab/schemas.py deleted file mode 100644 index ef55d07..0000000 --- a/backend/app/bayes_ab/schemas.py +++ /dev/null @@ -1,145 +0,0 @@ -from datetime import datetime -from typing import Optional, Self - -from pydantic import BaseModel, ConfigDict, Field, model_validator - -from ..mab.schemas import ( - MABObservationResponse, - MultiArmedBanditBase, -) -from ..schemas import Notifications, NotificationsResponse, allowed_combos_bayes_ab - - -class BayesABArm(BaseModel): - """ - Pydantic model for a arm of the experiment. - """ - - name: str = Field( - max_length=150, - examples=["Arm 1"], - ) - description: str = Field( - max_length=500, - examples=["This is a description of the arm."], - ) - - mu_init: float = Field( - default=0.0, description="Mean parameter for treatment effect prior" - ) - sigma_init: float = Field( - default=1.0, description="Std dev parameter for treatment effect prior" - ) - n_outcomes: Optional[int] = Field( - default=0, - description="Number of outcomes for the arm", - examples=[0, 10, 15], - ) - is_treatment_arm: bool = Field( - default=True, - description="Is the arm a treatment arm", - examples=[True, False], - ) - - @model_validator(mode="after") - def check_values(self) -> Self: - """ - Check if the values are unique and set new attributes. 
- """ - if self.sigma_init is not None and self.sigma_init <= 0: - raise ValueError("Std dev must be greater than 0.") - return self - - model_config = ConfigDict(from_attributes=True) - - -class BayesABArmResponse(BayesABArm): - """ - Pydantic model for a response for contextual arm creation - """ - - arm_id: int - mu: float - sigma: float - model_config = ConfigDict(from_attributes=True) - - -class BayesianAB(MultiArmedBanditBase): - """ - Pydantic model for an A/B experiment. - """ - - arms: list[BayesABArm] - notifications: Notifications - model_config = ConfigDict(from_attributes=True) - - @model_validator(mode="after") - def arms_exactly_two(self) -> Self: - """ - Validate that the experiment has exactly two arms. - """ - if len(self.arms) != 2: - raise ValueError("The experiment must have at exactly two arms.") - return self - - @model_validator(mode="after") - def check_prior_reward_type_combo(self) -> Self: - """ - Validate that the prior and reward type combination is allowed. - """ - - if (self.prior_type, self.reward_type) not in allowed_combos_bayes_ab: - raise ValueError("Prior and reward type combo not supported.") - return self - - @model_validator(mode="after") - def check_treatment_and_control_arm(self) -> Self: - """ - Validate that the experiment has at least one control arm. - """ - if sum(arm.is_treatment_arm for arm in self.arms) != 1: - raise ValueError("The experiment must have one treatment and control arm.") - return self - - -class BayesianABResponse(MultiArmedBanditBase): - """ - Pydantic model for a response for an A/B experiment. - """ - - experiment_id: int - workspace_id: int - arms: list[BayesABArmResponse] - notifications: list[NotificationsResponse] - created_datetime_utc: datetime - last_trial_datetime_utc: Optional[datetime] = None - n_trials: int - - model_config = ConfigDict(from_attributes=True) - - -class BayesianABSample(MultiArmedBanditBase): - """ - Pydantic model for a sample A/B experiment. - """ - - experiment_id: int - arms: list[BayesABArmResponse] - - -class BayesianABObservationResponse(MABObservationResponse): - """ - Pydantic model for an observation response in an A/B experiment. - """ - - pass - - -class BayesianABDrawResponse(BaseModel): - """ - Pydantic model for a draw response in an A/B experiment. - """ - - draw_id: str - client_id: str | None - arm: BayesABArmResponse diff --git a/backend/app/contextual_mab/__init__.py b/backend/app/contextual_mab/__init__.py deleted file mode 100644 index fa07d07..0000000 --- a/backend/app/contextual_mab/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .routers import router # noqa: F401 diff --git a/backend/app/contextual_mab/models.py b/backend/app/contextual_mab/models.py deleted file mode 100644 index 60cf723..0000000 --- a/backend/app/contextual_mab/models.py +++ /dev/null @@ -1,483 +0,0 @@ -from datetime import datetime, timezone -from typing import Sequence - -import numpy as np -from sqlalchemy import ( - Float, - ForeignKey, - Integer, - String, - and_, - delete, - select, -) -from sqlalchemy.dialects.postgresql import ARRAY -from sqlalchemy.ext.asyncio import AsyncSession -from sqlalchemy.orm import Mapped, mapped_column, relationship - -from ..models import ( - ArmBaseDB, - Base, - DrawsBaseDB, - ExperimentBaseDB, - NotificationsDB, -) -from ..schemas import ObservationType -from .schemas import ContextualBandit - - -class ContextualBanditDB(ExperimentBaseDB): - """ - ORM for managing contextual experiments. 
- """ - - __tablename__ = "contextual_mabs" - - experiment_id: Mapped[int] = mapped_column( - ForeignKey("experiments_base.experiment_id", ondelete="CASCADE"), - primary_key=True, - nullable=False, - ) - - arms: Mapped[list["ContextualArmDB"]] = relationship( - "ContextualArmDB", back_populates="experiment", lazy="joined" - ) - - contexts: Mapped[list["ContextDB"]] = relationship( - "ContextDB", back_populates="experiment", lazy="joined" - ) - - draws: Mapped[list["ContextualDrawDB"]] = relationship( - "ContextualDrawDB", back_populates="experiment", lazy="joined" - ) - - __mapper_args__ = {"polymorphic_identity": "contextual_mabs"} - - def to_dict(self) -> dict: - """ - Convert the ORM object to a dictionary. - """ - return { - "experiment_id": self.experiment_id, - "user_id": self.user_id, - "workspace_id": self.workspace_id, - "name": self.name, - "description": self.description, - "sticky_assignment": self.sticky_assignment, - "auto_fail": self.auto_fail, - "auto_fail_value": self.auto_fail_value, - "auto_fail_unit": self.auto_fail_unit, - "created_datetime_utc": self.created_datetime_utc, - "is_active": self.is_active, - "n_trials": self.n_trials, - "arms": [arm.to_dict() for arm in self.arms], - "contexts": [context.to_dict() for context in self.contexts], - "prior_type": self.prior_type, - "reward_type": self.reward_type, - } - - -class ContextualArmDB(ArmBaseDB): - """ - ORM for managing contextual arms of an experiment - """ - - __tablename__ = "contextual_arms" - - arm_id: Mapped[int] = mapped_column( - ForeignKey("arms_base.arm_id", ondelete="CASCADE"), - primary_key=True, - nullable=False, - ) - - # prior variables for CMAB arms - mu_init: Mapped[float] = mapped_column(Float, nullable=False) - sigma_init: Mapped[float] = mapped_column(Float, nullable=False) - mu: Mapped[list[float]] = mapped_column(ARRAY(Float), nullable=False) - covariance: Mapped[list[float]] = mapped_column(ARRAY(Float), nullable=False) - - experiment: Mapped[ContextualBanditDB] = relationship( - "ContextualBanditDB", back_populates="arms", lazy="joined" - ) - draws: Mapped[list["ContextualDrawDB"]] = relationship( - "ContextualDrawDB", back_populates="arm", lazy="joined" - ) - - __mapper_args__ = {"polymorphic_identity": "contextual_arms"} - - def to_dict(self) -> dict: - """ - Convert the ORM object to a dictionary. - """ - return { - "arm_id": self.arm_id, - "name": self.name, - "description": self.description, - "mu_init": self.mu_init, - "sigma_init": self.sigma_init, - "mu": self.mu, - "covariance": self.covariance, - "draws": [draw.to_dict() for draw in self.draws], - } - - -class ContextDB(Base): - """ - ORM for managing context for an experiment - """ - - __tablename__ = "contexts" - - context_id: Mapped[int] = mapped_column(Integer, primary_key=True, nullable=False) - experiment_id: Mapped[int] = mapped_column( - Integer, ForeignKey("contextual_mabs.experiment_id"), nullable=False - ) - user_id: Mapped[int] = mapped_column( - Integer, ForeignKey("users.user_id"), nullable=False - ) - name: Mapped[str] = mapped_column(String(length=150), nullable=False) - description: Mapped[str] = mapped_column(String(length=500), nullable=True) - value_type: Mapped[str] = mapped_column(String(length=50), nullable=False) - - experiment: Mapped[ContextualBanditDB] = relationship( - "ContextualBanditDB", back_populates="contexts", lazy="joined" - ) - - def to_dict(self) -> dict: - """ - Convert the ORM object to a dictionary. 
- """ - return { - "context_id": self.context_id, - "name": self.name, - "description": self.description, - "value_type": self.value_type, - } - - -class ContextualDrawDB(DrawsBaseDB): - """ - ORM for managing draws of an experiment - """ - - __tablename__ = "contextual_draws" - - draw_id: Mapped[str] = mapped_column( - ForeignKey("draws_base.draw_id", ondelete="CASCADE"), - primary_key=True, - nullable=False, - ) - - context_val: Mapped[list] = mapped_column(ARRAY(Float), nullable=False) - arm: Mapped[ContextualArmDB] = relationship( - "ContextualArmDB", back_populates="draws", lazy="joined" - ) - experiment: Mapped[ContextualBanditDB] = relationship( - "ContextualBanditDB", back_populates="draws", lazy="joined" - ) - - __mapper_args__ = {"polymorphic_identity": "contextual_draws"} - - def to_dict(self) -> dict: - """ - Convert the ORM object to a dictionary. - """ - return { - "draw_id": self.draw_id, - "client_id": self.client_id, - "draw_datetime_utc": self.draw_datetime_utc, - "context_val": self.context_val, - "arm_id": self.arm_id, - "experiment_id": self.experiment_id, - "user_id": self.user_id, - "reward": self.reward, - "observation_type": self.observation_type, - "observed_datetime_utc": self.observed_datetime_utc, - } - - -async def save_contextual_mab_to_db( - experiment: ContextualBandit, - user_id: int, - workspace_id: int, - asession: AsyncSession, -) -> ContextualBanditDB: - """ - Save the experiment to the database. - """ - contexts = [ - ContextDB( - name=context.name, - description=context.description, - value_type=context.value_type.value, - user_id=user_id, - ) - for context in experiment.contexts - ] - arms = [] - for arm in experiment.arms: - arms.append( - ContextualArmDB( - name=arm.name, - description=arm.description, - mu_init=arm.mu_init, - sigma_init=arm.sigma_init, - mu=(np.ones(len(experiment.contexts)) * arm.mu_init).tolist(), - covariance=( - np.identity(len(experiment.contexts)) * arm.sigma_init - ).tolist(), - user_id=user_id, - n_outcomes=arm.n_outcomes, - ) - ) - - experiment_db = ContextualBanditDB( - name=experiment.name, - description=experiment.description, - user_id=user_id, - workspace_id=workspace_id, - is_active=experiment.is_active, - created_datetime_utc=datetime.now(timezone.utc), - n_trials=0, - arms=arms, - sticky_assignment=experiment.sticky_assignment, - auto_fail=experiment.auto_fail, - auto_fail_value=experiment.auto_fail_value, - auto_fail_unit=experiment.auto_fail_unit, - contexts=contexts, - prior_type=experiment.prior_type.value, - reward_type=experiment.reward_type.value, - ) - - asession.add(experiment_db) - await asession.commit() - await asession.refresh(experiment_db) - - return experiment_db - - -async def get_all_contextual_mabs( - workspace_id: int, - asession: AsyncSession, -) -> Sequence[ContextualBanditDB]: - """ - Get all the contextual experiments from the database for a specific workspace. - """ - statement = ( - select(ContextualBanditDB) - .where(ContextualBanditDB.workspace_id == workspace_id) - .order_by(ContextualBanditDB.experiment_id) - ) - - return (await asession.execute(statement)).unique().scalars().all() - - -async def get_contextual_mab_by_id( - experiment_id: int, workspace_id: int, asession: AsyncSession -) -> ContextualBanditDB | None: - """ - Get the contextual experiment by id from a specific workspace. 
- """ - condition = [ - ContextualBanditDB.experiment_id == experiment_id, - ContextualBanditDB.workspace_id == workspace_id, - ] - - statement = select(ContextualBanditDB).where(*condition) - result = await asession.execute(statement) - - return result.unique().scalar_one_or_none() - - -async def delete_contextual_mab_by_id( - experiment_id: int, workspace_id: int, asession: AsyncSession -) -> None: - """ - Delete the contextual experiment by id. - """ - await asession.execute( - delete(NotificationsDB).where(NotificationsDB.experiment_id == experiment_id) - ) - - await asession.execute( - delete(ContextualDrawDB).where(ContextualDrawDB.experiment_id == experiment_id) - ) - - await asession.execute( - delete(ContextDB).where(ContextDB.experiment_id == experiment_id) - ) - - await asession.execute( - delete(ContextualArmDB).where(ContextualArmDB.experiment_id == experiment_id) - ) - - await asession.execute( - delete(ContextualBanditDB).where( - and_( - ContextualBanditDB.workspace_id == workspace_id, - ContextualBanditDB.experiment_id == experiment_id, - ContextualBanditDB.experiment_id == ExperimentBaseDB.experiment_id, - ) - ) - ) - await asession.commit() - return None - - -async def save_contextual_obs_to_db( - draw: ContextualDrawDB, - reward: float, - asession: AsyncSession, - observation_type: ObservationType, -) -> ContextualDrawDB: - """ - Save the observation to the database. - """ - draw.reward = reward - draw.observed_datetime_utc = datetime.now(timezone.utc) - draw.observation_type = observation_type # Remove .value, pass enum directly - - await asession.commit() - await asession.refresh(draw) - - return draw - - -async def get_contextual_obs_by_experiment_arm_id( - experiment_id: int, - arm_id: int, - asession: AsyncSession, -) -> Sequence[ContextualDrawDB]: - """Get the observations for a specific arm of an experiment.""" - statement = ( - select(ContextualDrawDB) - .where( - and_( - ContextualDrawDB.experiment_id == experiment_id, - ContextualDrawDB.arm_id == arm_id, - ContextualDrawDB.reward.is_not(None), - ) - ) - .order_by(ContextualDrawDB.observed_datetime_utc) - ) - - result = await asession.execute(statement) - return result.unique().scalars().all() - - -async def get_all_contextual_obs_by_experiment_id( - experiment_id: int, - workspace_id: int, - asession: AsyncSession, -) -> Sequence[ContextualDrawDB]: - """ - Get all observations for an experiment, - verified to belong to the specified workspace. - """ - # First, verify experiment belongs to the workspace - experiment = await get_contextual_mab_by_id( - experiment_id=experiment_id, - workspace_id=workspace_id, - asession=asession, - ) - - if experiment is None: - # Return empty list if experiment doesn't exist or doesn't belong to workspace - return [] - - # Get all observations for this experiment - statement = ( - select(ContextualDrawDB) - .where( - and_( - ContextualDrawDB.experiment_id == experiment_id, - ContextualDrawDB.reward.is_not(None), - ) - ) - .order_by(ContextualDrawDB.observed_datetime_utc) - ) - - result = await asession.execute(statement) - return result.unique().scalars().all() - - -async def get_draw_by_id( - draw_id: str, asession: AsyncSession -) -> ContextualDrawDB | None: - """ - Get the draw by its ID, which should be unique across the system. 
- """ - statement = select(ContextualDrawDB).where(ContextualDrawDB.draw_id == draw_id) - result = await asession.execute(statement) - return result.unique().scalar_one_or_none() - - -async def get_draw_by_client_id( - client_id: str, experiment_id: int, asession: AsyncSession -) -> ContextualDrawDB | None: - """ - Get the draw by client id for a specific experiment. - """ - statement = ( - select(ContextualDrawDB) - .where(ContextualDrawDB.client_id == client_id) - .where(ContextualDrawDB.client_id.is_not(None)) - .where(ContextualDrawDB.experiment_id == experiment_id) - ) - result = await asession.execute(statement) - - return result.unique().scalars().first() - - -async def save_draw_to_db( - experiment_id: int, - arm_id: int, - context_val: list[float], - draw_id: str, - client_id: str | None, - user_id: int | None, - asession: AsyncSession, - workspace_id: int | None, -) -> ContextualDrawDB: - """ - Save the draw to the database. - """ - # If user_id is not provided but needed, get it from the experiment - if user_id is None: - if workspace_id is not None: - # Try to get experiment with workspace_id - experiment = await get_contextual_mab_by_id( - experiment_id=experiment_id, - workspace_id=workspace_id, - asession=asession, - ) - if experiment: - user_id = experiment.user_id - else: - raise ValueError(f"Experiment with id {experiment_id} not found") - else: - # Fall back to direct get if workspace_id not provided - experiment = await asession.get(ContextualBanditDB, experiment_id) - if experiment: - user_id = experiment.user_id - else: - raise ValueError(f"Experiment with id {experiment_id} not found") - - if user_id is None: - raise ValueError("User ID must be provided or derivable from experiment") - - draw_db = ContextualDrawDB( - draw_id=draw_id, - client_id=client_id, - arm_id=arm_id, - experiment_id=experiment_id, - user_id=user_id, - context_val=context_val, - draw_datetime_utc=datetime.now(timezone.utc), - ) - - asession.add(draw_db) - await asession.commit() - await asession.refresh(draw_db) - - return draw_db diff --git a/backend/app/contextual_mab/observation.py b/backend/app/contextual_mab/observation.py deleted file mode 100644 index e655bbf..0000000 --- a/backend/app/contextual_mab/observation.py +++ /dev/null @@ -1,126 +0,0 @@ -from datetime import datetime, timezone -from typing import Sequence - -import numpy as np -from fastapi.exceptions import HTTPException -from sqlalchemy.ext.asyncio import AsyncSession - -from ..schemas import ( - ObservationType, - RewardLikelihood, -) -from .models import ( - ContextualArmDB, - ContextualBanditDB, - ContextualDrawDB, - get_contextual_obs_by_experiment_arm_id, - save_contextual_obs_to_db, -) -from .sampling_utils import update_arm_params -from .schemas import ( - ContextualArmResponse, - ContextualBanditSample, -) - - -async def update_based_on_outcome( - experiment: ContextualBanditDB, - draw: ContextualDrawDB, - reward: float, - asession: AsyncSession, - observation_type: ObservationType, -) -> ContextualArmResponse: - """ - Update the arm based on the outcome of the draw. - - This is a helper function to allow `auto_fail` job to call - it as well. 
- """ - - update_experiment_metadata(experiment) - - arm = get_arm_from_experiment(experiment, draw.arm_id) - arm.n_outcomes += 1 - - # Ensure reward is binary for Bernoulli reward type - if experiment.reward_type == RewardLikelihood.BERNOULLI.value: - if reward not in [0, 1]: - raise HTTPException( - status_code=400, - detail="Reward must be 0 or 1 for Bernoulli reward type.", - ) - - # Get data for arm update - all_obs, contexts, rewards = await prepare_data_for_arm_update( - experiment.experiment_id, arm.arm_id, asession, draw, reward - ) - - experiment_data = ContextualBanditSample.model_validate(experiment) - mu, covariance = update_arm_params( - arm=ContextualArmResponse.model_validate(arm), - prior_type=experiment_data.prior_type, - reward_type=experiment_data.reward_type, - context=contexts, - reward=rewards, - ) - - await save_updated_data( - arm, mu, covariance, draw, reward, observation_type, asession - ) - - return ContextualArmResponse.model_validate(arm) - - -def update_experiment_metadata(experiment: ContextualBanditDB) -> None: - """Update experiment metadata with new trial information""" - experiment.n_trials += 1 - experiment.last_trial_datetime_utc = datetime.now(tz=timezone.utc) - - -def get_arm_from_experiment( - experiment: ContextualBanditDB, arm_id: int -) -> ContextualArmDB: - """Get and validate the arm from the experiment""" - arms = [a for a in experiment.arms if a.arm_id == arm_id] - if not arms: - raise HTTPException(status_code=404, detail=f"Arm with id {arm_id} not found") - return arms[0] - - -async def prepare_data_for_arm_update( - experiment_id: int, - arm_id: int, - asession: AsyncSession, - draw: ContextualDrawDB, - reward: float, -) -> tuple[Sequence[ContextualDrawDB], list[list], list[float]]: - """Prepare the data needed for updating arm parameters""" - all_obs = await get_contextual_obs_by_experiment_arm_id( - experiment_id=experiment_id, - arm_id=arm_id, - asession=asession, - ) - - rewards = [obs.reward for obs in all_obs] + [reward] - contexts = [obs.context_val for obs in all_obs] - contexts.append(draw.context_val) - - return all_obs, contexts, rewards - - -async def save_updated_data( - arm: ContextualArmDB, - mu: np.ndarray, - covariance: np.ndarray, - draw: ContextualDrawDB, - reward: float, - observation_type: ObservationType, - asession: AsyncSession, -) -> None: - """Save the updated arm and observation data""" - arm.mu = mu.tolist() - arm.covariance = covariance.tolist() - asession.add(arm) - await asession.commit() - - await save_contextual_obs_to_db(draw, reward, asession, observation_type) diff --git a/backend/app/contextual_mab/routers.py b/backend/app/contextual_mab/routers.py deleted file mode 100644 index 08eea28..0000000 --- a/backend/app/contextual_mab/routers.py +++ /dev/null @@ -1,395 +0,0 @@ -from typing import Annotated, List, Optional -from uuid import uuid4 - -from fastapi import APIRouter, Depends -from fastapi.exceptions import HTTPException -from sqlalchemy.ext.asyncio import AsyncSession - -from ..auth.dependencies import ( - authenticate_workspace_key, - get_verified_user, - require_admin_role, -) -from ..database import get_async_session -from ..models import get_notifications_from_db, save_notifications_to_db -from ..schemas import ( - ContextType, - NotificationsResponse, - ObservationType, - Outcome, -) -from ..users.models import UserDB -from ..utils import setup_logger -from ..workspaces.models import ( - WorkspaceDB, - get_user_default_workspace, -) -from .models import ( - ContextualBanditDB, - 
ContextualDrawDB, - delete_contextual_mab_by_id, - get_all_contextual_mabs, - get_all_contextual_obs_by_experiment_id, - get_contextual_mab_by_id, - get_draw_by_client_id, - get_draw_by_id, - save_contextual_mab_to_db, - save_draw_to_db, -) -from .observation import update_based_on_outcome -from .sampling_utils import choose_arm -from .schemas import ( - CMABDrawResponse, - CMABObservationResponse, - ContextInput, - ContextualArmResponse, - ContextualBandit, - ContextualBanditResponse, - ContextualBanditSample, -) - -router = APIRouter(prefix="/contextual_mab", tags=["Contextual Bandits"]) - -logger = setup_logger(__name__) - - -@router.post("/", response_model=ContextualBanditResponse) -async def create_contextual_mabs( - experiment: ContextualBandit, - user_db: Annotated[UserDB, Depends(require_admin_role)], - asession: AsyncSession = Depends(get_async_session), -) -> ContextualBanditResponse | HTTPException: - """ - Create a new contextual experiment with different priors for each context. - """ - workspace_db = await get_user_default_workspace(asession=asession, user_db=user_db) - - if workspace_db is None: - raise HTTPException( - status_code=404, - detail="Workspace not found. Please create a workspace first.", - ) - - cmab = await save_contextual_mab_to_db( - experiment, user_db.user_id, workspace_db.workspace_id, asession - ) - notifications = await save_notifications_to_db( - experiment_id=cmab.experiment_id, - user_id=user_db.user_id, - notifications=experiment.notifications, - asession=asession, - ) - cmab_dict = cmab.to_dict() - cmab_dict["notifications"] = [n.to_dict() for n in notifications] - return ContextualBanditResponse.model_validate(cmab_dict) - - -@router.get("/", response_model=list[ContextualBanditResponse]) -async def get_contextual_mabs( - user_db: Annotated[UserDB, Depends(get_verified_user)], - asession: AsyncSession = Depends(get_async_session), -) -> list[ContextualBanditResponse]: - """ - Get details of all experiments. - """ - workspace_db = await get_user_default_workspace(asession=asession, user_db=user_db) - - if workspace_db is None: - raise HTTPException( - status_code=404, - detail="Workspace not found. Please create a workspace first.", - ) - - experiments = await get_all_contextual_mabs(workspace_db.workspace_id, asession) - all_experiments = [] - for exp in experiments: - exp_dict = exp.to_dict() - exp_dict["notifications"] = [ - n.to_dict() - for n in await get_notifications_from_db( - exp.experiment_id, exp.user_id, asession - ) - ] - all_experiments.append( - ContextualBanditResponse.model_validate( - { - **exp_dict, - "notifications": [ - NotificationsResponse.model_validate(n) - for n in exp_dict["notifications"] - ], - } - ) - ) - - return all_experiments - - -@router.get("/{experiment_id}", response_model=ContextualBanditResponse) -async def get_contextual_mab( - experiment_id: int, - user_db: Annotated[UserDB, Depends(get_verified_user)], - asession: AsyncSession = Depends(get_async_session), -) -> ContextualBanditResponse | HTTPException: - """ - Get details of experiment with the provided `experiment_id`. - """ - workspace_db = await get_user_default_workspace(asession=asession, user_db=user_db) - - if workspace_db is None: - raise HTTPException( - status_code=404, - detail="Workspace not found. 
Please create a workspace first.", - ) - - experiment = await get_contextual_mab_by_id( - experiment_id, workspace_db.workspace_id, asession - ) - if experiment is None: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - - experiment_dict = experiment.to_dict() - experiment_dict["notifications"] = [ - n.to_dict() - for n in await get_notifications_from_db( - experiment.experiment_id, experiment.user_id, asession - ) - ] - return ContextualBanditResponse.model_validate(experiment_dict) - - -@router.delete("/{experiment_id}", response_model=dict) -async def delete_contextual_mab( - experiment_id: int, - user_db: Annotated[UserDB, Depends(require_admin_role)], - asession: AsyncSession = Depends(get_async_session), -) -> dict: - """ - Delete the experiment with the provided `experiment_id`. - """ - try: - workspace_db = await get_user_default_workspace( - asession=asession, user_db=user_db - ) - - if workspace_db is None: - raise HTTPException( - status_code=404, - detail="Workspace not found. Please create a workspace first.", - ) - - experiment = await get_contextual_mab_by_id( - experiment_id, workspace_db.workspace_id, asession - ) - if experiment is None: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - await delete_contextual_mab_by_id( - experiment_id, workspace_db.workspace_id, asession - ) - return {"detail": f"Experiment {experiment_id} deleted successfully."} - except Exception as e: - raise HTTPException(status_code=500, detail=f"Error: {e}") from e - - -@router.post("/{experiment_id}/draw", response_model=CMABDrawResponse) -async def draw_arm( - experiment_id: int, - context: List[ContextInput], - draw_id: Optional[str] = None, - client_id: Optional[str] = None, - workspace_db: WorkspaceDB = Depends(authenticate_workspace_key), - asession: AsyncSession = Depends(get_async_session), -) -> CMABDrawResponse: - """ - Get which arm to pull next for provided experiment. 
- """ - # Get workspace from user context - workspace_id = workspace_db.workspace_id - - experiment = await get_contextual_mab_by_id(experiment_id, workspace_id, asession) - - if experiment is None: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - - # Check context inputs - if len(experiment.contexts) != len(context): - raise HTTPException( - status_code=400, - detail="Number of contexts provided does not match the num contexts.", - ) - experiment_data = ContextualBanditSample.model_validate(experiment) - sorted_context = list(sorted(context, key=lambda x: x.context_id)) - - try: - for c_input, c_exp in zip( - sorted_context, - sorted(experiment.contexts, key=lambda x: x.context_id), - ): - if c_exp.value_type == ContextType.BINARY.value: - Outcome(c_input.context_value) - except ValueError as e: - raise HTTPException( - status_code=400, - detail=f"Invalid context value: {e}", - ) from e - - # Generate UUID if not provided - if draw_id is None: - draw_id = str(uuid4()) - - existing_draw = await get_draw_by_id(draw_id, asession) - if existing_draw: - raise HTTPException( - status_code=400, - detail=f"Draw ID {draw_id} already exists.", - ) - - # Check if sticky assignment - if experiment.sticky_assignment and not client_id: - raise HTTPException( - status_code=400, - detail="Client ID is required for sticky assignment.", - ) - - chosen_arm = choose_arm( - experiment_data, - [c.context_value for c in sorted_context], - ) - chosen_arm_id = experiment.arms[chosen_arm].arm_id - if experiment.sticky_assignment and client_id: - previous_draw = await get_draw_by_client_id( - client_id=client_id, - experiment_id=experiment.experiment_id, - asession=asession, - ) - if previous_draw: - chosen_arm_id = previous_draw.arm_id - - try: - _ = await save_draw_to_db( - experiment_id=experiment.experiment_id, - arm_id=chosen_arm_id, - context_val=[c.context_value for c in sorted_context], - draw_id=draw_id, - client_id=client_id, - user_id=None, - asession=asession, - workspace_id=workspace_id, - ) - except Exception as e: - raise HTTPException( - status_code=500, - detail=f"Error saving draw to database: {e}", - ) from e - - return CMABDrawResponse.model_validate( - { - "draw_id": draw_id, - "client_id": client_id, - "arm": ContextualArmResponse.model_validate( - [arm for arm in experiment.arms if arm.arm_id == chosen_arm_id][0] - ), - } - ) - - -@router.put("/{experiment_id}/{draw_id}/{reward}", response_model=ContextualArmResponse) -async def update_arm( - experiment_id: int, - draw_id: str, - reward: float, - workspace_db: WorkspaceDB = Depends(authenticate_workspace_key), - asession: AsyncSession = Depends(get_async_session), -) -> ContextualArmResponse: - """ - Update the arm with the provided `arm_id` for the given - `experiment_id` based on the reward. - """ - # Get workspace from user context - workspace_id = workspace_db.workspace_id - - # Get the experiment and do checks - experiment, draw = await validate_experiment_and_draw( - experiment_id, draw_id, workspace_id, asession - ) - - return await update_based_on_outcome( - experiment, draw, reward, asession, ObservationType.USER - ) - - -@router.get( - "/{experiment_id}/outcomes", - response_model=list[CMABObservationResponse], -) -async def get_outcomes( - experiment_id: int, - workspace_db: WorkspaceDB = Depends(authenticate_workspace_key), - asession: AsyncSession = Depends(get_async_session), -) -> list[CMABObservationResponse]: - """ - Get the outcomes for the experiment. 
- """ - # Get workspace from user context - workspace_id = workspace_db.workspace_id - - experiment = await get_contextual_mab_by_id(experiment_id, workspace_id, asession) - if not experiment: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - - observations = await get_all_contextual_obs_by_experiment_id( - experiment_id=experiment.experiment_id, - workspace_id=workspace_id, - asession=asession, - ) - return [CMABObservationResponse.model_validate(obs) for obs in observations] - - -async def validate_experiment_and_draw( - experiment_id: int, - draw_id: str, - workspace_id: int, - asession: AsyncSession, -) -> tuple[ContextualBanditDB, ContextualDrawDB]: - """ - Validate that the experiment exists in the workspace - and the draw exists for that experiment. - """ - experiment = await get_contextual_mab_by_id( - experiment_id=experiment_id, - workspace_id=workspace_id, - asession=asession, - ) - if experiment is None: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - - draw = await get_draw_by_id(draw_id=draw_id, asession=asession) - if draw is None: - raise HTTPException(status_code=404, detail=f"Draw with id {draw_id} not found") - - if draw.experiment_id != experiment_id: - raise HTTPException( - status_code=400, - detail=( - f"Draw with id {draw_id} does not belong " - f"to experiment with id {experiment_id}", - ), - ) - - if draw.reward is not None: - raise HTTPException( - status_code=400, - detail=f"Draw with id {draw_id} already has a reward.", - ) - - return experiment, draw diff --git a/backend/app/contextual_mab/sampling_utils.py b/backend/app/contextual_mab/sampling_utils.py deleted file mode 100644 index 03c9784..0000000 --- a/backend/app/contextual_mab/sampling_utils.py +++ /dev/null @@ -1,172 +0,0 @@ -import numpy as np -from scipy.optimize import minimize - -from ..schemas import ArmPriors, ContextLinkFunctions, RewardLikelihood -from .schemas import ContextualArmResponse, ContextualBanditSample - - -def sample_normal( - mus: list[np.ndarray], - covariances: list[np.ndarray], - context: np.ndarray, - link_function: ContextLinkFunctions, -) -> int: - """ - Thompson Sampling with normal prior. - - Parameters - ---------- - mus: mean of Normal distribution for each arm - covariances: covariance matrix of Normal distribution for each arm - context: context vector - link_function: link function for the context - """ - samples = np.array( - [ - np.random.multivariate_normal(mean=mu, cov=cov) - for mu, cov in zip(mus, covariances) - ] - ).reshape(-1, len(context)) - probs = link_function(samples @ context) - return int(probs.argmax()) - - -def update_arm_normal( - current_mu: np.ndarray, - current_covariance: np.ndarray, - reward: float, - context: np.ndarray, - sigma_llhood: float, -) -> tuple[np.ndarray, np.ndarray]: - """ - Update the mean and covariance of the normal distribution. - - Parameters - ---------- - current_mu : The mean of the normal distribution. - current_covariance : The covariance matrix of the normal distribution. - reward : The reward of the arm. - context : The context vector. - sigma_llhood : The stddev of the likelihood. 
- """ - new_covariance_inv = ( - np.linalg.inv(current_covariance) + (context.T @ context) / sigma_llhood**2 - ) - new_covariance = np.linalg.inv(new_covariance_inv) - - new_mu = new_covariance @ ( - np.linalg.inv(current_covariance) @ current_mu - + context * reward / sigma_llhood**2 - ) - return new_mu, new_covariance - - -def update_arm_laplace( - current_mu: np.ndarray, - current_covariance: np.ndarray, - reward: np.ndarray, - context: np.ndarray, - link_function: ContextLinkFunctions, - reward_likelihood: RewardLikelihood, - prior_type: ArmPriors, -) -> tuple[np.ndarray, np.ndarray]: - """ - Update the mean and covariance using the Laplace approximation. - - Parameters - ---------- - current_mu : The mean of the normal distribution. - current_covariance : The covariance matrix of the normal distribution. - reward : The list of rewards for the arm. - context : The list of contexts for the arm. - link_function : The link function for parameters to rewards. - reward_likelihood : The likelihood function of the reward. - prior_type : The prior type of the arm. - """ - - def objective(theta: np.ndarray) -> float: - """ - Objective function for the Laplace approximation. - - Parameters - ---------- - theta : The parameters of the arm. - """ - # Log prior - log_prior = prior_type(theta, mu=current_mu, covariance=current_covariance) - - # Log likelihood - log_likelihood = reward_likelihood(reward, link_function(context @ theta)) - - return -log_prior - log_likelihood - - result = minimize(objective, current_mu, method="L-BFGS-B", hess="2-point") - new_mu = result.x - covariance = result.hess_inv.todense() # type: ignore - - new_covariance = 0.5 * (covariance + covariance.T) - return new_mu, new_covariance.astype(np.float64) - - -def choose_arm(experiment: ContextualBanditSample, context: list[float]) -> int: - """ - Choose the arm with the highest probability. - - Parameters - ---------- - experiment : The experiment object. - context : The context vector. - """ - link_function = ( - ContextLinkFunctions.NONE - if experiment.reward_type == RewardLikelihood.NORMAL - else ContextLinkFunctions.LOGISTIC - ) - return sample_normal( - mus=[np.array(arm.mu) for arm in experiment.arms], - covariances=[np.array(arm.covariance) for arm in experiment.arms], - context=np.array(context), - link_function=link_function, - ) - - -def update_arm_params( - arm: ContextualArmResponse, - prior_type: ArmPriors, - reward_type: RewardLikelihood, - reward: list, - context: list, -) -> tuple[np.ndarray, np.ndarray]: - """ - Update the arm parameters. - - Parameters - ---------- - arm : The arm object. - prior_type : The prior type of the arm. - reward_type : The reward type of the arm. - reward : All rewards for the arm. - context : All context vectors for the arm. 
- """ - if (prior_type == ArmPriors.NORMAL) and (reward_type == RewardLikelihood.NORMAL): - return update_arm_normal( - current_mu=np.array(arm.mu), - current_covariance=np.array(arm.covariance), - reward=reward[-1], - context=np.array(context[-1]), - sigma_llhood=1.0, # TODO: need to implement likelihood stddev - ) - elif (prior_type == ArmPriors.NORMAL) and ( - reward_type == RewardLikelihood.BERNOULLI - ): - return update_arm_laplace( - current_mu=np.array(arm.mu), - current_covariance=np.array(arm.covariance), - reward=np.array(reward), - context=np.array(context), - link_function=ContextLinkFunctions.LOGISTIC, - reward_likelihood=RewardLikelihood.BERNOULLI, - prior_type=ArmPriors.NORMAL, - ) - else: - raise ValueError("Prior and reward type combination is not supported.") diff --git a/backend/app/contextual_mab/schemas.py b/backend/app/contextual_mab/schemas.py deleted file mode 100644 index 57baaf4..0000000 --- a/backend/app/contextual_mab/schemas.py +++ /dev/null @@ -1,268 +0,0 @@ -from datetime import datetime -from typing import Optional, Self - -from pydantic import BaseModel, ConfigDict, Field, model_validator - -from ..schemas import ( - ArmPriors, - AutoFailUnitType, - ContextType, - Notifications, - NotificationsResponse, - RewardLikelihood, - allowed_combos_cmab, -) - - -class Context(BaseModel): - """ - Pydantic model for a binary-valued context of the experiment. - """ - - name: str = Field( - description="Name of the context", - examples=["Context 1"], - ) - description: str = Field( - description="Description of the context", - examples=["This is a description of the context."], - ) - value_type: ContextType = Field( - description="Type of value the context can take", default=ContextType.BINARY - ) - model_config = ConfigDict(from_attributes=True) - - -class ContextResponse(Context): - """ - Pydantic model for an response for context creation - """ - - context_id: int - model_config = ConfigDict(from_attributes=True) - - -class ContextInput(BaseModel): - """ - Pydantic model for a context input - """ - - context_id: int - context_value: float - model_config = ConfigDict(from_attributes=True) - - -class ContextualArm(BaseModel): - """ - Pydantic model for a contextual arm of the experiment. - """ - - name: str = Field( - max_length=150, - examples=["Arm 1"], - ) - description: str = Field( - max_length=500, - examples=["This is a description of the arm."], - ) - - mu_init: float = Field( - default=0.0, - examples=[0.0, 1.2, 5.7], - description="Mean parameter for Normal prior", - ) - - sigma_init: float = Field( - default=1.0, - examples=[1.0, 0.5, 2.0], - description="Standard deviation parameter for Normal prior", - ) - n_outcomes: Optional[int] = Field( - default=0, - description="Number of outcomes for the arm", - examples=[0, 10, 15], - ) - - @model_validator(mode="after") - def check_values(self) -> Self: - """ - Check if the values are unique and set new attributes. - """ - sigma = self.sigma_init - if sigma is not None and sigma <= 0: - raise ValueError("Std dev must be greater than 0.") - return self - - model_config = ConfigDict(from_attributes=True) - - -class ContextualArmResponse(ContextualArm): - """ - Pydantic model for an response for contextual arm creation - """ - - arm_id: int - mu: list[float] - covariance: list[list[float]] - - model_config = ConfigDict(from_attributes=True) - - -class ContextualBanditBase(BaseModel): - """ - Pydantic model for a contextual experiment - Base model. - Note: Do not use this model directly. Use ContextualBandit instead. 
- """ - - name: str = Field( - max_length=150, - examples=["Experiment 1"], - ) - - description: str = Field( - max_length=500, - examples=["This is a description of the experiment."], - ) - - sticky_assignment: bool = Field( - description="Whether the arm assignment is sticky or not.", - default=False, - ) - - auto_fail: bool = Field( - description=( - "Whether the experiment should fail automatically after " - "a certain period if no outcome is registered." - ), - default=False, - ) - - auto_fail_value: Optional[int] = Field( - description="The time period after which the experiment should fail.", - default=None, - ) - - auto_fail_unit: Optional[AutoFailUnitType] = Field( - description="The time unit for the auto fail period.", - default=None, - ) - - reward_type: RewardLikelihood = Field( - description="The type of reward we observe from the experiment.", - default=RewardLikelihood.BERNOULLI, - ) - - prior_type: ArmPriors = Field( - description="The type of prior distribution for the arms.", - default=ArmPriors.NORMAL, - ) - - is_active: bool = True - - model_config = ConfigDict(from_attributes=True) - - -class ContextualBandit(ContextualBanditBase): - """ - Pydantic model for a contextual experiment. - """ - - arms: list[ContextualArm] - contexts: list[Context] - notifications: Notifications - - @model_validator(mode="after") - def auto_fail_unit_and_value_set(self) -> Self: - """ - Validate that the auto fail unit and value are set if auto fail is True. - """ - if self.auto_fail: - if ( - not self.auto_fail_value - or not self.auto_fail_unit - or self.auto_fail_value <= 0 - ): - raise ValueError( - ( - "Auto fail is enabled. " - "Please provide both auto_fail_value and auto_fail_unit." - ) - ) - return self - - @model_validator(mode="after") - def arms_at_least_two(self) -> Self: - """ - Validate that the experiment has at least two arms. - """ - if len(self.arms) < 2: - raise ValueError("The experiment must have at least two arms.") - return self - - @model_validator(mode="after") - def check_prior_reward_type_combo(self) -> Self: - """ - Validate that the prior and reward type combination is allowed. - """ - - if (self.prior_type, self.reward_type) not in allowed_combos_cmab: - raise ValueError("Prior and reward type combo not supported.") - return self - - model_config = ConfigDict(from_attributes=True) - - -class ContextualBanditResponse(ContextualBanditBase): - """ - Pydantic model for an response for contextual experiment creation. - Returns the id of the experiment, the arms and the contexts - """ - - experiment_id: int - workspace_id: int - arms: list[ContextualArmResponse] - contexts: list[ContextResponse] - notifications: list[NotificationsResponse] - created_datetime_utc: datetime - last_trial_datetime_utc: Optional[datetime] = None - n_trials: int - - model_config = ConfigDict(from_attributes=True) - - -class ContextualBanditSample(ContextualBanditBase): - """ - Pydantic model for a contextual experiment sample. 
- """ - - experiment_id: int - arms: list[ContextualArmResponse] - contexts: list[ContextResponse] - - -class CMABObservationResponse(BaseModel): - """ - Pydantic model for an response for contextual observation creation - """ - - arm_id: int - reward: float - context_val: list[float] - - draw_id: str - client_id: str | None - observed_datetime_utc: datetime - - model_config = ConfigDict(from_attributes=True) - - -class CMABDrawResponse(BaseModel): - """ - Pydantic model for an response for contextual arm draw - """ - - draw_id: str - client_id: str | None - arm: ContextualArmResponse - - model_config = ConfigDict(from_attributes=True) diff --git a/backend/app/experiments/dependencies.py b/backend/app/experiments/dependencies.py new file mode 100644 index 0000000..bcf15d2 --- /dev/null +++ b/backend/app/experiments/dependencies.py @@ -0,0 +1,321 @@ +from typing import Union + +import numpy as np +from fastapi.exceptions import HTTPException +from sqlalchemy.ext.asyncio import AsyncSession + +from .models import ( + ArmDB, + DrawDB, + ExperimentDB, + get_draw_by_id, + get_draws_with_rewards_by_experiment_id, + get_experiment_by_id_from_db, + get_notifications_from_db, + save_observation_to_db, +) +from .sampling_utils import update_arm +from .schemas import ( + ArmPriors, + ArmResponse, + ExperimentSample, + ExperimentsEnum, + NotificationsResponse, + ObservationType, + Outcome, + RewardLikelihood, +) + + +async def experiments_db_to_schema( + experiments_db: list[ExperimentDB], + asession: AsyncSession, +) -> list[ExperimentSample]: + """ + Convert a list of ExperimentDB objects to a list of ExperimentResponse schemas. + """ + all_experiments = [] + for exp in experiments_db: + exp_dict = exp.to_dict() + exp_dict["notifications"] = [ + n.to_dict() + for n in await get_notifications_from_db( + experiment_id=exp.experiment_id, + user_id=exp.user_id, + workspace_id=exp.workspace_id, + asession=asession, + ) + ] + all_experiments.append( + ExperimentSample.model_validate( + { + **exp_dict, + "notifications": [ + NotificationsResponse(**n) for n in exp_dict["notifications"] + ], + } + ) + ) + + return all_experiments + + +async def validate_experiment_and_draw( + experiment_id: int, draw_id: str, workspace_id: int, asession: AsyncSession +) -> tuple[ExperimentDB, DrawDB]: + """ + Validate the experiment and draw. 
+ Checks if: + (a) `experiment_id` exists + (b) `draw_id` exists + (c) `draw_id` belongs to `experiment_id` + (d) `draw_id` doesn't already have a reward + """ + experiment = await get_experiment_by_id_from_db( + workspace_id=workspace_id, experiment_id=experiment_id, asession=asession + ) + # Check experiment + if experiment is None: + raise HTTPException( + status_code=404, detail=f"Experiment with id {experiment_id} not found" + ) + + draw = await get_draw_by_id(draw_id=draw_id, asession=asession) + # Check draw + if draw is None: + raise HTTPException(status_code=404, detail=f"Draw with id {draw_id} not found") + if draw.experiment_id != experiment_id: + raise HTTPException( + status_code=404, + detail=( + f"Draw with id {draw_id} does not belong to " + f"experiment with id {experiment_id}" + ), + ) + if draw.reward: + raise HTTPException( + status_code=400, + detail=f"Draw with id {draw_id} has already been updated with a reward.", + ) + + return experiment, draw + + +async def format_rewards_for_arm_update( + experiment: ExperimentDB, + chosen_arm_id: int, + reward: float, + context_val: Union[list[float], None], + asession: AsyncSession, +) -> tuple[list[float], list[list[float]] | None, list[float] | None]: + """ + Aggregates and formats reward, context, and treatment data for updating experiment + arm parameters. + + This function collects all previous rewards associated with the specified experiment + and arm, appends the latest observed reward, and structures the data (including + context and treatment values when applicable) for downstream update algorithms. It + ensures that data passed to update routines is comprehensive and correctly ordered + for robust experiment tracking, including support for contextual bandits and + Bayesian A/B experiments. + + Parameters + ---------- + experiment : ExperimentDB + The experiment object containing metadata and arms. + chosen_arm_id : int + The ID of the arm for which the new reward is being recorded. + reward : float + The most recent observed reward for the chosen arm. + context_val : list of float or None + The context vector associated with the latest draw, if available. + asession : AsyncSession + The asynchronous database session for performing queries. + + Returns + ------- + rewards_list : list of float + List of rewards for the arm, with the new reward prepended. + context_list : list of list of float or None + List of context vectors (if applicable), with the new context prepended. + `None` if context is not used. + treatments_list : list of float or None + List of treatment assignments (if applicable), with the new assignment + prepended. + `None` if treatments are not used. + + Raises + ------ + ValueError + If context values are missing for prior draws in a contextual bandit experiment. + + Notes + ----- + This function ensures that historical and new data are combined and + formatted as expected by arm update algorithms, supporting various + experiment types and configurations. 
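+
+    Examples
+    --------
+    Illustrative sketch only: ``exp``, ``asession`` and the arm id ``3`` are
+    hypothetical stand-ins for an existing experiment row, an open
+    ``AsyncSession`` and one of the experiment's arms::
+
+        rewards, contexts, treatments = await format_rewards_for_arm_update(
+            experiment=exp,
+            chosen_arm_id=3,
+            reward=1.0,
+            context_val=None,  # plain (non-contextual) experiment
+            asession=asession,
+        )
+        # rewards: newest reward first, then earlier rewards logged for arm 3
+        # contexts: None, because no context vector was supplied
+        # treatments: the chosen arm's treatment flag (used for Bayesian A/B)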
+ """ + previous_rewards = await get_draws_with_rewards_by_experiment_id( + experiment_id=experiment.experiment_id, asession=asession + ) + + rewards = [] + treatments = None + contexts = None + + if previous_rewards: + if experiment.exp_type != ExperimentsEnum.BAYESAB.value: + rewards = [ + draw.reward for draw in previous_rewards if draw.arm_id == chosen_arm_id + ] + else: + treatments = [] + for draw in previous_rewards: + rewards.append(draw.reward) + treatments.append( + [ + float(arm.is_treatment_arm) + for arm in experiment.arms + if arm.arm_id == draw.arm_id + ][0] + ) + + if experiment.exp_type == ExperimentsEnum.CMAB.value: + contexts = [] + for draw in previous_rewards: + if draw.context_val: + contexts.append(draw.context_val) + else: + raise ValueError( + f"Context value is missing for draw id {draw.draw_id}" + f" in CMAB experiment {draw.experiment_id}." + ) + + rewards_list = [reward] if rewards is None else [reward] + rewards + + context_list = None if not context_val else [context_val] + if contexts and context_list: + context_list = context_list + contexts + + chosen_arm_index = int( + np.argwhere([a.arm_id == chosen_arm_id for a in experiment.arms])[0][0] + ) + new_treatment = [float(experiment.arms[chosen_arm_index].is_treatment_arm)] + treatments_list = ( + new_treatment if treatments is None else new_treatment + treatments + ) + + return rewards_list, context_list, treatments_list + + +async def update_arm_based_on_outcome( + experiment: ExperimentDB, + draw: DrawDB, + rewards: list[float], + contexts: Union[list[list[float]], None], + treatments: Union[list[float], None], + observation_type: ObservationType, + asession: AsyncSession, +) -> ArmResponse: + """ + Update the arm parameters based on the outcome. + + This is a helper function to allow `auto_fail` job to call + it as well. 
+ """ + ExperimentDB.update_metadata(experiment) + + arm = get_arm_from_experiment(experiment, draw.arm_id) + arm.n_outcomes += 1 + + chosen_arm = int( + np.argwhere([a.arm_id == arm.arm_id for a in experiment.arms])[0][0] + ) + + await update_arm_parameters( + arm=arm, + experiment=experiment, + chosen_arm=chosen_arm, + rewards=rewards, + contexts=contexts, + treatments=treatments, + ) + + await save_updated_data( + arm=experiment.arms[chosen_arm], + draw=draw, + reward=rewards[0], + observation_type=observation_type, + asession=asession, + ) + + return ArmResponse.model_validate(arm) + + +def get_arm_from_experiment(experiment: ExperimentDB, arm_id: int) -> ArmDB: + """Get and validate the arm from the experiment""" + arms = [a for a in experiment.arms if a.arm_id == arm_id] + if not arms: + raise HTTPException(status_code=404, detail=f"Arm with id {arm_id} not found") + return arms[0] + + +async def update_arm_parameters( + arm: ArmDB, + experiment: ExperimentDB, + chosen_arm: int, + rewards: list[float], + contexts: Union[list[list[float]], None], + treatments: Union[list[float], None], +) -> None: + """Update the arm parameters based on the reward type and outcome""" + experiment_data = ExperimentSample.model_validate(experiment.to_dict()) + if experiment_data.reward_type == RewardLikelihood.BERNOULLI: + Outcome(rewards[0]) # Check if reward is 0 or 1 + params = update_arm( + experiment=experiment_data, + rewards=rewards, + arm_to_update=chosen_arm, + context=contexts, + treatments=treatments, + ) + + if experiment_data.exp_type == ExperimentsEnum.BAYESAB: + if experiment_data.prior_type == ArmPriors.NORMAL: + mus, covariances = params + for arm in experiment.arms: + if arm.is_treatment_arm: + arm.mu = [mus[0]] + arm.covariance = covariances[0] + else: + arm.mu = [mus[1]] + arm.covariance = covariances[1] + else: + raise HTTPException( + status_code=400, + detail="Prior type not supported for Bayesian A/B experiments.", + ) + else: + if experiment_data.prior_type == ArmPriors.BETA: + arm.alpha, arm.beta = params + elif experiment_data.prior_type == ArmPriors.NORMAL: + arm.mu, arm.covariance = params + else: + raise HTTPException( + status_code=400, + detail="Prior type not supported.", + ) + + +async def save_updated_data( + arm: ArmDB, + draw: DrawDB, + reward: float, + observation_type: ObservationType, + asession: AsyncSession, +) -> None: + """Save the updated arm and observation data""" + await asession.commit() + await save_observation_to_db( + draw=draw, reward=reward, observation_type=observation_type, asession=asession + ) diff --git a/backend/app/experiments/models.py b/backend/app/experiments/models.py new file mode 100644 index 0000000..3438b1d --- /dev/null +++ b/backend/app/experiments/models.py @@ -0,0 +1,744 @@ +import uuid +from datetime import datetime, timezone +from typing import Optional, Sequence + +import numpy as np +from sqlalchemy import ( + Boolean, + DateTime, + Enum, + Float, + ForeignKey, + Integer, + String, + delete, + select, +) +from sqlalchemy.dialects.postgresql import ARRAY +from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy.orm import Mapped, mapped_column, relationship + +from ..models import Base +from .schemas import ( + AutoFailUnitType, + EventType, + Experiment, + Notifications, + ObservationType, +) + + +# --- Base model for experiments --- +class ExperimentDB(Base): + """ + Base model for experiments. 
+ """ + + __tablename__ = "experiments" + + # IDs + experiment_id: Mapped[int] = mapped_column( + Integer, primary_key=True, nullable=False + ) + user_id: Mapped[int] = mapped_column( + Integer, ForeignKey("users.user_id"), nullable=False + ) + workspace_id: Mapped[int] = mapped_column( + Integer, ForeignKey("workspace.workspace_id"), nullable=False + ) + + # Description + name: Mapped[str] = mapped_column(String(length=150), nullable=False) + description: Mapped[str] = mapped_column(String(length=500), nullable=False) + is_active: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True) + + # Assignments config + sticky_assignment: Mapped[bool] = mapped_column( + Boolean, nullable=False, default=False + ) + auto_fail: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False) + auto_fail_value: Mapped[int] = mapped_column(Integer, nullable=True) + auto_fail_unit: Mapped[AutoFailUnitType] = mapped_column( + Enum(AutoFailUnitType), nullable=True + ) + + # Experiment config + exp_type: Mapped[str] = mapped_column(String(length=50), nullable=False) + prior_type: Mapped[str] = mapped_column(String(length=50), nullable=False) + reward_type: Mapped[str] = mapped_column(String(length=50), nullable=False) + + # State variables + created_datetime_utc: Mapped[datetime] = mapped_column( + DateTime(timezone=True), nullable=False + ) + n_trials: Mapped[int] = mapped_column(Integer, nullable=False) + last_trial_datetime_utc: Mapped[datetime] = mapped_column( + DateTime(timezone=True), nullable=True + ) + + # Relationships + arms: Mapped[list["ArmDB"]] = relationship( + "ArmDB", back_populates="experiment", lazy="joined" + ) + draws: Mapped[list["DrawDB"]] = relationship( + "DrawDB", + back_populates="experiment", + primaryjoin="ExperimentDB.experiment_id==DrawDB.experiment_id", + lazy="joined", + ) + clients: Mapped[list["ClientDB"]] = relationship( + "ClientDB", + back_populates="experiment", + lazy="joined", + ) + contexts: Mapped[Optional[list["ContextDB"]]] = relationship( + "ContextDB", + back_populates="experiment", + lazy="joined", + primaryjoin="and_(ExperimentDB.experiment_id==ContextDB.experiment_id," + + "ExperimentDB.exp_type=='cmab')", + ) + + def __repr__(self) -> str: + """ + String representation of the model + """ + return f"" + + @property + def has_contexts(self) -> bool: + """Check if this experiment type supports contexts.""" + return self.exp_type == "cmab" + + @property + def context_list(self) -> list["ContextDB"] | list: + """Get contexts, returning empty list if not applicable.""" + return self.contexts if self.has_contexts and self.contexts is not None else [] + + @staticmethod + def update_metadata(experiment: "ExperimentDB") -> "ExperimentDB": + """ + Update the metadata of the experiment. + """ + experiment.n_trials += 1 + experiment.last_trial_datetime_utc = datetime.now(timezone.utc) + return experiment + + def to_dict(self) -> dict: + """ + Convert the ORM object to a dictionary. 
+ """ + return { + "experiment_id": self.experiment_id, + "user_id": self.user_id, + "workspace_id": self.workspace_id, + "name": self.name, + "description": self.description, + "sticky_assignment": self.sticky_assignment, + "auto_fail": self.auto_fail, + "auto_fail_value": self.auto_fail_value, + "auto_fail_unit": self.auto_fail_unit, + "exp_type": self.exp_type, + "prior_type": self.prior_type, + "reward_type": self.reward_type, + "created_datetime_utc": str(self.created_datetime_utc), + "is_active": self.is_active, + "n_trials": self.n_trials, + "last_trial_datetime_utc": str(self.last_trial_datetime_utc), + "arms": [arm.to_dict() for arm in self.arms], + "contexts": ( + [context.to_dict() for context in self.context_list if context] + if len(self.context_list) > 0 + else [] + ), + } + + +# --- Arm model --- +class ArmDB(Base): + """ + Base model for arms. + """ + + __tablename__ = "arms" + + # IDs + arm_id: Mapped[int] = mapped_column(Integer, primary_key=True, nullable=False) + workspace_id: Mapped[int] = mapped_column( + Integer, ForeignKey("workspace.workspace_id"), nullable=False + ) + experiment_id: Mapped[int] = mapped_column( + Integer, ForeignKey("experiments.experiment_id"), nullable=False + ) + + # Description + name: Mapped[str] = mapped_column(String(length=150), nullable=False) + description: Mapped[str] = mapped_column(String(length=500), nullable=False) + n_outcomes: Mapped[int] = mapped_column(Integer, nullable=False, default=0) + + # Prior variables + mu_init: Mapped[Optional[float]] = mapped_column(Float, nullable=True) + sigma_init: Mapped[Optional[float]] = mapped_column(Float, nullable=True) + mu: Mapped[Optional[list[float]]] = mapped_column(ARRAY(Float), nullable=True) + covariance: Mapped[Optional[list[float]]] = mapped_column( + ARRAY(Float), nullable=True + ) + is_treatment_arm: Mapped[bool] = mapped_column(Boolean, nullable=True, default=True) + + alpha_init: Mapped[Optional[float]] = mapped_column(Float, nullable=True) + beta_init: Mapped[Optional[float]] = mapped_column(Float, nullable=True) + alpha: Mapped[Optional[float]] = mapped_column(Float, nullable=True) + beta: Mapped[Optional[float]] = mapped_column(Float, nullable=True) + + # Relationships + experiment: Mapped[ExperimentDB] = relationship( + "ExperimentDB", back_populates="arms", lazy="joined" + ) + draws: Mapped[list["DrawDB"]] = relationship( + "DrawDB", + back_populates="arm", + lazy="joined", + ) + + def to_dict(self) -> dict: + """ + Convert the ORM object to a dictionary. + """ + return { + "arm_id": self.arm_id, + "experiment_id": self.experiment_id, + "name": self.name, + "description": self.description, + "alpha": self.alpha, + "beta": self.beta, + "mu": self.mu, + "covariance": self.covariance, + "alpha_init": self.alpha_init, + "beta_init": self.beta_init, + "mu_init": self.mu_init, + "sigma_init": self.sigma_init, + "draws": [draw.to_dict() for draw in self.draws], + "n_outcomes": self.n_outcomes, + } + + +# --- Draw model --- +class DrawDB(Base): + """ + Base model for draws. 
+ """ + + __tablename__ = "draws" + + # IDs + draw_id: Mapped[str] = mapped_column( + String, primary_key=True, default=lambda x: str(uuid.uuid4()) + ) + arm_id: Mapped[int] = mapped_column( + Integer, ForeignKey("arms.arm_id"), nullable=False + ) + experiment_id: Mapped[int] = mapped_column( + Integer, ForeignKey("experiments.experiment_id"), nullable=False + ) + workspace_id: Mapped[int] = mapped_column( + Integer, ForeignKey("workspace.workspace_id"), nullable=False + ) + client_id: Mapped[str] = mapped_column( + String(length=36), ForeignKey("clients.client_id"), nullable=True + ) + + # Logging + draw_datetime_utc: Mapped[datetime] = mapped_column( + DateTime(timezone=True), + nullable=False, + ) + observed_datetime_utc: Mapped[datetime] = mapped_column( + DateTime(timezone=True), nullable=True + ) + observation_type: Mapped[ObservationType] = mapped_column( + Enum(ObservationType), nullable=True + ) + reward: Mapped[float] = mapped_column(Float, nullable=True) + context_val: Mapped[Optional[list[float]]] = mapped_column( + ARRAY(Float), nullable=True + ) + + # Relationships + arm: Mapped[ArmDB] = relationship("ArmDB", back_populates="draws", lazy="joined") + experiment: Mapped[ExperimentDB] = relationship( + "ExperimentDB", back_populates="draws", lazy="joined" + ) + client: Mapped[Optional["ClientDB"]] = relationship( + "ClientDB", + back_populates="draws", + lazy="joined", + primaryjoin="DrawDB.client_id==ClientDB.client_id", # noqa: E501 + ) + + def to_dict(self) -> dict: + """ + Convert the ORM object to a dictionary. + """ + return { + "draw_id": self.draw_id, + "arm_id": self.arm_id, + "experiment_id": self.experiment_id, + "client_id": self.client_id, + "draw_datetime_utc": self.draw_datetime_utc, + "observed_datetime_utc": self.observed_datetime_utc, + "observation_type": self.observation_type, + "reward": self.reward, + "context_val": self.context_val, + } + + +# --- Context model --- +class ContextDB(Base): + """ + ORM for managing context for an experiment + """ + + __tablename__ = "context" + + # IDs + context_id: Mapped[int] = mapped_column(Integer, primary_key=True, nullable=False) + experiment_id: Mapped[int] = mapped_column( + Integer, ForeignKey("experiments.experiment_id"), nullable=False + ) + workspace_id: Mapped[int] = mapped_column( + Integer, ForeignKey("workspace.workspace_id"), nullable=False + ) + + # Description + name: Mapped[str] = mapped_column(String(length=150), nullable=False) + description: Mapped[str] = mapped_column(String(length=500), nullable=True) + value_type: Mapped[str] = mapped_column(String(length=50), nullable=False) + + # Relationships + experiment: Mapped[ExperimentDB] = relationship( + "ExperimentDB", back_populates="contexts", lazy="joined" + ) + + def to_dict(self) -> dict: + """ + Convert the ORM object to a dictionary. 
+ """ + return { + "context_id": self.context_id, + "name": self.name, + "description": self.description, + "value_type": self.value_type, + } + + +# --- Client model --- +class ClientDB(Base): + """ + ORM for managing clients for an experiment + """ + + __tablename__ = "clients" + + # IDs + client_id: Mapped[str] = mapped_column( + String, primary_key=True, default=lambda x: str(uuid.uuid4()) + ) + experiment_id: Mapped[int] = mapped_column( + Integer, ForeignKey("experiments.experiment_id"), nullable=False + ) + workspace_id: Mapped[int] = mapped_column( + Integer, ForeignKey("workspace.workspace_id"), nullable=False + ) + + # Relationships + draws: Mapped[list[DrawDB]] = relationship( + "DrawDB", + back_populates="client", + lazy="joined", + ) + experiment: Mapped[ExperimentDB] = relationship( + "ExperimentDB", + back_populates="clients", + lazy="joined", + primaryjoin="and_(ClientDB.experiment_id==ExperimentDB.experiment_id," + + "ExperimentDB.sticky_assignment == True)", + ) + + def to_dict(self) -> dict: + """ + Convert the ORM object to a dictionary. + """ + return { + "client_id": self.client_id, + "experiment_id": self.experiment_id, + "workspace_id": self.workspace_id, + "draws": [draw.to_dict() for draw in self.draws], + } + + +# --- Notifications model --- +class NotificationsDB(Base): + """ + Model for notifications. + Note: if you are updating this, you should also update models in + the background celery job + """ + + __tablename__ = "notifications" + + notification_id: Mapped[int] = mapped_column( + Integer, primary_key=True, nullable=False + ) + experiment_id: Mapped[int] = mapped_column( + Integer, ForeignKey("experiments.experiment_id"), nullable=False + ) + user_id: Mapped[int] = mapped_column( + Integer, ForeignKey("users.user_id"), nullable=False + ) + workspace_id: Mapped[int] = mapped_column( + Integer, ForeignKey("workspace.workspace_id"), nullable=False + ) + notification_type: Mapped[EventType] = mapped_column( + Enum(EventType), nullable=False + ) + notification_value: Mapped[int] = mapped_column(Integer, nullable=False) + is_active: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True) + + def to_dict(self) -> dict: + """ + Convert the model to a dictionary. 
+ """ + return { + "notification_id": self.notification_id, + "experiment_id": self.experiment_id, + "user_id": self.user_id, + "notification_type": self.notification_type, + "notification_value": self.notification_value, + "is_active": self.is_active, + } + + +# --- ORM functions --- + + +# ---- Notifications functions ---- +async def save_notifications_to_db( + experiment_id: int, + user_id: int, + workspace_id: int, + notifications: Notifications, + asession: AsyncSession, +) -> list[NotificationsDB]: + """ + Save notifications to the database + """ + notification_records = [] + + if notifications.onTrialCompletion: + notification_row = NotificationsDB( + experiment_id=experiment_id, + user_id=user_id, + workspace_id=workspace_id, + notification_type=EventType.TRIALS_COMPLETED, + notification_value=notifications.numberOfTrials, + is_active=True, + ) + notification_records.append(notification_row) + + if notifications.onDaysElapsed: + notification_row = NotificationsDB( + experiment_id=experiment_id, + user_id=user_id, + workspace_id=workspace_id, + notification_type=EventType.DAYS_ELAPSED, + notification_value=notifications.daysElapsed, + is_active=True, + ) + notification_records.append(notification_row) + + if notifications.onPercentBetter: + notification_row = NotificationsDB( + experiment_id=experiment_id, + user_id=user_id, + workspace_id=workspace_id, + notification_type=EventType.PERCENTAGE_BETTER, + notification_value=notifications.percentBetterThreshold, + is_active=True, + ) + notification_records.append(notification_row) + + asession.add_all(notification_records) + await asession.commit() + + return notification_records + + +async def get_notifications_from_db( + experiment_id: int, user_id: int, workspace_id: int, asession: AsyncSession +) -> Sequence[NotificationsDB]: + """ + Get notifications from the database + """ + statement = ( + select(NotificationsDB) + .where(NotificationsDB.experiment_id == experiment_id) + .where(NotificationsDB.user_id == user_id) + .where(NotificationsDB.workspace_id == workspace_id) + ) + + return (await asession.execute(statement)).scalars().all() + + +# --- Experiment functions --- +async def save_experiment_to_db( + experiment: Experiment, + user_id: int, + workspace_id: int, + asession: AsyncSession, +) -> ExperimentDB: + """ + Save an experiment to the database. 
+ """ + len_contexts = len(experiment.contexts) if experiment.contexts else 1 + contexts = [] + + arms = [ + ArmDB( + workspace_id=workspace_id, + # description + name=arm.name, + description=arm.description, + n_outcomes=0, + # prior variables + mu_init=arm.mu_init, + sigma_init=arm.sigma_init, + mu=[arm.mu_init] * len_contexts, + covariance=( + (np.identity(len_contexts) * arm.sigma_init**2).tolist() + if arm.sigma_init + else [[None]] + ), + alpha_init=arm.alpha_init, + beta_init=arm.beta_init, + alpha=arm.alpha_init, + beta=arm.beta_init, + is_treatment_arm=arm.is_treatment_arm, + ) + for arm in experiment.arms + ] + if experiment.contexts and len_contexts > 0: + contexts = [ + ContextDB( + workspace_id=workspace_id, + name=context.name, + description=context.description, + value_type=context.value_type, + ) + for context in experiment.contexts + ] + + experiment_db = ExperimentDB( + user_id=user_id, + workspace_id=workspace_id, + # description + name=experiment.name, + description=experiment.description, + is_active=experiment.is_active, + # assignments config + sticky_assignment=experiment.sticky_assignment, + auto_fail=experiment.auto_fail, + auto_fail_value=experiment.auto_fail_value, + auto_fail_unit=experiment.auto_fail_unit, + # experiment config + exp_type=experiment.exp_type, + prior_type=experiment.prior_type, + reward_type=experiment.reward_type, + # datetime + created_datetime_utc=datetime.now(timezone.utc), + n_trials=0, + # relationships + arms=arms, + contexts=contexts, + ) + + asession.add(experiment_db) + await asession.commit() + await asession.refresh(experiment_db) + + return experiment_db + + +async def get_all_experiments_from_db( + workspace_id: int, asession: AsyncSession +) -> Sequence[ExperimentDB]: + """ + Get all experiments for a given workspace. + """ + statement = ( + select(ExperimentDB) + .where(ExperimentDB.workspace_id == workspace_id) + .order_by(ExperimentDB.created_datetime_utc.desc()) + ) + return (await asession.execute(statement)).unique().scalars().all() + + +async def get_all_experiment_types_from_db( + workspace_id: int, experiment_type: str, asession: AsyncSession +) -> Sequence[ExperimentDB]: + """ + Get all experiments for a given workspace. + """ + statement = ( + select(ExperimentDB) + .where(ExperimentDB.workspace_id == workspace_id) + .where(ExperimentDB.exp_type == experiment_type) + .order_by(ExperimentDB.created_datetime_utc.desc()) + ) + return (await asession.execute(statement)).unique().scalars().all() + + +async def get_experiment_by_id_from_db( + workspace_id: int, experiment_id: int, asession: AsyncSession +) -> ExperimentDB | None: + """ + Get all experiments for a given workspace. + """ + statement = ( + select(ExperimentDB) + .where(ExperimentDB.workspace_id == workspace_id) + .where(ExperimentDB.experiment_id == experiment_id) + ) + return (await asession.execute(statement)).unique().scalars().one_or_none() + + +async def delete_experiment_by_id_from_db( + workspace_id: int, experiment_id: int, asession: AsyncSession +) -> None: + """ + Delete an experiment by ID for a given workspace. 
+ """ + await asession.execute( + delete(NotificationsDB) + .where(NotificationsDB.workspace_id == workspace_id) + .where(NotificationsDB.experiment_id == experiment_id) + ) + + await asession.execute( + delete(ContextDB) + .where(ContextDB.workspace_id == workspace_id) + .where(ContextDB.experiment_id == experiment_id) + ) + + await asession.execute( + delete(ClientDB) + .where(ClientDB.workspace_id == workspace_id) + .where(ClientDB.experiment_id == experiment_id) + ) + + await asession.execute( + delete(DrawDB) + .where(DrawDB.workspace_id == workspace_id) + .where(DrawDB.experiment_id == experiment_id) + ) + + await asession.execute( + delete(ArmDB) + .where(ArmDB.workspace_id == workspace_id) + .where(ArmDB.experiment_id == experiment_id) + ) + + await asession.execute( + delete(ExperimentDB) + .where(ExperimentDB.workspace_id == workspace_id) + .where(ExperimentDB.experiment_id == experiment_id) + ) + + await asession.commit() + return None + + +# Draw functions +async def get_draw_by_id(draw_id: str, asession: AsyncSession) -> DrawDB | None: + """ + Get a draw by its ID, which should be unique across the system. + """ + statement = select(DrawDB).where(DrawDB.draw_id == draw_id) + result = await asession.execute(statement) + + return result.unique().scalar_one_or_none() + + +async def save_draw_to_db( + draw_id: str, + arm_id: int, + experiment_id: int, + workspace_id: int, + client_id: str | None, + context: list[float] | None, + asession: AsyncSession, +) -> DrawDB: + """ + Save a draw to the database. + """ + draw = DrawDB( + draw_id=draw_id, + arm_id=arm_id, + experiment_id=experiment_id, + workspace_id=workspace_id, + client_id=client_id, + draw_datetime_utc=datetime.now(timezone.utc), + context_val=context, + ) + asession.add(draw) + await asession.commit() + await asession.refresh(draw) + + return draw + + +async def save_observation_to_db( + draw: DrawDB, + reward: float, + observation_type: ObservationType, + asession: AsyncSession, +) -> DrawDB: + """ + Save an observation to the database. + """ + draw.observed_datetime_utc = datetime.now(timezone.utc) + draw.observation_type = observation_type + draw.reward = reward + + await asession.commit() + await asession.refresh(draw) + + return draw + + +async def get_draws_by_experiment_id( + experiment_id: int, asession: AsyncSession +) -> Sequence[DrawDB]: + """ + Get all draws for a given experiment ID. + """ + statement = ( + select(DrawDB) + .where(DrawDB.experiment_id == experiment_id) + .order_by(DrawDB.draw_datetime_utc.desc()) + ) + return (await asession.execute(statement)).unique().scalars().all() + + +async def get_draws_with_rewards_by_experiment_id( + experiment_id: int, asession: AsyncSession +) -> Sequence[DrawDB]: + """ + Get all draws with rewards for a given experiment ID. 
+ """ + statement = ( + select(DrawDB) + .where(DrawDB.experiment_id == experiment_id) + .where(DrawDB.reward.is_not(None)) + .order_by(DrawDB.draw_datetime_utc.desc()) + ) + return (await asession.execute(statement)).unique().scalars().all() diff --git a/backend/app/experiments/routers.py b/backend/app/experiments/routers.py new file mode 100644 index 0000000..feaba03 --- /dev/null +++ b/backend/app/experiments/routers.py @@ -0,0 +1,482 @@ +from typing import Annotated, Optional +from uuid import uuid4 + +import numpy as np +from fastapi import APIRouter, Depends +from fastapi.exceptions import HTTPException +from sqlalchemy.ext.asyncio import AsyncSession + +from ..auth.dependencies import ( + authenticate_workspace_key, + get_verified_user, + require_admin_role, +) +from ..database import get_async_session +from ..users.models import UserDB +from ..utils import setup_logger +from ..workspaces.models import ( + WorkspaceDB, + get_user_default_workspace, +) +from .dependencies import ( + experiments_db_to_schema, + format_rewards_for_arm_update, + update_arm_based_on_outcome, + validate_experiment_and_draw, +) +from .models import ( + delete_experiment_by_id_from_db, + get_all_experiment_types_from_db, + get_all_experiments_from_db, + get_draw_by_id, + get_draws_by_experiment_id, + get_experiment_by_id_from_db, + save_draw_to_db, + save_experiment_to_db, + save_notifications_to_db, +) +from .sampling_utils import choose_arm +from .schemas import ( + ArmResponse, + ContextInput, + ContextType, + DrawResponse, + Experiment, + ExperimentSample, + ExperimentsEnum, + ObservationType, + Outcome, +) + +router = APIRouter(prefix="/experiment", tags=["Experiments"]) + +logger = setup_logger(__name__) + + +# --- POST experiments routers --- +@router.post("/", response_model=ExperimentSample) +async def create_experiment( + experiment: Experiment, + user_db: Annotated[UserDB, Depends(require_admin_role)], + asession: AsyncSession = Depends(get_async_session), +) -> ExperimentSample: + """ + Create a new experiment in the current user's workspace. + """ + workspace_db = await get_user_default_workspace(asession=asession, user_db=user_db) + + if workspace_db is None: + raise HTTPException( + status_code=404, + detail="Workspace not found. Please create a workspace first.", + ) + + experiment_db = await save_experiment_to_db( + experiment=experiment, + workspace_id=workspace_db.workspace_id, + user_id=user_db.user_id, + asession=asession, + ) + notifications = await save_notifications_to_db( + experiment_id=experiment_db.experiment_id, + user_id=user_db.user_id, + workspace_id=workspace_db.workspace_id, + notifications=experiment.notifications, + asession=asession, + ) + + experiment_dict = experiment_db.to_dict() + experiment_dict["notifications"] = [n.to_dict() for n in notifications] + return ExperimentSample.model_validate(experiment_dict) + + +# -- GET experiment routers --- +@router.get("/", response_model=list[ExperimentSample]) +async def get_all_experiments( + user_db: Annotated[UserDB, Depends(get_verified_user)], + asession: AsyncSession = Depends(get_async_session), +) -> list[ExperimentSample]: + """ + Retrieve all experiments for the current user's workspace. + """ + workspace_db = await get_user_default_workspace(asession=asession, user_db=user_db) + + if workspace_db is None: + raise HTTPException( + status_code=404, + detail="Workspace not found. 
Please create a workspace first.", + ) + + experiments = await get_all_experiments_from_db( + workspace_id=workspace_db.workspace_id, + asession=asession, + ) + + all_experiments = await experiments_db_to_schema( + experiments_db=list(experiments), + asession=asession, + ) + return all_experiments + + +@router.get("/type/{experiment_type}", response_model=list[ExperimentSample]) +async def get_all_experiments_by_type( + experiment_type: ExperimentsEnum, + user_db: Annotated[UserDB, Depends(get_verified_user)], + asession: AsyncSession = Depends(get_async_session), +) -> list[ExperimentSample]: + """ + Retrieve all experiments for the current user's workspace. + """ + workspace_db = await get_user_default_workspace(asession=asession, user_db=user_db) + + if workspace_db is None: + raise HTTPException( + status_code=404, + detail="Workspace not found. Please create a workspace first.", + ) + + experiments = await get_all_experiment_types_from_db( + workspace_id=workspace_db.workspace_id, + experiment_type=experiment_type.value, + asession=asession, + ) + + all_experiments = await experiments_db_to_schema( + experiments_db=list(experiments), + asession=asession, + ) + return all_experiments + + +@router.get("/id/{experiment_id}", response_model=ExperimentSample) +async def get_experiment_by_id( + experiment_id: int, + user_db: Annotated[UserDB, Depends(get_verified_user)], + asession: AsyncSession = Depends(get_async_session), +) -> ExperimentSample: + """ + Retrieve a specific experiment by ID for the current user's workspace. + """ + workspace_db = await get_user_default_workspace(asession=asession, user_db=user_db) + + if workspace_db is None: + raise HTTPException( + status_code=404, + detail="Workspace not found. Please create a workspace first.", + ) + + experiment = await get_experiment_by_id_from_db( + workspace_id=workspace_db.workspace_id, + experiment_id=experiment_id, + asession=asession, + ) + + if not experiment: + raise HTTPException( + status_code=404, + detail="Experiment not found.", + ) + + experiment_dict = await experiments_db_to_schema( + experiments_db=[experiment], + asession=asession, + ) + + return experiment_dict[0] + + +# -- DELETE experiment routers --- +@router.delete("/type/{experiment_type}", response_model=dict[str, str]) +async def delete_experiment_by_type( + experiment_type: ExperimentsEnum, + user_db: Annotated[UserDB, Depends(get_verified_user)], + asession: AsyncSession = Depends(get_async_session), +) -> dict[str, str]: + """ + Retrieve a specific experiment by ID for the current user's workspace. + """ + try: + workspace_db = await get_user_default_workspace( + asession=asession, user_db=user_db + ) + + if workspace_db is None: + raise HTTPException( + status_code=404, + detail="Workspace not found. Please create a workspace first.", + ) + + experiments = await get_all_experiment_types_from_db( + workspace_id=workspace_db.workspace_id, + experiment_type=experiment_type.value, + asession=asession, + ) + + if len(experiments) == 0: + raise HTTPException( + status_code=404, + detail="No experiments found.", + ) + + for exp in experiments: + await delete_experiment_by_id_from_db( + workspace_id=workspace_db.workspace_id, + experiment_id=exp.experiment_id, + asession=asession, + ) + + return { + "message": f"Experiments of type {experiment_type} deleted successfully." 
+ } + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Error: {str(e)}", + ) from e + + +@router.delete("/id/{experiment_id}", response_model=dict[str, str]) +async def delete_experiment_by_id( + experiment_id: int, + user_db: Annotated[UserDB, Depends(get_verified_user)], + asession: AsyncSession = Depends(get_async_session), +) -> dict[str, str]: + """ + Retrieve a specific experiment by ID for the current user's workspace. + """ + try: + workspace_db = await get_user_default_workspace( + asession=asession, user_db=user_db + ) + + if workspace_db is None: + raise HTTPException( + status_code=404, + detail="Workspace not found. Please create a workspace first.", + ) + + experiment = await get_experiment_by_id_from_db( + workspace_id=workspace_db.workspace_id, + experiment_id=experiment_id, + asession=asession, + ) + + if not experiment: + raise HTTPException( + status_code=404, + detail="Experiment not found.", + ) + + await delete_experiment_by_id_from_db( + workspace_id=workspace_db.workspace_id, + experiment_id=experiment_id, + asession=asession, + ) + + return {"message": f"Experiment with id {experiment_id} deleted successfully."} + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Error: {str(e)}", + ) from e + + +# --- Draw and update arms --- +@router.put("/{experiment_id}/draw", response_model=DrawResponse) +async def draw_experiment_arm( + experiment_id: int, + contexts: Optional[list[ContextInput]] = None, + draw_id: Optional[str] = None, + workspace_db: WorkspaceDB = Depends(authenticate_workspace_key), + asession: AsyncSession = Depends(get_async_session), +) -> DrawResponse: + """ + Draw an arm from the specified experiment. + """ + workspace_id = workspace_db.workspace_id + + experiment = await get_experiment_by_id_from_db( + workspace_id=workspace_id, experiment_id=experiment_id, asession=asession + ) + if experiment is None: + raise HTTPException( + status_code=404, detail=f"Experiment with id {experiment_id} not found" + ) + + # Check contexts + if (experiment.exp_type == ExperimentsEnum.CMAB.value) and (not contexts): + raise HTTPException( + status_code=400, detail="Context is required for CMAB experiments." + ) + elif (experiment.exp_type == ExperimentsEnum.CMAB.value) and contexts: + context_length = 0 if not experiment.contexts else len(experiment.contexts) + if len(contexts) != context_length: + raise HTTPException( + status_code=400, + detail=( + f"Expected {context_length} contexts" f" but got {len(contexts)}." + ), + ) + + # Check for existing draws + if draw_id is None: + draw_id = str(uuid4()) + + existing_draw = await get_draw_by_id(draw_id=draw_id, asession=asession) + if existing_draw: + raise HTTPException( + status_code=400, detail=f"Draw with id {draw_id} already exists." + ) + + # -- Perform the draw --- + experiment_data = ExperimentSample.model_validate(experiment.to_dict()) + + # Validate contexts input + if contexts: + sorted_contexts = list(sorted(contexts, key=lambda x: x.context_id)) + try: + exp_contexts = experiment_data.contexts or [] + sorted_exp_contexts = ( + sorted(exp_contexts, key=lambda x: x.context_id) if exp_contexts else [] + ) + if [c1.context_id for c1 in sorted_contexts] != [ + c2.context_id for c2 in sorted_exp_contexts + ]: + raise ValueError( + "Provided contexts do not match the experiment's expected contexts." 
+ ) + for c_input, c_exp in zip( + sorted_contexts, + sorted_exp_contexts, + ): + if c_exp.value_type == ContextType.BINARY.value: + Outcome(c_input.context_value) + except ValueError as e: + raise HTTPException( + status_code=400, + detail=f"Invalid context value: {e}", + ) from e + + # Choose arm + chosen_arm = choose_arm( + experiment=experiment_data, + context=[c.context_value for c in sorted_contexts] if contexts else None, + ) + chosen_arm_id = experiment.arms[chosen_arm].arm_id + + try: + draw = await save_draw_to_db( + draw_id=draw_id, + arm_id=chosen_arm_id, + experiment_id=experiment_id, + workspace_id=workspace_id, + client_id=None, # TODO: Update for sticky assignment + context=[c.context_value for c in sorted_contexts] if contexts else None, + asession=asession, + ) + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Error saving draw: {str(e)}", + ) from e + + draw_response_data = { + "draw_id": draw_id, + "draw_datetime_utc": str(draw.draw_datetime_utc), + "arm": experiment_data.arms[chosen_arm], + "context_val": draw.context_val, + } + return DrawResponse.model_validate(draw_response_data) + + +@router.put("/{experiment_id}/{draw_id}/{reward}", response_model=ArmResponse) +async def update_experiment_arm( + experiment_id: int, + draw_id: str, + reward: float, + workspace_db: WorkspaceDB = Depends(authenticate_workspace_key), + asession: AsyncSession = Depends(get_async_session), +) -> ArmResponse: + """ + Update the arm with the given reward. + """ + + experiment, draw = await validate_experiment_and_draw( + experiment_id=experiment_id, + draw_id=draw_id, + workspace_id=workspace_db.workspace_id, + asession=asession, + ) + + # Get rewards + chosen_arm_index = int( + np.argwhere(np.array([arm.arm_id for arm in experiment.arms]) == draw.arm_id)[ + 0 + ][0], + ) + rewards_list, context_list, treatments_list = await format_rewards_for_arm_update( + experiment=experiment, + chosen_arm_id=draw.arm_id, + reward=reward, + context_val=draw.context_val, + asession=asession, + ) + + # Update the arm with the given reward + try: + await update_arm_based_on_outcome( + experiment=experiment, + draw=draw, + rewards=rewards_list, + contexts=context_list, + treatments=treatments_list, + observation_type=ObservationType.USER, + asession=asession, + ) + + return ArmResponse.model_validate(experiment.arms[chosen_arm_index]) + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Error updating arm: {str(e)}", + ) from e + + +@router.get("/{experiment_id}/rewards", response_model=list[DrawResponse]) +async def get_rewards( + experiment_id: int, + workspace_db: WorkspaceDB = Depends(authenticate_workspace_key), + asession: AsyncSession = Depends(get_async_session), +) -> list[DrawResponse]: + """ + Retrieve all rewards for the specified experiment. 
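+
+    Notes
+    -----
+    Only draws whose reward has already been observed are returned; draws
+    that are still pending (``reward`` is null) are filtered out when the
+    draws are loaded from the database.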
+ """ + experiment = await get_experiment_by_id_from_db( + workspace_id=workspace_db.workspace_id, + experiment_id=experiment_id, + asession=asession, + ) + + if not experiment: + raise HTTPException( + status_code=404, detail=f"Experiment with id {experiment_id} not found" + ) + + draws = await get_draws_by_experiment_id( + experiment_id=experiment_id, asession=asession + ) + + return [ + DrawResponse.model_validate( + { + "draw_id": draw.draw_id, + "draw_datetime_utc": str(draw.draw_datetime_utc), + "observed_datetime_utc": str(draw.observed_datetime_utc), + "arm": [arm for arm in experiment.arms if arm.arm_id == draw.arm_id][0], + "reward": draw.reward, + "context_val": draw.context_val, + } + ) + for draw in draws + ] diff --git a/backend/app/experiments/sampling_utils.py b/backend/app/experiments/sampling_utils.py new file mode 100644 index 0000000..7ae8eca --- /dev/null +++ b/backend/app/experiments/sampling_utils.py @@ -0,0 +1,324 @@ +from typing import Any, Optional, Union + +import numpy as np +from numpy.random import beta +from scipy.optimize import minimize + +from .schemas import ( + ArmPriors, + ContextLinkFunctions, + ExperimentSample, + ExperimentsEnum, + Outcome, + RewardLikelihood, +) + + +# ------------- Utilities for sampling and updating arms ---------------- +# --- Sampling functions for Thompson Sampling --- +def _sample_beta_binomial(alphas: np.ndarray, betas: np.ndarray) -> int: + """ + Thompson Sampling with Beta-Binomial distribution. + + Parameters + ---------- + alphas : alpha parameter of Beta distribution for each arm + betas : beta parameter of Beta distribution for each arm + """ + samples = beta(alphas, betas) + return int(samples.argmax()) + + +def _sample_normal( + mus: list[np.ndarray], + covariances: list[np.ndarray], + context: np.ndarray, + link_function: ContextLinkFunctions, +) -> int: + """ + Thompson Sampling with normal prior. + + Parameters + ---------- + mus: mean of Normal distribution for each arm + covariances: covariance matrix of Normal distribution for each arm + context: context vector + link_function: link function for the context + """ + samples = np.array( + [ + np.random.multivariate_normal(mean=mu, cov=cov) + for mu, cov in zip(mus, covariances) + ] + ).reshape(-1, len(context)) + probs = link_function(samples @ context) + return int(probs.argmax()) + + +# --- Arm update functions --- +def _update_arm_beta_binomial( + alpha: float, beta: float, reward: Outcome +) -> tuple[float, float]: + """ + Update the alpha and beta parameters of the Beta distribution. + + Parameters + ---------- + alpha : int + The alpha parameter of the Beta distribution. + beta : int + The beta parameter of the Beta distribution. + reward : Outcome + The reward of the arm. + """ + if reward == Outcome.SUCCESS: + + return alpha + 1, beta + else: + return alpha, beta + 1 + + +def _update_arm_normal( + current_mu: np.ndarray, + current_covariance: np.ndarray, + reward: float, + llhood_sigma: float, + context: np.ndarray, +) -> tuple[float, np.ndarray]: + """ + Update the mean and standard deviation of the Normal distribution. + + Parameters + ---------- + current_mu : The mean of the Normal distribution. + current_covariance : The covariance of the Normal distribution. + reward : The reward of the arm. + llhood_sigma : The standard deviation of the likelihood. + context : The context vector. 
+ """ + # Likelihood covariance matrix inverse + llhood_covariance_inv = np.eye(len(current_mu)) / llhood_sigma**2 + llhood_covariance_inv *= context.T @ context + + # Prior covariance matrix inverse + prior_covariance_inv = np.linalg.inv(current_covariance) + + # New covariance + new_covariance = np.linalg.inv(prior_covariance_inv + llhood_covariance_inv) + + # New mean + llhood_term: Union[np.ndarray, float] = reward / llhood_sigma**2 + if context is not None: + llhood_term = (context * llhood_term).squeeze() + + new_mu = new_covariance @ ((prior_covariance_inv @ current_mu) + llhood_term) + return new_mu.tolist(), new_covariance.tolist() + + +def _update_arm_laplace( + current_mu: np.ndarray, + current_covariance: np.ndarray, + reward: np.ndarray, + context: np.ndarray, + link_function: ContextLinkFunctions, + reward_likelihood: RewardLikelihood, + prior_type: ArmPriors, +) -> tuple[np.ndarray, np.ndarray]: + """ + Update the mean and covariance using the Laplace approximation. + + Parameters + ---------- + current_mu : The mean of the normal distribution. + current_covariance : The covariance matrix of the normal distribution. + reward : The list of rewards for the arm. + context : The list of contexts for the arm. + link_function : The link function for parameters to rewards. + reward_likelihood : The likelihood function of the reward. + prior_type : The prior type of the arm. + """ + + def objective(theta: np.ndarray) -> float: + """ + Objective function for the Laplace approximation. + + Parameters + ---------- + theta : The parameters of the arm. + """ + # Log prior + log_prior = prior_type(theta, mu=current_mu, covariance=current_covariance) + + # Log likelihood + log_likelihood = reward_likelihood(reward, link_function(context @ theta)) + + return -log_prior - log_likelihood + + result = minimize( + objective, x0=np.zeros_like(current_mu), method="L-BFGS-B", hess="2-point" + ) + new_mu = result.x + covariance = result.hess_inv.todense() # type: ignore + + new_covariance = 0.5 * (covariance + covariance.T) + return new_mu.tolist(), new_covariance.tolist() + + +# ------------- Import functions ---------------- +# --- Choose arm function --- +def choose_arm( + experiment: ExperimentSample, context: Optional[Union[list, np.ndarray, None]] +) -> int: + """ + Choose arm based on posterior using Thompson Sampling. + + Parameters + ---------- + experiment: The experiment data containing priors and rewards for each arm. + context: Optional context vector for the experiment. 
+ """ + # Choose arms with equal probability for Bayesian A/B tests + if experiment.exp_type == ExperimentsEnum.BAYESAB: + index = np.random.choice(len(experiment.arms), size=1) + return int(index[0]) + else: + if experiment.prior_type == ArmPriors.BETA: + if experiment.reward_type != RewardLikelihood.BERNOULLI: + raise ValueError("Beta prior is only supported for Bernoulli rewards.") + alphas = np.array([arm.alpha for arm in experiment.arms]) + betas = np.array([arm.beta for arm in experiment.arms]) + + return _sample_beta_binomial(alphas=alphas, betas=betas) + + elif experiment.prior_type == ArmPriors.NORMAL: + mus = [np.array(arm.mu) for arm in experiment.arms] + covariances = [np.array(arm.covariance) for arm in experiment.arms] + + context_array = ( + np.ones_like(mus[0]) if context is None else np.array(context) + ) + + return _sample_normal( + mus=mus, + covariances=covariances, + context=context_array, + link_function=( + ContextLinkFunctions.NONE + if experiment.reward_type == RewardLikelihood.NORMAL + else ContextLinkFunctions.LOGISTIC + ), + ) + + +# --- Update arm parameters --- +def update_arm( + experiment: ExperimentSample, + rewards: list[float], + arm_to_update: Optional[int] = None, + context: Optional[Union[list, np.ndarray, None]] = None, + treatments: Optional[list[float]] = None, +) -> Any: + """ + Update the arm parameters based on the experiment type and reward. + + Parameters + ---------- + experiment: The experiment data containing arms, prior type and reward + type information. + rewards: The rewards received from the arm. + context: The context vector for the arm. + treatments: The treatments applied to the arm, for a Bayesian A/B test. + """ + + # NB: For Bayesian AB tests, we assume that the update runs + # AFTER all rewards have been observed. + # We hijack the Laplace approximation function to update the + # model parameters as follows: + # 1. current_mu -> [treatment_mu, control_mu, bias_mu = 0] + # 2. current_covariance -> [treatment_sigma, control_sigma, bias_sigma = 1] + # 3. context -> [is_treatment_arm, is_control_arm, 1] + if experiment.exp_type == ExperimentsEnum.BAYESAB: + + assert treatments, "Treatments must be provided for Bayesian A/B tests." + assert [ + arm.mu for arm in experiment.arms + ], "Arms must have mu parameters for Bayesian A/B tests." + assert [ + arm.covariance for arm in experiment.arms + ], "Arms must have covariance parameters for Bayesian A/B tests." + + mus = np.array([arm.mu[0] for arm in experiment.arms if arm.mu] + [0.0]) + covariances = np.diag( + [ + np.array(arm.covariance).ravel()[0] + for arm in experiment.arms + if arm.covariance + ] + + [1.0] + ) + context = np.zeros((len(rewards), 3)) if not context else np.array(context) + context[:, 0] = np.array(treatments) + context[:, 1] = 1.0 - np.array(treatments) + context[:, 2] = 1.0 + + new_mus, new_covariances = _update_arm_laplace( + current_mu=mus, + current_covariance=covariances, + reward=np.array(rewards), + context=context, + link_function=( + ContextLinkFunctions.NONE + if experiment.reward_type == RewardLikelihood.NORMAL + else ContextLinkFunctions.LOGISTIC + ), + reward_likelihood=experiment.reward_type, + prior_type=experiment.prior_type, + ) + + treatment_mu, control_mu, _ = new_mus + treatment_sigma, control_sigma, _ = np.diag(new_covariances) + return [treatment_mu, control_mu], [ + [[float(treatment_sigma)]], + [[float(control_sigma)]], + ] + else: + # Update for MABs and CMABs + assert arm_to_update is not None, "Arm to update must be provided." 
+ arm = experiment.arms[arm_to_update] + + # Beta-binomial priors + if experiment.prior_type == ArmPriors.BETA: + assert arm.alpha and arm.beta, "Arm must have alpha and beta parameters." + return _update_arm_beta_binomial( + alpha=arm.alpha, beta=arm.beta, reward=Outcome(rewards[0]) + ) + + # Normal priors + elif experiment.prior_type == ArmPriors.NORMAL: + assert ( + arm.mu and arm.covariance + ), "Arm must have mu and covariance parameters." + if context is None: + context = np.ones((1, len(arm.mu))) + # Normal likelihood + if experiment.reward_type == RewardLikelihood.NORMAL: + return _update_arm_normal( + current_mu=np.array(arm.mu), + current_covariance=np.array(arm.covariance), + reward=rewards[0], + llhood_sigma=1.0, # TODO: Assuming a fixed likelihood sigma + context=np.array(context[0]), + ) + # TODO: only supports Bernoulli likelihood + else: + return _update_arm_laplace( + current_mu=np.array(arm.mu), + current_covariance=np.array(arm.covariance), + reward=np.array(rewards), + context=np.array(context), + link_function=ContextLinkFunctions.LOGISTIC, + reward_likelihood=experiment.reward_type, + prior_type=experiment.prior_type, + ) + else: + raise ValueError("Unsupported prior type for arm update.") diff --git a/backend/app/experiments/schemas.py b/backend/app/experiments/schemas.py new file mode 100644 index 0000000..ffd7e6e --- /dev/null +++ b/backend/app/experiments/schemas.py @@ -0,0 +1,548 @@ +from enum import Enum, StrEnum +from typing import Any, List, Optional, Self, Union + +import numpy as np +from pydantic import BaseModel, ConfigDict, Field, model_validator +from pydantic.types import NonNegativeInt + + +# --- Enums --- +class ExperimentsEnum(StrEnum): + """ + Enum for the experiment types. + """ + + MAB = "mab" + CMAB = "cmab" + BAYESAB = "bayes_ab" + + +class EventType(StrEnum): + """Types of events that can trigger a notification""" + + DAYS_ELAPSED = "days_elapsed" + TRIALS_COMPLETED = "trials_completed" + PERCENTAGE_BETTER = "percentage_better" + + +class ObservationType(StrEnum): + """Types of observations that can be made""" + + USER = "user" # Generated by the user + AUTO = "auto" # Generated by the system + + +class AutoFailUnitType(StrEnum): + """Types of units for auto fail""" + + DAYS = "days" + HOURS = "hours" + + +class Outcome(float, Enum): + """ + Enum for the outcome of a trial. + """ + + SUCCESS = 1 + FAILURE = 0 + + +class ArmPriors(StrEnum): + """ + Enum for the prior distribution of the arm. + """ + + BETA = "beta" + NORMAL = "normal" + + def __call__(self, theta: np.ndarray, **kwargs: Any) -> np.ndarray: + """ + Return the log pdf of the input param. + """ + if self == ArmPriors.BETA: + alpha = kwargs.get("alpha", np.ones_like(theta)) + beta = kwargs.get("beta", np.ones_like(theta)) + return (alpha - 1) * np.log(theta) + (beta - 1) * np.log(1 - theta) + + elif self == ArmPriors.NORMAL: + mu = kwargs.get("mu", np.zeros_like(theta)) + covariance = kwargs.get("covariance", np.diag(np.ones_like(theta))) + inv_cov = np.linalg.inv(covariance) + x = theta - mu + return -0.5 * x @ inv_cov @ x + + +class RewardLikelihood(StrEnum): + """ + Enum for the likelihood distribution of the reward. + """ + + BERNOULLI = "binary" + NORMAL = "real-valued" + + def __call__(self, reward: np.ndarray, probs: np.ndarray) -> np.ndarray: + """ + Calculate the log likelihood of the reward. + + Parameters + ---------- + reward : The reward. + probs : The probability of the reward. 
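+
+        Notes
+        -----
+        For real-valued rewards this is the unnormalised Gaussian
+        log-likelihood ``-0.5 * sum((reward - probs) ** 2)`` (unit variance);
+        for binary rewards it is the Bernoulli log-likelihood
+        ``sum(reward * log(probs) + (1 - reward) * log(1 - probs))``.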
+ """ + if self == RewardLikelihood.NORMAL: + return -0.5 * np.sum((reward - probs) ** 2) + elif self == RewardLikelihood.BERNOULLI: + return np.sum(reward * np.log(probs) + (1 - reward) * np.log(1 - probs)) + + +class ContextType(StrEnum): + """ + Enum for the type of context. + """ + + BINARY = "binary" + REAL_VALUED = "real-valued" + + +class ContextLinkFunctions(StrEnum): + """ + Enum for the link function of the arm params and context. + """ + + NONE = "none" + LOGISTIC = "logistic" + + def __call__(self, x: np.ndarray) -> np.ndarray: + """ + Apply the link function to the input param. + + Parameters + ---------- + x : The input param. + """ + if self == ContextLinkFunctions.NONE: + return x + elif self == ContextLinkFunctions.LOGISTIC: + return 1.0 / (1.0 + np.exp(-x)) + + +# --- Schemas --- +# Notifications schema +class Notifications(BaseModel): + """ + Pydantic model for a notifications. + """ + + onTrialCompletion: bool = False + numberOfTrials: NonNegativeInt | None + onDaysElapsed: bool = False + daysElapsed: NonNegativeInt | None + onPercentBetter: bool = False + percentBetterThreshold: NonNegativeInt | None + + @model_validator(mode="after") + def validate_has_assocatiated_value(self) -> Self: + """ + Validate that the required corresponding fields have been set. + """ + if self.onTrialCompletion and ( + not self.numberOfTrials or self.numberOfTrials == 0 + ): + raise ValueError( + "numberOfTrials is required when onTrialCompletion is True" + ) + if self.onDaysElapsed and (not self.daysElapsed or self.daysElapsed == 0): + raise ValueError("daysElapsed is required when onDaysElapsed is True") + if self.onPercentBetter and ( + not self.percentBetterThreshold or self.percentBetterThreshold == 0 + ): + raise ValueError( + "percentBetterThreshold is required when onPercentBetter is True" + ) + + return self + + +class NotificationsResponse(BaseModel): + """ + Pydantic model for a response for notifications + """ + + model_config = ConfigDict(from_attributes=True) + + notification_id: int + notification_type: EventType + notification_value: NonNegativeInt + is_active: bool + + +# Arms +class Arm(BaseModel): + """ + Pydantic model for an arm. + """ + + model_config = ConfigDict(from_attributes=True) + + # Description + name: str = Field( + max_length=150, + examples=["Arm 1"], + ) + description: str = Field( + max_length=500, + examples=["This is a description of the arm."], + ) + + # Prior variables + alpha_init: Optional[float] = Field( + default=None, examples=[None, 1.0], description="Alpha parameter for Beta prior" + ) + beta_init: Optional[float] = Field( + default=None, examples=[None, 1.0], description="Beta parameter for Beta prior" + ) + mu_init: Optional[float] = Field( + default=None, + examples=[None, 0.0], + description="Mean parameter for Normal prior", + ) + sigma_init: Optional[float] = Field( + default=None, + examples=[None, 1.0], + description="Standard deviation parameter for Normal prior", + ) + is_treatment_arm: Optional[bool] = Field( + default=True, + description="Whether the arm is a treatment arm or not", + ) + + @model_validator(mode="after") + def check_values(self) -> Self: + """ + Check if the values are unique. 
+ """ + alpha = self.alpha_init + beta = self.beta_init + sigma = self.sigma_init + if alpha is not None and alpha <= 0: + raise ValueError("Alpha must be greater than 0.") + if beta is not None and beta <= 0: + raise ValueError("Beta must be greater than 0.") + if sigma is not None and sigma <= 0: + raise ValueError("Sigma must be greater than 0.") + return self + + +class ArmResponse(Arm): + """ + Pydantic model for an response for arm creation + """ + + arm_id: int + experiment_id: int + n_outcomes: int + alpha: Optional[Union[float, None]] + beta: Optional[Union[float, None]] + mu: Optional[List[Union[float, None]]] + covariance: Optional[List[List[Union[float, None]]]] + model_config = ConfigDict( + from_attributes=True, + ) + + +# Contexts +class Context(BaseModel): + """ + Pydantic model for a binary-valued context of the experiment. + """ + + name: str = Field( + description="Name of the context", + examples=["Context 1"], + ) + description: str = Field( + description="Description of the context", + examples=["This is a description of the context."], + ) + value_type: ContextType = Field( + description="Type of value the context can take", default=ContextType.BINARY + ) + model_config = ConfigDict(from_attributes=True) + + +class ContextResponse(Context): + """ + Pydantic model for an response for context creation + """ + + context_id: int + model_config = ConfigDict(from_attributes=True) + + +class ContextInput(BaseModel): + """ + Pydantic model for a context input + """ + + context_id: int + context_value: float + model_config = ConfigDict(from_attributes=True) + + +# Client +class Client(BaseModel): + """ + Pydantic model for a client. + """ + + model_config = ConfigDict(from_attributes=True) + + client_id: str = Field( + description="Unique identifier for the client", + examples=["client_123"], + ) + + +class DrawResponse(BaseModel): + """ + Pydantic model for a response for draw creation + """ + + model_config = ConfigDict(from_attributes=True) + + draw_id: str = Field( + description="Unique identifier for the draw", + examples=["draw_123"], + ) + draw_datetime_utc: str = Field( + description="Timestamp of when the draw was made", + examples=["2023-10-01T12:00:00Z"], + ) + observed_datetime_utc: Optional[str] = Field( + description="Timestamp of when the reward was observed", + default=None, + ) + + # Draw info + reward: Optional[float] = Field( + description="Reward observed from the draw", + default=None, + ) + context_val: Optional[list[float]] = Field( + description="Context values associated with the draw", + default=None, + ) + arm: ArmResponse + client: Optional[Client] = None + + +# Experiments +class ExperimentBase(BaseModel): + """ + Pydantic base model for an experiment. + + Note: This is a base model and should not be used directly. + Use the `Experiment` model instead. + """ + + model_config = ConfigDict(from_attributes=True) + + # Description + name: str = Field( + max_length=150, + examples=["Experiment 1"], + ) + description: str = Field( + max_length=500, + examples=["This is a description of the experiment."], + ) + + is_active: bool = True + + # Assignments config + sticky_assignment: bool = Field( + description="Whether the arm assignment is sticky or not.", + default=False, + ) + + auto_fail: bool = Field( + description=( + "Whether the experiment should fail automatically after " + "a certain period if no outcome is registered." 
+ ), + default=False, + ) + + auto_fail_value: Optional[int] = Field( + description="The time period after which the experiment should fail.", + default=None, + ) + + auto_fail_unit: Optional[AutoFailUnitType] = Field( + description="The time unit for the auto fail period.", + default=None, + ) + + # Experiment config + exp_type: ExperimentsEnum = Field( + description="The type of experiment.", + default=ExperimentsEnum.MAB, + ) + prior_type: ArmPriors = Field( + description="The type of prior distribution for the arms.", + default=ArmPriors.BETA, + ) + reward_type: RewardLikelihood = Field( + description="The type of reward we observe from the experiment.", + default=RewardLikelihood.BERNOULLI, + ) + + +class Experiment(ExperimentBase): + """ + Pydantic model for an experiment. + """ + + # Relationships + arms: list[Arm] + notifications: Notifications + contexts: Optional[list[Context]] + clients: Optional[list[Client]] + + @model_validator(mode="after") + def auto_fail_unit_and_value_set(self) -> Self: + """ + Validate that the auto fail unit and value are set if auto fail is True. + """ + if self.auto_fail: + if ( + not self.auto_fail_value + or not self.auto_fail_unit + or self.auto_fail_value <= 0 + ): + raise ValueError( + ( + "Auto fail is enabled. " + "Please provide both auto_fail_value and auto_fail_unit." + ) + ) + return self + + @model_validator(mode="after") + def check_num_arms(self) -> Self: + """ + Validate that the experiment has at least two arms. + """ + if len(self.arms) < 2: + raise ValueError("The experiment must have at least two arms.") + if self.exp_type == ExperimentsEnum.BAYESAB and len(self.arms) > 2: + raise ValueError("Bayes AB experiments can only have two arms.") + return self + + @model_validator(mode="after") + def check_arm_missing_params(self) -> Self: + """ + Check if the arm reward type is same as the experiment reward type. + """ + prior_type = self.prior_type + arms = self.arms + + prior_params = { + ArmPriors.BETA: ("alpha_init", "beta_init"), + ArmPriors.NORMAL: ("mu_init", "sigma_init"), + } + + for arm in arms: + arm_dict = arm.model_dump() + if prior_type in prior_params: + missing_params = [] + for param in prior_params[prior_type]: + if param not in arm_dict.keys(): + missing_params.append(param) + elif arm_dict[param] is None: + missing_params.append(param) + + if missing_params: + val = prior_type.value + raise ValueError(f"{val} prior needs {','.join(missing_params)}.") + return self + + @model_validator(mode="after") + def check_treatment_info(self) -> Self: + """ + Validate that the treatment arm information is set correctly. + """ + arms = self.arms + if self.exp_type == ExperimentsEnum.BAYESAB: + if not any(arm.is_treatment_arm for arm in arms): + raise ValueError("At least one arm must be a treatment arm.") + if all(arm.is_treatment_arm for arm in arms): + raise ValueError("At least one arm must be a control arm.") + return self + + @model_validator(mode="after") + def check_prior_reward_type_combo(self) -> Self: + """ + Validate that the prior and reward type combination is allowed. + """ + if self.prior_type == ArmPriors.BETA: + if not self.reward_type == RewardLikelihood.BERNOULLI: + raise ValueError( + "Beta prior can only be used with binary-valued rewards." + ) + if self.exp_type != ExperimentsEnum.MAB: + raise ValueError( + f"Experiments of type {self.exp_type} can only use Gaussian priors." 
+ ) + + return self + + @model_validator(mode="after") + def check_contexts(self) -> Self: + """ + Validate that the contexts inputs are valid. + """ + if self.exp_type == "cmab" and not self.contexts: + raise ValueError("Contextual MAB experiments require at least one context.") + if self.exp_type != "cmab" and self.contexts: + raise ValueError( + "Contexts are only applicable for contextual MAB experiments." + ) + return self + + model_config = ConfigDict(from_attributes=True) + + +class ExperimentResponse(ExperimentBase): + """ + Pydantic model for a response for experiment creation + """ + + experiment_id: int + n_trials: int + last_trial_datetime_utc: Optional[str] = None + + arms: list[ArmResponse] + notifications: list[NotificationsResponse] + contexts: Optional[list[ContextResponse]] = None + clients: Optional[list[Client]] = None + + model_config = ConfigDict(from_attributes=True) + + +class ExperimentSample(ExperimentBase): + """ + Pydantic model for experiments for drawing and updating arms. + """ + + experiment_id: int + n_trials: int + last_trial_datetime_utc: Optional[str] = None + observation_type: ObservationType = ObservationType.USER + + arms: list[ArmResponse] + contexts: Optional[list[ContextResponse]] = None + clients: Optional[list[Client]] = None + + model_config = ConfigDict(from_attributes=True) diff --git a/backend/app/mab/__init__.py b/backend/app/mab/__init__.py deleted file mode 100644 index fa07d07..0000000 --- a/backend/app/mab/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .routers import router # noqa: F401 diff --git a/backend/app/mab/models.py b/backend/app/mab/models.py deleted file mode 100644 index f14d483..0000000 --- a/backend/app/mab/models.py +++ /dev/null @@ -1,419 +0,0 @@ -from datetime import datetime, timezone -from typing import Sequence - -from sqlalchemy import ( - Float, - ForeignKey, - and_, - delete, - select, -) -from sqlalchemy.ext.asyncio import AsyncSession -from sqlalchemy.orm import Mapped, mapped_column, relationship - -from ..models import ( - ArmBaseDB, - DrawsBaseDB, - ExperimentBaseDB, - NotificationsDB, -) -from ..schemas import ObservationType -from .schemas import MultiArmedBandit - - -class MultiArmedBanditDB(ExperimentBaseDB): - """ - ORM for managing experiments. - """ - - __tablename__ = "mabs" - - experiment_id: Mapped[int] = mapped_column( - ForeignKey("experiments_base.experiment_id", ondelete="CASCADE"), - primary_key=True, - nullable=False, - ) - arms: Mapped[list["MABArmDB"]] = relationship( - "MABArmDB", back_populates="experiment", lazy="joined" - ) - - draws: Mapped[list["MABDrawDB"]] = relationship( - "MABDrawDB", back_populates="experiment", lazy="joined" - ) - - __mapper_args__ = {"polymorphic_identity": "mabs"} - - def to_dict(self) -> dict: - """ - Convert the ORM object to a dictionary. 
- """ - return { - "experiment_id": self.experiment_id, - "user_id": self.user_id, - "workspace_id": self.workspace_id, - "name": self.name, - "description": self.description, - "sticky_assignment": self.sticky_assignment, - "auto_fail": self.auto_fail, - "auto_fail_value": self.auto_fail_value, - "auto_fail_unit": self.auto_fail_unit, - "created_datetime_utc": self.created_datetime_utc, - "is_active": self.is_active, - "n_trials": self.n_trials, - "arms": [arm.to_dict() for arm in self.arms], - "prior_type": self.prior_type, - "reward_type": self.reward_type, - } - - -class MABArmDB(ArmBaseDB): - """ - ORM for managing arms of an experiment - """ - - __tablename__ = "mab_arms" - - arm_id: Mapped[int] = mapped_column( - ForeignKey("arms_base.arm_id", ondelete="CASCADE"), - primary_key=True, - nullable=False, - ) - - # prior variables for MAB arms - alpha: Mapped[float] = mapped_column(Float, nullable=True) - beta: Mapped[float] = mapped_column(Float, nullable=True) - mu: Mapped[float] = mapped_column(Float, nullable=True) - sigma: Mapped[float] = mapped_column(Float, nullable=True) - alpha_init: Mapped[float] = mapped_column(Float, nullable=True) - beta_init: Mapped[float] = mapped_column(Float, nullable=True) - mu_init: Mapped[float] = mapped_column(Float, nullable=True) - sigma_init: Mapped[float] = mapped_column(Float, nullable=True) - experiment: Mapped[MultiArmedBanditDB] = relationship( - "MultiArmedBanditDB", back_populates="arms", lazy="joined" - ) - - draws: Mapped[list["MABDrawDB"]] = relationship( - "MABDrawDB", back_populates="arm", lazy="joined" - ) - - __mapper_args__ = {"polymorphic_identity": "mab_arms"} - - def to_dict(self) -> dict: - """ - Convert the ORM object to a dictionary. - """ - return { - "arm_id": self.arm_id, - "name": self.name, - "description": self.description, - "alpha": self.alpha, - "beta": self.beta, - "mu": self.mu, - "sigma": self.sigma, - "alpha_init": self.alpha_init, - "beta_init": self.beta_init, - "mu_init": self.mu_init, - "sigma_init": self.sigma_init, - "draws": [draw.to_dict() for draw in self.draws], - } - - -class MABDrawDB(DrawsBaseDB): - """ - ORM for managing draws of an experiment - """ - - __tablename__ = "mab_draws" - - draw_id: Mapped[str] = mapped_column( - ForeignKey("draws_base.draw_id", ondelete="CASCADE"), - primary_key=True, - nullable=False, - ) - - arm: Mapped[MABArmDB] = relationship( - "MABArmDB", back_populates="draws", lazy="joined" - ) - experiment: Mapped[MultiArmedBanditDB] = relationship( - "MultiArmedBanditDB", back_populates="draws", lazy="joined" - ) - - __mapper_args__ = {"polymorphic_identity": "mab_draws"} - - def to_dict(self) -> dict: - """ - Convert the ORM object to a dictionary. - """ - return { - "draw_id": self.draw_id, - "client_id": self.client_id, - "draw_datetime_utc": self.draw_datetime_utc, - "arm_id": self.arm_id, - "experiment_id": self.experiment_id, - "user_id": self.user_id, - "reward": self.reward, - "observation_type": self.observation_type, - "observed_datetime_utc": self.observed_datetime_utc, - } - - -async def save_mab_to_db( - experiment: MultiArmedBandit, - user_id: int, - workspace_id: int, - asession: AsyncSession, -) -> MultiArmedBanditDB: - """ - Save the experiment to the database. 
- """ - arms = [ - MABArmDB( - name=arm.name, - description=arm.description, - alpha_init=arm.alpha_init, - beta_init=arm.beta_init, - mu_init=arm.mu_init, - sigma_init=arm.sigma_init, - n_outcomes=arm.n_outcomes, - alpha=arm.alpha_init, - beta=arm.beta_init, - mu=arm.mu_init, - sigma=arm.sigma_init, - user_id=user_id, - ) - for arm in experiment.arms - ] - experiment_db = MultiArmedBanditDB( - name=experiment.name, - description=experiment.description, - user_id=user_id, - workspace_id=workspace_id, - is_active=experiment.is_active, - created_datetime_utc=datetime.now(timezone.utc), - n_trials=0, - arms=arms, - sticky_assignment=experiment.sticky_assignment, - auto_fail=experiment.auto_fail, - auto_fail_value=experiment.auto_fail_value, - auto_fail_unit=experiment.auto_fail_unit, - prior_type=experiment.prior_type.value, - reward_type=experiment.reward_type.value, - ) - - asession.add(experiment_db) - await asession.commit() - await asession.refresh(experiment_db) - - return experiment_db - - -async def get_all_mabs( - workspace_id: int, - asession: AsyncSession, -) -> Sequence[MultiArmedBanditDB]: - """ - Get all the experiments from the database for a specific workspace. - """ - statement = ( - select(MultiArmedBanditDB) - .where( - MultiArmedBanditDB.workspace_id == workspace_id, - ) - .order_by(MultiArmedBanditDB.experiment_id) - ) - - return (await asession.execute(statement)).unique().scalars().all() - - -async def get_mab_by_id( - experiment_id: int, - workspace_id: int, - asession: AsyncSession, -) -> MultiArmedBanditDB | None: - """ - Get the experiment by id from a specific workspace. - """ - conditions = [ - MultiArmedBanditDB.workspace_id == workspace_id, - MultiArmedBanditDB.experiment_id == experiment_id, - ] - - result = await asession.execute(select(MultiArmedBanditDB).where(and_(*conditions))) - - return result.unique().scalar_one_or_none() - - -async def delete_mab_by_id( - experiment_id: int, workspace_id: int, asession: AsyncSession -) -> None: - """ - Delete the experiment by id. - """ - await asession.execute( - delete(NotificationsDB).where(NotificationsDB.experiment_id == experiment_id) - ) - - await asession.execute( - delete(DrawsBaseDB).where(DrawsBaseDB.experiment_id == experiment_id) - ) - - await asession.execute( - delete(MABArmDB).where( - and_( - MABArmDB.arm_id == ArmBaseDB.arm_id, - ArmBaseDB.experiment_id == experiment_id, - ) - ) - ) - await asession.execute( - delete(MultiArmedBanditDB).where( - and_( - MultiArmedBanditDB.experiment_id == experiment_id, - MultiArmedBanditDB.experiment_id == ExperimentBaseDB.experiment_id, - MultiArmedBanditDB.workspace_id == workspace_id, - ) - ) - ) - await asession.commit() - return None - - -async def get_obs_by_experiment_arm_id( - experiment_id: int, arm_id: int, asession: AsyncSession -) -> Sequence[MABDrawDB]: - """ - Get the observations for the experiment and arm. - """ - statement = ( - select(MABDrawDB) - .where(MABDrawDB.experiment_id == experiment_id) - .where(MABDrawDB.reward.is_not(None)) - .where(MABDrawDB.arm_id == arm_id) - .order_by(MABDrawDB.observed_datetime_utc) - ) - - return (await asession.execute(statement)).unique().scalars().all() - - -async def get_all_obs_by_experiment_id( - experiment_id: int, - workspace_id: int, - asession: AsyncSession, -) -> Sequence[MABDrawDB]: - """ - Get the observations for the experiment. 
- """ - # First, verify experiment belongs to the workspace - experiment = await get_mab_by_id( - experiment_id=experiment_id, - workspace_id=workspace_id, - asession=asession, - ) - - if experiment is None: - # Return empty list if experiment doesn't exist or doesn't belong to workspace - return [] - - statement = ( - select(MABDrawDB) - .where(MABDrawDB.experiment_id == experiment_id) - .where(MABDrawDB.reward.is_not(None)) - .order_by(MABDrawDB.observed_datetime_utc) - ) - - return (await asession.execute(statement)).unique().scalars().all() - - -async def get_draw_by_id(draw_id: str, asession: AsyncSession) -> MABDrawDB | None: - """ - Get a draw by its ID, which should be unique across the system. - """ - statement = select(MABDrawDB).where(MABDrawDB.draw_id == draw_id) - result = await asession.execute(statement) - - return result.unique().scalar_one_or_none() - - -async def get_draw_by_client_id( - client_id: str, - experiment_id: int, - asession: AsyncSession, -) -> MABDrawDB | None: - """ - Get a draw by its client ID for a specific experiment. - """ - statement = ( - select(MABDrawDB) - .where(MABDrawDB.client_id == client_id) - .where(MABDrawDB.client_id.is_not(None)) - .where(MABDrawDB.experiment_id == experiment_id) - ) - result = await asession.execute(statement) - - return result.unique().scalars().first() - - -async def save_draw_to_db( - experiment_id: int, - arm_id: int, - draw_id: str, - client_id: str | None, - user_id: int | None, - asession: AsyncSession, - workspace_id: int | None = None, -) -> MABDrawDB: - """ - Save a draw to the database - """ - # If user_id is not provided but needed, get it from the experiment - if user_id is None and workspace_id is not None: - experiment = await get_mab_by_id( - experiment_id=experiment_id, - workspace_id=workspace_id, - asession=asession, - ) - - if experiment: - user_id = experiment.user_id - else: - raise ValueError(f"Experiment with id {experiment_id} not found") - - if user_id is None: - raise ValueError("User ID must be provided or derivable from experiment") - - draw_datetime_utc: datetime = datetime.now(timezone.utc) - - draw = MABDrawDB( - draw_id=draw_id, - client_id=client_id, - experiment_id=experiment_id, - user_id=user_id, - arm_id=arm_id, - draw_datetime_utc=draw_datetime_utc, - ) - - asession.add(draw) - await asession.commit() - await asession.refresh(draw) - - return draw - - -async def save_observation_to_db( - draw: MABDrawDB, - reward: float, - asession: AsyncSession, - observation_type: ObservationType, -) -> MABDrawDB: - """ - Save an observation to the database - """ - - draw.reward = reward - draw.observed_datetime_utc = datetime.now(timezone.utc) - draw.observation_type = observation_type - asession.add(draw) - await asession.commit() - await asession.refresh(draw) - - return draw diff --git a/backend/app/mab/observation.py b/backend/app/mab/observation.py deleted file mode 100644 index 0ef34c2..0000000 --- a/backend/app/mab/observation.py +++ /dev/null @@ -1,94 +0,0 @@ -from datetime import datetime, timezone - -from fastapi import HTTPException -from sqlalchemy.ext.asyncio import AsyncSession - -from ..schemas import ObservationType, Outcome, RewardLikelihood -from .models import ( - MABArmDB, - MABDrawDB, - MultiArmedBanditDB, - save_observation_to_db, -) -from .sampling_utils import update_arm_params -from .schemas import ( - ArmResponse, - MultiArmedBanditSample, -) - - -async def update_based_on_outcome( - experiment: MultiArmedBanditDB, - draw: MABDrawDB, - outcome: float, - asession: 
AsyncSession, - observation_type: ObservationType, -) -> ArmResponse: - """ - Update the arm parameters based on the outcome. - - This is a helper function to allow `auto_fail` job to call - it as well. - """ - update_experiment_metadata(experiment) - - arm = get_arm_from_experiment(experiment, draw.arm_id) - arm.n_outcomes += 1 - - experiment_data = MultiArmedBanditSample.model_validate(experiment) - await update_arm_parameters(arm, experiment_data, outcome) - await save_updated_data(arm, draw, outcome, observation_type, asession) - - return ArmResponse.model_validate(arm) - - -def update_experiment_metadata(experiment: MultiArmedBanditDB) -> None: - """Update experiment metadata with new trial information""" - experiment.n_trials += 1 - experiment.last_trial_datetime_utc = datetime.now(tz=timezone.utc) - - -def get_arm_from_experiment(experiment: MultiArmedBanditDB, arm_id: int) -> MABArmDB: - """Get and validate the arm from the experiment""" - arms = [a for a in experiment.arms if a.arm_id == arm_id] - if not arms: - raise HTTPException(status_code=404, detail=f"Arm with id {arm_id} not found") - return arms[0] - - -async def update_arm_parameters( - arm: MABArmDB, experiment_data: MultiArmedBanditSample, outcome: float -) -> None: - """Update the arm parameters based on the reward type and outcome""" - if experiment_data.reward_type == RewardLikelihood.BERNOULLI: - Outcome(outcome) # Check if reward is 0 or 1 - arm.alpha, arm.beta = update_arm_params( - ArmResponse.model_validate(arm), - experiment_data.prior_type, - experiment_data.reward_type, - outcome, - ) - elif experiment_data.reward_type == RewardLikelihood.NORMAL: - arm.mu, arm.sigma = update_arm_params( - ArmResponse.model_validate(arm), - experiment_data.prior_type, - experiment_data.reward_type, - outcome, - ) - else: - raise HTTPException( - status_code=400, - detail="Reward type not supported.", - ) - - -async def save_updated_data( - arm: MABArmDB, - draw: MABDrawDB, - outcome: float, - observation_type: ObservationType, - asession: AsyncSession, -) -> None: - """Save the updated arm and observation data""" - await asession.commit() - await save_observation_to_db(draw, outcome, asession, observation_type) diff --git a/backend/app/mab/routers.py b/backend/app/mab/routers.py deleted file mode 100644 index 9a22582..0000000 --- a/backend/app/mab/routers.py +++ /dev/null @@ -1,357 +0,0 @@ -from typing import Annotated, Optional -from uuid import uuid4 - -from fastapi import APIRouter, Depends -from fastapi.exceptions import HTTPException -from sqlalchemy.ext.asyncio import AsyncSession - -from ..auth.dependencies import ( - authenticate_workspace_key, - get_verified_user, - require_admin_role, -) -from ..database import get_async_session -from ..models import get_notifications_from_db, save_notifications_to_db -from ..schemas import NotificationsResponse, ObservationType -from ..users.models import UserDB -from ..utils import setup_logger -from ..workspaces.models import ( - WorkspaceDB, - get_user_default_workspace, -) -from .models import ( - MABDrawDB, - MultiArmedBanditDB, - delete_mab_by_id, - get_all_mabs, - get_all_obs_by_experiment_id, - get_draw_by_client_id, - get_draw_by_id, - get_mab_by_id, - save_draw_to_db, - save_mab_to_db, -) -from .observation import update_based_on_outcome -from .sampling_utils import choose_arm -from .schemas import ( - ArmResponse, - MABDrawResponse, - MABObservationResponse, - MultiArmedBandit, - MultiArmedBanditResponse, - MultiArmedBanditSample, -) - -router = APIRouter(prefix="/mab", 
tags=["Multi-Armed Bandits"]) - -logger = setup_logger(__name__) - - -@router.post("/", response_model=MultiArmedBanditResponse) -async def create_mab( - experiment: MultiArmedBandit, - user_db: Annotated[UserDB, Depends(require_admin_role)], - asession: AsyncSession = Depends(get_async_session), -) -> MultiArmedBanditResponse: - """ - Create a new experiment in the user's current workspace. - """ - workspace_db = await get_user_default_workspace(asession=asession, user_db=user_db) - - if workspace_db is None: - raise HTTPException( - status_code=404, - detail="Workspace not found. Please create a workspace first.", - ) - - mab = await save_mab_to_db( - experiment, user_db.user_id, workspace_db.workspace_id, asession - ) - - notifications = await save_notifications_to_db( - experiment_id=mab.experiment_id, - user_id=user_db.user_id, - notifications=experiment.notifications, - asession=asession, - ) - - mab_dict = mab.to_dict() - mab_dict["notifications"] = [n.to_dict() for n in notifications] - - return MultiArmedBanditResponse.model_validate(mab_dict) - - -@router.get("/", response_model=list[MultiArmedBanditResponse]) -async def get_mabs( - user_db: Annotated[UserDB, Depends(get_verified_user)], - asession: AsyncSession = Depends(get_async_session), -) -> list[MultiArmedBanditResponse]: - """ - Get details of all experiments in the user's current workspace. - """ - workspace_db = await get_user_default_workspace(asession=asession, user_db=user_db) - - if workspace_db is None: - raise HTTPException( - status_code=404, - detail="Workspace not found. Please create a workspace first.", - ) - - experiments = await get_all_mabs(workspace_db.workspace_id, asession) - - all_experiments = [] - for exp in experiments: - exp_dict = exp.to_dict() - exp_dict["notifications"] = [ - n.to_dict() - for n in await get_notifications_from_db( - exp.experiment_id, exp.user_id, asession - ) - ] - all_experiments.append( - MultiArmedBanditResponse.model_validate( - { - **exp_dict, - "notifications": [ - NotificationsResponse(**n) for n in exp_dict["notifications"] - ], - } - ) - ) - return all_experiments - - -@router.get("/{experiment_id}/", response_model=MultiArmedBanditResponse) -async def get_mab( - experiment_id: int, - user_db: Annotated[UserDB, Depends(get_verified_user)], - asession: AsyncSession = Depends(get_async_session), -) -> MultiArmedBanditResponse: - """ - Get details of experiment with the provided `experiment_id`. - """ - workspace_db = await get_user_default_workspace(asession=asession, user_db=user_db) - - if workspace_db is None: - raise HTTPException( - status_code=404, - detail="Workspace not found. Please create a workspace first.", - ) - - experiment = await get_mab_by_id(experiment_id, workspace_db.workspace_id, asession) - - if experiment is None: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - - experiment_dict = experiment.to_dict() - experiment_dict["notifications"] = [ - n.to_dict() - for n in await get_notifications_from_db( - experiment.experiment_id, experiment.user_id, asession - ) - ] - - return MultiArmedBanditResponse.model_validate(experiment_dict) - - -@router.delete("/{experiment_id}", response_model=dict) -async def delete_mab( - experiment_id: int, - user_db: Annotated[UserDB, Depends(require_admin_role)], - asession: AsyncSession = Depends(get_async_session), -) -> dict: - """ - Delete the experiment with the provided `experiment_id`. 
- """ - try: - workspace_db = await get_user_default_workspace( - asession=asession, user_db=user_db - ) - - if workspace_db is None: - raise HTTPException( - status_code=404, - detail="Workspace not found. Please create a workspace first.", - ) - - experiment = await get_mab_by_id( - experiment_id, workspace_db.workspace_id, asession - ) - if experiment is None: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - await delete_mab_by_id(experiment_id, workspace_db.workspace_id, asession) - return {"message": f"Experiment with id {experiment_id} deleted successfully."} - except Exception as e: - raise HTTPException(status_code=500, detail=f"Error: {e}") from e - - -@router.get("/{experiment_id}/draw", response_model=MABDrawResponse) -async def draw_arm( - experiment_id: int, - draw_id: Optional[str] = None, - client_id: Optional[str] = None, - workspace_db: WorkspaceDB = Depends(authenticate_workspace_key), - asession: AsyncSession = Depends(get_async_session), -) -> MABDrawResponse: - """ - Draw an arm for the provided experiment. - """ - # Get workspace from user context - workspace_id = workspace_db.workspace_id - - experiment = await get_mab_by_id(experiment_id, workspace_id, asession) - if experiment is None: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - - if experiment.sticky_assignment and client_id is None: - raise HTTPException( - status_code=400, - detail="Client ID is required for sticky assignment.", - ) - - # Check for existing draws - if draw_id is None: - draw_id = str(uuid4()) - - existing_draw = await get_draw_by_id(draw_id, asession) - if existing_draw: - raise HTTPException( - status_code=400, - detail=f"Draw ID {draw_id} already exists.", - ) - - experiment_data = MultiArmedBanditSample.model_validate(experiment) - chosen_arm = choose_arm(experiment=experiment_data) - chosen_arm_id = experiment.arms[chosen_arm].arm_id - - # If sticky assignment, check if the client_id has a previous arm assigned - if experiment.sticky_assignment and client_id: - previous_draw = await get_draw_by_client_id( - client_id=client_id, - experiment_id=experiment.experiment_id, - asession=asession, - ) - if previous_draw: - print(f"Previous draw found: {previous_draw.arm_id}") - chosen_arm_id = previous_draw.arm_id - - try: - _ = await save_draw_to_db( - experiment_id=experiment.experiment_id, - arm_id=chosen_arm_id, - draw_id=draw_id, - client_id=client_id, - user_id=None, - asession=asession, - workspace_id=workspace_id, - ) - except Exception as e: - raise HTTPException( - status_code=500, - detail=f"Error saving draw to database: {e}", - ) from e - - return MABDrawResponse.model_validate( - { - "draw_id": draw_id, - "client_id": client_id, - "arm": ArmResponse.model_validate( - [arm for arm in experiment.arms if arm.arm_id == chosen_arm_id][0] - ), - } - ) - - -@router.put("/{experiment_id}/{draw_id}/{outcome}", response_model=ArmResponse) -async def update_arm( - experiment_id: int, - draw_id: str, - outcome: float, - workspace_db: WorkspaceDB = Depends(authenticate_workspace_key), - asession: AsyncSession = Depends(get_async_session), -) -> ArmResponse: - """ - Update the arm with the provided `arm_id` for the given - `experiment_id` based on the `outcome`. 
- """ - # Get workspace from user context - workspace_id = workspace_db.workspace_id - - experiment, draw = await validate_experiment_and_draw( - experiment_id, draw_id, workspace_id, asession - ) - - return await update_based_on_outcome( - experiment, draw, outcome, asession, ObservationType.USER - ) - - -@router.get( - "/{experiment_id}/outcomes", - response_model=list[MABObservationResponse], -) -async def get_outcomes( - experiment_id: int, - workspace_db: WorkspaceDB = Depends(authenticate_workspace_key), - asession: AsyncSession = Depends(get_async_session), -) -> list[MABObservationResponse]: - """ - Get the outcomes for the experiment. - """ - # Get workspace from user context - workspace_id = workspace_db.workspace_id - - experiment = await get_mab_by_id(experiment_id, workspace_id, asession) - if not experiment: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - - rewards = await get_all_obs_by_experiment_id( - experiment_id=experiment.experiment_id, - workspace_id=workspace_id, - asession=asession, - ) - - return [MABObservationResponse.model_validate(reward) for reward in rewards] - - -async def validate_experiment_and_draw( - experiment_id: int, - draw_id: str, - workspace_id: int, - asession: AsyncSession, -) -> tuple[MultiArmedBanditDB, MABDrawDB]: - """Validate the experiment and draw""" - experiment = await get_mab_by_id(experiment_id, workspace_id, asession) - if experiment is None: - raise HTTPException( - status_code=404, detail=f"Experiment with id {experiment_id} not found" - ) - - draw = await get_draw_by_id(draw_id=draw_id, asession=asession) - if draw is None: - raise HTTPException(status_code=404, detail=f"Draw with id {draw_id} not found") - - if draw.experiment_id != experiment_id: - raise HTTPException( - status_code=400, - detail=( - f"Draw with id {draw_id} does not belong " - f"to experiment with id {experiment_id}", - ), - ) - - if draw.reward is not None: - raise HTTPException( - status_code=400, - detail=f"Draw with id {draw_id} already has an outcome.", - ) - - return experiment, draw diff --git a/backend/app/mab/sampling_utils.py b/backend/app/mab/sampling_utils.py deleted file mode 100644 index 6bc5fe8..0000000 --- a/backend/app/mab/sampling_utils.py +++ /dev/null @@ -1,138 +0,0 @@ -import numpy as np -from numpy.random import beta, normal - -from ..mab.schemas import ArmResponse, MultiArmedBanditSample -from ..schemas import ArmPriors, Outcome, RewardLikelihood - - -def sample_beta_binomial(alphas: np.ndarray, betas: np.ndarray) -> int: - """ - Thompson Sampling with Beta-Binomial distribution. - - Parameters - ---------- - alphas : alpha parameter of Beta distribution for each arm - betas : beta parameter of Beta distribution for each arm - """ - samples = beta(alphas, betas) - return int(samples.argmax()) - - -def sample_normal(mus: np.ndarray, sigmas: np.ndarray) -> int: - """ - Thompson Sampling with conjugate normal distribution. - - Parameters - ---------- - mus: mean of Normal distribution for each arm - sigmas: standard deviation of Normal distribution for each arm - """ - samples = normal(loc=mus, scale=sigmas) - return int(samples.argmax()) - - -def update_arm_beta_binomial( - alpha: float, beta: float, reward: Outcome -) -> tuple[float, float]: - """ - Update the alpha and beta parameters of the Beta distribution. - - Parameters - ---------- - alpha : int - The alpha parameter of the Beta distribution. - beta : int - The beta parameter of the Beta distribution. 
- reward : Outcome - The reward of the arm. - """ - if reward == Outcome.SUCCESS: - - return alpha + 1, beta - else: - return alpha, beta + 1 - - -def update_arm_normal( - current_mu: float, current_sigma: float, reward: float, sigma_llhood: float -) -> tuple[float, float]: - """ - Update the mean and standard deviation of the Normal distribution. - - Parameters - ---------- - current_mu : The mean of the Normal distribution. - current_sigma : The standard deviation of the Normal distribution. - reward : The reward of the arm. - sigma_llhood : The likelihood of the standard deviation. - """ - denom = sigma_llhood**2 + current_sigma**2 - new_sigma = sigma_llhood * current_sigma / np.sqrt(denom) - new_mu = (current_mu * sigma_llhood**2 + reward * current_sigma**2) / denom - return new_mu, new_sigma - - -def choose_arm(experiment: MultiArmedBanditSample) -> int: - """ - Choose arm based on posterior - - Parameters - ---------- - experiment : MultiArmedBanditResponse - The experiment data containing priors and rewards for each arm. - """ - if (experiment.prior_type == ArmPriors.BETA) and ( - experiment.reward_type == RewardLikelihood.BERNOULLI - ): - alphas = np.array([arm.alpha for arm in experiment.arms]) - betas = np.array([arm.beta for arm in experiment.arms]) - - return sample_beta_binomial(alphas=alphas, betas=betas) - - elif (experiment.prior_type == ArmPriors.NORMAL) and ( - experiment.reward_type == RewardLikelihood.NORMAL - ): - mus = np.array([arm.mu for arm in experiment.arms]) - sigmas = np.array([arm.sigma for arm in experiment.arms]) - # TODO: add support for non-std sigma_llhood - return sample_normal(mus=mus, sigmas=sigmas) - else: - raise ValueError("Prior and reward type combination is not supported.") - - -def update_arm_params( - arm: ArmResponse, - prior_type: ArmPriors, - reward_type: RewardLikelihood, - reward: float, -) -> tuple: - """ - Update the arm with the provided `arm_id` based on the `reward`. - - Parameters - ---------- - arm: The arm to update. - prior_type: The type of prior distribution for the arms. - reward_type: The likelihood distribution of the reward. - reward: The reward of the arm. - """ - - if (prior_type == ArmPriors.BETA) and (reward_type == RewardLikelihood.BERNOULLI): - if arm.alpha is None or arm.beta is None: - raise ValueError("Beta prior requires alpha and beta.") - outcome = Outcome(reward) - return update_arm_beta_binomial(alpha=arm.alpha, beta=arm.beta, reward=outcome) - - elif ( - (prior_type == ArmPriors.NORMAL) - and (reward_type == RewardLikelihood.NORMAL) - and (arm.mu and arm.sigma) - ): - return update_arm_normal( - current_mu=arm.mu, - current_sigma=arm.sigma, - reward=reward, - sigma_llhood=1.0, # TODO: add support for non-std sigma_llhood - ) - else: - raise ValueError("Prior and reward type combination is not supported.") diff --git a/backend/app/mab/schemas.py b/backend/app/mab/schemas.py deleted file mode 100644 index 60fbf0e..0000000 --- a/backend/app/mab/schemas.py +++ /dev/null @@ -1,262 +0,0 @@ -from datetime import datetime -from typing import Optional, Self - -from pydantic import BaseModel, ConfigDict, Field, model_validator - -from ..schemas import ( - ArmPriors, - AutoFailUnitType, - Notifications, - NotificationsResponse, - RewardLikelihood, - allowed_combos_mab, -) - - -class Arm(BaseModel): - """ - Pydantic model for a arm of the experiment. 
- """ - - name: str = Field( - max_length=150, - examples=["Arm 1"], - ) - description: str = Field( - max_length=500, - examples=["This is a description of the arm."], - ) - - # prior variables - alpha_init: Optional[float] = Field( - default=None, examples=[None, 1.0], description="Alpha parameter for Beta prior" - ) - beta_init: Optional[float] = Field( - default=None, examples=[None, 1.0], description="Beta parameter for Beta prior" - ) - mu_init: Optional[float] = Field( - default=None, - examples=[None, 0.0], - description="Mean parameter for Normal prior", - ) - sigma_init: Optional[float] = Field( - default=None, - examples=[None, 1.0], - description="Standard deviation parameter for Normal prior", - ) - n_outcomes: Optional[int] = Field( - default=0, - description="Number of outcomes for the arm", - examples=[0, 10, 15], - ) - - @model_validator(mode="after") - def check_values(self) -> Self: - """ - Check if the values are unique. - """ - alpha = self.alpha_init - beta = self.beta_init - sigma = self.sigma_init - if alpha is not None and alpha <= 0: - raise ValueError("Alpha must be greater than 0.") - if beta is not None and beta <= 0: - raise ValueError("Beta must be greater than 0.") - if sigma is not None and sigma <= 0: - raise ValueError("Sigma must be greater than 0.") - return self - - -class ArmResponse(Arm): - """ - Pydantic model for an response for arm creation - """ - - arm_id: int - alpha: Optional[float] - beta: Optional[float] - mu: Optional[float] - sigma: Optional[float] - model_config = ConfigDict( - from_attributes=True, - ) - - -class MultiArmedBanditBase(BaseModel): - """ - Pydantic model for an experiment - Base model. - Note: Do not use this model directly. Use `MultiArmedBandit` instead. - """ - - name: str = Field( - max_length=150, - examples=["Experiment 1"], - ) - - description: str = Field( - max_length=500, - examples=["This is a description of the experiment."], - ) - - sticky_assignment: bool = Field( - description="Whether the arm assignment is sticky or not.", - default=False, - ) - - auto_fail: bool = Field( - description=( - "Whether the experiment should fail automatically after " - "a certain period if no outcome is registered." - ), - default=False, - ) - - auto_fail_value: Optional[int] = Field( - description="The time period after which the experiment should fail.", - default=None, - ) - - auto_fail_unit: Optional[AutoFailUnitType] = Field( - description="The time unit for the auto fail period.", - default=None, - ) - - reward_type: RewardLikelihood = Field( - description="The type of reward we observe from the experiment.", - default=RewardLikelihood.BERNOULLI, - ) - prior_type: ArmPriors = Field( - description="The type of prior distribution for the arms.", - default=ArmPriors.BETA, - ) - - is_active: bool = True - - model_config = ConfigDict(from_attributes=True) - - -class MultiArmedBandit(MultiArmedBanditBase): - """ - Pydantic model for an experiment. - """ - - arms: list[Arm] - notifications: Notifications - - @model_validator(mode="after") - def auto_fail_unit_and_value_set(self) -> Self: - """ - Validate that the auto fail unit and value are set if auto fail is True. - """ - if self.auto_fail: - if ( - not self.auto_fail_value - or not self.auto_fail_unit - or self.auto_fail_value <= 0 - ): - raise ValueError( - ( - "Auto fail is enabled. " - "Please provide both auto_fail_value and auto_fail_unit." 
- ) - ) - return self - - @model_validator(mode="after") - def arms_at_least_two(self) -> Self: - """ - Validate that the experiment has at least two arms. - """ - if len(self.arms) < 2: - raise ValueError("The experiment must have at least two arms.") - return self - - @model_validator(mode="after") - def check_prior_reward_type_combo(self) -> Self: - """ - Validate that the prior and reward type combination is allowed. - """ - - if (self.prior_type, self.reward_type) not in allowed_combos_mab: - raise ValueError("Prior and reward type combo not supported.") - return self - - @model_validator(mode="after") - def check_arm_missing_params(self) -> Self: - """ - Check if the arm reward type is same as the experiment reward type. - """ - prior_type = self.prior_type - arms = self.arms - - prior_params = { - ArmPriors.BETA: ("alpha_init", "beta_init"), - ArmPriors.NORMAL: ("mu_init", "sigma_init"), - } - - for arm in arms: - arm_dict = arm.model_dump() - if prior_type in prior_params: - missing_params = [] - for param in prior_params[prior_type]: - if param not in arm_dict.keys(): - missing_params.append(param) - elif arm_dict[param] is None: - missing_params.append(param) - - if missing_params: - val = prior_type.value - raise ValueError(f"{val} prior needs {','.join(missing_params)}.") - return self - - model_config = ConfigDict(from_attributes=True) - - -class MultiArmedBanditResponse(MultiArmedBanditBase): - """ - Pydantic model for an response for experiment creation. - Returns the id of the experiment and the arms - """ - - experiment_id: int - workspace_id: int - arms: list[ArmResponse] - notifications: list[NotificationsResponse] - created_datetime_utc: datetime - last_trial_datetime_utc: Optional[datetime] = None - n_trials: int - model_config = ConfigDict(from_attributes=True, revalidate_instances="always") - - -class MultiArmedBanditSample(MultiArmedBanditBase): - """ - Pydantic model for an experiment sample. - """ - - experiment_id: int - arms: list[ArmResponse] - - -class MABObservationResponse(BaseModel): - """ - Pydantic model for binary observations of the experiment. - """ - - experiment_id: int - arm_id: int - reward: float - draw_id: str - client_id: str | None - observed_datetime_utc: datetime - - model_config = ConfigDict(from_attributes=True) - - -class MABDrawResponse(BaseModel): - """ - Pydantic model for the response of the draw endpoint. 
- """ - - draw_id: str - client_id: str | None - arm: ArmResponse diff --git a/backend/app/messages/models.py b/backend/app/messages/models.py index 28ec3fb..61b557b 100644 --- a/backend/app/messages/models.py +++ b/backend/app/messages/models.py @@ -102,7 +102,7 @@ class EventMessageDB(MessageDB): nullable=False, ) experiment_id: Mapped[int] = mapped_column( - Integer, ForeignKey("experiments_base.experiment_id"), nullable=False + Integer, ForeignKey("experiments.experiment_id"), nullable=False ) __mapper_args__ = {"polymorphic_identity": "event"} diff --git a/backend/app/models.py b/backend/app/models.py index 097aa2b..b8df8a0 100644 --- a/backend/app/models.py +++ b/backend/app/models.py @@ -1,254 +1,12 @@ -import uuid -from datetime import datetime -from typing import TYPE_CHECKING, Sequence +from typing import TYPE_CHECKING -from sqlalchemy import ( - Boolean, - DateTime, - Enum, - Float, - ForeignKey, - Integer, - String, - select, -) -from sqlalchemy.ext.asyncio import AsyncSession -from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship - -from .schemas import AutoFailUnitType, EventType, Notifications, ObservationType +from sqlalchemy.orm import DeclarativeBase if TYPE_CHECKING: - from .workspaces.models import WorkspaceDB + pass class Base(DeclarativeBase): """Base class for SQLAlchemy models""" pass - - -class ExperimentBaseDB(Base): - """ - Base model for experiments. - """ - - __tablename__ = "experiments_base" - - experiment_id: Mapped[int] = mapped_column( - Integer, primary_key=True, nullable=False - ) - name: Mapped[str] = mapped_column(String(length=150), nullable=False) - description: Mapped[str] = mapped_column(String(length=500), nullable=False) - sticky_assignment: Mapped[bool] = mapped_column( - Boolean, nullable=False, default=False - ) - auto_fail: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False) - auto_fail_value: Mapped[int] = mapped_column(Integer, nullable=True) - auto_fail_unit: Mapped[AutoFailUnitType] = mapped_column( - Enum(AutoFailUnitType), nullable=True - ) - - user_id: Mapped[int] = mapped_column( - Integer, ForeignKey("users.user_id"), nullable=False - ) - workspace_id: Mapped[int] = mapped_column( - Integer, ForeignKey("workspace.workspace_id"), nullable=False - ) - is_active: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True) - exp_type: Mapped[str] = mapped_column(String(length=50), nullable=False) - prior_type: Mapped[str] = mapped_column(String(length=50), nullable=False) - reward_type: Mapped[str] = mapped_column(String(length=50), nullable=False) - - created_datetime_utc: Mapped[datetime] = mapped_column( - DateTime(timezone=True), nullable=False - ) - n_trials: Mapped[int] = mapped_column(Integer, nullable=False) - last_trial_datetime_utc: Mapped[datetime] = mapped_column( - DateTime(timezone=True), nullable=True - ) - workspace: Mapped["WorkspaceDB"] = relationship( - "WorkspaceDB", back_populates="experiments" - ) - - __mapper_args__ = { - "polymorphic_identity": "experiment", - "polymorphic_on": "exp_type", - } - - def __repr__(self) -> str: - """ - String representation of the model - """ - return f"" - - -class ArmBaseDB(Base): - """ - Base model for arms. 
- """ - - __tablename__ = "arms_base" - - arm_id: Mapped[int] = mapped_column(Integer, primary_key=True, nullable=False) - experiment_id: Mapped[int] = mapped_column( - Integer, ForeignKey("experiments_base.experiment_id"), nullable=False - ) - user_id: Mapped[int] = mapped_column( - Integer, ForeignKey("users.user_id"), nullable=False - ) - - name: Mapped[str] = mapped_column(String(length=150), nullable=False) - description: Mapped[str] = mapped_column(String(length=500), nullable=False) - arm_type: Mapped[str] = mapped_column(String(length=50), nullable=False) - n_outcomes: Mapped[int] = mapped_column(Integer, nullable=False, default=0) - - __mapper_args__ = { - "polymorphic_identity": "arm", - "polymorphic_on": "arm_type", - } - - -class DrawsBaseDB(Base): - """ - Base model for draws. - """ - - __tablename__ = "draws_base" - - draw_id: Mapped[str] = mapped_column( - String, primary_key=True, default=lambda x: str(uuid.uuid4()) - ) - - client_id: Mapped[str] = mapped_column(String, nullable=True) - - arm_id: Mapped[int] = mapped_column( - Integer, ForeignKey("arms_base.arm_id"), nullable=False - ) - experiment_id: Mapped[int] = mapped_column( - Integer, ForeignKey("experiments_base.experiment_id"), nullable=False - ) - user_id: Mapped[int] = mapped_column( - Integer, ForeignKey("users.user_id"), nullable=False - ) - - draw_datetime_utc: Mapped[datetime] = mapped_column( - DateTime(timezone=True), - nullable=False, - ) - - observed_datetime_utc: Mapped[datetime] = mapped_column( - DateTime(timezone=True), nullable=True - ) - - observation_type: Mapped[ObservationType] = mapped_column( - Enum(ObservationType), nullable=True - ) - - draw_type: Mapped[str] = mapped_column(String(length=50), nullable=False) - - reward: Mapped[float] = mapped_column(Float, nullable=True) - - __mapper_args__ = { - "polymorphic_identity": "draw", - "polymorphic_on": "draw_type", - } - - -class NotificationsDB(Base): - """ - Model for notifications. 
- Note: if you are updating this, you should also update models in - the background celery job - """ - - __tablename__ = "notifications" - - notification_id: Mapped[int] = mapped_column( - Integer, primary_key=True, nullable=False - ) - experiment_id: Mapped[int] = mapped_column( - Integer, ForeignKey("experiments_base.experiment_id"), nullable=False - ) - user_id: Mapped[int] = mapped_column( - Integer, ForeignKey("users.user_id"), nullable=False - ) - notification_type: Mapped[EventType] = mapped_column( - Enum(EventType), nullable=False - ) - notification_value: Mapped[int] = mapped_column(Integer, nullable=False) - is_active: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True) - - def to_dict(self) -> dict: - """ - Convert the model to a dictionary - """ - return { - "notification_id": self.notification_id, - "experiment_id": self.experiment_id, - "user_id": self.user_id, - "notification_type": self.notification_type, - "notification_value": self.notification_value, - "is_active": self.is_active, - } - - -async def save_notifications_to_db( - experiment_id: int, - user_id: int, - notifications: Notifications, - asession: AsyncSession, -) -> list[NotificationsDB]: - """ - Save notifications to the database - """ - notification_records = [] - - if notifications.onTrialCompletion: - notification_row = NotificationsDB( - experiment_id=experiment_id, - user_id=user_id, - notification_type=EventType.TRIALS_COMPLETED, - notification_value=notifications.numberOfTrials, - is_active=True, - ) - notification_records.append(notification_row) - - if notifications.onDaysElapsed: - notification_row = NotificationsDB( - experiment_id=experiment_id, - user_id=user_id, - notification_type=EventType.DAYS_ELAPSED, - notification_value=notifications.daysElapsed, - is_active=True, - ) - notification_records.append(notification_row) - - if notifications.onPercentBetter: - notification_row = NotificationsDB( - experiment_id=experiment_id, - user_id=user_id, - notification_type=EventType.PERCENTAGE_BETTER, - notification_value=notifications.percentBetterThreshold, - is_active=True, - ) - notification_records.append(notification_row) - - asession.add_all(notification_records) - await asession.commit() - - return notification_records - - -async def get_notifications_from_db( - experiment_id: int, user_id: int, asession: AsyncSession -) -> Sequence[NotificationsDB]: - """ - Get notifications from the database - """ - statement = ( - select(NotificationsDB) - .where(NotificationsDB.experiment_id == experiment_id) - .where(NotificationsDB.user_id == user_id) - ) - - return (await asession.execute(statement)).scalars().all() diff --git a/backend/app/schemas.py b/backend/app/schemas.py deleted file mode 100644 index 783c52e..0000000 --- a/backend/app/schemas.py +++ /dev/null @@ -1,180 +0,0 @@ -from enum import Enum, StrEnum -from typing import Any, Self - -import numpy as np -from pydantic import BaseModel, ConfigDict, model_validator -from pydantic.types import NonNegativeInt - - -class EventType(StrEnum): - """Types of events that can trigger a notification""" - - DAYS_ELAPSED = "days_elapsed" - TRIALS_COMPLETED = "trials_completed" - PERCENTAGE_BETTER = "percentage_better" - - -class ObservationType(StrEnum): - """Types of observations that can be made""" - - USER = "user" # Generated by the user - AUTO = "auto" # Generated by the system - - -class AutoFailUnitType(StrEnum): - """Types of units for auto fail""" - - DAYS = "days" - HOURS = "hours" - - -class Notifications(BaseModel): - """ - 
Pydantic model for a notifications. - """ - - onTrialCompletion: bool = False - numberOfTrials: NonNegativeInt | None - onDaysElapsed: bool = False - daysElapsed: NonNegativeInt | None - onPercentBetter: bool = False - percentBetterThreshold: NonNegativeInt | None - - @model_validator(mode="after") - def validate_has_assocatiated_value(self) -> Self: - """ - Validate that the required corresponding fields have been set. - """ - if self.onTrialCompletion and ( - not self.numberOfTrials or self.numberOfTrials == 0 - ): - raise ValueError( - "numberOfTrials is required when onTrialCompletion is True" - ) - if self.onDaysElapsed and (not self.daysElapsed or self.daysElapsed == 0): - raise ValueError("daysElapsed is required when onDaysElapsed is True") - if self.onPercentBetter and ( - not self.percentBetterThreshold or self.percentBetterThreshold == 0 - ): - raise ValueError( - "percentBetterThreshold is required when onPercentBetter is True" - ) - - return self - - -class NotificationsResponse(BaseModel): - """ - Pydantic model for a response for notifications - """ - - model_config = ConfigDict(from_attributes=True) - - notification_id: int - notification_type: EventType - notification_value: NonNegativeInt - is_active: bool - - -class Outcome(float, Enum): - """ - Enum for the outcome of a trial. - """ - - SUCCESS = 1 - FAILURE = 0 - - -class ArmPriors(StrEnum): - """ - Enum for the prior distribution of the arm. - """ - - BETA = "beta" - NORMAL = "normal" - - def __call__(self, theta: np.ndarray, **kwargs: Any) -> np.ndarray: - """ - Return the log pdf of the input param. - """ - if self == ArmPriors.BETA: - alpha = kwargs.get("alpha", np.ones_like(theta)) - beta = kwargs.get("beta", np.ones_like(theta)) - return (alpha - 1) * np.log(theta) + (beta - 1) * np.log(1 - theta) - - elif self == ArmPriors.NORMAL: - mu = kwargs.get("mu", np.zeros_like(theta)) - covariance = kwargs.get("covariance", np.diag(np.ones_like(theta))) - inv_cov = np.linalg.inv(covariance) - x = theta - mu - return -0.5 * x @ inv_cov @ x - - -class RewardLikelihood(StrEnum): - """ - Enum for the likelihood distribution of the reward. - """ - - BERNOULLI = "binary" - NORMAL = "real-valued" - - def __call__(self, reward: np.ndarray, probs: np.ndarray) -> np.ndarray: - """ - Calculate the log likelihood of the reward. - - Parameters - ---------- - reward : The reward. - probs : The probability of the reward. - """ - if self == RewardLikelihood.NORMAL: - return -0.5 * np.sum((reward - probs) ** 2) - elif self == RewardLikelihood.BERNOULLI: - return np.sum(reward * np.log(probs) + (1 - reward) * np.log(1 - probs)) - - -class ContextType(StrEnum): - """ - Enum for the type of context. - """ - - BINARY = "binary" - REAL_VALUED = "real-valued" - - -class ContextLinkFunctions(StrEnum): - """ - Enum for the link function of the arm params and context. - """ - - NONE = "none" - LOGISTIC = "logistic" - - def __call__(self, x: np.ndarray) -> np.ndarray: - """ - Apply the link function to the input param. - - Parameters - ---------- - x : The input param. 
- """ - if self == ContextLinkFunctions.NONE: - return x - elif self == ContextLinkFunctions.LOGISTIC: - return 1.0 / (1.0 + np.exp(-x)) - - -allowed_combos_mab = [ - (ArmPriors.BETA, RewardLikelihood.BERNOULLI), - (ArmPriors.NORMAL, RewardLikelihood.NORMAL), -] - -allowed_combos_cmab = [ - (ArmPriors.NORMAL, RewardLikelihood.BERNOULLI), - (ArmPriors.NORMAL, RewardLikelihood.NORMAL), -] - -allowed_combos_bayes_ab = [ - (ArmPriors.NORMAL, RewardLikelihood.BERNOULLI), - (ArmPriors.NORMAL, RewardLikelihood.NORMAL), -] diff --git a/backend/app/workspaces/models.py b/backend/app/workspaces/models.py index b79aeab..52146cc 100644 --- a/backend/app/workspaces/models.py +++ b/backend/app/workspaces/models.py @@ -19,7 +19,7 @@ from sqlalchemy.ext.asyncio import AsyncSession from sqlalchemy.orm import Mapped, mapped_column, relationship -from ..models import Base, ExperimentBaseDB +from ..models import Base from ..users.exceptions import UserNotFoundError from ..users.schemas import UserCreate from .schemas import UserCreateWithCode, UserRoles @@ -77,9 +77,6 @@ class WorkspaceDB(Base): workspace_id: Mapped[int] = mapped_column(Integer, primary_key=True, nullable=False) workspace_name: Mapped[str] = mapped_column(String, nullable=False, unique=True) is_default: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False) - experiments: Mapped[list["ExperimentBaseDB"]] = relationship( - "ExperimentBaseDB", back_populates="workspace", cascade="all, delete-orphan" - ) pending_invitations: Mapped[list["PendingInvitationDB"]] = relationship( "PendingInvitationDB", back_populates="workspace", cascade="all, delete-orphan" diff --git a/backend/jobs/auto_fail.py b/backend/jobs/auto_fail.py index 3665831..d1791f2 100644 --- a/backend/jobs/auto_fail.py +++ b/backend/jobs/auto_fail.py @@ -15,21 +15,16 @@ from sqlalchemy import select from sqlalchemy.ext.asyncio import AsyncSession -from app.bayes_ab.models import BayesianABDB, BayesianABDrawDB -from app.bayes_ab.observation import ( - update_based_on_outcome as bayes_ab_update_based_on_outcome, -) -from app.contextual_mab.models import ContextualBanditDB, ContextualDrawDB -from app.contextual_mab.observation import ( - update_based_on_outcome as cmab_update_based_on_outcome, -) from app.database import get_async_session -from app.mab.models import MABDrawDB, MultiArmedBanditDB -from app.mab.observation import update_based_on_outcome as mab_update_based_on_outcome -from app.schemas import ObservationType +from app.experiments.dependencies import ( + format_rewards_for_arm_update, + update_arm_based_on_outcome, +) +from app.experiments.models import DrawDB, ExperimentDB +from app.experiments.schemas import ObservationType -async def auto_fail_mab(asession: AsyncSession) -> int: +async def auto_fail_experiment(asession: AsyncSession) -> int: """ Auto fail experiments draws that have not been updated in a certain amount of time. 
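(Editorial aside, not part of the diff: the hunk below collapses the three per-experiment-type auto-fail jobs into a single `auto_fail_experiment`. The cutoff arithmetic it keeps can be sketched as a standalone helper; `compute_cutoff` is a hypothetical name used only for illustration, assuming the `auto_fail_value` / `auto_fail_unit` semantics visible in this file.)

from datetime import datetime, timedelta, timezone


def compute_cutoff(auto_fail_value: int, auto_fail_unit: str) -> datetime:
    """Return the UTC datetime before which unanswered draws get auto-failed.

    Mirrors the threshold logic in auto_fail_experiment: a value expressed in
    days is converted to hours, then subtracted from the current UTC time.
    """
    hours_threshold = (
        auto_fail_value * 24 if auto_fail_unit == "days" else auto_fail_value
    )
    return datetime.now(timezone.utc) - timedelta(hours=hours_threshold)


# Example: with auto_fail_value=2 and auto_fail_unit="days", any draw whose
# draw_datetime_utc is older than this cutoff and whose observation_type is
# still unset would be marked ObservationType.AUTO with a reward of 0.0.
print(compute_cutoff(2, "days"))

(The batched pagination over draws, shown in the hunk that follows, is unchanged apart from operating on the unified `ExperimentDB` / `DrawDB` models.)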
@@ -40,75 +35,10 @@ async def auto_fail_mab(asession: AsyncSession) -> int: int: Number of draws automatically failed """ total_failed = 0 - now = datetime.now(tz=timezone.utc) - - # Fetch all required experiments data in one query - experiment_query = select(MultiArmedBanditDB).where( - MultiArmedBanditDB.auto_fail.is_(True) - ) - experiments_result = (await asession.execute(experiment_query)).unique() - experiments = experiments_result.scalars().all() - for experiment in experiments: - hours_threshold = ( - experiment.auto_fail_value * 24 - if experiment.auto_fail_unit == "days" - else experiment.auto_fail_value - ) - - cutoff_datetime = now - timedelta(hours=hours_threshold) - - draws_query = ( - select(MABDrawDB) - .join( - MultiArmedBanditDB, - MABDrawDB.experiment_id == MultiArmedBanditDB.experiment_id, - ) - .where( - MABDrawDB.experiment_id == experiment.experiment_id, - MABDrawDB.observation_type.is_(None), - MABDrawDB.draw_datetime_utc <= cutoff_datetime, - ) - .limit(100) - ) # Process in smaller batches - - # Paginate through results if there are many draws to avoid memory issues - offset = 0 - while True: - batch_query = draws_query.offset(offset) - draws_result = (await asession.execute(batch_query)).unique() - draws_batch = draws_result.scalars().all() - if not draws_batch: - break - - for draw in draws_batch: - draw.observation_type = ObservationType.AUTO - - await mab_update_based_on_outcome( - experiment, - draw, - 0.0, - asession, - ObservationType.AUTO, - ) - - total_failed += 1 - - await asession.commit() - offset += len(draws_batch) - - return total_failed - - -async def auto_fail_bayes_ab(asession: AsyncSession) -> int: - """ - Auto fail experiments draws that have not been updated in a certain amount of time. - - """ - total_failed = 0 - now = datetime.now(tz=timezone.utc) + now = datetime.now(timezone.utc) # Fetch all required experiments data in one query - experiment_query = select(BayesianABDB).where(BayesianABDB.auto_fail.is_(True)) + experiment_query = select(ExperimentDB).where(ExperimentDB.auto_fail.is_(True)) experiments_result = (await asession.execute(experiment_query)).unique() experiments = experiments_result.scalars().all() for experiment in experiments: @@ -121,15 +51,15 @@ async def auto_fail_bayes_ab(asession: AsyncSession) -> int: cutoff_datetime = now - timedelta(hours=hours_threshold) draws_query = ( - select(BayesianABDrawDB) + select(DrawDB) .join( - BayesianABDB, - BayesianABDrawDB.experiment_id == BayesianABDB.experiment_id, + ExperimentDB, + DrawDB.experiment_id == ExperimentDB.experiment_id, ) .where( - BayesianABDrawDB.experiment_id == experiment.experiment_id, - BayesianABDrawDB.observation_type.is_(None), - BayesianABDrawDB.draw_datetime_utc <= cutoff_datetime, + DrawDB.experiment_id == experiment.experiment_id, + DrawDB.observation_type.is_(None), + DrawDB.draw_datetime_utc <= cutoff_datetime, ) .limit(100) ) # Process in smaller batches @@ -146,83 +76,19 @@ async def auto_fail_bayes_ab(asession: AsyncSession) -> int: for draw in draws_batch: draw.observation_type = ObservationType.AUTO - await bayes_ab_update_based_on_outcome( - experiment, - draw, - 0.0, - asession, - ObservationType.AUTO, + rewards_list, context_list, treatments_list = ( + await format_rewards_for_arm_update( + experiment, draw.arm_id, 0.0, draw.context_val, asession + ) ) - - total_failed += 1 - - await asession.commit() - offset += len(draws_batch) - - return total_failed - - -async def auto_fail_cmab(asession: AsyncSession) -> int: - """ - Auto fail experiments 
draws that have not been updated in a certain amount of time. - - Args: - asession: SQLAlchemy async session - - Returns: - int: Number of draws automatically failed - """ - total_failed = 0 - now = datetime.now(tz=timezone.utc) - - # Fetch all required experiments data in one query - experiment_query = select(ContextualBanditDB).where( - ContextualBanditDB.auto_fail.is_(True) - ) - experiments_result = (await asession.execute(experiment_query)).unique() - experiments = experiments_result.scalars().all() - for experiment in experiments: - hours_threshold = ( - experiment.auto_fail_value * 24 - if experiment.auto_fail_unit == "days" - else experiment.auto_fail_value - ) - - cutoff_datetime = now - timedelta(hours=hours_threshold) - - draws_query = ( - select(ContextualDrawDB) - .join( - ContextualBanditDB, - ContextualDrawDB.experiment_id == ContextualBanditDB.experiment_id, - ) - .where( - ContextualDrawDB.experiment_id == experiment.experiment_id, - ContextualDrawDB.observation_type.is_(None), - ContextualDrawDB.draw_datetime_utc <= cutoff_datetime, - ) - .limit(100) - ) # Process in smaller batches - - # Paginate through results if there are many draws to avoid memory issues - offset = 0 - while True: - batch_query = draws_query.offset(offset) - draws_result = (await asession.execute(batch_query)).unique() - draws_batch = draws_result.scalars().all() - - if not draws_batch: - break - - for draw in draws_batch: - draw.observation_type = ObservationType.AUTO - - await cmab_update_based_on_outcome( - experiment, - draw, - 0.0, - asession, - ObservationType.AUTO, + await update_arm_based_on_outcome( + experiment=experiment, + draw=draw, + rewards=rewards_list, + contexts=context_list, + treatments=treatments_list, + observation_type=ObservationType.AUTO, + asession=asession, ) total_failed += 1 @@ -238,12 +104,8 @@ async def main() -> None: Main function to process notifications """ async for asession in get_async_session(): - failed_count = await auto_fail_mab(asession) - print(f"Auto-failed MABs: {failed_count} draws") - failed_count = await auto_fail_cmab(asession) - print(f"Auto-failed CMABs: {failed_count} draws") - failed_count = await auto_fail_bayes_ab(asession) - print(f"Auto-failed Bayes ABs: {failed_count} draws") + failed_count = await auto_fail_experiment(asession) + print(f"Auto-failed experiments: {failed_count} draws") break diff --git a/backend/jobs/create_notifications.py b/backend/jobs/create_notifications.py index bb50508..f35adee 100644 --- a/backend/jobs/create_notifications.py +++ b/backend/jobs/create_notifications.py @@ -16,9 +16,9 @@ from sqlalchemy.ext.asyncio import AsyncSession from app.database import get_async_session +from app.experiments.models import ExperimentDB, NotificationsDB +from app.experiments.schemas import EventType from app.messages.models import EventMessageDB -from app.models import ExperimentBaseDB, NotificationsDB -from app.schemas import EventType from app.utils import setup_logger logger = setup_logger(log_level=logging.INFO) @@ -34,10 +34,10 @@ async def check_days_elapsed( Check if the number of days elapsed since the experiment was created is greater than or equal to the milestone """ - experiments_stmt = select(ExperimentBaseDB).where( - ExperimentBaseDB.experiment_id == experiment_id + experiments_stmt = select(ExperimentDB).where( + ExperimentDB.experiment_id == experiment_id ) - experiment: ExperimentBaseDB | None = ( + experiment: ExperimentDB | None = ( (await asession.execute(experiments_stmt)).scalars().first() ) @@ -100,12 +100,8 
@@ async def check_trials_completed( or equal to the milestone. """ # Fetch experiment - stmt = select(ExperimentBaseDB).where( - ExperimentBaseDB.experiment_id == experiment_id - ) - experiment: ExperimentBaseDB | None = ( - (await asession.execute(stmt)).scalars().first() - ) + stmt = select(ExperimentDB).where(ExperimentDB.experiment_id == experiment_id) + experiment: ExperimentDB | None = (await asession.execute(stmt)).scalars().first() if experiment: if experiment.n_trials >= milestone_trials: diff --git a/backend/migrations/versions/275ff74c0866_add_client_id_to_draws_db.py b/backend/migrations/versions/275ff74c0866_add_client_id_to_draws_db.py deleted file mode 100644 index 02d31e3..0000000 --- a/backend/migrations/versions/275ff74c0866_add_client_id_to_draws_db.py +++ /dev/null @@ -1,30 +0,0 @@ -"""add client id to draws db - -Revision ID: 275ff74c0866 -Revises: 5c15463fda65 -Create Date: 2025-04-28 20:01:35.705717 - -""" - -from typing import Sequence, Union - -import sqlalchemy as sa -from alembic import op - -# revision identifiers, used by Alembic. -revision: str = "275ff74c0866" -down_revision: Union[str, None] = "5c15463fda65" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None - - -def upgrade() -> None: - # ### commands auto generated by Alembic - please adjust! ### - op.add_column("draws_base", sa.Column("client_id", sa.String(), nullable=True)) - # ### end Alembic commands ### - - -def downgrade() -> None: - # ### commands auto generated by Alembic - please adjust! ### - op.drop_column("draws_base", "client_id") - # ### end Alembic commands ### diff --git a/backend/migrations/versions/28adf347e68d_add_tables_for_bayesian_ab_experiments.py b/backend/migrations/versions/28adf347e68d_add_tables_for_bayesian_ab_experiments.py deleted file mode 100644 index 94ecd57..0000000 --- a/backend/migrations/versions/28adf347e68d_add_tables_for_bayesian_ab_experiments.py +++ /dev/null @@ -1,66 +0,0 @@ -"""add tables for Bayesian AB experiments - -Revision ID: 28adf347e68d -Revises: feb042798cad -Create Date: 2025-04-27 11:23:26.823140 - -""" - -from typing import Sequence, Union - -import sqlalchemy as sa -from alembic import op - -# revision identifiers, used by Alembic. -revision: str = "28adf347e68d" -down_revision: Union[str, None] = "feb042798cad" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None - - -def upgrade() -> None: - # ### commands auto generated by Alembic - please adjust! 
### - op.create_table( - "bayes_ab_experiments", - sa.Column("experiment_id", sa.Integer(), nullable=False), - sa.ForeignKeyConstraint( - ["experiment_id"], ["experiments_base.experiment_id"], ondelete="CASCADE" - ), - sa.PrimaryKeyConstraint("experiment_id"), - ) - op.create_table( - "bayes_ab_arms", - sa.Column("arm_id", sa.Integer(), nullable=False), - sa.Column("mu_init", sa.Float(), nullable=False), - sa.Column("sigma_init", sa.Float(), nullable=False), - sa.Column("mu", sa.Float(), nullable=False), - sa.Column("sigma", sa.Float(), nullable=False), - sa.Column("is_treatment_arm", sa.Boolean(), nullable=False), - sa.ForeignKeyConstraint(["arm_id"], ["arms_base.arm_id"], ondelete="CASCADE"), - sa.PrimaryKeyConstraint("arm_id"), - ) - op.create_table( - "bayes_ab_draws", - sa.Column("draw_id", sa.String(), nullable=False), - sa.ForeignKeyConstraint( - ["draw_id"], ["draws_base.draw_id"], ondelete="CASCADE" - ), - sa.PrimaryKeyConstraint("draw_id"), - ) - op.add_column("mab_arms", sa.Column("alpha_init", sa.Float(), nullable=True)) - op.add_column("mab_arms", sa.Column("beta_init", sa.Float(), nullable=True)) - op.add_column("mab_arms", sa.Column("mu_init", sa.Float(), nullable=True)) - op.add_column("mab_arms", sa.Column("sigma_init", sa.Float(), nullable=True)) - # ### end Alembic commands ### - - -def downgrade() -> None: - # ### commands auto generated by Alembic - please adjust! ### - op.drop_column("mab_arms", "sigma_init") - op.drop_column("mab_arms", "mu_init") - op.drop_column("mab_arms", "beta_init") - op.drop_column("mab_arms", "alpha_init") - op.drop_table("bayes_ab_draws") - op.drop_table("bayes_ab_arms") - op.drop_table("bayes_ab_experiments") - # ### end Alembic commands ### diff --git a/backend/migrations/versions/45b9483ee392_fix_messages_foreign_key_constraint.py b/backend/migrations/versions/45b9483ee392_fix_messages_foreign_key_constraint.py new file mode 100644 index 0000000..cdfcd4c --- /dev/null +++ b/backend/migrations/versions/45b9483ee392_fix_messages_foreign_key_constraint.py @@ -0,0 +1,41 @@ +"""fix messages foreign key constraint + +Revision ID: 45b9483ee392 +Revises: 6101ba814d91 +Create Date: 2025-06-05 18:10:33.744331 + +""" + +from typing import Sequence, Union + +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "45b9483ee392" +down_revision: Union[str, None] = "6101ba814d91" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint( + op.f("event_messages_experiment_id_fkey"), "event_messages", type_="foreignkey" + ) + op.create_foreign_key( + None, "event_messages", "experiments", ["experiment_id"], ["experiment_id"] + ) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_constraint(None, "event_messages", type_="foreignkey") + op.create_foreign_key( + op.f("event_messages_experiment_id_fkey"), + "event_messages", + "experiments_base", + ["experiment_id"], + ["experiment_id"], + ) + # ### end Alembic commands ### diff --git a/backend/migrations/versions/5c15463fda65_added_first_name_and_last_name_to_users.py b/backend/migrations/versions/5c15463fda65_added_first_name_and_last_name_to_users.py deleted file mode 100644 index 39b1a4d..0000000 --- a/backend/migrations/versions/5c15463fda65_added_first_name_and_last_name_to_users.py +++ /dev/null @@ -1,36 +0,0 @@ -"""added first name and last name to users - -Revision ID: 5c15463fda65 -Revises: 28adf347e68d -Create Date: 2025-04-26 15:47:23.199751 - -""" - -from typing import Sequence, Union - -import sqlalchemy as sa -from alembic import op - -# revision identifiers, used by Alembic. -revision: str = "5c15463fda65" -down_revision: Union[str, None] = "28adf347e68d" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None - - -def upgrade() -> None: - # Add columns as nullable first - op.add_column("users", sa.Column("first_name", sa.String(), nullable=True)) - op.add_column("users", sa.Column("last_name", sa.String(), nullable=True)) - - # Set default values for existing records - op.execute("UPDATE users SET first_name = '', last_name = ''") - - # Make columns non-nullable - op.alter_column("users", "first_name", nullable=False) - op.alter_column("users", "last_name", nullable=False) - - -def downgrade() -> None: - op.drop_column("users", "last_name") - op.drop_column("users", "first_name") diff --git a/backend/migrations/versions/6101ba814d91_fresh_start.py b/backend/migrations/versions/6101ba814d91_fresh_start.py new file mode 100644 index 0000000..d246310 --- /dev/null +++ b/backend/migrations/versions/6101ba814d91_fresh_start.py @@ -0,0 +1,438 @@ +"""fresh start + +Revision ID: 6101ba814d91 +Revises: +Create Date: 2025-06-03 18:00:18.919218 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision: str = "6101ba814d91" +down_revision: Union[str, None] = None +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "users", + sa.Column("user_id", sa.Integer(), nullable=False), + sa.Column("username", sa.String(), nullable=False), + sa.Column("first_name", sa.String(), nullable=False), + sa.Column("last_name", sa.String(), nullable=False), + sa.Column("hashed_password", sa.String(length=96), nullable=False), + sa.Column("created_datetime_utc", sa.DateTime(timezone=True), nullable=False), + sa.Column("updated_datetime_utc", sa.DateTime(timezone=True), nullable=False), + sa.Column("access_level", sa.String(), nullable=False), + sa.Column("is_active", sa.Boolean(), nullable=False), + sa.Column("is_verified", sa.Boolean(), nullable=False), + sa.PrimaryKeyConstraint("user_id"), + sa.UniqueConstraint("username"), + ) + op.create_table( + "messages", + sa.Column("message_id", sa.Integer(), nullable=False), + sa.Column("user_id", sa.Integer(), nullable=False), + sa.Column("text", sa.String(), nullable=False), + sa.Column("title", sa.String(), nullable=False), + sa.Column("is_unread", sa.Boolean(), nullable=False), + sa.Column("created_datetime_utc", sa.DateTime(timezone=True), nullable=False), + sa.Column("message_type", sa.String(length=50), nullable=False), + sa.ForeignKeyConstraint( + ["user_id"], + ["users.user_id"], + ), + sa.PrimaryKeyConstraint("message_id"), + ) + op.create_table( + "workspace", + sa.Column("api_daily_quota", sa.Integer(), nullable=True), + sa.Column("api_key_first_characters", sa.String(length=5), nullable=True), + sa.Column( + "api_key_updated_datetime_utc", sa.DateTime(timezone=True), nullable=True + ), + sa.Column("api_key_rotated_by_user_id", sa.Integer(), nullable=True), + sa.Column("content_quota", sa.Integer(), nullable=True), + sa.Column("created_datetime_utc", sa.DateTime(timezone=True), nullable=False), + sa.Column("hashed_api_key", sa.String(length=96), nullable=True), + sa.Column("updated_datetime_utc", sa.DateTime(timezone=True), nullable=False), + sa.Column("workspace_id", sa.Integer(), nullable=False), + sa.Column("workspace_name", sa.String(), nullable=False), + sa.Column("is_default", sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint( + ["api_key_rotated_by_user_id"], + ["users.user_id"], + ), + sa.PrimaryKeyConstraint("workspace_id"), + sa.UniqueConstraint("hashed_api_key"), + sa.UniqueConstraint("workspace_name"), + ) + op.create_table( + "api_key_rotation_history", + sa.Column("rotation_id", sa.Integer(), nullable=False), + sa.Column("workspace_id", sa.Integer(), nullable=False), + sa.Column("rotated_by_user_id", sa.Integer(), nullable=False), + sa.Column("key_first_characters", sa.String(length=5), nullable=False), + sa.Column("rotation_datetime_utc", sa.DateTime(timezone=True), nullable=False), + sa.ForeignKeyConstraint( + ["rotated_by_user_id"], + ["users.user_id"], + ), + sa.ForeignKeyConstraint( + ["workspace_id"], ["workspace.workspace_id"], ondelete="CASCADE" + ), + sa.PrimaryKeyConstraint("rotation_id"), + ) + op.create_table( + "experiments", + sa.Column("experiment_id", sa.Integer(), nullable=False), + sa.Column("user_id", sa.Integer(), nullable=False), + sa.Column("workspace_id", sa.Integer(), nullable=False), + sa.Column("name", sa.String(length=150), nullable=False), + sa.Column("description", sa.String(length=500), nullable=False), + sa.Column("is_active", sa.Boolean(), nullable=False), + sa.Column("sticky_assignment", sa.Boolean(), nullable=False), + sa.Column("auto_fail", sa.Boolean(), nullable=False), + sa.Column("auto_fail_value", sa.Integer(), nullable=True), + sa.Column( + "auto_fail_unit", + sa.Enum("DAYS", 
"HOURS", name="autofailunittype"), + nullable=True, + ), + sa.Column("exp_type", sa.String(length=50), nullable=False), + sa.Column("prior_type", sa.String(length=50), nullable=False), + sa.Column("reward_type", sa.String(length=50), nullable=False), + sa.Column("created_datetime_utc", sa.DateTime(timezone=True), nullable=False), + sa.Column("n_trials", sa.Integer(), nullable=False), + sa.Column("last_trial_datetime_utc", sa.DateTime(timezone=True), nullable=True), + sa.ForeignKeyConstraint( + ["user_id"], + ["users.user_id"], + ), + sa.ForeignKeyConstraint( + ["workspace_id"], + ["workspace.workspace_id"], + ), + sa.PrimaryKeyConstraint("experiment_id"), + ) + op.create_table( + "experiments_base", + sa.Column("experiment_id", sa.Integer(), nullable=False), + sa.Column("name", sa.String(length=150), nullable=False), + sa.Column("description", sa.String(length=500), nullable=False), + sa.Column("sticky_assignment", sa.Boolean(), nullable=False), + sa.Column("auto_fail", sa.Boolean(), nullable=False), + sa.Column("auto_fail_value", sa.Integer(), nullable=True), + sa.Column( + "auto_fail_unit", + sa.Enum("DAYS", "HOURS", name="autofailunittype"), + nullable=True, + ), + sa.Column("user_id", sa.Integer(), nullable=False), + sa.Column("workspace_id", sa.Integer(), nullable=False), + sa.Column("is_active", sa.Boolean(), nullable=False), + sa.Column("exp_type", sa.String(length=50), nullable=False), + sa.Column("prior_type", sa.String(length=50), nullable=False), + sa.Column("reward_type", sa.String(length=50), nullable=False), + sa.Column("created_datetime_utc", sa.DateTime(timezone=True), nullable=False), + sa.Column("n_trials", sa.Integer(), nullable=False), + sa.Column("last_trial_datetime_utc", sa.DateTime(timezone=True), nullable=True), + sa.ForeignKeyConstraint( + ["user_id"], + ["users.user_id"], + ), + sa.ForeignKeyConstraint( + ["workspace_id"], + ["workspace.workspace_id"], + ), + sa.PrimaryKeyConstraint("experiment_id"), + ) + op.create_table( + "pending_invitations", + sa.Column("invitation_id", sa.Integer(), nullable=False), + sa.Column("email", sa.String(), nullable=False), + sa.Column("workspace_id", sa.Integer(), nullable=False), + sa.Column( + "role", + sa.Enum("ADMIN", "READ_ONLY", name="userroles", native_enum=False), + nullable=False, + ), + sa.Column("inviter_id", sa.Integer(), nullable=False), + sa.Column("created_datetime_utc", sa.DateTime(timezone=True), nullable=False), + sa.ForeignKeyConstraint( + ["inviter_id"], + ["users.user_id"], + ), + sa.ForeignKeyConstraint( + ["workspace_id"], ["workspace.workspace_id"], ondelete="CASCADE" + ), + sa.PrimaryKeyConstraint("invitation_id"), + ) + op.create_table( + "user_workspace", + sa.Column("created_datetime_utc", sa.DateTime(timezone=True), nullable=False), + sa.Column( + "default_workspace", + sa.Boolean(), + server_default=sa.text("false"), + nullable=False, + ), + sa.Column("updated_datetime_utc", sa.DateTime(timezone=True), nullable=False), + sa.Column("user_id", sa.Integer(), nullable=False), + sa.Column( + "user_role", + sa.Enum("ADMIN", "READ_ONLY", name="userroles", native_enum=False), + nullable=False, + ), + sa.Column("workspace_id", sa.Integer(), nullable=False), + sa.ForeignKeyConstraint(["user_id"], ["users.user_id"], ondelete="CASCADE"), + sa.ForeignKeyConstraint( + ["workspace_id"], ["workspace.workspace_id"], ondelete="CASCADE" + ), + sa.PrimaryKeyConstraint("user_id", "workspace_id"), + ) + op.create_table( + "arms", + sa.Column("arm_id", sa.Integer(), nullable=False), + sa.Column("workspace_id", 
sa.Integer(), nullable=False), + sa.Column("experiment_id", sa.Integer(), nullable=False), + sa.Column("name", sa.String(length=150), nullable=False), + sa.Column("description", sa.String(length=500), nullable=False), + sa.Column("n_outcomes", sa.Integer(), nullable=False), + sa.Column("mu_init", sa.Float(), nullable=True), + sa.Column("sigma_init", sa.Float(), nullable=True), + sa.Column("mu", postgresql.ARRAY(sa.Float()), nullable=True), + sa.Column("covariance", postgresql.ARRAY(sa.Float()), nullable=True), + sa.Column("is_treatment_arm", sa.Boolean(), nullable=True), + sa.Column("alpha_init", sa.Float(), nullable=True), + sa.Column("beta_init", sa.Float(), nullable=True), + sa.Column("alpha", sa.Float(), nullable=True), + sa.Column("beta", sa.Float(), nullable=True), + sa.ForeignKeyConstraint( + ["experiment_id"], + ["experiments.experiment_id"], + ), + sa.ForeignKeyConstraint( + ["workspace_id"], + ["workspace.workspace_id"], + ), + sa.PrimaryKeyConstraint("arm_id"), + ) + op.create_table( + "arms_base", + sa.Column("arm_id", sa.Integer(), nullable=False), + sa.Column("experiment_id", sa.Integer(), nullable=False), + sa.Column("user_id", sa.Integer(), nullable=False), + sa.Column("name", sa.String(length=150), nullable=False), + sa.Column("description", sa.String(length=500), nullable=False), + sa.Column("arm_type", sa.String(length=50), nullable=False), + sa.Column("n_outcomes", sa.Integer(), nullable=False), + sa.ForeignKeyConstraint( + ["experiment_id"], + ["experiments_base.experiment_id"], + ), + sa.ForeignKeyConstraint( + ["user_id"], + ["users.user_id"], + ), + sa.PrimaryKeyConstraint("arm_id"), + ) + op.create_table( + "clients", + sa.Column("client_id", sa.String(), nullable=False), + sa.Column("experiment_id", sa.Integer(), nullable=False), + sa.Column("workspace_id", sa.Integer(), nullable=False), + sa.ForeignKeyConstraint( + ["experiment_id"], + ["experiments.experiment_id"], + ), + sa.ForeignKeyConstraint( + ["workspace_id"], + ["workspace.workspace_id"], + ), + sa.PrimaryKeyConstraint("client_id"), + ) + op.create_table( + "context", + sa.Column("context_id", sa.Integer(), nullable=False), + sa.Column("experiment_id", sa.Integer(), nullable=False), + sa.Column("workspace_id", sa.Integer(), nullable=False), + sa.Column("name", sa.String(length=150), nullable=False), + sa.Column("description", sa.String(length=500), nullable=True), + sa.Column("value_type", sa.String(length=50), nullable=False), + sa.ForeignKeyConstraint( + ["experiment_id"], + ["experiments.experiment_id"], + ), + sa.ForeignKeyConstraint( + ["workspace_id"], + ["workspace.workspace_id"], + ), + sa.PrimaryKeyConstraint("context_id"), + ) + op.create_table( + "event_messages", + sa.Column("message_id", sa.Integer(), nullable=False), + sa.Column("experiment_id", sa.Integer(), nullable=False), + sa.ForeignKeyConstraint( + ["experiment_id"], + ["experiments_base.experiment_id"], + ), + sa.ForeignKeyConstraint( + ["message_id"], ["messages.message_id"], ondelete="CASCADE" + ), + sa.PrimaryKeyConstraint("message_id"), + ) + op.create_table( + "notifications", + sa.Column("notification_id", sa.Integer(), nullable=False), + sa.Column("experiment_id", sa.Integer(), nullable=False), + sa.Column("user_id", sa.Integer(), nullable=False), + sa.Column("workspace_id", sa.Integer(), nullable=False), + sa.Column( + "notification_type", + sa.Enum( + "DAYS_ELAPSED", + "TRIALS_COMPLETED", + "PERCENTAGE_BETTER", + name="eventtype", + ), + nullable=False, + ), + sa.Column("notification_value", sa.Integer(), nullable=False), + 
sa.Column("is_active", sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint( + ["experiment_id"], + ["experiments.experiment_id"], + ), + sa.ForeignKeyConstraint( + ["user_id"], + ["users.user_id"], + ), + sa.ForeignKeyConstraint( + ["workspace_id"], + ["workspace.workspace_id"], + ), + sa.PrimaryKeyConstraint("notification_id"), + ) + op.create_table( + "notifications_db", + sa.Column("notification_id", sa.Integer(), nullable=False), + sa.Column("experiment_id", sa.Integer(), nullable=False), + sa.Column("user_id", sa.Integer(), nullable=False), + sa.Column( + "notification_type", + sa.Enum( + "DAYS_ELAPSED", + "TRIALS_COMPLETED", + "PERCENTAGE_BETTER", + name="eventtype", + ), + nullable=False, + ), + sa.Column("notification_value", sa.Integer(), nullable=False), + sa.Column("is_active", sa.Boolean(), nullable=False), + sa.ForeignKeyConstraint( + ["experiment_id"], + ["experiments_base.experiment_id"], + ), + sa.ForeignKeyConstraint( + ["user_id"], + ["users.user_id"], + ), + sa.PrimaryKeyConstraint("notification_id"), + ) + op.create_table( + "draws", + sa.Column("draw_id", sa.String(), nullable=False), + sa.Column("arm_id", sa.Integer(), nullable=False), + sa.Column("experiment_id", sa.Integer(), nullable=False), + sa.Column("workspace_id", sa.Integer(), nullable=False), + sa.Column("client_id", sa.String(length=36), nullable=True), + sa.Column("draw_datetime_utc", sa.DateTime(timezone=True), nullable=False), + sa.Column("observed_datetime_utc", sa.DateTime(timezone=True), nullable=True), + sa.Column( + "observation_type", + sa.Enum("USER", "AUTO", name="observationtype"), + nullable=True, + ), + sa.Column("reward", sa.Float(), nullable=True), + sa.Column("context_val", postgresql.ARRAY(sa.Float()), nullable=True), + sa.ForeignKeyConstraint( + ["arm_id"], + ["arms.arm_id"], + ), + sa.ForeignKeyConstraint( + ["client_id"], + ["clients.client_id"], + ), + sa.ForeignKeyConstraint( + ["experiment_id"], + ["experiments.experiment_id"], + ), + sa.ForeignKeyConstraint( + ["workspace_id"], + ["workspace.workspace_id"], + ), + sa.PrimaryKeyConstraint("draw_id"), + ) + op.create_table( + "draws_base", + sa.Column("draw_id", sa.String(), nullable=False), + sa.Column("client_id", sa.String(), nullable=True), + sa.Column("arm_id", sa.Integer(), nullable=False), + sa.Column("experiment_id", sa.Integer(), nullable=False), + sa.Column("user_id", sa.Integer(), nullable=False), + sa.Column("draw_datetime_utc", sa.DateTime(timezone=True), nullable=False), + sa.Column("observed_datetime_utc", sa.DateTime(timezone=True), nullable=True), + sa.Column( + "observation_type", + sa.Enum("USER", "AUTO", name="observationtype"), + nullable=True, + ), + sa.Column("draw_type", sa.String(length=50), nullable=False), + sa.Column("reward", sa.Float(), nullable=True), + sa.ForeignKeyConstraint( + ["arm_id"], + ["arms_base.arm_id"], + ), + sa.ForeignKeyConstraint( + ["experiment_id"], + ["experiments_base.experiment_id"], + ), + sa.ForeignKeyConstraint( + ["user_id"], + ["users.user_id"], + ), + sa.PrimaryKeyConstraint("draw_id"), + ) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_table("draws_base") + op.drop_table("draws") + op.drop_table("notifications_db") + op.drop_table("notifications") + op.drop_table("event_messages") + op.drop_table("context") + op.drop_table("clients") + op.drop_table("arms_base") + op.drop_table("arms") + op.drop_table("user_workspace") + op.drop_table("pending_invitations") + op.drop_table("experiments_base") + op.drop_table("experiments") + op.drop_table("api_key_rotation_history") + op.drop_table("workspace") + op.drop_table("messages") + op.drop_table("users") + # ### end Alembic commands ### diff --git a/backend/migrations/versions/9f7482ba882f_workspace_model.py b/backend/migrations/versions/9f7482ba882f_workspace_model.py deleted file mode 100644 index 3543211..0000000 --- a/backend/migrations/versions/9f7482ba882f_workspace_model.py +++ /dev/null @@ -1,123 +0,0 @@ -"""Workspace model - -Revision ID: 9f7482ba882f -Revises: 275ff74c0866 -Create Date: 2025-05-04 11:56:03.939578 - -""" - -from typing import Sequence, Union - -import sqlalchemy as sa -from alembic import op - -# revision identifiers, used by Alembic. -revision: str = "9f7482ba882f" -down_revision: Union[str, None] = "275ff74c0866" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None - - -def upgrade() -> None: - # ### commands auto generated by Alembic - please adjust! ### - op.create_table( - "workspace", - sa.Column("api_daily_quota", sa.Integer(), nullable=True), - sa.Column("api_key_first_characters", sa.String(length=5), nullable=True), - sa.Column( - "api_key_updated_datetime_utc", sa.DateTime(timezone=True), nullable=True - ), - sa.Column("api_key_rotated_by_user_id", sa.Integer(), nullable=True), - sa.Column("content_quota", sa.Integer(), nullable=True), - sa.Column("created_datetime_utc", sa.DateTime(timezone=True), nullable=False), - sa.Column("hashed_api_key", sa.String(length=96), nullable=True), - sa.Column("updated_datetime_utc", sa.DateTime(timezone=True), nullable=False), - sa.Column("workspace_id", sa.Integer(), nullable=False), - sa.Column("workspace_name", sa.String(), nullable=False), - sa.Column("is_default", sa.Boolean(), nullable=False), - sa.ForeignKeyConstraint( - ["api_key_rotated_by_user_id"], - ["users.user_id"], - ), - sa.PrimaryKeyConstraint("workspace_id"), - sa.UniqueConstraint("hashed_api_key"), - sa.UniqueConstraint("workspace_name"), - ) - op.create_table( - "api_key_rotation_history", - sa.Column("rotation_id", sa.Integer(), nullable=False), - sa.Column("workspace_id", sa.Integer(), nullable=False), - sa.Column("rotated_by_user_id", sa.Integer(), nullable=False), - sa.Column("key_first_characters", sa.String(length=5), nullable=False), - sa.Column("rotation_datetime_utc", sa.DateTime(timezone=True), nullable=False), - sa.ForeignKeyConstraint( - ["rotated_by_user_id"], - ["users.user_id"], - ), - sa.ForeignKeyConstraint( - ["workspace_id"], ["workspace.workspace_id"], ondelete="CASCADE" - ), - sa.PrimaryKeyConstraint("rotation_id"), - ) - op.create_table( - "pending_invitations", - sa.Column("invitation_id", sa.Integer(), nullable=False), - sa.Column("email", sa.String(), nullable=False), - sa.Column("workspace_id", sa.Integer(), nullable=False), - sa.Column( - "role", - sa.Enum("ADMIN", "READ_ONLY", name="userroles", native_enum=False), - nullable=False, - ), - sa.Column("inviter_id", sa.Integer(), nullable=False), - sa.Column("created_datetime_utc", sa.DateTime(timezone=True), nullable=False), - sa.ForeignKeyConstraint( - ["inviter_id"], - ["users.user_id"], - ), - 
sa.ForeignKeyConstraint( - ["workspace_id"], ["workspace.workspace_id"], ondelete="CASCADE" - ), - sa.PrimaryKeyConstraint("invitation_id"), - ) - op.create_table( - "user_workspace", - sa.Column("created_datetime_utc", sa.DateTime(timezone=True), nullable=False), - sa.Column( - "default_workspace", - sa.Boolean(), - server_default=sa.text("false"), - nullable=False, - ), - sa.Column("updated_datetime_utc", sa.DateTime(timezone=True), nullable=False), - sa.Column("user_id", sa.Integer(), nullable=False), - sa.Column( - "user_role", - sa.Enum("ADMIN", "READ_ONLY", name="userroles", native_enum=False), - nullable=False, - ), - sa.Column("workspace_id", sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(["user_id"], ["users.user_id"], ondelete="CASCADE"), - sa.ForeignKeyConstraint( - ["workspace_id"], ["workspace.workspace_id"], ondelete="CASCADE" - ), - sa.PrimaryKeyConstraint("user_id", "workspace_id"), - ) - op.add_column( - "experiments_base", sa.Column("workspace_id", sa.Integer(), nullable=False) - ) - op.create_foreign_key( - None, "experiments_base", "workspace", ["workspace_id"], ["workspace_id"] - ) - # ### end Alembic commands ### - - -def downgrade() -> None: - # ### commands auto generated by Alembic - please adjust! ### - op.drop_constraint(None, "experiments_base", type_="foreignkey") - op.drop_column("experiments_base", "workspace_id") - op.drop_table("user_workspace") - op.drop_table("pending_invitations") - op.drop_table("api_key_rotation_history") - op.drop_table("workspace") - # ### end Alembic commands ### diff --git a/backend/migrations/versions/ecddd830b464_remove_user_api_key.py b/backend/migrations/versions/ecddd830b464_remove_user_api_key.py deleted file mode 100644 index b03b032..0000000 --- a/backend/migrations/versions/ecddd830b464_remove_user_api_key.py +++ /dev/null @@ -1,70 +0,0 @@ -"""Remove User API key - -Revision ID: ecddd830b464 -Revises: 9f7482ba882f -Create Date: 2025-05-21 13:59:22.199884 - -""" - -from typing import Sequence, Union - -import sqlalchemy as sa -from alembic import op -from sqlalchemy.dialects import postgresql - -# revision identifiers, used by Alembic. -revision: str = "ecddd830b464" -down_revision: Union[str, None] = "9f7482ba882f" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None - - -def upgrade() -> None: - # ### commands auto generated by Alembic - please adjust! ### - op.drop_constraint("users_hashed_api_key_key", "users", type_="unique") - op.drop_column("users", "api_daily_quota") - op.drop_column("users", "hashed_api_key") - op.drop_column("users", "api_key_updated_datetime_utc") - op.drop_column("users", "api_key_first_characters") - op.drop_column("users", "experiments_quota") - # ### end Alembic commands ### - - -def downgrade() -> None: - # ### commands auto generated by Alembic - please adjust! 
### - op.add_column( - "users", - sa.Column( - "experiments_quota", sa.INTEGER(), autoincrement=False, nullable=True - ), - ) - op.add_column( - "users", - sa.Column( - "api_key_first_characters", - sa.VARCHAR(length=5), - autoincrement=False, - nullable=False, - ), - ) - op.add_column( - "users", - sa.Column( - "api_key_updated_datetime_utc", - postgresql.TIMESTAMP(timezone=True), - autoincrement=False, - nullable=False, - ), - ) - op.add_column( - "users", - sa.Column( - "hashed_api_key", sa.VARCHAR(length=96), autoincrement=False, nullable=False - ), - ) - op.add_column( - "users", - sa.Column("api_daily_quota", sa.INTEGER(), autoincrement=False, nullable=True), - ) - op.create_unique_constraint("users_hashed_api_key_key", "users", ["hashed_api_key"]) - # ### end Alembic commands ### diff --git a/backend/migrations/versions/faf4228e13a3_clean_start.py b/backend/migrations/versions/faf4228e13a3_clean_start.py deleted file mode 100644 index 71af813..0000000 --- a/backend/migrations/versions/faf4228e13a3_clean_start.py +++ /dev/null @@ -1,257 +0,0 @@ -"""clean start - -Revision ID: faf4228e13a3 -Revises: -Create Date: 2025-04-17 21:18:03.761219 - -""" - -from typing import Sequence, Union - -import sqlalchemy as sa -from alembic import op -from sqlalchemy.dialects import postgresql - -# revision identifiers, used by Alembic. -revision: str = "faf4228e13a3" -down_revision: Union[str, None] = None -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None - - -def upgrade() -> None: - # ### commands auto generated by Alembic - please adjust! ### - op.create_table( - "users", - sa.Column("user_id", sa.Integer(), nullable=False), - sa.Column("username", sa.String(), nullable=False), - sa.Column("hashed_password", sa.String(length=96), nullable=False), - sa.Column("hashed_api_key", sa.String(length=96), nullable=False), - sa.Column("api_key_first_characters", sa.String(length=5), nullable=False), - sa.Column( - "api_key_updated_datetime_utc", sa.DateTime(timezone=True), nullable=False - ), - sa.Column("experiments_quota", sa.Integer(), nullable=True), - sa.Column("api_daily_quota", sa.Integer(), nullable=True), - sa.Column("created_datetime_utc", sa.DateTime(timezone=True), nullable=False), - sa.Column("updated_datetime_utc", sa.DateTime(timezone=True), nullable=False), - sa.Column("access_level", sa.String(), nullable=False), - sa.Column("is_active", sa.Boolean(), nullable=False), - sa.Column("is_verified", sa.Boolean(), nullable=False), - sa.PrimaryKeyConstraint("user_id"), - sa.UniqueConstraint("hashed_api_key"), - sa.UniqueConstraint("username"), - ) - op.create_table( - "experiments_base", - sa.Column("experiment_id", sa.Integer(), nullable=False), - sa.Column("name", sa.String(length=150), nullable=False), - sa.Column("description", sa.String(length=500), nullable=False), - sa.Column("user_id", sa.Integer(), nullable=False), - sa.Column("is_active", sa.Boolean(), nullable=False), - sa.Column("exp_type", sa.String(length=50), nullable=False), - sa.Column("prior_type", sa.String(length=50), nullable=False), - sa.Column("reward_type", sa.String(length=50), nullable=False), - sa.Column("created_datetime_utc", sa.DateTime(timezone=True), nullable=False), - sa.Column("n_trials", sa.Integer(), nullable=False), - sa.Column("last_trial_datetime_utc", sa.DateTime(timezone=True), nullable=True), - sa.ForeignKeyConstraint( - ["user_id"], - ["users.user_id"], - ), - sa.PrimaryKeyConstraint("experiment_id"), - ) - op.create_table( - "messages", - 
sa.Column("message_id", sa.Integer(), nullable=False), - sa.Column("user_id", sa.Integer(), nullable=False), - sa.Column("text", sa.String(), nullable=False), - sa.Column("title", sa.String(), nullable=False), - sa.Column("is_unread", sa.Boolean(), nullable=False), - sa.Column("created_datetime_utc", sa.DateTime(timezone=True), nullable=False), - sa.Column("message_type", sa.String(length=50), nullable=False), - sa.ForeignKeyConstraint( - ["user_id"], - ["users.user_id"], - ), - sa.PrimaryKeyConstraint("message_id"), - ) - op.create_table( - "arms_base", - sa.Column("arm_id", sa.Integer(), nullable=False), - sa.Column("experiment_id", sa.Integer(), nullable=False), - sa.Column("user_id", sa.Integer(), nullable=False), - sa.Column("name", sa.String(length=150), nullable=False), - sa.Column("description", sa.String(length=500), nullable=False), - sa.Column("arm_type", sa.String(length=50), nullable=False), - sa.Column("n_outcomes", sa.Integer(), nullable=False), - sa.ForeignKeyConstraint( - ["experiment_id"], - ["experiments_base.experiment_id"], - ), - sa.ForeignKeyConstraint( - ["user_id"], - ["users.user_id"], - ), - sa.PrimaryKeyConstraint("arm_id"), - ) - op.create_table( - "contextual_mabs", - sa.Column("experiment_id", sa.Integer(), nullable=False), - sa.ForeignKeyConstraint( - ["experiment_id"], ["experiments_base.experiment_id"], ondelete="CASCADE" - ), - sa.PrimaryKeyConstraint("experiment_id"), - ) - op.create_table( - "event_messages", - sa.Column("message_id", sa.Integer(), nullable=False), - sa.Column("experiment_id", sa.Integer(), nullable=False), - sa.ForeignKeyConstraint( - ["experiment_id"], - ["experiments_base.experiment_id"], - ), - sa.ForeignKeyConstraint( - ["message_id"], ["messages.message_id"], ondelete="CASCADE" - ), - sa.PrimaryKeyConstraint("message_id"), - ) - op.create_table( - "mabs", - sa.Column("experiment_id", sa.Integer(), nullable=False), - sa.ForeignKeyConstraint( - ["experiment_id"], ["experiments_base.experiment_id"], ondelete="CASCADE" - ), - sa.PrimaryKeyConstraint("experiment_id"), - ) - op.create_table( - "notifications", - sa.Column("notification_id", sa.Integer(), nullable=False), - sa.Column("experiment_id", sa.Integer(), nullable=False), - sa.Column("user_id", sa.Integer(), nullable=False), - sa.Column( - "notification_type", - sa.Enum( - "DAYS_ELAPSED", - "TRIALS_COMPLETED", - "PERCENTAGE_BETTER", - name="eventtype", - ), - nullable=False, - ), - sa.Column("notification_value", sa.Integer(), nullable=False), - sa.Column("is_active", sa.Boolean(), nullable=False), - sa.ForeignKeyConstraint( - ["experiment_id"], - ["experiments_base.experiment_id"], - ), - sa.ForeignKeyConstraint( - ["user_id"], - ["users.user_id"], - ), - sa.PrimaryKeyConstraint("notification_id"), - ) - op.create_table( - "contexts", - sa.Column("context_id", sa.Integer(), nullable=False), - sa.Column("experiment_id", sa.Integer(), nullable=False), - sa.Column("user_id", sa.Integer(), nullable=False), - sa.Column("name", sa.String(length=150), nullable=False), - sa.Column("description", sa.String(length=500), nullable=True), - sa.Column("value_type", sa.String(length=50), nullable=False), - sa.ForeignKeyConstraint( - ["experiment_id"], - ["contextual_mabs.experiment_id"], - ), - sa.ForeignKeyConstraint( - ["user_id"], - ["users.user_id"], - ), - sa.PrimaryKeyConstraint("context_id"), - ) - op.create_table( - "contextual_arms", - sa.Column("arm_id", sa.Integer(), nullable=False), - sa.Column("mu_init", sa.Float(), nullable=False), - sa.Column("sigma_init", sa.Float(), 
nullable=False), - sa.Column("mu", postgresql.ARRAY(sa.Float()), nullable=False), - sa.Column("covariance", postgresql.ARRAY(sa.Float()), nullable=False), - sa.ForeignKeyConstraint(["arm_id"], ["arms_base.arm_id"], ondelete="CASCADE"), - sa.PrimaryKeyConstraint("arm_id"), - ) - op.create_table( - "draws_base", - sa.Column("draw_id", sa.String(), nullable=False), - sa.Column("arm_id", sa.Integer(), nullable=False), - sa.Column("experiment_id", sa.Integer(), nullable=False), - sa.Column("user_id", sa.Integer(), nullable=False), - sa.Column("draw_datetime_utc", sa.DateTime(timezone=True), nullable=False), - sa.Column("observed_datetime_utc", sa.DateTime(timezone=True), nullable=True), - sa.Column( - "observation_type", - sa.Enum("USER", "AUTO", name="observationtype"), - nullable=True, - ), - sa.Column("draw_type", sa.String(length=50), nullable=False), - sa.Column("reward", sa.Float(), nullable=True), - sa.ForeignKeyConstraint( - ["arm_id"], - ["arms_base.arm_id"], - ), - sa.ForeignKeyConstraint( - ["experiment_id"], - ["experiments_base.experiment_id"], - ), - sa.ForeignKeyConstraint( - ["user_id"], - ["users.user_id"], - ), - sa.PrimaryKeyConstraint("draw_id"), - ) - op.create_table( - "mab_arms", - sa.Column("arm_id", sa.Integer(), nullable=False), - sa.Column("alpha", sa.Float(), nullable=True), - sa.Column("beta", sa.Float(), nullable=True), - sa.Column("mu", sa.Float(), nullable=True), - sa.Column("sigma", sa.Float(), nullable=True), - sa.ForeignKeyConstraint(["arm_id"], ["arms_base.arm_id"], ondelete="CASCADE"), - sa.PrimaryKeyConstraint("arm_id"), - ) - op.create_table( - "contextual_draws", - sa.Column("draw_id", sa.String(), nullable=False), - sa.Column("context_val", postgresql.ARRAY(sa.Float()), nullable=False), - sa.ForeignKeyConstraint( - ["draw_id"], ["draws_base.draw_id"], ondelete="CASCADE" - ), - sa.PrimaryKeyConstraint("draw_id"), - ) - op.create_table( - "mab_draws", - sa.Column("draw_id", sa.String(), nullable=False), - sa.ForeignKeyConstraint( - ["draw_id"], ["draws_base.draw_id"], ondelete="CASCADE" - ), - sa.PrimaryKeyConstraint("draw_id"), - ) - # ### end Alembic commands ### - - -def downgrade() -> None: - # ### commands auto generated by Alembic - please adjust! ### - op.drop_table("mab_draws") - op.drop_table("contextual_draws") - op.drop_table("mab_arms") - op.drop_table("draws_base") - op.drop_table("contextual_arms") - op.drop_table("contexts") - op.drop_table("notifications") - op.drop_table("mabs") - op.drop_table("event_messages") - op.drop_table("contextual_mabs") - op.drop_table("arms_base") - op.drop_table("messages") - op.drop_table("experiments_base") - op.drop_table("users") - # ### end Alembic commands ### diff --git a/backend/migrations/versions/feb042798cad_added_sticky_assignments_and_autofail.py b/backend/migrations/versions/feb042798cad_added_sticky_assignments_and_autofail.py deleted file mode 100644 index 824c2ba..0000000 --- a/backend/migrations/versions/feb042798cad_added_sticky_assignments_and_autofail.py +++ /dev/null @@ -1,59 +0,0 @@ -"""added sticky assignments and autofail - -Revision ID: feb042798cad -Revises: faf4228e13a3 -Create Date: 2025-04-18 15:11:40.688651 - -""" - -from typing import Sequence, Union - -import sqlalchemy as sa -from alembic import op - -# revision identifiers, used by Alembic. 
-revision: str = "feb042798cad" -down_revision: Union[str, None] = "faf4228e13a3" -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None - - -def upgrade() -> None: - # ### commands auto generated by Alembic - please adjust! ### - autofailunittype = sa.Enum( - "DAYS", - "HOURS", - name="autofailunittype", - ) - autofailunittype.create(op.get_bind()) - - op.add_column( - "experiments_base", sa.Column("sticky_assignment", sa.Boolean(), nullable=False) - ) - op.add_column( - "experiments_base", sa.Column("auto_fail", sa.Boolean(), nullable=False) - ) - op.add_column( - "experiments_base", sa.Column("auto_fail_value", sa.Integer(), nullable=True) - ) - op.add_column( - "experiments_base", - sa.Column( - "auto_fail_unit", - autofailunittype, - nullable=True, - ), - ) - # ### end Alembic commands ### - - -def downgrade() -> None: - # ### commands auto generated by Alembic - please adjust! ### - op.drop_column("experiments_base", "auto_fail_unit") - op.drop_column("experiments_base", "auto_fail_value") - op.drop_column("experiments_base", "auto_fail") - op.drop_column("experiments_base", "sticky_assignment") - - sa.Enum(name="autofailunittype").drop(op.get_bind()) - - # ### end Alembic commands ### diff --git a/backend/tests/pytest.ini b/backend/tests/pytest.ini new file mode 100644 index 0000000..22932f8 --- /dev/null +++ b/backend/tests/pytest.ini @@ -0,0 +1,5 @@ +[pytest] +asyncio_mode = auto + +# Set the default fixture loop scope to function +asyncio_default_fixture_loop_scope = function diff --git a/backend/tests/test_auto_fail.py b/backend/tests/test_auto_fail.py index 6a91def..e8ae0b5 100644 --- a/backend/tests/test_auto_fail.py +++ b/backend/tests/test_auto_fail.py @@ -6,14 +6,13 @@ from pytest import FixtureRequest, MonkeyPatch, fixture, mark from sqlalchemy.ext.asyncio import AsyncSession -from backend.app.bayes_ab import models as bayes_ab_models -from backend.app.contextual_mab import models as cmab_models -from backend.app.mab import models as mab_models -from backend.jobs.auto_fail import auto_fail_bayes_ab, auto_fail_cmab, auto_fail_mab +from backend.app.experiments import models +from backend.jobs.auto_fail import auto_fail_experiment -base_mab_payload = { +base_experiment_payload = { "name": "Test AUTO FAIL", "description": "Test AUTO FAIL description", + "exp_type": "mab", "prior_type": "beta", "reward_type": "binary", "auto_fail": True, @@ -41,84 +40,8 @@ "onPercentBetter": False, "percentBetterThreshold": 5, }, -} - -base_cmab_payload = { - "name": "Test", - "description": "Test description", - "prior_type": "normal", - "reward_type": "real-valued", - "auto_fail": True, - "auto_fail_value": 3, - "auto_fail_unit": "hours", - "arms": [ - { - "name": "arm 1", - "description": "arm 1 description", - "mu_init": 0, - "sigma_init": 1, - }, - { - "name": "arm 2", - "description": "arm 2 description", - "mu_init": 0, - "sigma_init": 1, - }, - ], - "contexts": [ - { - "name": "Context 1", - "description": "context 1 description", - "value_type": "binary", - }, - { - "name": "Context 2", - "description": "context 2 description", - "value_type": "real-valued", - }, - ], - "notifications": { - "onTrialCompletion": True, - "numberOfTrials": 2, - "onDaysElapsed": False, - "daysElapsed": 3, - "onPercentBetter": False, - "percentBetterThreshold": 5, - }, -} - -base_ab_payload = { - "name": "Test", - "description": "Test description", - "prior_type": "normal", - "reward_type": "real-valued", - "auto_fail": True, - "auto_fail_value": 3, - 
"auto_fail_unit": "hours", - "arms": [ - { - "name": "arm 1", - "description": "arm 1 description", - "mu_init": 0, - "sigma_init": 1, - "is_treatment_arm": True, - }, - { - "name": "arm 2", - "description": "arm 2 description", - "mu_init": 2, - "sigma_init": 2, - "is_treatment_arm": False, - }, - ], - "notifications": { - "onTrialCompletion": True, - "numberOfTrials": 2, - "onDaysElapsed": False, - "daysElapsed": 3, - "onPercentBetter": False, - "percentBetterThreshold": 5, - }, + "contexts": [], + "clients": [], } @@ -131,201 +54,47 @@ def now(cls, *arg: list) -> datetime: return mydatetime -class TestMABAutoFailJob: - @fixture - def create_mab_with_autofail( - self, - client: TestClient, - admin_token: str, - request: FixtureRequest, - ) -> Generator: - auto_fail_value, auto_fail_unit = request.param - mab_payload = copy.deepcopy(base_mab_payload) - mab_payload["auto_fail_value"] = auto_fail_value - mab_payload["auto_fail_unit"] = auto_fail_unit - - headers = {"Authorization": f"Bearer {admin_token}"} - response = client.post( - "/mab", - json=mab_payload, - headers=headers, - ) - assert response.status_code == 200 - mab = response.json() - yield mab - headers = {"Authorization": f"Bearer {admin_token}"} - client.delete(f"/mab/{mab['experiment_id']}", headers=headers) - - @mark.parametrize( - "create_mab_with_autofail, fail_value, fail_unit, n_observed", - [ - ((12, "hours"), 12, "hours", 2), - ((10, "days"), 10, "days", 3), - ((3, "hours"), 3, "hours", 0), - ((5, "days"), 5, "days", 0), - ], - indirect=["create_mab_with_autofail"], - ) - async def test_auto_fail_job( - self, - client: TestClient, - admin_token: str, - monkeypatch: MonkeyPatch, - create_mab_with_autofail: dict, - fail_value: int, - fail_unit: Literal["days", "hours"], - n_observed: int, - asession: AsyncSession, - workspace_api_key: str, - ) -> None: - draws = [] - headers = {"Authorization": f"Bearer {workspace_api_key}"} - for i in range(1, 15): - monkeypatch.setattr( - mab_models, - "datetime", - fake_datetime( - days=i if fail_unit == "days" else 0, - hours=i if fail_unit == "hours" else 0, - ), - ) - response = client.get( - f"/mab/{create_mab_with_autofail['experiment_id']}/draw", - headers=headers, - ) - assert response.status_code == 200 - draws.append(response.json()["draw_id"]) - - if i >= (15 - n_observed): - response = client.put( - f"/mab/{create_mab_with_autofail['experiment_id']}/{draws[-1]}/1", - headers=headers, - ) - assert response.status_code == 200 - - n_failed = await auto_fail_mab(asession=asession) - - assert n_failed == (15 - fail_value - n_observed) - - -class TestBayesABAutoFailJob: - @fixture - def create_bayes_ab_with_autofail( - self, - client: TestClient, - admin_token: str, - request: FixtureRequest, - ) -> Generator: - auto_fail_value, auto_fail_unit = request.param - ab_payload = copy.deepcopy(base_ab_payload) - ab_payload["auto_fail_value"] = auto_fail_value - ab_payload["auto_fail_unit"] = auto_fail_unit - - headers = {"Authorization": f"Bearer {admin_token}"} - response = client.post( - "/bayes_ab", - json=ab_payload, - headers=headers, - ) - assert response.status_code == 200 - ab = response.json() - yield ab - headers = {"Authorization": f"Bearer {admin_token}"} - client.delete(f"/bayes_ab/{ab['experiment_id']}", headers=headers) - - @mark.parametrize( - "create_bayes_ab_with_autofail, fail_value, fail_unit, n_observed", - [ - ((12, "hours"), 12, "hours", 2), - ((10, "days"), 10, "days", 3), - ((3, "hours"), 3, "hours", 0), - ((5, "days"), 5, "days", 0), - ], - 
indirect=["create_bayes_ab_with_autofail"], - ) - async def test_auto_fail_job( - self, - client: TestClient, - admin_token: str, - monkeypatch: MonkeyPatch, - create_bayes_ab_with_autofail: dict, - fail_value: int, - fail_unit: Literal["days", "hours"], - n_observed: int, - asession: AsyncSession, - workspace_api_key: str, - ) -> None: - draws = [] - headers = {"Authorization": f"Bearer {workspace_api_key}"} - for i in range(1, 15): - monkeypatch.setattr( - bayes_ab_models, - "datetime", - fake_datetime( - days=i if fail_unit == "days" else 0, - hours=i if fail_unit == "hours" else 0, - ), - ) - response = client.get( - f"/bayes_ab/{create_bayes_ab_with_autofail['experiment_id']}/draw", - headers=headers, - ) - assert response.status_code == 200 - draws.append(response.json()["draw_id"]) - - if i >= (15 - n_observed): - response = client.put( - f"/bayes_ab/{create_bayes_ab_with_autofail['experiment_id']}/{draws[-1]}/1", - headers=headers, - ) - assert response.status_code == 200 - - n_failed = await auto_fail_bayes_ab(asession=asession) - - assert n_failed == (15 - fail_value - n_observed) - - -class TestCMABAutoFailJob: +class TestExperimentAutoFailJob: @fixture - def create_cmab_with_autofail( + def create_experiment_with_autofail( self, client: TestClient, admin_token: str, request: FixtureRequest, ) -> Generator: auto_fail_value, auto_fail_unit = request.param - cmab_payload = copy.deepcopy(base_cmab_payload) - cmab_payload["auto_fail_value"] = auto_fail_value - cmab_payload["auto_fail_unit"] = auto_fail_unit + experiment_payload = copy.deepcopy(base_experiment_payload) + experiment_payload["auto_fail_value"] = auto_fail_value + experiment_payload["auto_fail_unit"] = auto_fail_unit headers = {"Authorization": f"Bearer {admin_token}"} response = client.post( - "/contextual_mab", - json=cmab_payload, + "/experiment", + json=experiment_payload, headers=headers, ) assert response.status_code == 200 - cmab = response.json() - yield cmab + experiment = response.json() + yield experiment headers = {"Authorization": f"Bearer {admin_token}"} - client.delete(f"/contextual_mab/{cmab['experiment_id']}", headers=headers) + client.delete(f"/experiment/id/{experiment['experiment_id']}", headers=headers) @mark.parametrize( - "create_cmab_with_autofail, fail_value, fail_unit, n_observed", + "create_experiment_with_autofail, fail_value, fail_unit, n_observed", [ ((12, "hours"), 12, "hours", 2), ((10, "days"), 10, "days", 3), ((3, "hours"), 3, "hours", 0), ((5, "days"), 5, "days", 0), ], - indirect=["create_cmab_with_autofail"], + indirect=["create_experiment_with_autofail"], ) async def test_auto_fail_job( self, client: TestClient, admin_token: str, monkeypatch: MonkeyPatch, - create_cmab_with_autofail: dict, + create_experiment_with_autofail: dict, fail_value: int, fail_unit: Literal["days", "hours"], n_observed: int, @@ -336,19 +105,15 @@ async def test_auto_fail_job( headers = {"Authorization": f"Bearer {workspace_api_key}"} for i in range(1, 15): monkeypatch.setattr( - cmab_models, + models, "datetime", fake_datetime( days=i if fail_unit == "days" else 0, hours=i if fail_unit == "hours" else 0, ), ) - response = client.post( - f"/contextual_mab/{create_cmab_with_autofail['experiment_id']}/draw", - json=[ - {"context_id": 1, "context_value": 0}, - {"context_id": 2, "context_value": 0}, - ], + response = client.put( + f"/experiment/{create_experiment_with_autofail['experiment_id']}/draw", headers=headers, ) assert response.status_code == 200 @@ -356,11 +121,12 @@ async def test_auto_fail_job( if i >= 
(15 - n_observed): response = client.put( - f"/contextual_mab/{create_cmab_with_autofail['experiment_id']}/{draws[-1]}/1", + f"/experiment/{create_experiment_with_autofail['experiment_id']}/{draws[-1]}/1", headers=headers, ) + print(response.json()) assert response.status_code == 200 - n_failed = await auto_fail_cmab(asession=asession) + n_failed = await auto_fail_experiment(asession=asession) assert n_failed == (15 - fail_value - n_observed) diff --git a/backend/tests/test_bayes_ab.py b/backend/tests/test_bayes_ab.py deleted file mode 100644 index 6b75b6f..0000000 --- a/backend/tests/test_bayes_ab.py +++ /dev/null @@ -1,430 +0,0 @@ -import copy -import os -from typing import Generator - -import numpy as np -from fastapi.testclient import TestClient -from pytest import FixtureRequest, fixture, mark -from sqlalchemy.orm import Session - -from backend.app.bayes_ab.models import BayesianABArmDB, BayesianABDB -from backend.app.models import NotificationsDB - -base_normal_payload = { - "name": "Test", - "description": "Test description", - "prior_type": "normal", - "reward_type": "real-valued", - "sticky_assignment": False, - "arms": [ - { - "name": "arm 1", - "description": "arm 1 description", - "mu_init": 0, - "sigma_init": 1, - "is_treatment_arm": True, - }, - { - "name": "arm 2", - "description": "arm 2 description", - "mu_init": 2, - "sigma_init": 2, - "is_treatment_arm": False, - }, - ], - "notifications": { - "onTrialCompletion": True, - "numberOfTrials": 2, - "onDaysElapsed": False, - "daysElapsed": 3, - "onPercentBetter": False, - "percentBetterThreshold": 5, - }, -} - -base_binary_normal_payload = base_normal_payload.copy() -base_binary_normal_payload["reward_type"] = "binary" - - -@fixture -def clean_bayes_ab(db_session: Session) -> Generator: - """ - Fixture to clean the database before each test. - """ - yield - db_session.query(NotificationsDB).delete() - db_session.query(BayesianABArmDB).delete() - db_session.query(BayesianABDB).delete() - - db_session.commit() - - -@fixture -def admin_token(client: TestClient) -> str: - """Get a token for the admin user""" - response = client.post( - "/login", - data={ - "username": os.environ.get("ADMIN_USERNAME", "admin@idinsight.org"), - "password": os.environ.get("ADMIN_PASSWORD", "12345"), - }, - ) - assert response.status_code == 200, f"Login failed: {response.json()}" - token = response.json()["access_token"] - return token - - -class TestBayesAB: - """ - Test class for Bayesian A/B testing. - """ - - @fixture - def create_bayes_ab_payload(self, request: FixtureRequest) -> dict: - """ - Fixture to create a payload for the Bayesian A/B test. 
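The consolidated auto-fail test above backdates draws by monkeypatching the experiments module's datetime with a stand-in class before calling auto_fail_experiment. A rough reconstruction of that pattern follows; the helper name, signature, and direction of the shift are assumptions for illustration rather than code taken from this diff:

from datetime import datetime, timedelta, timezone


def fake_datetime(days: int = 0, hours: int = 0) -> type:
    # Freeze "now" at a point shifted into the past, so draws created while the
    # patch is active are backdated and later look old to the auto-fail job.
    frozen = datetime.now(timezone.utc) - timedelta(days=days, hours=hours)

    class _FakeDatetime(datetime):
        @classmethod
        def now(cls, *args: object) -> datetime:
            return frozen

    return _FakeDatetime


# Usage inside a test (sketch):
#   monkeypatch.setattr(models, "datetime", fake_datetime(hours=5))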
- """ - payload_normal: dict = copy.deepcopy(base_normal_payload) - payload_normal["arms"] = list(payload_normal["arms"]) - - payload_binary_normal: dict = copy.deepcopy(base_binary_normal_payload) - payload_binary_normal["arms"] = list(payload_binary_normal["arms"]) - - if request.param == "base_normal": - return payload_normal - if request.param == "base_binary_normal": - return payload_binary_normal - if request.param == "one_arm": - payload_normal["arms"].pop() - return payload_normal - if request.param == "no_notifications": - payload_normal["notifications"]["onTrialCompletion"] = False - return payload_normal - if request.param == "invalid_prior": - payload_normal["prior_type"] = "beta" - return payload_normal - if request.param == "invalid_sigma": - payload_normal["arms"][0]["sigma_init"] = 0 - return payload_normal - if request.param == "invalid_params": - payload_normal["arms"][0].pop("mu_init") - return payload_normal - if request.param == "two_treatment_arms": - payload_normal["arms"][0]["is_treatment_arm"] = True - payload_normal["arms"][1]["is_treatment_arm"] = True - return payload_normal - if request.param == "with_sticky_assignment": - payload_normal["sticky_assignment"] = True - return payload_normal - else: - raise ValueError("Invalid parameter") - - @fixture - def create_bayes_abs( - self, - client: TestClient, - admin_token: str, - create_bayes_ab_payload: dict, - request: FixtureRequest, - ) -> Generator: - bayes_abs = [] - n_bayes_abs = request.param if hasattr(request, "param") else 1 - for _ in range(n_bayes_abs): - response = client.post( - "/bayes_ab", - json=create_bayes_ab_payload, - headers={"Authorization": f"Bearer {admin_token}"}, - ) - bayes_abs.append(response.json()) - yield bayes_abs - for bayes_ab in bayes_abs: - client.delete( - f"/bayes_ab/{bayes_ab['experiment_id']}", - headers={"Authorization": f"Bearer {admin_token}"}, - ) - - @mark.parametrize( - "create_bayes_ab_payload, expected_response", - [ - ("base_normal", 200), - ("base_binary_normal", 200), - ("one_arm", 422), - ("no_notifications", 200), - ("invalid_prior", 422), - ("invalid_sigma", 422), - ("invalid_params", 200), - ("two_treatment_arms", 422), - ], - indirect=["create_bayes_ab_payload"], - ) - def test_create_bayes_ab( - self, - create_bayes_ab_payload: dict, - client: TestClient, - expected_response: int, - admin_token: str, - clean_bayes_ab: None, - ) -> None: - """ - Test the creation of a Bayesian A/B test. - """ - response = client.post( - "/bayes_ab", - json=create_bayes_ab_payload, - headers={"Authorization": f"Bearer {admin_token}"}, - ) - - assert response.status_code == expected_response - - @mark.parametrize( - "create_bayes_abs, n_expected, create_bayes_ab_payload", - [(1, 1, "base_normal"), (2, 2, "base_normal"), (5, 5, "base_normal")], - indirect=["create_bayes_abs", "create_bayes_ab_payload"], - ) - def test_get_bayes_abs( - self, - client: TestClient, - n_expected: int, - admin_token: str, - create_bayes_abs: list, - create_bayes_ab_payload: dict, - ) -> None: - """ - Test the retrieval of Bayesian A/B tests. 
- """ - response = client.get( - "/bayes_ab", headers={"Authorization": f"Bearer {admin_token}"} - ) - - assert response.status_code == 200 - assert len(response.json()) == n_expected - - @mark.parametrize( - "create_bayes_abs, expected_response, create_bayes_ab_payload", - [(1, 200, "base_normal"), (2, 200, "base_normal"), (5, 200, "base_normal")], - indirect=["create_bayes_abs", "create_bayes_ab_payload"], - ) - def test_draw_arm( - self, - client: TestClient, - create_bayes_abs: list, - create_bayes_ab_payload: dict, - expected_response: int, - workspace_api_key: str, - ) -> None: - id = create_bayes_abs[0]["experiment_id"] - response = client.get( - f"/bayes_ab/{id}/draw", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == expected_response - - @mark.parametrize( - "create_bayes_ab_payload, client_id, expected_response", - [ - ("with_sticky_assignment", None, 400), - ("with_sticky_assignment", "test_client_id", 200), - ], - indirect=["create_bayes_ab_payload"], - ) - def test_draw_arm_with_client_id( - self, - client: TestClient, - create_bayes_abs: list, - create_bayes_ab_payload: dict, - client_id: str | None, - expected_response: int, - workspace_api_key: str, - ) -> None: - id = create_bayes_abs[0]["experiment_id"] - response = client.get( - f"/bayes_ab/{id}/draw{'?client_id=' + client_id if client_id else ''}", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == expected_response - - @mark.parametrize( - "create_bayes_ab_payload", ["with_sticky_assignment"], indirect=True - ) - def test_draw_arm_with_sticky_assignment( - self, - client: TestClient, - create_bayes_abs: list, - create_bayes_ab_payload: dict, - workspace_api_key: str, - ) -> None: - id = create_bayes_abs[0]["experiment_id"] - arm_ids = [] - for _ in range(10): - response = client.get( - f"/bayes_ab/{id}/draw?client_id=123", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - arm_ids.append(response.json()["arm"]["arm_id"]) - assert np.unique(arm_ids).size == 1 - - @mark.parametrize("create_bayes_ab_payload", ["base_normal"], indirect=True) - def test_update_observation( - self, - client: TestClient, - create_bayes_abs: list, - create_bayes_ab_payload: dict, - workspace_api_key: str, - ) -> None: - id = create_bayes_abs[0]["experiment_id"] - - # First, get a draw - response = client.get( - f"/bayes_ab/{id}/draw", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == 200 - draw_id = response.json()["draw_id"] - - # Then update with an observation - response = client.put( - f"/bayes_ab/{id}/{draw_id}/0.5", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == 200 - - # Test that we can't update the same draw twice - response = client.put( - f"/bayes_ab/{id}/{draw_id}/0.5", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == 400 - - @mark.parametrize("create_bayes_ab_payload", ["base_normal"], indirect=True) - def test_get_outcomes( - self, - client: TestClient, - create_bayes_abs: list, - create_bayes_ab_payload: dict, - workspace_api_key: str, - ) -> None: - id = create_bayes_abs[0]["experiment_id"] - - # First, get a draw - response = client.get( - f"/bayes_ab/{id}/draw", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == 200 - draw_id = response.json()["draw_id"] - - # Then update with an observation - response = client.put( - 
f"/bayes_ab/{id}/{draw_id}/0.5", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == 200 - - # Get outcomes - response = client.get( - f"/bayes_ab/{id}/outcomes", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == 200 - assert len(response.json()) == 1 - - @mark.parametrize("create_bayes_ab_payload", ["base_normal"], indirect=True) - def test_get_arms( - self, - client: TestClient, - create_bayes_abs: list, - create_bayes_ab_payload: dict, - workspace_api_key: str, - ) -> None: - id = create_bayes_abs[0]["experiment_id"] - - # First, get a draw - response = client.get( - f"/bayes_ab/{id}/draw", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == 200 - draw_id = response.json()["draw_id"] - - # Then update with an observation - response = client.put( - f"/bayes_ab/{id}/{draw_id}/0.5", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == 200 - - # Get arms - response = client.get( - f"/bayes_ab/{id}/arms", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == 200 - assert len(response.json()) == 2 - - -class TestNotifications: - @fixture() - def create_bayes_ab_payload(self, request: FixtureRequest) -> dict: - payload: dict = copy.deepcopy(base_normal_payload) - payload["arms"] = list(payload["arms"]) - - match request.param: - case "base": - pass - case "daysElapsed_only": - payload["notifications"]["onTrialCompletion"] = False - payload["notifications"]["onDaysElapsed"] = True - case "trialCompletion_only": - payload["notifications"]["onTrialCompletion"] = True - case "percentBetter_only": - payload["notifications"]["onTrialCompletion"] = False - payload["notifications"]["onPercentBetter"] = True - case "all_notifications": - payload["notifications"]["onDaysElapsed"] = True - payload["notifications"]["onPercentBetter"] = True - case "no_notifications": - payload["notifications"]["onTrialCompletion"] = False - case "daysElapsed_missing": - payload["notifications"]["daysElapsed"] = 0 - payload["notifications"]["onDaysElapsed"] = True - case "trialCompletion_missing": - payload["notifications"]["numberOfTrials"] = 0 - payload["notifications"]["onTrialCompletion"] = True - case "percentBetter_missing": - payload["notifications"]["percentBetterThreshold"] = 0 - payload["notifications"]["onPercentBetter"] = True - case _: - raise ValueError("Invalid parameter") - - return payload - - @mark.parametrize( - "create_bayes_ab_payload, expected_response", - [ - ("base", 200), - ("daysElapsed_only", 200), - ("trialCompletion_only", 200), - ("percentBetter_only", 200), - ("all_notifications", 200), - ("no_notifications", 200), - ("daysElapsed_missing", 422), - ("trialCompletion_missing", 422), - ("percentBetter_missing", 422), - ], - indirect=["create_bayes_ab_payload"], - ) - def test_notifications( - self, - client: TestClient, - admin_token: str, - create_bayes_ab_payload: dict, - expected_response: int, - clean_bayes_ab: None, - ) -> None: - response = client.post( - "/bayes_ab", - json=create_bayes_ab_payload, - headers={"Authorization": f"Bearer {admin_token}"}, - ) - - assert response.status_code == expected_response diff --git a/backend/tests/test_cmabs.py b/backend/tests/test_cmabs.py deleted file mode 100644 index d9b6ed0..0000000 --- a/backend/tests/test_cmabs.py +++ /dev/null @@ -1,452 +0,0 @@ -import copy -import os -from typing import Generator - -import numpy as np 
-from fastapi.testclient import TestClient -from pytest import FixtureRequest, fixture, mark -from sqlalchemy.orm import Session - -from backend.app.contextual_mab.models import ( - ContextDB, - ContextualArmDB, - ContextualBanditDB, -) -from backend.app.models import NotificationsDB - -base_normal_payload = { - "name": "Test", - "description": "Test description", - "prior_type": "normal", - "reward_type": "real-valued", - "sticky_assignment": False, - "arms": [ - { - "name": "arm 1", - "description": "arm 1 description", - "mu_init": 0, - "sigma_init": 1, - }, - { - "name": "arm 2", - "description": "arm 2 description", - "mu_init": 0, - "sigma_init": 1, - }, - ], - "contexts": [ - { - "name": "Context 1", - "description": "context 1 description", - "value_type": "binary", - }, - { - "name": "Context 2", - "description": "context 2 description", - "value_type": "real-valued", - }, - ], - "notifications": { - "onTrialCompletion": True, - "numberOfTrials": 2, - "onDaysElapsed": False, - "daysElapsed": 3, - "onPercentBetter": False, - "percentBetterThreshold": 5, - }, -} - -base_binary_normal_payload = base_normal_payload.copy() -base_binary_normal_payload["reward_type"] = "binary" - - -@fixture -def admin_token(client: TestClient) -> str: - response = client.post( - "/login", - data={ - "username": os.environ.get("ADMIN_USERNAME", ""), - "password": os.environ.get("ADMIN_PASSWORD", ""), - }, - ) - token = response.json()["access_token"] - return token - - -@fixture -def clean_cmabs(db_session: Session) -> Generator: - yield - db_session.query(NotificationsDB).delete() - db_session.query(ContextualArmDB).delete() - db_session.query(ContextDB).delete() - db_session.query(ContextualBanditDB).delete() - db_session.commit() - - -class TestCMab: - @fixture - def create_cmab_payload(self, request: FixtureRequest) -> dict: - payload_normal: dict = copy.deepcopy(base_normal_payload) - payload_normal["arms"] = list(payload_normal["arms"]) - payload_normal["contexts"] = list(payload_normal["contexts"]) - - payload_binary_normal: dict = copy.deepcopy(base_binary_normal_payload) - payload_binary_normal["arms"] = list(payload_binary_normal["arms"]) - payload_binary_normal["contexts"] = list(payload_binary_normal["contexts"]) - - if request.param == "base_normal": - return payload_normal - if request.param == "base_binary_normal": - return payload_binary_normal - if request.param == "one_arm": - payload_normal["arms"].pop() - return payload_normal - if request.param == "no_notifications": - payload_normal["notifications"]["onTrialCompletion"] = False - return payload_normal - if request.param == "invalid_prior": - payload_normal["prior_type"] = "beta" - return payload_normal - if request.param == "invalid_reward": - payload_normal["reward_type"] = "invalid" - return payload_normal - if request.param == "invalid_sigma": - payload_normal["arms"][0]["sigma_init"] = 0 - return payload_normal - if request.param == "with_sticky_assignment": - payload_normal["sticky_assignment"] = True - return payload_normal - - else: - raise ValueError("Invalid parameter") - - @mark.parametrize( - "create_cmab_payload, expected_response", - [ - ("base_normal", 200), - ("base_binary_normal", 200), - ("one_arm", 422), - ("no_notifications", 200), - ("invalid_prior", 422), - ("invalid_reward", 422), - ("invalid_sigma", 422), - ], - indirect=["create_cmab_payload"], - ) - def test_create_cmab( - self, - create_cmab_payload: dict, - client: TestClient, - expected_response: int, - admin_token: str, - clean_cmabs: None, - ) -> None: 
- response = client.post( - "/contextual_mab", - json=create_cmab_payload, - headers={"Authorization": f"Bearer {admin_token}"}, - ) - - assert response.status_code == expected_response - - @fixture - def create_cmabs( - self, - client: TestClient, - admin_token: str, - request: FixtureRequest, - create_cmab_payload: dict, - ) -> Generator: - cmabs = [] - n_cmabs = request.param if hasattr(request, "param") else 1 - for _ in range(n_cmabs): - response = client.post( - "/contextual_mab", - json=create_cmab_payload, - headers={"Authorization": f"Bearer {admin_token}"}, - ) - cmabs.append(response.json()) - yield cmabs - for cmab in cmabs: - client.delete( - f"/contextual_mab/{cmab['experiment_id']}", - headers={"Authorization": f"Bearer {admin_token}"}, - ) - - @mark.parametrize( - "create_cmabs, n_expected, create_cmab_payload", - [(0, 0, "base_normal"), (2, 2, "base_normal"), (5, 5, "base_normal")], - indirect=["create_cmabs", "create_cmab_payload"], - ) - def test_get_all_cmabs( - self, - client: TestClient, - admin_token: str, - n_expected: int, - create_cmab_payload: dict, - create_cmabs: list, - ) -> None: - response = client.get( - "/contextual_mab", headers={"Authorization": f"Bearer {admin_token}"} - ) - assert response.status_code == 200 - assert len(response.json()) == n_expected - - @mark.parametrize( - "create_cmabs, expected_response, create_cmab_payload", - [(0, 404, "base_normal"), (2, 200, "base_normal")], - indirect=["create_cmabs", "create_cmab_payload"], - ) - def test_get_cmab( - self, - client: TestClient, - admin_token: str, - create_cmab_payload: dict, - create_cmabs: list, - expected_response: int, - ) -> None: - id = create_cmabs[0]["experiment_id"] if create_cmabs else 999 - - response = client.get( - f"/contextual_mab/{id}", headers={"Authorization": f"Bearer {admin_token}"} - ) - assert response.status_code == expected_response - - @mark.parametrize("create_cmab_payload", ["base_normal"], indirect=True) - def test_draw_arm_draw_id_provided( - self, - client: TestClient, - create_cmabs: list, - create_cmab_payload: dict, - workspace_api_key: str, - ) -> None: - id = create_cmabs[0]["experiment_id"] - response = client.post( - f"/contextual_mab/{id}/draw", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - params={"draw_id": "test_draw_id"}, - json=[ - {"context_id": 1, "context_value": 0}, - {"context_id": 2, "context_value": 0.5}, - ], - ) - assert response.status_code == 200 - assert response.json()["draw_id"] == "test_draw_id" - - @mark.parametrize("create_cmab_payload", ["base_normal"], indirect=True) - def test_draw_arm_no_draw_id_provided( - self, - client: TestClient, - create_cmabs: list, - create_cmab_payload: dict, - workspace_api_key: str, - ) -> None: - id = create_cmabs[0]["experiment_id"] - response = client.post( - f"/contextual_mab/{id}/draw", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - json=[ - {"context_id": 1, "context_value": 0}, - {"context_id": 2, "context_value": 0.5}, - ], - ) - assert response.status_code == 200 - assert len(response.json()["draw_id"]) == 36 - - @mark.parametrize( - "create_cmab_payload, client_id, expected_response", - [ - ("with_sticky_assignment", None, 400), - ("with_sticky_assignment", "test_client_id", 200), - ], - indirect=["create_cmab_payload"], - ) - def test_draw_arm_sticky_assignment_client_id_provided( - self, - client: TestClient, - create_cmabs: list, - create_cmab_payload: dict, - client_id: str | None, - expected_response: int, - workspace_api_key: str, - ) -> None: - id = 
create_cmabs[0]["experiment_id"] - url = f"/contextual_mab/{id}/draw" - if client_id: - url += f"?client_id={client_id}" - - response = client.post( - url, - headers={"Authorization": f"Bearer {workspace_api_key}"}, - json=[ - {"context_id": 1, "context_value": 0}, - {"context_id": 2, "context_value": 0.5}, - ], - ) - assert response.status_code == expected_response - - @mark.parametrize("create_cmab_payload", ["with_sticky_assignment"], indirect=True) - def test_draw_arm_with_sticky_assignment( - self, - client: TestClient, - create_cmabs: list, - create_cmab_payload: dict, - workspace_api_key: str, - ) -> None: - id = create_cmabs[0]["experiment_id"] - arm_ids = [] - - for _ in range(10): - response = client.post( - f"/contextual_mab/{id}/draw?client_id=123", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - json=[ - {"context_id": 1, "context_value": 0}, - {"context_id": 2, "context_value": 1}, - ], - ) - arm_ids.append(response.json()["arm"]["arm_id"]) - - assert np.unique(arm_ids).size == 1 - - @mark.parametrize("create_cmab_payload", ["base_normal"], indirect=True) - def test_one_outcome_per_draw( - self, - client: TestClient, - create_cmabs: list, - create_cmab_payload: dict, - workspace_api_key: str, - ) -> None: - id = create_cmabs[0]["experiment_id"] - response = client.post( - f"/contextual_mab/{id}/draw", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - json=[ - {"context_id": 1, "context_value": 0}, - {"context_id": 2, "context_value": 0.5}, - ], - ) - assert response.status_code == 200 - draw_id = response.json()["draw_id"] - - response = client.put( - f"/contextual_mab/{id}/{draw_id}/1", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - - assert response.status_code == 200 - - response = client.put( - f"/contextual_mab/{id}/{draw_id}/1", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - - assert response.status_code == 400 - - @mark.parametrize( - "n_draws, create_cmab_payload", - [(0, "base_normal"), (1, "base_normal"), (5, "base_normal")], - indirect=["create_cmab_payload"], - ) - def test_get_outcomes( - self, - client: TestClient, - create_cmabs: list, - n_draws: int, - create_cmab_payload: dict, - workspace_api_key: str, - ) -> None: - id = create_cmabs[0]["experiment_id"] - - for _ in range(n_draws): - response = client.post( - f"/contextual_mab/{id}/draw", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - json=[ - {"context_id": 1, "context_value": 0}, - {"context_id": 2, "context_value": 0.5}, - ], - ) - assert response.status_code == 200 - draw_id = response.json()["draw_id"] - response = client.put( - f"/contextual_mab/{id}/{draw_id}/1", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - - response = client.get( - f"/contextual_mab/{id}/outcomes", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - - assert response.status_code == 200 - assert len(response.json()) == n_draws - - -class TestNotifications: - @fixture() - def create_cmab_payload(self, request: FixtureRequest) -> dict: - payload: dict = copy.deepcopy(base_normal_payload) - payload["arms"] = list(payload["arms"]) - payload["contexts"] = list(payload["contexts"]) - - match request.param: - case "base": - pass - case "daysElapsed_only": - payload["notifications"]["onTrialCompletion"] = False - payload["notifications"]["onDaysElapsed"] = True - case "trialCompletion_only": - payload["notifications"]["onTrialCompletion"] = True - case "percentBetter_only": - payload["notifications"]["onTrialCompletion"] = False 
- payload["notifications"]["onPercentBetter"] = True - case "all_notifications": - payload["notifications"]["onDaysElapsed"] = True - payload["notifications"]["onPercentBetter"] = True - case "no_notifications": - payload["notifications"]["onTrialCompletion"] = False - case "daysElapsed_missing": - payload["notifications"]["daysElapsed"] = 0 - payload["notifications"]["onDaysElapsed"] = True - case "trialCompletion_missing": - payload["notifications"]["numberOfTrials"] = 0 - payload["notifications"]["onTrialCompletion"] = True - case "percentBetter_missing": - payload["notifications"]["percentBetterThreshold"] = 0 - payload["notifications"]["onPercentBetter"] = True - case _: - raise ValueError("Invalid parameter") - - return payload - - @mark.parametrize( - "create_cmab_payload, expected_response", - [ - ("base", 200), - ("daysElapsed_only", 200), - ("trialCompletion_only", 200), - ("percentBetter_only", 200), - ("all_notifications", 200), - ("no_notifications", 200), - ("daysElapsed_missing", 422), - ("trialCompletion_missing", 422), - ("percentBetter_missing", 422), - ], - indirect=["create_cmab_payload"], - ) - def test_notifications( - self, - client: TestClient, - admin_token: str, - create_cmab_payload: dict, - expected_response: int, - clean_cmabs: None, - ) -> None: - response = client.post( - "/contextual_mab", - json=create_cmab_payload, - headers={"Authorization": f"Bearer {admin_token}"}, - ) - - assert response.status_code == expected_response diff --git a/backend/tests/test_experiments.py b/backend/tests/test_experiments.py new file mode 100644 index 0000000..f2bdd7c --- /dev/null +++ b/backend/tests/test_experiments.py @@ -0,0 +1,604 @@ +import copy +import os +from typing import Generator + +from fastapi.testclient import TestClient +from pytest import FixtureRequest, fixture, mark +from sqlalchemy.orm import Session + +from backend.app.experiments.models import ( + ArmDB, + ContextDB, + ExperimentDB, + NotificationsDB, +) + +mab_beta_binom_payload = { + "name": "Test", + "description": "Test description.", + "exp_type": "mab", + "prior_type": "beta", + "reward_type": "binary", + "arms": [ + { + "name": "arm 1", + "description": "arm 1 description.", + "alpha_init": 5, + "beta_init": 1, + "is_treatment_arm": True, + }, + { + "name": "arm 2", + "description": "arm 2 description.", + "alpha_init": 1, + "beta_init": 4, + "is_treatment_arm": False, + }, + ], + "notifications": { + "onTrialCompletion": True, + "numberOfTrials": 2, + "onDaysElapsed": False, + "daysElapsed": 3, + "onPercentBetter": False, + "percentBetterThreshold": 5, + }, + "contexts": [], + "clients": [], +} + + +@fixture +def admin_token(client: TestClient) -> str: + response = client.post( + "/login", + data={ + "username": os.environ.get("ADMIN_USERNAME", ""), + "password": os.environ.get("ADMIN_PASSWORD", ""), + }, + ) + token = response.json()["access_token"] + return token + + +@fixture +def clean_experiments(db_session: Session) -> Generator: + yield + db_session.query(NotificationsDB).delete() + db_session.query(ContextDB).delete() + db_session.query(ArmDB).delete() + db_session.query(ExperimentDB).delete() + db_session.commit() + + +def _get_experiment_payload(input: str) -> dict: + """Helper function to get the experiment payload based on input.""" + payload_mab_beta_binom: dict = copy.deepcopy(mab_beta_binom_payload) + payload_mab_beta_binom["arms"] = list(payload_mab_beta_binom["arms"]) + + payload_mab_normal: dict = copy.deepcopy(mab_beta_binom_payload) + payload_mab_normal["prior_type"] = 
"normal" + payload_mab_normal["reward_type"] = "real-valued" + payload_mab_normal["arms"] = [ + { + "name": "arm 1", + "description": "arm 1 description", + "mu_init": 2, + "sigma_init": 3, + "is_treatment_arm": True, + }, + { + "name": "arm 2", + "description": "arm 2 description", + "mu_init": 3, + "sigma_init": 7, + "is_treatment_arm": True, + }, + ] + + match input: + case "base_beta_binom": + return payload_mab_beta_binom + case "base_normal": + return payload_mab_normal + case "one_arm": + payload_mab_beta_binom["arms"].pop() + return payload_mab_beta_binom + case "no_notifications": + payload_mab_beta_binom["notifications"]["onTrialCompletion"] = False + return payload_mab_beta_binom + case "invalid_prior": + payload_mab_beta_binom["prior_type"] = "invalid" + return payload_mab_beta_binom + case "invalid_reward": + payload_mab_beta_binom["reward_type"] = "invalid" + return payload_mab_beta_binom + case "invalid_alpha": + payload_mab_beta_binom["arms"][0]["alpha_init"] = -1 + return payload_mab_beta_binom + case "invalid_beta": + payload_mab_beta_binom["arms"][0]["beta_init"] = -1 + return payload_mab_beta_binom + case "invalid_combo": + payload_mab_beta_binom["reward_type"] = "real-valued" + return payload_mab_beta_binom + case "incorrect_params": + payload_mab_beta_binom["arms"][0].pop("alpha_init") + return payload_mab_beta_binom + case "invalid_sigma": + payload_mab_normal["arms"][0]["sigma_init"] = 0.0 + return payload_mab_normal + case "invalid_context_input": + payload_mab_beta_binom["contexts"] = [ + { + "name": "context 1", + "description": "context 1 description", + "value_type": "binary", + } + ] + return payload_mab_beta_binom + case "bayes_ab_normal_binom": + payload_mab_normal["exp_type"] = "bayes_ab" + payload_mab_normal["reward_type"] = "real-valued" + payload_mab_normal["arms"][1]["is_treatment_arm"] = False + return payload_mab_normal + case "bayes_ab_invalid_prior": + payload_mab_beta_binom["exp_type"] = "bayes_ab" + payload_mab_beta_binom["arms"][1]["is_treatment_arm"] = False + return payload_mab_beta_binom + case "bayes_ab_invalid_arm": + payload_mab_normal["exp_type"] = "bayes_ab" + payload_mab_normal["reward_type"] = "real-valued" + return payload_mab_normal + case "bayes_ab_invalid_context": + payload_mab_normal["exp_type"] = "bayes_ab" + payload_mab_normal["reward_type"] = "real-valued" + payload_mab_normal["arms"][1]["is_treatment_arm"] = False + payload_mab_normal["contexts"] = [ + { + "name": "context 1", + "description": "context 1 description", + "value_type": "binary", + } + ] + return payload_mab_normal + case "cmab_normal": + payload_mab_normal["exp_type"] = "cmab" + payload_mab_normal["contexts"] = [ + { + "name": "context 1", + "description": "context 1 description", + "value_type": "binary", + }, + { + "name": "context 2", + "description": "context 2 description", + "value_type": "real-valued", + }, + ] + return payload_mab_normal + case "cmab_normal_binomial": + payload_mab_normal["exp_type"] = "cmab" + payload_mab_normal["reward_type"] = "binary" + payload_mab_normal["contexts"] = [ + { + "name": "context 1", + "description": "context 1 description", + "value_type": "binary", + }, + { + "name": "context 2", + "description": "context 2 description", + "value_type": "real-valued", + }, + ] + return payload_mab_normal + case "cmab_invalid_prior": + payload_mab_normal["exp_type"] = "cmab" + payload_mab_normal["prior_type"] = "beta" + payload_mab_normal["contexts"] = [ + { + "name": "context 1", + "description": "context 1 description", + 
"value_type": "binary", + }, + { + "name": "context 2", + "description": "context 2 description", + "value_type": "real-valued", + }, + ] + return payload_mab_normal + case "cmab_invalid_context": + payload_mab_normal["exp_type"] = "cmab" + return payload_mab_normal + + case _: + raise ValueError(f"Invalid input: {input}.") + + +class TestExperiment: + @fixture + def create_experiment_payload(self, request: FixtureRequest) -> dict: + """Fixture to create experiment payload based on request parameter.""" + return _get_experiment_payload(request.param) + + @mark.parametrize( + "create_experiment_payload, expected_response", + [ + ("base_beta_binom", 200), + ("base_normal", 200), + ("one_arm", 422), + ("no_notifications", 200), + ("invalid_prior", 422), + ("invalid_reward", 422), + ("invalid_alpha", 422), + ("invalid_beta", 422), + ("invalid_sigma", 422), + ("invalid_combo", 422), + ("incorrect_params", 422), + ("invalid_context_input", 422), + ("bayes_ab_normal_binom", 200), + ("bayes_ab_invalid_prior", 422), + ("bayes_ab_invalid_arm", 422), + ("bayes_ab_invalid_context", 422), + ("cmab_normal", 200), + ("cmab_normal_binomial", 200), + ("cmab_invalid_prior", 422), + ("cmab_invalid_context", 422), + ], + indirect=["create_experiment_payload"], + ) + def test_create_experiment( + self, + create_experiment_payload: dict, + client: TestClient, + expected_response: int, + admin_token: str, + clean_experiments: None, + ) -> None: + response = client.post( + "/experiment", + json=create_experiment_payload, + headers={"Authorization": f"Bearer {admin_token}"}, + ) + + assert response.status_code == expected_response + + @fixture + def create_experiments( + self, + client: TestClient, + admin_token: str, + request: FixtureRequest, + create_experiment_payload: dict, + ) -> Generator: + experiments = [] + n_experiments = request.param if hasattr(request, "param") else 1 + for _ in range(n_experiments): + response = client.post( + "/experiment", + json=create_experiment_payload, + headers={"Authorization": f"Bearer {admin_token}"}, + ) + experiments.append(response.json()) + yield experiments + for experiment in experiments: + client.delete( + f"/experiment/id/{experiment['experiment_id']}", + headers={"Authorization": f"Bearer {admin_token}"}, + ) + + @fixture + def create_mixed_experiments( + self, + client: TestClient, + admin_token: str, + request: FixtureRequest, + ) -> Generator: + mixed_payload = [] + for param in request.param: + payload = _get_experiment_payload(param) + response = client.post( + "/experiment", + json=payload, + headers={"Authorization": f"Bearer {admin_token}"}, + ) + mixed_payload.append(response.json()) + yield mixed_payload + for experiment in mixed_payload: + client.delete( + f"/experiment/id/{experiment['experiment_id']}", + headers={"Authorization": f"Bearer {admin_token}"}, + ) + + @mark.parametrize( + "create_experiments, create_experiment_payload, n_expected", + [ + (0, "base_beta_binom", 0), + (2, "base_beta_binom", 2), + (5, "base_beta_binom", 5), + ], + indirect=["create_experiments", "create_experiment_payload"], + ) + def test_get_all_experiments( + self, + client: TestClient, + admin_token: str, + n_expected: int, + create_experiments: list, + create_experiment_payload: dict, + ) -> None: + response = client.get( + "/experiment", headers={"Authorization": f"Bearer {admin_token}"} + ) + assert response.status_code == 200 + assert len(response.json()) == n_expected + + @mark.parametrize( + "create_mixed_experiments, exp_type, n_expected", + [ + ( + [ + 
"base_beta_binom", + "base_normal", + "bayes_ab_normal_binom", + "cmab_normal", + ], + "mab", + 2, + ), + ( + [ + "base_beta_binom", + "bayes_ab_normal_binom", + "bayes_ab_normal_binom", + "cmab_normal", + ], + "bayes_ab", + 2, + ), + ( + [ + "base_beta_binom", + "bayes_ab_normal_binom", + "cmab_normal", + "cmab_normal_binomial", + ], + "cmab", + 2, + ), + ], + indirect=["create_mixed_experiments"], + ) + def test_get_all_experiments_by_type( + self, + client: TestClient, + admin_token: str, + n_expected: int, + create_mixed_experiments: list, + exp_type: str, + ) -> None: + response = client.get( + f"/experiment/type/{exp_type}", + headers={"Authorization": f"Bearer {admin_token}"}, + ) + assert response.status_code == 200 + assert len(response.json()) == n_expected + + @mark.parametrize( + "create_experiments, create_experiment_payload, expected_response", + [(0, "base_beta_binom", 404), (2, "base_beta_binom", 200)], + indirect=["create_experiments", "create_experiment_payload"], + ) + def test_get_experiment( + self, + client: TestClient, + admin_token: str, + create_experiments: list, + create_experiment_payload: dict, + expected_response: int, + ) -> None: + id = create_experiments[0]["experiment_id"] if create_experiments else 999 + + response = client.get( + f"/experiment/id/{id}/", headers={"Authorization": f"Bearer {admin_token}"} + ) + assert response.status_code == expected_response + + @mark.parametrize("create_experiment_payload", ["base_beta_binom"], indirect=True) + def test_draw_arm_draw_id_provided( + self, + client: TestClient, + create_experiments: list, + create_experiment_payload: dict, + workspace_api_key: str, + ) -> None: + id = create_experiments[0]["experiment_id"] + response = client.put( + f"/experiment/{id}/draw", + params={"draw_id": "test_draw"}, + headers={"Authorization": f"Bearer {workspace_api_key}"}, + ) + assert response.status_code == 200 + assert response.json()["draw_id"] == "test_draw" + + @mark.parametrize("create_experiment_payload", ["base_beta_binom"], indirect=True) + def test_draw_arm_no_draw_id_provided( + self, + client: TestClient, + create_experiments: list, + create_experiment_payload: dict, + workspace_api_key: str, + ) -> None: + id = create_experiments[0]["experiment_id"] + response = client.put( + f"/experiment/{id}/draw", + headers={"Authorization": f"Bearer {workspace_api_key}"}, + ) + assert response.status_code == 200 + assert len(response.json()["draw_id"]) == 36 + + @mark.parametrize( + "create_experiment_payload", + ["base_beta_binom", "bayes_ab_normal_binom", "cmab_normal"], + indirect=True, + ) + def test_one_outcome_per_draw( + self, + client: TestClient, + create_experiments: list, + create_experiment_payload: dict, + workspace_api_key: str, + ) -> None: + id = create_experiments[0]["experiment_id"] + exp_type = create_experiments[0]["exp_type"] + contexts = None + if exp_type == "cmab": + contexts = [ + {"context_id": context["context_id"], "context_value": 1} + for context in create_experiments[0]["contexts"] + ] + response = client.put( + f"/experiment/{id}/draw", + headers={"Authorization": f"Bearer {workspace_api_key}"}, + json=contexts, + ) + assert response.status_code == 200 + draw_id = response.json()["draw_id"] + + response = client.put( + f"/experiment/{id}/{draw_id}/1", + headers={"Authorization": f"Bearer {workspace_api_key}"}, + ) + + assert response.status_code == 200 + + response = client.put( + f"/experiment/{id}/{draw_id}/1", + headers={"Authorization": f"Bearer {workspace_api_key}"}, + ) + + assert 
response.status_code == 400 + + @mark.parametrize( + "n_draws, create_experiment_payload", + [ + (0, "base_beta_binom"), + (1, "base_beta_binom"), + (5, "base_beta_binom"), + (0, "bayes_ab_normal_binom"), + (1, "bayes_ab_normal_binom"), + (5, "bayes_ab_normal_binom"), + (0, "cmab_normal"), + (1, "cmab_normal"), + (5, "cmab_normal"), + ], + indirect=["create_experiment_payload"], + ) + def test_get_rewards( + self, + client: TestClient, + create_experiments: list, + n_draws: int, + create_experiment_payload: dict, + workspace_api_key: str, + ) -> None: + id = create_experiments[0]["experiment_id"] + exp_type = create_experiments[0]["exp_type"] + contexts = None + if exp_type == "cmab": + contexts = [ + {"context_id": context["context_id"], "context_value": 1} + for context in create_experiments[0]["contexts"] + ] + + for _ in range(n_draws): + response = client.put( + f"/experiment/{id}/draw", + headers={"Authorization": f"Bearer {workspace_api_key}"}, + json=contexts, + ) + assert response.status_code == 200 + draw_id = response.json()["draw_id"] + # put outcomes + response = client.put( + f"/experiment/{id}/{draw_id}/1", + headers={"Authorization": f"Bearer {workspace_api_key}"}, + ) + + response = client.get( + f"/experiment/{id}/rewards", + headers={"Authorization": f"Bearer {workspace_api_key}"}, + ) + + assert response.status_code == 200 + assert len(response.json()) == n_draws + + +class TestNotifications: + @fixture() + def create_experiment_payload(self, request: FixtureRequest) -> dict: + payload: dict = copy.deepcopy(mab_beta_binom_payload) + payload["arms"] = list(payload["arms"]) + + match request.param: + case "base": + pass + case "daysElapsed_only": + payload["notifications"]["onTrialCompletion"] = False + payload["notifications"]["onDaysElapsed"] = True + case "trialCompletion_only": + payload["notifications"]["onTrialCompletion"] = True + case "percentBetter_only": + payload["notifications"]["onTrialCompletion"] = False + payload["notifications"]["onPercentBetter"] = True + case "all_notifications": + payload["notifications"]["onDaysElapsed"] = True + payload["notifications"]["onPercentBetter"] = True + case "no_notifications": + payload["notifications"]["onTrialCompletion"] = False + case "daysElapsed_missing": + payload["notifications"]["daysElapsed"] = 0 + payload["notifications"]["onDaysElapsed"] = True + case "trialCompletion_missing": + payload["notifications"]["numberOfTrials"] = 0 + payload["notifications"]["onTrialCompletion"] = True + case "percentBetter_missing": + payload["notifications"]["percentBetterThreshold"] = 0 + payload["notifications"]["onPercentBetter"] = True + case _: + raise ValueError("Invalid parameter") + + return payload + + @mark.parametrize( + "create_experiment_payload, expected_response", + [ + ("base", 200), + ("daysElapsed_only", 200), + ("trialCompletion_only", 200), + ("percentBetter_only", 200), + ("all_notifications", 200), + ("no_notifications", 200), + ("daysElapsed_missing", 422), + ("trialCompletion_missing", 422), + ("percentBetter_missing", 422), + ], + indirect=["create_experiment_payload"], + ) + def test_notifications( + self, + client: TestClient, + admin_token: str, + create_experiment_payload: dict, + expected_response: int, + clean_experiments: None, + ) -> None: + response = client.post( + "/experiment", + json=create_experiment_payload, + headers={"Authorization": f"Bearer {admin_token}"}, + ) + + assert response.status_code == expected_response diff --git a/backend/tests/test_mabs.py b/backend/tests/test_mabs.py 
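Taken together, the new tests drive a single experiments surface in place of the per-design routers: experiments are created with POST /experiment, arms are drawn with PUT /experiment/{experiment_id}/draw, outcomes are recorded with PUT /experiment/{experiment_id}/{draw_id}/{reward}, and rewards are read back from GET /experiment/{experiment_id}/rewards. A condensed sketch of that flow, reusing the TestClient and auth fixtures the tests assume (the helper name here is illustrative):

from fastapi.testclient import TestClient


def run_experiment_flow(
    client: TestClient, admin_token: str, workspace_api_key: str, payload: dict
) -> list:
    # Create the experiment as an admin.
    experiment = client.post(
        "/experiment",
        json=payload,
        headers={"Authorization": f"Bearer {admin_token}"},
    ).json()
    experiment_id = experiment["experiment_id"]

    # Draw an arm with the workspace API key, then report a reward of 1.
    # (For cmab experiments the draw call also sends a JSON list of context
    # values, as in the tests above.)
    draw = client.put(
        f"/experiment/{experiment_id}/draw",
        headers={"Authorization": f"Bearer {workspace_api_key}"},
    ).json()
    client.put(
        f"/experiment/{experiment_id}/{draw['draw_id']}/1",
        headers={"Authorization": f"Bearer {workspace_api_key}"},
    )

    # Read back all recorded rewards for the experiment.
    return client.get(
        f"/experiment/{experiment_id}/rewards",
        headers={"Authorization": f"Bearer {workspace_api_key}"},
    ).json()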
deleted file mode 100644 index e65ccb6..0000000 --- a/backend/tests/test_mabs.py +++ /dev/null @@ -1,463 +0,0 @@ -import copy -import os -from typing import Generator - -import numpy as np -from fastapi.testclient import TestClient -from pytest import FixtureRequest, fixture, mark -from sqlalchemy.orm import Session - -from backend.app.mab.models import MABArmDB, MultiArmedBanditDB -from backend.app.models import NotificationsDB - -base_beta_binom_payload = { - "name": "Test", - "description": "Test description", - "prior_type": "beta", - "reward_type": "binary", - "sticky_assignment": False, - "arms": [ - { - "name": "arm 1", - "description": "arm 1 description", - "alpha_init": 5, - "beta_init": 1, - }, - { - "name": "arm 2", - "description": "arm 2 description", - "alpha_init": 1, - "beta_init": 4, - }, - ], - "notifications": { - "onTrialCompletion": True, - "numberOfTrials": 2, - "onDaysElapsed": False, - "daysElapsed": 3, - "onPercentBetter": False, - "percentBetterThreshold": 5, - }, -} - -base_normal_payload = base_beta_binom_payload.copy() -base_normal_payload["prior_type"] = "normal" -base_normal_payload["reward_type"] = "real-valued" -base_normal_payload["arms"] = [ - { - "name": "arm 1", - "description": "arm 1 description", - "mu_init": 2, - "sigma_init": 3, - }, - { - "name": "arm 2", - "description": "arm 2 description", - "mu_init": 3, - "sigma_init": 7, - }, -] - - -@fixture -def admin_token(client: TestClient) -> str: - response = client.post( - "/login", - data={ - "username": os.environ.get("ADMIN_USERNAME", ""), - "password": os.environ.get("ADMIN_PASSWORD", ""), - }, - ) - token = response.json()["access_token"] - return token - - -@fixture -def clean_mabs(db_session: Session) -> Generator: - yield - db_session.query(NotificationsDB).delete() - db_session.query(MABArmDB).delete() - db_session.query(MultiArmedBanditDB).delete() - db_session.commit() - - -class TestMab: - @fixture - def create_mab_payload(self, request: FixtureRequest) -> dict: - payload_beta_binom: dict = copy.deepcopy(base_beta_binom_payload) - payload_beta_binom["arms"] = list(payload_beta_binom["arms"]) - - payload_normal: dict = copy.deepcopy(base_normal_payload) - payload_normal["arms"] = list(payload_normal["arms"]) - - if request.param == "base_beta_binom": - return payload_beta_binom - if request.param == "base_normal": - return payload_normal - if request.param == "one_arm": - payload_beta_binom["arms"].pop() - return payload_beta_binom - if request.param == "no_notifications": - payload_beta_binom["notifications"]["onTrialCompletion"] = False - return payload_beta_binom - if request.param == "invalid_prior": - payload_beta_binom["prior_type"] = "invalid" - return payload_beta_binom - if request.param == "invalid_reward": - payload_beta_binom["reward_type"] = "invalid" - return payload_beta_binom - if request.param == "invalid_alpha": - payload_beta_binom["arms"][0]["alpha_init"] = -1 - return payload_beta_binom - if request.param == "invalid_beta": - payload_beta_binom["arms"][0]["beta_init"] = -1 - return payload_beta_binom - if request.param == "invalid_combo_1": - payload_beta_binom["prior_type"] = "normal" - return payload_beta_binom - if request.param == "invalid_combo_2": - payload_beta_binom["reward_type"] = "continuous" - return payload_beta_binom - if request.param == "incorrect_params": - payload_beta_binom["arms"][0].pop("alpha_init") - return payload_beta_binom - if request.param == "invalid_sigma": - payload_normal["arms"][0]["sigma_init"] = 0.0 - return payload_normal - if 
request.param == "with_sticky_assignment": - payload_beta_binom["sticky_assignment"] = True - return payload_beta_binom - else: - raise ValueError("Invalid parameter") - - @mark.parametrize( - "create_mab_payload, expected_response", - [ - ("base_beta_binom", 200), - ("base_normal", 200), - ("one_arm", 422), - ("no_notifications", 200), - ("invalid_prior", 422), - ("invalid_reward", 422), - ("invalid_alpha", 422), - ("invalid_beta", 422), - ("invalid_combo_1", 422), - ("invalid_combo_2", 422), - ("incorrect_params", 422), - ("invalid_sigma", 422), - ], - indirect=["create_mab_payload"], - ) - def test_create_mab( - self, - create_mab_payload: dict, - client: TestClient, - expected_response: int, - admin_token: str, - clean_mabs: None, - ) -> None: - response = client.post( - "/mab", - json=create_mab_payload, - headers={"Authorization": f"Bearer {admin_token}"}, - ) - - assert response.status_code == expected_response - - @fixture - def create_mabs( - self, - client: TestClient, - admin_token: str, - request: FixtureRequest, - create_mab_payload: dict, - ) -> Generator: - mabs = [] - n_mabs = request.param if hasattr(request, "param") else 1 - for _ in range(n_mabs): - response = client.post( - "/mab", - json=create_mab_payload, - headers={"Authorization": f"Bearer {admin_token}"}, - ) - mabs.append(response.json()) - yield mabs - for mab in mabs: - client.delete( - f"/mab/{mab['experiment_id']}", - headers={"Authorization": f"Bearer {admin_token}"}, - ) - - @mark.parametrize( - "create_mabs, create_mab_payload, n_expected", - [ - (0, "base_beta_binom", 0), - (2, "base_beta_binom", 2), - (5, "base_beta_binom", 5), - ], - indirect=["create_mabs", "create_mab_payload"], - ) - def test_get_all_mabs( - self, - client: TestClient, - admin_token: str, - n_expected: int, - create_mabs: list, - create_mab_payload: dict, - ) -> None: - response = client.get( - "/mab", headers={"Authorization": f"Bearer {admin_token}"} - ) - assert response.status_code == 200 - assert len(response.json()) == n_expected - - @mark.parametrize( - "create_mabs, create_mab_payload, expected_response", - [(0, "base_beta_binom", 404), (2, "base_beta_binom", 200)], - indirect=["create_mabs", "create_mab_payload"], - ) - def test_get_mab( - self, - client: TestClient, - admin_token: str, - create_mabs: list, - create_mab_payload: dict, - expected_response: int, - ) -> None: - id = create_mabs[0]["experiment_id"] if create_mabs else 999 - - response = client.get( - f"/mab/{id}/", headers={"Authorization": f"Bearer {admin_token}"} - ) - assert response.status_code == expected_response - - @mark.parametrize("create_mab_payload", ["base_beta_binom"], indirect=True) - def test_draw_arm_draw_id_provided( - self, - client: TestClient, - create_mabs: list, - create_mab_payload: dict, - workspace_api_key: str, - ) -> None: - id = create_mabs[0]["experiment_id"] - response = client.get( - f"/mab/{id}/draw", - params={"draw_id": "test_draw"}, - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == 200 - assert response.json()["draw_id"] == "test_draw" - - @mark.parametrize("create_mab_payload", ["base_beta_binom"], indirect=True) - def test_draw_arm_no_draw_id_provided( - self, - client: TestClient, - create_mabs: list, - create_mab_payload: dict, - workspace_api_key: str, - ) -> None: - id = create_mabs[0]["experiment_id"] - response = client.get( - f"/mab/{id}/draw", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == 200 - assert 
len(response.json()["draw_id"]) == 36 - - @mark.parametrize( - "create_mab_payload, client_id, expected_response", - [ - ("with_sticky_assignment", None, 400), - ("with_sticky_assignment", "test_client_id", 200), - ], - indirect=["create_mab_payload"], - ) - def test_draw_arm_sticky_assignment_with_client_id( - self, - client: TestClient, - admin_token: str, - create_mab_payload: dict, - create_mabs: list, - client_id: str | None, - expected_response: int, - workspace_api_key: str, - ) -> None: - mabs = create_mabs - id = mabs[0]["experiment_id"] - response = client.get( - f"/mab/{id}/draw{'?client_id=' + client_id if client_id else ''}", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == expected_response - - @mark.parametrize("create_mab_payload", ["with_sticky_assignment"], indirect=True) - def test_draw_arm_sticky_assignment_client_id_provided( - self, - client: TestClient, - admin_token: str, - create_mab_payload: dict, - create_mabs: list, - workspace_api_key: str, - ) -> None: - mabs = create_mabs - id = mabs[0]["experiment_id"] - response = client.get( - f"/mab/{id}/draw?client_id=123", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == 200 - - @mark.parametrize("create_mab_payload", ["with_sticky_assignment"], indirect=True) - def test_draw_arm_sticky_assignment_similar_arms( - self, - client: TestClient, - admin_token: str, - create_mab_payload: dict, - create_mabs: list, - workspace_api_key: str, - ) -> None: - mabs = create_mabs - id = mabs[0]["experiment_id"] - - arm_ids = [] - for _ in range(10): - response = client.get( - f"/mab/{id}/draw?client_id=123", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - arm_ids.append(response.json()["arm"]["arm_id"]) - assert np.unique(arm_ids).size == 1 - - @mark.parametrize("create_mab_payload", ["base_beta_binom"], indirect=True) - def test_one_outcome_per_draw( - self, - client: TestClient, - create_mabs: list, - create_mab_payload: dict, - workspace_api_key: str, - ) -> None: - id = create_mabs[0]["experiment_id"] - response = client.get( - f"/mab/{id}/draw", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == 200 - draw_id = response.json()["draw_id"] - - response = client.put( - f"/mab/{id}/{draw_id}/1", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - - assert response.status_code == 200 - - response = client.put( - f"/mab/{id}/{draw_id}/1", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - - assert response.status_code == 400 - - @mark.parametrize( - "n_draws, create_mab_payload", - [(0, "base_beta_binom"), (1, "base_beta_binom"), (5, "base_beta_binom")], - indirect=["create_mab_payload"], - ) - def test_get_outcomes( - self, - client: TestClient, - create_mabs: list, - n_draws: int, - create_mab_payload: dict, - workspace_api_key: str, - ) -> None: - id = create_mabs[0]["experiment_id"] - - for _ in range(n_draws): - response = client.get( - f"/mab/{id}/draw", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - assert response.status_code == 200 - draw_id = response.json()["draw_id"] - # put outcomes - response = client.put( - f"/mab/{id}/{draw_id}/1", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - - response = client.get( - f"/mab/{id}/outcomes", - headers={"Authorization": f"Bearer {workspace_api_key}"}, - ) - - assert response.status_code == 200 - assert len(response.json()) == n_draws - - -class 
TestNotifications: - @fixture() - def create_mab_payload(self, request: FixtureRequest) -> dict: - payload: dict = copy.deepcopy(base_beta_binom_payload) - payload["arms"] = list(payload["arms"]) - - match request.param: - case "base": - pass - case "daysElapsed_only": - payload["notifications"]["onTrialCompletion"] = False - payload["notifications"]["onDaysElapsed"] = True - case "trialCompletion_only": - payload["notifications"]["onTrialCompletion"] = True - case "percentBetter_only": - payload["notifications"]["onTrialCompletion"] = False - payload["notifications"]["onPercentBetter"] = True - case "all_notifications": - payload["notifications"]["onDaysElapsed"] = True - payload["notifications"]["onPercentBetter"] = True - case "no_notifications": - payload["notifications"]["onTrialCompletion"] = False - case "daysElapsed_missing": - payload["notifications"]["daysElapsed"] = 0 - payload["notifications"]["onDaysElapsed"] = True - case "trialCompletion_missing": - payload["notifications"]["numberOfTrials"] = 0 - payload["notifications"]["onTrialCompletion"] = True - case "percentBetter_missing": - payload["notifications"]["percentBetterThreshold"] = 0 - payload["notifications"]["onPercentBetter"] = True - case _: - raise ValueError("Invalid parameter") - - return payload - - @mark.parametrize( - "create_mab_payload, expected_response", - [ - ("base", 200), - ("daysElapsed_only", 200), - ("trialCompletion_only", 200), - ("percentBetter_only", 200), - ("all_notifications", 200), - ("no_notifications", 200), - ("daysElapsed_missing", 422), - ("trialCompletion_missing", 422), - ("percentBetter_missing", 422), - ], - indirect=["create_mab_payload"], - ) - def test_notifications( - self, - client: TestClient, - admin_token: str, - create_mab_payload: dict, - expected_response: int, - clean_mabs: None, - ) -> None: - response = client.post( - "/mab", - json=create_mab_payload, - headers={"Authorization": f"Bearer {admin_token}"}, - ) - - assert response.status_code == expected_response diff --git a/backend/tests/test_messages.py b/backend/tests/test_messages.py index 86396b4..253e4f2 100644 --- a/backend/tests/test_messages.py +++ b/backend/tests/test_messages.py @@ -9,19 +9,20 @@ base_mab_payload = { "name": "Test", - "description": "Test description", + "description": "Test description.", + "exp_type": "mab", "prior_type": "beta", "reward_type": "binary", "arms": [ { "name": "arm 1", - "description": "arm 1 description", + "description": "arm 1 description.", "alpha_init": 5, "beta_init": 1, }, { "name": "arm 2", - "description": "arm 2 description", + "description": "arm 2 description.", "alpha_init": 1, "beta_init": 4, }, @@ -34,6 +35,8 @@ "onPercentBetter": False, "percentBetterThreshold": 5, }, + "contexts": [], + "clients": [], } @@ -53,13 +56,13 @@ def admin_token(client: TestClient) -> str: @fixture def experiment_id(client: TestClient, admin_token: str) -> Generator[int, None, None]: response = client.post( - "/mab", + "/experiment", headers={"Authorization": f"Bearer {admin_token}"}, json=base_mab_payload, ) yield response.json()["experiment_id"] client.delete( - f"/mab/{response.json()['experiment_id']}", + f"/experiment/id/{response.json()['experiment_id']}", headers={"Authorization": f"Bearer {admin_token}"}, ) diff --git a/backend/tests/test_notifications_job.py b/backend/tests/test_notifications_job.py index 8b1ef80..911cbfc 100644 --- a/backend/tests/test_notifications_job.py +++ b/backend/tests/test_notifications_job.py @@ -12,21 +12,22 @@ from backend.jobs import 
create_notifications from backend.jobs.create_notifications import process_notifications -base_mab_payload = { +base_experiment_payload = { "name": "Test", - "description": "Test description", + "description": "Test description.", + "exp_type": "mab", "prior_type": "beta", "reward_type": "binary", "arms": [ { "name": "arm 1", - "description": "arm 1 description", + "description": "arm 1 description.", "alpha_init": 5, "beta_init": 1, }, { "name": "arm 2", - "description": "arm 2 description", + "description": "arm 2 description.", "alpha_init": 1, "beta_init": 4, }, @@ -39,6 +40,8 @@ "onPercentBetter": False, "percentBetterThreshold": 5, }, + "contexts": [], + "clients": [], } @@ -67,65 +70,65 @@ def admin_token(client: TestClient) -> str: class TestNotificationsJob: @fixture - def create_mabs_days_elapsed( + def create_experiments_days_elapsed( self, client: TestClient, admin_token: str, request: FixtureRequest ) -> Generator: - mabs = [] - n_mabs, days_elapsed = request.param + experiments = [] + n_experiments, days_elapsed = request.param - payload: dict = copy.deepcopy(base_mab_payload) + payload: dict = copy.deepcopy(base_experiment_payload) payload["notifications"]["onDaysElapsed"] = True payload["notifications"]["daysElapsed"] = days_elapsed - for _ in range(n_mabs): + for _ in range(n_experiments): response = client.post( - "/mab", + "/experiment", json=payload, headers={"Authorization": f"Bearer {admin_token}"}, ) - mabs.append(response.json()) - yield mabs - for mab in mabs: + experiments.append(response.json()) + yield experiments + for experiment in experiments: client.delete( - f"/mab/{mab['experiment_id']}", + f"/experiment/id/{experiment['experiment_id']}", headers={"Authorization": f"Bearer {admin_token}"}, ) @fixture - def create_mabs_trials_run( + def create_experiments_trials_run( self, client: TestClient, admin_token: str, request: FixtureRequest ) -> Generator: - mabs = [] - n_mabs, n_trials = request.param + experiments = [] + n_experiments, n_trials = request.param - payload: dict = copy.deepcopy(base_mab_payload) + payload: dict = copy.deepcopy(base_experiment_payload) payload["notifications"]["onTrialCompletion"] = True payload["notifications"]["numberOfTrials"] = n_trials - for _ in range(n_mabs): + for _ in range(n_experiments): response = client.post( - "/mab", + "/experiment", json=payload, headers={"Authorization": f"Bearer {admin_token}"}, ) - mabs.append(response.json()) - yield mabs - for mab in mabs: + experiments.append(response.json()) + yield experiments + for experiment in experiments: client.delete( - f"/mab/{mab['experiment_id']}", + f"/experiment/id/{experiment['experiment_id']}", headers={"Authorization": f"Bearer {admin_token}"}, ) @mark.parametrize( - "create_mabs_days_elapsed, days_elapsed", + "create_experiments_days_elapsed, days_elapsed", [((3, 4), 4), ((4, 62), 64), ((3, 40), 40)], - indirect=["create_mabs_days_elapsed"], + indirect=["create_experiments_days_elapsed"], ) async def test_days_elapsed_notification( self, client: TestClient, admin_token: str, - create_mabs_days_elapsed: list[dict], + create_experiments_days_elapsed: list[dict], db_session: Session, days_elapsed: int, monkeypatch: MonkeyPatch, @@ -137,18 +140,18 @@ async def test_days_elapsed_notification( fake_datetime(days_elapsed), ) n_processed = await process_notifications(asession) - assert n_processed == len(create_mabs_days_elapsed) + assert n_processed == len(create_experiments_days_elapsed) @mark.parametrize( - "create_mabs_days_elapsed, days_elapsed", + 
"create_experiments_days_elapsed, days_elapsed", [((3, 4), 3), ((4, 62), 50), ((3, 40), 0)], - indirect=["create_mabs_days_elapsed"], + indirect=["create_experiments_days_elapsed"], ) async def test_days_elapsed_notification_not_sent( self, client: TestClient, admin_token: str, - create_mabs_days_elapsed: list[dict], + create_experiments_days_elapsed: list[dict], db_session: Session, days_elapsed: int, monkeypatch: MonkeyPatch, @@ -163,16 +166,16 @@ async def test_days_elapsed_notification_not_sent( assert n_processed == 0 @mark.parametrize( - "create_mabs_trials_run, n_trials", + "create_experiments_trials_run, n_trials", [((3, 4), 4), ((4, 62), 64), ((3, 40), 40)], - indirect=["create_mabs_trials_run"], + indirect=["create_experiments_trials_run"], ) async def test_trials_run_notification( self, client: TestClient, admin_token: str, n_trials: int, - create_mabs_trials_run: list[dict], + create_experiments_trials_run: list[dict], db_session: Session, asession: AsyncSession, workspace_api_key: str, @@ -180,11 +183,11 @@ async def test_trials_run_notification( n_processed = await process_notifications(asession) assert n_processed == 0 headers = {"Authorization": f"Bearer {workspace_api_key}"} - for mab in create_mabs_trials_run: + for experiment in create_experiments_trials_run: for i in range(n_trials): - draw_id = f"draw_{i}_{mab['experiment_id']}" - response = client.get( - f"/mab/{mab['experiment_id']}/draw", + draw_id = f"draw_{i}_{experiment['experiment_id']}" + response = client.put( + f"/experiment/{experiment['experiment_id']}/draw", params={"draw_id": draw_id}, headers=headers, ) @@ -192,10 +195,10 @@ async def test_trials_run_notification( assert response.json()["draw_id"] == draw_id response = client.put( - f"/mab/{mab['experiment_id']}/{draw_id}/1", + f"/experiment/{experiment['experiment_id']}/{draw_id}/1", headers=headers, ) assert response.status_code == 200 n_processed = await process_notifications(asession) await asyncio.sleep(0.1) - assert n_processed == len(create_mabs_trials_run) + assert n_processed == len(create_experiments_trials_run) diff --git a/deployment/docker-compose/docker-compose-dev.yml b/deployment/docker-compose/docker-compose-dev.yml index 6a7b5df..25db623 100644 --- a/deployment/docker-compose/docker-compose-dev.yml +++ b/deployment/docker-compose/docker-compose-dev.yml @@ -4,13 +4,17 @@ services: build: context: ../../backend dockerfile: Dockerfile + entrypoint: ["/bin/sh", "-c"] command: > - python -m alembic upgrade head && python add_users_to_db.py && uvicorn main:app --host 0.0.0.0 --port 8000 --reload + "python -m alembic upgrade head && + python add_users_to_db.py && + uvicorn main:app --host 0.0.0.0 --port 8000 --reload + " restart: always ports: - "8000:8000" volumes: - # - temp:/usr/src/experiment_engine_backend/temp + - temp:/usr/src/experiment_engine_backend/temp - ../../backend:/usr/src/experiment_engine_backend env_file: - .base.env @@ -65,7 +69,7 @@ services: redis: image: "redis:6.0-alpine" - ports: # Expose the port to port 6380 on the host machine for debugging + ports: - "6380:6379" restart: always @@ -73,4 +77,4 @@ volumes: db_volume: caddy_data: caddy_config: - # temp: + temp: diff --git a/deployment/docker-compose/docker-compose.yml b/deployment/docker-compose/docker-compose.yml index c326f49..307679c 100644 --- a/deployment/docker-compose/docker-compose.yml +++ b/deployment/docker-compose/docker-compose.yml @@ -57,7 +57,7 @@ services: redis: image: "redis:6.0-alpine" - ports: # Expose the port to port 6380 on the host machine for 
debugging + ports: - "6380:6379" restart: always diff --git a/docs/getting-started/index.md b/docs/getting-started/index.md index 8c5ba05..290a8ea 100644 --- a/docs/getting-started/index.md +++ b/docs/getting-started/index.md @@ -4,6 +4,14 @@ This guides you through the types of experiments available, setting up your firs
+- :octicons-organization-24:{ .lg .middle } __Workspaces__ + + --- + + Learn about workspaces and how to organize your experiments. + + [:octicons-arrow-right-24: Workspaces](./workspaces/index.md) + - :octicons-tools-24:{ .lg .middle } __Set up your first experiment__ --- diff --git a/docs/getting-started/workspaces/creating-switching.md b/docs/getting-started/workspaces/creating-switching.md new file mode 100644 index 0000000..b330025 --- /dev/null +++ b/docs/getting-started/workspaces/creating-switching.md @@ -0,0 +1,96 @@ +# Creating & Switching Workspaces + +Learn how to create new team workspaces and switch between different workspaces. + +## Creating Team Workspaces + +To create a new team workspace for collaboration: + +### Step 1: Access Workspace Creation + +1. Navigate to the **Workspaces** section in the sidebar +2. Click **Create New Workspace** +3. The workspace creation dialog will appear + + +### Step 2: Configure Your Workspace + +1. **Workspace Name**: Enter a descriptive name for your team workspace +2. **Create**: Click the create button + +### Notes +- Workspace names must be unique across the platform +- You automatically become the administrator of workspaces you create + +### What Happens Next +- A new team workspace is created with you as the administrator +- A unique API key is generated for the workspace +- You can immediately start inviting team members +- You can begin creating experiments within the workspace + +## Switching Between Workspaces + +You can easily switch between any workspaces you have access to. + +### Using the Workspace Switcher + +1. Click the **Workspace Switcher** in the sidebar header +2. A dropdown will show all available workspaces +3. Select the workspace you want to switch to +4. The interface will reload with that workspace's content + + +### What Changes When You Switch + +- **Authentication Token**: Updates to reflect the new workspace +- **Experiments Shown**: Only experiments from the selected workspace +- **API Configuration**: Workspace-specific API keys and settings +- **User Permissions**: Your role in the new workspace takes effect + +## Managing Multiple Workspaces + +### Workspace Organization Tips + +**Clear Naming Convention**: +``` +Personal Workspace (your default) +ProjectName-Production +ProjectName-Development +TeamName-Experiments +``` + +**Environment Separation**: +``` +Production Workspace (live experiments) +Staging Workspace (pre-production testing) +Development Workspace (development and testing) +``` + +### Keeping Track of Workspaces + +- **Bookmark Important Workspaces**: Note which workspaces you use most frequently +- **Document Purpose**: Keep track of what each workspace is used for +- **Regular Review**: Periodically review and clean up unused workspaces + + +## Next Steps + +
+ +- :octicons-gear-24:{ .lg .middle } __Managing Workspaces__ + + --- + + Learn how to manage settings, API keys, and team members + + [:octicons-arrow-right-24: Managing Workspaces](./managing.md) + +- :octicons-tools-24:{ .lg .middle } __Create Your First Experiment__ + + --- + + Ready to create experiments in your workspace? + + [:octicons-arrow-right-24: Setup Experiment](../first-experiment/index.md) + +
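To illustrate what switching workspaces means for an API client, here is a rough sketch of the login-then-list flow using endpoints that appear in this PR's tests (`POST /login` and `GET /experiment/type/{exp_type}`). The base URL and credentials are placeholders, and, per the description above, the listing reflects whichever workspace is currently active for the user's token.

```python
# Illustrative sketch only; endpoint paths come from the test suite in this PR,
# while the base URL and credentials are placeholder values.
import requests

BASE_URL = "http://localhost:8000"  # assumed local deployment

# Log in with username/password to obtain a bearer token for the active workspace.
login = requests.post(
    f"{BASE_URL}/login",
    data={"username": "admin", "password": "change-me"},  # placeholder credentials
)
login.raise_for_status()
token = login.json()["access_token"]

# List MAB experiments; only experiments from the currently active workspace are returned.
experiments = requests.get(
    f"{BASE_URL}/experiment/type/mab",
    headers={"Authorization": f"Bearer {token}"},
)
experiments.raise_for_status()
print(f"{len(experiments.json())} MAB experiments in the active workspace")
```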
diff --git a/docs/getting-started/workspaces/index.md b/docs/getting-started/workspaces/index.md new file mode 100644 index 0000000..3a973ad --- /dev/null +++ b/docs/getting-started/workspaces/index.md @@ -0,0 +1,90 @@ +# Workspaces Overview + +Workspaces are organizational containers that help you group and manage your experiments. All experiments in ExE are created within a workspace, which provides isolation, access control, and team collaboration. + +## What are Workspaces? + +A workspace is a logical boundary that contains: + +- **Experiments**: All your A/B tests, multi-armed bandits, and contextual bandits +- **API Configuration**: Unique API keys for secure access +- **User Access**: Team members with different permission levels (for team workspaces) +- **Organization**: Group experiments by project, team, or environment + + +## Key Features + +
+ +- :octicons-organization-24:{ .lg .middle } __Experiment Organization__ + + --- + + Group experiments by project, team, or environment + +- :octicons-key-24:{ .lg .middle } __API Key Management__ + + --- + + Secure, workspace-specific API keys with rotation support + +- :octicons-people-24:{ .lg .middle } __Team Collaboration__ + + --- + + Role-based permissions for team workspaces + +- :octicons-shield-24:{ .lg .middle } __Isolation & Security__ + + --- + + Complete separation between different workspaces + +
+ +## How Workspaces Work + +### Experiment Isolation +- All experiments belong to a specific workspace +- Experiment IDs are unique within each workspace +- Complete data separation between workspaces + +### API Integration +- Each workspace has its own unique API key +- All API calls are scoped to the workspace +- Secure authentication for each workspace + +### Access Control +- Personal workspaces: Single user access only +- Team workspaces: Multiple users with role-based permissions +- Complete isolation between different workspaces + +## Next Steps + +
+ +- :octicons-people-24:{ .lg .middle } __Personal vs Team Workspaces__ + + --- + + Learn about the different types of workspaces + + [:octicons-arrow-right-24: Personal vs Team](./personal-vs-team.md) + +- :octicons-plus-24:{ .lg .middle } __Creating & Switching__ + + --- + + Learn how to create and switch between workspaces + + [:octicons-arrow-right-24: Creating & Switching](./creating-switching.md) + +- :octicons-gear-24:{ .lg .middle } __Managing Workspaces__ + + --- + + Manage settings, API keys, and team members + + [:octicons-arrow-right-24: Managing Workspaces](./managing.md) + +
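To make the API-key scoping described above concrete, here is a minimal client sketch built on the endpoints exercised by the tests in this PR (`PUT /experiment/{id}/draw` and `PUT /experiment/{id}/{draw_id}/{outcome}`). The base URL, key value, and experiment ID are placeholders rather than values defined anywhere in this change.

```python
# Minimal sketch of workspace-scoped calls for a (non-contextual) experiment.
# The Authorization header carries the workspace API key; all values below are
# placeholders for illustration.
import requests

BASE_URL = "http://localhost:8000"   # assumed local deployment
WORKSPACE_API_KEY = "exe_..."        # placeholder workspace API key
EXPERIMENT_ID = 1                    # placeholder experiment ID

headers = {"Authorization": f"Bearer {WORKSPACE_API_KEY}"}

# Draw an arm; the server generates a draw_id when none is supplied.
draw = requests.put(f"{BASE_URL}/experiment/{EXPERIMENT_ID}/draw", headers=headers)
draw.raise_for_status()
draw_id = draw.json()["draw_id"]

# Report a reward of 1 for that draw; a second report for the same draw is rejected.
outcome = requests.put(
    f"{BASE_URL}/experiment/{EXPERIMENT_ID}/{draw_id}/1", headers=headers
)
outcome.raise_for_status()
```

Contextual experiments additionally pass their context values as the JSON body of the draw request, as the contextual test cases earlier in this PR do.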
diff --git a/docs/getting-started/workspaces/managing.md b/docs/getting-started/workspaces/managing.md new file mode 100644 index 0000000..e30f879 --- /dev/null +++ b/docs/getting-started/workspaces/managing.md @@ -0,0 +1,130 @@ +# Managing Workspaces + +Learn how to manage workspace settings, API keys, and team members. + +## Accessing Workspace Settings + +Access workspace settings by clicking **Manage Workspace** from the workspace overview or settings menu. + + +## Workspace Information +- **Workspace Name**: Update the display name +- **Creation Date**: When the workspace was created +- **Administrator**: Who manages the workspace + + +## API Key Management + +### Viewing Your API Key + +For security, only the first few characters of your API key are displayed in the interface. The full key is only shown when rotating an existing key. + +### API Key Information +- **Key Prefix**: First few characters (e.g., `exe_abc...`) +- **Created Date**: When the key was generated +- **Last Rotated**: Most recent rotation date +- **Rotated By**: User who performed the last rotation + +### Rotating API Keys + +Rotate your workspace API key periodically for security: + +1. Go to workspace settings (Manage) +2. Navigate to the **API** configuration tab +3. Click **Rotate API Key** +4. Confirm the action in the dialog +5. **Important**: Copy the new key immediately +6. Update any applications using the old key + + +!!! warning "Important" + Rotating a key immediately invalidates the old key. Make sure to update all applications before rotating. + +### Key Rotation History + +Track when API keys were rotated: + +- **Date & Time**: When each rotation occurred +- **Rotated By**: Which user performed the rotation +- **Key Prefix**: First characters of each rotated key + +## Team Management (Team Workspaces Only) + +!!! note "Personal Workspaces" + Personal workspaces do not support team member invitations. Only team workspaces allow collaboration. + +### Adding Team Members + +For team workspaces, invite users to collaborate: + +1. Go to workspace settings +2. Click the **Users** tab +3. Click **Invite User** +4. Enter their email address +5. Select a role (Admin or Read-only) +6. Click **Send Invitation** + + +### Invitation Process + +The invited user will receive an email with instructions to: + +- Create an account if they don't already have one; once the account is created, they are added to the workspace +- Accept the workspace invitation if they already have an account +- Access the workspace + +### Managing Existing Users + +**View Team Members**: + +- See all current workspace members +- View their roles and permissions +- Check when they joined + +**Remove Users**: + +- Click **Remove** next to a user's name +- Confirm the removal +- User immediately loses access to the workspace + + +## Workspace Maintenance + +### Regular Tasks + +**Monthly**: + +- Review team member access +- Check API key rotation dates +- Clean up unused experiments + +**Quarterly**: + +- Rotate API keys for production workspaces +- Review workspace organization + +### Data Management + +**Experiment Cleanup**: + +- Organize experiments with clear naming + +**Access Review**: + +- Remove inactive team members +- Apply the principle of least privilege + + +## Next Steps + +
+ +- :octicons-tools-24:{ .lg .middle } __Create Experiments__ + + --- + + Ready to start creating experiments? + + [:octicons-arrow-right-24: Setup Experiment](../first-experiment/index.md) + +
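As a follow-up to the rotation warning above, one common pattern is to load the workspace API key from configuration so that rotating the key only means updating a single value rather than redeploying the client. The sketch below is illustrative only: the `EXE_WORKSPACE_API_KEY` environment variable name, base URL, and experiment ID are made up for the example, while the `/experiment/{id}/rewards` endpoint comes from the test suite in this change.

```python
# Illustrative sketch: read the workspace API key from configuration so key
# rotation does not require code changes. Variable name and URL are placeholders.
import os

import requests

BASE_URL = "http://localhost:8000"  # assumed local deployment


def workspace_headers() -> dict:
    """Build the auth header from the currently configured workspace API key."""
    key = os.environ["EXE_WORKSPACE_API_KEY"]  # hypothetical variable name
    return {"Authorization": f"Bearer {key}"}


# Fetch all recorded rewards for an experiment in this workspace.
rewards = requests.get(f"{BASE_URL}/experiment/1/rewards", headers=workspace_headers())
rewards.raise_for_status()
print(f"{len(rewards.json())} rewards recorded")
```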
diff --git a/docs/getting-started/workspaces/personal-vs-team.md b/docs/getting-started/workspaces/personal-vs-team.md new file mode 100644 index 0000000..3dd0c43 --- /dev/null +++ b/docs/getting-started/workspaces/personal-vs-team.md @@ -0,0 +1,98 @@ +# Personal vs Team Workspaces + +ExE offers two types of workspaces to meet different collaboration needs. + +## Personal Workspace (Default) + +When you first sign up for ExE, a personal workspace is automatically created for you with the name "{Your Username}'s Workspace". + + +### Characteristics +- **Individual Use**: Designed for personal experiments and development +- **No Team Invitations**: Cannot invite other team members +- **Private**: Only you can access and manage experiments +- **Unique API Key**: Has its own API key separate from any team workspaces + +### Best Use Cases +- Personal development and testing +- Proof-of-concept experiments +- Learning and experimentation +- Individual research projects + +## Team Workspaces + +Team workspaces enable collaboration and shared experiment management. + +### Characteristics +- **Multi-User Access**: Support multiple team members +- **Role-Based Permissions**: Different access levels for team members +- **Shared Management**: Collaborative experiment creation and analysis +- **Separate API Keys**: Independent from personal workspace + +### User Roles + +#### Admin Role: +- Create and manage experiments +- Invite and remove users +- Rotate API keys +- Modify workspace settings +- View all workspace data + +#### Read-only Role: +- View experiments and results +- Cannot create or modify experiments +- Cannot invite other users + +### Best Use Cases +- Production experiments +- Team projects requiring collaboration +- Shared analysis and decision-making +- Cross-functional experiment management + +## Choosing the Right Workspace Type + +### Use Personal Workspace When: +- Working on individual experiments +- Testing and development +- Learning ExE features + +### Create Team Workspace When: +- Multiple people need access +- Collaborative decision-making is required +- Production experiments need team oversight +- Shared analysis is beneficial + +## Workspace Comparison + +| Feature | Personal Workspace | Team Workspace | +|---------|-------------------|----------------| +| User Access | Single user only | Multiple users | +| Team Invitations | ❌ Not supported | ✅ Supported | +| Role Management | ❌ N/A | ✅ Admin/Read-only | +| Collaboration | ❌ Individual only | ✅ Full collaboration | +| API Key | ✅ Unique key | ✅ Unique key | +| Experiment Creation | ✅ Admin only (you) | ✅ Admins only | + +## Next Steps + +Ready to create a team workspace or learn more about workspace management? + +
+ +- :octicons-plus-24:{ .lg .middle } __Creating Workspaces__ + + --- + + Learn how to create and switch between workspaces + + [:octicons-arrow-right-24: Creating & Switching](./creating-switching.md) + +- :octicons-gear-24:{ .lg .middle } __Managing Workspaces__ + + --- + + Manage settings, API keys, and team members + + [:octicons-arrow-right-24: Managing Workspaces](./managing.md) + +
diff --git a/frontend/package-lock.json b/frontend/package-lock.json index bd3f82d..d4c756a 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -83,14 +83,25 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/@babel/runtime": { - "version": "7.27.0", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.0.tgz", - "integrity": "sha512-VtPOkrdPHZsKc/clNqyi9WUA8TINkZ4cGk63UUE3u4pmB2k+ZMQRDuIOagv8UVd6j7k0T3+RRIb7beKTebNbcw==", - "license": "MIT", + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "dev": true, + "license": "Apache-2.0", "dependencies": { - "regenerator-runtime": "^0.14.0" + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.27.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.6.tgz", + "integrity": "sha512-vbavdySgbTTrmFE+EsiqUTzlOr5bzlnJtUv9PynGCAKvfQqjIXbvFdumPM/GxMDfyuGMJaJAU6TO4zc1Jf1i8Q==", + "license": "MIT", "engines": { "node": ">=6.9.0" } @@ -130,9 +141,9 @@ } }, "node_modules/@eslint-community/eslint-utils": { - "version": "4.6.1", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.6.1.tgz", - "integrity": "sha512-KTsJMmobmbrFLe3LDh0PC2FXpcSYJt/MLjlkh/9LEnmKYLSYmT/0EW9JWANjeoemiuZrmogti0tW5Ch+qNUYDw==", + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.7.0.tgz", + "integrity": "sha512-dyybb3AcajC7uha6CvhdVRJqaKyn7w2YKqKyAN37NKYgZT36w+iRb0Dymmc5qEJ549c/S31cMMSFd75bteCpCw==", "dev": true, "license": "MIT", "dependencies": { @@ -193,28 +204,28 @@ } }, "node_modules/@floating-ui/core": { - "version": "1.6.9", - "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.9.tgz", - "integrity": "sha512-uMXCuQ3BItDUbAMhIXw7UPXRfAlOAvZzdK9BWpE60MCn+Svt3aLn9jsPTi/WNGlRUu2uI0v5S7JiIUsbsvh3fw==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.1.tgz", + "integrity": "sha512-azI0DrjMMfIug/ExbBaeDVJXcY0a7EPvPjb2xAJPa4HeimBX+Z18HK8QQR3jb6356SnDDdxx+hinMLcJEDdOjw==", "license": "MIT", "dependencies": { "@floating-ui/utils": "^0.2.9" } }, "node_modules/@floating-ui/dom": { - "version": "1.6.13", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.13.tgz", - "integrity": "sha512-umqzocjDgNRGTuO7Q8CU32dkHkECqI8ZdMZ5Swb6QAM0t5rnlrN3lGo1hdpscRd3WS8T6DKYK4ephgIH9iRh3w==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.1.tgz", + "integrity": "sha512-cwsmW/zyw5ltYTUeeYJ60CnQuPqmGwuGVhG9w0PRaRKkAyi38BT5CKrpIbb+jtahSwUl04cWzSx9ZOIxeS6RsQ==", "license": "MIT", "dependencies": { - "@floating-ui/core": "^1.6.0", + "@floating-ui/core": "^1.7.1", "@floating-ui/utils": "^0.2.9" } }, "node_modules/@floating-ui/react-dom": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.2.tgz", - "integrity": "sha512-06okr5cgPzMNBy+Ycse2A6udMi4bqwW/zgBF/rwjcNqWkyr82Mcg8b0vjX8OJpZFy/FKjJmw6wV7t44kK6kW7A==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.3.tgz", + "integrity": 
"sha512-huMBfiU9UnQ2oBwIhgzyIiSpVgvlDstU8CX0AF+wS+KzmYMs0J2a3GwuFHV1Lz+jlrQGeC1fF+Nv0QoumyV0bA==", "license": "MIT", "dependencies": { "@floating-ui/dom": "^1.0.0" @@ -324,23 +335,89 @@ "url": "https://github.com/chalk/strip-ansi?sponsor=1" } }, + "node_modules/@isaacs/fs-minipass": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", + "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.4" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", + "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, "node_modules/@napi-rs/wasm-runtime": { - "version": "0.2.9", - "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-0.2.9.tgz", - "integrity": "sha512-OKRBiajrrxB9ATokgEQoG87Z25c67pCpYcCwmXYX8PBftC9pBfN18gnm/fh1wurSLEKIAt+QRFLFCQISrb66Jg==", + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-0.2.10.tgz", + "integrity": "sha512-bCsCyeZEwVErsGmyPNSzwfwFn4OdxBj0mmv6hOFucB/k81Ojdu68RbZdxYsRQUPc9l6SU5F/cG+bXgWs3oUgsQ==", "dev": true, "license": "MIT", "optional": true, "dependencies": { - "@emnapi/core": "^1.4.0", - "@emnapi/runtime": "^1.4.0", + "@emnapi/core": "^1.4.3", + "@emnapi/runtime": "^1.4.3", "@tybys/wasm-util": "^0.9.0" } }, "node_modules/@next/env": { - "version": "14.2.28", - "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.28.tgz", - "integrity": "sha512-PAmWhJfJQlP+kxZwCjrVd9QnR5x0R3u0mTXTiZDgSd4h5LdXmjxCCWbN9kq6hkZBOax8Rm3xDW5HagWyJuT37g==", + "version": 
"14.2.29", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.29.tgz", + "integrity": "sha512-UzgLR2eBfhKIQt0aJ7PWH7XRPYw7SXz0Fpzdl5THjUnvxy4kfBk9OU4RNPNiETewEEtaBcExNFNn1QWH8wQTjg==", "license": "MIT" }, "node_modules/@next/eslint-plugin-next": { @@ -354,9 +431,9 @@ } }, "node_modules/@next/swc-darwin-arm64": { - "version": "14.2.28", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.28.tgz", - "integrity": "sha512-kzGChl9setxYWpk3H6fTZXXPFFjg7urptLq5o5ZgYezCrqlemKttwMT5iFyx/p1e/JeglTwDFRtb923gTJ3R1w==", + "version": "14.2.29", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.29.tgz", + "integrity": "sha512-wWtrAaxCVMejxPHFb1SK/PVV1WDIrXGs9ki0C/kUM8ubKHQm+3hU9MouUywCw8Wbhj3pewfHT2wjunLEr/TaLA==", "cpu": [ "arm64" ], @@ -370,9 +447,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "14.2.28", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.28.tgz", - "integrity": "sha512-z6FXYHDJlFOzVEOiiJ/4NG8aLCeayZdcRSMjPDysW297Up6r22xw6Ea9AOwQqbNsth8JNgIK8EkWz2IDwaLQcw==", + "version": "14.2.29", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.29.tgz", + "integrity": "sha512-7Z/jk+6EVBj4pNLw/JQrvZVrAh9Bv8q81zCFSfvTMZ51WySyEHWVpwCEaJY910LyBftv2F37kuDPQm0w9CEXyg==", "cpu": [ "x64" ], @@ -386,9 +463,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "14.2.28", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.28.tgz", - "integrity": "sha512-9ARHLEQXhAilNJ7rgQX8xs9aH3yJSj888ssSjJLeldiZKR4D7N08MfMqljk77fAwZsWwsrp8ohHsMvurvv9liQ==", + "version": "14.2.29", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.29.tgz", + "integrity": "sha512-o6hrz5xRBwi+G7JFTHc+RUsXo2lVXEfwh4/qsuWBMQq6aut+0w98WEnoNwAwt7hkEqegzvazf81dNiwo7KjITw==", "cpu": [ "arm64" ], @@ -402,9 +479,9 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "14.2.28", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.28.tgz", - "integrity": "sha512-p6gvatI1nX41KCizEe6JkF0FS/cEEF0u23vKDpl+WhPe/fCTBeGkEBh7iW2cUM0rvquPVwPWdiUR6Ebr/kQWxQ==", + "version": "14.2.29", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.29.tgz", + "integrity": "sha512-9i+JEHBOVgqxQ92HHRFlSW1EQXqa/89IVjtHgOqsShCcB/ZBjTtkWGi+SGCJaYyWkr/lzu51NTMCfKuBf7ULNw==", "cpu": [ "arm64" ], @@ -418,9 +495,9 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "14.2.28", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.28.tgz", - "integrity": "sha512-nsiSnz2wO6GwMAX2o0iucONlVL7dNgKUqt/mDTATGO2NY59EO/ZKnKEr80BJFhuA5UC1KZOMblJHWZoqIJddpA==", + "version": "14.2.29", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.29.tgz", + "integrity": "sha512-B7JtMbkUwHijrGBOhgSQu2ncbCYq9E7PZ7MX58kxheiEOwdkM+jGx0cBb+rN5AeqF96JypEppK6i/bEL9T13lA==", "cpu": [ "x64" ], @@ -434,9 +511,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "14.2.28", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.28.tgz", - "integrity": "sha512-+IuGQKoI3abrXFqx7GtlvNOpeExUH1mTIqCrh1LGFf8DnlUcTmOOCApEnPJUSLrSbzOdsF2ho2KhnQoO0I1RDw==", + "version": "14.2.29", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.29.tgz", + "integrity": 
"sha512-yCcZo1OrO3aQ38B5zctqKU1Z3klOohIxug6qdiKO3Q3qNye/1n6XIs01YJ+Uf+TdpZQ0fNrOQI2HrTLF3Zprnw==", "cpu": [ "x64" ], @@ -450,9 +527,9 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "14.2.28", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.28.tgz", - "integrity": "sha512-l61WZ3nevt4BAnGksUVFKy2uJP5DPz2E0Ma/Oklvo3sGj9sw3q7vBWONFRgz+ICiHpW5mV+mBrkB3XEubMrKaA==", + "version": "14.2.29", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.29.tgz", + "integrity": "sha512-WnrfeOEtTVidI9Z6jDLy+gxrpDcEJtZva54LYC0bSKQqmyuHzl0ego+v0F/v2aXq0am67BRqo/ybmmt45Tzo4A==", "cpu": [ "arm64" ], @@ -466,9 +543,9 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "14.2.28", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.28.tgz", - "integrity": "sha512-+Kcp1T3jHZnJ9v9VTJ/yf1t/xmtFAc/Sge4v7mVc1z+NYfYzisi8kJ9AsY8itbgq+WgEwMtOpiLLJsUy2qnXZw==", + "version": "14.2.29", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.29.tgz", + "integrity": "sha512-vkcriFROT4wsTdSeIzbxaZjTNTFKjSYmLd8q/GVH3Dn8JmYjUKOuKXHK8n+lovW/kdcpIvydO5GtN+It2CvKWA==", "cpu": [ "ia32" ], @@ -482,9 +559,9 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "14.2.28", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.28.tgz", - "integrity": "sha512-1gCmpvyhz7DkB1srRItJTnmR2UwQPAUXXIg9r0/56g3O8etGmwlX68skKXJOp9EejW3hhv7nSQUJ2raFiz4MoA==", + "version": "14.2.29", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.29.tgz", + "integrity": "sha512-iPPwUEKnVs7pwR0EBLJlwxLD7TTHWS/AoVZx1l9ZQzfQciqaFEr5AlYzA2uB6Fyby1IF18t4PL0nTpB+k4Tzlw==", "cpu": [ "x64" ], @@ -575,12 +652,12 @@ "license": "MIT" }, "node_modules/@radix-ui/react-accessible-icon": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-accessible-icon/-/react-accessible-icon-1.1.4.tgz", - "integrity": "sha512-J8pIt7l32A9fGIn86vwccQzik5MgIOTtceeTxi6EiiFYwWHLxsTHwiOW4pI5sQhQJWd3MOEkumFBIHwIU038Cw==", + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-accessible-icon/-/react-accessible-icon-1.1.7.tgz", + "integrity": "sha512-XM+E4WXl0OqUJFovy6GjmxxFyx9opfCAIUku4dlKRd5YEPqt4kALOkQOp0Of6reHuUkJuiPBEc5k0o4z4lTC8A==", "license": "MIT", "dependencies": { - "@radix-ui/react-visually-hidden": "1.2.0" + "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", @@ -598,19 +675,19 @@ } }, "node_modules/@radix-ui/react-accordion": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@radix-ui/react-accordion/-/react-accordion-1.2.8.tgz", - "integrity": "sha512-c7OKBvO36PfQIUGIjj1Wko0hH937pYFU2tR5zbIJDUsmTzHoZVHHt4bmb7OOJbzTaWJtVELKWojBHa7OcnUHmQ==", + "version": "1.2.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-accordion/-/react-accordion-1.2.11.tgz", + "integrity": "sha512-l3W5D54emV2ues7jjeG1xcyN7S3jnK3zE2zHqgn0CmMsy9lNJwmgcrmaxS+7ipw15FAivzKNzH3d5EcGoFKw0A==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", - "@radix-ui/react-collapsible": "1.1.8", - "@radix-ui/react-collection": "1.1.4", + "@radix-ui/react-collapsible": "1.1.11", + "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", - "@radix-ui/react-primitive": "2.1.0", + 
"@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { @@ -629,17 +706,17 @@ } }, "node_modules/@radix-ui/react-alert-dialog": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/@radix-ui/react-alert-dialog/-/react-alert-dialog-1.1.11.tgz", - "integrity": "sha512-4KfkwrFnAw3Y5Jeoq6G+JYSKW0JfIS3uDdFC/79Jw9AsMayZMizSSMxk1gkrolYXsa/WzbbDfOA7/D8N5D+l1g==", + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/@radix-ui/react-alert-dialog/-/react-alert-dialog-1.1.14.tgz", + "integrity": "sha512-IOZfZ3nPvN6lXpJTBCunFQPRSvK8MDgSc1FB85xnIpUKOw9en0dJj8JmCAxV7BiZdtYlUpmrQjoTFkVYtdoWzQ==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-dialog": "1.1.11", - "@radix-ui/react-primitive": "2.1.0", - "@radix-ui/react-slot": "1.2.0" + "@radix-ui/react-dialog": "1.1.14", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", @@ -657,12 +734,12 @@ } }, "node_modules/@radix-ui/react-arrow": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.4.tgz", - "integrity": "sha512-qz+fxrqgNxG0dYew5l7qR3c7wdgRu1XVUHGnGYX7rg5HM4p9SWaRmJwfgR3J0SgyUKayLmzQIun+N6rWRgiRKw==", + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-arrow/-/react-arrow-1.1.7.tgz", + "integrity": "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==", "license": "MIT", "dependencies": { - "@radix-ui/react-primitive": "2.1.0" + "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", @@ -680,12 +757,12 @@ } }, "node_modules/@radix-ui/react-aspect-ratio": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-aspect-ratio/-/react-aspect-ratio-1.1.4.tgz", - "integrity": "sha512-ie2mUDtM38LBqVU+Xn+GIY44tWM5yVbT5uXO+th85WZxUUsgEdWNNZWecqqGzkQ4Af+Fq1mYT6TyQ/uUf5gfcw==", + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-aspect-ratio/-/react-aspect-ratio-1.1.7.tgz", + "integrity": "sha512-Yq6lvO9HQyPwev1onK1daHCHqXVLzPhSVjmsNjCa2Zcxy2f7uJD2itDtxknv6FzAKCwD1qQkeVDmX/cev13n/g==", "license": "MIT", "dependencies": { - "@radix-ui/react-primitive": "2.1.0" + "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", @@ -703,13 +780,13 @@ } }, "node_modules/@radix-ui/react-avatar": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/@radix-ui/react-avatar/-/react-avatar-1.1.7.tgz", - "integrity": "sha512-V7ODUt4mUoJTe3VUxZw6nfURxaPALVqmDQh501YmaQsk3D8AZQrOPRnfKn4H7JGDLBc0KqLhT94H79nV88ppNg==", + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/@radix-ui/react-avatar/-/react-avatar-1.1.10.tgz", + "integrity": "sha512-V8piFfWapM5OmNCXTzVQY+E1rDa53zY+MQ4Y7356v4fFz6vqCyUtIz2rUD44ZEdwg78/jKmMJHj07+C/Z/rcog==", "license": "MIT", "dependencies": { "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-is-hydrated": "0.1.0", "@radix-ui/react-use-layout-effect": "1.1.1" @@ -730,16 +807,16 @@ } }, "node_modules/@radix-ui/react-checkbox": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-checkbox/-/react-checkbox-1.2.3.tgz", - "integrity": 
"sha512-pHVzDYsnaDmBlAuwim45y3soIN8H4R7KbkSVirGhXO+R/kO2OLCe0eucUEbddaTcdMHHdzcIGHtZSMSQlA+apw==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-checkbox/-/react-checkbox-1.3.2.tgz", + "integrity": "sha512-yd+dI56KZqawxKZrJ31eENUwqc1QSqg4OZ15rybGjF2ZNwMO+wCyHzAVLRp9qoYJf7kYy0YpZ2b0JCzJ42HZpA==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-presence": "1.1.4", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-use-size": "1.1.1" @@ -760,9 +837,9 @@ } }, "node_modules/@radix-ui/react-collapsible": { - "version": "1.1.8", - "resolved": "https://registry.npmjs.org/@radix-ui/react-collapsible/-/react-collapsible-1.1.8.tgz", - "integrity": "sha512-hxEsLvK9WxIAPyxdDRULL4hcaSjMZCfP7fHB0Z1uUnDoDBat1Zh46hwYfa69DeZAbJrPckjf0AGAtEZyvDyJbw==", + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collapsible/-/react-collapsible-1.1.11.tgz", + "integrity": "sha512-2qrRsVGSCYasSz1RFOorXwl0H7g7J1frQtgpQgYrt+MOidtPAINHn9CPovQXb83r8ahapdx3Tu0fa/pdFFSdPg==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", @@ -770,7 +847,7 @@ "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-presence": "1.1.4", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1" }, @@ -790,15 +867,15 @@ } }, "node_modules/@radix-ui/react-collection": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.4.tgz", - "integrity": "sha512-cv4vSf7HttqXilDnAnvINd53OTl1/bjUYVZrkFnA7nwmY9Ob2POUy0WY0sfqBAe1s5FyKsyceQlqiEGPYNTadg==", + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-collection/-/react-collection-1.1.7.tgz", + "integrity": "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw==", "license": "MIT", "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-primitive": "2.1.0", - "@radix-ui/react-slot": "1.2.0" + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", @@ -846,15 +923,15 @@ } }, "node_modules/@radix-ui/react-context-menu": { - "version": "2.2.12", - "resolved": "https://registry.npmjs.org/@radix-ui/react-context-menu/-/react-context-menu-2.2.12.tgz", - "integrity": "sha512-5UFKuTMX8F2/KjHvyqu9IYT8bEtDSCJwwIx1PghBo4jh9S6jJVsceq9xIjqsOVcxsynGwV5eaqPE3n/Cu+DrSA==", + "version": "2.2.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-context-menu/-/react-context-menu-2.2.15.tgz", + "integrity": "sha512-UsQUMjcYTsBjTSXw0P3GO0werEQvUY2plgRQuKoCTtkNr45q1DiL51j4m7gxhABzZ0BadoXNsIbg7F3KwiUBbw==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-menu": "2.1.12", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-menu": "2.1.15", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2" }, @@ -874,22 +951,22 @@ } }, "node_modules/@radix-ui/react-dialog": { - "version": "1.1.11", - "resolved": 
"https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.11.tgz", - "integrity": "sha512-yI7S1ipkP5/+99qhSI6nthfo/tR6bL6Zgxi/+1UO6qPa6UeM6nlafWcQ65vB4rU2XjgjMfMhI3k9Y5MztA62VQ==", + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.1.14.tgz", + "integrity": "sha512-+CpweKjqpzTmwRwcYECQcNYbI8V9VSQt0SNFKeEBLgfucbsLssU6Ppq7wUdNXEGb573bMjFhVjKVll8rmV6zMw==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-dismissable-layer": "1.1.7", + "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-focus-guards": "1.1.2", - "@radix-ui/react-focus-scope": "1.1.4", + "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", - "@radix-ui/react-portal": "1.1.6", + "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", - "@radix-ui/react-primitive": "2.1.0", - "@radix-ui/react-slot": "1.2.0", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" @@ -925,14 +1002,14 @@ } }, "node_modules/@radix-ui/react-dismissable-layer": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.7.tgz", - "integrity": "sha512-j5+WBUdhccJsmH5/H0K6RncjDtoALSEr6jbkaZu+bjw6hOPOhHycr6vEUujl+HBK8kjUfWcoCJXxP6e4lUlMZw==", + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.1.10.tgz", + "integrity": "sha512-IM1zzRV4W3HtVgftdQiiOmA0AdJlCtMLe00FXaHwgt3rAnNsIyDqshvkIW3hj/iu5hu8ERP7KIYki6NkqDxAwQ==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-escape-keydown": "1.1.1" }, @@ -952,17 +1029,17 @@ } }, "node_modules/@radix-ui/react-dropdown-menu": { - "version": "2.1.12", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.1.12.tgz", - "integrity": "sha512-VJoMs+BWWE7YhzEQyVwvF9n22Eiyr83HotCVrMQzla/OwRovXCgah7AcaEr4hMNj4gJxSdtIbcHGvmJXOoJVHA==", + "version": "2.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.1.15.tgz", + "integrity": "sha512-mIBnOjgwo9AH3FyKaSWoSu/dYj6VdhJ7frEPiGTeXCdUFHjl9h3mFh2wwhEtINOmYXWhdpf1rY2minFsmaNgVQ==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", - "@radix-ui/react-menu": "2.1.12", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-menu": "2.1.15", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { @@ -996,13 +1073,13 @@ } }, "node_modules/@radix-ui/react-focus-scope": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.4.tgz", - "integrity": "sha512-r2annK27lIW5w9Ho5NyQgqs0MmgZSTIKXWpVCJaLC1q2kZrZkcqnmHkCHMEmv8XLvsLlurKMPT+kbKkRkm/xVA==", + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.1.7.tgz", + "integrity": 
"sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw==", "license": "MIT", "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1" }, "peerDependencies": { @@ -1021,17 +1098,17 @@ } }, "node_modules/@radix-ui/react-form": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-form/-/react-form-0.1.4.tgz", - "integrity": "sha512-97Q7Hb0///sMF2X8XvyVx3Aub7WG/ybIofoDVUo8utG/z/6TBzWGjgai7ZjECXYLbKip88t9/ibyQJvYe5k6SA==", + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-form/-/react-form-0.1.7.tgz", + "integrity": "sha512-IXLKFnaYvFg/KkeV5QfOX7tRnwHXp127koOFUjLWMTrRv5Rny3DQcAtIFFeA/Cli4HHM8DuJCXAUsgnFVJndlw==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", - "@radix-ui/react-label": "2.1.4", - "@radix-ui/react-primitive": "2.1.0" + "@radix-ui/react-label": "2.1.7", + "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", @@ -1049,19 +1126,19 @@ } }, "node_modules/@radix-ui/react-hover-card": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/@radix-ui/react-hover-card/-/react-hover-card-1.1.11.tgz", - "integrity": "sha512-q9h9grUpGZKR3MNhtVCLVnPGmx1YnzBgGR+O40mhSNGsUnkR+LChVH8c7FB0mkS+oudhd8KAkZGTJPJCjdAPIg==", + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/@radix-ui/react-hover-card/-/react-hover-card-1.1.14.tgz", + "integrity": "sha512-CPYZ24Mhirm+g6D8jArmLzjYu4Eyg3TTUHswR26QgzXBHBe64BO/RHOJKzmF/Dxb4y4f9PKyJdwm/O/AhNkb+Q==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-dismissable-layer": "1.1.7", - "@radix-ui/react-popper": "1.2.4", - "@radix-ui/react-portal": "1.1.6", + "@radix-ui/react-dismissable-layer": "1.1.10", + "@radix-ui/react-popper": "1.2.7", + "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { @@ -1107,12 +1184,12 @@ } }, "node_modules/@radix-ui/react-label": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-label/-/react-label-2.1.4.tgz", - "integrity": "sha512-wy3dqizZnZVV4ja0FNnUhIWNwWdoldXrneEyUcVtLYDAt8ovGS4ridtMAOGgXBBIfggL4BOveVWsjXDORdGEQg==", + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-label/-/react-label-2.1.7.tgz", + "integrity": "sha512-YT1GqPSL8kJn20djelMX7/cTRp/Y9w5IZHvfxQTVHrOqa2yMl7i/UfMqKRU5V7mEyKTrUVgJXhNQPVCG8PBLoQ==", "license": "MIT", "dependencies": { - "@radix-ui/react-primitive": "2.1.0" + "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", @@ -1130,26 +1207,26 @@ } }, "node_modules/@radix-ui/react-menu": { - "version": "2.1.12", - "resolved": "https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.1.12.tgz", - "integrity": "sha512-+qYq6LfbiGo97Zz9fioX83HCiIYYFNs8zAsVCMQrIakoNYylIzWuoD/anAD3UzvvR6cnswmfRFJFq/zYYq/k7Q==", + "version": "2.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.1.15.tgz", + "integrity": "sha512-tVlmA3Vb9n8SZSd+YSbuFR66l87Wiy4du+YE+0hzKQEANA+7cWKH1WgqcEX4pXqxUFQKrWQGHdvEfw00TjFiew==", 
"license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", - "@radix-ui/react-collection": "1.1.4", + "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", - "@radix-ui/react-dismissable-layer": "1.1.7", + "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-focus-guards": "1.1.2", - "@radix-ui/react-focus-scope": "1.1.4", + "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", - "@radix-ui/react-popper": "1.2.4", - "@radix-ui/react-portal": "1.1.6", + "@radix-ui/react-popper": "1.2.7", + "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", - "@radix-ui/react-primitive": "2.1.0", - "@radix-ui/react-roving-focus": "1.1.7", - "@radix-ui/react-slot": "1.2.0", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.10", + "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-callback-ref": "1.1.1", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" @@ -1170,20 +1247,20 @@ } }, "node_modules/@radix-ui/react-menubar": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/@radix-ui/react-menubar/-/react-menubar-1.1.12.tgz", - "integrity": "sha512-bM2vT5nxRqJH/d1vFQ9jLsW4qR70yFQw2ZD1TUPWUNskDsV0eYeMbbNJqxNjGMOVogEkOJaHtu11kzYdTJvVJg==", + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@radix-ui/react-menubar/-/react-menubar-1.1.15.tgz", + "integrity": "sha512-Z71C7LGD+YDYo3TV81paUs8f3Zbmkvg6VLRQpKYfzioOE6n7fOhA3ApK/V/2Odolxjoc4ENk8AYCjohCNayd5A==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", - "@radix-ui/react-collection": "1.1.4", + "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", - "@radix-ui/react-menu": "2.1.12", - "@radix-ui/react-primitive": "2.1.0", - "@radix-ui/react-roving-focus": "1.1.7", + "@radix-ui/react-menu": "2.1.15", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { @@ -1202,25 +1279,25 @@ } }, "node_modules/@radix-ui/react-navigation-menu": { - "version": "1.2.10", - "resolved": "https://registry.npmjs.org/@radix-ui/react-navigation-menu/-/react-navigation-menu-1.2.10.tgz", - "integrity": "sha512-kGDqMVPj2SRB1vJmXN/jnhC66REAXNyDmDRubbbmJ+360zSIJUDmWGMKIJOf72PHMwPENrbtJVb3CMAUJDjEIA==", + "version": "1.2.13", + "resolved": "https://registry.npmjs.org/@radix-ui/react-navigation-menu/-/react-navigation-menu-1.2.13.tgz", + "integrity": "sha512-WG8wWfDiJlSF5hELjwfjSGOXcBR/ZMhBFCGYe8vERpC39CQYZeq1PQ2kaYHdye3V95d06H89KGMsVCIE4LWo3g==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", - "@radix-ui/react-collection": "1.1.4", + "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", - "@radix-ui/react-dismissable-layer": "1.1.7", + "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-presence": "1.1.4", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-previous": "1.1.1", - "@radix-ui/react-visually-hidden": "1.2.0" + "@radix-ui/react-visually-hidden": "1.2.3" }, 
"peerDependencies": { "@types/react": "*", @@ -1238,19 +1315,19 @@ } }, "node_modules/@radix-ui/react-one-time-password-field": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-one-time-password-field/-/react-one-time-password-field-0.1.4.tgz", - "integrity": "sha512-CygYLHY8kO1De5iAZBn7gQbIoRNVGYx1paIyqbmwlxP6DF7sF1LLW3chXo/qxc4IWUQnsgAhfl9u6IoLXTndqQ==", + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-one-time-password-field/-/react-one-time-password-field-0.1.7.tgz", + "integrity": "sha512-w1vm7AGI8tNXVovOK7TYQHrAGpRF7qQL+ENpT1a743De5Zmay2RbWGKAiYDKIyIuqptns+znCKwNztE2xl1n0Q==", "license": "MIT", "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.2", - "@radix-ui/react-collection": "1.1.4", + "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", - "@radix-ui/react-primitive": "2.1.0", - "@radix-ui/react-roving-focus": "1.1.7", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-effect-event": "0.0.2", "@radix-ui/react-use-is-hydrated": "0.1.0", @@ -1271,24 +1348,54 @@ } } }, + "node_modules/@radix-ui/react-password-toggle-field": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-password-toggle-field/-/react-password-toggle-field-0.1.2.tgz", + "integrity": "sha512-F90uYnlBsLPU1UbSLciLsWQmk8+hdWa6SFw4GXaIdNWxFxI5ITKVdAG64f+Twaa9ic6xE7pqxPyUmodrGjT4pQ==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.2", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-id": "1.1.1", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-effect-event": "0.0.2", + "@radix-ui/react-use-is-hydrated": "0.1.0" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-popover": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/@radix-ui/react-popover/-/react-popover-1.1.11.tgz", - "integrity": "sha512-yFMfZkVA5G3GJnBgb2PxrrcLKm1ZLWXrbYVgdyTl//0TYEIHS9LJbnyz7WWcZ0qCq7hIlJZpRtxeSeIG5T5oJw==", + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popover/-/react-popover-1.1.14.tgz", + "integrity": "sha512-ODz16+1iIbGUfFEfKx2HTPKizg2MN39uIOV8MXeHnmdd3i/N9Wt7vU46wbHsqA0xoaQyXVcs0KIlBdOA2Y95bw==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-dismissable-layer": "1.1.7", + "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-focus-guards": "1.1.2", - "@radix-ui/react-focus-scope": "1.1.4", + "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", - "@radix-ui/react-popper": "1.2.4", - "@radix-ui/react-portal": "1.1.6", + "@radix-ui/react-popper": "1.2.7", + "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", - "@radix-ui/react-primitive": "2.1.0", - "@radix-ui/react-slot": "1.2.0", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", 
"@radix-ui/react-use-controllable-state": "1.2.2", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" @@ -1309,16 +1416,16 @@ } }, "node_modules/@radix-ui/react-popper": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.4.tgz", - "integrity": "sha512-3p2Rgm/a1cK0r/UVkx5F/K9v/EplfjAeIFCGOPYPO4lZ0jtg4iSQXt/YGTSLWaf4x7NG6Z4+uKFcylcTZjeqDA==", + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.7.tgz", + "integrity": "sha512-IUFAccz1JyKcf/RjB552PlWwxjeCJB8/4KxT7EhBHOJM+mN7LdW+B3kacJXILm32xawcMMjb2i0cIZpo+f9kiQ==", "license": "MIT", "dependencies": { "@floating-ui/react-dom": "^2.0.0", - "@radix-ui/react-arrow": "1.1.4", + "@radix-ui/react-arrow": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-rect": "1.1.1", @@ -1341,12 +1448,12 @@ } }, "node_modules/@radix-ui/react-portal": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.6.tgz", - "integrity": "sha512-XmsIl2z1n/TsYFLIdYam2rmFwf9OC/Sh2avkbmVMDuBZIe7hSpM0cYnWPAo7nHOVx8zTuwDZGByfcqLdnzp3Vw==", + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.1.9.tgz", + "integrity": "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==", "license": "MIT", "dependencies": { - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { @@ -1389,12 +1496,12 @@ } }, "node_modules/@radix-ui/react-primitive": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.0.tgz", - "integrity": "sha512-/J/FhLdK0zVcILOwt5g+dH4KnkonCtkVJsa2G6JmvbbtZfBEI1gMsO3QMjseL4F/SwfAMt1Vc/0XKYKq+xJ1sw==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-primitive/-/react-primitive-2.1.3.tgz", + "integrity": "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==", "license": "MIT", "dependencies": { - "@radix-ui/react-slot": "1.2.0" + "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", @@ -1412,13 +1519,13 @@ } }, "node_modules/@radix-ui/react-progress": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-progress/-/react-progress-1.1.4.tgz", - "integrity": "sha512-8rl9w7lJdcVPor47Dhws9mUHRHLE+8JEgyJRdNWCpGPa6HIlr3eh+Yn9gyx1CnCLbw5naHsI2gaO9dBWO50vzw==", + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-progress/-/react-progress-1.1.7.tgz", + "integrity": "sha512-vPdg/tF6YC/ynuBIJlk1mm7Le0VgW6ub6J2UWnTQ7/D23KXcPI1qy+0vBkgKgd38RCMJavBXpB83HPNFMTb0Fg==", "license": "MIT", "dependencies": { "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-primitive": "2.1.0" + "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", @@ -1436,9 +1543,9 @@ } }, "node_modules/@radix-ui/react-radio-group": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-radio-group/-/react-radio-group-1.3.4.tgz", - "integrity": "sha512-N4J9QFdW5zcJNxxY/zwTXBN4Uc5VEuRM7ZLjNfnWoKmNvgrPtNNw4P8zY532O3qL6aPkaNO+gY9y6bfzmH4U1g==", + "version": "1.3.7", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-radio-group/-/react-radio-group-1.3.7.tgz", + "integrity": "sha512-9w5XhD0KPOrm92OTTE0SysH3sYzHsSTHNvZgUBo/VZ80VdYyB5RneDbc0dKpURS24IxkoFRu/hI0i4XyfFwY6g==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", @@ -1446,8 +1553,8 @@ "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-presence": "1.1.4", - "@radix-ui/react-primitive": "2.1.0", - "@radix-ui/react-roving-focus": "1.1.7", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-use-size": "1.1.1" @@ -1468,18 +1575,18 @@ } }, "node_modules/@radix-ui/react-roving-focus": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.7.tgz", - "integrity": "sha512-C6oAg451/fQT3EGbWHbCQjYTtbyjNO1uzQgMzwyivcHT3GKNEmu1q3UuREhN+HzHAVtv3ivMVK08QlC+PkYw9Q==", + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/@radix-ui/react-roving-focus/-/react-roving-focus-1.1.10.tgz", + "integrity": "sha512-dT9aOXUen9JSsxnMPv/0VqySQf5eDQ6LCk5Sw28kamz8wSOW2bJdlX2Bg5VUIIcV+6XlHpWTIuTPCf/UNIyq8Q==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", - "@radix-ui/react-collection": "1.1.4", + "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2" }, @@ -1499,9 +1606,9 @@ } }, "node_modules/@radix-ui/react-scroll-area": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/@radix-ui/react-scroll-area/-/react-scroll-area-1.2.6.tgz", - "integrity": "sha512-lj8OMlpPERXrQIHlEQdlXHJoRT52AMpBrgyPYylOhXYq5e/glsEdtOc/kCQlsTdtgN5U0iDbrrolDadvektJGQ==", + "version": "1.2.9", + "resolved": "https://registry.npmjs.org/@radix-ui/react-scroll-area/-/react-scroll-area-1.2.9.tgz", + "integrity": "sha512-YSjEfBXnhUELsO2VzjdtYYD4CfQjvao+lhhrX5XsHD7/cyUNzljF1FHEbgTPN7LH2MClfwRMIsYlqTYpKTTe2A==", "license": "MIT", "dependencies": { "@radix-ui/number": "1.1.1", @@ -1510,7 +1617,7 @@ "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-presence": "1.1.4", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-layout-effect": "1.1.1" }, @@ -1530,30 +1637,30 @@ } }, "node_modules/@radix-ui/react-select": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.2.2.tgz", - "integrity": "sha512-HjkVHtBkuq+r3zUAZ/CvNWUGKPfuicGDbgtZgiQuFmNcV5F+Tgy24ep2nsAW2nFgvhGPJVqeBZa6KyVN0EyrBA==", + "version": "2.2.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-select/-/react-select-2.2.5.tgz", + "integrity": "sha512-HnMTdXEVuuyzx63ME0ut4+sEMYW6oouHWNGUZc7ddvUWIcfCva/AMoqEW/3wnEllriMWBa0RHspCYnfCWJQYmA==", "license": "MIT", "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.2", - "@radix-ui/react-collection": "1.1.4", + "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", - "@radix-ui/react-dismissable-layer": "1.1.7", + 
"@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-focus-guards": "1.1.2", - "@radix-ui/react-focus-scope": "1.1.4", + "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", - "@radix-ui/react-popper": "1.2.4", - "@radix-ui/react-portal": "1.1.6", - "@radix-ui/react-primitive": "2.1.0", - "@radix-ui/react-slot": "1.2.0", + "@radix-ui/react-popper": "1.2.7", + "@radix-ui/react-portal": "1.1.9", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-previous": "1.1.1", - "@radix-ui/react-visually-hidden": "1.2.0", + "@radix-ui/react-visually-hidden": "1.2.3", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, @@ -1573,12 +1680,12 @@ } }, "node_modules/@radix-ui/react-separator": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.4.tgz", - "integrity": "sha512-2fTm6PSiUm8YPq9W0E4reYuv01EE3aFSzt8edBiXqPHshF8N9+Kymt/k0/R+F3dkY5lQyB/zPtrP82phskLi7w==", + "version": "1.1.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.7.tgz", + "integrity": "sha512-0HEb8R9E8A+jZjvmFCy/J4xhbXy3TV+9XSnGJ3KvTtjlIUy/YQ/p6UYZvi7YbeoeXdyU9+Y3scizK6hkY37baA==", "license": "MIT", "dependencies": { - "@radix-ui/react-primitive": "2.1.0" + "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", @@ -1596,18 +1703,18 @@ } }, "node_modules/@radix-ui/react-slider": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slider/-/react-slider-1.3.2.tgz", - "integrity": "sha512-oQnqfgSiYkxZ1MrF6672jw2/zZvpB+PJsrIc3Zm1zof1JHf/kj7WhmROw7JahLfOwYQ5/+Ip0rFORgF1tjSiaQ==", + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slider/-/react-slider-1.3.5.tgz", + "integrity": "sha512-rkfe2pU2NBAYfGaxa3Mqosi7VZEWX5CxKaanRv0vZd4Zhl9fvQrg0VM93dv3xGLGfrHuoTRF3JXH8nb9g+B3fw==", "license": "MIT", "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.2", - "@radix-ui/react-collection": "1.1.4", + "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-previous": "1.1.1", @@ -1629,9 +1736,9 @@ } }, "node_modules/@radix-ui/react-slot": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.0.tgz", - "integrity": "sha512-ujc+V6r0HNDviYqIK3rW4ffgYiZ8g5DEHrGJVk4x7kTlLXRDILnKX9vAUYeIsLOoDpDJ0ujpqMkjH4w2ofuo6w==", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz", + "integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==", "license": "MIT", "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" @@ -1647,15 +1754,15 @@ } }, "node_modules/@radix-ui/react-switch": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-switch/-/react-switch-1.2.2.tgz", - "integrity": "sha512-7Z8n6L+ifMIIYZ83f28qWSceUpkXuslI2FJ34+kDMTiyj91ENdpdQ7VCidrzj5JfwfZTeano/BnGBbu/jqa5rQ==", + "version": "1.2.5", + "resolved": 
"https://registry.npmjs.org/@radix-ui/react-switch/-/react-switch-1.2.5.tgz", + "integrity": "sha512-5ijLkak6ZMylXsaImpZ8u4Rlf5grRmoc0p0QeX9VJtlrM4f5m3nCTX8tWga/zOA8PZYIR/t0p2Mnvd7InrJ6yQ==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-use-size": "1.1.1" @@ -1676,9 +1783,9 @@ } }, "node_modules/@radix-ui/react-tabs": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/@radix-ui/react-tabs/-/react-tabs-1.1.9.tgz", - "integrity": "sha512-KIjtwciYvquiW/wAFkELZCVnaNLBsYNhTNcvl+zfMAbMhRkcvNuCLXDDd22L0j7tagpzVh/QwbFpwAATg7ILPw==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tabs/-/react-tabs-1.1.12.tgz", + "integrity": "sha512-GTVAlRVrQrSw3cEARM0nAx73ixrWDPNZAruETn3oHCNP6SbZ/hNxdxp+u7VkIEv3/sFoLq1PfcHrl7Pnp0CDpw==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", @@ -1686,8 +1793,8 @@ "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-presence": "1.1.4", - "@radix-ui/react-primitive": "2.1.0", - "@radix-ui/react-roving-focus": "1.1.7", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { @@ -1706,23 +1813,23 @@ } }, "node_modules/@radix-ui/react-toast": { - "version": "1.2.11", - "resolved": "https://registry.npmjs.org/@radix-ui/react-toast/-/react-toast-1.2.11.tgz", - "integrity": "sha512-Ed2mlOmT+tktOsu2NZBK1bCSHh/uqULu1vWOkpQTVq53EoOuZUZw7FInQoDB3uil5wZc2oe0XN9a7uVZB7/6AQ==", + "version": "1.2.14", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toast/-/react-toast-1.2.14.tgz", + "integrity": "sha512-nAP5FBxBJGQ/YfUB+r+O6USFVkWq3gAInkxyEnmvEV5jtSbfDhfa4hwX8CraCnbjMLsE7XSf/K75l9xXY7joWg==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", - "@radix-ui/react-collection": "1.1.4", + "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-dismissable-layer": "1.1.7", - "@radix-ui/react-portal": "1.1.6", + "@radix-ui/react-dismissable-layer": "1.1.10", + "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", - "@radix-ui/react-visually-hidden": "1.2.0" + "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", @@ -1740,13 +1847,13 @@ } }, "node_modules/@radix-ui/react-toggle": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/@radix-ui/react-toggle/-/react-toggle-1.1.6.tgz", - "integrity": "sha512-3SeJxKeO3TO1zVw1Nl++Cp0krYk6zHDHMCUXXVkosIzl6Nxcvb07EerQpyD2wXQSJ5RZajrYAmPaydU8Hk1IyQ==", + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toggle/-/react-toggle-1.1.9.tgz", + "integrity": "sha512-ZoFkBBz9zv9GWer7wIjvdRxmh2wyc2oKWw6C6CseWd6/yq1DK/l5lJ+wnsmFwJZbBYqr02mrf8A2q/CVCuM3ZA==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", - "@radix-ui/react-primitive": "2.1.0", + "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, 
"peerDependencies": { @@ -1765,17 +1872,17 @@ } }, "node_modules/@radix-ui/react-toggle-group": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/@radix-ui/react-toggle-group/-/react-toggle-group-1.1.7.tgz", - "integrity": "sha512-GRaPJhxrRSOqAcmcX3MwRL/SZACkoYdmoY9/sg7Bd5DhBYsB2t4co0NxTvVW8H7jUmieQDQwRtUlZ5Ta8UbgJA==", + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toggle-group/-/react-toggle-group-1.1.10.tgz", + "integrity": "sha512-kiU694Km3WFLTC75DdqgM/3Jauf3rD9wxeS9XtyWFKsBUeZA337lC+6uUazT7I1DhanZ5gyD5Stf8uf2dbQxOQ==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", - "@radix-ui/react-primitive": "2.1.0", - "@radix-ui/react-roving-focus": "1.1.7", - "@radix-ui/react-toggle": "1.1.6", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.10", + "@radix-ui/react-toggle": "1.1.9", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { @@ -1794,18 +1901,18 @@ } }, "node_modules/@radix-ui/react-toolbar": { - "version": "1.1.7", - "resolved": "https://registry.npmjs.org/@radix-ui/react-toolbar/-/react-toolbar-1.1.7.tgz", - "integrity": "sha512-cL/3snRskM0f955waP+m4Pmr8+QOPpPsfoY5kM06k7eWP41diOcyjLEqSxpd/K9S7fpsV66yq4R6yN2sMwXc6Q==", + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/@radix-ui/react-toolbar/-/react-toolbar-1.1.10.tgz", + "integrity": "sha512-jiwQsduEL++M4YBIurjSa+voD86OIytCod0/dbIxFZDLD8NfO1//keXYMfsW8BPcfqwoNjt+y06XcJqAb4KR7A==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", - "@radix-ui/react-primitive": "2.1.0", - "@radix-ui/react-roving-focus": "1.1.7", - "@radix-ui/react-separator": "1.1.4", - "@radix-ui/react-toggle-group": "1.1.7" + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-roving-focus": "1.1.10", + "@radix-ui/react-separator": "1.1.7", + "@radix-ui/react-toggle-group": "1.1.10" }, "peerDependencies": { "@types/react": "*", @@ -1823,23 +1930,23 @@ } }, "node_modules/@radix-ui/react-tooltip": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.2.4.tgz", - "integrity": "sha512-DyW8VVeeMSSLFvAmnVnCwvI3H+1tpJFHT50r+tdOoMse9XqYDBCcyux8u3G2y+LOpt7fPQ6KKH0mhs+ce1+Z5w==", + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.2.7.tgz", + "integrity": "sha512-Ap+fNYwKTYJ9pzqW+Xe2HtMRbQ/EeWkj2qykZ6SuEV4iS/o1bZI5ssJbk4D2r8XuDuOBVz/tIx2JObtuqU+5Zw==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-dismissable-layer": "1.1.7", + "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-id": "1.1.1", - "@radix-ui/react-popper": "1.2.4", - "@radix-ui/react-portal": "1.1.6", + "@radix-ui/react-popper": "1.2.7", + "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", - "@radix-ui/react-primitive": "2.1.0", - "@radix-ui/react-slot": "1.2.0", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", - "@radix-ui/react-visually-hidden": "1.2.0" + "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", @@ -2011,12 +2118,12 @@ } }, "node_modules/@radix-ui/react-visually-hidden": { - "version": "1.2.0", - "resolved": 
"https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.2.0.tgz", - "integrity": "sha512-rQj0aAWOpCdCMRbI6pLQm8r7S2BM3YhTa0SzOYD55k+hJA8oo9J+H+9wLM9oMlZWOX/wJWPTzfDfmZkf7LvCfg==", + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-visually-hidden/-/react-visually-hidden-1.2.3.tgz", + "integrity": "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==", "license": "MIT", "dependencies": { - "@radix-ui/react-primitive": "2.1.0" + "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", @@ -2109,46 +2216,54 @@ } }, "node_modules/@tailwindcss/node": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.4.tgz", - "integrity": "sha512-MT5118zaiO6x6hNA04OWInuAiP1YISXql8Z+/Y8iisV5nuhM8VXlyhRuqc2PEviPszcXI66W44bCIk500Oolhw==", + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.8.tgz", + "integrity": "sha512-OWwBsbC9BFAJelmnNcrKuf+bka2ZxCE2A4Ft53Tkg4uoiE67r/PMEYwCsourC26E+kmxfwE0hVzMdxqeW+xu7Q==", "dev": true, "license": "MIT", "dependencies": { + "@ampproject/remapping": "^2.3.0", "enhanced-resolve": "^5.18.1", "jiti": "^2.4.2", - "lightningcss": "1.29.2", - "tailwindcss": "4.1.4" + "lightningcss": "1.30.1", + "magic-string": "^0.30.17", + "source-map-js": "^1.2.1", + "tailwindcss": "4.1.8" } }, "node_modules/@tailwindcss/oxide": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.4.tgz", - "integrity": "sha512-p5wOpXyOJx7mKh5MXh5oKk+kqcz8T+bA3z/5VWWeQwFrmuBItGwz8Y2CHk/sJ+dNb9B0nYFfn0rj/cKHZyjahQ==", + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.8.tgz", + "integrity": "sha512-d7qvv9PsM5N3VNKhwVUhpK6r4h9wtLkJ6lz9ZY9aeZgrUWk1Z8VPyqyDT9MZlem7GTGseRQHkeB1j3tC7W1P+A==", "dev": true, + "hasInstallScript": true, "license": "MIT", + "dependencies": { + "detect-libc": "^2.0.4", + "tar": "^7.4.3" + }, "engines": { "node": ">= 10" }, "optionalDependencies": { - "@tailwindcss/oxide-android-arm64": "4.1.4", - "@tailwindcss/oxide-darwin-arm64": "4.1.4", - "@tailwindcss/oxide-darwin-x64": "4.1.4", - "@tailwindcss/oxide-freebsd-x64": "4.1.4", - "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.4", - "@tailwindcss/oxide-linux-arm64-gnu": "4.1.4", - "@tailwindcss/oxide-linux-arm64-musl": "4.1.4", - "@tailwindcss/oxide-linux-x64-gnu": "4.1.4", - "@tailwindcss/oxide-linux-x64-musl": "4.1.4", - "@tailwindcss/oxide-wasm32-wasi": "4.1.4", - "@tailwindcss/oxide-win32-arm64-msvc": "4.1.4", - "@tailwindcss/oxide-win32-x64-msvc": "4.1.4" + "@tailwindcss/oxide-android-arm64": "4.1.8", + "@tailwindcss/oxide-darwin-arm64": "4.1.8", + "@tailwindcss/oxide-darwin-x64": "4.1.8", + "@tailwindcss/oxide-freebsd-x64": "4.1.8", + "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.8", + "@tailwindcss/oxide-linux-arm64-gnu": "4.1.8", + "@tailwindcss/oxide-linux-arm64-musl": "4.1.8", + "@tailwindcss/oxide-linux-x64-gnu": "4.1.8", + "@tailwindcss/oxide-linux-x64-musl": "4.1.8", + "@tailwindcss/oxide-wasm32-wasi": "4.1.8", + "@tailwindcss/oxide-win32-arm64-msvc": "4.1.8", + "@tailwindcss/oxide-win32-x64-msvc": "4.1.8" } }, "node_modules/@tailwindcss/oxide-android-arm64": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.4.tgz", - "integrity": "sha512-xMMAe/SaCN/vHfQYui3fqaBDEXMu22BVwQ33veLc8ep+DNy7CWN52L+TTG9y1K397w9nkzv+Mw+mZWISiqhmlA==", + "version": "4.1.8", + "resolved": 
"https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.8.tgz", + "integrity": "sha512-Fbz7qni62uKYceWYvUjRqhGfZKwhZDQhlrJKGtnZfuNtHFqa8wmr+Wn74CTWERiW2hn3mN5gTpOoxWKk0jRxjg==", "cpu": [ "arm64" ], @@ -2163,9 +2278,9 @@ } }, "node_modules/@tailwindcss/oxide-darwin-arm64": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.4.tgz", - "integrity": "sha512-JGRj0SYFuDuAGilWFBlshcexev2hOKfNkoX+0QTksKYq2zgF9VY/vVMq9m8IObYnLna0Xlg+ytCi2FN2rOL0Sg==", + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.8.tgz", + "integrity": "sha512-RdRvedGsT0vwVVDztvyXhKpsU2ark/BjgG0huo4+2BluxdXo8NDgzl77qh0T1nUxmM11eXwR8jA39ibvSTbi7A==", "cpu": [ "arm64" ], @@ -2180,9 +2295,9 @@ } }, "node_modules/@tailwindcss/oxide-darwin-x64": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.4.tgz", - "integrity": "sha512-sdDeLNvs3cYeWsEJ4H1DvjOzaGios4QbBTNLVLVs0XQ0V95bffT3+scptzYGPMjm7xv4+qMhCDrkHwhnUySEzA==", + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.8.tgz", + "integrity": "sha512-t6PgxjEMLp5Ovf7uMb2OFmb3kqzVTPPakWpBIFzppk4JE4ix0yEtbtSjPbU8+PZETpaYMtXvss2Sdkx8Vs4XRw==", "cpu": [ "x64" ], @@ -2197,9 +2312,9 @@ } }, "node_modules/@tailwindcss/oxide-freebsd-x64": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.4.tgz", - "integrity": "sha512-VHxAqxqdghM83HslPhRsNhHo91McsxRJaEnShJOMu8mHmEj9Ig7ToHJtDukkuLWLzLboh2XSjq/0zO6wgvykNA==", + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.8.tgz", + "integrity": "sha512-g8C8eGEyhHTqwPStSwZNSrOlyx0bhK/V/+zX0Y+n7DoRUzyS8eMbVshVOLJTDDC+Qn9IJnilYbIKzpB9n4aBsg==", "cpu": [ "x64" ], @@ -2214,9 +2329,9 @@ } }, "node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.4.tgz", - "integrity": "sha512-OTU/m/eV4gQKxy9r5acuesqaymyeSCnsx1cFto/I1WhPmi5HDxX1nkzb8KYBiwkHIGg7CTfo/AcGzoXAJBxLfg==", + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.8.tgz", + "integrity": "sha512-Jmzr3FA4S2tHhaC6yCjac3rGf7hG9R6Gf2z9i9JFcuyy0u79HfQsh/thifbYTF2ic82KJovKKkIB6Z9TdNhCXQ==", "cpu": [ "arm" ], @@ -2231,9 +2346,9 @@ } }, "node_modules/@tailwindcss/oxide-linux-arm64-gnu": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.4.tgz", - "integrity": "sha512-hKlLNvbmUC6z5g/J4H+Zx7f7w15whSVImokLPmP6ff1QqTVE+TxUM9PGuNsjHvkvlHUtGTdDnOvGNSEUiXI1Ww==", + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.8.tgz", + "integrity": "sha512-qq7jXtO1+UEtCmCeBBIRDrPFIVI4ilEQ97qgBGdwXAARrUqSn/L9fUrkb1XP/mvVtoVeR2bt/0L77xx53bPZ/Q==", "cpu": [ "arm64" ], @@ -2248,9 +2363,9 @@ } }, "node_modules/@tailwindcss/oxide-linux-arm64-musl": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.4.tgz", - "integrity": "sha512-X3As2xhtgPTY/m5edUtddmZ8rCruvBvtxYLMw9OsZdH01L2gS2icsHRwxdU0dMItNfVmrBezueXZCHxVeeb7Aw==", + "version": "4.1.8", + "resolved": 
"https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.8.tgz", + "integrity": "sha512-O6b8QesPbJCRshsNApsOIpzKt3ztG35gfX9tEf4arD7mwNinsoCKxkj8TgEE0YRjmjtO3r9FlJnT/ENd9EVefQ==", "cpu": [ "arm64" ], @@ -2265,9 +2380,9 @@ } }, "node_modules/@tailwindcss/oxide-linux-x64-gnu": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.4.tgz", - "integrity": "sha512-2VG4DqhGaDSmYIu6C4ua2vSLXnJsb/C9liej7TuSO04NK+JJJgJucDUgmX6sn7Gw3Cs5ZJ9ZLrnI0QRDOjLfNQ==", + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.8.tgz", + "integrity": "sha512-32iEXX/pXwikshNOGnERAFwFSfiltmijMIAbUhnNyjFr3tmWmMJWQKU2vNcFX0DACSXJ3ZWcSkzNbaKTdngH6g==", "cpu": [ "x64" ], @@ -2282,9 +2397,9 @@ } }, "node_modules/@tailwindcss/oxide-linux-x64-musl": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.4.tgz", - "integrity": "sha512-v+mxVgH2kmur/X5Mdrz9m7TsoVjbdYQT0b4Z+dr+I4RvreCNXyCFELZL/DO0M1RsidZTrm6O1eMnV6zlgEzTMQ==", + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.8.tgz", + "integrity": "sha512-s+VSSD+TfZeMEsCaFaHTaY5YNj3Dri8rST09gMvYQKwPphacRG7wbuQ5ZJMIJXN/puxPcg/nU+ucvWguPpvBDg==", "cpu": [ "x64" ], @@ -2299,9 +2414,9 @@ } }, "node_modules/@tailwindcss/oxide-wasm32-wasi": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.1.4.tgz", - "integrity": "sha512-2TLe9ir+9esCf6Wm+lLWTMbgklIjiF0pbmDnwmhR9MksVOq+e8aP3TSsXySnBDDvTTVd/vKu1aNttEGj3P6l8Q==", + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.1.8.tgz", + "integrity": "sha512-CXBPVFkpDjM67sS1psWohZ6g/2/cd+cq56vPxK4JeawelxwK4YECgl9Y9TjkE2qfF+9/s1tHHJqrC4SS6cVvSg==", "bundleDependencies": [ "@napi-rs/wasm-runtime", "@emnapi/core", @@ -2317,10 +2432,10 @@ "license": "MIT", "optional": true, "dependencies": { - "@emnapi/core": "^1.4.0", - "@emnapi/runtime": "^1.4.0", - "@emnapi/wasi-threads": "^1.0.1", - "@napi-rs/wasm-runtime": "^0.2.8", + "@emnapi/core": "^1.4.3", + "@emnapi/runtime": "^1.4.3", + "@emnapi/wasi-threads": "^1.0.2", + "@napi-rs/wasm-runtime": "^0.2.10", "@tybys/wasm-util": "^0.9.0", "tslib": "^2.8.0" }, @@ -2329,9 +2444,9 @@ } }, "node_modules/@tailwindcss/oxide-win32-arm64-msvc": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.4.tgz", - "integrity": "sha512-VlnhfilPlO0ltxW9/BgfLI5547PYzqBMPIzRrk4W7uupgCt8z6Trw/tAj6QUtF2om+1MH281Pg+HHUJoLesmng==", + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.8.tgz", + "integrity": "sha512-7GmYk1n28teDHUjPlIx4Z6Z4hHEgvP5ZW2QS9ygnDAdI/myh3HTHjDqtSqgu1BpRoI4OiLx+fThAyA1JePoENA==", "cpu": [ "arm64" ], @@ -2346,9 +2461,9 @@ } }, "node_modules/@tailwindcss/oxide-win32-x64-msvc": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.4.tgz", - "integrity": "sha512-+7S63t5zhYjslUGb8NcgLpFXD+Kq1F/zt5Xv5qTv7HaFTG/DHyHD9GA6ieNAxhgyA4IcKa/zy7Xx4Oad2/wuhw==", + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.8.tgz", + "integrity": 
"sha512-fou+U20j+Jl0EHwK92spoWISON2OBnCazIc038Xj2TdweYV33ZRkS9nwqiUi2d/Wba5xg5UoHfvynnb/UB49cQ==", "cpu": [ "x64" ], @@ -2363,17 +2478,17 @@ } }, "node_modules/@tailwindcss/postcss": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/@tailwindcss/postcss/-/postcss-4.1.4.tgz", - "integrity": "sha512-bjV6sqycCEa+AQSt2Kr7wpGF1bOZJ5wsqnLEkqSbM/JEHxx/yhMH8wHmdkPyApF9xhHeMSwnnkDUUMMM/hYnXw==", + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/@tailwindcss/postcss/-/postcss-4.1.8.tgz", + "integrity": "sha512-vB/vlf7rIky+w94aWMw34bWW1ka6g6C3xIOdICKX2GC0VcLtL6fhlLiafF0DVIwa9V6EHz8kbWMkS2s2QvvNlw==", "dev": true, "license": "MIT", "dependencies": { "@alloc/quick-lru": "^5.2.0", - "@tailwindcss/node": "4.1.4", - "@tailwindcss/oxide": "4.1.4", + "@tailwindcss/node": "4.1.8", + "@tailwindcss/oxide": "4.1.8", "postcss": "^8.4.41", - "tailwindcss": "4.1.4" + "tailwindcss": "4.1.8" } }, "node_modules/@tybys/wasm-util": { @@ -2479,9 +2594,9 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "20.17.32", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.17.32.tgz", - "integrity": "sha512-zeMXFn8zQ+UkjK4ws0RiOC9EWByyW1CcVmLe+2rQocXRsGEDxUCwPEIVgpsGcLHS/P8JkT0oa3839BRABS0oPw==", + "version": "20.17.57", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.17.57.tgz", + "integrity": "sha512-f3T4y6VU4fVQDKVqJV4Uppy8c1p/sVvS3peyqxyWnzkqXFJLRU7Y1Bl7rMS1Qe9z0v4M6McY0Fp9yBsgHJUsWQ==", "dev": true, "license": "MIT", "dependencies": { @@ -2496,9 +2611,9 @@ "license": "MIT" }, "node_modules/@types/react": { - "version": "18.3.20", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.20.tgz", - "integrity": "sha512-IPaCZN7PShZK/3t6Q87pfTkRm6oLTd4vztyoj+cbHUF1g3FfVb2tFIL79uCRKEfv16AhqDMBywP2VW3KIZUvcg==", + "version": "18.3.23", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.23.tgz", + "integrity": "sha512-/LDXMQh55EzZQ0uVAZmKKhfENivEvWz6E+EYzh+/MCjMhNsotd+ZHhBGIjFDTi6+fz0OhQQQLbTgdQIxxCsC0w==", "devOptional": true, "license": "MIT", "dependencies": { @@ -2507,9 +2622,9 @@ } }, "node_modules/@types/react-dom": { - "version": "18.3.6", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.6.tgz", - "integrity": "sha512-nf22//wEbKXusP6E9pfOCDwFdHAX4u172eaJI4YkDRQEZiorm6KfYnSC2SWLDMVWUOWPERmJnN0ujeAfTBLvrw==", + "version": "18.3.7", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.7.tgz", + "integrity": "sha512-MEe3UeoENYVFXzoXEWsvcpg6ZvlrFNlOQ7EOsvhI3CfAXwzPfO8Qwuxd40nepsYKqyyVQnTdEfv68q91yLcKrQ==", "devOptional": true, "license": "MIT", "peerDependencies": { @@ -2727,9 +2842,9 @@ "license": "ISC" }, "node_modules/@unrs/resolver-binding-darwin-arm64": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-darwin-arm64/-/resolver-binding-darwin-arm64-1.7.2.tgz", - "integrity": "sha512-vxtBno4xvowwNmO/ASL0Y45TpHqmNkAaDtz4Jqb+clmcVSSl8XCG/PNFFkGsXXXS6AMjP+ja/TtNCFFa1QwLRg==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-darwin-arm64/-/resolver-binding-darwin-arm64-1.7.10.tgz", + "integrity": "sha512-ABsM3eEiL3yu903G0uxgvGAoIw011XjTzyEk//gGtuVY1PuXP2IJG6novd6DBjm7MaWmRV/CZFY1rWBXSlSVVw==", "cpu": [ "arm64" ], @@ -2741,9 +2856,9 @@ ] }, "node_modules/@unrs/resolver-binding-darwin-x64": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-darwin-x64/-/resolver-binding-darwin-x64-1.7.2.tgz", - "integrity": 
"sha512-qhVa8ozu92C23Hsmv0BF4+5Dyyd5STT1FolV4whNgbY6mj3kA0qsrGPe35zNR3wAN7eFict3s4Rc2dDTPBTuFQ==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-darwin-x64/-/resolver-binding-darwin-x64-1.7.10.tgz", + "integrity": "sha512-lGVWy4FQEDo/PuI1VQXaQCY0XUg4xUJilf3fQ8NY4wtsQTm9lbasbUYf3nkoma+O2/do90jQTqkb02S3meyTDg==", "cpu": [ "x64" ], @@ -2755,9 +2870,9 @@ ] }, "node_modules/@unrs/resolver-binding-freebsd-x64": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-freebsd-x64/-/resolver-binding-freebsd-x64-1.7.2.tgz", - "integrity": "sha512-zKKdm2uMXqLFX6Ac7K5ElnnG5VIXbDlFWzg4WJ8CGUedJryM5A3cTgHuGMw1+P5ziV8CRhnSEgOnurTI4vpHpg==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-freebsd-x64/-/resolver-binding-freebsd-x64-1.7.10.tgz", + "integrity": "sha512-g9XLCHzNGatY79JJNgxrUH6uAAfBDj2NWIlTnqQN5odwGKjyVfFZ5tFL1OxYPcxTHh384TY5lvTtF+fuEZNvBQ==", "cpu": [ "x64" ], @@ -2769,9 +2884,9 @@ ] }, "node_modules/@unrs/resolver-binding-linux-arm-gnueabihf": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm-gnueabihf/-/resolver-binding-linux-arm-gnueabihf-1.7.2.tgz", - "integrity": "sha512-8N1z1TbPnHH+iDS/42GJ0bMPLiGK+cUqOhNbMKtWJ4oFGzqSJk/zoXFzcQkgtI63qMcUI7wW1tq2usZQSb2jxw==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm-gnueabihf/-/resolver-binding-linux-arm-gnueabihf-1.7.10.tgz", + "integrity": "sha512-zV0ZMNy50sJFJapsjec8onyL9YREQKT88V8KwMoOA+zki/duFUP0oyTlbax1jGKdh8rQnruvW9VYkovGvdBAsw==", "cpu": [ "arm" ], @@ -2783,9 +2898,9 @@ ] }, "node_modules/@unrs/resolver-binding-linux-arm-musleabihf": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm-musleabihf/-/resolver-binding-linux-arm-musleabihf-1.7.2.tgz", - "integrity": "sha512-tjYzI9LcAXR9MYd9rO45m1s0B/6bJNuZ6jeOxo1pq1K6OBuRMMmfyvJYval3s9FPPGmrldYA3mi4gWDlWuTFGA==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm-musleabihf/-/resolver-binding-linux-arm-musleabihf-1.7.10.tgz", + "integrity": "sha512-jQxgb1DIDI7goyrabh4uvyWWBrFRfF+OOnS9SbF15h52g3Qjn/u8zG7wOQ0NjtcSMftzO75TITu9aHuI7FcqQQ==", "cpu": [ "arm" ], @@ -2797,9 +2912,9 @@ ] }, "node_modules/@unrs/resolver-binding-linux-arm64-gnu": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm64-gnu/-/resolver-binding-linux-arm64-gnu-1.7.2.tgz", - "integrity": "sha512-jon9M7DKRLGZ9VYSkFMflvNqu9hDtOCEnO2QAryFWgT6o6AXU8du56V7YqnaLKr6rAbZBWYsYpikF226v423QA==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm64-gnu/-/resolver-binding-linux-arm64-gnu-1.7.10.tgz", + "integrity": "sha512-9wVVlO6+aNlm90YWitwSI++HyCyBkzYCwMi7QbuGrTxDFm2pAgtpT0OEliaI7tLS8lAWYuDbzRRCJDgsdm6nwg==", "cpu": [ "arm64" ], @@ -2811,9 +2926,9 @@ ] }, "node_modules/@unrs/resolver-binding-linux-arm64-musl": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm64-musl/-/resolver-binding-linux-arm64-musl-1.7.2.tgz", - "integrity": "sha512-c8Cg4/h+kQ63pL43wBNaVMmOjXI/X62wQmru51qjfTvI7kmCy5uHTJvK/9LrF0G8Jdx8r34d019P1DVJmhXQpA==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-arm64-musl/-/resolver-binding-linux-arm64-musl-1.7.10.tgz", + "integrity": "sha512-FtFweORChdXOes0RAAyTZp6I4PodU2cZiSILAbGaEKDXp378UOumD2vaAkWHNxpsreQUKRxG5O1uq9EoV1NiVQ==", "cpu": 
[ "arm64" ], @@ -2825,9 +2940,9 @@ ] }, "node_modules/@unrs/resolver-binding-linux-ppc64-gnu": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-ppc64-gnu/-/resolver-binding-linux-ppc64-gnu-1.7.2.tgz", - "integrity": "sha512-A+lcwRFyrjeJmv3JJvhz5NbcCkLQL6Mk16kHTNm6/aGNc4FwPHPE4DR9DwuCvCnVHvF5IAd9U4VIs/VvVir5lg==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-ppc64-gnu/-/resolver-binding-linux-ppc64-gnu-1.7.10.tgz", + "integrity": "sha512-B+hOjpG2ncCR96a9d9ww1dWVuRVC2NChD0bITgrUhEWBhpdv2o/Mu2l8MsB2fzjdV/ku+twaQhr8iLHBoZafZQ==", "cpu": [ "ppc64" ], @@ -2839,9 +2954,9 @@ ] }, "node_modules/@unrs/resolver-binding-linux-riscv64-gnu": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-riscv64-gnu/-/resolver-binding-linux-riscv64-gnu-1.7.2.tgz", - "integrity": "sha512-hQQ4TJQrSQW8JlPm7tRpXN8OCNP9ez7PajJNjRD1ZTHQAy685OYqPrKjfaMw/8LiHCt8AZ74rfUVHP9vn0N69Q==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-riscv64-gnu/-/resolver-binding-linux-riscv64-gnu-1.7.10.tgz", + "integrity": "sha512-DS6jFDoQCFsnsdLXlj3z3THakQLBic63B6A0rpQ1kpkyKa3OzEfqhwRNVaywuUuOKP9bX55Jk2uqpvn/hGjKCg==", "cpu": [ "riscv64" ], @@ -2853,9 +2968,9 @@ ] }, "node_modules/@unrs/resolver-binding-linux-riscv64-musl": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-riscv64-musl/-/resolver-binding-linux-riscv64-musl-1.7.2.tgz", - "integrity": "sha512-NoAGbiqrxtY8kVooZ24i70CjLDlUFI7nDj3I9y54U94p+3kPxwd2L692YsdLa+cqQ0VoqMWoehDFp21PKRUoIQ==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-riscv64-musl/-/resolver-binding-linux-riscv64-musl-1.7.10.tgz", + "integrity": "sha512-A82SB6yEaA8EhIW2r0I7P+k5lg7zPscFnGs1Gna5rfPwoZjeUAGX76T55+DiyTiy08VFKUi79PGCulXnfjDq0g==", "cpu": [ "riscv64" ], @@ -2867,9 +2982,9 @@ ] }, "node_modules/@unrs/resolver-binding-linux-s390x-gnu": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-s390x-gnu/-/resolver-binding-linux-s390x-gnu-1.7.2.tgz", - "integrity": "sha512-KaZByo8xuQZbUhhreBTW+yUnOIHUsv04P8lKjQ5otiGoSJ17ISGYArc+4vKdLEpGaLbemGzr4ZeUbYQQsLWFjA==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-s390x-gnu/-/resolver-binding-linux-s390x-gnu-1.7.10.tgz", + "integrity": "sha512-J+VmOPH16U69QshCp9WS+Zuiuu9GWTISKchKIhLbS/6JSCEfw2A4N02whv2VmrkXE287xxZbhW1p6xlAXNzwqg==", "cpu": [ "s390x" ], @@ -2881,9 +2996,9 @@ ] }, "node_modules/@unrs/resolver-binding-linux-x64-gnu": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-x64-gnu/-/resolver-binding-linux-x64-gnu-1.7.2.tgz", - "integrity": "sha512-dEidzJDubxxhUCBJ/SHSMJD/9q7JkyfBMT77Px1npl4xpg9t0POLvnWywSk66BgZS/b2Hy9Y1yFaoMTFJUe9yg==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-x64-gnu/-/resolver-binding-linux-x64-gnu-1.7.10.tgz", + "integrity": "sha512-bYTdDltcB/V3fEqpx8YDwDw8ta9uEg8TUbJOtek6JM42u9ciJ7R/jBjNeAOs+QbyxGDd2d6xkBaGwty1HzOz3Q==", "cpu": [ "x64" ], @@ -2895,9 +3010,9 @@ ] }, "node_modules/@unrs/resolver-binding-linux-x64-musl": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-x64-musl/-/resolver-binding-linux-x64-musl-1.7.2.tgz", - "integrity": "sha512-RvP+Ux3wDjmnZDT4XWFfNBRVG0fMsc+yVzNFUqOflnDfZ9OYujv6nkh+GOr+watwrW4wdp6ASfG/e7bkDradsw==", + 
"version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-linux-x64-musl/-/resolver-binding-linux-x64-musl-1.7.10.tgz", + "integrity": "sha512-NYZ1GvSuTokJ28lqcjrMTnGMySoo4dVcNK/nsNCKCXT++1zekZtJaE+N+4jc1kR7EV0fc1OhRrOGcSt7FT9t8w==", "cpu": [ "x64" ], @@ -2909,9 +3024,9 @@ ] }, "node_modules/@unrs/resolver-binding-wasm32-wasi": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-wasm32-wasi/-/resolver-binding-wasm32-wasi-1.7.2.tgz", - "integrity": "sha512-y797JBmO9IsvXVRCKDXOxjyAE4+CcZpla2GSoBQ33TVb3ILXuFnMrbR/QQZoauBYeOFuu4w3ifWLw52sdHGz6g==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-wasm32-wasi/-/resolver-binding-wasm32-wasi-1.7.10.tgz", + "integrity": "sha512-MRjJhTaQzLoX8OtzRBQDJ84OJ8IX1FqpRAUSxp/JtPeak+fyDfhXaEjcA/fhfgrACUnvC+jWC52f/V6MixSKCQ==", "cpu": [ "wasm32" ], @@ -2919,16 +3034,16 @@ "license": "MIT", "optional": true, "dependencies": { - "@napi-rs/wasm-runtime": "^0.2.9" + "@napi-rs/wasm-runtime": "^0.2.10" }, "engines": { "node": ">=14.0.0" } }, "node_modules/@unrs/resolver-binding-win32-arm64-msvc": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-win32-arm64-msvc/-/resolver-binding-win32-arm64-msvc-1.7.2.tgz", - "integrity": "sha512-gtYTh4/VREVSLA+gHrfbWxaMO/00y+34htY7XpioBTy56YN2eBjkPrY1ML1Zys89X3RJDKVaogzwxlM1qU7egg==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-win32-arm64-msvc/-/resolver-binding-win32-arm64-msvc-1.7.10.tgz", + "integrity": "sha512-Cgw6qhdsfzXJnHb006CzqgaX8mD445x5FGKuueaLeH1ptCxDbzRs8wDm6VieOI7rdbstfYBaFtaYN7zBT5CUPg==", "cpu": [ "arm64" ], @@ -2940,9 +3055,9 @@ ] }, "node_modules/@unrs/resolver-binding-win32-ia32-msvc": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-win32-ia32-msvc/-/resolver-binding-win32-ia32-msvc-1.7.2.tgz", - "integrity": "sha512-Ywv20XHvHTDRQs12jd3MY8X5C8KLjDbg/jyaal/QLKx3fAShhJyD4blEANInsjxW3P7isHx1Blt56iUDDJO3jg==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-win32-ia32-msvc/-/resolver-binding-win32-ia32-msvc-1.7.10.tgz", + "integrity": "sha512-Z7oECyIT2/HsrWpJ6wi2b+lVbPmWqQHuW5zeatafoRXizk1+2wUl+aSop1PF58XcyBuwPP2YpEUUpMZ8ILV4fA==", "cpu": [ "ia32" ], @@ -2954,9 +3069,9 @@ ] }, "node_modules/@unrs/resolver-binding-win32-x64-msvc": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-win32-x64-msvc/-/resolver-binding-win32-x64-msvc-1.7.2.tgz", - "integrity": "sha512-friS8NEQfHaDbkThxopGk+LuE5v3iY0StruifjQEt7SLbA46OnfgMO15sOTkbpJkol6RB+1l1TYPXh0sCddpvA==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/@unrs/resolver-binding-win32-x64-msvc/-/resolver-binding-win32-x64-msvc-1.7.10.tgz", + "integrity": "sha512-DGAOo5asNvDsmFgwkb7xsgxNyN0If6XFYwDIC1QlRE7kEYWIMRChtWJyHDf30XmGovDNOs/37krxhnga/nm/4w==", "cpu": [ "x64" ], @@ -3070,9 +3185,9 @@ "license": "Python-2.0" }, "node_modules/aria-hidden": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.4.tgz", - "integrity": "sha512-y+CcFFwelSXpLZk/7fMB2mUbGtX9lKycf1MWJ7CaTIERyitVlyQx6C+sxcROU2BAJ24OiZyK+8wj2i8AlBoS3A==", + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/aria-hidden/-/aria-hidden-1.2.6.tgz", + "integrity": "sha512-ik3ZgC9dY/lYVVM++OISsaYDeg1tb0VtP5uL3ouh1koGOaUMDPpbFIei4JkFimWUFPn90sbMNMXQAIVOlnYKJA==", "license": "MIT", "dependencies": { "tslib": "^2.0.0" @@ -3109,18 +3224,20 @@ } }, 
"node_modules/array-includes": { - "version": "3.1.8", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.8.tgz", - "integrity": "sha512-itaWrbYbqpGXkGhZPGUulwnhVf5Hpy1xiCFsGqyIGglbBxmG5vSjxQen3/WGOjPpNEv1RtBLKxbmVXm8HpJStQ==", + "version": "3.1.9", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.9.tgz", + "integrity": "sha512-FmeCCAenzH0KH381SPT5FZmiA/TmpndpcaShhfgEN9eCVjnFBqq3l1xrI42y8+PPLI6hypzou4GXw00WHmPBLQ==", "dev": true, "license": "MIT", "dependencies": { - "call-bind": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", "define-properties": "^1.2.1", - "es-abstract": "^1.23.2", - "es-object-atoms": "^1.0.0", - "get-intrinsic": "^1.2.4", - "is-string": "^1.0.7" + "es-abstract": "^1.24.0", + "es-object-atoms": "^1.1.1", + "get-intrinsic": "^1.3.0", + "is-string": "^1.1.1", + "math-intrinsics": "^1.1.0" }, "engines": { "node": ">= 0.4" @@ -3431,9 +3548,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001715", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001715.tgz", - "integrity": "sha512-7ptkFGMm2OAOgvZpwgA4yjQ5SQbrNVGdRjzH0pBdy1Fasvcr+KAeECmbCAECzTuDuoX0FCY8KzUxjf9+9kfZEw==", + "version": "1.0.30001721", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001721.tgz", + "integrity": "sha512-cOuvmUVtKrtEaoKiO0rSc29jcjwMwX5tOHDy4MgVFEWiUXj4uBMJkwI8MDySkgXidpMiHUcviogAvFi4pA2hDQ==", "funding": [ { "type": "opencollective", @@ -3467,6 +3584,16 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "node_modules/chownr": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz", + "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, "node_modules/class-variance-authority": { "version": "0.7.1", "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.1.tgz", @@ -3819,9 +3946,9 @@ } }, "node_modules/debug": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", - "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", "dev": true, "license": "MIT", "dependencies": { @@ -3995,9 +4122,9 @@ } }, "node_modules/es-abstract": { - "version": "1.23.9", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.23.9.tgz", - "integrity": "sha512-py07lI0wjxAC/DcfK1S6G7iANonniZwTISvdPzk9hzeH0IZIshbuuFxLIU96OyF89Yb9hiqWn8M/bY83KY5vzA==", + "version": "1.24.0", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.24.0.tgz", + "integrity": "sha512-WSzPgsdLtTcQwm4CROfS5ju2Wa1QQcVeT37jFjYzdFz1r9ahadC8B8/a4qxJxM+09F18iumCdRmlr96ZYkQvEg==", "dev": true, "license": "MIT", "dependencies": { @@ -4005,18 +4132,18 @@ "arraybuffer.prototype.slice": "^1.0.4", "available-typed-arrays": "^1.0.7", "call-bind": "^1.0.8", - "call-bound": "^1.0.3", + "call-bound": "^1.0.4", "data-view-buffer": "^1.0.2", "data-view-byte-length": "^1.0.2", "data-view-byte-offset": "^1.0.1", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", - "es-object-atoms": "^1.0.0", + "es-object-atoms": "^1.1.1", "es-set-tostringtag": "^2.1.0", "es-to-primitive": "^1.3.0", 
"function.prototype.name": "^1.1.8", - "get-intrinsic": "^1.2.7", - "get-proto": "^1.0.0", + "get-intrinsic": "^1.3.0", + "get-proto": "^1.0.1", "get-symbol-description": "^1.1.0", "globalthis": "^1.0.4", "gopd": "^1.2.0", @@ -4028,21 +4155,24 @@ "is-array-buffer": "^3.0.5", "is-callable": "^1.2.7", "is-data-view": "^1.0.2", + "is-negative-zero": "^2.0.3", "is-regex": "^1.2.1", + "is-set": "^2.0.3", "is-shared-array-buffer": "^1.0.4", "is-string": "^1.1.1", "is-typed-array": "^1.1.15", - "is-weakref": "^1.1.0", + "is-weakref": "^1.1.1", "math-intrinsics": "^1.1.0", - "object-inspect": "^1.13.3", + "object-inspect": "^1.13.4", "object-keys": "^1.1.1", "object.assign": "^4.1.7", "own-keys": "^1.0.1", - "regexp.prototype.flags": "^1.5.3", + "regexp.prototype.flags": "^1.5.4", "safe-array-concat": "^1.1.3", "safe-push-apply": "^1.0.0", "safe-regex-test": "^1.1.0", "set-proto": "^1.0.0", + "stop-iteration-iterator": "^1.1.0", "string.prototype.trim": "^1.2.10", "string.prototype.trimend": "^1.0.9", "string.prototype.trimstart": "^1.0.8", @@ -4051,7 +4181,7 @@ "typed-array-byte-offset": "^1.0.4", "typed-array-length": "^1.0.7", "unbox-primitive": "^1.1.0", - "which-typed-array": "^1.1.18" + "which-typed-array": "^1.1.19" }, "engines": { "node": ">= 0.4" @@ -4268,6 +4398,41 @@ } } }, + "node_modules/eslint-config-next/node_modules/eslint-import-resolver-typescript": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-3.10.1.tgz", + "integrity": "sha512-A1rHYb06zjMGAxdLSkN2fXPBwuSaQ0iO5M/hdyS0Ajj1VBaRp0sPD3dn1FhME3c/JluGFbwSxyCfqdSbtQLAHQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "@nolyfill/is-core-module": "1.0.39", + "debug": "^4.4.0", + "get-tsconfig": "^4.10.0", + "is-bun-module": "^2.0.0", + "stable-hash": "^0.0.5", + "tinyglobby": "^0.2.13", + "unrs-resolver": "^1.6.2" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint-import-resolver-typescript" + }, + "peerDependencies": { + "eslint": "*", + "eslint-plugin-import": "*", + "eslint-plugin-import-x": "*" + }, + "peerDependenciesMeta": { + "eslint-plugin-import": { + "optional": true + }, + "eslint-plugin-import-x": { + "optional": true + } + } + }, "node_modules/eslint-config-prettier": { "version": "8.10.0", "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.10.0.tgz", @@ -4303,41 +4468,6 @@ "ms": "^2.1.1" } }, - "node_modules/eslint-import-resolver-typescript": { - "version": "3.10.1", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-typescript/-/eslint-import-resolver-typescript-3.10.1.tgz", - "integrity": "sha512-A1rHYb06zjMGAxdLSkN2fXPBwuSaQ0iO5M/hdyS0Ajj1VBaRp0sPD3dn1FhME3c/JluGFbwSxyCfqdSbtQLAHQ==", - "dev": true, - "license": "ISC", - "dependencies": { - "@nolyfill/is-core-module": "1.0.39", - "debug": "^4.4.0", - "get-tsconfig": "^4.10.0", - "is-bun-module": "^2.0.0", - "stable-hash": "^0.0.5", - "tinyglobby": "^0.2.13", - "unrs-resolver": "^1.6.2" - }, - "engines": { - "node": "^14.18.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint-import-resolver-typescript" - }, - "peerDependencies": { - "eslint": "*", - "eslint-plugin-import": "*", - "eslint-plugin-import-x": "*" - }, - "peerDependenciesMeta": { - "eslint-plugin-import": { - "optional": true - }, - "eslint-plugin-import-x": { - "optional": true - } - } - }, "node_modules/eslint-module-utils": { "version": "2.12.0", "resolved": 
"https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.12.0.tgz", @@ -4924,14 +5054,15 @@ } }, "node_modules/form-data": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.2.tgz", - "integrity": "sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.3.tgz", + "integrity": "sha512-qsITQPfmvMOSAdeyZ+12I1c+CKSstAFAwu+97zrnWAbIr5u8wfsExUzCesVLC8NgHuRUqNN4Zy6UPWUTRGslcA==", "license": "MIT", "dependencies": { "asynckit": "^0.4.0", "combined-stream": "^1.0.8", "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", "mime-types": "^2.1.12" }, "engines": { @@ -5103,9 +5234,9 @@ } }, "node_modules/get-tsconfig": { - "version": "4.10.0", - "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.10.0.tgz", - "integrity": "sha512-kGzZ3LWWQcGIAmg6iWvXn0ei6WDtV26wzHRMwDSzmAbcXrTEXxHy6IehI6/4eT6VRKyMP1eF1VqwrVUmE/LR7A==", + "version": "4.10.1", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.10.1.tgz", + "integrity": "sha512-auHyJ4AgMz7vgS8Hp3N6HXSmlMdUyhSUrfBF16w153rxtLIEOE+HGqaBppczZvnHLqQJfiHotCYpNhl0lUROFQ==", "dev": true, "license": "MIT", "dependencies": { @@ -5682,6 +5813,19 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-negative-zero": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz", + "integrity": "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-number": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", @@ -6058,9 +6202,9 @@ } }, "node_modules/lightningcss": { - "version": "1.29.2", - "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.29.2.tgz", - "integrity": "sha512-6b6gd/RUXKaw5keVdSEtqFVdzWnU5jMxTUjA2bVcMNPLwSQ08Sv/UodBVtETLCn7k4S1Ibxwh7k68IwLZPgKaA==", + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.30.1.tgz", + "integrity": "sha512-xi6IyHML+c9+Q3W0S4fCQJOym42pyurFiJUHEcEyHS0CeKzia4yZDEsLlqOFykxOdHpNy0NmvVO31vcSqAxJCg==", "dev": true, "license": "MPL-2.0", "dependencies": { @@ -6074,22 +6218,22 @@ "url": "https://opencollective.com/parcel" }, "optionalDependencies": { - "lightningcss-darwin-arm64": "1.29.2", - "lightningcss-darwin-x64": "1.29.2", - "lightningcss-freebsd-x64": "1.29.2", - "lightningcss-linux-arm-gnueabihf": "1.29.2", - "lightningcss-linux-arm64-gnu": "1.29.2", - "lightningcss-linux-arm64-musl": "1.29.2", - "lightningcss-linux-x64-gnu": "1.29.2", - "lightningcss-linux-x64-musl": "1.29.2", - "lightningcss-win32-arm64-msvc": "1.29.2", - "lightningcss-win32-x64-msvc": "1.29.2" + "lightningcss-darwin-arm64": "1.30.1", + "lightningcss-darwin-x64": "1.30.1", + "lightningcss-freebsd-x64": "1.30.1", + "lightningcss-linux-arm-gnueabihf": "1.30.1", + "lightningcss-linux-arm64-gnu": "1.30.1", + "lightningcss-linux-arm64-musl": "1.30.1", + "lightningcss-linux-x64-gnu": "1.30.1", + "lightningcss-linux-x64-musl": "1.30.1", + "lightningcss-win32-arm64-msvc": "1.30.1", + "lightningcss-win32-x64-msvc": "1.30.1" } }, "node_modules/lightningcss-darwin-arm64": { - "version": "1.29.2", - "resolved": 
"https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.29.2.tgz", - "integrity": "sha512-cK/eMabSViKn/PG8U/a7aCorpeKLMlK0bQeNHmdb7qUnBkNPnL+oV5DjJUo0kqWsJUapZsM4jCfYItbqBDvlcA==", + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.30.1.tgz", + "integrity": "sha512-c8JK7hyE65X1MHMN+Viq9n11RRC7hgin3HhYKhrMyaXflk5GVplZ60IxyoVtzILeKr+xAJwg6zK6sjTBJ0FKYQ==", "cpu": [ "arm64" ], @@ -6108,9 +6252,9 @@ } }, "node_modules/lightningcss-darwin-x64": { - "version": "1.29.2", - "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.29.2.tgz", - "integrity": "sha512-j5qYxamyQw4kDXX5hnnCKMf3mLlHvG44f24Qyi2965/Ycz829MYqjrVg2H8BidybHBp9kom4D7DR5VqCKDXS0w==", + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.30.1.tgz", + "integrity": "sha512-k1EvjakfumAQoTfcXUcHQZhSpLlkAuEkdMBsI/ivWw9hL+7FtilQc0Cy3hrx0AAQrVtQAbMI7YjCgYgvn37PzA==", "cpu": [ "x64" ], @@ -6129,9 +6273,9 @@ } }, "node_modules/lightningcss-freebsd-x64": { - "version": "1.29.2", - "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.29.2.tgz", - "integrity": "sha512-wDk7M2tM78Ii8ek9YjnY8MjV5f5JN2qNVO+/0BAGZRvXKtQrBC4/cn4ssQIpKIPP44YXw6gFdpUF+Ps+RGsCwg==", + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.30.1.tgz", + "integrity": "sha512-kmW6UGCGg2PcyUE59K5r0kWfKPAVy4SltVeut+umLCFoJ53RdCUWxcRDzO1eTaxf/7Q2H7LTquFHPL5R+Gjyig==", "cpu": [ "x64" ], @@ -6150,9 +6294,9 @@ } }, "node_modules/lightningcss-linux-arm-gnueabihf": { - "version": "1.29.2", - "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.29.2.tgz", - "integrity": "sha512-IRUrOrAF2Z+KExdExe3Rz7NSTuuJ2HvCGlMKoquK5pjvo2JY4Rybr+NrKnq0U0hZnx5AnGsuFHjGnNT14w26sg==", + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.30.1.tgz", + "integrity": "sha512-MjxUShl1v8pit+6D/zSPq9S9dQ2NPFSQwGvxBCYaBYLPlCWuPh9/t1MRS8iUaR8i+a6w7aps+B4N0S1TYP/R+Q==", "cpu": [ "arm" ], @@ -6171,9 +6315,9 @@ } }, "node_modules/lightningcss-linux-arm64-gnu": { - "version": "1.29.2", - "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.29.2.tgz", - "integrity": "sha512-KKCpOlmhdjvUTX/mBuaKemp0oeDIBBLFiU5Fnqxh1/DZ4JPZi4evEH7TKoSBFOSOV3J7iEmmBaw/8dpiUvRKlQ==", + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.30.1.tgz", + "integrity": "sha512-gB72maP8rmrKsnKYy8XUuXi/4OctJiuQjcuqWNlJQ6jZiWqtPvqFziskH3hnajfvKB27ynbVCucKSm2rkQp4Bw==", "cpu": [ "arm64" ], @@ -6192,9 +6336,9 @@ } }, "node_modules/lightningcss-linux-arm64-musl": { - "version": "1.29.2", - "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.29.2.tgz", - "integrity": "sha512-Q64eM1bPlOOUgxFmoPUefqzY1yV3ctFPE6d/Vt7WzLW4rKTv7MyYNky+FWxRpLkNASTnKQUaiMJ87zNODIrrKQ==", + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.30.1.tgz", + "integrity": "sha512-jmUQVx4331m6LIX+0wUhBbmMX7TCfjF5FoOH6SD1CttzuYlGNVpA7QnrmLxrsub43ClTINfGSYyHe2HWeLl5CQ==", "cpu": [ "arm64" ], @@ -6213,9 +6357,9 @@ } }, "node_modules/lightningcss-linux-x64-gnu": { - "version": "1.29.2", - "resolved": 
"https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.29.2.tgz", - "integrity": "sha512-0v6idDCPG6epLXtBH/RPkHvYx74CVziHo6TMYga8O2EiQApnUPZsbR9nFNrg2cgBzk1AYqEd95TlrsL7nYABQg==", + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.30.1.tgz", + "integrity": "sha512-piWx3z4wN8J8z3+O5kO74+yr6ze/dKmPnI7vLqfSqI8bccaTGY5xiSGVIJBDd5K5BHlvVLpUB3S2YCfelyJ1bw==", "cpu": [ "x64" ], @@ -6234,9 +6378,9 @@ } }, "node_modules/lightningcss-linux-x64-musl": { - "version": "1.29.2", - "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.29.2.tgz", - "integrity": "sha512-rMpz2yawkgGT8RULc5S4WiZopVMOFWjiItBT7aSfDX4NQav6M44rhn5hjtkKzB+wMTRlLLqxkeYEtQ3dd9696w==", + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.30.1.tgz", + "integrity": "sha512-rRomAK7eIkL+tHY0YPxbc5Dra2gXlI63HL+v1Pdi1a3sC+tJTcFrHX+E86sulgAXeI7rSzDYhPSeHHjqFhqfeQ==", "cpu": [ "x64" ], @@ -6255,9 +6399,9 @@ } }, "node_modules/lightningcss-win32-arm64-msvc": { - "version": "1.29.2", - "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.29.2.tgz", - "integrity": "sha512-nL7zRW6evGQqYVu/bKGK+zShyz8OVzsCotFgc7judbt6wnB2KbiKKJwBE4SGoDBQ1O94RjW4asrCjQL4i8Fhbw==", + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.30.1.tgz", + "integrity": "sha512-mSL4rqPi4iXq5YVqzSsJgMVFENoa4nGTT/GjO2c0Yl9OuQfPsIfncvLrEW6RbbB24WtZ3xP/2CCmI3tNkNV4oA==", "cpu": [ "arm64" ], @@ -6276,9 +6420,9 @@ } }, "node_modules/lightningcss-win32-x64-msvc": { - "version": "1.29.2", - "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.29.2.tgz", - "integrity": "sha512-EdIUW3B2vLuHmv7urfzMI/h2fmlnOQBk1xlsDxkN1tCWKjNFjfLhGxYk8C8mzpSfr+A6jFFIi8fU6LbQGsRWjA==", + "version": "1.30.1", + "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.30.1.tgz", + "integrity": "sha512-PVqXh48wh4T53F/1CCu8PIPCxLzWyCnn/9T5W1Jpmdy5h9Cwd+0YQS6/LwhHXSafuc61/xg9Lv5OrCby6a++jg==", "cpu": [ "x64" ], @@ -6518,6 +6662,16 @@ "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0-rc" } }, + "node_modules/magic-string": { + "version": "0.30.17", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.17.tgz", + "integrity": "sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0" + } + }, "node_modules/math-intrinsics": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", @@ -6658,6 +6812,35 @@ "node": ">=16 || 14 >=14.17" } }, + "node_modules/minizlib": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.2.tgz", + "integrity": "sha512-oG62iEk+CYt5Xj2YqI5Xi9xWUeZhDI8jjQmC5oThVH5JGCTgIjr7ciJDzC7MBzYd//WvR1OTmP5Q38Q8ShQtVA==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^7.1.2" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/mkdirp": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz", + "integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==", + "dev": true, + "license": "MIT", + "bin": { + 
"mkdirp": "dist/cjs/src/bin.js" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/motion-dom": { "version": "11.18.1", "resolved": "https://registry.npmjs.org/motion-dom/-/motion-dom-11.18.1.tgz", @@ -6699,9 +6882,9 @@ } }, "node_modules/napi-postinstall": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/napi-postinstall/-/napi-postinstall-0.2.2.tgz", - "integrity": "sha512-Wy1VI/hpKHwy1MsnFxHCJxqFwmmxD0RA/EKPL7e6mfbsY01phM2SZyJnRdU0bLvhu0Quby1DCcAZti3ghdl4/A==", + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/napi-postinstall/-/napi-postinstall-0.2.4.tgz", + "integrity": "sha512-ZEzHJwBhZ8qQSbknHqYcdtQVr8zUgGyM/q6h6qAyhtyVMNrSgDhrC4disf03dYW0e+czXyLnZINnCTEkWy0eJg==", "dev": true, "license": "MIT", "bin": { @@ -6729,12 +6912,12 @@ "license": "MIT" }, "node_modules/next": { - "version": "14.2.28", - "resolved": "https://registry.npmjs.org/next/-/next-14.2.28.tgz", - "integrity": "sha512-QLEIP/kYXynIxtcKB6vNjtWLVs3Y4Sb+EClTC/CSVzdLD1gIuItccpu/n1lhmduffI32iPGEK2cLLxxt28qgYA==", + "version": "14.2.29", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.29.tgz", + "integrity": "sha512-s98mCOMOWLGGpGOfgKSnleXLuegvvH415qtRZXpSp00HeEgdmrxmwL9cgKU+h4XrhB16zEI5d/7BnkS3ATInsA==", "license": "MIT", "dependencies": { - "@next/env": "14.2.28", + "@next/env": "14.2.29", "@swc/helpers": "0.5.5", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", @@ -6749,15 +6932,15 @@ "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "14.2.28", - "@next/swc-darwin-x64": "14.2.28", - "@next/swc-linux-arm64-gnu": "14.2.28", - "@next/swc-linux-arm64-musl": "14.2.28", - "@next/swc-linux-x64-gnu": "14.2.28", - "@next/swc-linux-x64-musl": "14.2.28", - "@next/swc-win32-arm64-msvc": "14.2.28", - "@next/swc-win32-ia32-msvc": "14.2.28", - "@next/swc-win32-x64-msvc": "14.2.28" + "@next/swc-darwin-arm64": "14.2.29", + "@next/swc-darwin-x64": "14.2.29", + "@next/swc-linux-arm64-gnu": "14.2.29", + "@next/swc-linux-arm64-musl": "14.2.29", + "@next/swc-linux-x64-gnu": "14.2.29", + "@next/swc-linux-x64-musl": "14.2.29", + "@next/swc-win32-arm64-msvc": "14.2.29", + "@next/swc-win32-ia32-msvc": "14.2.29", + "@next/swc-win32-x64-msvc": "14.2.29" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", @@ -7195,9 +7378,9 @@ } }, "node_modules/postcss": { - "version": "8.5.3", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.3.tgz", - "integrity": "sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A==", + "version": "8.5.4", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.4.tgz", + "integrity": "sha512-QSa9EBe+uwlGTFmHsPKokv3B/oEMQZxfqW0QqNCyhpa6mB1afzulwn8hihglqAb2pOw+BJgNlmXQ8la2VeHB7w==", "dev": true, "funding": [ { @@ -7215,7 +7398,7 @@ ], "license": "MIT", "dependencies": { - "nanoid": "^3.3.8", + "nanoid": "^3.3.11", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" }, @@ -7311,57 +7494,58 @@ "license": "MIT" }, "node_modules/radix-ui": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/radix-ui/-/radix-ui-1.3.4.tgz", - "integrity": "sha512-uHJD4yRGjxbEWhkVU+w9d8d+X6HUlmbesHGsE9tRWKX62FqDD3Z3hfEtVS9W+DpZAPvKSCLfz03O7un8xZT3pg==", + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/radix-ui/-/radix-ui-1.4.2.tgz", + "integrity": "sha512-fT/3YFPJzf2WUpqDoQi005GS8EpCi+53VhcLaHUj5fwkPYiZAjk1mSxFvbMA8Uq71L03n+WysuYC+mlKkXxt/Q==", "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.2", - 
"@radix-ui/react-accessible-icon": "1.1.4", - "@radix-ui/react-accordion": "1.2.8", - "@radix-ui/react-alert-dialog": "1.1.11", - "@radix-ui/react-arrow": "1.1.4", - "@radix-ui/react-aspect-ratio": "1.1.4", - "@radix-ui/react-avatar": "1.1.7", - "@radix-ui/react-checkbox": "1.2.3", - "@radix-ui/react-collapsible": "1.1.8", - "@radix-ui/react-collection": "1.1.4", + "@radix-ui/react-accessible-icon": "1.1.7", + "@radix-ui/react-accordion": "1.2.11", + "@radix-ui/react-alert-dialog": "1.1.14", + "@radix-ui/react-arrow": "1.1.7", + "@radix-ui/react-aspect-ratio": "1.1.7", + "@radix-ui/react-avatar": "1.1.10", + "@radix-ui/react-checkbox": "1.3.2", + "@radix-ui/react-collapsible": "1.1.11", + "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", - "@radix-ui/react-context-menu": "2.2.12", - "@radix-ui/react-dialog": "1.1.11", + "@radix-ui/react-context-menu": "2.2.15", + "@radix-ui/react-dialog": "1.1.14", "@radix-ui/react-direction": "1.1.1", - "@radix-ui/react-dismissable-layer": "1.1.7", - "@radix-ui/react-dropdown-menu": "2.1.12", + "@radix-ui/react-dismissable-layer": "1.1.10", + "@radix-ui/react-dropdown-menu": "2.1.15", "@radix-ui/react-focus-guards": "1.1.2", - "@radix-ui/react-focus-scope": "1.1.4", - "@radix-ui/react-form": "0.1.4", - "@radix-ui/react-hover-card": "1.1.11", - "@radix-ui/react-label": "2.1.4", - "@radix-ui/react-menu": "2.1.12", - "@radix-ui/react-menubar": "1.1.12", - "@radix-ui/react-navigation-menu": "1.2.10", - "@radix-ui/react-one-time-password-field": "0.1.4", - "@radix-ui/react-popover": "1.1.11", - "@radix-ui/react-popper": "1.2.4", - "@radix-ui/react-portal": "1.1.6", + "@radix-ui/react-focus-scope": "1.1.7", + "@radix-ui/react-form": "0.1.7", + "@radix-ui/react-hover-card": "1.1.14", + "@radix-ui/react-label": "2.1.7", + "@radix-ui/react-menu": "2.1.15", + "@radix-ui/react-menubar": "1.1.15", + "@radix-ui/react-navigation-menu": "1.2.13", + "@radix-ui/react-one-time-password-field": "0.1.7", + "@radix-ui/react-password-toggle-field": "0.1.2", + "@radix-ui/react-popover": "1.1.14", + "@radix-ui/react-popper": "1.2.7", + "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", - "@radix-ui/react-primitive": "2.1.0", - "@radix-ui/react-progress": "1.1.4", - "@radix-ui/react-radio-group": "1.3.4", - "@radix-ui/react-roving-focus": "1.1.7", - "@radix-ui/react-scroll-area": "1.2.6", - "@radix-ui/react-select": "2.2.2", - "@radix-ui/react-separator": "1.1.4", - "@radix-ui/react-slider": "1.3.2", - "@radix-ui/react-slot": "1.2.0", - "@radix-ui/react-switch": "1.2.2", - "@radix-ui/react-tabs": "1.1.9", - "@radix-ui/react-toast": "1.2.11", - "@radix-ui/react-toggle": "1.1.6", - "@radix-ui/react-toggle-group": "1.1.7", - "@radix-ui/react-toolbar": "1.1.7", - "@radix-ui/react-tooltip": "1.2.4", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-progress": "1.1.7", + "@radix-ui/react-radio-group": "1.3.7", + "@radix-ui/react-roving-focus": "1.1.10", + "@radix-ui/react-scroll-area": "1.2.9", + "@radix-ui/react-select": "2.2.5", + "@radix-ui/react-separator": "1.1.7", + "@radix-ui/react-slider": "1.3.5", + "@radix-ui/react-slot": "1.2.3", + "@radix-ui/react-switch": "1.2.5", + "@radix-ui/react-tabs": "1.1.12", + "@radix-ui/react-toast": "1.2.14", + "@radix-ui/react-toggle": "1.1.9", + "@radix-ui/react-toggle-group": "1.1.10", + "@radix-ui/react-toolbar": "1.1.10", + "@radix-ui/react-tooltip": "1.2.7", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": 
"1.2.2", "@radix-ui/react-use-effect-event": "0.0.2", @@ -7369,7 +7553,7 @@ "@radix-ui/react-use-is-hydrated": "0.1.0", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-size": "1.1.1", - "@radix-ui/react-visually-hidden": "1.2.0" + "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", @@ -7412,9 +7596,9 @@ } }, "node_modules/react-hook-form": { - "version": "7.56.1", - "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.56.1.tgz", - "integrity": "sha512-qWAVokhSpshhcEuQDSANHx3jiAEFzu2HAaaQIzi/r9FNPm1ioAvuJSD4EuZzWd7Al7nTRKcKPnBKO7sRn+zavQ==", + "version": "7.57.0", + "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.57.0.tgz", + "integrity": "sha512-RbEks3+cbvTP84l/VXGUZ+JMrKOS8ykQCRYdm5aYsxnDquL0vspsyNhGRO7pcH6hsZqWlPOjLye7rJqdtdAmlg==", "license": "MIT", "engines": { "node": ">=18.0.0" @@ -7434,9 +7618,9 @@ "license": "MIT" }, "node_modules/react-remove-scroll": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.6.3.tgz", - "integrity": "sha512-pnAi91oOk8g8ABQKGF5/M9qxmmOPxaAnopyTHYfqYEwJhyFrbbBtHuSgtKEoH0jpcxx5o3hXqH1mNd9/Oi+8iQ==", + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.7.1.tgz", + "integrity": "sha512-HpMh8+oahmIdOuS5aFKKY6Pyog+FNaZV/XyJOq7b4YFwsFHe5yYfdbIalI4k3vU2nSDql7YskmUseHsRrJqIPA==", "license": "MIT", "dependencies": { "react-remove-scroll-bar": "^2.3.7", @@ -7600,12 +7784,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/regenerator-runtime": { - "version": "0.14.1", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", - "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==", - "license": "MIT" - }, "node_modules/regexp.prototype.flags": { "version": "1.5.4", "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.4.tgz", @@ -7870,9 +8048,9 @@ "license": "MIT" }, "node_modules/semver": { - "version": "7.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", - "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", "dev": true, "license": "ISC", "bin": { @@ -8099,6 +8277,20 @@ "dev": true, "license": "MIT" }, + "node_modules/stop-iteration-iterator": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.1.0.tgz", + "integrity": "sha512-eLoXW/DHyl62zxY4SCaIgnRhuMr6ri4juEYARS8E6sCEqzKpOiE521Ucofdx+KnDZl5xmvGYaaKCk5FEOxJCoQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "internal-slot": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/streamsearch": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", @@ -8433,9 +8625,9 @@ } }, "node_modules/tailwindcss": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.4.tgz", - "integrity": "sha512-1ZIUqtPITFbv/DxRmDr5/agPqJwF69d24m9qmM1939TJehgY539CtzeZRjbLt5G6fSy/7YqqYsfvoTEw9xUI2A==", + "version": "4.1.8", + "resolved": 
"https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.8.tgz", + "integrity": "sha512-kjeW8gjdxasbmFKpVGrGd5T4i40mV5J2Rasw48QARfYeQ8YS9x02ON9SFWax3Qf616rt4Cp3nVNIj6Hd1mP3og==", "dev": true, "license": "MIT" }, @@ -8450,15 +8642,33 @@ } }, "node_modules/tapable": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", - "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.2.tgz", + "integrity": "sha512-Re10+NauLTMCudc7T5WLFLAwDhQ0JWdrMK+9B2M8zR5hRExKmsRDCBA7/aV/pNJFltmBFO5BAMlQFi/vq3nKOg==", "dev": true, "license": "MIT", "engines": { "node": ">=6" } }, + "node_modules/tar": { + "version": "7.4.3", + "resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz", + "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==", + "dev": true, + "license": "ISC", + "dependencies": { + "@isaacs/fs-minipass": "^4.0.0", + "chownr": "^3.0.0", + "minipass": "^7.1.2", + "minizlib": "^3.0.1", + "mkdirp": "^3.0.1", + "yallist": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, "node_modules/text-table": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", @@ -8479,9 +8689,9 @@ "license": "MIT" }, "node_modules/tinyglobby": { - "version": "0.2.13", - "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.13.tgz", - "integrity": "sha512-mEwzpUgrLySlveBwEVDMKk5B57bhLPYovRfPAXD5gA/98Opn0rCDj3GtLwFvCvH5RK9uPCExUROW5NjDwvqkxw==", + "version": "0.2.14", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.14.tgz", + "integrity": "sha512-tX5e7OM1HnYr2+a2C/4V0htOcSQcoSTH9KgJnVvNm5zm/cyEWKJ7j7YutsH9CxMdtOkkLFy2AHrMci9IM8IPZQ==", "dev": true, "license": "MIT", "dependencies": { @@ -8496,9 +8706,9 @@ } }, "node_modules/tinyglobby/node_modules/fdir": { - "version": "6.4.4", - "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.4.tgz", - "integrity": "sha512-1NZP+GK4GfuAv3PqKvxQRDMjdSRZjnkq7KfhlNrCNNlZ0ygQFpebfrnfnq/W7fpUnAv9aGWmY1zKx7FYL3gwhg==", + "version": "6.4.5", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.5.tgz", + "integrity": "sha512-4BG7puHpVsIYxZUbiUE3RqGloLaSSwzYie5jvasC4LWuBWzZawynvYouhjbQKw2JuIGYdm0DzIxl8iVidKlUEw==", "dev": true, "license": "MIT", "peerDependencies": { @@ -8579,9 +8789,9 @@ "license": "0BSD" }, "node_modules/tw-animate-css": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/tw-animate-css/-/tw-animate-css-1.2.8.tgz", - "integrity": "sha512-AxSnYRvyFnAiZCUndS3zQZhNfV/B77ZhJ+O7d3K6wfg/jKJY+yv6ahuyXwnyaYA9UdLqnpCwhTRv9pPTBnPR2g==", + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/tw-animate-css/-/tw-animate-css-1.3.4.tgz", + "integrity": "sha512-dd1Ht6/YQHcNbq0znIT6dG8uhO7Ce+VIIhZUhjsryXsMPJQz3bZg7Q2eNzLwipb25bRZslGb2myio5mScd1TFg==", "dev": true, "license": "MIT", "funding": { @@ -8742,9 +8952,9 @@ "license": "MIT" }, "node_modules/unrs-resolver": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/unrs-resolver/-/unrs-resolver-1.7.2.tgz", - "integrity": "sha512-BBKpaylOW8KbHsu378Zky/dGh4ckT/4NW/0SHRABdqRLcQJ2dAOjDo9g97p04sWflm0kqPqpUatxReNV/dqI5A==", + "version": "1.7.10", + "resolved": "https://registry.npmjs.org/unrs-resolver/-/unrs-resolver-1.7.10.tgz", + "integrity": "sha512-CJEMJcz6vuwRK6xxWc+uf8AGi0OyfoVtHs5mExtNecS0HZq3a3Br1JC/InwwTn6uy+qkAdAdK+nJUYO9FPtgZw==", "dev": true, "hasInstallScript": true, 
"license": "MIT", @@ -8752,26 +8962,26 @@ "napi-postinstall": "^0.2.2" }, "funding": { - "url": "https://github.com/sponsors/JounQin" + "url": "https://opencollective.com/unrs-resolver" }, "optionalDependencies": { - "@unrs/resolver-binding-darwin-arm64": "1.7.2", - "@unrs/resolver-binding-darwin-x64": "1.7.2", - "@unrs/resolver-binding-freebsd-x64": "1.7.2", - "@unrs/resolver-binding-linux-arm-gnueabihf": "1.7.2", - "@unrs/resolver-binding-linux-arm-musleabihf": "1.7.2", - "@unrs/resolver-binding-linux-arm64-gnu": "1.7.2", - "@unrs/resolver-binding-linux-arm64-musl": "1.7.2", - "@unrs/resolver-binding-linux-ppc64-gnu": "1.7.2", - "@unrs/resolver-binding-linux-riscv64-gnu": "1.7.2", - "@unrs/resolver-binding-linux-riscv64-musl": "1.7.2", - "@unrs/resolver-binding-linux-s390x-gnu": "1.7.2", - "@unrs/resolver-binding-linux-x64-gnu": "1.7.2", - "@unrs/resolver-binding-linux-x64-musl": "1.7.2", - "@unrs/resolver-binding-wasm32-wasi": "1.7.2", - "@unrs/resolver-binding-win32-arm64-msvc": "1.7.2", - "@unrs/resolver-binding-win32-ia32-msvc": "1.7.2", - "@unrs/resolver-binding-win32-x64-msvc": "1.7.2" + "@unrs/resolver-binding-darwin-arm64": "1.7.10", + "@unrs/resolver-binding-darwin-x64": "1.7.10", + "@unrs/resolver-binding-freebsd-x64": "1.7.10", + "@unrs/resolver-binding-linux-arm-gnueabihf": "1.7.10", + "@unrs/resolver-binding-linux-arm-musleabihf": "1.7.10", + "@unrs/resolver-binding-linux-arm64-gnu": "1.7.10", + "@unrs/resolver-binding-linux-arm64-musl": "1.7.10", + "@unrs/resolver-binding-linux-ppc64-gnu": "1.7.10", + "@unrs/resolver-binding-linux-riscv64-gnu": "1.7.10", + "@unrs/resolver-binding-linux-riscv64-musl": "1.7.10", + "@unrs/resolver-binding-linux-s390x-gnu": "1.7.10", + "@unrs/resolver-binding-linux-x64-gnu": "1.7.10", + "@unrs/resolver-binding-linux-x64-musl": "1.7.10", + "@unrs/resolver-binding-wasm32-wasi": "1.7.10", + "@unrs/resolver-binding-win32-arm64-msvc": "1.7.10", + "@unrs/resolver-binding-win32-ia32-msvc": "1.7.10", + "@unrs/resolver-binding-win32-x64-msvc": "1.7.10" } }, "node_modules/uri-js": { @@ -9091,6 +9301,16 @@ "dev": true, "license": "ISC" }, + "node_modules/yallist": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz", + "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, "node_modules/yaml": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.1.tgz", @@ -9115,18 +9335,18 @@ } }, "node_modules/zod": { - "version": "3.24.3", - "resolved": "https://registry.npmjs.org/zod/-/zod-3.24.3.tgz", - "integrity": "sha512-HhY1oqzWCQWuUqvBFnsyrtZRhyPeR7SUGv+C4+MsisMuVfSPx8HpwWqH8tRahSlt6M3PiFAcoeFhZAqIXTxoSg==", + "version": "3.25.51", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.51.tgz", + "integrity": "sha512-TQSnBldh+XSGL+opiSIq0575wvDPqu09AqWe1F7JhUMKY+M91/aGlK4MhpVNO7MgYfHcVCB1ffwAUTJzllKJqg==", "license": "MIT", "funding": { "url": "https://github.com/sponsors/colinhacks" } }, "node_modules/zustand": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.3.tgz", - "integrity": "sha512-14fwWQtU3pH4dE0dOpdMiWjddcH+QzKIgk1cl8epwSE7yag43k/AD/m4L6+K7DytAOr9gGBe3/EXj9g7cdostg==", + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.5.tgz", + "integrity": "sha512-mILtRfKW9xM47hqxGIxCv12gXusoY/xTSHBYApXozR0HmQv299whhBeeAcRy+KrPPybzosvJBCOmVjq6x12fCg==", "license": "MIT", "engines": { 
"node": ">=12.20.0" diff --git a/frontend/src/app/(protected)/experiments/[experimentId]/components/MABArmsProgress.tsx b/frontend/src/app/(protected)/experiments/[experimentId]/components/ArmsProgress.tsx similarity index 86% rename from frontend/src/app/(protected)/experiments/[experimentId]/components/MABArmsProgress.tsx rename to frontend/src/app/(protected)/experiments/[experimentId]/components/ArmsProgress.tsx index 6cb7221..c9017d9 100644 --- a/frontend/src/app/(protected)/experiments/[experimentId]/components/MABArmsProgress.tsx +++ b/frontend/src/app/(protected)/experiments/[experimentId]/components/ArmsProgress.tsx @@ -15,14 +15,14 @@ import { Info } from "lucide-react"; import { Progress } from "@/components/ui/progress"; import { Badge } from "@/components/ui/badge"; -import { MABArmDetails } from "../types"; +import { ArmDetails } from "../types"; export default function MABArmsProgress({ armsData, }: { - armsData: MABArmDetails[]; + armsData: ArmDetails[]; }) { - const maxMu = Math.max(...armsData.map((arm) => arm.mu)); + const maxMu = Math.max(...armsData.map((arm) => arm.mu ? arm.mu[0] : 0)); return ( @@ -56,18 +56,18 @@ export default function MABArmsProgress({ {arm.n_outcomes}
- {arm.beta + {(arm.alpha && arm.beta) ? `${((arm.alpha * 100) / (arm.alpha + arm.beta)).toFixed( 1 )}%` - : `${arm.mu.toFixed(1)}`} + : arm.mu ? (`${arm.mu[0].toFixed(1)}`) : (``)} diff --git a/frontend/src/app/(protected)/experiments/[experimentId]/components/Charts.tsx b/frontend/src/app/(protected)/experiments/[experimentId]/components/Charts.tsx index c8697a0..1eacf52 100644 --- a/frontend/src/app/(protected)/experiments/[experimentId]/components/Charts.tsx +++ b/frontend/src/app/(protected)/experiments/[experimentId]/components/Charts.tsx @@ -160,15 +160,15 @@ const NormalLineChart = ({ const data = x.map((xVal) => { const point: { x: number; [key: string]: number } = { x: xVal }; - const posteriorPDFs = posteriors.map(({ mu, sigma }) => - normalPDF(xVal, mu, sigma) + const posteriorPDFs = posteriors.map(({ mu, covariance }) => + normalPDF(xVal, mu[0], covariance[0][0]) ); posteriors.forEach(({ name }, i) => { point[`Posterior - ${i}_${name}`] = posteriorPDFs[i]; }); - const priorPDFs = priors.map(({ mu, sigma }) => normalPDF(xVal, mu, sigma)); + const priorPDFs = priors.map(({ mu, covariance }) => normalPDF(xVal, mu[0], covariance[0][0])); priors.forEach(({ name }, i) => { point[`Prior - ${i}_${name}`] = priorPDFs[i]; diff --git a/frontend/src/app/(protected)/experiments/[experimentId]/components/MABChart.tsx b/frontend/src/app/(protected)/experiments/[experimentId]/components/ExperimentChart.tsx similarity index 80% rename from frontend/src/app/(protected)/experiments/[experimentId]/components/MABChart.tsx rename to frontend/src/app/(protected)/experiments/[experimentId]/components/ExperimentChart.tsx index 6c58054..de8e680 100644 --- a/frontend/src/app/(protected)/experiments/[experimentId]/components/MABChart.tsx +++ b/frontend/src/app/(protected)/experiments/[experimentId]/components/ExperimentChart.tsx @@ -8,13 +8,13 @@ import { CardTitle, } from "@/components/ui/card"; import { Switch } from "@/components/ui/switch"; -import { MABExperimentDetails } from "../types"; +import { SingleExperimentDetails } from "../types"; import { BetaLineChart, NormalLineChart } from "./Charts"; export default function MABChart({ experimentData, }: { - experimentData: MABExperimentDetails | null; + experimentData: SingleExperimentDetails | null; }) { const [showPriors, setShowPriors] = useState(false); @@ -24,27 +24,27 @@ export default function MABChart({ const priorBetaData = experimentData.arms.map((arm) => ({ name: arm.name, - alpha: arm.alpha_init, - beta: arm.beta_init, + alpha: arm.alpha_init ? arm.alpha_init : 1, + beta: arm.beta_init ? arm.beta_init : 1, })); const posteriorBetaData = experimentData.arms.map((arm) => ({ name: arm.name, - alpha: arm.alpha, - beta: arm.beta, + alpha: arm.alpha ? arm.alpha : 1, + beta: arm.beta ? arm.beta : 1, })); const priorGaussianData = experimentData.arms.map((arm) => ({ name: arm.name, - mu: arm.mu_init, - sigma: arm.sigma_init, + mu: [arm.mu_init ? arm.mu_init : 0], + covariance: [[arm.sigma_init ? arm.sigma_init : 1]], })); const posteriorGaussianData = experimentData.arms.map((arm) => ({ name: arm.name, - mu: arm.mu, - sigma: arm.sigma, + mu: arm.mu ? arm.mu : [0], + covariance: arm.covariance ? 
arm.covariance : [[1]], })); return ( diff --git a/frontend/src/app/(protected)/experiments/[experimentId]/page.tsx b/frontend/src/app/(protected)/experiments/[experimentId]/page.tsx index 809c250..68e2a09 100644 --- a/frontend/src/app/(protected)/experiments/[experimentId]/page.tsx +++ b/frontend/src/app/(protected)/experiments/[experimentId]/page.tsx @@ -2,12 +2,12 @@ import { useState, useEffect } from "react"; import { Badge } from "@/components/ui/badge"; -import MABChart from "./components/MABChart"; -import MABArmsProgress from "./components/MABArmsProgress"; +import MABChart from "./components/ExperimentChart"; +import MABArmsProgress from "./components/ArmsProgress"; import NotificationDetails from "./components/Notifications"; import ExtraInfo from "./components/ExtraInfo"; -import { getMABExperimentById } from "../api"; +import { getExperimentById } from "../api"; import { useParams } from "next/navigation"; import { useAuth } from "@/utils/auth"; import { @@ -20,17 +20,17 @@ import { } from "@/components/ui/breadcrumb"; import { - MABExperimentDetails, - MABArmDetails, + SingleExperimentDetails, + ArmDetails, Notification, ExtraInfo as ExtraInfoType, } from "./types"; export default function ExperimentDetails() { const { experimentId } = useParams(); - const [armsDetails, setArmsDetails] = useState([]); + const [armsDetails, setArmsDetails] = useState([]); const [experimentDetails, setExperimentDetails] = - useState(null); + useState(null); const [notificationData, setNotificationData] = useState([]); const [extraInfo, setExtraInfo] = useState(null); @@ -40,10 +40,13 @@ export default function ExperimentDetails() { useEffect(() => { if (!token) return; - getMABExperimentById(token, Number(experimentId)).then((data) => { + getExperimentById(token, Number(experimentId)).then((data) => { setArmsDetails(data.arms); - setExperimentDetails(data); - setNotificationData(data.notifications); + setExperimentDetails({ + ...data, + notifications: Array.isArray(data.notifications) ? data.notifications : [] + }); + setNotificationData(Array.isArray(data.notifications) ? data.notifications : []); setExtraInfo({ dateCreated: data.created_datetime_utc, lastTrialDate: data.last_trial_datetime_utc, @@ -54,6 +57,7 @@ export default function ExperimentDetails() { }, [experimentId, token]); return ( + experimentDetails?.exp_type == "mab" ? (
@@ -113,5 +117,11 @@ export default function ExperimentDetails() {
- );
+) : (
+
+

+ We&apos;re working on visualizing details for {experimentDetails?.exp_type} experiments. Stay tuned!

+
+ )); } diff --git a/frontend/src/app/(protected)/experiments/[experimentId]/types.ts b/frontend/src/app/(protected)/experiments/[experimentId]/types.ts index 71c66fd..4dd65df 100644 --- a/frontend/src/app/(protected)/experiments/[experimentId]/types.ts +++ b/frontend/src/app/(protected)/experiments/[experimentId]/types.ts @@ -1,28 +1,29 @@ -interface MABExperimentDetails { +interface SingleExperimentDetails { name: string; description: string; - reward: string; + reward_type: string; prior_type: string; + exp_type: string; is_active: boolean; experiment_id: number; created_datetime_utc: string; last_trial_datetime_utc: string; n_trials: number; - arms: MABArmDetails[]; + arms: ArmDetails[]; notifications: Notification[]; } -interface MABArmDetails { +interface ArmDetails { name: string; description: string; - alpha_init: number; - beta_init: number; - mu_init: number; - sigma_init: number; - alpha: number; - beta: number; - mu: number; - sigma: number; + alpha_init?: number; + beta_init?: number; + mu_init?: number; + sigma_init?: number; + alpha?: number; + beta?: number; + mu?: number[]; + covariance?: number[][]; arm_id: number; n_outcomes: number; } @@ -41,4 +42,4 @@ interface ExtraInfo { nTrials: number; } -export type { MABExperimentDetails, MABArmDetails, Notification, ExtraInfo }; +export type { SingleExperimentDetails, ArmDetails, Notification, ExtraInfo }; diff --git a/frontend/src/app/(protected)/experiments/add/components/mabs/addMABArms.tsx b/frontend/src/app/(protected)/experiments/add/components/addArms.tsx similarity index 92% rename from frontend/src/app/(protected)/experiments/add/components/mabs/addMABArms.tsx rename to frontend/src/app/(protected)/experiments/add/components/addArms.tsx index 8d36ac7..48c8e3e 100644 --- a/frontend/src/app/(protected)/experiments/add/components/mabs/addMABArms.tsx +++ b/frontend/src/app/(protected)/experiments/add/components/addArms.tsx @@ -1,18 +1,16 @@ import { useExperimentStore, - isMABExperimentStateNormal, -} from "../../../store/useExperimentStore"; + isMABExperimentStateBeta, + isBayesianABState +} from "../../store/useExperimentStore"; import { Button } from "@/components/ui/button"; import { Input } from "@/components/ui/input"; import { Textarea } from "@/components/ui/textarea"; import { Label } from "@/components/ui/label"; import type { - NewMABArmNormal, - NewMABArmBeta, + NewArm, StepComponentProps, - // MABArmBeta, - // MABArmNormal, -} from "../../../types"; +} from "../../types"; import { Plus, Trash } from "lucide-react"; import { DividerWithTitle } from "@/components/Dividers"; import { useCallback, useEffect, useState, useMemo } from "react"; @@ -23,6 +21,8 @@ export default function AddMABArms({ onValidate }: StepComponentProps) { const [inputValues, setInputValues] = useState>({}); + const bayesABarms: Record = {1: "Treatment Arm", 2: "Control Arm"}; + const baseArmDesc = useMemo( () => ({ name: "", @@ -69,7 +69,7 @@ export default function AddMABArms({ onValidate }: StepComponentProps) { newErrors[index].alpha_init = "Alpha prior is required"; isValid = false; } - if (arm.alpha_init <= 0) { + if (arm.alpha_init && arm.alpha_init <= 0) { newErrors[index].alpha_init = "Alpha prior should be greater than 0"; isValid = false; @@ -82,7 +82,7 @@ export default function AddMABArms({ onValidate }: StepComponentProps) { isValid = false; } - if (arm.beta_init <= 0) { + if (arm.beta_init && arm.beta_init <= 0) { newErrors[index].beta_init = "Beta prior should be greater than 0"; isValid = false; } @@ -99,7 +99,7 @@ export 
default function AddMABArms({ onValidate }: StepComponentProps) { isValid = false; } - if (arm.sigma_init <= 0) { + if (arm.sigma_init && arm.sigma_init <= 0) { newErrors[index].sigma_init = "Std deviation should be greater than 0"; isValid = false; @@ -128,10 +128,10 @@ export default function AddMABArms({ onValidate }: StepComponentProps) { useEffect(() => { const newInputValues: Record = {}; - if (isMABExperimentStateNormal(experimentState)) { + if (!isMABExperimentStateBeta(experimentState)) { experimentState.arms.forEach((arm, index) => { newInputValues[`${index}-mu`] = ( - (arm as NewMABArmNormal).mu_init || 0 + (arm as NewArm).mu_init || 0 ).toString(); }); } @@ -156,15 +156,15 @@ export default function AddMABArms({ onValidate }: StepComponentProps) { return (
-

Add MAB Arms

+

Add Experiment Arms

-
-
+
{experimentState.arms.map((arm, index) => (
- +
@@ -253,7 +253,7 @@ export default function AddMABArms({ onValidate }: StepComponentProps) { id={`arm-${index + 1}-alpha`} name={`arm-${index + 1}-alpha`} placeholder="Enter an integer as the prior for the alpha parameter" - value={(arm as NewMABArmBeta).alpha_init || ""} + value={(arm as NewArm).alpha_init || ""} onChange={(e) => { updateArm(index, { alpha_init: parseInt(e.target.value), @@ -285,7 +285,7 @@ export default function AddMABArms({ onValidate }: StepComponentProps) { id={`arm-${index + 1}-beta`} name={`arm-${index + 1}-beta`} placeholder="Enter an integer as the prior for the beta parameter" - value={(arm as NewMABArmBeta).beta_init || ""} + value={(arm as NewArm).beta_init || ""} onChange={(e) => { updateArm(index, { beta_init: parseInt(e.target.value), @@ -325,7 +325,7 @@ export default function AddMABArms({ onValidate }: StepComponentProps) { defaultValue={0} value={ inputValues[`${index}-mu`] ?? - (arm as NewMABArmNormal).mu_init?.toString() + (arm as NewArm).mu_init?.toString() } onChange={(e) => { handleNumericChange(index, e.target.value); @@ -358,7 +358,7 @@ export default function AddMABArms({ onValidate }: StepComponentProps) { type="number" defaultValue={1} placeholder="Enter a float as standard deviation for the prior" - value={(arm as NewMABArmNormal).sigma_init || ""} + value={(arm as NewArm).sigma_init || ""} onChange={(e) => { updateArm(index, { sigma_init: Number(e.target.value), diff --git a/frontend/src/app/(protected)/experiments/add/components/cmabs/addCMABContext.tsx b/frontend/src/app/(protected)/experiments/add/components/addContext.tsx similarity index 90% rename from frontend/src/app/(protected)/experiments/add/components/cmabs/addCMABContext.tsx rename to frontend/src/app/(protected)/experiments/add/components/addContext.tsx index 79cf234..8a4b4a4 100644 --- a/frontend/src/app/(protected)/experiments/add/components/cmabs/addCMABContext.tsx +++ b/frontend/src/app/(protected)/experiments/add/components/addContext.tsx @@ -3,17 +3,17 @@ import { Input } from "@/components/ui/input"; import { Textarea } from "@/components/ui/textarea"; import { Label } from "@/components/ui/label"; import { RadioGroup, RadioGroupItem } from "@/components/ui/radio-group"; -import { useExperimentStore } from "../../../store/useExperimentStore"; +import { useExperimentStore } from "../../store/useExperimentStore"; import type { - CMABExperimentState, + ExperimentState, StepComponentProps, ContextType, -} from "../../../types"; +} from "../../types"; import { Plus, Trash } from "lucide-react"; import { DividerWithTitle } from "@/components/Dividers"; import { useCallback, useEffect, useState } from "react"; -export default function AddCMABContext({ onValidate }: StepComponentProps) { +export default function AddContext({ onValidate }: StepComponentProps) { const { experimentState, updateContext, addContext, removeContext } = useExperimentStore(); @@ -23,16 +23,17 @@ export default function AddCMABContext({ onValidate }: StepComponentProps) { const validateForm = useCallback(() => { let isValid = true; - const newErrors = (experimentState as CMABExperimentState).contexts.map( - () => ({ + let newErrors = [{ name: "", description: "", value_type: "" }]; + const contexts = (experimentState as ExperimentState).contexts; + + if (contexts) { + newErrors = contexts.map(() => ({ name: "", description: "", value_type: "", - }) - ); + })); - (experimentState as CMABExperimentState).contexts.forEach( - (context, index) => { + contexts.forEach((context, index) => { if 
(!context.name.trim()) { newErrors[index].name = "Context name is required"; isValid = false; @@ -47,8 +48,8 @@ export default function AddCMABContext({ onValidate }: StepComponentProps) { newErrors[index].value_type = "Context value type is required"; isValid = false; } - } - ); + }); + } return { isValid, newErrors }; }, [experimentState]); @@ -68,11 +69,11 @@ export default function AddCMABContext({ onValidate }: StepComponentProps) { } }, [validateForm, onValidate, errors]); - return ( + return experimentState.contexts ? (

- Add CMAB Contexts
+ Add Contexts

- {(experimentState as CMABExperimentState).contexts.map(
+ {experimentState.contexts.map(
(context, index) => (
@@ -233,5 +234,9 @@ export default function AddCMABContext({ onValidate }: StepComponentProps) { )}
+ ) : ( +
+ No contexts available. Please add a context to proceed. +
); } diff --git a/frontend/src/app/(protected)/experiments/add/components/addExperimentSteps.tsx b/frontend/src/app/(protected)/experiments/add/components/addExperimentSteps.tsx index 7d098a1..27b56af 100644 --- a/frontend/src/app/(protected)/experiments/add/components/addExperimentSteps.tsx +++ b/frontend/src/app/(protected)/experiments/add/components/addExperimentSteps.tsx @@ -1,65 +1,30 @@ -import { Step } from "../../types"; -import AddMABArms from "./mabs/addMABArms"; -import MABPriorRewardSelection from "./mabs/addPriorReward"; -import AddCMABArms from "./cmabs/addCMABArms"; -import AddCMABContexts from "./cmabs/addCMABContext"; -import CMABPriorRewardSelection from "./cmabs/addPriorReward"; -import AddBayesABArms from "./bayes_ab/addBayesABArms"; -import BayesianABRewardSelection from "./bayes_ab/addPriorReward"; +import { MethodType, Step } from "../../types"; +import PriorRewardSelection from "./addPriorReward"; +import AddContext from "./addContext"; +import AddArms from "./addArms"; import AddNotifications from "./addNotifications"; -// --- MAB types and steps --- -const MABsteps: Step[] = [ - { - name: "Configure MAB", - component: MABPriorRewardSelection, - }, - { - name: "Add Arms", - component: AddMABArms, - }, - { name: "Notifications", component: AddNotifications }, -]; - -// --- CMAB test types and steps --- - -const CMABsteps: Step[] = [ - { - name: "Configure MAB", - component: CMABPriorRewardSelection, - }, - { - name: "Add Contexts", - component: AddCMABContexts, +const AllSteps = (exp_type: MethodType): Step[] => { + const steps = [{ + name: "Configure Prior and Outcome for Experiment", + component: PriorRewardSelection, }, { name: "Add Arms", - component: AddCMABArms, + component: AddArms, }, - { name: "Notifications", component: AddNotifications }, -]; - -// --- A/B test types and steps --- - -const BayesianABsteps: Step[] = [ - { - name: "Configure Bayesian A/B Test", - component: BayesianABRewardSelection, - }, - { - name: "Add Arms", - component: AddBayesABArms, - }, - { name: "Notifications", component: AddNotifications }, -]; - -// --- All steps --- - -const AllSteps = { - mab: MABsteps, - cmab: CMABsteps, - bayes_ab: BayesianABsteps, + { name: "Notifications", + component: AddNotifications } + +] + if (exp_type === "cmab") { + steps.splice(0, 0, { + name: "Configure Prior and Reward", + component: AddContext, + }); + } + return steps; }; export { AllSteps }; diff --git a/frontend/src/app/(protected)/experiments/add/components/mabs/addPriorReward.tsx b/frontend/src/app/(protected)/experiments/add/components/addPriorReward.tsx similarity index 92% rename from frontend/src/app/(protected)/experiments/add/components/mabs/addPriorReward.tsx rename to frontend/src/app/(protected)/experiments/add/components/addPriorReward.tsx index 5be4587..bc5257d 100644 --- a/frontend/src/app/(protected)/experiments/add/components/mabs/addPriorReward.tsx +++ b/frontend/src/app/(protected)/experiments/add/components/addPriorReward.tsx @@ -1,11 +1,11 @@ -import { useExperimentStore } from "../../../store/useExperimentStore"; +import { useExperimentStore, isBayesianABState, isCMABExperimentState } from "../../store/useExperimentStore"; import { useCallback, useState, useEffect } from "react"; import { RadioGroup, RadioGroupItem } from "@/components/ui/radio-group"; import { Label } from "@/components/ui/label"; -import type { PriorType, RewardType, StepComponentProps } from "../../../types"; +import type { PriorType, RewardType, StepComponentProps } from "../../types"; import { 
DividerWithTitle } from "@/components/Dividers"; -export default function MABPriorRewardSelection({ +export default function PriorRewardSelection({ onValidate, }: StepComponentProps) { const { experimentState, updatePriorType, updateRewardType } = @@ -32,14 +32,6 @@ export default function MABPriorRewardSelection({ isValid = false; } - if ( - experimentState.prior_type === "normal" && - experimentState.reward_type === "binary" - ) { - newErrors.reward_type = - "Normal prior is not compatible with binary reward"; - isValid = false; - } if ( experimentState.prior_type === "beta" && experimentState.reward_type === "real-valued" @@ -48,6 +40,12 @@ export default function MABPriorRewardSelection({ "Beta prior is not compatible with real-valued reward"; isValid = false; } + + if ((isBayesianABState(experimentState) || isCMABExperimentState(experimentState)) && experimentState.prior_type === "beta") { + newErrors.prior_type = "Beta prior is not compatible with Bayesian AB or CMAB experiments"; + isValid = false; + } + return { isValid, newErrors }; }, [experimentState.prior_type, experimentState.reward_type]); @@ -69,7 +67,7 @@ export default function MABPriorRewardSelection({

- Configure MAB Parameters
+ Configure Experiment Parameters

diff --git a/frontend/src/app/(protected)/experiments/add/components/basicInfo.tsx b/frontend/src/app/(protected)/experiments/add/components/basicInfo.tsx index 93bd293..b7a5b25 100644 --- a/frontend/src/app/(protected)/experiments/add/components/basicInfo.tsx +++ b/frontend/src/app/(protected)/experiments/add/components/basicInfo.tsx @@ -68,12 +68,12 @@ export default function AddBasicInfo({ const [errors, setErrors] = useState({ name: "", description: "", - methodType: "", + exp_type: "", }); const validateForm = useCallback(() => { let isValid = true; - const newErrors = { name: "", description: "", methodType: "" }; + const newErrors = { name: "", description: "", exp_type: "" }; if (!experimentState.name.trim()) { newErrors.name = "Experiment name is required"; @@ -85,8 +85,8 @@ export default function AddBasicInfo({ isValid = false; } - if (!experimentState.methodType) { - newErrors.methodType = "Please select an experiment type"; + if (!experimentState.exp_type) { + newErrors.exp_type = "Please select an experiment type"; isValid = false; } @@ -164,15 +164,15 @@ export default function AddBasicInfo({ description={methodInfo[method].description} infoTitle={methodInfo[method].infoTitle} infoDescription={methodInfo[method].infoDescription} - selected={experimentState.methodType === method} + selected={experimentState.exp_type === method} disabled={methodInfo[method].disabled} onClick={() => updateMethodType(method as keyof Methods)} /> ) )}
- {errors.methodType ? ( -

{errors.methodType}

+ {errors.exp_type ? ( +

{errors.exp_type}

) : (

 

)} diff --git a/frontend/src/app/(protected)/experiments/add/components/bayes_ab/addBayesABArms.tsx b/frontend/src/app/(protected)/experiments/add/components/bayes_ab/addBayesABArms.tsx deleted file mode 100644 index 95d2122..0000000 --- a/frontend/src/app/(protected)/experiments/add/components/bayes_ab/addBayesABArms.tsx +++ /dev/null @@ -1,287 +0,0 @@ -import { - useExperimentStore, - isBayesianABState, -} from "../../../store/useExperimentStore"; -import { - NewBayesianABArm, - StepComponentProps, - BayesianABArm, -} from "../../../types"; -import { Button } from "@/components/ui/button"; -import { Input } from "@/components/ui/input"; -import { Textarea } from "@/components/ui/textarea"; -import { Label } from "@/components/ui/label"; -import { Plus, Trash } from "lucide-react"; -import { DividerWithTitle } from "@/components/Dividers"; -import { useCallback, useEffect, useState, useMemo } from "react"; - -export default function AddBayesABArms({ onValidate }: StepComponentProps) { - const { experimentState, updateArm, addArm, removeArm } = - useExperimentStore(); - - const [inputValues, setInputValues] = useState>({}); - - const baseArmDesc = useMemo( - () => ({ - name: "", - description: "", - }), - [] - ); - - const additionalArmErrors = useMemo( - () => ({ mu_init: "", sigma_init: "" }), - [experimentState] - ); - - const [errors, setErrors] = useState(() => { - return experimentState.arms.map(() => { - return { ...baseArmDesc, ...additionalArmErrors }; - }); - }); - - const validateForm = useCallback(() => { - let isValid = true; - const newErrors = experimentState.arms.map(() => ({ - ...baseArmDesc, - ...additionalArmErrors, - })); - - experimentState.arms.forEach((arm, index) => { - if (!arm.name.trim()) { - newErrors[index].name = "Arm name is required"; - isValid = false; - } - - if (!arm.description.trim()) { - newErrors[index].description = "Description is required"; - isValid = false; - } - - if (experimentState.prior_type === "beta") { - if ("mu_init" in arm && typeof arm.mu_init !== "number") { - newErrors[index].mu_init = "Mean value is required"; - isValid = false; - } - - if ("sigma_init" in arm) { - if (!arm.sigma_init) { - newErrors[index].sigma_init = "Std. deviation is required"; - isValid = false; - } - - if (arm.sigma_init <= 0) { - newErrors[index].sigma_init = - "Std deviation should be greater than 0"; - isValid = false; - } - } - } - }); - return { isValid, newErrors }; - }, [experimentState, baseArmDesc, additionalArmErrors]); - - useEffect(() => { - const { isValid, newErrors } = validateForm(); - if (JSON.stringify(newErrors) !== JSON.stringify(errors)) { - setErrors(newErrors); - onValidate({ - isValid, - errors: newErrors.map((error) => - Object.fromEntries( - Object.entries(error).map(([key, value]) => [key, value ?? 
""]) - ) - ), - }); - } - }, [validateForm, onValidate, errors]); - - useEffect(() => { - const newInputValues: Record = {}; - - if (isBayesianABState(experimentState)) { - experimentState.arms.forEach((arm, index) => { - newInputValues[`${index}-mu`] = ( - (arm as BayesianABArm).mu_init || 0 - ).toString(); - }); - } - setInputValues(newInputValues); - }, [experimentState]); - - const handleNumericChange = (index: number, value: string) => { - // Update the local input state for a smooth typing experience - setInputValues((prev) => ({ - ...prev, - [`${index}-mu`]: value, - })); - - if (value !== "" && value !== "-") { - const numValue = Number.parseFloat(value); - if (!isNaN(numValue)) { - updateArm(index, { mu_init: numValue }); - } - } - }; - - return ( -
-
-

- Add Bayesian A/B Arms -

-
- - -
-
-
- {experimentState.arms.map((arm, index) => ( -
- -
-
-
-
-
- -
- { - updateArm(index, { name: e.target.value }); - if (index === 0) { - updateArm(index, { is_treatment_arm: true }); - } - }} - /> - {errors[index]?.name ? ( -

- {errors[index].name} -

- ) : ( -

 

- )} -
-
-
-
-
- -
-