diff --git a/.gitignore b/.gitignore index 0c26523..91f9064 100644 --- a/.gitignore +++ b/.gitignore @@ -1,12 +1,21 @@ -# AngelaMos | 2025 -# dev.compose.yml +# ============================================================================= +# AngelaMos | 2026 +# .gitignore +# ============================================================================= -venv -*.venv -*.env -*.cache -*.egg +# ============================================================================= +# Environment files (keep .example files) +# ============================================================================= +.env +.env.local +.env.development +.env.production +.env.*.local +!.env.example +# ============================================================================= +# Python +# ============================================================================= __pycache__/ *.py[cod] *$py.class @@ -26,7 +35,11 @@ wheels/ *.egg-info/ .installed.cfg *.egg +.venv/ +venv/ +*.venv +# Python tooling .pytest_cache/ .coverage htmlcov/ @@ -34,6 +47,90 @@ htmlcov/ .mypy_cache/ .dmypy.json dmypy.json +.ruff_cache/ + +# ============================================================================= +# Node.js / JavaScript +# ============================================================================= +node_modules/ +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* +.pnpm-store/ + +# Build outputs +dist/ +dist-ssr/ +*.local +.vite/ +# ============================================================================= +# Go +# ============================================================================= +bin/ +*.exe +*.exe~ +*.dll +*.dylib +*.test +coverage.out +coverage.html +tmp/ +vendor/ +__debug_bin* + +# Go keys (sensitive) +keys/*.pem +keys/*.key + +# ============================================================================= +# iOS / React Native +# ============================================================================= +.expo/ +web-build/ +expo-env.d.ts 
+.kotlin/ +*.orig.* +*.jks +*.p8 +*.p12 +*.key +*.mobileprovision +.metro-health-check* +*.tsbuildinfo +/ios +/android + +# ============================================================================= +# IDEs and Editors +# ============================================================================= +.idea/ +.vscode/* +!.vscode/extensions.json +*.swp +*.swo +*~ +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? + +# ============================================================================= +# OS Files +# ============================================================================= .DS_Store +Thumbs.db +*.pem + +# ============================================================================= +# Docker +# ============================================================================= +*.log +# ============================================================================= +# Misc +# ============================================================================= +*.cache diff --git a/LICENSE b/LICENSE index 95fe6c2..00afa7b 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -©AngelaMos | 2025 | CarterPerez-dev +©AngelaMos | 2026 | CarterPerez-dev Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.rst b/README.rst index 67da212..60d36ef 100644 --- a/README.rst +++ b/README.rst @@ -1,8 +1,25 @@ =========================================== -Fullstack Template: FastAPI + React + Nginx +Fullstack Templates Collection =========================================== -*Production-ready Docker setup with TypeScript and SCSS* +*Production-ready stack templates with TypeScript, SCSS, and modern backends* + +Available Templates +=================== + +**Default Stack: FastAPI + React** + Full-stack web application with Python FastAPI backend and React 19 frontend. + Uses ``compose.yml`` and ``dev.compose.yml``. 
+ +**Go-Chi Stack: Go + React** + Full-stack web application with Go Chi backend and React 19 frontend. + Uses ``compose.go.yml`` and ``dev.compose.go.yml``. + Backend located in ``alternate-backends/go-chi/``. + +**iOS Mobile: Expo + React Native** + Mobile application using Expo and React Native. + Located in ``mobile/ios-expo/``. + Connects to any backend via API (no Docker required). ---- @@ -63,10 +80,24 @@ This will: Next Steps ---------- +**For FastAPI + React (Default)** + 1. Edit ``.env`` with your configuration -2. Start development: ``just dev-up`` +2. Start development: ``just dev-up`` or ``docker compose -f dev.compose.yml up`` 3. After creating models: ``just migration-local "initial"`` then ``just migrate-local head`` +**For Go-Chi + React** + +1. Edit ``.env`` with your configuration +2. Start development: ``docker compose -f dev.compose.go.yml up`` +3. Production: ``docker compose -f compose.go.yml up`` + +**For iOS Expo App** + +1. Navigate to ``mobile/ios-expo/`` +2. Follow the iOS-specific README for Expo setup +3. Configure API endpoint to connect to your backend + Run ``just`` to see all available commands. 
---- diff --git a/backend/.dockerignore b/backends/fastapi/.dockerignore similarity index 100% rename from backend/.dockerignore rename to backends/fastapi/.dockerignore diff --git a/backends/fastapi/.env.example b/backends/fastapi/.env.example new file mode 100644 index 0000000..ce0a004 --- /dev/null +++ b/backends/fastapi/.env.example @@ -0,0 +1,86 @@ +# ============================================================================= +# AngelaMos | 2026 +# .env.example +# ============================================================================= +# Standalone FastAPI Backend +# Copy to .env for production, .env.development for dev +# Production (*00 ports) and development (*01 ports) can run simultaneously +# ============================================================================= + +# ============================================================================= +# HOST PORTS +# ============================================================================= +API_HOST_PORT=8500 +POSTGRES_HOST_PORT=5500 +REDIS_HOST_PORT=6500 + +# ============================================================================= +# Application +# ============================================================================= +APP_NAME=FastAPI-API +APP_VERSION=1.0.0 +ENVIRONMENT=production +DEBUG=false + +# ============================================================================= +# Server (internal container settings) +# ============================================================================= +HOST=0.0.0.0 +PORT=8000 +RELOAD=false + +# ============================================================================= +# PostgreSQL +# ============================================================================= +POSTGRES_USER=postgres +POSTGRES_PASSWORD=CHANGE_ME_IN_PRODUCTION +POSTGRES_DB=app_db +POSTGRES_HOST=db +POSTGRES_CONTAINER_PORT=5432 + +DATABASE_URL=postgresql+asyncpg://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_CONTAINER_PORT}/${POSTGRES_DB} + 
+DB_POOL_SIZE=20 +DB_MAX_OVERFLOW=10 +DB_POOL_TIMEOUT=30 +DB_POOL_RECYCLE=1800 + +# ============================================================================= +# Redis +# ============================================================================= +REDIS_HOST=redis +REDIS_CONTAINER_PORT=6379 +REDIS_PASSWORD=CHANGE_ME_IN_PRODUCTION + +REDIS_URL=redis://${REDIS_HOST}:${REDIS_CONTAINER_PORT} + +# ============================================================================= +# Security / JWT +# ============================================================================= +SECRET_KEY=CHANGE_ME_GENERATE_WITH_openssl_rand_base64_32 + +JWT_ALGORITHM=HS256 +ACCESS_TOKEN_EXPIRE_MINUTES=15 +REFRESH_TOKEN_EXPIRE_DAYS=7 + +# ============================================================================= +# Admin Bootstrap (optional) +# ============================================================================= +ADMIN_EMAIL= + +# ============================================================================= +# Rate Limiting +# ============================================================================= +RATE_LIMIT_DEFAULT=100/minute +RATE_LIMIT_AUTH=20/minute + +# ============================================================================= +# Logging +# ============================================================================= +LOG_LEVEL=INFO +LOG_JSON_FORMAT=true + +# ============================================================================= +# Cloudflare Tunnel (for cloudflared.compose.yml) +# ============================================================================= +CLOUDFLARE_TUNNEL_TOKEN=YOUR_TUNNEL_TOKEN_HERE diff --git a/backend/.style.yapf b/backends/fastapi/.style.yapf similarity index 100% rename from backend/.style.yapf rename to backends/fastapi/.style.yapf diff --git a/backend/alembic.ini b/backends/fastapi/alembic.ini similarity index 100% rename from backend/alembic.ini rename to backends/fastapi/alembic.ini diff --git a/backend/alembic/env.py 
b/backends/fastapi/alembic/env.py similarity index 100% rename from backend/alembic/env.py rename to backends/fastapi/alembic/env.py diff --git a/backend/alembic/script.py.mako b/backends/fastapi/alembic/script.py.mako similarity index 100% rename from backend/alembic/script.py.mako rename to backends/fastapi/alembic/script.py.mako diff --git a/backend/alembic/versions/20251224_033104_initial.py b/backends/fastapi/alembic/versions/20251224_033104_initial.py similarity index 100% rename from backend/alembic/versions/20251224_033104_initial.py rename to backends/fastapi/alembic/versions/20251224_033104_initial.py diff --git a/backend/app/__main__.py b/backends/fastapi/app/__main__.py similarity index 100% rename from backend/app/__main__.py rename to backends/fastapi/app/__main__.py diff --git a/backend/app/admin/__init__.py b/backends/fastapi/app/admin/__init__.py similarity index 100% rename from backend/app/admin/__init__.py rename to backends/fastapi/app/admin/__init__.py diff --git a/backend/app/admin/py.typed b/backends/fastapi/app/admin/py.typed similarity index 100% rename from backend/app/admin/py.typed rename to backends/fastapi/app/admin/py.typed diff --git a/backend/app/admin/routes.py b/backends/fastapi/app/admin/routes.py similarity index 100% rename from backend/app/admin/routes.py rename to backends/fastapi/app/admin/routes.py diff --git a/backend/app/auth/RefreshToken.py b/backends/fastapi/app/auth/RefreshToken.py similarity index 100% rename from backend/app/auth/RefreshToken.py rename to backends/fastapi/app/auth/RefreshToken.py diff --git a/backend/app/auth/__init__.py b/backends/fastapi/app/auth/__init__.py similarity index 100% rename from backend/app/auth/__init__.py rename to backends/fastapi/app/auth/__init__.py diff --git a/backend/app/auth/dependencies.py b/backends/fastapi/app/auth/dependencies.py similarity index 100% rename from backend/app/auth/dependencies.py rename to backends/fastapi/app/auth/dependencies.py diff --git 
a/backend/app/auth/py.typed b/backends/fastapi/app/auth/py.typed similarity index 100% rename from backend/app/auth/py.typed rename to backends/fastapi/app/auth/py.typed diff --git a/backend/app/auth/repository.py b/backends/fastapi/app/auth/repository.py similarity index 100% rename from backend/app/auth/repository.py rename to backends/fastapi/app/auth/repository.py diff --git a/backend/app/auth/routes.py b/backends/fastapi/app/auth/routes.py similarity index 76% rename from backend/app/auth/routes.py rename to backends/fastapi/app/auth/routes.py index afbb10e..116a344 100644 --- a/backend/app/auth/routes.py +++ b/backends/fastapi/app/auth/routes.py @@ -29,7 +29,10 @@ from core.rate_limit import limiter from core.exceptions import TokenError from .schemas import ( + MobileLoginResponse, + MobileTokenResponse, PasswordChange, + RefreshTokenRequest, TokenResponse, TokenWithUserResponse, ) @@ -92,6 +95,48 @@ async def refresh_token( return result +@router.post( + "/login-mobile", + response_model = MobileLoginResponse, + responses = {**AUTH_401} +) +@limiter.limit(settings.RATE_LIMIT_AUTH) +async def login_mobile( + request: Request, + auth_service: AuthServiceDep, + ip: ClientIP, + form_data: Annotated[OAuth2PasswordRequestForm, + Depends()], +) -> MobileLoginResponse: + """ + Mobile login - returns both tokens in response body + """ + return await auth_service.login_mobile( + email=form_data.username, + password=form_data.password, + ip_address=ip, + ) + + +@router.post( + "/refresh-mobile", + response_model = MobileTokenResponse, + responses = {**AUTH_401} +) +async def refresh_token_mobile( + auth_service: AuthServiceDep, + ip: ClientIP, + data: RefreshTokenRequest, +) -> MobileTokenResponse: + """ + Mobile refresh - accepts token in body, returns both tokens + """ + return await auth_service.refresh_tokens_mobile( + data.refresh_token, + ip_address=ip, + ) + + @router.post( "/logout", status_code = status.HTTP_204_NO_CONTENT, diff --git 
a/backend/app/auth/schemas.py b/backends/fastapi/app/auth/schemas.py similarity index 77% rename from backend/app/auth/schemas.py rename to backends/fastapi/app/auth/schemas.py index f534792..7b47c34 100644 --- a/backend/app/auth/schemas.py +++ b/backends/fastapi/app/auth/schemas.py @@ -49,6 +49,25 @@ class RefreshTokenRequest(BaseSchema): refresh_token: str +class MobileTokenResponse(BaseSchema): + """ + Schema for mobile token refresh response (returns both tokens) + """ + access_token: str + refresh_token: str + token_type: str = "bearer" + + +class MobileLoginResponse(BaseSchema): + """ + Schema for mobile login response (returns both tokens + user) + """ + access_token: str + refresh_token: str + token_type: str = "bearer" + user: UserResponse + + class PasswordResetRequest(BaseSchema): """ Schema for password reset request diff --git a/backend/app/auth/service.py b/backends/fastapi/app/auth/service.py similarity index 80% rename from backend/app/auth/service.py rename to backends/fastapi/app/auth/service.py index f0557a1..f31d921 100644 --- a/backend/app/auth/service.py +++ b/backends/fastapi/app/auth/service.py @@ -23,6 +23,8 @@ from user.repository import UserRepository from .repository import RefreshTokenRepository from .schemas import ( + MobileLoginResponse, + MobileTokenResponse, TokenResponse, TokenWithUserResponse, ) @@ -175,6 +177,53 @@ async def refresh_tokens( return TokenResponse(access_token = access_token), new_raw_token + async def login_mobile( + self, + email: str, + password: str, + device_id: str | None = None, + device_name: str | None = None, + ip_address: str | None = None, + ) -> MobileLoginResponse: + """ + Mobile login - returns both tokens and user + """ + access_token, refresh_token, user = await self.authenticate( + email, + password, + device_id, + device_name, + ip_address, + ) + + return MobileLoginResponse( + access_token=access_token, + refresh_token=refresh_token, + user=UserResponse.model_validate(user), + ) + + async def 
refresh_tokens_mobile( + self, + refresh_token: str, + device_id: str | None = None, + device_name: str | None = None, + ip_address: str | None = None, + ) -> MobileTokenResponse: + """ + Mobile refresh - returns both tokens in response body + """ + token_response, new_refresh_token = await self.refresh_tokens( + refresh_token, + device_id, + device_name, + ip_address, + ) + + return MobileTokenResponse( + access_token=token_response.access_token, + refresh_token=new_refresh_token, + ) + async def logout( self, refresh_token: str, diff --git a/backend/app/config.py b/backends/fastapi/app/config.py similarity index 98% rename from backend/app/config.py rename to backends/fastapi/app/config.py index 7ed8739..381d4b1 100644 --- a/backend/app/config.py +++ b/backends/fastapi/app/config.py @@ -112,8 +112,8 @@ class Settings(BaseSettings): CORS_ORIGINS: list[str] = [ "http://localhost", - "http://localhost:3420", - "http://localhost:8420", + "http://localhost:8426", + "http://localhost:9426", ] CORS_ALLOW_CREDENTIALS: bool = True CORS_ALLOW_METHODS: list[str] = [ diff --git a/backend/app/core/Base.py b/backends/fastapi/app/core/Base.py similarity index 100% rename from backend/app/core/Base.py rename to backends/fastapi/app/core/Base.py diff --git a/backend/app/core/__init__.py b/backends/fastapi/app/core/__init__.py similarity index 100% rename from backend/app/core/__init__.py rename to backends/fastapi/app/core/__init__.py diff --git a/backend/app/core/base_repository.py b/backends/fastapi/app/core/base_repository.py similarity index 100% rename from backend/app/core/base_repository.py rename to backends/fastapi/app/core/base_repository.py diff --git a/backend/app/core/base_schema.py b/backends/fastapi/app/core/base_schema.py similarity index 100% rename from backend/app/core/base_schema.py rename to backends/fastapi/app/core/base_schema.py diff --git a/backend/app/core/common_schemas.py b/backends/fastapi/app/core/common_schemas.py similarity index 100% rename from 
backend/app/core/common_schemas.py rename to backends/fastapi/app/core/common_schemas.py diff --git a/backend/app/core/constants.py b/backends/fastapi/app/core/constants.py similarity index 100% rename from backend/app/core/constants.py rename to backends/fastapi/app/core/constants.py diff --git a/backend/app/core/database.py b/backends/fastapi/app/core/database.py similarity index 100% rename from backend/app/core/database.py rename to backends/fastapi/app/core/database.py diff --git a/backend/app/core/dependencies.py b/backends/fastapi/app/core/dependencies.py similarity index 100% rename from backend/app/core/dependencies.py rename to backends/fastapi/app/core/dependencies.py diff --git a/backend/app/core/enums.py b/backends/fastapi/app/core/enums.py similarity index 100% rename from backend/app/core/enums.py rename to backends/fastapi/app/core/enums.py diff --git a/backend/app/core/error_schemas.py b/backends/fastapi/app/core/error_schemas.py similarity index 100% rename from backend/app/core/error_schemas.py rename to backends/fastapi/app/core/error_schemas.py diff --git a/backend/app/core/exceptions.py b/backends/fastapi/app/core/exceptions.py similarity index 100% rename from backend/app/core/exceptions.py rename to backends/fastapi/app/core/exceptions.py diff --git a/backend/app/core/health_routes.py b/backends/fastapi/app/core/health_routes.py similarity index 100% rename from backend/app/core/health_routes.py rename to backends/fastapi/app/core/health_routes.py diff --git a/backend/app/core/logging.py b/backends/fastapi/app/core/logging.py similarity index 100% rename from backend/app/core/logging.py rename to backends/fastapi/app/core/logging.py diff --git a/backend/app/core/py.typed b/backends/fastapi/app/core/py.typed similarity index 100% rename from backend/app/core/py.typed rename to backends/fastapi/app/core/py.typed diff --git a/backend/app/core/rate_limit.py b/backends/fastapi/app/core/rate_limit.py similarity index 100% rename from 
backend/app/core/rate_limit.py rename to backends/fastapi/app/core/rate_limit.py diff --git a/backend/app/core/responses.py b/backends/fastapi/app/core/responses.py similarity index 100% rename from backend/app/core/responses.py rename to backends/fastapi/app/core/responses.py diff --git a/backend/app/core/security.py b/backends/fastapi/app/core/security.py similarity index 100% rename from backend/app/core/security.py rename to backends/fastapi/app/core/security.py diff --git a/backend/app/factory.py b/backends/fastapi/app/factory.py similarity index 89% rename from backend/app/factory.py rename to backends/fastapi/app/factory.py index 52903d9..e5eed4f 100644 --- a/backend/app/factory.py +++ b/backends/fastapi/app/factory.py @@ -12,7 +12,7 @@ from slowapi import _rate_limit_exceeded_handler from slowapi.errors import RateLimitExceeded -from config import settings, Environment, API_PREFIX +from config import settings, API_PREFIX from core.database import sessionmanager from core.exceptions import BaseAppException from core.logging import configure_logging @@ -23,6 +23,7 @@ from user.routes import router as user_router from auth.routes import router as auth_router from admin.routes import router as admin_router +from it_was_never_real import register_psyop_handler @asynccontextmanager @@ -64,8 +65,6 @@ def create_app() -> FastAPI: """ Application factory """ - is_production = settings.ENVIRONMENT == Environment.PRODUCTION - app = FastAPI( title = settings.APP_NAME, summary = settings.APP_SUMMARY, @@ -82,10 +81,10 @@ def create_app() -> FastAPI: openapi_tags = OPENAPI_TAGS, openapi_version = "3.1.0", lifespan = lifespan, - root_path = "/api" if not is_production else "", - openapi_url = None if is_production else "/openapi.json", - docs_url = None if is_production else "/docs", - redoc_url = None if is_production else "/redoc", + root_path = "/api", + openapi_url = "/openapi.json", + docs_url = "/docs", + redoc_url = "/redoc", ) 
app.add_middleware(CorrelationIdMiddleware) @@ -122,7 +121,7 @@ async def root() -> AppInfoResponse: name = settings.APP_NAME, version = settings.APP_VERSION, environment = settings.ENVIRONMENT.value, - docs_url = None if is_production else "/docs", + docs_url = "/docs", ) app.include_router(health_router) @@ -130,4 +129,6 @@ async def root() -> AppInfoResponse: app.include_router(auth_router, prefix = API_PREFIX) app.include_router(user_router, prefix = API_PREFIX) + register_psyop_handler(app) + return app diff --git a/backends/fastapi/app/it_was_never_real.py b/backends/fastapi/app/it_was_never_real.py new file mode 100644 index 0000000..f8489ab --- /dev/null +++ b/backends/fastapi/app/it_was_never_real.py @@ -0,0 +1,30 @@ +""" +ⒸAngelaMos | 2025 +it_was_never_real.py +""" + +from fastapi import FastAPI, Request +from fastapi.responses import JSONResponse +from starlette.exceptions import HTTPException as StarletteHTTPException + + +def register_psyop_handler(app: FastAPI) -> None: + """ + Registers the 404 handler for routes that never existed. + """ + @app.exception_handler(StarletteHTTPException) + async def the_endpoint_was_a_psyop( + request: Request, + exc: StarletteHTTPException, + ) -> JSONResponse: + if exc.status_code == 404: + return JSONResponse( + status_code = 404, + content = { + "detail": "It was never real. The endpoint was a psyop." 
+ }, + ) + return JSONResponse( + status_code = exc.status_code, + content = {"detail": exc.detail}, + ) diff --git a/backend/app/middleware/__init__.py b/backends/fastapi/app/middleware/__init__.py similarity index 100% rename from backend/app/middleware/__init__.py rename to backends/fastapi/app/middleware/__init__.py diff --git a/backend/app/middleware/correlation.py b/backends/fastapi/app/middleware/correlation.py similarity index 100% rename from backend/app/middleware/correlation.py rename to backends/fastapi/app/middleware/correlation.py diff --git a/backend/app/middleware/py.typed b/backends/fastapi/app/middleware/py.typed similarity index 100% rename from backend/app/middleware/py.typed rename to backends/fastapi/app/middleware/py.typed diff --git a/backend/app/py.typed b/backends/fastapi/app/py.typed similarity index 100% rename from backend/app/py.typed rename to backends/fastapi/app/py.typed diff --git a/backend/app/user/User.py b/backends/fastapi/app/user/User.py similarity index 100% rename from backend/app/user/User.py rename to backends/fastapi/app/user/User.py diff --git a/backend/app/user/__init__.py b/backends/fastapi/app/user/__init__.py similarity index 100% rename from backend/app/user/__init__.py rename to backends/fastapi/app/user/__init__.py diff --git a/backend/app/user/dependencies.py b/backends/fastapi/app/user/dependencies.py similarity index 100% rename from backend/app/user/dependencies.py rename to backends/fastapi/app/user/dependencies.py diff --git a/backend/app/user/py.typed b/backends/fastapi/app/user/py.typed similarity index 100% rename from backend/app/user/py.typed rename to backends/fastapi/app/user/py.typed diff --git a/backend/app/user/repository.py b/backends/fastapi/app/user/repository.py similarity index 100% rename from backend/app/user/repository.py rename to backends/fastapi/app/user/repository.py diff --git a/backend/app/user/routes.py b/backends/fastapi/app/user/routes.py similarity index 100% rename from 
backend/app/user/routes.py rename to backends/fastapi/app/user/routes.py diff --git a/backend/app/user/schemas.py b/backends/fastapi/app/user/schemas.py similarity index 100% rename from backend/app/user/schemas.py rename to backends/fastapi/app/user/schemas.py diff --git a/backend/app/user/service.py b/backends/fastapi/app/user/service.py similarity index 100% rename from backend/app/user/service.py rename to backends/fastapi/app/user/service.py diff --git a/backends/fastapi/cloudflared.compose.yml b/backends/fastapi/cloudflared.compose.yml new file mode 100644 index 0000000..2d16833 --- /dev/null +++ b/backends/fastapi/cloudflared.compose.yml @@ -0,0 +1,32 @@ +# ============================================================================= +# AngelaMos | 2026 +# cloudflared.compose.yml +# ============================================================================= +# Cloudflare Tunnel for production remote access +# Usage: docker compose -f compose.yml -f cloudflared.compose.yml up -d +# ============================================================================= + +services: + cloudflared: + image: cloudflare/cloudflared:latest + container_name: ${APP_NAME:-fastapi-api}-tunnel + command: tunnel run --token ${CLOUDFLARE_TUNNEL_TOKEN} + networks: + - backend + depends_on: + api: + condition: service_healthy + deploy: + resources: + limits: + cpus: '0.5' + memory: 128M + reservations: + cpus: '0.1' + memory: 32M + restart: unless-stopped + +networks: + backend: + external: true + name: ${APP_NAME:-fastapi-api}_backend diff --git a/backends/fastapi/compose.yml b/backends/fastapi/compose.yml new file mode 100644 index 0000000..a37a7c9 --- /dev/null +++ b/backends/fastapi/compose.yml @@ -0,0 +1,108 @@ +# ============================================================================= +# AngelaMos | 2026 +# compose.yml +# ============================================================================= +# Production compose - FastAPI + PostgreSQL + Redis (standalone API) +# 
For Cloudflare tunnel: docker compose -f compose.yml -f cloudflared.compose.yml up +# ============================================================================= + +name: ${APP_NAME:-fastapi-api} + +services: + api: + build: + context: . + dockerfile: infra/docker/fastapi.prod + container_name: ${APP_NAME:-fastapi-api}-backend + ports: + - "${API_HOST_PORT:-8500}:8000" + env_file: + - .env + environment: + - ENVIRONMENT=production + - DEBUG=false + - RELOAD=false + depends_on: + db: + condition: service_healthy + redis: + condition: service_healthy + networks: + - backend + deploy: + resources: + limits: + cpus: '2.0' + memory: 1G + reservations: + cpus: '0.5' + memory: 256M + healthcheck: + test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"] + interval: 30s + timeout: 5s + retries: 3 + start_period: 40s + restart: unless-stopped + + db: + image: postgres:17-alpine + container_name: ${APP_NAME:-fastapi-api}-db + ports: + - "${POSTGRES_HOST_PORT:-5500}:5432" + volumes: + - postgres_data:/var/lib/postgresql/data + environment: + - POSTGRES_USER=${POSTGRES_USER:-postgres} + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-postgres} + - POSTGRES_DB=${POSTGRES_DB:-app_db} + networks: + - backend + deploy: + resources: + limits: + cpus: '1.0' + memory: 512M + reservations: + cpus: '0.25' + memory: 128M + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_DB:-app_db}"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 30s + restart: unless-stopped + + redis: + image: redis:7-alpine + container_name: ${APP_NAME:-fastapi-api}-redis + ports: + - "${REDIS_HOST_PORT:-6500}:6379" + volumes: + - redis_data:/data + command: redis-server --appendonly yes ${REDIS_PASSWORD:+--requirepass ${REDIS_PASSWORD}} + networks: + - backend + deploy: + resources: + limits: + cpus: '0.5' + memory: 256M + reservations: + cpus: '0.1' + memory: 64M + healthcheck: + test: ["CMD", "redis-cli", 
"ping"] + interval: 10s + timeout: 5s + retries: 5 + restart: unless-stopped + +networks: + backend: + driver: bridge + +volumes: + postgres_data: + redis_data: diff --git a/backend/conftest.py b/backends/fastapi/conftest.py similarity index 100% rename from backend/conftest.py rename to backends/fastapi/conftest.py diff --git a/backends/fastapi/dev.compose.yml b/backends/fastapi/dev.compose.yml new file mode 100644 index 0000000..d3c876d --- /dev/null +++ b/backends/fastapi/dev.compose.yml @@ -0,0 +1,88 @@ +# ============================================================================= +# AngelaMos | 2026 +# dev.compose.yml +# ============================================================================= +# Development compose - FastAPI + PostgreSQL + Redis (standalone API) +# Uses .env.development (different ports than production) +# ============================================================================= + +name: ${APP_NAME:-fastapi-api}-dev + +services: + api: + build: + context: . 
+ dockerfile: infra/docker/fastapi.dev + container_name: ${APP_NAME:-fastapi-api}-backend-dev + ports: + - "${API_HOST_PORT:-8501}:8000" + volumes: + - .:/app + - venv_cache:/app/.venv + env_file: + - .env.development + environment: + - ENVIRONMENT=development + - DEBUG=true + - RELOAD=true + depends_on: + db: + condition: service_healthy + redis: + condition: service_healthy + networks: + - backend + healthcheck: + test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 30s + restart: unless-stopped + + db: + image: postgres:17-alpine + container_name: ${APP_NAME:-fastapi-api}-db-dev + ports: + - "${POSTGRES_HOST_PORT:-5501}:5432" + volumes: + - postgres_data:/var/lib/postgresql/data + environment: + - POSTGRES_USER=${POSTGRES_USER:-postgres} + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-postgres} + - POSTGRES_DB=${POSTGRES_DB:-app_db} + networks: + - backend + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_DB:-app_db}"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 30s + restart: unless-stopped + + redis: + image: redis:7-alpine + container_name: ${APP_NAME:-fastapi-api}-redis-dev + ports: + - "${REDIS_HOST_PORT:-6501}:6379" + volumes: + - redis_data:/data + command: redis-server --appendonly yes + networks: + - backend + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 5s + retries: 5 + restart: unless-stopped + +networks: + backend: + driver: bridge + +volumes: + postgres_data: + redis_data: + venv_cache: diff --git a/backends/fastapi/infra/docker/fastapi.dev b/backends/fastapi/infra/docker/fastapi.dev new file mode 100644 index 0000000..6726625 --- /dev/null +++ b/backends/fastapi/infra/docker/fastapi.dev @@ -0,0 +1,34 @@ +# ============================================================================= +# AngelaMos | 2026 +# fastapi.dev +# 
============================================================================= +# Development Dockerfile for standalone FastAPI backend +# Features: uv package manager, hot reload, dev dependencies +# ============================================================================= +# syntax=docker/dockerfile:1 + +FROM python:3.12-slim + +COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/ + +WORKDIR /app + +ENV PYTHONDONTWRITEBYTECODE=1 \ + PYTHONUNBUFFERED=1 \ + UV_COMPILE_BYTECODE=0 \ + UV_LINK_MODE=copy \ + PYTHONPATH=/app/app + +COPY pyproject.toml uv.lock* ./ + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv sync --frozen --no-install-project + +COPY . . + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv sync --frozen + +EXPOSE 8000 + +CMD ["sh", "-c", "uv run alembic upgrade head && uv run uvicorn app.__main__:app --host 0.0.0.0 --port 8000 --reload"] diff --git a/backends/fastapi/infra/docker/fastapi.prod b/backends/fastapi/infra/docker/fastapi.prod new file mode 100644 index 0000000..827aed3 --- /dev/null +++ b/backends/fastapi/infra/docker/fastapi.prod @@ -0,0 +1,59 @@ +# ============================================================================= +# AngelaMos | 2026 +# fastapi.prod +# ============================================================================= +# Production Dockerfile for standalone FastAPI backend +# Features: uv, multi-stage build, non-root user, health check +# ============================================================================= +# syntax=docker/dockerfile:1 + +# ============================================================================ +# BUILD STAGE +# ============================================================================ +FROM python:3.12-slim AS builder + +COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/ + +WORKDIR /app + +ENV UV_COMPILE_BYTECODE=1 \ + UV_LINK_MODE=copy + +COPY pyproject.toml uv.lock* ./ + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv sync --locked --no-install-project 
--no-dev + +COPY . . + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv sync --locked --no-dev --no-editable + +# ============================================================================ +# PRODUCTION STAGE +# ============================================================================ +FROM python:3.12-slim AS production + +RUN groupadd -g 1001 appgroup && \ + useradd -u 1001 -g appgroup -m -s /bin/false appuser + +WORKDIR /app + +COPY --from=builder --chown=appuser:appgroup /app/.venv /app/.venv +COPY --from=builder --chown=appuser:appgroup /app/app /app/app +COPY --from=builder --chown=appuser:appgroup /app/alembic /app/alembic +COPY --from=builder --chown=appuser:appgroup /app/alembic.ini /app/alembic.ini + +ENV PATH="/app/.venv/bin:$PATH" \ + PYTHONDONTWRITEBYTECODE=1 \ + PYTHONUNBUFFERED=1 \ + PYTHONPATH=/app/app + +USER appuser + +EXPOSE 8000 + +HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \ + CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')" || exit 1 + +CMD ["sh", "-c", "alembic upgrade head && gunicorn app.__main__:app --worker-class uvicorn.workers.UvicornWorker --workers 4 --bind 0.0.0.0:8000 --max-requests 1000 --max-requests-jitter 100 --access-logfile - --error-logfile -"] diff --git a/backends/fastapi/justfile b/backends/fastapi/justfile new file mode 100644 index 0000000..cd04421 --- /dev/null +++ b/backends/fastapi/justfile @@ -0,0 +1,285 @@ +# ============================================================================= +# AngelaMos | 2026 +# justfile +# ============================================================================= +# Standalone FastAPI Backend +# ============================================================================= + +set dotenv-filename := ".env.development" +set dotenv-load +set export +set shell := ["bash", "-uc"] +set windows-shell := ["powershell.exe", "-NoLogo", "-Command"] + +project := file_name(justfile_directory()) +version := `git 
describe --tags --always 2>/dev/null || echo "dev"` + +# ============================================================================= +# Default +# ============================================================================= + +default: + @just --list --unsorted + +# ============================================================================= +# Linting and Formatting +# ============================================================================= + +[group('lint')] +ruff *ARGS: + ruff check app/ {{ARGS}} + +[group('lint')] +ruff-fix: + ruff check app/ --fix + ruff format app/ + +[group('lint')] +ruff-format: + ruff format app/ + +[group('lint')] +pylint *ARGS: + pylint app/ {{ARGS}} + +[group('lint')] +lint: ruff + +# ============================================================================= +# Type Checking +# ============================================================================= + +[group('types')] +mypy *ARGS: + mypy app/ {{ARGS}} + +[group('types')] +ty *ARGS: + ty check {{ARGS}} + +[group('types')] +typecheck: mypy + +# ============================================================================= +# Testing +# ============================================================================= + +[group('test')] +pytest *ARGS: + pytest tests/ {{ARGS}} + +[group('test')] +test: pytest + +[group('test')] +test-cov: + pytest tests/ --cov=app --cov-report=term-missing --cov-report=html + +# ============================================================================= +# CI / Quality +# ============================================================================= + +[group('ci')] +ci: lint typecheck test + +[group('ci')] +check: ruff mypy + +# ============================================================================= +# Docker Compose (Production) +# ============================================================================= + +[group('prod')] +up *ARGS: + docker compose --env-file .env up {{ARGS}} + +[group('prod')] +start *ARGS: + docker compose 
--env-file .env up -d {{ARGS}} + +[group('prod')] +down *ARGS: + docker compose --env-file .env down {{ARGS}} + +[group('prod')] +stop: + docker compose --env-file .env stop + +[group('prod')] +build *ARGS: + docker compose --env-file .env build {{ARGS}} + +[group('prod')] +rebuild: + docker compose --env-file .env build --no-cache + +[group('prod')] +logs *SERVICE: + docker compose --env-file .env logs -f {{SERVICE}} + +[group('prod')] +ps: + docker compose --env-file .env ps + +[group('prod')] +shell: + docker compose --env-file .env exec -it api /bin/bash + +# ============================================================================= +# Docker Compose (Production + Cloudflare Tunnel) +# ============================================================================= + +[group('tunnel')] +tunnel-up *ARGS: + docker compose --env-file .env -f compose.yml -f cloudflared.compose.yml up {{ARGS}} + +[group('tunnel')] +tunnel-start *ARGS: + docker compose --env-file .env -f compose.yml -f cloudflared.compose.yml up -d {{ARGS}} + +[group('tunnel')] +tunnel-down *ARGS: + docker compose --env-file .env -f compose.yml -f cloudflared.compose.yml down {{ARGS}} + +[group('tunnel')] +tunnel-logs: + docker compose --env-file .env -f compose.yml -f cloudflared.compose.yml logs -f cloudflared + +# ============================================================================= +# Docker Compose (Development) +# ============================================================================= + +[group('dev')] +dev-up *ARGS: + docker compose -f dev.compose.yml up {{ARGS}} + +[group('dev')] +dev-start *ARGS: + docker compose -f dev.compose.yml up -d {{ARGS}} + +[group('dev')] +dev-down *ARGS: + docker compose -f dev.compose.yml down {{ARGS}} + +[group('dev')] +dev-stop: + docker compose -f dev.compose.yml stop + +[group('dev')] +dev-build *ARGS: + docker compose -f dev.compose.yml build {{ARGS}} + +[group('dev')] +dev-rebuild: + docker compose -f dev.compose.yml build --no-cache + 
+[group('dev')] +dev-logs *SERVICE: + docker compose -f dev.compose.yml logs -f {{SERVICE}} + +[group('dev')] +dev-ps: + docker compose -f dev.compose.yml ps + +[group('dev')] +dev-shell: + docker compose -f dev.compose.yml exec -it api /bin/bash + +# ============================================================================= +# Database (Production) +# ============================================================================= + +[group('db')] +migrate *ARGS: + docker compose --env-file .env exec api alembic upgrade {{ARGS}} + +[group('db')] +migration message: + docker compose --env-file .env exec api alembic revision --autogenerate -m "{{message}}" + +[group('db')] +rollback: + docker compose --env-file .env exec api alembic downgrade -1 + +[group('db')] +db-history: + docker compose --env-file .env exec api alembic history --verbose + +[group('db')] +db-current: + docker compose --env-file .env exec api alembic current + +# ============================================================================= +# Database (Development) +# ============================================================================= + +[group('db-dev')] +dev-migrate *ARGS: + docker compose -f dev.compose.yml exec api alembic upgrade {{ARGS}} + +[group('db-dev')] +dev-migration message: + docker compose -f dev.compose.yml exec api alembic revision --autogenerate -m "{{message}}" + +[group('db-dev')] +dev-rollback: + docker compose -f dev.compose.yml exec api alembic downgrade -1 + +# ============================================================================= +# Database (Local - no Docker) +# ============================================================================= + +[group('db-local')] +migrate-local *ARGS: + uv run alembic upgrade {{ARGS}} + +[group('db-local')] +migration-local message: + uv run alembic revision --autogenerate -m "{{message}}" + +[group('db-local')] +rollback-local: + uv run alembic downgrade -1 + +[group('db-local')] +db-history-local: + uv run alembic 
history --verbose + +[group('db-local')] +db-current-local: + uv run alembic current + +# ============================================================================= +# Local Development (no Docker) +# ============================================================================= + +[group('local')] +run: + uv run uvicorn app.__main__:app --host 0.0.0.0 --port 8000 --reload + +[group('local')] +sync: + uv sync + +[group('local')] +sync-dev: + uv sync --all-extras + +# ============================================================================= +# Utilities +# ============================================================================= + +[group('util')] +info: + @echo "Project: {{project}}" + @echo "Version: {{version}}" + @echo "OS: {{os()}} ({{arch()}})" + +[group('util')] +clean: + -rm -rf .mypy_cache + -rm -rf .pytest_cache + -rm -rf .ruff_cache + -rm -rf htmlcov + -rm -rf .coverage + @echo "Cache directories cleaned" diff --git a/backend/pyproject.toml b/backends/fastapi/pyproject.toml similarity index 100% rename from backend/pyproject.toml rename to backends/fastapi/pyproject.toml diff --git a/backend/tests/__init__.py b/backends/fastapi/tests/__init__.py similarity index 100% rename from backend/tests/__init__.py rename to backends/fastapi/tests/__init__.py diff --git a/backend/tests/integration/__init__.py b/backends/fastapi/tests/integration/__init__.py similarity index 100% rename from backend/tests/integration/__init__.py rename to backends/fastapi/tests/integration/__init__.py diff --git a/backend/tests/integration/test_admin.py b/backends/fastapi/tests/integration/test_admin.py similarity index 100% rename from backend/tests/integration/test_admin.py rename to backends/fastapi/tests/integration/test_admin.py diff --git a/backend/tests/integration/test_auth.py b/backends/fastapi/tests/integration/test_auth.py similarity index 100% rename from backend/tests/integration/test_auth.py rename to backends/fastapi/tests/integration/test_auth.py diff --git 
a/backend/tests/integration/test_health.py b/backends/fastapi/tests/integration/test_health.py similarity index 100% rename from backend/tests/integration/test_health.py rename to backends/fastapi/tests/integration/test_health.py diff --git a/backend/tests/integration/test_users.py b/backends/fastapi/tests/integration/test_users.py similarity index 100% rename from backend/tests/integration/test_users.py rename to backends/fastapi/tests/integration/test_users.py diff --git a/backend/tests/unit/__init__.py b/backends/fastapi/tests/unit/__init__.py similarity index 100% rename from backend/tests/unit/__init__.py rename to backends/fastapi/tests/unit/__init__.py diff --git a/backend/uv.lock b/backends/fastapi/uv.lock similarity index 98% rename from backend/uv.lock rename to backends/fastapi/uv.lock index 21cb83e..8b69d78 100644 --- a/backend/uv.lock +++ b/backends/fastapi/uv.lock @@ -655,7 +655,7 @@ requires-dist = [ { name = "slowapi", specifier = ">=0.1.9" }, { name = "sqlalchemy", specifier = ">=2.0.44,<3.0.0" }, { name = "structlog", specifier = ">=24.4.0" }, - { name = "ty", marker = "extra == 'dev'", specifier = ">=0.0.1a32" }, + { name = "ty", marker = "extra == 'dev'", specifier = ">=0.0.8" }, { name = "types-redis", marker = "extra == 'dev'", specifier = ">=4.6.0" }, { name = "uuid6", specifier = ">=2025.0.1" }, { name = "uvicorn", extras = ["standard"], specifier = ">=0.38.0" }, @@ -1685,27 +1685,26 @@ wheels = [ [[package]] name = "ty" -version = "0.0.1a32" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/26/92/8da015685fb83734a2a83de02080e64d182509de77fa9bcf3eed12eeab4b/ty-0.0.1a32.tar.gz", hash = "sha256:12f62e8a3dd0eaeb9557d74b1c32f0616ae40eae10a4f411e1e2a73225f67ff2", size = 4689151, upload-time = "2025-12-05T21:04:26.885Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2a/e6/fdc35c9ba047f16afdfedf36fb51c221e0190ccde9f70ee28e77084d6612/ty-0.0.1a32-py3-none-linux_armv6l.whl", 
hash = "sha256:ffe595eaf616f06f58f951766477830a55c2502d2c9f77dde8f60d9a836e0645", size = 9673128, upload-time = "2025-12-05T21:04:17.702Z" }, - { url = "https://files.pythonhosted.org/packages/19/20/eaff31048e2f309f37478f7d715c8de9f9bab03cba4758da27b9311147af/ty-0.0.1a32-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:07f1dce88ad6028fb14665aefe4e6697012c34bd48edd37d02b7eb6a833dbf62", size = 9434094, upload-time = "2025-12-05T21:04:03.383Z" }, - { url = "https://files.pythonhosted.org/packages/67/d4/ea8ed57d11b81c459f23561fd6bfb0f54a8d4120cf72541e3bdf71d46202/ty-0.0.1a32-py3-none-macosx_11_0_arm64.whl", hash = "sha256:8fab7ed12528c77ddd600a9638ca859156a53c20f1e381353fa87a255bd397eb", size = 8980296, upload-time = "2025-12-05T21:04:28.912Z" }, - { url = "https://files.pythonhosted.org/packages/49/02/3ce98bbfbb3916678d717ee69358d38a404ca9a39391dda8874b66dd5ee7/ty-0.0.1a32-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ace395280fc21e25eff0a53cfbd68170f90a4b8ef2f85dfabe1ecbca2ced456b", size = 9263054, upload-time = "2025-12-05T21:04:05.619Z" }, - { url = "https://files.pythonhosted.org/packages/b7/be/a639638bcd1664de2d70a87da6c4fe0e3272a60b7fa3f0c108a956a456bd/ty-0.0.1a32-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2bcbeed7f5ed8e3c1c7e525fce541e7b943ac04ee7fe369a926551b5e50ea4a8", size = 9451396, upload-time = "2025-12-05T21:04:01.265Z" }, - { url = "https://files.pythonhosted.org/packages/1f/a4/2bcf54e842a3d10dc14b369f28a3bab530c5d7ddba624e910b212bda93ee/ty-0.0.1a32-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:60ff2e4493f90f81a260205d87719bb1d3420928a1e4a2a7454af7cbdfed2047", size = 9862726, upload-time = "2025-12-05T21:04:08.806Z" }, - { url = "https://files.pythonhosted.org/packages/5f/c7/19e6719496e59f2f082f34bcac312698366cf50879fdcc3ef76298bfe6a0/ty-0.0.1a32-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = 
"sha256:53cad50a59a0d943b06872e0b10f9f2b564805c2ea93f64c7798852bc1901954", size = 10475051, upload-time = "2025-12-05T21:04:31.059Z" }, - { url = "https://files.pythonhosted.org/packages/88/77/bdf0ddb066d2b62f141d058f8a33bb7c8628cdbb8bfa75b20e296b79fb4e/ty-0.0.1a32-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:343d43cdc1d7f649ea2baa64ac2b479da3d679239b94509f1df12f7211561ea9", size = 10232712, upload-time = "2025-12-05T21:04:19.849Z" }, - { url = "https://files.pythonhosted.org/packages/ed/07/f73260a461762a581a007015c1019d40658828ce41576f8c1db88dee574d/ty-0.0.1a32-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f45483e4a84bcf622413712164ea687ce323a9f7013b9e7977c5d623ed937ca9", size = 10237705, upload-time = "2025-12-05T21:04:35.366Z" }, - { url = "https://files.pythonhosted.org/packages/2c/57/dbb92206cf2f798d8c51ea16504e8afb90a139d0ff105c31cec9a1db29f9/ty-0.0.1a32-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d452f30d47002a6bafc36d1b6aee42c321e9ec9f7f43a04a2ee7d48c208b86c", size = 9766469, upload-time = "2025-12-05T21:04:22.236Z" }, - { url = "https://files.pythonhosted.org/packages/c3/5e/143d93bd143abcebcbaa98c8aeec78898553d62d0a5a432cd79e0cf5bd6d/ty-0.0.1a32-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:86c4e31737fe954637890cef1f3e1b479ffb20e836cac3b76050bdbe80005010", size = 9238592, upload-time = "2025-12-05T21:04:11.33Z" }, - { url = "https://files.pythonhosted.org/packages/21/b8/225230ae097ed88f3c92ad974dd77f8e4f86f2594d9cd0c729da39769878/ty-0.0.1a32-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:daf15fa03bc39a76a0fbc9c2d81d79d528f584e3fbe08d71981e3f7912db91d6", size = 9502161, upload-time = "2025-12-05T21:04:37.642Z" }, - { url = "https://files.pythonhosted.org/packages/85/13/cc89955c9637f25f3aca2dd7749c6008639ef036f0b9bea3e9d89e892ff9/ty-0.0.1a32-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6128f6bab5c6dab3d08689fed1d529dc34f50f221f89c8e16064ed0c549dad7a", size = 
9603058, upload-time = "2025-12-05T21:04:39.532Z" }, - { url = "https://files.pythonhosted.org/packages/46/77/1fe2793c8065a02d1f70ca7da1b87db49ca621bcbbdb79a18ad79d5d0ab2/ty-0.0.1a32-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:55aab688be1b46776a5a458a1993cae0da7725932c45393399c479c2fa979337", size = 9879903, upload-time = "2025-12-05T21:04:13.567Z" }, - { url = "https://files.pythonhosted.org/packages/fc/47/fd58e80a3e42310b4b649340d5d97403fe796146cae8678b3a031a414b8e/ty-0.0.1a32-py3-none-win32.whl", hash = "sha256:f55ec25088a09236ad1578b656a07fa009c3a353f5923486905ba48175d142a6", size = 9077703, upload-time = "2025-12-05T21:04:15.849Z" }, - { url = "https://files.pythonhosted.org/packages/8d/96/209c417c69317339ea8e9b3277fd98364a0e97dd1ffd3585e143ec7b4e57/ty-0.0.1a32-py3-none-win_amd64.whl", hash = "sha256:ed8d5cbd4e47dfed86aaa27e243008aa4e82b6a5434f3ab95c26d3ee5874d9d7", size = 9922426, upload-time = "2025-12-05T21:04:33.289Z" }, - { url = "https://files.pythonhosted.org/packages/e0/1c/350fd851fb91244f8c80cec218009cbee7564d76c14e2f423b47e69a5cbc/ty-0.0.1a32-py3-none-win_arm64.whl", hash = "sha256:dbb25f9b513d34cee8ce419514eaef03313f45c3f7ab4eb6e6d427ea1f6854af", size = 9453761, upload-time = "2025-12-05T21:04:24.502Z" }, +version = "0.0.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b7/85/97b5276baa217e05db2fe3d5c61e4dfd35d1d3d0ec95bfca1986820114e0/ty-0.0.10.tar.gz", hash = "sha256:0a1f9f7577e56cd508a8f93d0be2a502fdf33de6a7d65a328a4c80b784f4ac5f", size = 4892892, upload-time = "2026-01-07T23:00:23.572Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/7a/5a7147ce5231c3ccc55d6f945dabd7412e233e755d28093bfdec988ba595/ty-0.0.10-py3-none-linux_armv6l.whl", hash = "sha256:406a8ea4e648551f885629b75dc3f070427de6ed099af45e52051d4c68224829", size = 9835881, upload-time = "2026-01-07T22:08:17.492Z" }, + { url = 
"https://files.pythonhosted.org/packages/3e/7d/89f4d2277c938332d047237b47b11b82a330dbff4fff0de8574cba992128/ty-0.0.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:d6e0a733e3d6d3bce56d6766bc61923e8b130241088dc2c05e3c549487190096", size = 9696404, upload-time = "2026-01-07T22:08:37.965Z" }, + { url = "https://files.pythonhosted.org/packages/e8/cd/9dd49e6d40e54d4b7d563f9e2a432c4ec002c0673a81266e269c4bc194ce/ty-0.0.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e4832f8879cb95fc725f7e7fcab4f22be0cf2550f3a50641d5f4409ee04176d4", size = 9181195, upload-time = "2026-01-07T22:59:07.187Z" }, + { url = "https://files.pythonhosted.org/packages/d2/b8/3e7c556654ba0569ed5207138d318faf8633d87e194760fc030543817c26/ty-0.0.10-py3-none-manylinux_2_24_aarch64.whl", hash = "sha256:6b58cc78e5865bc908f053559a80bb77cab0dc168aaad2e88f2b47955694b138", size = 9665002, upload-time = "2026-01-07T22:08:30.782Z" }, + { url = "https://files.pythonhosted.org/packages/98/96/410a483321406c932c4e3aa1581d1072b72cdcde3ae83cd0664a65c7b254/ty-0.0.10-py3-none-manylinux_2_24_armv7l.whl", hash = "sha256:83c6a514bb86f05005fa93e3b173ae3fde94d291d994bed6fe1f1d2e5c7331cf", size = 9664948, upload-time = "2026-01-07T23:04:14.655Z" }, + { url = "https://files.pythonhosted.org/packages/1f/5d/cba2ab3e2f660763a72ad12620d0739db012e047eaa0ceaa252bf5e94ebb/ty-0.0.10-py3-none-manylinux_2_24_i686.whl", hash = "sha256:2e43f71e357f8a4f7fc75e4753b37beb2d0f297498055b1673a9306aa3e21897", size = 10125401, upload-time = "2026-01-07T22:08:28.171Z" }, + { url = "https://files.pythonhosted.org/packages/a7/67/29536e0d97f204a2933122239298e754db4564f4ed7f34e2153012b954be/ty-0.0.10-py3-none-manylinux_2_24_ppc64le.whl", hash = "sha256:18be3c679965c23944c8e574be0635504398c64c55f3f0c46259464e10c0a1c7", size = 10714052, upload-time = "2026-01-07T22:08:20.098Z" }, + { url = "https://files.pythonhosted.org/packages/63/c8/82ac83b79a71c940c5dcacb644f526f0c8fdf4b5e9664065ab7ee7c0e4ec/ty-0.0.10-py3-none-manylinux_2_24_s390x.whl", hash = 
"sha256:5477981681440a35acdf9b95c3097410c547abaa32b893f61553dbc3b0096fff", size = 10395924, upload-time = "2026-01-07T22:08:22.839Z" }, + { url = "https://files.pythonhosted.org/packages/9e/4c/2f9ac5edbd0e67bf82f5cd04275c4e87cbbf69a78f43e5dcf90c1573d44e/ty-0.0.10-py3-none-manylinux_2_24_x86_64.whl", hash = "sha256:e206a23bd887574302138b33383ae1edfcc39d33a06a12a5a00803b3f0287a45", size = 10220096, upload-time = "2026-01-07T22:08:13.171Z" }, + { url = "https://files.pythonhosted.org/packages/04/13/3be2b7bfd53b9952b39b6f2c2ef55edeb1a2fea3bf0285962736ee26731c/ty-0.0.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4e09ddb0d3396bd59f645b85eab20f9a72989aa8b736b34338dcb5ffecfe77b6", size = 9649120, upload-time = "2026-01-07T22:08:34.003Z" }, + { url = "https://files.pythonhosted.org/packages/93/e3/edd58547d9fd01e4e584cec9dced4f6f283506b422cdd953e946f6a8e9f0/ty-0.0.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:139d2a741579ad86a044233b5d7e189bb81f427eebce3464202f49c3ec0eba3b", size = 9686033, upload-time = "2026-01-07T22:08:40.967Z" }, + { url = "https://files.pythonhosted.org/packages/cc/bc/9d2f5fec925977446d577fb9b322d0e7b1b1758709f23a6cfc10231e9b84/ty-0.0.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6bae10420c0abfe4601fbbc6ce637b67d0b87a44fa520283131a26da98f2e74c", size = 9841905, upload-time = "2026-01-07T23:04:21.694Z" }, + { url = "https://files.pythonhosted.org/packages/7c/b8/5acd3492b6a4ef255ace24fcff0d4b1471a05b7f3758d8910a681543f899/ty-0.0.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:7358bbc5d037b9c59c3a48895206058bcd583985316c4125a74dd87fd1767adb", size = 10320058, upload-time = "2026-01-07T22:08:25.645Z" }, + { url = "https://files.pythonhosted.org/packages/35/67/5b6906fccef654c7e801d6ac8dcbe0d493e1f04c38127f82a5e6d7e0aa0e/ty-0.0.10-py3-none-win32.whl", hash = "sha256:f51b6fd485bc695d0fdf555e69e6a87d1c50f14daef6cb980c9c941e12d6bcba", size = 9271806, upload-time = "2026-01-07T22:08:10.08Z" }, + { url = 
"https://files.pythonhosted.org/packages/42/36/82e66b9753a76964d26fd9bc3514ea0abce0a5ba5ad7d5f084070c6981da/ty-0.0.10-py3-none-win_amd64.whl", hash = "sha256:16deb77a72cf93b89b4d29577829613eda535fbe030513dfd9fba70fe38bc9f5", size = 10130520, upload-time = "2026-01-07T23:04:11.759Z" }, + { url = "https://files.pythonhosted.org/packages/63/52/89da123f370e80b587d2db8551ff31562c882d87b32b0e92b59504b709ae/ty-0.0.10-py3-none-win_arm64.whl", hash = "sha256:7495288bca7afba9a4488c9906466d648ffd3ccb6902bc3578a6dbd91a8f05f0", size = 9626026, upload-time = "2026-01-07T23:04:17.91Z" }, ] [[package]] diff --git a/backends/go-backend/.air.toml b/backends/go-backend/.air.toml new file mode 100644 index 0000000..5f03fe9 --- /dev/null +++ b/backends/go-backend/.air.toml @@ -0,0 +1,29 @@ +# AngelaMos | 2026 +# .air.toml - Hot reload configuration + +root = "." +tmp_dir = "tmp" + +[build] +cmd = "go build -o ./tmp/main ./cmd/api" +bin = "tmp/main" +full_bin = "./tmp/main" +include_ext = ["go", "yaml", "yml"] +exclude_dir = ["tmp", "vendor", "bin", "keys", "migrations"] +exclude_regex = ["_test\\.go"] +delay = 1000 +stop_on_error = true +send_interrupt = true +kill_delay = 500 + +[log] +time = false + +[color] +main = "cyan" +watcher = "magenta" +build = "yellow" +runner = "green" + +[misc] +clean_on_exit = true diff --git a/backends/go-backend/.env.example b/backends/go-backend/.env.example new file mode 100644 index 0000000..f542823 --- /dev/null +++ b/backends/go-backend/.env.example @@ -0,0 +1,33 @@ +# AngelaMos | 2026 +# .env.example + +# Environment: development, staging, production +ENVIRONMENT=development + +# Server +HOST=0.0.0.0 +PORT=8080 + +# Database +DATABASE_URL=postgres://postgres:postgres@localhost:5432/app?sslmode=disable + +# Redis +REDIS_URL=redis://localhost:6379/0 + +# JWT +JWT_PRIVATE_KEY_PATH=keys/private.pem +JWT_PUBLIC_KEY_PATH=keys/public.pem +ACCESS_TOKEN_EXPIRE_MINUTES=15 +REFRESH_TOKEN_EXPIRE_DAYS=7 + +# Rate Limiting +RATE_LIMIT_REQUESTS=100 
+RATE_LIMIT_WINDOW=1m + +# OpenTelemetry +OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317 +OTEL_SERVICE_NAME=go-backend + +# Logging +LOG_LEVEL=debug +LOG_FORMAT=text diff --git a/backends/go-backend/.gitignore b/backends/go-backend/.gitignore new file mode 100644 index 0000000..3688c87 --- /dev/null +++ b/backends/go-backend/.gitignore @@ -0,0 +1,44 @@ +# AngelaMos | 2026 +# .gitignore + +# Binaries +bin/ +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test +*.test +coverage.out +coverage.html + +# Build +tmp/ + +# IDE +.idea/ +.vscode/ +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db + +# Environment +.env +.env.local +.env.*.local + +# Keys (sensitive) +keys/*.pem +keys/*.key + +# Vendor (if using) +vendor/ + +# Debug +__debug_bin* diff --git a/backends/go-backend/.golangci.yml b/backends/go-backend/.golangci.yml new file mode 100644 index 0000000..679c3e2 --- /dev/null +++ b/backends/go-backend/.golangci.yml @@ -0,0 +1,113 @@ +# AngelaMos | 2026 +# .golangci.yml + +version: "2" + +linters: + default: none + enable: + - errcheck + - govet + - gosec + - bodyclose + - nilerr + - errorlint + - exhaustive + - gocritic + - funlen + - gocognit + - dupl + - goconst + - ineffassign + - unused + - unconvert + - unparam + - testifylint + - fatcontext + + settings: + errcheck: + check-type-assertions: true + check-blank: true + + funlen: + lines: 100 + statements: 50 + + gocognit: + min-complexity: 20 + + govet: + enable-all: true + disable: + - fieldalignment + + revive: + rules: + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + - name: increment-decrement + - name: var-declaration + - name: package-comments + disabled: true + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: empty-block + - name: superfluous-else + - name: unreachable-code + + staticcheck: + 
checks: + - all + + gosec: + excludes: + - G104 + + sloglint: + no-mixed-args: true + kv-only: true + context: all + +issues: + max-same-issues: 50 + exclude-dirs: + - vendor + - testdata + exclude-rules: + - path: _test\.go + linters: + - funlen + - dupl + - goconst + + +formatters: + enable: + - gci # Groups imports + - gofumpt # Whitespace + - golines # Vertical wrap + settings: + golines: + max-len: 80 + reformat-tags: true + goimports: + local-prefixes: + - github.com/carterperez-dev/templates/go-backend + gci: + sections: + - standard + - default + - prefix(github.com/carterperez-dev) + custom-order: true + gofumpt: + extra-rules: true diff --git a/backends/go-backend/Dockerfile b/backends/go-backend/Dockerfile new file mode 100644 index 0000000..d6f0205 --- /dev/null +++ b/backends/go-backend/Dockerfile @@ -0,0 +1,36 @@ +# AngelaMos | 2026 +# Dockerfile - Multi-stage distroless build + +# Build stage +FROM golang:1.25-bookworm AS builder + +WORKDIR /build + +# Copy dependency files first for layer caching +COPY go.mod go.sum ./ +RUN go mod download + +# Copy source code +COPY . . 
+ +# Build binary +RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /app ./cmd/api + +# Production stage - Distroless +FROM gcr.io/distroless/static-debian12 + +# Copy binary from builder +COPY --from=builder /app /app + +# Copy migrations (embedded, but useful for debugging) +COPY --from=builder /build/migrations /migrations + +# Copy JWT signing keys. NOTE: COPY fails the build if keys/ is missing, +# and keys/*.pem / keys/*.key are gitignored — run 'just generate-keys' first. +COPY --from=builder /build/keys /keys + +# Run as non-root user +USER nonroot:nonroot + +EXPOSE 8080 + +ENTRYPOINT ["/app"] diff --git a/backends/go-backend/Justfile b/backends/go-backend/Justfile new file mode 100644 index 0000000..a3cdc63 --- /dev/null +++ b/backends/go-backend/Justfile @@ -0,0 +1,110 @@ +# AngelaMos | 2026 +# Justfile + +set dotenv-load := true + +# Default recipe +default: + @just --list + +# Development +# ----------- + +# Run the API server with hot reload (requires air) +dev: + air -c .air.toml + +# Run the API server without hot reload +run: + go run ./cmd/api + +# Build the binary +build: + CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o bin/api ./cmd/api + +# Run tests +test: + go test -v -race -coverprofile=coverage.out ./... + +# Run tests with coverage report +test-coverage: test + go tool cover -html=coverage.out -o coverage.html + +# Run linter +lint: + golangci-lint run --timeout=5m + +# Format code +fmt: + gofumpt -w . + goimports -w . 
+ +# Tidy dependencies +tidy: + go mod tidy + +# Database +# -------- + +# Run database migrations +migrate-up: + goose -dir migrations postgres "${DATABASE_URL}" up + +# Rollback last migration +migrate-down: + goose -dir migrations postgres "${DATABASE_URL}" down + +# Check migration status +migrate-status: + goose -dir migrations postgres "${DATABASE_URL}" status + +# Create new migration +migrate-create name: + goose -dir migrations create {{name}} sql + +# Docker +# ------ + +# Start development containers +up: + docker compose -f dev.compose.yml up -d + +# Stop development containers +down: + docker compose -f dev.compose.yml down + +# View container logs +logs: + docker compose -f dev.compose.yml logs -f + +# Rebuild and restart containers +rebuild: + docker compose -f dev.compose.yml up -d --build + +# Build production image +docker-build: + docker build -t go-backend:latest . + +# Keys +# ---- + +# Generate ES256 keypair for JWT signing +generate-keys: + #!/usr/bin/env bash + set -euo pipefail + mkdir -p keys + openssl ecparam -genkey -name prime256v1 -noout -out keys/private.pem + openssl ec -in keys/private.pem -pubout -out keys/public.pem + chmod 600 keys/private.pem + echo "ES256 keypair generated in keys/" + +# Clean +# ----- + +# Remove build artifacts +clean: + rm -rf bin/ coverage.out coverage.html tmp/ + +# Full clean including containers +clean-all: clean + docker compose -f dev.compose.yml down -v diff --git a/backends/go-backend/cmd/api/main.go b/backends/go-backend/cmd/api/main.go new file mode 100644 index 0000000..150d2b5 --- /dev/null +++ b/backends/go-backend/cmd/api/main.go @@ -0,0 +1,221 @@ +// AngelaMos | 2026 +// main.go + +package main + +import ( + "context" + "flag" + "log/slog" + "os" + "os/signal" + "syscall" + "time" + + "github.com/go-chi/chi/v5" + + "github.com/carterperez-dev/templates/go-backend/internal/admin" + "github.com/carterperez-dev/templates/go-backend/internal/auth" + 
"github.com/carterperez-dev/templates/go-backend/internal/config" + "github.com/carterperez-dev/templates/go-backend/internal/core" + "github.com/carterperez-dev/templates/go-backend/internal/health" + "github.com/carterperez-dev/templates/go-backend/internal/middleware" + "github.com/carterperez-dev/templates/go-backend/internal/server" + "github.com/carterperez-dev/templates/go-backend/internal/user" +) + +const ( + drainDelay = 5 * time.Second +) + +func main() { + configPath := flag.String("config", "config.yaml", "path to config file") + flag.Parse() + + if err := run(*configPath); err != nil { + slog.Error("application error", "error", err) + os.Exit(1) + } +} + +//nolint:funlen // bootstrap code is inherently verbose +func run(configPath string) error { + ctx, stop := signal.NotifyContext( + context.Background(), + syscall.SIGINT, + syscall.SIGTERM, + ) + defer stop() + + cfg, err := config.Load(configPath) + if err != nil { + return err + } + + logger := setupLogger(cfg.Log) + slog.SetDefault(logger) + + logger.Info("starting application", + "name", cfg.App.Name, + "version", cfg.App.Version, + "environment", cfg.App.Environment, + ) + + var telemetry *core.Telemetry + if cfg.Otel.Enabled { + tel, telErr := core.NewTelemetry(ctx, cfg.Otel, cfg.App) + if telErr != nil { + logger.Warn("failed to initialize telemetry", "error", telErr) + } else { + telemetry = tel + logger.Info("OpenTelemetry tracer initialized", + "endpoint", cfg.Otel.Endpoint, + ) + } + } + + db, err := core.NewDatabase(ctx, cfg.Database) + if err != nil { + return err + } + logger.Info("database connected", + "max_open_conns", cfg.Database.MaxOpenConns, + "max_idle_conns", cfg.Database.MaxIdleConns, + ) + + redis, err := core.NewRedis(ctx, cfg.Redis) + if err != nil { + return err + } + logger.Info("redis connected", + "pool_size", cfg.Redis.PoolSize, + ) + + jwtManager, err := auth.NewJWTManager(cfg.JWT) + if err != nil { + return err + } + logger.Info("JWT manager initialized", + 
"algorithm", "ES256", + "key_id", jwtManager.GetKeyID(), + ) + + userRepo := user.NewRepository(db.DB) + userSvc := user.NewService(userRepo) + userHandler := user.NewHandler(userSvc) + + authRepo := auth.NewRepository(db.DB) + authSvc := auth.NewService(authRepo, jwtManager, userSvc, redis.Client) + authHandler := auth.NewHandler(authSvc) + + healthHandler := health.NewHandler(db, redis) + + adminHandler := admin.NewHandler(admin.HandlerConfig{ + DBStats: db.Stats, + RedisStats: redis.PoolStats, + DBPing: db.Ping, + RedisPing: redis.Ping, + }) + + srv := server.New(server.Config{ + ServerConfig: cfg.Server, + HealthHandler: healthHandler, + Logger: logger, + }) + + router := srv.Router() + + router.Use(middleware.RequestID) + router.Use(middleware.Logger(logger)) + router.Use( + middleware.NewRateLimiter(redis.Client, middleware.RateLimitConfig{ + Limit: middleware.PerMinute( + cfg.RateLimit.Requests, + cfg.RateLimit.Burst, + ), + FailOpen: true, + }).Handler, + ) + router.Use(middleware.SecurityHeaders(cfg.App.Environment == "production")) + router.Use(middleware.CORS(cfg.CORS)) + + healthHandler.RegisterRoutes(router) + + router.Get("/.well-known/jwks.json", jwtManager.GetJWKSHandler()) + + authenticator := middleware.Authenticator(jwtManager) + adminOnly := middleware.RequireAdmin + + router.Route("/v1", func(r chi.Router) { + authHandler.RegisterRoutes(r, authenticator) + + r.Post("/users", authHandler.Register) + + userHandler.RegisterRoutes(r, authenticator) + userHandler.RegisterAdminRoutes(r, authenticator, adminOnly) + adminHandler.RegisterRoutes(r, authenticator, adminOnly) + }) + + errChan := make(chan error, 1) + go func() { + errChan <- srv.Start() + }() + + select { + case err := <-errChan: + return err + case <-ctx.Done(): + logger.Info("shutdown signal received") + } + + shutdownCtx, cancel := context.WithTimeout( + context.Background(), + cfg.Server.ShutdownTimeout+drainDelay+5*time.Second, + ) + defer cancel() + + if err := 
srv.Shutdown(shutdownCtx, drainDelay); err != nil { + logger.Error("server shutdown error", "error", err) + } + + if telemetry != nil { + if err := telemetry.Shutdown(shutdownCtx); err != nil { + logger.Error("telemetry shutdown error", "error", err) + } + } + + if err := redis.Close(); err != nil { + logger.Error("redis close error", "error", err) + } + + if err := db.Close(); err != nil { + logger.Error("database close error", "error", err) + } + + logger.Info("application stopped") + return nil +} + +func setupLogger(cfg config.LogConfig) *slog.Logger { + var handler slog.Handler + + level := slog.LevelInfo + switch cfg.Level { + case "debug": + level = slog.LevelDebug + case "warn": + level = slog.LevelWarn + case "error": + level = slog.LevelError + } + + opts := &slog.HandlerOptions{Level: level} + + if cfg.Format == "json" { + handler = slog.NewJSONHandler(os.Stdout, opts) + } else { + handler = slog.NewTextHandler(os.Stdout, opts) + } + + return slog.New(handler) +} diff --git a/backends/go-backend/compose.yml b/backends/go-backend/compose.yml new file mode 100644 index 0000000..66d66a2 --- /dev/null +++ b/backends/go-backend/compose.yml @@ -0,0 +1,59 @@ +# AngelaMos | 2026 +# compose.yml - Production compose + +services: + api: + image: go-backend:latest + ports: + - "8085:8080" + environment: + - ENVIRONMENT=production + - DATABASE_URL=postgres://postgres:postgres@postgres:5432/app?sslmode=disable + - REDIS_URL=redis://redis:6379/0 + - OTEL_EXPORTER_OTLP_ENDPOINT=http://jaeger:4317 + depends_on: + postgres: + condition: service_healthy + redis: + condition: service_healthy + healthcheck: + test: ["CMD", "wget", "-qO-", "http://localhost:8080/healthz"] + interval: 10s + timeout: 5s + retries: 3 + start_period: 10s + deploy: + resources: + limits: + memory: 256M + reservations: + memory: 128M + + postgres: + image: postgres:18-alpine + environment: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_DB: app + volumes: + - 
pgdata:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres"] + interval: 5s + timeout: 5s + retries: 5 + + redis: + image: redis:7-alpine + command: redis-server --appendonly yes + volumes: + - redisdata:/data + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 5s + timeout: 5s + retries: 5 + +volumes: + pgdata: + redisdata: diff --git a/backends/go-backend/config.yaml b/backends/go-backend/config.yaml new file mode 100644 index 0000000..01aa28f --- /dev/null +++ b/backends/go-backend/config.yaml @@ -0,0 +1,53 @@ +# AngelaMos | 2026 +# config.yaml - Default configuration + +app: + name: "Go Backend Template" + version: "1.0.0" + +server: + host: "0.0.0.0" + port: 8080 + read_timeout: 30s + write_timeout: 30s + idle_timeout: 120s + shutdown_timeout: 15s + +database: + max_open_conns: 25 + max_idle_conns: 5 + conn_max_lifetime: 1h + conn_max_idle_time: 30m + +redis: + pool_size: 10 + min_idle_conns: 5 + +jwt: + access_token_expire: 15m + refresh_token_expire: 168h + issuer: "go-backend" + +rate_limit: + requests: 100 + window: 1m + burst: 20 + +cors: + allowed_origins: + - "http://localhost:3000" + - "http://localhost:3420" + allowed_methods: + - "GET" + - "POST" + - "PUT" + - "PATCH" + - "DELETE" + - "OPTIONS" + allowed_headers: + - "Accept" + - "Authorization" + - "Content-Type" + - "X-Request-ID" + allow_credentials: true + max_age: 300 diff --git a/backends/go-backend/dev.compose.yml b/backends/go-backend/dev.compose.yml new file mode 100644 index 0000000..26bfa45 --- /dev/null +++ b/backends/go-backend/dev.compose.yml @@ -0,0 +1,45 @@ +# AngelaMos | 2026 +# dev.compose.yml - Development compose + +services: + postgres: + image: postgres:18-alpine + ports: + - "5447:5432" + environment: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_DB: app + volumes: + - pgdata_dev:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres"] + interval: 5s + timeout: 5s + retries: 5 + + 
redis: + image: redis:7-alpine + ports: + - "6022:6379" + command: redis-server --appendonly yes + volumes: + - redisdata_dev:/data + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 5s + timeout: 5s + retries: 5 + + jaeger: + image: jaegertracing/all-in-one:1.54 + ports: + - "16686:16686" + - "4317:4317" + - "4318:4318" + environment: + COLLECTOR_OTLP_ENABLED: "true" + +volumes: + pgdata_dev: + redisdata_dev: diff --git a/backends/go-backend/go.mod b/backends/go-backend/go.mod new file mode 100644 index 0000000..e5981e9 --- /dev/null +++ b/backends/go-backend/go.mod @@ -0,0 +1,69 @@ +module github.com/carterperez-dev/templates/go-backend + +go 1.25.0 + +require ( + github.com/go-chi/chi/v5 v5.2.3 + github.com/go-playground/validator/v10 v10.23.0 + github.com/go-redis/redis_rate/v10 v10.0.1 + github.com/google/uuid v1.6.0 + github.com/jackc/pgx/v5 v5.7.2 + github.com/jmoiron/sqlx v1.4.0 + github.com/knadh/koanf/parsers/yaml v1.1.0 + github.com/knadh/koanf/providers/env v1.1.0 + github.com/knadh/koanf/providers/file v1.2.1 + github.com/knadh/koanf/v2 v2.1.2 + github.com/lestrrat-go/jwx/v3 v3.0.12 + github.com/redis/go-redis/v9 v9.7.0 + go.opentelemetry.io/otel v1.33.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 + go.opentelemetry.io/otel/sdk v1.33.0 + go.opentelemetry.io/otel/trace v1.33.0 + golang.org/x/crypto v0.43.0 + golang.org/x/time v0.14.0 + google.golang.org/grpc v1.68.1 +) + +require ( + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-playground/locales v0.14.1 // indirect + 
github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/goccy/go-json v0.10.3 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/knadh/koanf/maps v0.1.2 // indirect + github.com/leodido/go-urn v1.4.0 // indirect + github.com/lestrrat-go/blackmagic v1.0.4 // indirect + github.com/lestrrat-go/dsig v1.0.0 // indirect + github.com/lestrrat-go/dsig-secp256k1 v1.0.0 // indirect + github.com/lestrrat-go/httpcc v1.0.1 // indirect + github.com/lestrrat-go/httprc/v3 v3.0.1 // indirect + github.com/lestrrat-go/option v1.0.1 // indirect + github.com/lestrrat-go/option/v2 v2.0.0 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/segmentio/asm v1.2.1 // indirect + github.com/valyala/fastjson v1.6.4 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.yaml.in/yaml/v3 v3.0.3 // indirect + golang.org/x/net v0.45.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/text v0.30.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/protobuf v1.35.2 // indirect +) diff --git a/backends/go-backend/go.sum b/backends/go-backend/go.sum new file mode 100644 index 0000000..0a9a961 --- /dev/null +++ b/backends/go-backend/go.sum @@ -0,0 +1,165 @@ +filippo.io/edwards25519 v1.1.0 
h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= 
+github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.23.0 h1:/PwmTwZhS0dPkav3cdK9kV1FsAmrL8sThn8IHr/sO+o= +github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-redis/redis_rate/v10 v10.0.1 h1:calPxi7tVlxojKunJwQ72kwfozdy25RjA0bCj1h0MUo= +github.com/go-redis/redis_rate/v10 v10.0.1/go.mod h1:EMiuO9+cjRkR7UvdvwMO7vbgqJkltQHtwbdIQvaBKIU= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod 
h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI= +github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo= +github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/parsers/yaml v1.1.0 h1:3ltfm9ljprAHt4jxgeYLlFPmUaunuCgu1yILuTXRdM4= +github.com/knadh/koanf/parsers/yaml v1.1.0/go.mod 
h1:HHmcHXUrp9cOPcuC+2wrr44GTUB0EC+PyfN3HZD9tFg= +github.com/knadh/koanf/providers/env v1.1.0 h1:U2VXPY0f+CsNDkvdsG8GcsnK4ah85WwWyJgef9oQMSc= +github.com/knadh/koanf/providers/env v1.1.0/go.mod h1:QhHHHZ87h9JxJAn2czdEl6pdkNnDh/JS1Vtsyt65hTY= +github.com/knadh/koanf/providers/file v1.2.1 h1:bEWbtQwYrA+W2DtdBrQWyXqJaJSG3KrP3AESOJYp9wM= +github.com/knadh/koanf/providers/file v1.2.1/go.mod h1:bp1PM5f83Q+TOUu10J/0ApLBd9uIzg+n9UgthfY+nRA= +github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ= +github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/lestrrat-go/blackmagic v1.0.4 h1:IwQibdnf8l2KoO+qC3uT4OaTWsW7tuRQXy9TRN9QanA= +github.com/lestrrat-go/blackmagic v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw= +github.com/lestrrat-go/dsig v1.0.0 h1:OE09s2r9Z81kxzJYRn07TFM9XA4akrUdoMwr0L8xj38= +github.com/lestrrat-go/dsig v1.0.0/go.mod h1:dEgoOYYEJvW6XGbLasr8TFcAxoWrKlbQvmJgCR0qkDo= +github.com/lestrrat-go/dsig-secp256k1 v1.0.0 h1:JpDe4Aybfl0soBvoVwjqDbp+9S1Y2OM7gcrVVMFPOzY= +github.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU= +github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= +github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/httprc/v3 v3.0.1 h1:3n7Es68YYGZb2Jf+k//llA4FTZMl3yCwIjFIk4ubevI= +github.com/lestrrat-go/httprc/v3 v3.0.1/go.mod 
h1:2uAvmbXE4Xq8kAUjVrZOq1tZVYYYs5iP62Cmtru00xk= +github.com/lestrrat-go/jwx/v3 v3.0.12 h1:p25r68Y4KrbBdYjIsQweYxq794CtGCzcrc5dGzJIRjg= +github.com/lestrrat-go/jwx/v3 v3.0.12/go.mod h1:HiUSaNmMLXgZ08OmGBaPVvoZQgJVOQphSrGr5zMamS8= +github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= +github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= +github.com/lestrrat-go/option/v2 v2.0.0 h1:XxrcaJESE1fokHy3FpaQ/cXW8ZsIdWcdFzzLOcID3Ss= +github.com/lestrrat-go/option/v2 v2.0.0/go.mod h1:oSySsmzMoR0iRzCDCaUfsCzxQHUEuhOViQObyy7S6Vg= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= +github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0= +github.com/segmentio/asm v1.2.1/go.mod 
h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= +github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod 
h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE= +go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM= +golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= +google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/backends/go-backend/internal/admin/handler.go b/backends/go-backend/internal/admin/handler.go new file mode 100644 index 0000000..4f75ab2 --- /dev/null +++ b/backends/go-backend/internal/admin/handler.go @@ -0,0 +1,208 @@ +// AngelaMos | 2026 +// handler.go + +package admin + +import ( + "context" + "database/sql" + "net/http" + "runtime" + + "github.com/go-chi/chi/v5" + "github.com/redis/go-redis/v9" + + "github.com/carterperez-dev/templates/go-backend/internal/core" +) + +type AuthService interface { + InvalidateAllSessions(ctx context.Context) error +} + +type Handler struct { + dbStats func() sql.DBStats + redisStats func() *redis.PoolStats + redisPing func(ctx context.Context) error + dbPing func(ctx context.Context) error + authSvc AuthService +} + +type HandlerConfig struct { + DBStats func() sql.DBStats + RedisStats func() 
*redis.PoolStats + RedisPing func(ctx context.Context) error + DBPing func(ctx context.Context) error + AuthSvc AuthService +} + +func NewHandler(cfg HandlerConfig) *Handler { + return &Handler{ + dbStats: cfg.DBStats, + redisStats: cfg.RedisStats, + redisPing: cfg.RedisPing, + dbPing: cfg.DBPing, + authSvc: cfg.AuthSvc, + } +} + +func (h *Handler) RegisterRoutes( + r chi.Router, + authenticator, adminOnly func(http.Handler) http.Handler, +) { + r.Route("/admin", func(r chi.Router) { + r.Use(authenticator) + r.Use(adminOnly) + + r.Get("/stats", h.GetSystemStats) + r.Get("/stats/db", h.GetDatabaseStats) + r.Get("/stats/redis", h.GetRedisStats) + r.Get("/stats/runtime", h.GetRuntimeStats) + }) +} + +func (h *Handler) GetSystemStats(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + dbHealthy := true + if h.dbPing != nil { + if err := h.dbPing(ctx); err != nil { + dbHealthy = false + } + } + + redisHealthy := true + if h.redisPing != nil { + if err := h.redisPing(ctx); err != nil { + redisHealthy = false + } + } + + var memStats runtime.MemStats + runtime.ReadMemStats(&memStats) + + response := SystemStatsResponse{ + Database: DatabaseStatus{ + Healthy: dbHealthy, + Stats: h.getDBStats(), + }, + Redis: RedisStatus{ + Healthy: redisHealthy, + Stats: h.getRedisStats(), + }, + Runtime: RuntimeStats{ + GoVersion: runtime.Version(), + NumGoroutine: runtime.NumGoroutine(), + NumCPU: runtime.NumCPU(), + MemAlloc: memStats.Alloc, + MemSys: memStats.Sys, + NumGC: memStats.NumGC, + }, + } + + core.OK(w, response) +} + +func (h *Handler) GetDatabaseStats(w http.ResponseWriter, r *http.Request) { + core.OK(w, h.getDBStats()) +} + +func (h *Handler) GetRedisStats(w http.ResponseWriter, r *http.Request) { + core.OK(w, h.getRedisStats()) +} + +func (h *Handler) GetRuntimeStats(w http.ResponseWriter, r *http.Request) { + var memStats runtime.MemStats + runtime.ReadMemStats(&memStats) + + response := RuntimeStats{ + GoVersion: runtime.Version(), + NumGoroutine: 
runtime.NumGoroutine(), + NumCPU: runtime.NumCPU(), + MemAlloc: memStats.Alloc, + MemSys: memStats.Sys, + NumGC: memStats.NumGC, + } + + core.OK(w, response) +} + +func (h *Handler) getDBStats() *DBPoolStats { + if h.dbStats == nil { + return nil + } + + stats := h.dbStats() + return &DBPoolStats{ + MaxOpenConnections: stats.MaxOpenConnections, + OpenConnections: stats.OpenConnections, + InUse: stats.InUse, + Idle: stats.Idle, + WaitCount: stats.WaitCount, + WaitDuration: stats.WaitDuration.String(), + MaxIdleClosed: stats.MaxIdleClosed, + MaxIdleTimeClosed: stats.MaxIdleTimeClosed, + MaxLifetimeClosed: stats.MaxLifetimeClosed, + } +} + +func (h *Handler) getRedisStats() *RedisPoolStats { + if h.redisStats == nil { + return nil + } + + stats := h.redisStats() + return &RedisPoolStats{ + Hits: stats.Hits, + Misses: stats.Misses, + Timeouts: stats.Timeouts, + TotalConns: stats.TotalConns, + IdleConns: stats.IdleConns, + StaleConns: stats.StaleConns, + } +} + +type SystemStatsResponse struct { + Database DatabaseStatus `json:"database"` + Redis RedisStatus `json:"redis"` + Runtime RuntimeStats `json:"runtime"` +} + +type DatabaseStatus struct { + Healthy bool `json:"healthy"` + Stats *DBPoolStats `json:"stats,omitempty"` +} + +type RedisStatus struct { + Healthy bool `json:"healthy"` + Stats *RedisPoolStats `json:"stats,omitempty"` +} + +type DBPoolStats struct { + MaxOpenConnections int `json:"max_open_connections"` + OpenConnections int `json:"open_connections"` + InUse int `json:"in_use"` + Idle int `json:"idle"` + WaitCount int64 `json:"wait_count"` + WaitDuration string `json:"wait_duration"` + MaxIdleClosed int64 `json:"max_idle_closed"` + MaxIdleTimeClosed int64 `json:"max_idle_time_closed"` + MaxLifetimeClosed int64 `json:"max_lifetime_closed"` +} + +type RedisPoolStats struct { + Hits uint32 `json:"hits"` + Misses uint32 `json:"misses"` + Timeouts uint32 `json:"timeouts"` + TotalConns uint32 `json:"total_conns"` + IdleConns uint32 `json:"idle_conns"` + 
StaleConns uint32 `json:"stale_conns"` +} + +type RuntimeStats struct { + GoVersion string `json:"go_version"` + NumGoroutine int `json:"num_goroutine"` + NumCPU int `json:"num_cpu"` + MemAlloc uint64 `json:"mem_alloc_bytes"` + MemSys uint64 `json:"mem_sys_bytes"` + NumGC uint32 `json:"num_gc"` +} diff --git a/backends/go-backend/internal/auth/dto.go b/backends/go-backend/internal/auth/dto.go new file mode 100644 index 0000000..9fc1cfa --- /dev/null +++ b/backends/go-backend/internal/auth/dto.go @@ -0,0 +1,62 @@ +// AngelaMos | 2026 +// dto.go + +package auth + +import ( + "time" +) + +type LoginRequest struct { + Email string `json:"email" validate:"required,email,max=255"` + Password string `json:"password" validate:"required,min=8,max=128"` +} + +type RegisterRequest struct { + Email string `json:"email" validate:"required,email,max=255"` + Password string `json:"password" validate:"required,min=8,max=128"` + Name string `json:"name" validate:"required,min=1,max=100"` +} + +type RefreshRequest struct { + RefreshToken string `json:"refresh_token" validate:"required"` +} + +type TokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + ExpiresAt time.Time `json:"expires_at"` +} + +type UserResponse struct { + ID string `json:"id"` + Email string `json:"email"` + Name string `json:"name"` + Role string `json:"role"` + Tier string `json:"tier"` + CreatedAt time.Time `json:"created_at"` +} + +type AuthResponse struct { + User UserResponse `json:"user"` + Tokens TokenResponse `json:"tokens"` +} + +type SessionInfo struct { + ID string `json:"id"` + UserAgent string `json:"user_agent"` + IPAddress string `json:"ip_address"` + CreatedAt time.Time `json:"created_at"` + ExpiresAt time.Time `json:"expires_at"` +} + +type SessionsResponse struct { + Sessions []SessionInfo `json:"sessions"` +} + +type ChangePasswordRequest struct { + 
CurrentPassword string `json:"current_password" validate:"required"` + NewPassword string `json:"new_password" validate:"required,min=8,max=128"` +} diff --git a/backends/go-backend/internal/auth/entity.go b/backends/go-backend/internal/auth/entity.go new file mode 100644 index 0000000..8ce8a4e --- /dev/null +++ b/backends/go-backend/internal/auth/entity.go @@ -0,0 +1,47 @@ +// AngelaMos | 2026 +// entity.go + +package auth + +import ( + "time" +) + +type RefreshToken struct { + ID string `db:"id"` + UserID string `db:"user_id"` + TokenHash string `db:"token_hash"` + FamilyID string `db:"family_id"` + ExpiresAt time.Time `db:"expires_at"` + CreatedAt time.Time `db:"created_at"` + IsUsed bool `db:"is_used"` + UsedAt *time.Time `db:"used_at"` + RevokedAt *time.Time `db:"revoked_at"` + ReplacedByID *string `db:"replaced_by_id"` + UserAgent string `db:"user_agent"` + IPAddress string `db:"ip_address"` +} + +func (t *RefreshToken) IsExpired() bool { + return time.Now().After(t.ExpiresAt) +} + +func (t *RefreshToken) IsRevoked() bool { + return t.RevokedAt != nil +} + +func (t *RefreshToken) IsValid() bool { + return !t.IsExpired() && !t.IsRevoked() && !t.IsUsed +} + +func (t *RefreshToken) MarkAsUsed(replacedByID string) { + now := time.Now() + t.IsUsed = true + t.UsedAt = &now + t.ReplacedByID = &replacedByID +} + +func (t *RefreshToken) Revoke() { + now := time.Now() + t.RevokedAt = &now +} diff --git a/backends/go-backend/internal/auth/handler.go b/backends/go-backend/internal/auth/handler.go new file mode 100644 index 0000000..16b3b6b --- /dev/null +++ b/backends/go-backend/internal/auth/handler.go @@ -0,0 +1,316 @@ +// AngelaMos | 2026 +// handler.go + +package auth + +import ( + "encoding/json" + "errors" + "net" + "net/http" + "strings" + + "github.com/go-chi/chi/v5" + "github.com/go-playground/validator/v10" + + "github.com/carterperez-dev/templates/go-backend/internal/core" + "github.com/carterperez-dev/templates/go-backend/internal/middleware" +) + +type 
Handler struct { + service *Service + validator *validator.Validate +} + +func NewHandler(service *Service) *Handler { + return &Handler{ + service: service, + validator: validator.New(validator.WithRequiredStructEnabled()), + } +} + +func (h *Handler) RegisterRoutes( + r chi.Router, + authenticator func(http.Handler) http.Handler, +) { + r.Route("/auth", func(r chi.Router) { + r.Post("/login", h.Login) + r.Post("/register", h.Register) + r.Post("/refresh", h.Refresh) + + r.Group(func(r chi.Router) { + r.Use(authenticator) + r.Get("/me", h.GetMe) + r.Post("/logout", h.Logout) + r.Post("/logout-all", h.LogoutAll) + r.Get("/sessions", h.GetSessions) + r.Delete("/sessions/{sessionID}", h.RevokeSession) + r.Post("/change-password", h.ChangePassword) + }) + }) +} + +func (h *Handler) Login(w http.ResponseWriter, r *http.Request) { + var req LoginRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + core.BadRequest(w, "invalid request body") + return + } + + if err := h.validator.Struct(req); err != nil { + core.BadRequest(w, core.FormatValidationError(err)) + return + } + + userAgent := r.UserAgent() + ipAddress := extractIPAddress(r) + + resp, err := h.service.Login(r.Context(), req, userAgent, ipAddress) + if err != nil { + if errors.Is(err, ErrInvalidCredentials) { + core.JSONError( + w, + core.UnauthorizedError("invalid email or password"), + ) + return + } + core.InternalServerError(w, err) + return + } + + core.OK(w, resp) +} + +func (h *Handler) Register(w http.ResponseWriter, r *http.Request) { + var req RegisterRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + core.BadRequest(w, "invalid request body") + return + } + + if err := h.validator.Struct(req); err != nil { + core.BadRequest(w, core.FormatValidationError(err)) + return + } + + userAgent := r.UserAgent() + ipAddress := extractIPAddress(r) + + resp, err := h.service.Register(r.Context(), req, userAgent, ipAddress) + if err != nil { + if errors.Is(err, 
ErrEmailExists) { + core.JSONError(w, core.DuplicateError("email")) + return + } + core.InternalServerError(w, err) + return + } + + core.Created(w, resp) +} + +func (h *Handler) Refresh(w http.ResponseWriter, r *http.Request) { + var req RefreshRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + core.BadRequest(w, "invalid request body") + return + } + + if err := h.validator.Struct(req); err != nil { + core.BadRequest(w, core.FormatValidationError(err)) + return + } + + userAgent := r.UserAgent() + ipAddress := extractIPAddress(r) + + resp, err := h.service.Refresh( + r.Context(), + req.RefreshToken, + userAgent, + ipAddress, + ) + if err != nil { + if errors.Is(err, ErrTokenReuse) { + core.JSONError(w, core.NewAppError( + core.ErrTokenRevoked, + "security alert: token reuse detected, all sessions revoked", + http.StatusUnauthorized, + "TOKEN_REUSE_DETECTED", + )) + return + } + if errors.Is(err, core.ErrTokenExpired) { + core.JSONError(w, core.TokenExpiredError()) + return + } + if errors.Is(err, core.ErrTokenRevoked) { + core.JSONError(w, core.TokenRevokedError()) + return + } + if errors.Is(err, core.ErrTokenInvalid) { + core.JSONError(w, core.TokenInvalidError()) + return + } + core.InternalServerError(w, err) + return + } + + core.OK(w, resp) +} + +func (h *Handler) Logout(w http.ResponseWriter, r *http.Request) { + userID := middleware.GetUserID(r.Context()) + if userID == "" { + core.Unauthorized(w, "") + return + } + + var req RefreshRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + core.BadRequest(w, "invalid request body") + return + } + + if err := h.service.Logout(r.Context(), req.RefreshToken, userID); err != nil { + if errors.Is(err, core.ErrForbidden) { + core.Forbidden(w, "cannot revoke another user's token") + return + } + core.InternalServerError(w, err) + return + } + + core.NoContent(w) +} + +func (h *Handler) LogoutAll(w http.ResponseWriter, r *http.Request) { + userID := 
middleware.GetUserID(r.Context()) + if userID == "" { + core.Unauthorized(w, "") + return + } + + if err := h.service.LogoutAll(r.Context(), userID); err != nil { + core.InternalServerError(w, err) + return + } + + core.NoContent(w) +} + +func (h *Handler) GetSessions(w http.ResponseWriter, r *http.Request) { + userID := middleware.GetUserID(r.Context()) + if userID == "" { + core.Unauthorized(w, "") + return + } + + sessions, err := h.service.GetActiveSessions(r.Context(), userID) + if err != nil { + core.InternalServerError(w, err) + return + } + + core.OK(w, SessionsResponse{Sessions: sessions}) +} + +func (h *Handler) RevokeSession(w http.ResponseWriter, r *http.Request) { + userID := middleware.GetUserID(r.Context()) + if userID == "" { + core.Unauthorized(w, "") + return + } + + sessionID := chi.URLParam(r, "sessionID") + if sessionID == "" { + core.BadRequest(w, "session ID required") + return + } + + if err := h.service.RevokeSession(r.Context(), userID, sessionID); err != nil { + if errors.Is(err, core.ErrNotFound) { + core.NotFound(w, "session") + return + } + if errors.Is(err, core.ErrForbidden) { + core.Forbidden(w, "cannot revoke another user's session") + return + } + core.InternalServerError(w, err) + return + } + + core.NoContent(w) +} + +func (h *Handler) ChangePassword(w http.ResponseWriter, r *http.Request) { + userID := middleware.GetUserID(r.Context()) + if userID == "" { + core.Unauthorized(w, "") + return + } + + var req ChangePasswordRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + core.BadRequest(w, "invalid request body") + return + } + + if err := h.validator.Struct(req); err != nil { + core.BadRequest(w, core.FormatValidationError(err)) + return + } + + if err := h.service.ChangePassword(r.Context(), userID, req.CurrentPassword, req.NewPassword); err != nil { + if errors.Is(err, ErrInvalidCredentials) { + core.JSONError( + w, + core.UnauthorizedError("current password is incorrect"), + ) + return + } + 
core.InternalServerError(w, err) + return + } + + core.NoContent(w) +} + +func (h *Handler) GetMe(w http.ResponseWriter, r *http.Request) { + userID := middleware.GetUserID(r.Context()) + if userID == "" { + core.Unauthorized(w, "") + return + } + + user, err := h.service.GetCurrentUser(r.Context(), userID) + if err != nil { + if errors.Is(err, core.ErrNotFound) { + core.NotFound(w, "user") + return + } + core.InternalServerError(w, err) + return + } + + core.OK(w, user) +} + +func extractIPAddress(r *http.Request) string { + if xff := r.Header.Get("X-Forwarded-For"); xff != "" { + ips := strings.Split(xff, ",") + return strings.TrimSpace(ips[len(ips)-1]) + } + + if xri := r.Header.Get("X-Real-IP"); xri != "" { + return xri + } + + ip, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + return r.RemoteAddr + } + + return ip +} diff --git a/backends/go-backend/internal/auth/jwt.go b/backends/go-backend/internal/auth/jwt.go new file mode 100644 index 0000000..026cf05 --- /dev/null +++ b/backends/go-backend/internal/auth/jwt.go @@ -0,0 +1,295 @@ +// AngelaMos | 2026 +// jwt.go + +package auth + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "encoding/json" + "fmt" + "net/http" + "os" + "strings" + "time" + + "github.com/google/uuid" + "github.com/lestrrat-go/jwx/v3/jwa" + "github.com/lestrrat-go/jwx/v3/jwk" + "github.com/lestrrat-go/jwx/v3/jwt" + + "github.com/carterperez-dev/templates/go-backend/internal/config" + "github.com/carterperez-dev/templates/go-backend/internal/core" + "github.com/carterperez-dev/templates/go-backend/internal/middleware" +) + +type JWTManager struct { + privateKey jwk.Key + publicKey jwk.Key + publicJWKS jwk.Set + config config.JWTConfig +} + +func NewJWTManager(cfg config.JWTConfig) (*JWTManager, error) { + privateKeyPEM, err := os.ReadFile(cfg.PrivateKeyPath) + if err != nil { + return nil, fmt.Errorf("read private key: %w", err) + } + + privateKey, err := jwk.ParseKey(privateKeyPEM, 
jwk.WithPEM(true)) + if err != nil { + return nil, fmt.Errorf("parse private key: %w", err) + } + + if setErr := privateKey.Set(jwk.AlgorithmKey, jwa.ES256()); setErr != nil { + return nil, fmt.Errorf("set algorithm: %w", setErr) + } + + keyID := uuid.New().String()[:8] + if setErr := privateKey.Set(jwk.KeyIDKey, keyID); setErr != nil { + return nil, fmt.Errorf("set key id: %w", setErr) + } + + publicKey, err := privateKey.PublicKey() + if err != nil { + return nil, fmt.Errorf("derive public key: %w", err) + } + + if setErr := publicKey.Set(jwk.KeyUsageKey, "sig"); setErr != nil { + return nil, fmt.Errorf("set key usage: %w", setErr) + } + + publicJWKS := jwk.NewSet() + if addErr := publicJWKS.AddKey(publicKey); addErr != nil { + return nil, fmt.Errorf("add key to set: %w", addErr) + } + + return &JWTManager{ + privateKey: privateKey, + publicKey: publicKey, + publicJWKS: publicJWKS, + config: cfg, + }, nil +} + +func GenerateKeyPair(privateKeyPath, publicKeyPath string) error { + privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return fmt.Errorf("generate key: %w", err) + } + + jwkPrivate, err := jwk.Import(privateKey) + if err != nil { + return fmt.Errorf("import private key: %w", err) + } + + keyID := uuid.New().String()[:8] + if setErr := jwkPrivate.Set(jwk.KeyIDKey, keyID); setErr != nil { + return fmt.Errorf("set key id: %w", setErr) + } + if setErr := jwkPrivate.Set(jwk.AlgorithmKey, jwa.ES256()); setErr != nil { + return fmt.Errorf("set algorithm: %w", setErr) + } + + privatePEM, err := jwk.Pem(jwkPrivate) + if err != nil { + return fmt.Errorf("encode private key: %w", err) + } + + if writeErr := os.WriteFile(privateKeyPath, privatePEM, 0o600); writeErr != nil { + return fmt.Errorf("write private key: %w", writeErr) + } + + jwkPublic, err := jwkPrivate.PublicKey() + if err != nil { + return fmt.Errorf("derive public key: %w", err) + } + + publicPEM, err := jwk.Pem(jwkPublic) + if err != nil { + return fmt.Errorf("encode 
public key: %w", err) + } + + //nolint:gosec // G306: public key is intentionally world-readable + if writeErr := os.WriteFile(publicKeyPath, publicPEM, 0o644); writeErr != nil { + return fmt.Errorf("write public key: %w", writeErr) + } + + return nil +} + +type AccessTokenClaims struct { + UserID string `json:"sub"` + Role string `json:"role"` + Tier string `json:"tier"` + TokenVersion int `json:"token_version"` +} + +func (m *JWTManager) CreateAccessToken( + claims AccessTokenClaims, +) (string, error) { + now := time.Now() + + token, err := jwt.NewBuilder(). + JwtID(uuid.New().String()). + Issuer(m.config.Issuer). + Audience([]string{m.config.Audience}). + Subject(claims.UserID). + IssuedAt(now). + Expiration(now.Add(m.config.AccessTokenExpire)). + NotBefore(now). + Claim("role", claims.Role). + Claim("tier", claims.Tier). + Claim("token_version", claims.TokenVersion). + Claim("type", "access"). + Build() + if err != nil { + return "", fmt.Errorf("build token: %w", err) + } + + signed, err := jwt.Sign(token, jwt.WithKey(jwa.ES256(), m.privateKey)) + if err != nil { + return "", fmt.Errorf("sign token: %w", err) + } + + return string(signed), nil +} + +func (m *JWTManager) VerifyAccessToken( + ctx context.Context, + tokenString string, +) (*middleware.AccessTokenClaims, error) { + token, err := jwt.Parse( + []byte(tokenString), + jwt.WithKey(jwa.ES256(), m.publicKey), + jwt.WithValidate(true), + jwt.WithIssuer(m.config.Issuer), + jwt.WithAudience(m.config.Audience), + ) + if err != nil { + if isTokenExpiredError(err) { + return nil, fmt.Errorf("verify token: %w", core.ErrTokenExpired) + } + return nil, fmt.Errorf("verify token: %w", core.ErrTokenInvalid) + } + + var tokenType string + if err := token.Get("type", &tokenType); err != nil || + tokenType != "access" { + return nil, fmt.Errorf( + "verify token: invalid token type: %w", + core.ErrTokenInvalid, + ) + } + + subject, ok := token.Subject() + if !ok || subject == "" { + return nil, fmt.Errorf( + "verify 
token: missing subject: %w", + core.ErrTokenInvalid, + ) + } + + var roleStr string + if err := token.Get("role", &roleStr); err != nil { + return nil, fmt.Errorf( + "verify token: missing role claim: %w", + core.ErrTokenInvalid, + ) + } + + var tierStr string + if err := token.Get("tier", &tierStr); err != nil { + return nil, fmt.Errorf( + "verify token: missing tier claim: %w", + core.ErrTokenInvalid, + ) + } + + var versionFloat float64 + if err := token.Get("token_version", &versionFloat); err != nil { + return nil, fmt.Errorf( + "verify token: missing token_version claim: %w", + core.ErrTokenInvalid, + ) + } + + return &middleware.AccessTokenClaims{ + UserID: subject, + Role: roleStr, + Tier: tierStr, + TokenVersion: int(versionFloat), + }, nil +} + +func isTokenExpiredError(err error) bool { + if err == nil { + return false + } + errStr := err.Error() + return strings.Contains(errStr, "exp") && + strings.Contains(errStr, "not satisfied") +} + +func (m *JWTManager) GetJWKSHandler() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Cache-Control", "public, max-age=3600") + + if err := json.NewEncoder(w).Encode(m.publicJWKS); err != nil { + http.Error( + w, + "Internal Server Error", + http.StatusInternalServerError, + ) + return + } + } +} + +func (m *JWTManager) GetPublicKey() jwk.Key { + return m.publicKey +} + +func (m *JWTManager) GetKeyID() string { + var kid string + //nolint:errcheck // key ID always set during NewJWTManager init + _ = m.privateKey.Get(jwk.KeyIDKey, &kid) + return kid +} + +type RefreshTokenData struct { + Token string + Hash string + ExpiresAt time.Time + FamilyID string +} + +func (m *JWTManager) CreateRefreshToken( + userID, familyID string, +) (*RefreshTokenData, error) { + token, err := core.GenerateRefreshToken() + if err != nil { + return nil, fmt.Errorf("generate refresh token: %w", err) + } + + hash := core.HashToken(token) + 
expiresAt := time.Now().Add(m.config.RefreshTokenExpire) + + if familyID == "" { + familyID = uuid.New().String() + } + + return &RefreshTokenData{ + Token: token, + Hash: hash, + ExpiresAt: expiresAt, + FamilyID: familyID, + }, nil +} + +func (m *JWTManager) VerifyRefreshTokenHash(token, storedHash string) bool { + return core.CompareTokenHash(token, storedHash) +} diff --git a/backends/go-backend/internal/auth/repository.go b/backends/go-backend/internal/auth/repository.go new file mode 100644 index 0000000..2abb036 --- /dev/null +++ b/backends/go-backend/internal/auth/repository.go @@ -0,0 +1,236 @@ +// AngelaMos | 2026 +// repository.go + +package auth + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + "github.com/carterperez-dev/templates/go-backend/internal/core" +) + +type Repository interface { + Create(ctx context.Context, token *RefreshToken) error + FindByHash(ctx context.Context, tokenHash string) (*RefreshToken, error) + FindByID(ctx context.Context, id string) (*RefreshToken, error) + MarkAsUsed(ctx context.Context, id, replacedByID string) error + RevokeByID(ctx context.Context, id string) error + RevokeByFamilyID(ctx context.Context, familyID string) error + RevokeAllForUser(ctx context.Context, userID string) error + GetActiveSessionsForUser( + ctx context.Context, + userID string, + ) ([]RefreshToken, error) + DeleteExpired(ctx context.Context) (int64, error) +} + +type repository struct { + db core.DBTX +} + +func NewRepository(db core.DBTX) Repository { + return &repository{db: db} +} + +func (r *repository) Create(ctx context.Context, token *RefreshToken) error { + query := ` + INSERT INTO refresh_tokens ( + id, user_id, token_hash, family_id, expires_at, + user_agent, ip_address + ) VALUES ( + $1, $2, $3, $4, $5, $6, $7 + ) + RETURNING created_at` + + err := r.db.GetContext(ctx, &token.CreatedAt, query, + token.ID, + token.UserID, + token.TokenHash, + token.FamilyID, + token.ExpiresAt, + token.UserAgent, + 
token.IPAddress, + ) + if err != nil { + return fmt.Errorf("create refresh token: %w", err) + } + + return nil +} + +func (r *repository) FindByHash( + ctx context.Context, + tokenHash string, +) (*RefreshToken, error) { + query := ` + SELECT + id, user_id, token_hash, family_id, expires_at, created_at, + is_used, used_at, revoked_at, replaced_by_id, user_agent, ip_address + FROM refresh_tokens + WHERE token_hash = $1` + + var token RefreshToken + err := r.db.GetContext(ctx, &token, query, tokenHash) + if errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("find refresh token: %w", core.ErrNotFound) + } + if err != nil { + return nil, fmt.Errorf("find refresh token: %w", err) + } + + return &token, nil +} + +func (r *repository) FindByID( + ctx context.Context, + id string, +) (*RefreshToken, error) { + query := ` + SELECT + id, user_id, token_hash, family_id, expires_at, created_at, + is_used, used_at, revoked_at, replaced_by_id, user_agent, ip_address + FROM refresh_tokens + WHERE id = $1` + + var token RefreshToken + err := r.db.GetContext(ctx, &token, query, id) + if errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("find refresh token: %w", core.ErrNotFound) + } + if err != nil { + return nil, fmt.Errorf("find refresh token: %w", err) + } + + return &token, nil +} + +func (r *repository) MarkAsUsed( + ctx context.Context, + id, replacedByID string, +) error { + query := ` + UPDATE refresh_tokens + SET is_used = true, used_at = NOW(), replaced_by_id = $2 + WHERE id = $1 AND is_used = false` + + result, err := r.db.ExecContext(ctx, query, id, replacedByID) + if err != nil { + return fmt.Errorf("mark refresh token as used: %w", err) + } + + rows, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("mark refresh token as used: %w", err) + } + + if rows == 0 { + return fmt.Errorf("mark refresh token as used: %w", core.ErrNotFound) + } + + return nil +} + +func (r *repository) RevokeByID(ctx context.Context, id string) error { + query := ` 
+ UPDATE refresh_tokens + SET revoked_at = NOW() + WHERE id = $1 AND revoked_at IS NULL` + + result, err := r.db.ExecContext(ctx, query, id) + if err != nil { + return fmt.Errorf("revoke refresh token: %w", err) + } + + rows, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("revoke refresh token: %w", err) + } + + if rows == 0 { + return fmt.Errorf("revoke refresh token: %w", core.ErrNotFound) + } + + return nil +} + +func (r *repository) RevokeByFamilyID( + ctx context.Context, + familyID string, +) error { + query := ` + UPDATE refresh_tokens + SET revoked_at = NOW() + WHERE family_id = $1 AND revoked_at IS NULL` + + _, err := r.db.ExecContext(ctx, query, familyID) + if err != nil { + return fmt.Errorf("revoke token family: %w", err) + } + + return nil +} + +func (r *repository) RevokeAllForUser( + ctx context.Context, + userID string, +) error { + query := ` + UPDATE refresh_tokens + SET revoked_at = NOW() + WHERE user_id = $1 AND revoked_at IS NULL` + + _, err := r.db.ExecContext(ctx, query, userID) + if err != nil { + return fmt.Errorf("revoke all user tokens: %w", err) + } + + return nil +} + +func (r *repository) GetActiveSessionsForUser( + ctx context.Context, + userID string, +) ([]RefreshToken, error) { + query := ` + SELECT + id, user_id, token_hash, family_id, expires_at, created_at, + is_used, used_at, revoked_at, replaced_by_id, user_agent, ip_address + FROM refresh_tokens + WHERE user_id = $1 + AND revoked_at IS NULL + AND is_used = false + AND expires_at > NOW() + ORDER BY created_at DESC` + + var tokens []RefreshToken + err := r.db.SelectContext(ctx, &tokens, query, userID) + if err != nil { + return nil, fmt.Errorf("get active sessions: %w", err) + } + + return tokens, nil +} + +func (r *repository) DeleteExpired(ctx context.Context) (int64, error) { + query := ` + DELETE FROM refresh_tokens + WHERE expires_at < $1` + + cutoff := time.Now().Add(-24 * time.Hour) + + result, err := r.db.ExecContext(ctx, query, cutoff) + if err != 
nil { + return 0, fmt.Errorf("delete expired tokens: %w", err) + } + + rows, err := result.RowsAffected() + if err != nil { + return 0, fmt.Errorf("delete expired tokens: %w", err) + } + + return rows, nil +} diff --git a/backends/go-backend/internal/auth/service.go b/backends/go-backend/internal/auth/service.go new file mode 100644 index 0000000..3629051 --- /dev/null +++ b/backends/go-backend/internal/auth/service.go @@ -0,0 +1,411 @@ +// AngelaMos | 2026 +// service.go + +package auth + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/google/uuid" + "github.com/redis/go-redis/v9" + + "github.com/carterperez-dev/templates/go-backend/internal/core" +) + +var ( + ErrInvalidCredentials = errors.New("invalid credentials") + ErrTokenReuse = errors.New("token reuse detected") + ErrEmailExists = errors.New("email already exists") +) + +type UserInfo struct { + ID string + Email string + Name string + PasswordHash string + Role string + Tier string + TokenVersion int +} + +type UserProvider interface { + GetByEmail(ctx context.Context, email string) (*UserInfo, error) + GetByID(ctx context.Context, id string) (*UserInfo, error) + Create( + ctx context.Context, + email, passwordHash, name string, + ) (*UserInfo, error) + IncrementTokenVersion(ctx context.Context, userID string) error + UpdatePassword(ctx context.Context, userID, passwordHash string) error +} + +type Service struct { + repo Repository + jwt *JWTManager + userProvider UserProvider + redis *redis.Client + blacklistTTL time.Duration +} + +func NewService( + repo Repository, + jwt *JWTManager, + userProvider UserProvider, + redisClient *redis.Client, +) *Service { + return &Service{ + repo: repo, + jwt: jwt, + userProvider: userProvider, + redis: redisClient, + blacklistTTL: 15 * time.Minute, + } +} + +func (s *Service) Login( + ctx context.Context, + req LoginRequest, + userAgent, ipAddress string, +) (*AuthResponse, error) { + user, err := s.userProvider.GetByEmail(ctx, req.Email) + if err 
!= nil { + if errors.Is(err, core.ErrNotFound) { + //nolint:errcheck // timing attack prevention - always verify to prevent enumeration + _, _, _ = core.VerifyPasswordTimingSafe(req.Password, nil) + return nil, ErrInvalidCredentials + } + return nil, fmt.Errorf("get user: %w", err) + } + + valid, newHash, err := core.VerifyPasswordTimingSafe( + req.Password, + &user.PasswordHash, + ) + if err != nil { + return nil, fmt.Errorf("verify password: %w", err) + } + + if !valid { + return nil, ErrInvalidCredentials + } + + if newHash != "" { + //nolint:errcheck // best-effort rehash upgrade + _ = s.userProvider.UpdatePassword(ctx, user.ID, newHash) + } + + return s.createAuthResponse(ctx, user, userAgent, ipAddress, "", nil) +} + +func (s *Service) Register( + ctx context.Context, + req RegisterRequest, + userAgent, ipAddress string, +) (*AuthResponse, error) { + passwordHash, err := core.HashPassword(req.Password) + if err != nil { + return nil, fmt.Errorf("hash password: %w", err) + } + + user, err := s.userProvider.Create(ctx, req.Email, passwordHash, req.Name) + if err != nil { + if errors.Is(err, core.ErrDuplicateKey) { + return nil, ErrEmailExists + } + return nil, fmt.Errorf("create user: %w", err) + } + + return s.createAuthResponse(ctx, user, userAgent, ipAddress, "", nil) +} + +func (s *Service) Refresh( + ctx context.Context, + refreshToken, userAgent, ipAddress string, +) (*AuthResponse, error) { + tokenHash := core.HashToken(refreshToken) + + storedToken, err := s.repo.FindByHash(ctx, tokenHash) + if err != nil { + if errors.Is(err, core.ErrNotFound) { + return nil, fmt.Errorf("refresh: %w", core.ErrTokenInvalid) + } + return nil, fmt.Errorf("find token: %w", err) + } + + if storedToken.IsUsed { + //nolint:errcheck // security revocation continues regardless + _ = s.repo.RevokeByFamilyID(ctx, storedToken.FamilyID) + return nil, ErrTokenReuse + } + + if !storedToken.IsValid() { + if storedToken.IsRevoked() { + return nil, fmt.Errorf("refresh: %w", 
core.ErrTokenRevoked) + } + return nil, fmt.Errorf("refresh: %w", core.ErrTokenExpired) + } + + user, err := s.userProvider.GetByID(ctx, storedToken.UserID) + if err != nil { + return nil, fmt.Errorf("get user: %w", err) + } + + return s.createAuthResponse( + ctx, + user, + userAgent, + ipAddress, + storedToken.FamilyID, + &storedToken.ID, + ) +} + +func (s *Service) Logout( + ctx context.Context, + refreshToken, userID string, +) error { + tokenHash := core.HashToken(refreshToken) + + storedToken, err := s.repo.FindByHash(ctx, tokenHash) + if err != nil { + if errors.Is(err, core.ErrNotFound) { + return nil + } + return fmt.Errorf("find token: %w", err) + } + + if storedToken.UserID != userID { + return fmt.Errorf("logout: %w", core.ErrForbidden) + } + + if err := s.repo.RevokeByID(ctx, storedToken.ID); err != nil && + !errors.Is(err, core.ErrNotFound) { + return fmt.Errorf("revoke token: %w", err) + } + + return nil +} + +func (s *Service) LogoutAll(ctx context.Context, userID string) error { + if err := s.repo.RevokeAllForUser(ctx, userID); err != nil { + return fmt.Errorf("revoke all tokens: %w", err) + } + + if err := s.userProvider.IncrementTokenVersion(ctx, userID); err != nil { + return fmt.Errorf("increment token version: %w", err) + } + + return nil +} + +func (s *Service) RevokeAccessToken( + ctx context.Context, + jti string, + expiresAt time.Time, +) error { + key := "blacklist:" + jti + ttl := time.Until(expiresAt) + + if ttl <= 0 { + return nil + } + + if err := s.redis.Set(ctx, key, "1", ttl).Err(); err != nil { + return fmt.Errorf("blacklist token: %w", err) + } + + return nil +} + +func (s *Service) IsAccessTokenBlacklisted( + ctx context.Context, + jti string, +) (bool, error) { + key := "blacklist:" + jti + + exists, err := s.redis.Exists(ctx, key).Result() + if err != nil { + return false, fmt.Errorf("check blacklist: %w", err) + } + + return exists > 0, nil +} + +func (s *Service) GetActiveSessions( + ctx context.Context, + userID string, +) 
([]SessionInfo, error) { + tokens, err := s.repo.GetActiveSessionsForUser(ctx, userID) + if err != nil { + return nil, fmt.Errorf("get sessions: %w", err) + } + + sessions := make([]SessionInfo, 0, len(tokens)) + for _, t := range tokens { + sessions = append(sessions, SessionInfo{ + ID: t.ID, + UserAgent: t.UserAgent, + IPAddress: t.IPAddress, + CreatedAt: t.CreatedAt, + ExpiresAt: t.ExpiresAt, + }) + } + + return sessions, nil +} + +func (s *Service) RevokeSession( + ctx context.Context, + userID, sessionID string, +) error { + token, err := s.repo.FindByID(ctx, sessionID) + if err != nil { + return fmt.Errorf("find session: %w", err) + } + + if token.UserID != userID { + return fmt.Errorf("revoke session: %w", core.ErrForbidden) + } + + if err := s.repo.RevokeByID(ctx, sessionID); err != nil { + return fmt.Errorf("revoke session: %w", err) + } + + return nil +} + +func (s *Service) ChangePassword( + ctx context.Context, + userID, currentPassword, newPassword string, +) error { + user, err := s.userProvider.GetByID(ctx, userID) + if err != nil { + return fmt.Errorf("get user: %w", err) + } + + valid, _, err := core.VerifyPasswordWithRehash( + currentPassword, + user.PasswordHash, + ) + if err != nil { + return fmt.Errorf("verify password: %w", err) + } + + if !valid { + return ErrInvalidCredentials + } + + newHash, err := core.HashPassword(newPassword) + if err != nil { + return fmt.Errorf("hash password: %w", err) + } + + if err := s.userProvider.UpdatePassword(ctx, userID, newHash); err != nil { + return fmt.Errorf("update password: %w", err) + } + + if err := s.LogoutAll(ctx, userID); err != nil { + return fmt.Errorf("logout all: %w", err) + } + + return nil +} + +func (s *Service) ValidateTokenVersion( + ctx context.Context, + userID string, + tokenVersion int, +) error { + user, err := s.userProvider.GetByID(ctx, userID) + if err != nil { + return fmt.Errorf("get user: %w", err) + } + + if tokenVersion < user.TokenVersion { + return fmt.Errorf("validate 
token version: %w", core.ErrTokenRevoked) + } + + return nil +} + +func (s *Service) GetCurrentUser( + ctx context.Context, + userID string, +) (*UserResponse, error) { + user, err := s.userProvider.GetByID(ctx, userID) + if err != nil { + return nil, err + } + + return &UserResponse{ + ID: user.ID, + Email: user.Email, + Name: user.Name, + Role: user.Role, + Tier: user.Tier, + }, nil +} + +func (s *Service) createAuthResponse( + ctx context.Context, + user *UserInfo, + userAgent, ipAddress, familyID string, + oldTokenID *string, +) (*AuthResponse, error) { + accessToken, err := s.jwt.CreateAccessToken(AccessTokenClaims{ + UserID: user.ID, + Role: user.Role, + Tier: user.Tier, + TokenVersion: user.TokenVersion, + }) + if err != nil { + return nil, fmt.Errorf("create access token: %w", err) + } + + refreshData, err := s.jwt.CreateRefreshToken(user.ID, familyID) + if err != nil { + return nil, fmt.Errorf("create refresh token: %w", err) + } + + newTokenID := uuid.New().String() + + refreshTokenEntity := &RefreshToken{ + ID: newTokenID, + UserID: user.ID, + TokenHash: refreshData.Hash, + FamilyID: refreshData.FamilyID, + ExpiresAt: refreshData.ExpiresAt, + UserAgent: userAgent, + IPAddress: ipAddress, + } + + if err := s.repo.Create(ctx, refreshTokenEntity); err != nil { + return nil, fmt.Errorf("store refresh token: %w", err) + } + + if oldTokenID != nil { + //nolint:errcheck // best-effort token chain tracking + _ = s.repo.MarkAsUsed(ctx, *oldTokenID, newTokenID) + } + + return &AuthResponse{ + User: UserResponse{ + ID: user.ID, + Email: user.Email, + Name: user.Name, + Role: user.Role, + Tier: user.Tier, + CreatedAt: time.Now(), + }, + Tokens: TokenResponse{ + AccessToken: accessToken, + RefreshToken: refreshData.Token, + TokenType: "Bearer", + ExpiresIn: int(15 * time.Minute / time.Second), + ExpiresAt: time.Now().Add(15 * time.Minute), + }, + }, nil +} diff --git a/backends/go-backend/internal/config/config.go b/backends/go-backend/internal/config/config.go new 
file mode 100644 index 0000000..ff266e5 --- /dev/null +++ b/backends/go-backend/internal/config/config.go @@ -0,0 +1,302 @@ +// AngelaMos | 2026 +// config.go + +package config + +import ( + "fmt" + "sync" + "time" + + "github.com/knadh/koanf/parsers/yaml" + "github.com/knadh/koanf/providers/env" + "github.com/knadh/koanf/providers/file" + "github.com/knadh/koanf/v2" +) + +type Config struct { + App AppConfig `koanf:"app"` + Server ServerConfig `koanf:"server"` + Database DatabaseConfig `koanf:"database"` + Redis RedisConfig `koanf:"redis"` + JWT JWTConfig `koanf:"jwt"` + RateLimit RateLimitConfig `koanf:"rate_limit"` + CORS CORSConfig `koanf:"cors"` + Log LogConfig `koanf:"log"` + Otel OtelConfig `koanf:"otel"` +} + +type AppConfig struct { + Name string `koanf:"name"` + Version string `koanf:"version"` + Environment string `koanf:"environment"` +} + +type ServerConfig struct { + Host string `koanf:"host"` + Port int `koanf:"port"` + ReadTimeout time.Duration `koanf:"read_timeout"` + WriteTimeout time.Duration `koanf:"write_timeout"` + IdleTimeout time.Duration `koanf:"idle_timeout"` + ShutdownTimeout time.Duration `koanf:"shutdown_timeout"` +} + +type DatabaseConfig struct { + URL string `koanf:"url"` + MaxOpenConns int `koanf:"max_open_conns"` + MaxIdleConns int `koanf:"max_idle_conns"` + ConnMaxLifetime time.Duration `koanf:"conn_max_lifetime"` + ConnMaxIdleTime time.Duration `koanf:"conn_max_idle_time"` +} + +type RedisConfig struct { + URL string `koanf:"url"` + PoolSize int `koanf:"pool_size"` + MinIdleConns int `koanf:"min_idle_conns"` +} + +type JWTConfig struct { + PrivateKeyPath string `koanf:"private_key_path"` + PublicKeyPath string `koanf:"public_key_path"` + AccessTokenExpire time.Duration `koanf:"access_token_expire"` + RefreshTokenExpire time.Duration `koanf:"refresh_token_expire"` + Issuer string `koanf:"issuer"` + Audience string `koanf:"audience"` +} + +type RateLimitConfig struct { + Requests int `koanf:"requests"` + Window time.Duration 
`koanf:"window"` + Burst int `koanf:"burst"` +} + +type CORSConfig struct { + AllowedOrigins []string `koanf:"allowed_origins"` + AllowedMethods []string `koanf:"allowed_methods"` + AllowedHeaders []string `koanf:"allowed_headers"` + AllowCredentials bool `koanf:"allow_credentials"` + MaxAge int `koanf:"max_age"` +} + +type LogConfig struct { + Level string `koanf:"level"` + Format string `koanf:"format"` +} + +type OtelConfig struct { + Endpoint string `koanf:"endpoint"` + ServiceName string `koanf:"service_name"` + Enabled bool `koanf:"enabled"` + Insecure bool `koanf:"insecure"` + SampleRate float64 `koanf:"sample_rate"` +} + +var ( + cfg *Config + once sync.Once +) + +func Load(configPath string) (*Config, error) { + var loadErr error + + once.Do(func() { + k := koanf.New(".") + + if err := loadDefaults(k); err != nil { + loadErr = fmt.Errorf("load defaults: %w", err) + return + } + + if configPath != "" { + if err := k.Load(file.Provider(configPath), yaml.Parser()); err != nil { + loadErr = fmt.Errorf("load config file: %w", err) + return + } + } + + if err := k.Load(env.Provider("", ".", envKeyReplacer), nil); err != nil { + loadErr = fmt.Errorf("load env vars: %w", err) + return + } + + cfg = &Config{} + if err := k.Unmarshal("", cfg); err != nil { + loadErr = fmt.Errorf("unmarshal config: %w", err) + return + } + + if err := validate(cfg); err != nil { + loadErr = fmt.Errorf("validate config: %w", err) + return + } + }) + + if loadErr != nil { + return nil, loadErr + } + + return cfg, nil +} + +func Get() *Config { + if cfg == nil { + panic("config not loaded: call Load() first") + } + return cfg +} + +func loadDefaults(k *koanf.Koanf) error { + defaults := map[string]any{ + "app.name": "Go Backend", + "app.version": "1.0.0", + "app.environment": "development", + + "server.host": "0.0.0.0", + "server.port": 8080, + "server.read_timeout": "30s", + "server.write_timeout": "30s", + "server.idle_timeout": "120s", + "server.shutdown_timeout": "15s", + + 
"database.max_open_conns": 25, + "database.max_idle_conns": 5, + "database.conn_max_lifetime": "1h", + "database.conn_max_idle_time": "30m", + + "redis.pool_size": 10, + "redis.min_idle_conns": 5, + + "jwt.access_token_expire": "15m", + "jwt.refresh_token_expire": "168h", + "jwt.issuer": "go-backend", + "jwt.audience": "go-backend-api", + "jwt.private_key_path": "keys/private.pem", + "jwt.public_key_path": "keys/public.pem", + + "rate_limit.requests": 100, + "rate_limit.window": "1m", + "rate_limit.burst": 20, + + "cors.allowed_origins": []string{"http://localhost:3000"}, + "cors.allowed_methods": []string{ + "GET", + "POST", + "PUT", + "PATCH", + "DELETE", + "OPTIONS", + }, + "cors.allowed_headers": []string{ + "Accept", + "Authorization", + "Content-Type", + "X-Request-ID", + }, + "cors.allow_credentials": true, + "cors.max_age": 300, + + "log.level": "info", + "log.format": "json", + + "otel.enabled": false, + "otel.insecure": true, + "otel.sample_rate": 0.1, + "otel.service_name": "go-backend", + } + + for key, value := range defaults { + if err := k.Set(key, value); err != nil { + return fmt.Errorf("set default %s: %w", key, err) + } + } + + return nil +} + +var envKeyMap = map[string]string{ + "DATABASE_URL": "database.url", + "REDIS_URL": "redis.url", + "ENVIRONMENT": "app.environment", + "HOST": "server.host", + "PORT": "server.port", + "LOG_LEVEL": "log.level", + "LOG_FORMAT": "log.format", + "JWT_PRIVATE_KEY_PATH": "jwt.private_key_path", + "JWT_PUBLIC_KEY_PATH": "jwt.public_key_path", + "JWT_ACCESS_TOKEN_EXPIRE": "jwt.access_token_expire", + "JWT_REFRESH_TOKEN_EXPIRE": "jwt.refresh_token_expire", + "JWT_ISSUER": "jwt.issuer", + "JWT_AUDIENCE": "jwt.audience", + "RATE_LIMIT_REQUESTS": "rate_limit.requests", + "RATE_LIMIT_WINDOW": "rate_limit.window", + "RATE_LIMIT_BURST": "rate_limit.burst", + "OTEL_ENDPOINT": "otel.endpoint", + "OTEL_EXPORTER_OTLP_ENDPOINT": "otel.endpoint", + "OTEL_SERVICE_NAME": "otel.service_name", + "OTEL_ENABLED": "otel.enabled", + 
"OTEL_INSECURE": "otel.insecure", + "OTEL_SAMPLE_RATE": "otel.sample_rate", +} + +func envKeyReplacer(s string) string { + if mapped, ok := envKeyMap[s]; ok { + return mapped + } + return "" +} + +func validate(c *Config) error { + if c.Database.URL == "" { + return fmt.Errorf("DATABASE_URL is required") + } + + if c.Redis.URL == "" { + return fmt.Errorf("REDIS_URL is required") + } + + if c.JWT.PrivateKeyPath == "" { + return fmt.Errorf("JWT_PRIVATE_KEY_PATH is required") + } + + if c.JWT.PublicKeyPath == "" { + return fmt.Errorf("JWT_PUBLIC_KEY_PATH is required") + } + + if c.CORS.AllowCredentials { + for _, origin := range c.CORS.AllowedOrigins { + if origin == "*" { + return fmt.Errorf( + "CORS wildcard '*' cannot be used with AllowCredentials", + ) + } + } + } + + if c.App.Environment == "production" { + if c.Otel.Enabled && c.Otel.Insecure { + return fmt.Errorf("OTEL_INSECURE must be false in production") + } + } + + if c.Server.ReadTimeout <= 0 { + return fmt.Errorf("server.read_timeout must be positive") + } + + if c.Server.WriteTimeout <= 0 { + return fmt.Errorf("server.write_timeout must be positive") + } + + return nil +} + +func (c *Config) IsProduction() bool { + return c.App.Environment == "production" +} + +func (c *Config) IsDevelopment() bool { + return c.App.Environment == "development" +} + +func (s *ServerConfig) Address() string { + return fmt.Sprintf("%s:%d", s.Host, s.Port) +} diff --git a/backends/go-backend/internal/core/database.go b/backends/go-backend/internal/core/database.go new file mode 100644 index 0000000..766af7f --- /dev/null +++ b/backends/go-backend/internal/core/database.go @@ -0,0 +1,145 @@ +// AngelaMos | 2026 +// database.go + +package core + +import ( + "context" + "database/sql" + "fmt" + "math/rand/v2" + "time" + + _ "github.com/jackc/pgx/v5/stdlib" + "github.com/jmoiron/sqlx" + + "github.com/carterperez-dev/templates/go-backend/internal/config" +) + +type Database struct { + DB *sqlx.DB +} + +func NewDatabase( + ctx 
context.Context, + cfg config.DatabaseConfig, +) (*Database, error) { + db, err := sqlx.ConnectContext(ctx, "pgx", cfg.URL) + if err != nil { + return nil, fmt.Errorf("connect to database: %w", err) + } + + db.SetMaxOpenConns(cfg.MaxOpenConns) + db.SetMaxIdleConns(cfg.MaxIdleConns) + db.SetConnMaxLifetime(jitteredDuration(cfg.ConnMaxLifetime)) + db.SetConnMaxIdleTime(cfg.ConnMaxIdleTime) + + pingCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + if err := db.PingContext(pingCtx); err != nil { + _ = db.Close() //nolint:errcheck // cleanup on connection failure + return nil, fmt.Errorf("ping database: %w", err) + } + + return &Database{DB: db}, nil +} + +func (d *Database) Close() error { + if d.DB != nil { + return d.DB.Close() + } + return nil +} + +func (d *Database) Ping(ctx context.Context) error { + pingCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + if err := d.DB.PingContext(pingCtx); err != nil { + return fmt.Errorf("database ping failed: %w", err) + } + + return nil +} + +func (d *Database) Stats() sql.DBStats { + return d.DB.Stats() +} + +type DBTX interface { + sqlx.ExtContext + sqlx.ExecerContext + GetContext(ctx context.Context, dest any, query string, args ...any) error + SelectContext( + ctx context.Context, + dest any, + query string, + args ...any, + ) error +} + +func InTx(ctx context.Context, db *sqlx.DB, fn func(tx *sqlx.Tx) error) error { + tx, err := db.BeginTxx(ctx, nil) + if err != nil { + return fmt.Errorf("begin transaction: %w", err) + } + + defer func() { + if p := recover(); p != nil { + _ = tx.Rollback() //nolint:errcheck // best-effort rollback on panic + panic(p) + } + }() + + if err := fn(tx); err != nil { + if rbErr := tx.Rollback(); rbErr != nil { + return fmt.Errorf("rollback failed: %w (original: %w)", rbErr, err) + } + return err + } + + if err := tx.Commit(); err != nil { + return fmt.Errorf("commit transaction: %w", err) + } + + return nil +} + +func InTxWithOptions( + ctx 
context.Context, + db *sqlx.DB, + opts *sql.TxOptions, + fn func(tx *sqlx.Tx) error, +) error { + tx, err := db.BeginTxx(ctx, opts) + if err != nil { + return fmt.Errorf("begin transaction: %w", err) + } + + defer func() { + if p := recover(); p != nil { + _ = tx.Rollback() //nolint:errcheck // best-effort rollback on panic + panic(p) + } + }() + + if err := fn(tx); err != nil { + if rbErr := tx.Rollback(); rbErr != nil { + return fmt.Errorf("rollback failed: %w (original: %w)", rbErr, err) + } + return err + } + + if err := tx.Commit(); err != nil { + return fmt.Errorf("commit transaction: %w", err) + } + + return nil +} + +func jitteredDuration(base time.Duration) time.Duration { + //nolint:gosec // G404: non-security-sensitive jitter for connection pool + jitter := time.Duration(rand.Int64N(int64(base / 7))) + return base + jitter +} diff --git a/backends/go-backend/internal/core/errors.go b/backends/go-backend/internal/core/errors.go new file mode 100644 index 0000000..b478a6e --- /dev/null +++ b/backends/go-backend/internal/core/errors.go @@ -0,0 +1,169 @@ +// AngelaMos | 2026 +// errors.go + +package core + +import ( + "errors" + "fmt" + "net/http" +) + +var ( + ErrNotFound = errors.New("resource not found") + ErrDuplicateKey = errors.New("duplicate key violation") + ErrForeignKey = errors.New("foreign key violation") + ErrInvalidInput = errors.New("invalid input") + ErrUnauthorized = errors.New("unauthorized") + ErrForbidden = errors.New("forbidden") + ErrInternal = errors.New("internal server error") + ErrConflict = errors.New("resource conflict") + ErrRateLimited = errors.New("rate limit exceeded") + ErrTokenExpired = errors.New("token expired") + ErrTokenInvalid = errors.New("token invalid") + ErrTokenRevoked = errors.New("token revoked") +) + +type AppError struct { + Err error `json:"-"` + Message string `json:"message"` + StatusCode int `json:"-"` + Code string `json:"code"` +} + +func (e *AppError) Error() string { + if e.Message != "" { + return 
// Unwrap exposes the sentinel error (ErrNotFound, ErrUnauthorized, …)
// so callers can match with errors.Is/errors.As through the chain.
func (e *AppError) Unwrap() error {
	return e.Err
}

// NewAppError builds a fully-specified AppError. Prefer the named
// constructors (NotFoundError, ValidationError, …) which fix the
// sentinel, status code, and machine-readable code consistently;
// this exists for one-off cases they do not cover.
func NewAppError(
	err error,
	message string,
	statusCode int,
	code string,
) *AppError {
	return &AppError{
		Err:        err,
		Message:    message,
		StatusCode: statusCode,
		Code:       code,
	}
}
TokenInvalidError() *AppError { + return &AppError{ + Err: ErrTokenInvalid, + Message: "invalid token", + StatusCode: http.StatusUnauthorized, + Code: "TOKEN_INVALID", + } +} + +func TokenRevokedError() *AppError { + return &AppError{ + Err: ErrTokenRevoked, + Message: "token has been revoked", + StatusCode: http.StatusUnauthorized, + Code: "TOKEN_REVOKED", + } +} + +func IsAppError(err error) bool { + var appErr *AppError + return errors.As(err, &appErr) +} + +func GetAppError(err error) *AppError { + var appErr *AppError + if errors.As(err, &appErr) { + return appErr + } + return InternalError(err) +} diff --git a/backends/go-backend/internal/core/redis.go b/backends/go-backend/internal/core/redis.go new file mode 100644 index 0000000..822b6cc --- /dev/null +++ b/backends/go-backend/internal/core/redis.go @@ -0,0 +1,63 @@ +// AngelaMos | 2026 +// redis.go + +package core + +import ( + "context" + "fmt" + "time" + + "github.com/redis/go-redis/v9" + + "github.com/carterperez-dev/templates/go-backend/internal/config" +) + +type Redis struct { + Client *redis.Client +} + +func NewRedis(ctx context.Context, cfg config.RedisConfig) (*Redis, error) { + opts, err := redis.ParseURL(cfg.URL) + if err != nil { + return nil, fmt.Errorf("parse redis url: %w", err) + } + + opts.PoolSize = cfg.PoolSize + opts.MinIdleConns = cfg.MinIdleConns + opts.PoolTimeout = 30 * time.Second + opts.ConnMaxIdleTime = 5 * time.Minute + + client := redis.NewClient(opts) + + pingCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + if err := client.Ping(pingCtx).Err(); err != nil { + return nil, fmt.Errorf("ping redis: %w", err) + } + + return &Redis{Client: client}, nil +} + +func (r *Redis) Close() error { + if r.Client != nil { + return r.Client.Close() + } + return nil +} + +func (r *Redis) Ping(ctx context.Context) error { + pingCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + if err := r.Client.Ping(pingCtx).Err(); err != nil { + return 
fmt.Errorf("redis ping failed: %w", err) + } + + return nil +} + +func (r *Redis) PoolStats() *redis.PoolStats { + return r.Client.PoolStats() +} diff --git a/backends/go-backend/internal/core/response.go b/backends/go-backend/internal/core/response.go new file mode 100644 index 0000000..e37ac73 --- /dev/null +++ b/backends/go-backend/internal/core/response.go @@ -0,0 +1,120 @@ +// AngelaMos | 2026 +// response.go + +package core + +import ( + "encoding/json" + "log/slog" + "net/http" +) + +type Response struct { + Success bool `json:"success"` + Data any `json:"data,omitempty"` + Error *Error `json:"error,omitempty"` + Meta *Meta `json:"meta,omitempty"` +} + +type Error struct { + Code string `json:"code"` + Message string `json:"message"` +} + +type Meta struct { + Page int `json:"page,omitempty"` + PageSize int `json:"page_size,omitempty"` + Total int `json:"total,omitempty"` + TotalPages int `json:"total_pages,omitempty"` +} + +func JSON(w http.ResponseWriter, status int, data any) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(status) + + response := Response{ + Success: status >= 200 && status < 300, + Data: data, + } + + if err := json.NewEncoder(w).Encode(response); err != nil { + slog.Error("failed to encode response", "error", err) + } +} + +func JSONWithMeta(w http.ResponseWriter, status int, data any, meta *Meta) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(status) + + response := Response{ + Success: true, + Data: data, + Meta: meta, + } + + if err := json.NewEncoder(w).Encode(response); err != nil { + slog.Error("failed to encode response", "error", err) + } +} + +func JSONError(w http.ResponseWriter, err error) { + appErr := GetAppError(err) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(appErr.StatusCode) + + response := Response{ + Success: false, + Error: &Error{ + Code: appErr.Code, + Message: appErr.Message, + }, + } + + if encErr := json.NewEncoder(w).Encode(response); 
encErr != nil { + slog.Error("failed to encode error response", "error", encErr) + } +} + +func Created(w http.ResponseWriter, data any) { + JSON(w, http.StatusCreated, data) +} + +func OK(w http.ResponseWriter, data any) { + JSON(w, http.StatusOK, data) +} + +func NoContent(w http.ResponseWriter) { + w.WriteHeader(http.StatusNoContent) +} + +func BadRequest(w http.ResponseWriter, message string) { + JSONError(w, ValidationError(message)) +} + +func NotFound(w http.ResponseWriter, resource string) { + JSONError(w, NotFoundError(resource)) +} + +func Unauthorized(w http.ResponseWriter, message string) { + JSONError(w, UnauthorizedError(message)) +} + +func Forbidden(w http.ResponseWriter, message string) { + JSONError(w, ForbiddenError(message)) +} + +func InternalServerError(w http.ResponseWriter, err error) { + slog.Error("internal server error", "error", err) + JSONError(w, InternalError(err)) +} + +func Paginated(w http.ResponseWriter, data any, page, pageSize, total int) { + totalPages := (total + pageSize - 1) / pageSize + JSONWithMeta(w, http.StatusOK, data, &Meta{ + Page: page, + PageSize: pageSize, + Total: total, + TotalPages: totalPages, + }) +} diff --git a/backends/go-backend/internal/core/security.go b/backends/go-backend/internal/core/security.go new file mode 100644 index 0000000..05065e1 --- /dev/null +++ b/backends/go-backend/internal/core/security.go @@ -0,0 +1,218 @@ +// AngelaMos | 2026 +// security.go + +package core + +import ( + "crypto/rand" + "crypto/sha256" + "crypto/subtle" + "encoding/base64" + "encoding/hex" + "fmt" + "strings" + + "golang.org/x/crypto/argon2" +) + +const ( + argonTime = 1 + argonMemory = 64 * 1024 + argonThreads = 4 + argonKeyLen = 32 + saltLength = 16 +) + +func HashPassword(password string) (string, error) { + salt := make([]byte, saltLength) + if _, err := rand.Read(salt); err != nil { + return "", fmt.Errorf("generate salt: %w", err) + } + + hash := argon2.IDKey( + []byte(password), + salt, + argonTime, + 
argonMemory, + argonThreads, + argonKeyLen, + ) + + b64Salt := base64.RawStdEncoding.EncodeToString(salt) + b64Hash := base64.RawStdEncoding.EncodeToString(hash) + + encoded := fmt.Sprintf( + "$argon2id$v=%d$m=%d,t=%d,p=%d$%s$%s", + argon2.Version, + argonMemory, + argonTime, + argonThreads, + b64Salt, + b64Hash, + ) + + return encoded, nil +} + +func VerifyPassword(password, encodedHash string) (bool, error) { + params, salt, hash, err := decodeHash(encodedHash) + if err != nil { + return false, err + } + + otherHash := argon2.IDKey( + []byte(password), + salt, + params.time, + params.memory, + params.threads, + params.keyLen, + ) + + if subtle.ConstantTimeCompare(hash, otherHash) == 1 { + return true, nil + } + + return false, nil +} + +func VerifyPasswordWithRehash( + password, encodedHash string, +) (bool, string, error) { + valid, err := VerifyPassword(password, encodedHash) + if err != nil { + return false, "", err + } + + if !valid { + return false, "", nil + } + + if needsRehash(encodedHash) { + newHash, hashErr := HashPassword(password) + if hashErr != nil { + //nolint:nilerr // password verified successfully; rehash failure is non-critical + return true, "", nil + } + return true, newHash, nil + } + + return true, "", nil +} + +var dummyHash string + +func init() { + hash, err := HashPassword("dummy_password_for_timing_attack_prevention") + if err != nil { + panic(fmt.Sprintf("security: failed to generate dummy hash: %v", err)) + } + dummyHash = hash +} + +func VerifyPasswordTimingSafe( + password string, + encodedHash *string, +) (bool, string, error) { + hashToVerify := dummyHash + if encodedHash != nil && *encodedHash != "" { + hashToVerify = *encodedHash + } + + valid, newHash, err := VerifyPasswordWithRehash(password, hashToVerify) + + if encodedHash == nil || *encodedHash == "" { + return false, "", nil + } + + return valid, newHash, err +} + +type argonParams struct { + memory uint32 + time uint32 + threads uint8 + keyLen uint32 +} + +func 
decodeHash(encodedHash string) (*argonParams, []byte, []byte, error) { + parts := strings.Split(encodedHash, "$") + if len(parts) != 6 { + return nil, nil, nil, fmt.Errorf("invalid hash format") + } + + if parts[1] != "argon2id" { + return nil, nil, nil, fmt.Errorf("unsupported algorithm: %s", parts[1]) + } + + var version int + _, err := fmt.Sscanf(parts[2], "v=%d", &version) + if err != nil { + return nil, nil, nil, fmt.Errorf("invalid version: %w", err) + } + + if version != argon2.Version { + return nil, nil, nil, fmt.Errorf("incompatible version: %d", version) + } + + params := &argonParams{} + _, err = fmt.Sscanf( + parts[3], + "m=%d,t=%d,p=%d", + ¶ms.memory, + ¶ms.time, + ¶ms.threads, + ) + if err != nil { + return nil, nil, nil, fmt.Errorf("invalid params: %w", err) + } + + salt, err := base64.RawStdEncoding.DecodeString(parts[4]) + if err != nil { + return nil, nil, nil, fmt.Errorf("decode salt: %w", err) + } + + hash, err := base64.RawStdEncoding.DecodeString(parts[5]) + if err != nil { + return nil, nil, nil, fmt.Errorf("decode hash: %w", err) + } + + //nolint:gosec // G115: hash length is always small (32 bytes for Argon2id) + params.keyLen = uint32(len(hash)) + + return params, salt, hash, nil +} + +func needsRehash(encodedHash string) bool { + params, _, _, err := decodeHash(encodedHash) + if err != nil { + return true + } + + return params.memory != argonMemory || + params.time != argonTime || + params.threads != argonThreads || + params.keyLen != argonKeyLen +} + +func GenerateSecureToken(length int) (string, error) { + bytes := make([]byte, length) + if _, err := rand.Read(bytes); err != nil { + return "", fmt.Errorf("generate random bytes: %w", err) + } + return base64.URLEncoding.EncodeToString(bytes), nil +} + +func GenerateRefreshToken() (string, error) { + return GenerateSecureToken(32) +} + +func HashToken(token string) string { + hash := sha256.Sum256([]byte(token)) + return hex.EncodeToString(hash[:]) +} + +func CompareTokenHash(token, hash 
string) bool { + tokenHash := HashToken(token) + return subtle.ConstantTimeCompare([]byte(tokenHash), []byte(hash)) == 1 +} diff --git a/backends/go-backend/internal/core/telemetry.go b/backends/go-backend/internal/core/telemetry.go new file mode 100644 index 0000000..950bba5 --- /dev/null +++ b/backends/go-backend/internal/core/telemetry.go @@ -0,0 +1,142 @@ +// AngelaMos | 2026 +// telemetry.go + +package core + +import ( + "context" + "fmt" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + "go.opentelemetry.io/otel/trace" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + + "github.com/carterperez-dev/templates/go-backend/internal/config" +) + +type Telemetry struct { + TracerProvider *sdktrace.TracerProvider + Tracer trace.Tracer +} + +func NewTelemetry( + ctx context.Context, + otelCfg config.OtelConfig, + appCfg config.AppConfig, +) (*Telemetry, error) { + if !otelCfg.Enabled || otelCfg.Endpoint == "" { + noopProvider := sdktrace.NewTracerProvider() + return &Telemetry{ + TracerProvider: noopProvider, + Tracer: noopProvider.Tracer(otelCfg.ServiceName), + }, nil + } + + opts := []otlptracegrpc.Option{ + otlptracegrpc.WithEndpoint(otelCfg.Endpoint), + otlptracegrpc.WithTimeout(5 * time.Second), + } + + if otelCfg.Insecure { + opts = append( + opts, + otlptracegrpc.WithTLSCredentials(insecure.NewCredentials()), + ) + } else { + opts = append(opts, otlptracegrpc.WithTLSCredentials(credentials.NewClientTLSFromCert(nil, ""))) + } + + exporter, err := otlptracegrpc.New(ctx, opts...) 
+ if err != nil { + return nil, fmt.Errorf("create otlp exporter: %w", err) + } + + res, err := resource.New(ctx, + resource.WithAttributes( + semconv.ServiceName(otelCfg.ServiceName), + semconv.ServiceVersion(appCfg.Version), + attribute.String("environment", appCfg.Environment), + ), + resource.WithHost(), + resource.WithProcess(), + ) + if err != nil { + return nil, fmt.Errorf("create resource: %w", err) + } + + sampleRate := otelCfg.SampleRate + if sampleRate <= 0 || sampleRate > 1 { + sampleRate = 0.1 + } + + tp := sdktrace.NewTracerProvider( + sdktrace.WithBatcher(exporter, + sdktrace.WithBatchTimeout(5*time.Second), + sdktrace.WithMaxExportBatchSize(512), + ), + sdktrace.WithResource(res), + sdktrace.WithSampler(sdktrace.ParentBased( + sdktrace.TraceIDRatioBased(sampleRate), + )), + ) + + otel.SetTracerProvider(tp) + otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator( + propagation.TraceContext{}, + propagation.Baggage{}, + )) + + return &Telemetry{ + TracerProvider: tp, + Tracer: tp.Tracer(otelCfg.ServiceName), + }, nil +} + +func (t *Telemetry) Shutdown(ctx context.Context) error { + if t.TracerProvider == nil { + return nil + } + + shutdownCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + if err := t.TracerProvider.Shutdown(shutdownCtx); err != nil { + return fmt.Errorf("shutdown tracer provider: %w", err) + } + + return nil +} + +func SpanFromContext(ctx context.Context) trace.Span { + return trace.SpanFromContext(ctx) +} + +func TraceIDFromContext(ctx context.Context) string { + span := trace.SpanFromContext(ctx) + if span.SpanContext().IsValid() { + return span.SpanContext().TraceID().String() + } + return "" +} + +func AddSpanEvent( + ctx context.Context, + name string, + attrs ...attribute.KeyValue, +) { + span := trace.SpanFromContext(ctx) + span.AddEvent(name, trace.WithAttributes(attrs...)) +} + +func SetSpanError(ctx context.Context, err error) { + span := trace.SpanFromContext(ctx) + 
span.RecordError(err) +} diff --git a/backends/go-backend/internal/core/validation.go b/backends/go-backend/internal/core/validation.go new file mode 100644 index 0000000..f77cbd9 --- /dev/null +++ b/backends/go-backend/internal/core/validation.go @@ -0,0 +1,42 @@ +// AngelaMos | 2026 +// validation.go + +package core + +import ( + "errors" + "strings" + + "github.com/go-playground/validator/v10" +) + +func FormatValidationError(err error) string { + var ve validator.ValidationErrors + if errors.As(err, &ve) { + messages := make([]string, 0, len(ve)) + for _, fe := range ve { + messages = append(messages, FormatFieldError(fe)) + } + return strings.Join(messages, "; ") + } + return "validation failed" +} + +func FormatFieldError(fe validator.FieldError) string { + field := strings.ToLower(fe.Field()) + + switch fe.Tag() { + case "required": + return field + " is required" + case "email": + return field + " must be a valid email" + case "min": + return field + " must be at least " + fe.Param() + " characters" + case "max": + return field + " must be at most " + fe.Param() + " characters" + case "oneof": + return field + " must be one of: " + fe.Param() + default: + return field + " is invalid" + } +} diff --git a/backends/go-backend/internal/health/handler.go b/backends/go-backend/internal/health/handler.go new file mode 100644 index 0000000..e7003c6 --- /dev/null +++ b/backends/go-backend/internal/health/handler.go @@ -0,0 +1,195 @@ +// AngelaMos | 2026 +// handler.go + +package health + +import ( + "context" + "encoding/json" + "net/http" + "sync" + "sync/atomic" + "time" + + "github.com/go-chi/chi/v5" +) + +type Checker interface { + Ping(ctx context.Context) error +} + +type Handler struct { + db Checker + redis Checker + ready atomic.Bool + shutdown atomic.Bool +} + +func NewHandler(db, redis Checker) *Handler { + h := &Handler{ + db: db, + redis: redis, + } + h.ready.Store(true) + return h +} + +func (h *Handler) RegisterRoutes(r chi.Router) { + 
r.Get("/healthz", h.Liveness) + r.Get("/livez", h.Liveness) + r.Get("/readyz", h.Readiness) +} + +func (h *Handler) Liveness(w http.ResponseWriter, r *http.Request) { + if h.shutdown.Load() { + h.writeStatus(w, http.StatusServiceUnavailable, StatusResponse{ + Status: "shutting_down", + }) + return + } + + h.writeStatus(w, http.StatusOK, StatusResponse{ + Status: "ok", + }) +} + +func (h *Handler) Readiness(w http.ResponseWriter, r *http.Request) { + if h.shutdown.Load() { + h.writeStatus(w, http.StatusServiceUnavailable, StatusResponse{ + Status: "shutting_down", + }) + return + } + + if !h.ready.Load() { + h.writeStatus(w, http.StatusServiceUnavailable, StatusResponse{ + Status: "not_ready", + }) + return + } + + ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) + defer cancel() + + checks := h.runHealthChecks(ctx) + + allHealthy := true + for _, check := range checks { + if !check.Healthy { + allHealthy = false + break + } + } + + status := "ok" + statusCode := http.StatusOK + if !allHealthy { + status = "degraded" + statusCode = http.StatusServiceUnavailable + } + + h.writeStatus(w, statusCode, ReadinessResponse{ + Status: status, + Checks: checks, + }) +} + +func (h *Handler) runHealthChecks(ctx context.Context) []HealthCheck { + var wg sync.WaitGroup + checks := make([]HealthCheck, 2) + + wg.Add(2) + + go func() { + defer wg.Done() + checks[0] = h.checkDatabase(ctx) + }() + + go func() { + defer wg.Done() + checks[1] = h.checkRedis(ctx) + }() + + wg.Wait() + return checks +} + +func (h *Handler) checkDatabase(ctx context.Context) HealthCheck { + check := HealthCheck{ + Name: "database", + Healthy: true, + } + + if h.db == nil { + check.Healthy = false + check.Message = "database checker not configured" + return check + } + + start := time.Now() + err := h.db.Ping(ctx) + check.Latency = time.Since(start).String() + + if err != nil { + check.Healthy = false + check.Message = "ping failed" + } + + return check +} + +func (h *Handler) checkRedis(ctx 
// StatusResponse is the minimal payload for liveness probes:
// "ok", "not_ready", or "shutting_down".
type StatusResponse struct {
	Status string `json:"status"`
}

// ReadinessResponse reports overall readiness ("ok" or "degraded")
// plus the individual dependency checks that produced it.
type ReadinessResponse struct {
	Status string        `json:"status"`
	Checks []HealthCheck `json:"checks"`
}

// HealthCheck describes the outcome of probing a single dependency.
type HealthCheck struct {
	// Name identifies the dependency ("database", "redis").
	Name string `json:"name"`
	// Healthy is false when the ping failed or the checker is missing.
	Healthy bool `json:"healthy"`
	// Latency is the human-readable ping duration; empty when no ping ran.
	Latency string `json:"latency,omitempty"`
	// Message carries failure detail; empty when healthy.
	Message string `json:"message,omitempty"`
}
// Authenticator is middleware that requires a valid bearer access
// token on every request. On success the verified claims are stored
// on the request context; on failure the request is rejected with a
// JSON 401 and never reaches the next handler.
func Authenticator(verifier TokenVerifier) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			token := ExtractToken(r)

			// No credentials at all: reject before calling the verifier.
			if token == "" {
				core.JSONError(
					w,
					core.UnauthorizedError("missing authorization token"),
				)
				return
			}

			claims, err := verifier.VerifyAccessToken(r.Context(), token)
			if err != nil {
				// Maps expired/revoked/invalid token errors to 401s.
				handleAuthError(w, err)
				return
			}

			// Individual keys are stored alongside the full ClaimsKey
			// so handlers can read one field (GetUserID, GetUserRole,
			// GetUserTier) without type-asserting the claims struct.
			ctx := r.Context()
			ctx = context.WithValue(ctx, UserIDKey, claims.UserID)
			ctx = context.WithValue(ctx, UserRoleKey, claims.Role)
			ctx = context.WithValue(ctx, UserTierKey, claims.Tier)
			ctx = context.WithValue(ctx, ClaimsKey, claims)

			next.ServeHTTP(w, r.WithContext(ctx))
		})
	}
}
core.ForbiddenError("insufficient permissions"), + ) + return + } + + next.ServeHTTP(w, r) + }) + } +} + +func RequireAdmin(next http.Handler) http.Handler { + return RequireRole("admin")(next) +} + +func ExtractToken(r *http.Request) string { + authHeader := r.Header.Get("Authorization") + if authHeader == "" { + return "" + } + + parts := strings.SplitN(authHeader, " ", 2) + if len(parts) != 2 || !strings.EqualFold(parts[0], "bearer") { + return "" + } + + return strings.TrimSpace(parts[1]) +} + +func handleAuthError(w http.ResponseWriter, err error) { + if core.IsAppError(err) { + core.JSONError(w, err) + return + } + + switch { + case errors.Is(err, core.ErrTokenExpired): + core.JSONError(w, core.TokenExpiredError()) + case errors.Is(err, core.ErrTokenRevoked): + core.JSONError(w, core.TokenRevokedError()) + case errors.Is(err, core.ErrTokenInvalid): + core.JSONError(w, core.TokenInvalidError()) + default: + core.JSONError(w, core.TokenInvalidError()) + } +} + +func GetUserID(ctx context.Context) string { + if id, ok := ctx.Value(UserIDKey).(string); ok { + return id + } + return "" +} + +func GetUserRole(ctx context.Context) string { + if role, ok := ctx.Value(UserRoleKey).(string); ok { + return role + } + return "" +} + +func GetUserTier(ctx context.Context) string { + if tier, ok := ctx.Value(UserTierKey).(string); ok { + return tier + } + return "" +} + +func GetClaims(ctx context.Context) *AccessTokenClaims { + if claims, ok := ctx.Value(ClaimsKey).(*AccessTokenClaims); ok { + return claims + } + return nil +} + +func IsAuthenticated(ctx context.Context) bool { + return GetUserID(ctx) != "" +} + +func IsAdmin(ctx context.Context) bool { + return GetUserRole(ctx) == "admin" +} diff --git a/backends/go-backend/internal/middleware/headers.go b/backends/go-backend/internal/middleware/headers.go new file mode 100644 index 0000000..332d48f --- /dev/null +++ b/backends/go-backend/internal/middleware/headers.go @@ -0,0 +1,103 @@ +// AngelaMos | 2026 +// 
headers.go + +package middleware + +import ( + "net/http" + "strconv" + "strings" + + "github.com/carterperez-dev/templates/go-backend/internal/config" +) + +func SecurityHeaders(isProduction bool) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + h := w.Header() + + h.Set("X-Content-Type-Options", "nosniff") + h.Set("X-Frame-Options", "DENY") + h.Set("X-XSS-Protection", "1; mode=block") + h.Set("Referrer-Policy", "strict-origin-when-cross-origin") + h.Set( + "Permissions-Policy", + "geolocation=(), microphone=(), camera=()", + ) + + if isProduction { + h.Set( + "Strict-Transport-Security", + "max-age=31536000; includeSubDomains; preload", + ) + } + + h.Set("Content-Security-Policy", buildCSP(isProduction)) + + next.ServeHTTP(w, r) + }) + } +} + +func buildCSP(isProduction bool) string { + directives := []string{ + "default-src 'self'", + "script-src 'self'", + "style-src 'self' 'unsafe-inline'", + "img-src 'self' data: https:", + "font-src 'self'", + "connect-src 'self'", + "frame-ancestors 'none'", + "base-uri 'self'", + "form-action 'self'", + } + + if !isProduction { + directives[1] = "script-src 'self' 'unsafe-inline' 'unsafe-eval'" + } + + return strings.Join(directives, "; ") +} + +func CORS(cfg config.CORSConfig) func(http.Handler) http.Handler { + allowedOrigins := make(map[string]struct{}, len(cfg.AllowedOrigins)) + for _, origin := range cfg.AllowedOrigins { + allowedOrigins[origin] = struct{}{} + } + + methodsStr := strings.Join(cfg.AllowedMethods, ", ") + headersStr := strings.Join(cfg.AllowedHeaders, ", ") + + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + origin := r.Header.Get("Origin") + + if origin != "" { + if _, ok := allowedOrigins[origin]; ok { + w.Header().Set("Access-Control-Allow-Origin", origin) + w.Header().Set("Vary", "Origin") + + if cfg.AllowCredentials { + 
w.Header(). + Set("Access-Control-Allow-Credentials", "true") + } + } + } + + if r.Method == http.MethodOptions { + w.Header().Set("Access-Control-Allow-Methods", methodsStr) + w.Header().Set("Access-Control-Allow-Headers", headersStr) + + if cfg.MaxAge > 0 { + w.Header(). + Set("Access-Control-Max-Age", strconv.Itoa(cfg.MaxAge)) + } + + w.WriteHeader(http.StatusNoContent) + return + } + + next.ServeHTTP(w, r) + }) + } +} diff --git a/backends/go-backend/internal/middleware/logging.go b/backends/go-backend/internal/middleware/logging.go new file mode 100644 index 0000000..602ed4c --- /dev/null +++ b/backends/go-backend/internal/middleware/logging.go @@ -0,0 +1,99 @@ +// AngelaMos | 2026 +// logging.go + +package middleware + +import ( + "context" + "log/slog" + "net/http" + "time" + + "go.opentelemetry.io/otel/trace" +) + +type loggerKey struct{} + +func Logger(baseLogger *slog.Logger) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + start := time.Now() + + requestID := GetRequestID(r.Context()) + + reqLogger := baseLogger.With( + slog.String("request_id", requestID), + slog.String("method", r.Method), + slog.String("path", r.URL.Path), + slog.String("remote_addr", r.RemoteAddr), + ) + + if span := trace.SpanFromContext(r.Context()); span.SpanContext(). 
+ IsValid() { + reqLogger = reqLogger.With( + slog.String( + "trace_id", + span.SpanContext().TraceID().String(), + ), + slog.String( + "span_id", + span.SpanContext().SpanID().String(), + ), + ) + } + + ctx := context.WithValue(r.Context(), loggerKey{}, reqLogger) + + ww := &responseWriter{ + ResponseWriter: w, + status: http.StatusOK, + } + + next.ServeHTTP(ww, r.WithContext(ctx)) + + latency := time.Since(start) + + logLevel := slog.LevelInfo + if ww.status >= 500 { + logLevel = slog.LevelError + } else if ww.status >= 400 { + logLevel = slog.LevelWarn + } + + reqLogger.Log(r.Context(), logLevel, "request completed", + slog.Int("status", ww.status), + slog.Int("bytes", ww.bytes), + slog.Duration("latency", latency), + slog.String("user_agent", r.UserAgent()), + ) + }) + } +} + +func GetLogger(ctx context.Context) *slog.Logger { + if logger, ok := ctx.Value(loggerKey{}).(*slog.Logger); ok { + return logger + } + return slog.Default() +} + +type responseWriter struct { + http.ResponseWriter + status int + bytes int +} + +func (rw *responseWriter) WriteHeader(code int) { + rw.status = code + rw.ResponseWriter.WriteHeader(code) +} + +func (rw *responseWriter) Write(b []byte) (int, error) { + n, err := rw.ResponseWriter.Write(b) + rw.bytes += n + return n, err +} + +func (rw *responseWriter) Unwrap() http.ResponseWriter { + return rw.ResponseWriter +} diff --git a/backends/go-backend/internal/middleware/ratelimit.go b/backends/go-backend/internal/middleware/ratelimit.go new file mode 100644 index 0000000..c85519a --- /dev/null +++ b/backends/go-backend/internal/middleware/ratelimit.go @@ -0,0 +1,375 @@ +// AngelaMos | 2026 +// ratelimit.go + +package middleware + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + "net" + "net/http" + "strconv" + "strings" + "sync" + "time" + + redis_rate "github.com/go-redis/redis_rate/v10" + "github.com/redis/go-redis/v9" + "golang.org/x/time/rate" +) + +type RateLimitConfig struct { + Limit redis_rate.Limit + KeyFunc 
func(*http.Request) string + FailOpen bool + BypassFunc func(*http.Request) bool + OnLimited func(http.ResponseWriter, *http.Request, *redis_rate.Result) +} + +type RateLimiter struct { + limiter *redis_rate.Limiter + fallback *localLimiter + config RateLimitConfig +} + +func NewRateLimiter(rdb *redis.Client, cfg RateLimitConfig) *RateLimiter { + if cfg.KeyFunc == nil { + cfg.KeyFunc = KeyByIP + } + + return &RateLimiter{ + limiter: redis_rate.NewLimiter(rdb), + fallback: newLocalLimiter(), + config: cfg, + } +} + +func (rl *RateLimiter) Handler(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if rl.config.BypassFunc != nil && rl.config.BypassFunc(r) { + next.ServeHTTP(w, r) + return + } + + key := rl.config.KeyFunc(r) + res, err := rl.allow(r.Context(), key) + if err != nil { + if rl.config.FailOpen { + slog.Warn("rate limiter error, failing open", + "error", err, + "key", key, + ) + next.ServeHTTP(w, r) + return + } + http.Error(w, "Service Unavailable", http.StatusServiceUnavailable) + return + } + + setRateLimitHeaders(w, res, rl.config.Limit) + + if res.Allowed == 0 { + if rl.config.OnLimited != nil { + rl.config.OnLimited(w, r, res) + return + } + writeRateLimitExceeded(w, res) + return + } + + next.ServeHTTP(w, r) + }) +} + +func (rl *RateLimiter) allow( + ctx context.Context, + key string, +) (*redis_rate.Result, error) { + res, err := rl.limiter.Allow(ctx, key, rl.config.Limit) + if err != nil { + return rl.fallback.allow(key, rl.config.Limit) + } + return res, nil +} + +func KeyByIP(r *http.Request) string { + if xff := r.Header.Get("X-Forwarded-For"); xff != "" { + ips := strings.Split(xff, ",") + ip := strings.TrimSpace(ips[len(ips)-1]) + return "ratelimit:ip:" + ip + } + + if xri := r.Header.Get("X-Real-IP"); xri != "" { + return "ratelimit:ip:" + xri + } + + ip, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + ip = r.RemoteAddr + } + + return "ratelimit:ip:" + ip +} + +func 
KeyByUser(r *http.Request) string { + if userID := GetUserID(r.Context()); userID != "" { + return "ratelimit:user:" + userID + } + return KeyByIP(r) +} + +func KeyByUserAndEndpoint(r *http.Request) string { + userKey := KeyByUser(r) + endpoint := normalizeEndpoint(r.URL.Path) + return fmt.Sprintf("%s:endpoint:%s", userKey, endpoint) +} + +func normalizeEndpoint(path string) string { + parts := strings.Split(strings.Trim(path, "/"), "/") + normalized := make([]string, 0, len(parts)) + + for _, part := range parts { + if isUUID(part) || isNumeric(part) { + normalized = append(normalized, "{id}") + } else { + normalized = append(normalized, part) + } + } + + return "/" + strings.Join(normalized, "/") +} + +func isUUID(s string) bool { + if len(s) != 36 { + return false + } + return s[8] == '-' && s[13] == '-' && s[18] == '-' && s[23] == '-' +} + +func isNumeric(s string) bool { + for _, c := range s { + if c < '0' || c > '9' { + return false + } + } + return len(s) > 0 +} + +func setRateLimitHeaders( + w http.ResponseWriter, + res *redis_rate.Result, + limit redis_rate.Limit, +) { + h := w.Header() + + h.Set("X-RateLimit-Limit", strconv.Itoa(limit.Rate)) + h.Set("X-RateLimit-Remaining", strconv.Itoa(res.Remaining)) + h.Set("X-RateLimit-Reset", strconv.FormatInt( + time.Now().Add(res.ResetAfter).Unix(), 10)) + + windowSecs := int(limit.Period.Seconds()) + h.Set("RateLimit-Policy", fmt.Sprintf(`%d;w=%d`, limit.Rate, windowSecs)) + h.Set( + "RateLimit", + fmt.Sprintf(`%d;t=%d`, res.Remaining, int(res.ResetAfter.Seconds())), + ) +} + +func writeRateLimitExceeded(w http.ResponseWriter, res *redis_rate.Result) { + retryAfter := int(res.RetryAfter.Seconds()) + if retryAfter < 1 { + retryAfter = 1 + } + + w.Header().Set("Retry-After", strconv.Itoa(retryAfter)) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusTooManyRequests) + + response := map[string]any{ + "success": false, + "error": map[string]any{ + "code": "RATE_LIMITED", + "message": 
fmt.Sprintf( + "Rate limit exceeded. Retry after %d seconds.", + retryAfter, + ), + }, + } + + //nolint:errcheck // best-effort response write + _ = json.NewEncoder(w).Encode(response) +} + +type limiterEntry struct { + limiter *rate.Limiter + lastAccess int64 +} + +type localLimiter struct { + limiters sync.Map +} + +const ( + cleanupInterval = 5 * time.Minute + entryTTL = 10 * time.Minute +) + +func newLocalLimiter() *localLimiter { + l := &localLimiter{} + go l.cleanup() + return l +} + +func (l *localLimiter) cleanup() { + ticker := time.NewTicker(cleanupInterval) + defer ticker.Stop() + + for range ticker.C { + cutoff := time.Now().Add(-entryTTL).Unix() + l.limiters.Range(func(key, value any) bool { + entry, ok := value.(*limiterEntry) + if ok && entry.lastAccess < cutoff { + l.limiters.Delete(key) + } + return true + }) + } +} + +func (l *localLimiter) allow( + key string, + limit redis_rate.Limit, +) (*redis_rate.Result, error) { + ratePerSec := float64(limit.Rate) / limit.Period.Seconds() + now := time.Now().Unix() + + entryI, loaded := l.limiters.Load(key) + if !loaded { + newEntry := &limiterEntry{ + limiter: rate.NewLimiter( + rate.Limit(ratePerSec), + limit.Burst, + ), + lastAccess: now, + } + entryI, _ = l.limiters.LoadOrStore(key, newEntry) + } + + entry, ok := entryI.(*limiterEntry) + if !ok { + return nil, fmt.Errorf("invalid limiter entry type") + } + entry.lastAccess = now + + allowed := entry.limiter.Allow() + + remaining := int(entry.limiter.Tokens()) + if remaining < 0 { + remaining = 0 + } + + var retryAfter time.Duration + if !allowed { + retryAfter = time.Duration(float64(time.Second) / ratePerSec) + } else { + retryAfter = -1 + } + + allowedInt := 0 + if allowed { + allowedInt = 1 + } + + return &redis_rate.Result{ + Limit: limit, + Allowed: allowedInt, + Remaining: remaining, + RetryAfter: retryAfter, + ResetAfter: time.Duration(float64(time.Second) / ratePerSec), + }, nil +} + +type TierConfig struct { + RequestsPerMinute int + BurstSize 
int +} + +var DefaultTiers = map[string]TierConfig{ + "free": {RequestsPerMinute: 60, BurstSize: 10}, + "pro": {RequestsPerMinute: 600, BurstSize: 100}, + "enterprise": {RequestsPerMinute: 6000, BurstSize: 1000}, +} + +func TieredRateLimiter( + rdb *redis.Client, + tiers map[string]TierConfig, +) func(http.Handler) http.Handler { + limiter := redis_rate.NewLimiter(rdb) + fallback := newLocalLimiter() + + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + userID := GetUserID(r.Context()) + tier := GetUserTier(r.Context()) + + if tier == "" { + tier = "free" + } + + config, ok := tiers[tier] + if !ok { + config = tiers["free"] + } + + limit := redis_rate.Limit{ + Rate: config.RequestsPerMinute, + Burst: config.BurstSize, + Period: time.Minute, + } + + key := fmt.Sprintf("ratelimit:user:%s", userID) + + res, err := limiter.Allow(r.Context(), key, limit) + if err != nil { + //nolint:errcheck // fallback never fails + res, _ = fallback.allow(key, limit) + } + + w.Header().Set("X-RateLimit-Tier", tier) + setRateLimitHeaders(w, res, limit) + + if res.Allowed == 0 { + writeRateLimitExceeded(w, res) + return + } + + next.ServeHTTP(w, r) + }) + } +} + +func PerMinute(rate, burst int) redis_rate.Limit { + return redis_rate.Limit{ + Rate: rate, + Burst: burst, + Period: time.Minute, + } +} + +func PerSecond(rate, burst int) redis_rate.Limit { + return redis_rate.Limit{ + Rate: rate, + Burst: burst, + Period: time.Second, + } +} + +func PerHour(rate, burst int) redis_rate.Limit { + return redis_rate.Limit{ + Rate: rate, + Burst: burst, + Period: time.Hour, + } +} diff --git a/backends/go-backend/internal/middleware/request_id.go b/backends/go-backend/internal/middleware/request_id.go new file mode 100644 index 0000000..77d9816 --- /dev/null +++ b/backends/go-backend/internal/middleware/request_id.go @@ -0,0 +1,40 @@ +// AngelaMos | 2026 +// request_id.go + +package middleware + +import ( + "context" + 
"net/http" + + "github.com/google/uuid" +) + +type contextKey string + +const RequestIDKey contextKey = "request_id" + +const RequestIDHeader = "X-Request-ID" + +func RequestID(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestID := r.Header.Get(RequestIDHeader) + + if requestID == "" { + requestID = uuid.New().String() + } + + ctx := context.WithValue(r.Context(), RequestIDKey, requestID) + + w.Header().Set(RequestIDHeader, requestID) + + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +func GetRequestID(ctx context.Context) string { + if id, ok := ctx.Value(RequestIDKey).(string); ok { + return id + } + return "" +} diff --git a/backends/go-backend/internal/server/server.go b/backends/go-backend/internal/server/server.go new file mode 100644 index 0000000..58d5cba --- /dev/null +++ b/backends/go-backend/internal/server/server.go @@ -0,0 +1,108 @@ +// AngelaMos | 2026 +// server.go + +package server + +import ( + "context" + "errors" + "fmt" + "log/slog" + "net/http" + "time" + + "github.com/go-chi/chi/v5" + chimw "github.com/go-chi/chi/v5/middleware" + + "github.com/carterperez-dev/templates/go-backend/internal/config" + "github.com/carterperez-dev/templates/go-backend/internal/health" +) + +type Server struct { + httpServer *http.Server + router *chi.Mux + config config.ServerConfig + healthHandler *health.Handler + logger *slog.Logger +} + +type Config struct { + ServerConfig config.ServerConfig + HealthHandler *health.Handler + Logger *slog.Logger +} + +func New(cfg Config) *Server { + router := chi.NewRouter() + + router.Use(chimw.CleanPath) + router.Use(chimw.StripSlashes) + + return &Server{ + httpServer: &http.Server{ + Addr: cfg.ServerConfig.Address(), + Handler: router, + ReadTimeout: cfg.ServerConfig.ReadTimeout, + WriteTimeout: cfg.ServerConfig.WriteTimeout, + IdleTimeout: cfg.ServerConfig.IdleTimeout, + }, + router: router, + config: cfg.ServerConfig, + healthHandler: cfg.HealthHandler, 
+ logger: cfg.Logger, + } +} + +func (s *Server) Router() *chi.Mux { + return s.router +} + +func (s *Server) Start() error { + s.logger.Info("starting HTTP server", + "addr", s.config.Address(), + "read_timeout", s.config.ReadTimeout, + "write_timeout", s.config.WriteTimeout, + "idle_timeout", s.config.IdleTimeout, + ) + + if err := s.httpServer.ListenAndServe(); err != nil && + !errors.Is(err, http.ErrServerClosed) { + return fmt.Errorf("http server error: %w", err) + } + + return nil +} + +func (s *Server) Shutdown(ctx context.Context, drainDelay time.Duration) error { + s.logger.Info("initiating graceful shutdown") + + s.logger.Info("marking server as not ready") + if s.healthHandler != nil { + s.healthHandler.SetReady(false) + s.healthHandler.SetShutdown(true) + } + + s.logger.Info("waiting for load balancer to drain", + "delay", drainDelay, + ) + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(drainDelay): + } + + s.logger.Info("stopping HTTP server") + shutdownCtx, cancel := context.WithTimeout(ctx, s.config.ShutdownTimeout) + defer cancel() + + if err := s.httpServer.Shutdown(shutdownCtx); err != nil { + return fmt.Errorf("http server shutdown: %w", err) + } + + s.logger.Info("HTTP server stopped gracefully") + return nil +} + +func (s *Server) Address() string { + return s.httpServer.Addr +} diff --git a/backends/go-backend/internal/user/dto.go b/backends/go-backend/internal/user/dto.go new file mode 100644 index 0000000..ba2b068 --- /dev/null +++ b/backends/go-backend/internal/user/dto.go @@ -0,0 +1,84 @@ +// AngelaMos | 2026 +// dto.go + +package user + +import ( + "time" +) + +type CreateUserRequest struct { + Email string `json:"email" validate:"required,email,max=255"` + Password string `json:"password" validate:"required,min=8,max=128"` + Name string `json:"name" validate:"required,min=1,max=100"` +} + +type UpdateUserRequest struct { + Name *string `json:"name,omitempty" validate:"omitempty,min=1,max=100"` +} + +type 
UpdateUserRoleRequest struct { + Role string `json:"role" validate:"required,oneof=user admin"` +} + +type UpdateUserTierRequest struct { + Tier string `json:"tier" validate:"required,oneof=free pro enterprise"` +} + +type UserResponse struct { + ID string `json:"id"` + Email string `json:"email"` + Name string `json:"name"` + Role string `json:"role"` + Tier string `json:"tier"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +type UserListResponse struct { + Users []UserResponse `json:"users"` +} + +type ListUsersParams struct { + Page int `json:"page"` + PageSize int `json:"page_size"` + Search string `json:"search"` + Role string `json:"role"` + Tier string `json:"tier"` +} + +func (p *ListUsersParams) Normalize() { + if p.Page < 1 { + p.Page = 1 + } + if p.PageSize < 1 { + p.PageSize = 20 + } + if p.PageSize > 100 { + p.PageSize = 100 + } +} + +func (p *ListUsersParams) Offset() int { + return (p.Page - 1) * p.PageSize +} + +func ToUserResponse(u *User) UserResponse { + return UserResponse{ + ID: u.ID, + Email: u.Email, + Name: u.Name, + Role: u.Role, + Tier: u.Tier, + CreatedAt: u.CreatedAt, + UpdatedAt: u.UpdatedAt, + } +} + +func ToUserResponseList(users []User) []UserResponse { + responses := make([]UserResponse, 0, len(users)) + for _, u := range users { + responses = append(responses, ToUserResponse(&u)) + } + return responses +} diff --git a/backends/go-backend/internal/user/entity.go b/backends/go-backend/internal/user/entity.go new file mode 100644 index 0000000..f830b05 --- /dev/null +++ b/backends/go-backend/internal/user/entity.go @@ -0,0 +1,40 @@ +// AngelaMos | 2026 +// entity.go + +package user + +import ( + "time" +) + +type User struct { + ID string `db:"id"` + Email string `db:"email"` + PasswordHash string `db:"password_hash"` + Name string `db:"name"` + Role string `db:"role"` + Tier string `db:"tier"` + TokenVersion int `db:"token_version"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt 
time.Time `db:"updated_at"` + DeletedAt *time.Time `db:"deleted_at"` +} + +func (u *User) IsDeleted() bool { + return u.DeletedAt != nil +} + +func (u *User) IsAdmin() bool { + return u.Role == RoleAdmin +} + +const ( + RoleUser = "user" + RoleAdmin = "admin" +) + +const ( + TierFree = "free" + TierPro = "pro" + TierEnterprise = "enterprise" +) diff --git a/backends/go-backend/internal/user/handler.go b/backends/go-backend/internal/user/handler.go new file mode 100644 index 0000000..8a21889 --- /dev/null +++ b/backends/go-backend/internal/user/handler.go @@ -0,0 +1,288 @@ +// AngelaMos | 2026 +// handler.go + +package user + +import ( + "encoding/json" + "errors" + "net/http" + "strconv" + + "github.com/go-chi/chi/v5" + "github.com/go-playground/validator/v10" + + "github.com/carterperez-dev/templates/go-backend/internal/core" + "github.com/carterperez-dev/templates/go-backend/internal/middleware" +) + +type Handler struct { + service *Service + validator *validator.Validate +} + +func NewHandler(service *Service) *Handler { + return &Handler{ + service: service, + validator: validator.New(validator.WithRequiredStructEnabled()), + } +} + +func (h *Handler) RegisterRoutes( + r chi.Router, + authenticator func(http.Handler) http.Handler, +) { + r.Route("/users", func(r chi.Router) { + r.Use(authenticator) + + r.Get("/me", h.GetMe) + r.Put("/me", h.UpdateMe) + r.Delete("/me", h.DeleteMe) + }) +} + +func (h *Handler) GetMe(w http.ResponseWriter, r *http.Request) { + userID := middleware.GetUserID(r.Context()) + + user, err := h.service.GetMe(r.Context(), userID) + if err != nil { + if errors.Is(err, core.ErrNotFound) { + core.NotFound(w, "user") + return + } + core.InternalServerError(w, err) + return + } + + core.OK(w, ToUserResponse(user)) +} + +func (h *Handler) UpdateMe(w http.ResponseWriter, r *http.Request) { + userID := middleware.GetUserID(r.Context()) + + var req UpdateUserRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + 
core.BadRequest(w, "invalid request body") + return + } + + if err := h.validator.Struct(req); err != nil { + core.BadRequest(w, core.FormatValidationError(err)) + return + } + + user, err := h.service.UpdateMe(r.Context(), userID, req) + if err != nil { + if errors.Is(err, core.ErrNotFound) { + core.NotFound(w, "user") + return + } + core.InternalServerError(w, err) + return + } + + core.OK(w, ToUserResponse(user)) +} + +func (h *Handler) DeleteMe(w http.ResponseWriter, r *http.Request) { + userID := middleware.GetUserID(r.Context()) + + if err := h.service.DeleteMe(r.Context(), userID); err != nil { + if errors.Is(err, core.ErrNotFound) { + core.NotFound(w, "user") + return + } + core.InternalServerError(w, err) + return + } + + core.NoContent(w) +} + +// RegisterAdminRoutes registers admin-only user management endpoints. +func (h *Handler) RegisterAdminRoutes( + r chi.Router, + authenticator, adminOnly func(http.Handler) http.Handler, +) { + r.Route("/admin/users", func(r chi.Router) { + r.Use(authenticator) + r.Use(adminOnly) + + r.Get("/", h.ListUsers) + r.Get("/{userID}", h.GetUser) + r.Put("/{userID}", h.UpdateUser) + r.Put("/{userID}/role", h.UpdateUserRole) + r.Put("/{userID}/tier", h.UpdateUserTier) + r.Delete("/{userID}", h.DeleteUser) + }) +} + +// ListUsers returns a paginated list of users with optional filtering. +func (h *Handler) ListUsers(w http.ResponseWriter, r *http.Request) { + params := ListUsersParams{ + Page: parseIntQuery(r, "page", 1), + PageSize: parseIntQuery(r, "page_size", 20), + Search: r.URL.Query().Get("search"), + Role: r.URL.Query().Get("role"), + Tier: r.URL.Query().Get("tier"), + } + + users, total, err := h.service.ListUsers(r.Context(), params) + if err != nil { + core.InternalServerError(w, err) + return + } + + core.Paginated( + w, + ToUserResponseList(users), + params.Page, + params.PageSize, + total, + ) +} + +// GetUser returns a specific user by ID (admin only). 
+func (h *Handler) GetUser(w http.ResponseWriter, r *http.Request) { + userID := chi.URLParam(r, "userID") + + user, err := h.service.GetUser(r.Context(), userID) + if err != nil { + if errors.Is(err, core.ErrNotFound) { + core.NotFound(w, "user") + return + } + core.InternalServerError(w, err) + return + } + + core.OK(w, ToUserResponse(user)) +} + +// UpdateUser updates a specific user's profile (admin only). +func (h *Handler) UpdateUser(w http.ResponseWriter, r *http.Request) { + userID := chi.URLParam(r, "userID") + + var req UpdateUserRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + core.BadRequest(w, "invalid request body") + return + } + + if err := h.validator.Struct(req); err != nil { + core.BadRequest(w, core.FormatValidationError(err)) + return + } + + user, err := h.service.UpdateUser(r.Context(), userID, req) + if err != nil { + if errors.Is(err, core.ErrNotFound) { + core.NotFound(w, "user") + return + } + core.InternalServerError(w, err) + return + } + + core.OK(w, ToUserResponse(user)) +} + +// UpdateUserRole changes a user's role (admin only). +func (h *Handler) UpdateUserRole(w http.ResponseWriter, r *http.Request) { + userID := chi.URLParam(r, "userID") + + var req UpdateUserRoleRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + core.BadRequest(w, "invalid request body") + return + } + + if err := h.validator.Struct(req); err != nil { + core.BadRequest(w, core.FormatValidationError(err)) + return + } + + user, err := h.service.UpdateUserRole(r.Context(), userID, req.Role) + if err != nil { + if errors.Is(err, core.ErrNotFound) { + core.NotFound(w, "user") + return + } + core.InternalServerError(w, err) + return + } + + core.OK(w, ToUserResponse(user)) +} + +// UpdateUserTier changes a user's subscription tier (admin only). 
+func (h *Handler) UpdateUserTier(w http.ResponseWriter, r *http.Request) { + userID := chi.URLParam(r, "userID") + + var req UpdateUserTierRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + core.BadRequest(w, "invalid request body") + return + } + + if err := h.validator.Struct(req); err != nil { + core.BadRequest(w, core.FormatValidationError(err)) + return + } + + user, err := h.service.UpdateUserTier(r.Context(), userID, req.Tier) + if err != nil { + if errors.Is(err, core.ErrNotFound) { + core.NotFound(w, "user") + return + } + core.InternalServerError(w, err) + return + } + + core.OK(w, ToUserResponse(user)) +} + +// DeleteUser soft deletes a user account (admin only). +func (h *Handler) DeleteUser(w http.ResponseWriter, r *http.Request) { + requesterID := middleware.GetUserID(r.Context()) + targetID := chi.URLParam(r, "userID") + + if err := h.service.CanDeleteUser(r.Context(), requesterID, targetID); err != nil { + if errors.Is(err, core.ErrForbidden) { + core.Forbidden(w, "insufficient permissions") + return + } + if errors.Is(err, core.ErrNotFound) { + core.NotFound(w, "user") + return + } + core.InternalServerError(w, err) + return + } + + if err := h.service.DeleteUser(r.Context(), targetID); err != nil { + if errors.Is(err, core.ErrNotFound) { + core.NotFound(w, "user") + return + } + core.InternalServerError(w, err) + return + } + + core.NoContent(w) +} + +func parseIntQuery(r *http.Request, key string, defaultVal int) int { + val := r.URL.Query().Get(key) + if val == "" { + return defaultVal + } + + parsed, err := strconv.Atoi(val) + if err != nil { + return defaultVal + } + + return parsed +} diff --git a/backends/go-backend/internal/user/repository.go b/backends/go-backend/internal/user/repository.go new file mode 100644 index 0000000..a255b08 --- /dev/null +++ b/backends/go-backend/internal/user/repository.go @@ -0,0 +1,289 @@ +// AngelaMos | 2026 +// repository.go + +package user + +import ( + "context" + "database/sql" + 
"errors" + "fmt" + "strings" + + "github.com/jackc/pgx/v5/pgconn" + + "github.com/carterperez-dev/templates/go-backend/internal/core" +) + +type Repository interface { + Create(ctx context.Context, user *User) error + GetByID(ctx context.Context, id string) (*User, error) + GetByEmail(ctx context.Context, email string) (*User, error) + Update(ctx context.Context, user *User) error + UpdatePassword(ctx context.Context, id, passwordHash string) error + IncrementTokenVersion(ctx context.Context, id string) error + SoftDelete(ctx context.Context, id string) error + List(ctx context.Context, params ListUsersParams) ([]User, int, error) + ExistsByEmail(ctx context.Context, email string) (bool, error) +} + +type repository struct { + db core.DBTX +} + +func NewRepository(db core.DBTX) Repository { + return &repository{db: db} +} + +func (r *repository) Create(ctx context.Context, user *User) error { + query := ` + INSERT INTO users (id, email, password_hash, name, role, tier) + VALUES ($1, $2, $3, $4, $5, $6) + RETURNING created_at, updated_at, token_version` + + err := r.db.GetContext(ctx, user, query, + user.ID, + user.Email, + user.PasswordHash, + user.Name, + user.Role, + user.Tier, + ) + if err != nil { + if isDuplicateKeyError(err) { + return fmt.Errorf("create user: %w", core.ErrDuplicateKey) + } + return fmt.Errorf("create user: %w", err) + } + + return nil +} + +func (r *repository) GetByID(ctx context.Context, id string) (*User, error) { + query := ` + SELECT id, email, password_hash, name, role, tier, token_version, + created_at, updated_at, deleted_at + FROM users + WHERE id = $1 AND deleted_at IS NULL` + + var user User + err := r.db.GetContext(ctx, &user, query, id) + if errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("get user: %w", core.ErrNotFound) + } + if err != nil { + return nil, fmt.Errorf("get user: %w", err) + } + + return &user, nil +} + +func (r *repository) GetByEmail( + ctx context.Context, + email string, +) (*User, error) { + query 
:= ` + SELECT id, email, password_hash, name, role, tier, token_version, + created_at, updated_at, deleted_at + FROM users + WHERE email = $1 AND deleted_at IS NULL` + + var user User + err := r.db.GetContext(ctx, &user, query, email) + if errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("get user by email: %w", core.ErrNotFound) + } + if err != nil { + return nil, fmt.Errorf("get user by email: %w", err) + } + + return &user, nil +} + +func (r *repository) Update(ctx context.Context, user *User) error { + query := ` + UPDATE users + SET name = $2, role = $3, tier = $4, updated_at = NOW() + WHERE id = $1 AND deleted_at IS NULL + RETURNING updated_at` + + err := r.db.GetContext(ctx, &user.UpdatedAt, query, + user.ID, + user.Name, + user.Role, + user.Tier, + ) + if errors.Is(err, sql.ErrNoRows) { + return fmt.Errorf("update user: %w", core.ErrNotFound) + } + if err != nil { + return fmt.Errorf("update user: %w", err) + } + + return nil +} + +func (r *repository) UpdatePassword( + ctx context.Context, + id, passwordHash string, +) error { + query := ` + UPDATE users + SET password_hash = $2, updated_at = NOW() + WHERE id = $1 AND deleted_at IS NULL` + + result, err := r.db.ExecContext(ctx, query, id, passwordHash) + if err != nil { + return fmt.Errorf("update password: %w", err) + } + + rows, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("update password: %w", err) + } + + if rows == 0 { + return fmt.Errorf("update password: %w", core.ErrNotFound) + } + + return nil +} + +func (r *repository) IncrementTokenVersion( + ctx context.Context, + id string, +) error { + query := ` + UPDATE users + SET token_version = token_version + 1, updated_at = NOW() + WHERE id = $1 AND deleted_at IS NULL` + + result, err := r.db.ExecContext(ctx, query, id) + if err != nil { + return fmt.Errorf("increment token version: %w", err) + } + + rows, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("increment token version: %w", err) + } + + if 
rows == 0 { + return fmt.Errorf("increment token version: %w", core.ErrNotFound) + } + + return nil +} + +func (r *repository) SoftDelete(ctx context.Context, id string) error { + query := ` + UPDATE users + SET deleted_at = NOW(), updated_at = NOW() + WHERE id = $1 AND deleted_at IS NULL` + + result, err := r.db.ExecContext(ctx, query, id) + if err != nil { + return fmt.Errorf("delete user: %w", err) + } + + rows, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("delete user: %w", err) + } + + if rows == 0 { + return fmt.Errorf("delete user: %w", core.ErrNotFound) + } + + return nil +} + +func (r *repository) List( + ctx context.Context, + params ListUsersParams, +) ([]User, int, error) { + params.Normalize() + + var conditions []string + var args []any + argIdx := 1 + + conditions = append(conditions, "deleted_at IS NULL") + + if params.Search != "" { + conditions = append(conditions, fmt.Sprintf( + "(email ILIKE $%d OR name ILIKE $%d)", argIdx, argIdx)) + args = append(args, "%"+escapeLike(params.Search)+"%") + argIdx++ + } + + if params.Role != "" { + conditions = append(conditions, fmt.Sprintf("role = $%d", argIdx)) + args = append(args, params.Role) + argIdx++ + } + + if params.Tier != "" { + conditions = append(conditions, fmt.Sprintf("tier = $%d", argIdx)) + args = append(args, params.Tier) + argIdx++ + } + + whereClause := strings.Join(conditions, " AND ") + + countQuery := fmt.Sprintf( + "SELECT COUNT(*) FROM users WHERE %s", + whereClause, + ) + var total int + if err := r.db.GetContext(ctx, &total, countQuery, args...); err != nil { + return nil, 0, fmt.Errorf("count users: %w", err) + } + + query := fmt.Sprintf(` + SELECT id, email, name, role, tier, token_version, + created_at, updated_at, deleted_at + FROM users + WHERE %s + ORDER BY created_at DESC + LIMIT $%d OFFSET $%d`, + whereClause, argIdx, argIdx+1) + + args = append(args, params.PageSize, params.Offset()) + + var users []User + if err := r.db.SelectContext(ctx, &users, 
query, args...); err != nil { + return nil, 0, fmt.Errorf("list users: %w", err) + } + + return users, total, nil +} + +func (r *repository) ExistsByEmail( + ctx context.Context, + email string, +) (bool, error) { + query := `SELECT EXISTS(SELECT 1 FROM users WHERE email = $1 AND deleted_at IS NULL)` + + var exists bool + if err := r.db.GetContext(ctx, &exists, query, email); err != nil { + return false, fmt.Errorf("check email exists: %w", err) + } + + return exists, nil +} + +func isDuplicateKeyError(err error) bool { + var pgErr *pgconn.PgError + if errors.As(err, &pgErr) { + return pgErr.Code == "23505" + } + return false +} + +func escapeLike(s string) string { + s = strings.ReplaceAll(s, "\\", "\\\\") + s = strings.ReplaceAll(s, "%", "\\%") + s = strings.ReplaceAll(s, "_", "\\_") + return s +} diff --git a/backends/go-backend/internal/user/service.go b/backends/go-backend/internal/user/service.go new file mode 100644 index 0000000..699d24d --- /dev/null +++ b/backends/go-backend/internal/user/service.go @@ -0,0 +1,256 @@ +// AngelaMos | 2026 +// service.go + +package user + +import ( + "context" + "fmt" + "strings" + + "github.com/google/uuid" + + "github.com/carterperez-dev/templates/go-backend/internal/auth" + "github.com/carterperez-dev/templates/go-backend/internal/core" +) + +type Service struct { + repo Repository +} + +func NewService(repo Repository) *Service { + return &Service{repo: repo} +} + +func (s *Service) GetByID( + ctx context.Context, + id string, +) (*auth.UserInfo, error) { + user, err := s.repo.GetByID(ctx, id) + if err != nil { + return nil, err + } + + return toUserInfo(user), nil +} + +func (s *Service) GetByEmail( + ctx context.Context, + email string, +) (*auth.UserInfo, error) { + user, err := s.repo.GetByEmail(ctx, strings.ToLower(email)) + if err != nil { + return nil, err + } + + return toUserInfo(user), nil +} + +func (s *Service) Create( + ctx context.Context, + email, passwordHash, name string, +) (*auth.UserInfo, error) { + 
user := &User{ + ID: uuid.New().String(), + Email: strings.ToLower(email), + PasswordHash: passwordHash, + Name: name, + Role: RoleUser, + Tier: TierFree, + } + + if err := s.repo.Create(ctx, user); err != nil { + return nil, err + } + + return toUserInfo(user), nil +} + +func (s *Service) IncrementTokenVersion( + ctx context.Context, + userID string, +) error { + return s.repo.IncrementTokenVersion(ctx, userID) +} + +func (s *Service) UpdatePassword( + ctx context.Context, + userID, passwordHash string, +) error { + return s.repo.UpdatePassword(ctx, userID, passwordHash) +} + +func (s *Service) GetUser(ctx context.Context, id string) (*User, error) { + return s.repo.GetByID(ctx, id) +} + +func (s *Service) UpdateUser( + ctx context.Context, + id string, + req UpdateUserRequest, +) (*User, error) { + user, err := s.repo.GetByID(ctx, id) + if err != nil { + return nil, err + } + + if req.Name != nil { + user.Name = *req.Name + } + + if err := s.repo.Update(ctx, user); err != nil { + return nil, err + } + + return user, nil +} + +func (s *Service) UpdateUserRole( + ctx context.Context, + id, role string, +) (*User, error) { + if role != RoleUser && role != RoleAdmin { + return nil, fmt.Errorf( + "update role: invalid role %q: %w", + role, + core.ErrInvalidInput, + ) + } + + user, err := s.repo.GetByID(ctx, id) + if err != nil { + return nil, err + } + + user.Role = role + + if err := s.repo.Update(ctx, user); err != nil { + return nil, err + } + + return user, nil +} + +func (s *Service) UpdateUserTier( + ctx context.Context, + id, tier string, +) (*User, error) { + if tier != TierFree && tier != TierPro && tier != TierEnterprise { + return nil, fmt.Errorf( + "update tier: invalid tier %q: %w", + tier, + core.ErrInvalidInput, + ) + } + + user, err := s.repo.GetByID(ctx, id) + if err != nil { + return nil, err + } + + user.Tier = tier + + if err := s.repo.Update(ctx, user); err != nil { + return nil, err + } + + return user, nil +} + +func (s *Service) DeleteUser(ctx 
context.Context, id string) error { + return s.repo.SoftDelete(ctx, id) +} + +func (s *Service) ListUsers( + ctx context.Context, + params ListUsersParams, +) ([]User, int, error) { + return s.repo.List(ctx, params) +} + +func (s *Service) GetMe(ctx context.Context, userID string) (*User, error) { + if userID == "" { + return nil, fmt.Errorf("get me: %w", core.ErrUnauthorized) + } + + user, err := s.repo.GetByID(ctx, userID) + if err != nil { + return nil, err + } + + return user, nil +} + +func (s *Service) UpdateMe( + ctx context.Context, + userID string, + req UpdateUserRequest, +) (*User, error) { + if userID == "" { + return nil, fmt.Errorf("update me: %w", core.ErrUnauthorized) + } + + return s.UpdateUser(ctx, userID, req) +} + +func (s *Service) DeleteMe(ctx context.Context, userID string) error { + if userID == "" { + return fmt.Errorf("delete me: %w", core.ErrUnauthorized) + } + + return s.repo.SoftDelete(ctx, userID) +} + +func (s *Service) EmailExists( + ctx context.Context, + email string, +) (bool, error) { + exists, err := s.repo.ExistsByEmail(ctx, email) + if err != nil { + return false, err + } + return exists, nil +} + +func (s *Service) CanDeleteUser( + ctx context.Context, + requesterID, targetID string, +) error { + if requesterID == targetID { + return nil + } + + requester, err := s.repo.GetByID(ctx, requesterID) + if err != nil { + return err + } + + if !requester.IsAdmin() { + return fmt.Errorf("delete user: %w", core.ErrForbidden) + } + + target, err := s.repo.GetByID(ctx, targetID) + if err != nil { + return err + } + + if target.IsAdmin() { + return fmt.Errorf("cannot delete admin users: %w", core.ErrForbidden) + } + + return nil +} + +func toUserInfo(u *User) *auth.UserInfo { + return &auth.UserInfo{ + ID: u.ID, + Email: u.Email, + Name: u.Name, + PasswordHash: u.PasswordHash, + Role: u.Role, + Tier: u.Tier, + TokenVersion: u.TokenVersion, + } +} + +var _ auth.UserProvider = (*Service)(nil) diff --git a/docs/research/DOCKER-NGINX.md 
b/docs/research/DOCKER-NGINX.md deleted file mode 100644 index dc33b80..0000000 --- a/docs/research/DOCKER-NGINX.md +++ /dev/null @@ -1,507 +0,0 @@ -# Production Docker stack for FastAPI and React in 2025 - -The modern Docker Compose ecosystem has matured significantly, with **the Compose Specification replacing legacy v2/v3 syntax** and BuildKit becoming the default engine. Your proposed architecture is sound, but several optimizations can dramatically improve performance, security, and developer experience. The key shifts for 2025: adopt **uv** as your Python package manager (10-100× faster than pip), consider **Granian** as an alternative ASGI server for maximum throughput, leverage **Compose Watch** for superior hot-reload without bind mounts, and structure your Dockerfiles with multi-stage builds using BuildKit cache mounts. - -This guide provides senior-level patterns addressing your specific stack: FastAPI with async SQLAlchemy, React/Vite frontend, Nginx reverse proxy with WebSocket support, and proper dev/prod separation. - -## Modern Compose architecture eliminates version confusion - -The **`version` field is now deprecated and should be omitted** entirely. Since Compose v1.27.0+, the unified Compose Specification auto-detects behavior. Your file naming convention is correct—prefer `compose.yml` over the legacy `docker-compose.yml`. 
- -For your proposed structure, the recommended override pattern maximizes code reuse while maintaining clear separation: - -```yaml -# compose.yml (Base/shared configuration - no version field) -name: myproject - -services: - api: - build: - context: ./backend - dockerfile: ../conf/docker/fastapi.Dockerfile - target: ${BUILD_TARGET:-production} - networks: - - backend - - frontend - depends_on: - db: - condition: service_healthy - restart: true # Compose 2.17.0+ restarts api if db restarts - - frontend: - build: - context: ./frontend - dockerfile: ../conf/docker/frontend.Dockerfile - target: ${BUILD_TARGET:-production} - networks: - - frontend - - nginx: - image: nginx:1.27-alpine - volumes: - - ./conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro - - ./conf/nginx/${NGINX_CONFIG:-prod}.nginx:/etc/nginx/conf.d/default.conf:ro - ports: - - "80:80" - depends_on: - api: - condition: service_healthy - networks: - - frontend - - backend - - db: - image: postgres:16-alpine - volumes: - - db_data:/var/lib/postgresql/data - healthcheck: - test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 30s - networks: - - backend - -networks: - backend: - frontend: - -volumes: - db_data: -``` - -Custom networks are essential for security—your database sits only on the `backend` network, inaccessible to the frontend container. Services resolve each other by name automatically (`http://api:8000`). - -**Compose Watch** (GA in Compose 2.22.0+) provides superior hot-reload compared to traditional bind mounts, with granular control over sync, rebuild, and restart actions. 
For development, your override file would include: - -```yaml -# compose.dev.yml -services: - api: - build: - target: development - develop: - watch: - - action: sync - path: ./backend - target: /app - ignore: - - __pycache__/ - - .venv/ - - action: rebuild - path: ./backend/pyproject.toml - ports: - - "8000:8000" - environment: - - NGINX_CONFIG=dev - - frontend: - build: - target: development - develop: - watch: - - action: sync - path: ./frontend/src - target: /app/src - - action: rebuild - path: ./frontend/package.json -``` - -Execute with `docker compose watch` for development. For production: `docker compose -f compose.yml up -d`. - -**Resource limits now work without Swarm** using the `deploy.resources` syntax. Always set these in production to prevent runaway containers: - -```yaml -services: - api: - deploy: - resources: - limits: - cpus: '2.0' - memory: 1G - reservations: - cpus: '0.5' - memory: 256M -``` - -## FastAPI containers need uv, multi-stage builds, and careful ASGI selection - -**Use `python:3.12-slim` as your base image.** Alpine's musl libc causes package compatibility issues and can make builds 50× slower when compiling native extensions. The slim variant offers the best balance at ~130MB. - -**uv is now production-ready** with 16+ million monthly downloads. Created by Astral (the Ruff team), it provides 10-100× faster dependency installation with proper lock file support. Here's the complete production Dockerfile pattern: - -```dockerfile -# conf/docker/fastapi.Dockerfile -# syntax=docker/dockerfile:1 - -# ============ BUILD STAGE ============ -FROM python:3.12-slim AS builder -COPY --from=ghcr.io/astral-sh/uv:0.9 /uv /uvx /bin/ - -WORKDIR /app -ENV UV_COMPILE_BYTECODE=1 \ - UV_LINK_MODE=copy - -# Install dependencies first (cached layer) -COPY pyproject.toml uv.lock ./ -RUN --mount=type=cache,target=/root/.cache/uv \ - uv sync --locked --no-install-project --no-dev - -# Install project -COPY . . 
-RUN --mount=type=cache,target=/root/.cache/uv \ - uv sync --locked --no-dev --no-editable - -# ============ DEVELOPMENT STAGE ============ -FROM python:3.12-slim AS development -COPY --from=ghcr.io/astral-sh/uv:0.9 /uv /uvx /bin/ - -WORKDIR /app -ENV PYTHONDONTWRITEBYTECODE=1 \ - PYTHONUNBUFFERED=1 - -COPY pyproject.toml uv.lock ./ -RUN --mount=type=cache,target=/root/.cache/uv \ - uv sync --frozen - -COPY . . -CMD ["uv", "run", "uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000", "--reload"] - -# ============ PRODUCTION STAGE ============ -FROM python:3.12-slim AS production - -# Security: non-root user -RUN groupadd -g 1001 appgroup && \ - useradd -u 1001 -g appgroup -m -s /bin/false appuser - -WORKDIR /app -COPY --from=builder --chown=appuser:appgroup /app/.venv /app/.venv -COPY --from=builder --chown=appuser:appgroup /app /app - -ENV PATH="/app/.venv/bin:$PATH" \ - PYTHONDONTWRITEBYTECODE=1 \ - PYTHONUNBUFFERED=1 - -USER appuser -EXPOSE 8000 - -HEALTHCHECK --interval=30s --timeout=3s --start-period=10s --retries=3 \ - CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')" || exit 1 - -CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000", "--workers", "4"] -``` - -The `--mount=type=cache` directive is a BuildKit feature that caches pip/uv downloads between builds—essential for CI/CD speed. The `UV_COMPILE_BYTECODE=1` flag pre-compiles Python files for faster startup in production. - -**ASGI server selection in 2025** presents interesting choices. 
Uvicorn remains the safe default, but **Granian** (Rust-based) delivers higher throughput with more consistent latency: - -| Server | Requests/sec | Latency Gap (avg/max) | Best For | -|--------|-------------|----------------------|----------| -| Granian | ~50,000 | 2.8× | Maximum performance | -| Uvicorn (httptools) | ~45,000 | 6.8× | General production | -| Gunicorn + Uvicorn | ~45,000 | 6.5× | Process management | -| Hypercorn | ~35,000 | 5.2× | HTTP/2, HTTP/3 | - -For Uvicorn with multiple workers: `uvicorn app.main:app --workers 4`. The worker formula for async I/O-bound apps is `(2 × CPU_COUNT) + 1`. For Gunicorn with process management benefits, use `gunicorn app.main:app -w 4 -k uvicorn.workers.UvicornWorker --max-requests 1000 --max-requests-jitter 100`. - -**Async SQLAlchemy connection pooling** requires attention in containers: - -```python -from sqlalchemy.ext.asyncio import create_async_engine - -engine = create_async_engine( - "postgresql+asyncpg://user:pass@db:5432/dbname", - pool_size=10, - max_overflow=20, - pool_pre_ping=True, # Verify connections before use - pool_recycle=3600, # Recycle after 1 hour -) -``` - -## Frontend builds require HMR fixes and strategic caching - -Vite in Docker requires specific configuration to enable Hot Module Replacement through container networking. Your `vite.config.ts` must bind to all interfaces and enable polling for file system events: - -```typescript -export default defineConfig({ - server: { - host: "0.0.0.0", - port: 5173, - watch: { usePolling: true }, - hmr: { clientPort: 5173 }, - }, -}); -``` - -**Use `node:22-slim` for production builds**—it provides glibc compatibility and lower CVE counts than Alpine. 
The multi-stage frontend Dockerfile should separate dependency installation from build for optimal caching: - -```dockerfile -# conf/docker/frontend.Dockerfile -# ========== DEVELOPMENT ========== -FROM node:22-slim AS development -WORKDIR /app -COPY package*.json ./ -RUN npm ci -EXPOSE 5173 -CMD ["npm", "run", "dev"] - -# ========== BUILD STAGE ========== -FROM node:22-slim AS builder -WORKDIR /app -COPY package*.json ./ -ENV NODE_ENV=production -RUN npm ci --only=production -COPY . . -RUN npm run build - -# ========== PRODUCTION ========== -FROM nginx:1.27-alpine AS production -COPY --from=builder /app/dist /usr/share/nginx/html -RUN chown -R nginx:nginx /usr/share/nginx/html -USER nginx -EXPOSE 80 -CMD ["nginx", "-g", "daemon off;"] -``` - -The **node_modules handling** question has a definitive answer for Docker: install in the container, not on the host. Use an anonymous volume to preserve container modules when bind-mounting source: - -```yaml -volumes: - - ./frontend:/app - - /app/node_modules # Preserves container's node_modules -``` - -**pnpm is the 2025 recommendation** for new projects—it provides significant disk space savings through a shared store and faster installs. Enable it in your Dockerfile with `RUN corepack enable && corepack prepare pnpm@latest --activate`. - -**Vite environment variables are statically replaced at build time**—they become hardcoded strings. For runtime configuration, use the placeholder pattern: - -```dockerfile -ENV VITE_API_URL="__VITE_API_URL__" -RUN npm run build -CMD ["/bin/sh", "-c", \ - "find /usr/share/nginx/html -type f -name '*.js' -exec sed -i 's|__VITE_API_URL__|'$VITE_API_URL'|g' {} + && \ - nginx -g 'daemon off;'"] -``` - -## Nginx configuration balances performance, security, and WebSocket support - -Your nginx.conf should establish global settings while environment-specific server blocks handle routing differences. 
The critical pattern for FastAPI reverse proxying includes **upstream keepalive for connection pooling** and proper header forwarding: - -```nginx -# conf/nginx/nginx.conf -user nginx; -worker_processes auto; -worker_rlimit_nofile 65535; -error_log /var/log/nginx/error.log warn; - -events { - worker_connections 4096; - multi_accept on; - use epoll; -} - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - server_tokens off; - - # Compression - gzip on; - gzip_vary on; - gzip_comp_level 6; - gzip_types text/plain text/css application/json application/javascript text/xml application/xml image/svg+xml; - - # Rate limiting zones - limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s; - limit_req_zone $binary_remote_addr zone=auth:10m rate=1r/s; - limit_req_status 429; - - # WebSocket upgrade map - map $http_upgrade $connection_upgrade { - default upgrade; - '' close; - } - - # Upstream with connection pooling - upstream fastapi { - server api:8000; - keepalive 32; - keepalive_requests 1000; - keepalive_timeout 60s; - } - - include /etc/nginx/conf.d/*.conf; -} -``` - -The production server block handles API proxying, WebSocket connections, and static file serving with proper cache headers: - -```nginx -# conf/nginx/prod.nginx -server { - listen 80; - root /usr/share/nginx/html; - - # Security headers - add_header X-Frame-Options "SAMEORIGIN" always; - add_header X-Content-Type-Options "nosniff" always; - add_header Referrer-Policy "strict-origin-when-cross-origin" always; - - # API proxy with rate limiting - location /api/ { - limit_req zone=api burst=20 nodelay; - - proxy_pass http://fastapi/; - proxy_http_version 1.1; - proxy_set_header Connection ""; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - proxy_buffering on; - 
proxy_buffer_size 4k; - proxy_buffers 8 32k; - proxy_connect_timeout 60s; - proxy_read_timeout 60s; - } - - # WebSocket endpoint - location /api/ws { - proxy_pass http://fastapi; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_read_timeout 86400s; - proxy_buffering off; - } - - # Hashed assets - cache forever - location /assets/ { - add_header Cache-Control "public, max-age=31536000, immutable"; - try_files $uri =404; - } - - # SPA fallback - never cache index.html - location / { - add_header Cache-Control "no-cache"; - try_files $uri $uri/ /index.html; - } -} -``` - -**WebSocket configuration requires specific attention**: the `Upgrade` and `Connection` headers are hop-by-hop and must be explicitly forwarded. The `map` directive dynamically sets the Connection header based on whether an upgrade is requested. The **86400-second timeout** prevents Nginx from closing idle WebSocket connections. - -**Keep proxy_buffering ON for regular API requests**—this protects FastAPI from slow clients by letting Nginx accept the full response and free the uvicorn worker immediately. Only disable buffering for WebSocket and Server-Sent Events endpoints. 
- -For development, your dev.nginx proxies to the Vite dev server instead of serving static files: - -```nginx -# conf/nginx/dev.nginx -server { - listen 80; - access_log /var/log/nginx/access.log detailed; - - add_header Cache-Control "no-store" always; - - location / { - proxy_pass http://frontend:5173; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; - proxy_set_header Host $host; - } - - location /api/ { - proxy_pass http://api:8000/; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - } -} -``` - -## Security hardening requires layered defenses - -Container security follows defense-in-depth principles. **Never run containers as root**—this is the single most important security measure. Combine with capability dropping and read-only filesystems: - -```yaml -services: - api: - user: "1001:1001" - read_only: true - tmpfs: - - /tmp - - /var/run - cap_drop: - - ALL - security_opt: - - no-new-privileges:true -``` - -**Docker secrets provide secure credential handling** without exposing values in environment variables or compose files: - -```yaml -services: - db: - environment: - POSTGRES_PASSWORD_FILE: /run/secrets/db_password - secrets: - - db_password - -secrets: - db_password: - file: ./secrets/db_password.txt # Git-ignored -``` - -Your FastAPI app reads secrets from the mounted file: - -```python -def get_secret(name: str) -> str: - secret_path = Path(f"/run/secrets/{name}") - if secret_path.exists(): - return secret_path.read_text().strip() - return os.environ.get(name.upper(), "") -``` - -**Handle CORS at the FastAPI level**, not Nginx—this allows dynamic origin handling and proper credential support. Never configure CORS in both layers simultaneously. 
- -For zero-downtime deployments with Docker Compose, the **docker-rollout** plugin provides seamless rolling updates: - -```bash -curl https://raw.githubusercontent.com/wowu/docker-rollout/main/docker-rollout \ - -o ~/.docker/cli-plugins/docker-rollout -chmod +x ~/.docker/cli-plugins/docker-rollout -docker rollout api # Instead of docker compose up -d -``` - -**Health checks are mandatory for reliable orchestration**. Implement both liveness (is the process running?) and readiness (can we serve traffic?) endpoints: - -```python -@router.get("/health") -async def liveness(): - return {"status": "alive"} - -@router.get("/health/ready") -async def readiness(db: AsyncSession = Depends(get_db)): - await db.execute(text("SELECT 1")) - return {"status": "ready", "database": "connected"} -``` - -Database migrations should be decoupled from application startup in production. Run them as a separate step: `docker compose run --rm api alembic upgrade head`, then deploy with `docker compose up -d`. - -## Conclusion - -The 2025 Docker ecosystem offers substantial improvements over previous patterns. The unified Compose Specification eliminates version confusion, BuildKit cache mounts dramatically accelerate CI/CD builds, and tools like uv transform Python dependency management. Your architecture is well-structured—the key refinements are adopting multi-stage Dockerfiles with explicit development/production targets, leveraging Compose Watch for superior hot-reload, and ensuring proper security hardening with non-root users, capability dropping, and secrets management. - -For maximum performance, consider Granian as your ASGI server—its Rust-based implementation delivers 10-15% higher throughput with more consistent latency than Uvicorn. The upstream keepalive configuration in Nginx is often overlooked but critical for reducing TCP handshake overhead between your reverse proxy and FastAPI workers. 
- -The anti-patterns to actively avoid: bind-mounting code in production, using the `version` field in compose files, running containers as root, hardcoding secrets anywhere, and configuring CORS in both Nginx and FastAPI simultaneously. These patterns cause subtle production issues that are difficult to debug. diff --git a/docs/research/ENUMS.md b/docs/research/ENUMS.md deleted file mode 100644 index 12cf8a5..0000000 --- a/docs/research/ENUMS.md +++ /dev/null @@ -1,492 +0,0 @@ - -SQLAlchemy Enums - Careful what goes into the database -Will Rouesnel 2024-04-25 21:54 -The Situation -SQLAlchemy is an obvious choice when you need to throw together anything dealing with databases in Python. There might be other options, there might be faster options, but if you need it done then SQLAlchemy will do it for you pretty well and very ergonomically. - -The problem I ran into recently is dealing with Python enums recently. Or more specifically: I had a user input problem which obviously turned into an enum application side - I had a limited set of inputs I wanted to allow, because those were what we supported - and I didn't want strings all through my code testing for values. - -So on the client side it's obvious: check if the string matches an enum value, and use that. The enum would look something like below: - -from enum import Enum - -class Color(Enum): - RED = "red" - GREEN = "green" - BLUE = "blue" -Now from this, we have our second problem: storing this in the database. We want to not do work here - that's we're using SQLAlchemy, so we can have our commmon problems handled. And so, SQLAlchemy helps us - here's automatic enum type handling for us. 
- -Easy - so our model using the declarative syntax, and typehints can be written as follows: - -import sqlalchemy -from sqlalchemy.orm import Mapped, DeclarativeBase, Session, mapped_column -from sqlalchemy import create_engine, select, text - -class Base(DeclarativeBase): - pass - -class TestTable(Base): - __tablename__ = "test_table" - id: Mapped[int] = mapped_column(primary_key=True) - value: Mapped[Color] -This is essentially identical to the documentation we see above. And, if we run this in a sample program - it works! - -engine = create_engine("sqlite://") - -Base.metadata.create_all(engine) - -with Session(engine) as session: - # Create normal values - for enum_item in Color: - session.add(TestTable(value=enum_item)) - session.commit() - -# Now try and read the values back -with Session(engine) as session: - records = session.scalars(select(TestTable)).all() - for record in records: - print(record.value) -Color.RED -Color.GREEN -Color.BLUE -Right? We stored some enum's to the database and retreived them in simple elegant code. This is exactly what we want...right? - -But the question is...what did we actually store? Let's extend the program to do a raw query to read back that table... - -from sqlalchemy import text - -with engine.connect() as conn: - print(conn.execute(text("SELECT * FROM test_table;")).all()) -[(1, 'RED'), (2, 'GREEN'), (3, 'BLUE')] -Notice the tuples: the second column, we see "RED", "GREEN" and "BLUE"...but our enum defines our colors as RED is "red". What's going on? And is something wrong here? - -Depending how you view the situation, yes, but also no - but it's likely this isn't what you wanted either. - -The primary reason to use SQLAlchemy enum types is to take advantage of something like PostgreSQL supporting native enum types in the database. 
Everywhere else in SQLAlchemy, when we define a python class - like we do with TestTable above - we're not defining a Python object, we're defining a Python object which is describing the database objects we want and how they'll behave. - -And so long as we're using things that come from SQLAlchemy - and under-the-hood SQLAlchemy is converting that enum.Enum to sqlalchemy.Enum - then this makes complete sense. The enum we declare is declaring what values we store, and what data value they map too...in the sense that we might use the data elsewhere, in our application. Basically our database will hold the symbolic value RED and we interpret that as meaning "red" - but we reserve the right to change that interpretation. - -But if we're coming at this from a Python application perspective - i.e. the reason we made an enum - we likely have a different view of the problem. We're thinking "we want the data to look a particular way, and then to refer to it symbolically in code which we might change" - i.e. the immutable element is the data, the value, of the enum - because that's what we'll present to the user, but not what we want to have all over the application. - -In isolation these are separate problems, but automatic enum handling makes the boundary here fuzzy: because while the database is defined in our code, from one perspective, it's also external to it - i.e. we may be writing code which is meant to simply interface with and understand a database not under our control. Basically, the enum.Enum object feels like it's us saying "this is how we'll interpret the external world" and not us saying "this is what the database looks like". - -And in that case then, our view of what the enum is is probably more like "the enum is the internal symbolic representation of how we plan to consume database values" - i.e. we expect to map "red" to Color.RED from the database. Rather then reading the database and interpreting RED as "red". 
- -Nobodies wrong - but you probably have your assumptions going into this (I know I did...but it compiled, it worked, and I never questioned it - and so long as I'm the sole owner, who cares right?) - -The Problem -There are a few problems though with this interpretation. One is obvious: we're a simple, apparently safe refactor away from ruining our database schema and we might be aware of it. In the above, naive interpretation, changing Color.RED to Color.LEGACY_RED for example, is implying that RED is no longer a valid value in the database - which if we think of the enum as an application mapping to an external interface is something which might make sense. - -This is the sort of change which crops up all the time. We know the string "red" is out there, hardcoded and compiled into a bunch of old systems so we can't just go and change a color name in the database. Or we're doing rolling deployments and we need consistency of values - or share the database or any number of other complex environment concerns. Either way: we want to avoid needlessly updating the database value - changing our code, but not an apparent variable constant - should be safe. - -However we're not storing the data we think we are. We expected "red", "green" and "blue" and got "RED", "GREEN" and "BLUE". It's worth noting that the SQLAlchemy documentation leads you astray like this, since the second example showing using typing.Literal for the mapping uses the string assignments from the first (and neither shows a sample table result which makes it obvious on a quick read). - -If we change a name in this enum, then the result is actually bad if we've used it anywhere - we stop being able to read models out of this table at all. 
So if we do the following: - -class Color(Enum): - LEGACY_RED = "red" - GREEN = "green" - BLUE = "blue" -Then try to read the models we've created, it won't work - in fact we can't read any part of that table anymore (this post is written as a Jupyter notebook so the redefinition below is needed to setup the SQLAlchemy model again) - -class Base(DeclarativeBase): - pass - -class TestTable(Base): - __tablename__ = "test_table" - id: Mapped[int] = mapped_column(primary_key=True) - value: Mapped[Color] - -with Session(engine) as session: - records = session.scalars(select(TestTable)).all() - for record in records: - print(record.value) ---------------------------------------------------------------------------- -KeyError Traceback (most recent call last) -~/.local/lib/python3.10/site-packages/sqlalchemy/sql/sqltypes.py in _object_value_for_elem(self, elem) - 1608 try: --> 1609 return self._object_lookup[elem] - 1610 except KeyError as err: - -KeyError: 'RED' - -The above exception was the direct cause of the following exception: - -LookupError Traceback (most recent call last) -/tmp/ipykernel_69447/1820198460.py in - 8 - 9 with Session(engine) as session: ----> 10 records = session.scalars(select(TestTable)).all() - 11 for record in records: - 12 print(record.value) - -~/.local/lib/python3.10/site-packages/sqlalchemy/engine/result.py in all(self) - 1767 - 1768 """ --> 1769 return self._allrows() - 1770 - 1771 def __iter__(self) -> Iterator[_R]: - -~/.local/lib/python3.10/site-packages/sqlalchemy/engine/result.py in _allrows(self) - 546 make_row = self._row_getter - 547 ---> 548 rows = self._fetchall_impl() - 549 made_rows: List[_InterimRowType[_R]] - 550 if make_row: - -~/.local/lib/python3.10/site-packages/sqlalchemy/engine/result.py in _fetchall_impl(self) - 1674 - 1675 def _fetchall_impl(self) -> List[_InterimRowType[Row[Any]]]: --> 1676 return self._real_result._fetchall_impl() - 1677 - 1678 def _fetchmany_impl( - 
-~/.local/lib/python3.10/site-packages/sqlalchemy/engine/result.py in _fetchall_impl(self) - 2268 self._raise_hard_closed() - 2269 try: --> 2270 return list(self.iterator) - 2271 finally: - 2272 self._soft_close() - -~/.local/lib/python3.10/site-packages/sqlalchemy/orm/loading.py in chunks(size) - 217 break - 218 else: ---> 219 fetch = cursor._raw_all_rows() - 220 - 221 if single_entity: - -~/.local/lib/python3.10/site-packages/sqlalchemy/engine/result.py in _raw_all_rows(self) - 539 assert make_row is not None - 540 rows = self._fetchall_impl() ---> 541 return [make_row(row) for row in rows] - 542 - 543 def _allrows(self) -> List[_R]: - -~/.local/lib/python3.10/site-packages/sqlalchemy/engine/result.py in (.0) - 539 assert make_row is not None - 540 rows = self._fetchall_impl() ---> 541 return [make_row(row) for row in rows] - 542 - 543 def _allrows(self) -> List[_R]: - -lib/sqlalchemy/cyextension/resultproxy.pyx in sqlalchemy.cyextension.resultproxy.BaseRow.__init__() - -lib/sqlalchemy/cyextension/resultproxy.pyx in sqlalchemy.cyextension.resultproxy._apply_processors() - -~/.local/lib/python3.10/site-packages/sqlalchemy/sql/sqltypes.py in process(value) - 1727 value = parent_processor(value) - 1728 --> 1729 value = self._object_value_for_elem(value) - 1730 return value - 1731 - -~/.local/lib/python3.10/site-packages/sqlalchemy/sql/sqltypes.py in _object_value_for_elem(self, elem) - 1609 return self._object_lookup[elem] - 1610 except KeyError as err: --> 1611 raise LookupError( - 1612 "'%s' is not among the defined enum values. " - 1613 "Enum name: %s. Possible values: %s" - -LookupError: 'RED' is not among the defined enum values. Enum name: color. Possible values: LEGACY_RED, GREEN, BLUE -Even though we did a proper refactor, we can no longer read this table - in fact we can't even read part of it without using raw SQL and giving up on our models entirely. 
Obviously if we were writing an application, we've just broken all our queries - but not because we messed anything up, but because we thought we were making a code change when in reality we were making a data change. - -This behavior also makes it pretty much impossible to handle externally managed schemas or existing schemas - we don't really want our enum to have to follow someone else's data scheme, even if they're well behaved. - -Finally it also highlights another danger we've walked into: what if we try to read this column, and there are values there we don't recognize? We would also get the same error - in this case, RED is unknown because we removed it. But if a new version of our application comes along and has inserted ORANGE then we'd also have the same problem - we've lost backwards and forwards compatibility, in a way which doesn't necessarily show up easily. There's just no easy way to deal with these LookupError validation problems when we're loading large chunks of models - they happen at the wrong part of the stack. - -The Solution -Doing the obvious thing here got us a working application with a bunch of technical footguns - which is unfortunate, but it does work. There are plenty of situations where we'd never encounter these though - although many more where we might. So what should we do instead? - -To get the behavior we expected when we used an enum we can do the following in our model definition: - -class Base(DeclarativeBase): - pass - -class TestTable(Base): - __tablename__ = "test_table" - id: Mapped[int] = mapped_column(primary_key=True) - value: Mapped[Color] = mapped_column(sqlalchemy.Enum(Color, values_callable=lambda t: [ str(item.value) for item in t ])) -Notice the values_callable parameter. The order returned here should be the order our enum returns (and it is - it's simply passed our Enum object) - and returns the list of values which should be assigned in the database for it. 
In this case we simply do a Python string conversion of the enum value (which will just return the literal string - but if you were doing something ill-advised like mixing in numbers, then this makes it sensible for the DB). - -When we run this with a new database, we now see that we get what we expected in the underlying table: - -engine = create_engine("sqlite://") - -Base.metadata.create_all(engine) - -with Session(engine) as session: - # Create normal values - for enum_item in Color: - session.add(TestTable(value=enum_item)) - session.commit() - -# Now try and read the values back -with Session(engine) as session: - records = session.scalars(select(TestTable)).all() - print("We restored the following values in code...") - for record in records: - print(record.value) - -print("But the underlying table contains...") -with engine.connect() as conn: - print(conn.execute(text("SELECT * FROM test_table;")).all()) -We restored the following values in code... -Color.LEGACY_RED -Color.GREEN -Color.BLUE -But the underlying table contains... -[(1, 'red'), (2, 'green'), (3, 'blue')] -Perfect. Now if we're connecting to an external database, or a schema we don't control, everything works great. But what about when we have unknown values? What happens then? Well we haven't fixed that, but we're much less likely to encounter it by accident now. Of course it's worth noting, SQLAlchemy also doesn't validate the inputs we put into this model against the enum before we write it either. 
So if we do this, then we're back to it not working: - -with Session(engine) as session: - session.add(TestTable(value="reed")) - session.commit() -# Now try and read the values back -with Session(engine) as session: - records = session.scalars(select(TestTable)).all() - print("We restored the following values in code...") - for record in records: - print(record.value) ---------------------------------------------------------------------------- -KeyError Traceback (most recent call last) -~/.local/lib/python3.10/site-packages/sqlalchemy/sql/sqltypes.py in _object_value_for_elem(self, elem) - 1608 try: --> 1609 return self._object_lookup[elem] - 1610 except KeyError as err: - -KeyError: 'reed' - -The above exception was the direct cause of the following exception: - -LookupError Traceback (most recent call last) -/tmp/ipykernel_69447/3460624042.py in - 1 # Now try and read the values back - 2 with Session(engine) as session: -----> 3 records = session.scalars(select(TestTable)).all() - 4 print("We restored the following values in code...") - 5 for record in records: - -~/.local/lib/python3.10/site-packages/sqlalchemy/engine/result.py in all(self) - 1767 - 1768 """ --> 1769 return self._allrows() - 1770 - 1771 def __iter__(self) -> Iterator[_R]: - -~/.local/lib/python3.10/site-packages/sqlalchemy/engine/result.py in _allrows(self) - 546 make_row = self._row_getter - 547 ---> 548 rows = self._fetchall_impl() - 549 made_rows: List[_InterimRowType[_R]] - 550 if make_row: - -~/.local/lib/python3.10/site-packages/sqlalchemy/engine/result.py in _fetchall_impl(self) - 1674 - 1675 def _fetchall_impl(self) -> List[_InterimRowType[Row[Any]]]: --> 1676 return self._real_result._fetchall_impl() - 1677 - 1678 def _fetchmany_impl( - -~/.local/lib/python3.10/site-packages/sqlalchemy/engine/result.py in _fetchall_impl(self) - 2268 self._raise_hard_closed() - 2269 try: --> 2270 return list(self.iterator) - 2271 finally: - 2272 self._soft_close() - 
-~/.local/lib/python3.10/site-packages/sqlalchemy/orm/loading.py in chunks(size) - 217 break - 218 else: ---> 219 fetch = cursor._raw_all_rows() - 220 - 221 if single_entity: - -~/.local/lib/python3.10/site-packages/sqlalchemy/engine/result.py in _raw_all_rows(self) - 539 assert make_row is not None - 540 rows = self._fetchall_impl() ---> 541 return [make_row(row) for row in rows] - 542 - 543 def _allrows(self) -> List[_R]: - -~/.local/lib/python3.10/site-packages/sqlalchemy/engine/result.py in (.0) - 539 assert make_row is not None - 540 rows = self._fetchall_impl() ---> 541 return [make_row(row) for row in rows] - 542 - 543 def _allrows(self) -> List[_R]: - -lib/sqlalchemy/cyextension/resultproxy.pyx in sqlalchemy.cyextension.resultproxy.BaseRow.__init__() - -lib/sqlalchemy/cyextension/resultproxy.pyx in sqlalchemy.cyextension.resultproxy._apply_processors() - -~/.local/lib/python3.10/site-packages/sqlalchemy/sql/sqltypes.py in process(value) - 1727 value = parent_processor(value) - 1728 --> 1729 value = self._object_value_for_elem(value) - 1730 return value - 1731 - -~/.local/lib/python3.10/site-packages/sqlalchemy/sql/sqltypes.py in _object_value_for_elem(self, elem) - 1609 return self._object_lookup[elem] - 1610 except KeyError as err: --> 1611 raise LookupError( - 1612 "'%s' is not among the defined enum values. " - 1613 "Enum name: %s. Possible values: %s" - -LookupError: 'reed' is not among the defined enum values. Enum name: color. Possible values: red, green, blue -Broken again. - -So how do we fix this? - -Handling Unknown Values -All the cases we've seen of LookupErrors are essentially a problem that we have no unknown value handler - ultimately in all applications where the value could change - which I would argue should always be considered to be all of them - we in fact should have had an option which specified handling an unknown one. 
- -At this point we need to subclass the SQLAlchemy Enum type, and specify that directly - which we do like so: - -import typing as t - -class EnumWithUnknown(sqlalchemy.Enum): - def __init__(self, *enums, **kw: t.Any): - super().__init__(*enums, **kw) - # SQLAlchemy sets the _adapted_from keyword argument sometimes, which contains a reference to the original type - but won't include - # original keyword arguments, so we need to handle that here. - self._unknown_value = kw["_adapted_from"]._unknown_value if "_adapted_from" in kw else kw.get("unknown_value",None) - if self._unknown_value is None: - raise ValueError("unknown_value should be a member of the enum") - - # This is the function which resolves the object for the DB value - def _object_value_for_elem(self, elem): - try: - return self._object_lookup[elem] - except LookupError: - return self._unknown_value -And then we can use this type as follows: - -class Color(Enum): - UNKNOWN = "unknown" - LEGACY_RED = "red" - GREEN = "green" - BLUE = "blue" - -class Base(DeclarativeBase): - pass - -class TestTable(Base): - __tablename__ = "test_table" - id: Mapped[int] = mapped_column(primary_key=True) - value: Mapped[Color] = mapped_column(EnumWithUnknown(Color, values_callable=lambda t: [ str(item.value) for item in t ], - unknown_value=Color.UNKNOWN)) - -Let's run that against the database we just inserted reed into: - -# Now try and read the values back -with Session(engine) as session: - records = session.scalars(select(TestTable)).all() - print("We restored the following values in code...") - for record in records: - print(record.value) -We restored the following values in code... -Color.LEGACY_RED -Color.GREEN -Color.BLUE -Color.UNKNOWN -And fixed! We obviously have changed our application logic, but this is now much safer and code which will work as we expect it to in all circumstances. 
- -From a practical perspective we've had to expand our design space to assume indeterminate colors can exist - which might be awkward, but the trade-off is robustness: our application logic can now choose how it handles "unknown" - we could crash if we wanted, but we can also choose just to ignore those records we don't understand or display them as "unknown" and prevent user interaction or whatever else we want. - -Discussion -This is an interesting case where in my opinion the "default" design isn't what you would want, but the logic for it is actually sound. SQLAlchemy models define databases - they are principally built on assuming you are describing the actual state of a database, with constraints provided by a database - i.e. in a database with first-class enumeration support, some of the tripwires here just wouldn't work without a schema upgrade. - -Conversely, if you did a schema upgrade, your old applications still wouldn't know how to parse new values unless you did everything perfectly in lockstep - which in my experience isn't reality. - -Basically it's an interesting case where everything is justifiably right, but leaves some design footguns lying around which might be a bit of a surprise (hence this post). The kicker for me is the effect on using session.scalar calls to return models - since unless we're querying more specifically, having unknown values we can't handle in tables leads to being unable to list any elements on the table ergonomically. - -Conclusions -Think carefully before using automagic enum methods in SQLAlchemy. What you want to do now is likely subtly wrong, and while there's a simple and elegant way to use enum.Enum with SQLAlchemy, the magic will give you working code quickly but with potentially nasty problems from subtle bugs or data mismatches later. - -Listings -The full listing for the code samples here can also be found here. 
- -sqlalchemy-enums.py (Source) -#!/usr/bin/env python -# sqlalchemy-enums.py -# Note: you need to at least install `pip install sqlalchemy` for this to work. - -from enum import Enum - -import sqlalchemy -from sqlalchemy.orm import Mapped, DeclarativeBase, Session, mapped_column -from sqlalchemy import create_engine, select, text - -import typing as t - - -class EnumWithUnknown(sqlalchemy.Enum): - def __init__(self, *enums, **kw: t.Any): - super().__init__(*enums, **kw) - # SQLAlchemy sets the _adapted_from keyword argument sometimes, which contains a reference to the original type - but won't include - # original keyword arguments, so we need to handle that here. - self._unknown_value = ( - kw["_adapted_from"]._unknown_value - if "_adapted_from" in kw - else kw.get("unknown_value", None) - ) - if self._unknown_value is None: - raise ValueError("unknown_value should be a member of the enum") - - # This is the function which resolves the object for the DB value - def _object_value_for_elem(self, elem): - try: - return self._object_lookup[elem] - except LookupError: - return self._unknown_value - - -class Color(Enum): - UNKNOWN = "unknown" - LEGACY_RED = "red" - GREEN = "green" - BLUE = "blue" - - -class Base(DeclarativeBase): - pass - - -class TestTable(Base): - __tablename__ = "test_table" - id: Mapped[int] = mapped_column(primary_key=True) - value: Mapped[Color] = mapped_column( - EnumWithUnknown( - Color, - values_callable=lambda t: [str(item.value) for item in t], - unknown_value=Color.UNKNOWN, - ) - ) - - -engine = create_engine("sqlite://") - -Base.metadata.create_all(engine) - -with Session(engine) as session: - # Create normal values - for enum_item in [Color.LEGACY_RED, Color.GREEN, Color.BLUE]: - session.add(TestTable(value=enum_item)) - session.commit() - -with Session(engine) as session: - session.add(TestTable(value="reed")) - session.commit() - -# Now try and read the values back -with Session(engine) as session: - records = 
session.scalars(select(TestTable)).all() - print("We restored the following values in code...") - for record in records: - print(record.value) - -print("But the underlying table contains...") -with engine.connect() as conn: - print(conn.execute(text("SELECT * FROM test_table;")).all()) - diff --git a/docs/research/FASTAPI.md b/docs/research/FASTAPI.md deleted file mode 100644 index 7713f3e..0000000 --- a/docs/research/FASTAPI.md +++ /dev/null @@ -1,1154 +0,0 @@ -# Production-Grade FastAPI Boilerplate: 2025 Best Practices - -**The 2025 FastAPI ecosystem has matured significantly**, with clear conventions emerging around async-first patterns, Pydantic v2, SQLAlchemy 2.0+, and modern tooling like uv and Ruff. This guide synthesizes the latest practices for building production-grade FastAPI applications following a layered architecture: Models → Repositories → Services → Routes. - -## Project structure for medium-large applications - -The **feature-based (domain-driven) organization** pattern, popularized by Netflix's Dispatch project, is now the recommended approach for medium-large applications—scaling better than traditional file-type organization. 
- -``` -fastapi-project/ -├── alembic/ # Database migrations -│ ├── versions/ -│ └── env.py -├── src/ -│ ├── __init__.py -│ ├── main.py # FastAPI app, lifespan events -│ ├── core/ # Shared infrastructure -│ │ ├── __init__.py -│ │ ├── config.py # Pydantic Settings -│ │ ├── database.py # Async session manager -│ │ ├── security.py # Auth utilities -│ │ ├── exceptions.py # Global exception classes -│ │ └── dependencies.py # Shared dependencies -│ ├── auth/ # Feature module -│ │ ├── __init__.py -│ │ ├── router.py # Thin routes -│ │ ├── schemas.py # Pydantic request/response -│ │ ├── models.py # SQLAlchemy models -│ │ ├── repository.py # DB operations (static methods) -│ │ ├── service.py # Business logic -│ │ ├── dependencies.py # Module-specific deps -│ │ ├── constants.py # Error codes, enums -│ │ └── exceptions.py # Module exceptions -│ ├── users/ -│ │ ├── router.py -│ │ ├── schemas.py -│ │ ├── models.py -│ │ ├── repository.py -│ │ └── service.py -│ └── posts/ -│ └── ... -├── tests/ -│ ├── conftest.py -│ ├── factories/ # Test data factories -│ ├── unit/ -│ │ ├── services/ -│ │ └── repositories/ -│ └── integration/ -│ └── api/ -├── pyproject.toml -├── Dockerfile -├── docker-compose.yml -├── nginx/nginx.conf -└── .pre-commit-config.yaml -``` - -**Key architectural principles** include using explicit module imports to prevent circular dependencies (`from src.auth import constants as auth_constants`), keeping routes thin by delegating all business logic to services, and ensuring repositories handle only database operations without business logic. - -## Modern pyproject.toml configuration - -**uv** has emerged as the preferred package manager in 2025, developed by Astral (creators of Ruff), offering dramatically faster dependency resolution than Poetry or pip. 
- -```toml -[project] -name = "fastapi-app" -version = "1.0.0" -description = "Production FastAPI Application" -requires-python = ">=3.12" - -dependencies = [ - "fastapi[standard]>=0.115.0,<1.0.0", - "pydantic>=2.9.0,<3.0.0", - "pydantic-settings>=2.6.0,<3.0.0", - "sqlalchemy>=2.0.0,<3.0.0", - "alembic>=1.13.0,<2.0.0", - "asyncpg>=0.29.0,<1.0.0", - "python-multipart>=0.0.9", - "pyjwt>=2.9.0", - "pwdlib[argon2]>=0.2.0", - "slowapi>=0.1.9", - "redis>=5.0.0", - "structlog>=24.0.0", - "gunicorn>=22.0.0", - "uvicorn[standard]>=0.30.0", -] - -[project.optional-dependencies] -dev = [ - "pytest>=8.0.0", - "pytest-asyncio>=0.24.0", - "pytest-cov>=5.0.0", - "httpx>=0.27.0", - "factory-boy>=3.3.0", - "asgi-lifespan>=2.1.0", - "mypy>=1.13.0", - "ruff>=0.8.0", - "pre-commit>=4.0.0", -] - -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[tool.ruff] -target-version = "py312" -line-length = 88 -src = ["src"] - -[tool.ruff.lint] -select = [ - "E", "W", "F", "I", "B", "C4", "UP", "ARG", - "SIM", "TCH", "PTH", "RUF", "ASYNC", "S", "N" -] -ignore = ["E501", "B008", "PLR0913", "S101"] - -[tool.ruff.lint.isort] -known-first-party = ["src"] - -[tool.mypy] -python_version = "3.12" -strict = true -plugins = ["pydantic.mypy"] - -[tool.pytest.ini_options] -asyncio_mode = "auto" -asyncio_default_fixture_loop_scope = "function" -testpaths = ["tests"] -addopts = "-ra -q --cov=src --cov-report=term-missing" -``` - -The version pinning strategy uses **compatible release ranges** (`>=2.0.0,<3.0.0`) in pyproject.toml while letting lock files (`uv.lock`) handle exact versions. 
- -## Pydantic Settings v2 for configuration management - -```python -# src/core/config.py -from functools import lru_cache -from typing import Literal -from pydantic import Field, PostgresDsn, model_validator -from pydantic_settings import BaseSettings, SettingsConfigDict - - -class Settings(BaseSettings): - model_config = SettingsConfigDict( - env_file=".env", - env_file_encoding="utf-8", - case_sensitive=False, - extra="ignore", - ) - - # Application - APP_NAME: str = "FastAPI App" - ENVIRONMENT: Literal["development", "staging", "production"] = "development" - DEBUG: bool = False - - # Database - DATABASE_URL: PostgresDsn - DB_POOL_SIZE: int = 5 - DB_MAX_OVERFLOW: int = 10 - - # Security - SECRET_KEY: str = Field(..., min_length=32) - ACCESS_TOKEN_EXPIRE_MINUTES: int = 30 - ALGORITHM: str = "HS256" - - # CORS - CORS_ORIGINS: list[str] = ["http://localhost:3000"] - - @model_validator(mode="after") - def validate_production_settings(self) -> "Settings": - if self.ENVIRONMENT == "production" and self.DEBUG: - raise ValueError("DEBUG must be False in production") - return self - - -@lru_cache -def get_settings() -> Settings: - return Settings() - -settings = get_settings() -``` - -**Module-specific settings** can use `env_prefix` to namespace environment variables: - -```python -# src/auth/config.py -from pydantic_settings import BaseSettings, SettingsConfigDict - -class AuthConfig(BaseSettings): - model_config = SettingsConfigDict(env_prefix="AUTH_", env_file=".env") - - JWT_SECRET: str - JWT_ALG: str = "HS256" - JWT_EXP: int = 30 # minutes -``` - -## Async SQLAlchemy 2.0+ with session management - -The **DatabaseSessionManager pattern** provides clean lifecycle management for async database connections: - -```python -# src/core/database.py -import contextlib -from typing import AsyncIterator -from sqlalchemy.ext.asyncio import ( - AsyncConnection, AsyncSession, async_sessionmaker, create_async_engine -) -from sqlalchemy.pool import AsyncAdaptedQueuePool - - 
-class DatabaseSessionManager: - def __init__(self, url: str, **engine_kwargs): - self._engine = create_async_engine( - url, - poolclass=AsyncAdaptedQueuePool, - pool_size=5, - max_overflow=10, - pool_pre_ping=True, - echo=False, - **engine_kwargs - ) - self._sessionmaker = async_sessionmaker( - autocommit=False, - autoflush=False, - expire_on_commit=False, - bind=self._engine, - class_=AsyncSession - ) - - async def close(self): - await self._engine.dispose() - - @contextlib.asynccontextmanager - async def session(self) -> AsyncIterator[AsyncSession]: - session = self._sessionmaker() - try: - yield session - except Exception: - await session.rollback() - raise - finally: - await session.close() - - -sessionmanager = DatabaseSessionManager(str(settings.DATABASE_URL)) - - -async def get_db_session() -> AsyncIterator[AsyncSession]: - async with sessionmanager.session() as session: - yield session -``` - -**SQLAlchemy 2.0 models** use `Mapped` type hints with `lazy="raise"` to prevent implicit lazy loading in async contexts: - -```python -# src/users/models.py -from typing import List, Optional -from sqlalchemy import String, ForeignKey -from sqlalchemy.orm import Mapped, mapped_column, relationship, DeclarativeBase - - -class Base(DeclarativeBase): - pass - - -class User(Base): - __tablename__ = "users" - - id: Mapped[int] = mapped_column(primary_key=True) - email: Mapped[str] = mapped_column(String(255), unique=True, index=True) - hashed_password: Mapped[str] = mapped_column(String(255)) - is_active: Mapped[bool] = mapped_column(default=True) - full_name: Mapped[Optional[str]] = mapped_column(String(100), nullable=True) - - posts: Mapped[List["Post"]] = relationship( - back_populates="author", - lazy="raise", # Prevents N+1 queries - cascade="all, delete-orphan" - ) -``` - -For relationship loading: use **`selectinload`** for collections (one-to-many) and **`joinedload`** for single objects (many-to-one). 
- -## Repository pattern with static methods - -```python -# src/users/repository.py -from typing import Optional, Sequence -from sqlalchemy import select -from sqlalchemy.ext.asyncio import AsyncSession -from sqlalchemy.orm import selectinload -from .models import User -from .schemas import UserCreate, UserUpdate - - -class UserRepository: - @staticmethod - async def get_by_id(session: AsyncSession, user_id: int) -> Optional[User]: - result = await session.execute(select(User).where(User.id == user_id)) - return result.scalars().first() - - @staticmethod - async def get_by_email(session: AsyncSession, email: str) -> Optional[User]: - result = await session.execute(select(User).where(User.email == email)) - return result.scalars().first() - - @staticmethod - async def get_with_posts(session: AsyncSession, user_id: int) -> Optional[User]: - result = await session.execute( - select(User) - .where(User.id == user_id) - .options(selectinload(User.posts)) - ) - return result.scalars().first() - - @staticmethod - async def get_multi( - session: AsyncSession, - *, - skip: int = 0, - limit: int = 100 - ) -> Sequence[User]: - result = await session.execute( - select(User).offset(skip).limit(limit) - ) - return result.scalars().all() - - @staticmethod - async def create(session: AsyncSession, user_in: UserCreate, hashed_password: str) -> User: - user = User( - email=user_in.email, - hashed_password=hashed_password, - full_name=user_in.full_name - ) - session.add(user) - await session.flush() - await session.refresh(user) - return user - - @staticmethod - async def update( - session: AsyncSession, user: User, user_in: UserUpdate - ) -> User: - update_data = user_in.model_dump(exclude_unset=True) - for field, value in update_data.items(): - setattr(user, field, value) - await session.flush() - await session.refresh(user) - return user -``` - -## Service layer with business logic - -```python -# src/users/service.py -from sqlalchemy.ext.asyncio import AsyncSession -from 
.repository import UserRepository -from .schemas import UserCreate, UserUpdate, UserResponse -from .exceptions import UserNotFound, EmailAlreadyExists -from src.core.security import get_password_hash - - -class UserService: - @staticmethod - async def create_user(session: AsyncSession, user_in: UserCreate) -> UserResponse: - existing = await UserRepository.get_by_email(session, user_in.email) - if existing: - raise EmailAlreadyExists(user_in.email) - - hashed_password = get_password_hash(user_in.password) - user = await UserRepository.create(session, user_in, hashed_password) - await session.commit() - return UserResponse.model_validate(user) - - @staticmethod - async def get_user(session: AsyncSession, user_id: int) -> UserResponse: - user = await UserRepository.get_by_id(session, user_id) - if not user: - raise UserNotFound(user_id) - return UserResponse.model_validate(user) - - @staticmethod - async def update_user( - session: AsyncSession, user_id: int, user_in: UserUpdate - ) -> UserResponse: - user = await UserRepository.get_by_id(session, user_id) - if not user: - raise UserNotFound(user_id) - - updated = await UserRepository.update(session, user, user_in) - await session.commit() - return UserResponse.model_validate(updated) -``` - -## Pydantic v2 schemas with validation - -```python -# src/users/schemas.py -from datetime import datetime -from pydantic import BaseModel, ConfigDict, Field, EmailStr, field_validator - - -class UserBase(BaseModel): - email: EmailStr - full_name: str | None = None - - -class UserCreate(UserBase): - password: str = Field(..., min_length=8) - - @field_validator("password") - @classmethod - def validate_password(cls, v: str) -> str: - if not any(c.isupper() for c in v): - raise ValueError("Password must contain at least one uppercase letter") - if not any(c.isdigit() for c in v): - raise ValueError("Password must contain at least one digit") - return v - - -class UserUpdate(BaseModel): - email: EmailStr | None = None - full_name: 
str | None = None - is_active: bool | None = None - - -class UserResponse(UserBase): - model_config = ConfigDict(from_attributes=True) - - id: int - is_active: bool - created_at: datetime -``` - -Key Pydantic v2 changes: `ConfigDict` replaces `class Config`, `from_attributes=True` replaces `orm_mode`, and validators use `@field_validator` with `@classmethod`. - -## Dependency injection patterns - -```python -# src/core/dependencies.py -from typing import Annotated -from fastapi import Depends -from sqlalchemy.ext.asyncio import AsyncSession -from .database import get_db_session - -# Type alias for cleaner injection -DBSession = Annotated[AsyncSession, Depends(get_db_session)] - - -# src/auth/dependencies.py -from typing import Annotated -from fastapi import Depends, HTTPException, status -from fastapi.security import OAuth2PasswordBearer -import jwt -from src.core.config import settings -from src.core.dependencies import DBSession -from src.users.repository import UserRepository -from src.users.models import User - -oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/v1/auth/token") - - -async def get_current_user( - token: Annotated[str, Depends(oauth2_scheme)], - db: DBSession -) -> User: - credentials_exception = HTTPException( - status_code=status.HTTP_401_UNAUTHORIZED, - detail="Could not validate credentials", - headers={"WWW-Authenticate": "Bearer"}, - ) - try: - payload = jwt.decode(token, settings.SECRET_KEY, algorithms=[settings.ALGORITHM]) - user_id: int = payload.get("sub") - if user_id is None: - raise credentials_exception - except jwt.InvalidTokenError: - raise credentials_exception - - user = await UserRepository.get_by_id(db, user_id) - if user is None: - raise credentials_exception - return user - - -async def get_current_active_user( - current_user: Annotated[User, Depends(get_current_user)] -) -> User: - if not current_user.is_active: - raise HTTPException(status_code=400, detail="Inactive user") - return current_user - - -CurrentUser = 
Annotated[User, Depends(get_current_active_user)] -``` - -## Exception handling with global handlers - -```python -# src/core/exceptions.py -class BaseAppException(Exception): - def __init__(self, message: str, status_code: int = 500): - self.message = message - self.status_code = status_code - super().__init__(self.message) - - -class ResourceNotFound(BaseAppException): - def __init__(self, resource: str, identifier: str | int): - super().__init__(f"{resource} {identifier} not found", status_code=404) - - -class ConflictError(BaseAppException): - def __init__(self, message: str): - super().__init__(message, status_code=409) - - -# src/users/exceptions.py -from src.core.exceptions import ResourceNotFound, ConflictError - -class UserNotFound(ResourceNotFound): - def __init__(self, user_id: int): - super().__init__("User", user_id) - -class EmailAlreadyExists(ConflictError): - def __init__(self, email: str): - super().__init__(f"Email {email} already registered") -``` - -```python -# src/main.py - Exception handlers -from fastapi import FastAPI, Request -from fastapi.responses import JSONResponse -from fastapi.exceptions import RequestValidationError -from src.core.exceptions import BaseAppException - -app = FastAPI() - - -@app.exception_handler(BaseAppException) -async def app_exception_handler(request: Request, exc: BaseAppException): - return JSONResponse( - status_code=exc.status_code, - content={"detail": exc.message, "type": exc.__class__.__name__} - ) - - -@app.exception_handler(RequestValidationError) -async def validation_exception_handler(request: Request, exc: RequestValidationError): - return JSONResponse( - status_code=422, - content={"detail": "Validation Error", "errors": exc.errors()} - ) -``` - -## JWT authentication with PyJWT and Argon2 - -**PyJWT** is now the recommended library over python-jose, and **pwdlib with Argon2** is the modern choice for password hashing: - -```python -# src/core/security.py -from datetime import datetime, timedelta, 
timezone -import jwt -from pwdlib import PasswordHash -from src.core.config import settings - -password_hash = PasswordHash.recommended() - - -def verify_password(plain_password: str, hashed_password: str) -> bool: - return password_hash.verify(plain_password, hashed_password) - - -def get_password_hash(password: str) -> str: - return password_hash.hash(password) - - -def create_access_token(subject: int | str, expires_delta: timedelta | None = None) -> str: - expire = datetime.now(timezone.utc) + ( - expires_delta or timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES) - ) - to_encode = {"sub": str(subject), "exp": expire} - return jwt.encode(to_encode, settings.SECRET_KEY, algorithm=settings.ALGORITHM) - - -def create_refresh_token(subject: int | str) -> str: - expire = datetime.now(timezone.utc) + timedelta(days=30) - to_encode = {"sub": str(subject), "exp": expire, "type": "refresh"} - return jwt.encode(to_encode, settings.SECRET_KEY, algorithm=settings.ALGORITHM) -``` - -## SlowAPI rate limiting integration - -```python -# src/core/rate_limit.py -from slowapi import Limiter -from slowapi.util import get_remote_address -from src.core.config import settings - - -def get_user_identifier(request) -> str: - """Rate limit by user ID if authenticated, otherwise by IP.""" - auth_header = request.headers.get("Authorization") - if auth_header and auth_header.startswith("Bearer "): - try: - import jwt - token = auth_header.split(" ")[1] - payload = jwt.decode(token, settings.SECRET_KEY, algorithms=[settings.ALGORITHM]) - return f"user:{payload.get('sub')}" - except Exception: - pass - return get_remote_address(request) - - -limiter = Limiter( - key_func=get_user_identifier, - storage_uri=str(settings.REDIS_URL) if settings.REDIS_URL else None, - default_limits=["100/hour", "10/minute"], - headers_enabled=True, - in_memory_fallback_enabled=True, -) -``` - -```python -# src/auth/router.py -from fastapi import APIRouter, Request -from src.core.rate_limit import limiter - 
-router = APIRouter(prefix="/auth", tags=["auth"]) - - -@router.post("/token") -@limiter.limit("5/minute") # Stricter limit for auth endpoints -async def login(request: Request, form_data: OAuth2PasswordRequestForm = Depends()): - ... -``` - -## Thin routes calling services - -```python -# src/users/router.py -from fastapi import APIRouter, status -from src.core.dependencies import DBSession -from src.auth.dependencies import CurrentUser -from .service import UserService -from .schemas import UserCreate, UserUpdate, UserResponse - -router = APIRouter(prefix="/users", tags=["users"]) - - -@router.post("/", response_model=UserResponse, status_code=status.HTTP_201_CREATED) -async def create_user(user_in: UserCreate, db: DBSession): - return await UserService.create_user(db, user_in) - - -@router.get("/me", response_model=UserResponse) -async def get_current_user_info(current_user: CurrentUser): - return UserResponse.model_validate(current_user) - - -@router.get("/{user_id}", response_model=UserResponse) -async def get_user(user_id: int, db: DBSession, current_user: CurrentUser): - return await UserService.get_user(db, user_id) - - -@router.patch("/{user_id}", response_model=UserResponse) -async def update_user(user_id: int, user_in: UserUpdate, db: DBSession, current_user: CurrentUser): - return await UserService.update_user(db, user_id, user_in) -``` - -## Main application assembly - -```python -# src/main.py -from contextlib import asynccontextmanager -from fastapi import FastAPI -from fastapi.middleware.cors import CORSMiddleware -from slowapi import _rate_limit_exceeded_handler -from slowapi.errors import RateLimitExceeded - -from src.core.config import settings -from src.core.database import sessionmanager -from src.core.rate_limit import limiter -from src.users.router import router as users_router -from src.auth.router import router as auth_router - - -@asynccontextmanager -async def lifespan(app: FastAPI): - # Startup - yield - # Shutdown - await 
sessionmanager.close() - - -app_config = {"title": settings.APP_NAME, "version": "1.0.0"} -if settings.ENVIRONMENT == "production": - app_config["openapi_url"] = None # Hide docs in production - -app = FastAPI(**app_config, lifespan=lifespan) - -# Middleware -app.add_middleware( - CORSMiddleware, - allow_origins=settings.CORS_ORIGINS, - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) - -# Rate limiting -app.state.limiter = limiter -app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler) - -# Routers -app.include_router(auth_router, prefix="/api/v1") -app.include_router(users_router, prefix="/api/v1") - - -@app.get("/health") -async def health_check(): - return {"status": "healthy"} -``` - -## Multi-stage Dockerfile for production - -```dockerfile -# Build stage -FROM python:3.12-slim AS builder -WORKDIR /app -RUN apt-get update && apt-get install -y gcc && rm -rf /var/lib/apt/lists/* -COPY requirements.txt . -RUN pip install --no-cache-dir --upgrade -r requirements.txt - -# Runtime stage -FROM python:3.12-slim -WORKDIR /app - -RUN useradd --create-home --shell /bin/bash app && chown -R app:app /app - -COPY --from=builder /usr/local/lib/python3.12/site-packages /usr/local/lib/python3.12/site-packages -COPY --from=builder /usr/local/bin /usr/local/bin -COPY --chown=app:app ./src /app/src - -USER app - -ENV PYTHONDONTWRITEBYTECODE=1 \ - PYTHONUNBUFFERED=1 \ - PYTHONPATH="/app" - -HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \ - CMD curl -f http://localhost:8000/health || exit 1 - -CMD ["gunicorn", "src.main:app", "-w", "4", "-k", "uvicorn.workers.UvicornWorker", "--bind", "0.0.0.0:8000"] -``` - -## Docker Compose configuration - -```yaml -version: '3.8' - -services: - app: - build: . 
- ports: - - "8000:8000" - env_file: .env.production - environment: - - DATABASE_URL=postgresql+asyncpg://user:password@db:5432/myapp - - REDIS_URL=redis://:redis_pass@redis:6379 - depends_on: - db: - condition: service_healthy - redis: - condition: service_healthy - restart: unless-stopped - networks: - - backend - - db: - image: postgres:16-alpine - environment: - POSTGRES_DB: myapp - POSTGRES_USER: user - POSTGRES_PASSWORD: password - volumes: - - postgres_data:/var/lib/postgresql/data - healthcheck: - test: ["CMD-SHELL", "pg_isready -U user -d myapp"] - interval: 5s - timeout: 5s - retries: 5 - networks: - - backend - - redis: - image: redis:7-alpine - command: redis-server --appendonly yes --requirepass redis_pass - volumes: - - redis_data:/data - healthcheck: - test: ["CMD", "redis-cli", "-a", "redis_pass", "ping"] - interval: 5s - timeout: 5s - retries: 5 - networks: - - backend - - nginx: - image: nginx:alpine - ports: - - "80:80" - - "443:443" - volumes: - - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro - depends_on: - - app - networks: - - backend - -volumes: - postgres_data: - redis_data: - -networks: - backend: -``` - -## Nginx reverse proxy configuration - -```nginx -upstream fastapi_backend { - least_conn; - server app:8000 max_fails=3 fail_timeout=30s; - keepalive 32; -} - -limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s; -limit_req_zone $binary_remote_addr zone=auth:10m rate=1r/s; - -server { - listen 80; - server_name yourdomain.com; - return 301 https://$host$request_uri; -} - -server { - listen 443 ssl http2; - server_name yourdomain.com; - - ssl_certificate /etc/nginx/ssl/fullchain.pem; - ssl_certificate_key /etc/nginx/ssl/privkey.pem; - ssl_protocols TLSv1.2 TLSv1.3; - - add_header X-Frame-Options DENY always; - add_header X-Content-Type-Options nosniff always; - add_header Strict-Transport-Security "max-age=63072000" always; - - client_max_body_size 10M; - - location /api/ { - limit_req zone=api burst=20 nodelay; - - proxy_pass 
http://fastapi_backend; - proxy_http_version 1.1; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header Connection ""; - } - - location /api/auth/ { - limit_req zone=auth burst=5 nodelay; - proxy_pass http://fastapi_backend; - proxy_http_version 1.1; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /health { - proxy_pass http://fastapi_backend; - } -} -``` - -## Pytest configuration and fixtures - -```python -# tests/conftest.py -import pytest -from typing import AsyncGenerator, Generator -from httpx import ASGITransport, AsyncClient -from sqlalchemy import create_engine -from sqlalchemy.orm import Session, sessionmaker -from sqlalchemy.pool import StaticPool - -from src.main import app -from src.core.database import get_db_session -from src.core.security import create_access_token -from src.users.models import Base -from tests.factories.user import UserFactory - -SQLALCHEMY_TEST_URL = "sqlite:///:memory:" - -engine = create_engine( - SQLALCHEMY_TEST_URL, - connect_args={"check_same_thread": False}, - poolclass=StaticPool, -) -TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) - - -@pytest.fixture(scope="session", autouse=True) -def setup_database(): - Base.metadata.create_all(bind=engine) - yield - Base.metadata.drop_all(bind=engine) - - -@pytest.fixture -def db() -> Generator[Session, None, None]: - connection = engine.connect() - transaction = connection.begin() - session = TestingSessionLocal(bind=connection) - - yield session - - session.close() - transaction.rollback() - connection.close() - - -@pytest.fixture -def client(db: Session) -> Generator: - def override_get_db(): - yield db - - app.dependency_overrides[get_db_session] = 
override_get_db - - with TestClient(app) as test_client: - yield test_client - - app.dependency_overrides.clear() - - -@pytest.fixture -async def async_client(db: Session) -> AsyncGenerator[AsyncClient, None]: - def override_get_db(): - yield db - - app.dependency_overrides[get_db_session] = override_get_db - - async with AsyncClient( - transport=ASGITransport(app=app), - base_url="http://test" - ) as ac: - yield ac - - app.dependency_overrides.clear() - - -@pytest.fixture -def test_user(db: Session): - return UserFactory() - - -@pytest.fixture -def authenticated_client(client, test_user): - token = create_access_token(test_user.id) - client.headers.update({"Authorization": f"Bearer {token}"}) - yield client - client.headers.clear() -``` - -## GitHub Actions CI/CD workflow - -```yaml -name: CI/CD - -on: - push: - branches: [main] - pull_request: - branches: [main] - -jobs: - lint-and-test: - runs-on: ubuntu-latest - services: - postgres: - image: postgres:16-alpine - env: - POSTGRES_USER: test - POSTGRES_PASSWORD: test - POSTGRES_DB: test_db - ports: - - 5432:5432 - options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 - - steps: - - uses: actions/checkout@v4 - - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: "3.12" - cache: pip - - - name: Install dependencies - run: | - pip install --upgrade pip - pip install -r requirements.txt - pip install -r requirements-dev.txt - - - name: Run Ruff - run: | - ruff check --output-format=github . - ruff format --check . 
- - - name: Run MyPy - run: mypy src/ - - - name: Run tests - env: - DATABASE_URL: postgresql://test:test@localhost:5432/test_db - run: pytest --cov=src --cov-report=xml - - - name: Upload coverage - uses: codecov/codecov-action@v4 - with: - files: coverage.xml - - build-and-push: - needs: lint-and-test - if: github.ref == 'refs/heads/main' - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - - steps: - - uses: actions/checkout@v4 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Login to GHCR - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Build and push - uses: docker/build-push-action@v5 - with: - push: true - tags: ghcr.io/${{ github.repository }}:${{ github.sha }} - cache-from: type=gha - cache-to: type=gha,mode=max -``` - -## Pre-commit hooks configuration - -```yaml -# .pre-commit-config.yaml -repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v5.0.0 - hooks: - - id: check-yaml - - id: check-toml - - id: end-of-file-fixer - - id: trailing-whitespace - - id: check-merge-conflict - - id: detect-private-key - - - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.6 - hooks: - - id: ruff-check - args: [--fix, --exit-non-zero-on-fix] - - id: ruff-format - - - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.13.0 - hooks: - - id: mypy - additional_dependencies: [pydantic>=2.0, types-python-dateutil] - args: [--config-file=pyproject.toml] - exclude: ^tests/ - - - repo: https://github.com/gitleaks/gitleaks - rev: v8.22.1 - hooks: - - id: gitleaks -``` - -## Structured logging with correlation IDs - -```python -# src/core/logging.py -import structlog -import uuid -from starlette.middleware.base import BaseHTTPMiddleware -from starlette.requests import Request - -shared_processors = [ - structlog.contextvars.merge_contextvars, - structlog.stdlib.add_log_level, - 
structlog.processors.TimeStamper(fmt="iso"), - structlog.processors.StackInfoRenderer(), -] - - -def configure_logging(environment: str): - if environment == "production": - renderer = structlog.processors.JSONRenderer() - else: - renderer = structlog.dev.ConsoleRenderer(colors=True) - - structlog.configure( - processors=shared_processors + [renderer], - wrapper_class=structlog.stdlib.BoundLogger, - cache_logger_on_first_use=True, - ) - - -class CorrelationIdMiddleware(BaseHTTPMiddleware): - async def dispatch(self, request: Request, call_next): - correlation_id = request.headers.get("X-Correlation-ID", str(uuid.uuid4())) - - structlog.contextvars.clear_contextvars() - structlog.contextvars.bind_contextvars( - correlation_id=correlation_id, - method=request.method, - path=request.url.path, - ) - - response = await call_next(request) - response.headers["X-Correlation-ID"] = correlation_id - return response -``` - -## Conclusion - -The 2025 FastAPI ecosystem emphasizes **async-first patterns** with SQLAlchemy 2.0+'s mature async support and proper relationship loading strategies. **Ruff has consolidated** the linting ecosystem, replacing black, isort, and flake8 with a single, faster tool. **uv** offers significantly faster dependency management than pip or Poetry. - -Key architectural takeaways include maintaining thin routes that delegate to services, repositories that handle only database operations without business logic, and leveraging FastAPI's dependency injection with `Annotated` types for cleaner code. For security, **PyJWT and Argon2** (via pwdlib) are the current recommended choices. - -Production deployments benefit from multi-stage Docker builds with non-root users, Nginx as a reverse proxy with rate limiting at multiple layers, and structured logging with correlation IDs for distributed tracing. The testing stack centers on pytest-asyncio with `asyncio_mode="auto"` and httpx's `AsyncClient` with `ASGITransport` for async endpoint testing. 
diff --git a/docs/research/JUSTFILE.md b/docs/research/JUSTFILE.md deleted file mode 100644 index 1142ba5..0000000 --- a/docs/research/JUSTFILE.md +++ /dev/null @@ -1,635 +0,0 @@ -# Justfile patterns for production full-stack development in 2025 - -The `just` command runner (currently at **version 1.43.1** as of November 2025) has matured into an excellent choice for full-stack monorepo orchestration, offering native module support for organizing multi-service commands, **parallel dependency execution** via the new `[parallel]` attribute, and robust cross-platform compatibility through shell configuration. For your FastAPI + React + Docker Compose stack, the recommended architecture uses a root justfile with service-specific modules (`backend.just`, `frontend.just`, `db.just`), enabling clean namespaced commands like `just backend::test` while keeping orchestration commands at the root level. This approach eliminates the complexity of Makefiles while providing significantly more power than npm scripts. - -## Current just features and 2024-2025 additions - -Just has seen substantial feature additions over the past year that directly benefit full-stack development workflows. The **module system** (`mod` statement) was stabilized in version 1.31.0, enabling proper monorepo organization with namespaced recipes. Version 1.42.0 introduced the **`[parallel]` attribute** for concurrent dependency execution and **cross-submodule dependencies**, allowing recipes to depend on recipes in other modules (`deploy: utils::build`). The **`[script]` attribute** (1.33.0) enables writing recipes in any language without shebang workarounds, and the **`[group]` attribute** (1.27.0) organizes recipes into logical categories in help output. 
- -New built-in functions added in 2024-2025 include `which()` and `require()` for finding executables (with `require()` erroring if not found), `read()` for file contents, and path constants `PATH_SEP` and `PATH_VAR_SEP` for cross-platform path handling. The `dotenv-override` setting now allows `.env` files to override existing environment variables, useful for Docker-based development where container environment variables might conflict with local configuration. - -```just -# Core settings block for a 2025 production justfile -set dotenv-load # Auto-load .env -set export # Export all variables -set shell := ["bash", "-uc"] # Bash with error checking -set windows-shell := ["powershell.exe", "-NoLogo", "-Command"] -``` - -## Monorepo organization with the module system - -The recommended structure for a full-stack monorepo uses a **root justfile for orchestration** combined with **service modules** for domain-specific commands. Just searches for module files in a specific order: `foo.just`, `foo/mod.just`, `foo/justfile`, or `foo/.justfile`, giving you flexibility in organizing your project. - -``` -project/ -├── justfile # Root orchestration (dev, build, test-all, deploy) -├── backend.just # Backend module (migrations, backend tests, lint) -├── frontend.just # Frontend module (build, dev server, frontend tests) -├── db.just # Database module (backup, restore, shell) -├── docker.just # Shared Docker utilities (imported, not modularized) -├── backend/ -├── frontend/ -└── docker-compose.yml -``` - -The distinction between `mod` and `import` is critical: **`mod` creates namespaced recipes** accessed via `just backend::test`, while **`import` merges recipes** into the current namespace without prefixes. For service-specific commands, modules provide cleaner organization; for shared utilities like Docker helpers, imports work better. 
- -```just -# Root justfile -mod backend # Creates just backend::* namespace -mod frontend -mod db -import 'docker.just' # Merges into root namespace - -# Start everything -dev: - docker compose up - -# Run all tests across services -test: - just backend::test - just frontend::test -``` - -Module recipes should use the **`[no-cd]` attribute** to ensure they execute from the project root rather than the module file's directory, since Docker Compose commands need access to the root `docker-compose.yml`: - -```just -# backend.just -[no-cd] -test *ARGS: - docker compose exec backend pytest {{ARGS}} -``` - -## Docker Compose integration patterns - -Docker Compose integration forms the backbone of full-stack development workflows. The key patterns involve **variadic arguments for passthrough** (`*ARGS`), **parameterized compose files** for environment switching, and **service-specific exec commands**. - -```just -# Essential Docker Compose recipes -@up *ARGS: - docker compose up {{ARGS}} - -@start *ARGS: - docker compose up -d {{ARGS}} - -@down *ARGS: - docker compose down {{ARGS}} - -@build *ARGS: - docker compose build {{ARGS}} - -@logs *SERVICE: - docker compose logs -f {{SERVICE}} - -# Execute in running container -@exec service *CMD: - docker compose exec {{service}} {{CMD}} - -# One-off command (new container) -@run service *CMD: - docker compose run --rm {{service}} {{CMD}} - -# Interactive shell access -shell service='backend': - docker compose exec -it {{service}} /bin/bash -``` - -For **environment-specific deployments**, parameterize the compose file selection: - -```just -# Parameterized environment handling -up-dev: - docker compose -f docker-compose.yml -f docker-compose.dev.yml up - -up-prod: - docker compose -f docker-compose.yml -f docker-compose.prod.yml up -d - -# Or with a parameter -deploy env='staging': - docker compose -f docker-compose.yml -f docker-compose.{{env}}.yml up -d -``` - -**Docker Compose profiles** integrate naturally with environment 
variables: - -```just -export COMPOSE_PROFILES := env_var_or_default('COMPOSE_PROFILES', 'default') - -# Usage: COMPOSE_PROFILES=workers just up -``` - -## Recipe parameters and dependency patterns - -Just offers sophisticated parameter handling including **required parameters**, **optional with defaults**, and **variadic parameters** (both mandatory `+` and optional `*`). Default values can be expressions, enabling dynamic defaults based on environment or other variables. - -```just -# Required parameter -deploy environment: - echo "Deploying to {{environment}}" - -# Optional with default -serve port='8000' host='0.0.0.0': - uvicorn main:app --host {{host}} --port {{port}} - -# Variadic (one or more required) -backup +tables: - pg_dump {{tables}} - -# Variadic (zero or more optional) -test *ARGS: - pytest {{ARGS}} - -# Mixed parameters with expression default -arch := 'amd64' -build target os=os() architecture=arch: - docker build --platform {{os}}/{{architecture}} -t {{target}} . -``` - -Dependencies between recipes can now execute **in parallel** using the `[parallel]` attribute introduced in version 1.42.0: - -```just -[parallel] -ci: lint typecheck test build - @echo "All checks passed" - -lint: - just backend::lint - just frontend::lint - -typecheck: - just backend::typecheck - just frontend::typecheck - -test: - just backend::test - just frontend::test - -build: - docker compose build -``` - -**Cross-submodule dependencies** allow recipes to depend on recipes in other modules: - -```just -# Root justfile -mod backend -mod frontend - -deploy: backend::build frontend::build - docker compose -f docker-compose.prod.yml up -d -``` - -## Variables, conditionals, and platform handling - -Just's expression system supports **conditionals**, **environment variable access**, **command substitution**, and **platform detection**. These features enable cross-platform justfiles that work on Windows, macOS, and Linux. 
- -```just -# Platform-specific commands using conditionals -browse := if os() == "linux" { "xdg-open" } else if os() == "macos" { "open" } else { "start" } -sed_inplace := if os() == "linux" { "sed -i" } else { "sed -i '' -e" } - -# Environment variables with fallbacks -db_host := env('DATABASE_HOST', 'localhost') -db_port := env('DATABASE_PORT', '5432') - -# Command substitution via backticks -git_hash := `git rev-parse --short HEAD` -current_branch := `git branch --show-current 2>/dev/null || echo "main"` - -# Dynamic values based on environment -build_mode := if env('CI', '') == 'true' { 'release' } else { 'debug' } -``` - -For **platform-specific recipes**, use the OS attributes: - -```just -[linux] -install-deps: - sudo apt install postgresql-client - -[macos] -install-deps: - brew install postgresql - -[windows] -install-deps: - choco install postgresql -``` - -## Built-in functions for production workflows - -Just provides an extensive function library. The most useful for full-stack development include path manipulation, environment access, system information, and the new executable-finding functions. 
- -| Category | Functions | Use Case | -|----------|-----------|----------| -| **Path** | `justfile_directory()`, `parent_directory()`, `join()` | Constructing paths relative to project root | -| **Environment** | `env(key, default)`, `require()`, `which()` | Configuration and dependency checking | -| **System** | `os()`, `arch()`, `os_family()`, `num_cpus()` | Cross-platform logic | -| **Files** | `path_exists()`, `read()`, `sha256_file()` | File validation and checksums | -| **Strings** | `replace()`, `trim()`, `kebabcase()` | String manipulation | - -```just -# Practical examples -project_root := justfile_directory() -scripts_dir := project_root / "scripts" -config_file := project_root / "config" / "settings.yaml" - -# Validate required tools exist -cargo := require('cargo') -docker := require('docker') - -# Generate cache keys -config_hash := sha256_file('requirements.txt') -``` - -## Error handling and user feedback - -Just provides several mechanisms for **controlling error behavior** and **user feedback**. The `-` prefix ignores command failures, the `[confirm]` attribute requires user confirmation for dangerous operations, and `[no-exit-message]` suppresses error messages for wrapper recipes. - -```just -# Continue on error (useful for cleanup) -clean: - -rm -rf build/ - -rm -rf dist/ - -rm -rf .cache/ - @echo "Cleanup complete" - -# Require confirmation for destructive operations -[confirm("This will DELETE the production database. 
Are you sure?")] -[group('danger')] -db-drop-prod: - docker compose -f docker-compose.prod.yml exec db dropdb production - -# Suppress just's error message (the tool's own message is enough) -[no-exit-message] -git *args: - git {{args}} -``` - -For **validation at parse time**, use the `error()` function in expressions: - -```just -required_env := if env('API_KEY', '') == '' { error("API_KEY environment variable is required") } else { env('API_KEY') } -``` - -## Documentation and help organization - -Self-documenting justfiles use **comments above recipes** (shown in `just --list`), the **`[doc()]` attribute** for custom descriptions, and **`[group()]`** for logical organization. Private helper recipes use the **underscore prefix**. - -```just -# Show available commands (default recipe) -default: - @just --list --unsorted - -# Build Docker images for all services -[group('build')] -build: - docker compose build - -# Run database migrations -[group('database')] -migrate *ARGS: - docker compose exec backend alembic upgrade {{ARGS}} - -# Create a new migration file -[doc("Generate migration from model changes")] -[group('database')] -migration message: - docker compose exec backend alembic revision --autogenerate -m "{{message}}" - -# Internal helper (hidden from --list) -[private] -_ensure-docker: - @docker info > /dev/null 2>&1 || (echo "Docker not running" && exit 1) -``` - -Running `just --list` with groups produces organized output: - -``` -Available recipes: - default - -[build] - build # Build Docker images for all services - -[database] - migrate *ARGS # Run database migrations - migration message # Generate migration from model changes -``` - -## CI/CD integration with GitHub Actions - -Installing just in CI pipelines uses either the **official setup-just action** or the **install script with version pinning**. Pin versions in CI to avoid unexpected breakage. 
- -```yaml -# .github/workflows/ci.yml -name: CI -on: [push, pull_request] - -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - uses: extractions/setup-just@v3 - with: - just-version: '1.43.1' - - - name: Run CI checks - run: just ci -``` - -Create a dedicated **CI recipe** that runs all checks: - -```just -# CI-specific recipe that fails fast on any issue -ci: lint typecheck test build - @echo "✅ All CI checks passed" - -# Local development doesn't need all checks -dev: - docker compose up -``` - -For **environment variable handling** in CI, rely on the `env()` function with defaults rather than assuming variables exist: - -```just -# Works both locally (with .env) and in CI (with secrets) -set dotenv-load -db_url := env('DATABASE_URL', 'postgresql://localhost/dev') -``` - -## Complete production justfile template - -This template incorporates all the patterns discussed for a FastAPI + React + PostgreSQL + Docker Compose stack: - -```just -# ============================================================================= -# Full-Stack Development Commands -# ============================================================================= -set dotenv-load -set export -set shell := ["bash", "-uc"] -set windows-shell := ["powershell.exe", "-NoLogo", "-Command"] - -# Service modules -mod backend 'backend.just' -mod frontend 'frontend.just' -mod db 'db.just' - -# Project info -project := file_name(justfile_directory()) -version := `git describe --tags --always 2>/dev/null || echo "dev"` - -# ============================================================================= -# Core Commands -# ============================================================================= - -# List available commands -default: - @just --list --unsorted - -# Start development environment -dev: - docker compose up - -# Start services in background -start: - docker compose up -d - -# Stop all services -stop: - docker compose down - -# Stop and remove volumes (fresh 
start) -[confirm("Remove all volumes and data?")] -clean: - docker compose down -v --remove-orphans - -# View service logs -logs *SERVICE: - docker compose logs -f {{SERVICE}} - -# Open shell in service container -shell service='backend': - docker compose exec -it {{service}} /bin/bash - -# ============================================================================= -# Build and Deploy -# ============================================================================= - -# Build all Docker images -[group('build')] -build *ARGS: - docker compose build {{ARGS}} - -# Rebuild from scratch (no cache) -[group('build')] -rebuild: - docker compose build --no-cache - -# Build production images -[group('build')] -build-prod: - docker compose -f docker-compose.yml -f docker-compose.prod.yml build - -# Deploy to staging -[group('deploy')] -[confirm("Deploy to staging?")] -deploy-staging: ci build-prod - docker compose -f docker-compose.yml -f docker-compose.staging.yml up -d - -# Deploy to production -[group('deploy')] -[confirm("Deploy to PRODUCTION?")] -deploy-prod: ci build-prod - docker compose -f docker-compose.yml -f docker-compose.prod.yml up -d - -# ============================================================================= -# Testing and Quality -# ============================================================================= - -# Run all tests -[group('test')] -test: - just backend::test - just frontend::test - -# Run linters -[group('test')] -lint: - just backend::lint - just frontend::lint - -# Type checking -[group('test')] -typecheck: - just backend::typecheck - just frontend::typecheck - -# Format all code -[group('test')] -format: - just backend::format - just frontend::format - -# CI pipeline (runs all checks) -[group('test')] -[parallel] -ci: lint typecheck test - @echo "✅ All checks passed" - -# ============================================================================= -# Database -# 
============================================================================= - -# Run migrations -[group('database')] -migrate *ARGS: - just db::migrate {{ARGS}} - -# Create new migration -[group('database')] -migration message: - just db::migration "{{message}}" - -# Reset database (dangerous) -[group('database')] -[confirm("Reset the database? All data will be lost.")] -db-reset: - just db::reset - just migrate - -# ============================================================================= -# Setup -# ============================================================================= - -# First-time project setup -setup: - @echo "Setting up {{project}}..." - cp -n .env.example .env || true - docker compose build - docker compose up -d - just migrate - @echo "✅ Setup complete. Run 'just dev' to start." - -# ============================================================================= -# Utilities -# ============================================================================= - -# Show running containers -ps: - docker compose ps - -# Docker system cleanup -[private] -docker-prune: - docker system prune -f - -# Print project info -info: - @echo "Project: {{project}}" - @echo "Version: {{version}}" - @echo "OS: {{os()}} ({{arch()}})" -``` - -```just -# backend.just - Backend service commands -[no-cd] - -# Run backend tests -test *ARGS: - docker compose exec backend pytest {{ARGS}} - -# Run specific test file -test-file file: - docker compose exec backend pytest {{file}} -v - -# Lint backend code -lint: - docker compose exec backend ruff check . - docker compose exec backend ruff format --check . - -# Type checking -typecheck: - docker compose exec backend mypy src/ - -# Format backend code -format: - docker compose exec backend ruff format . - docker compose exec backend ruff check --fix . 
- -# Python REPL -repl: - docker compose exec backend python - -# Install new dependency -add package: - docker compose exec backend pip install {{package}} - docker compose exec backend pip freeze > requirements.txt -``` - -```just -# db.just - Database commands -[no-cd] - -DATABASE_URL := env('DATABASE_URL', 'postgresql://postgres:postgres@db/app') - -# Run migrations -migrate *ARGS='head': - docker compose exec backend alembic upgrade {{ARGS}} - -# Create new migration -migration message: - docker compose exec backend alembic revision --autogenerate -m "{{message}}" - -# Rollback last migration -rollback: - docker compose exec backend alembic downgrade -1 - -# PostgreSQL shell -psql: - docker compose exec db psql -U postgres app - -# Create database backup -backup: - #!/usr/bin/env bash - timestamp=$(date +%Y%m%d_%H%M%S) - docker compose exec db pg_dump -U postgres app > "backups/db_${timestamp}.sql" - echo "Backup created: backups/db_${timestamp}.sql" - -# Restore from backup -restore file: - docker compose exec -T db psql -U postgres app < {{file}} - -# Reset database (drop and recreate) -reset: - docker compose exec db dropdb -U postgres --if-exists app - docker compose exec db createdb -U postgres app -``` - -## Common pitfalls to avoid - -Several patterns consistently cause problems in production justfiles. **Avoid relative paths with `../`**—use `justfile_directory()` instead, as relative paths break when the working directory changes. **Always set `windows-shell`** if your team includes Windows users, since the default `sh` isn't available natively on Windows. **Don't set `fallback := true` in the root justfile**, as this can accidentally invoke user-level justfile recipes. **Keep recipes focused**—if a recipe exceeds 10-15 lines, split it into smaller dependent recipes or use a shebang script. **Pin versions in CI** to avoid breaking changes from automatic just updates. 
- -The `error()` function evaluates at **parse time, not runtime**, so conditional error messages based on runtime state won't work as expected. For runtime validation, use shell conditionals within recipes instead. - -## Debugging justfiles effectively - -Just provides several tools for troubleshooting. Use `just --dry-run recipe` to see commands without executing them, `just --evaluate` to print all variable values, `just --show recipe` to display a recipe's source, and `just -vv recipe` for verbose execution. The `just --dump --dump-format json` command outputs the parsed justfile as JSON, useful for debugging complex expression evaluation. - -```bash -# See what would run without executing -just --dry-run deploy-prod - -# Check variable values -just --evaluate - -# Debug a specific recipe -just --show migrate - -# Maximum verbosity -just -vv ci -``` - -For cross-platform testing, test on all target platforms or use CI matrix builds to catch platform-specific issues early. The combination of OS-specific attributes (`[linux]`, `[macos]`, `[windows]`) and conditional expressions provides comprehensive cross-platform support when used correctly. diff --git a/docs/research/JWT.md b/docs/research/JWT.md deleted file mode 100644 index 94845a1..0000000 --- a/docs/research/JWT.md +++ /dev/null @@ -1,499 +0,0 @@ -# JWT Authentication for FastAPI + PostgreSQL: 2025 Production Patterns - -**The recommended stack for production JWT authentication in 2025 is PyJWT 2.10+ with Argon2id (via argon2-cffi), rotating refresh tokens stored hashed in PostgreSQL, and Redis for rate limiting and instant revocation.** FastAPI's deprecation of python-jose in favor of PyJWT, combined with passlib's unmaintained status since 2020, marks a significant shift in the ecosystem. This report covers the deep implementation patterns, anti-patterns, and security considerations needed to build a production-ready auth system. 
- ---- - -## JWT library landscape has shifted dramatically - -**PyJWT 2.10.1** is now the FastAPI-endorsed choice after python-jose's effective deprecation (last meaningful release ~2021). Key 2024-2025 updates include built-in `sub` and `jti` claim validation, `strict_aud` for stricter audience checks, and Ed448/EdDSA support. - -```python -# Production PyJWT configuration (2025) -import jwt -from datetime import datetime, timedelta, UTC - -def create_access_token(user_id: str, roles: list[str]) -> str: - return jwt.encode( - { - "sub": user_id, - "exp": datetime.now(UTC) + timedelta(minutes=15), - "iat": datetime.now(UTC), - "jti": str(uuid.uuid4()), # For revocation - "type": "access", - "roles": roles, - "token_version": user.token_version, # For "logout all" - }, - settings.JWT_SECRET_KEY.get_secret_value(), - algorithm="HS256" - ) - -def decode_token(token: str) -> dict: - return jwt.decode( - token, - settings.JWT_SECRET_KEY.get_secret_value(), - algorithms=["HS256"], # NEVER trust token header - options={"require": ["exp", "sub", "jti", "iat"]} - ) -``` - -**joserfc** (from Authlib team) emerges as the modern alternative when JWE encryption is needed or stricter type safety is required. For most FastAPI applications, PyJWT remains the pragmatic choice due to ecosystem maturity and FastAPI documentation alignment. - -### Token expiration and rotation strategy - -Production consensus for **access tokens is 15 minutes**, **refresh tokens 7-14 days with rotation**. The critical pattern is **rotating refresh tokens with token families** for theft detection: - -```python -# Token family pattern prevents replay attacks -{ - "sub": "user_123", - "jti": "unique-token-id", - "family_id": "auth-session-uuid", # All tokens from same login share this - "type": "refresh", - "exp": 1234567890 -} -``` - -When a refresh token is used, issue a **new refresh token and invalidate the old one**. 
If a previously-invalidated token is presented (replay attack), **revoke the entire token family** immediately—this catches the race condition between legitimate user and attacker. - -### Storage strategy: Memory + HttpOnly cookies - -The 2025 consensus is clear: **access tokens in JavaScript memory** (React state/closures, never localStorage), **refresh tokens in HttpOnly cookies**: - -```python -def set_refresh_cookie(response: Response, token: str): - response.set_cookie( - key="refresh_token", - value=token, - httponly=True, - secure=True, # HTTPS only - samesite="strict", # CSRF protection - max_age=604800, # 7 days - path="/api/auth/refresh" # Limit scope to refresh endpoint - ) -``` - -For high-security applications, the **Backend-for-Frontend (BFF) pattern** keeps tokens entirely server-side—the SPA receives only a session cookie while the BFF proxies API requests with injected access tokens. - ---- - -## Password hashing requires Argon2id with specific parameters - -**passlib is effectively dead**—last release October 2020, breaks on Python 3.13+. Use **argon2-cffi 25.1.0** or the newer **pwdlib 0.3.0** (from FastAPI-users maintainer). 
- -OWASP's 2025 minimum parameters for Argon2id: **19 MiB memory, 2 iterations, 1 parallelism**, targeting ~500ms hash time: - -```python -from argon2 import PasswordHasher -import asyncio - -# OWASP minimum configuration -ph = PasswordHasher( - time_cost=2, # iterations - memory_cost=19456, # 19 MiB - parallelism=1, # Important: p=1 for async FastAPI - hash_len=32, - salt_len=16 -) - -async def hash_password(password: str) -> str: - # CPU-bound - run in thread pool for async FastAPI - return await asyncio.to_thread(ph.hash, password) - -async def verify_password(hash: str, password: str) -> tuple[bool, str | None]: - """Returns (is_valid, new_hash_if_needs_rehash)""" - try: - await asyncio.to_thread(ph.verify, hash, password) - if ph.check_needs_rehash(hash): # Auto-upgrade old parameters - return True, await hash_password(password) - return True, None - except: - return False, None -``` - -**Argon2id handles salts internally**—16-byte salt is automatically generated and embedded in the output hash. Separate salt storage is unnecessary. For additional defense, a **pepper** (application-level secret stored in secrets vault) can wrap the hash via HMAC, but requires all users to reset passwords if the pepper changes. 
- -### Password reset must prevent timing attacks and enumeration - -```python -async def request_reset(email: str): - start = datetime.now() - user = await get_user_by_email(email) - - if user: - token = secrets.token_urlsafe(32) # 256 bits - token_hash = hashlib.sha256(token.encode()).hexdigest() - await store_reset_token(user.id, token_hash, expires=timedelta(hours=1)) - await send_email(email, token) - - # CRITICAL: Consistent timing prevents user enumeration - elapsed = (datetime.now() - start).total_seconds() - if elapsed < 0.5: - await asyncio.sleep(0.5 - elapsed) - - # Always same response - return {"message": "If an account exists, reset email sent"} -``` - -Store reset tokens as **SHA-256 hashes** (treat like passwords), expire within **15-60 minutes**, enforce **one-time use** by marking used before changing password. - ---- - -## FastAPI auth patterns favor dependencies over middleware - -**Dependencies are the FastAPI-native pattern for authentication**—more testable, composable, and efficient than middleware which runs on every request including `/docs`. 
- -### Standard dependency chain - -```python -from typing import Annotated -from fastapi import Depends, HTTPException, status -from fastapi.security import OAuth2PasswordBearer - -oauth2_scheme = OAuth2PasswordBearer( - tokenUrl="api/v1/auth/token", - scopes={"read": "Read access", "write": "Write access", "admin": "Admin access"} -) - -async def get_current_user( - token: Annotated[str, Depends(oauth2_scheme)] -) -> User: - try: - payload = decode_token(token) - if await is_token_revoked(payload["jti"]): - raise HTTPException(status_code=401, detail="Token revoked") - user = await get_user(payload["sub"]) - if payload.get("token_version") != user.token_version: - raise HTTPException(status_code=401, detail="Session invalidated") - return user - except jwt.InvalidTokenError: - raise HTTPException(status_code=401, detail="Invalid token") - -async def get_current_active_user( - user: Annotated[User, Depends(get_current_user)] -) -> User: - if not user.is_active: - raise HTTPException(status_code=400, detail="Inactive user") - return user - -async def get_admin_user( - user: Annotated[User, Depends(get_current_active_user)] -) -> User: - if not user.is_superuser: - raise HTTPException(status_code=403, detail="Admin required") - return user -``` - -### Optional authentication uses auto_error=False - -```python -optional_oauth2 = OAuth2PasswordBearer(tokenUrl="token", auto_error=False) - -async def get_optional_user( - token: str | None = Depends(optional_oauth2) -) -> User | None: - if not token: - return None - try: - payload = decode_token(token) - return await get_user(payload["sub"]) - except: - return None -``` - -### RBAC with callable permission checker classes - -```python -class PermissionChecker: - def __init__(self, required_permissions: list[str]): - self.required = required_permissions - - def __call__(self, user: User = Depends(get_current_active_user)): - for perm in self.required: - if perm not in user.permissions: - raise 
HTTPException(status_code=403, detail="Insufficient permissions") - return True - -# Usage -@app.get("/items/", dependencies=[Depends(PermissionChecker(["read:items"]))]) -def list_items(): ... - -@app.delete("/items/{id}", dependencies=[Depends(PermissionChecker(["delete:items"]))]) -def delete_item(id: int): ... -``` - -For **route group auth**, use APIRouter dependencies: - -```python -protected_router = APIRouter( - prefix="/api/v1", - dependencies=[Depends(get_current_active_user)] -) -admin_router = APIRouter( - prefix="/api/v1/admin", - dependencies=[Depends(get_admin_user)] -) -``` - ---- - -## Database schema requires hashed token storage and family tracking - -**Use UUID primary keys** for user tables—the ~13% performance penalty versus integers is acceptable for the security benefit of not exposing record counts. PostgreSQL 18 (Fall 2025) will introduce UUID v7 with timestamp ordering for better index performance. - -### Core user model - -```python -class User(Base): - __tablename__ = "users" - - id: Mapped[UUID] = mapped_column(primary_key=True, default=uuid4) - email: Mapped[str] = mapped_column(String(320), unique=True, index=True) - hashed_password: Mapped[str] = mapped_column(String(1024)) - - is_active: Mapped[bool] = mapped_column(default=True) - is_verified: Mapped[bool] = mapped_column(default=False) - is_superuser: Mapped[bool] = mapped_column(default=False) - - # Critical for "logout all devices" - token_version: Mapped[int] = mapped_column(default=0) - - created_at: Mapped[datetime] = mapped_column(server_default=func.now()) - updated_at: Mapped[datetime] = mapped_column(onupdate=func.now()) - last_login: Mapped[datetime | None] - - refresh_tokens: Mapped[list["RefreshToken"]] = relationship( - back_populates="user", - cascade="all, delete-orphan", - lazy="selectin" # Required for async SQLAlchemy - ) -``` - -### Refresh token table with family tracking - -```python -class RefreshToken(Base): - __tablename__ = "refresh_tokens" - - id: 
Mapped[UUID] = mapped_column(primary_key=True, default=uuid4) - - # ALWAYS hash stored tokens - token_hash: Mapped[str] = mapped_column(String(64), unique=True, index=True) - - user_id: Mapped[UUID] = mapped_column(ForeignKey("users.id", ondelete="CASCADE"), index=True) - - # Device tracking for "active sessions" UI - device_id: Mapped[str | None] = mapped_column(String(255)) - device_name: Mapped[str | None] = mapped_column(String(100)) - ip_address: Mapped[str | None] = mapped_column(String(45)) # IPv6 length - - # Token family for rotation attack detection - family_id: Mapped[UUID] = mapped_column(default=uuid4, index=True) - - expires_at: Mapped[datetime] = mapped_column(index=True) - created_at: Mapped[datetime] = mapped_column(server_default=func.now()) - - is_revoked: Mapped[bool] = mapped_column(default=False) - revoked_at: Mapped[datetime | None] -``` - -### Token rotation with replay attack detection - -```python -async def rotate_refresh_token(db: AsyncSession, old_token: str, user_id: UUID) -> tuple[str, str]: - old_hash = hashlib.sha256(old_token.encode()).hexdigest() - - existing = await db.scalar( - select(RefreshToken).where( - RefreshToken.token_hash == old_hash, - RefreshToken.is_revoked == False - ) - ) - - if not existing: - # REPLAY ATTACK - Revoke entire token family - await db.execute( - update(RefreshToken) - .where(RefreshToken.user_id == user_id) - .values(is_revoked=True, revoked_at=func.now()) - ) - await db.commit() - raise SecurityException("Token reuse detected - all sessions revoked") - - # Revoke old, issue new with same family - existing.is_revoked = True - existing.revoked_at = datetime.now(UTC) - - new_token = secrets.token_urlsafe(32) - new_refresh = RefreshToken( - token_hash=hashlib.sha256(new_token.encode()).hexdigest(), - user_id=user_id, - family_id=existing.family_id, # Same family - expires_at=datetime.now(UTC) + timedelta(days=7) - ) - db.add(new_refresh) - await db.commit() - - return new_token, 
create_access_token(user_id) -``` - -### "Logout all devices" via token_version - -```python -async def logout_all_devices(db: AsyncSession, user_id: UUID): - await db.execute( - update(User).where(User.id == user_id) - .values(token_version=User.token_version + 1) - ) - await db.execute( - update(RefreshToken).where(RefreshToken.user_id == user_id) - .values(is_revoked=True) - ) - await db.commit() -``` - -All access tokens immediately become invalid when `token_version` in the token doesn't match the user's current `token_version`. - ---- - -## Security hardening requires defense in depth - -### CORS with credentials requires explicit origins - -```python -app.add_middleware( - CORSMiddleware, - allow_origins=["https://yourapp.com"], # NEVER ["*"] with credentials - allow_credentials=True, - allow_methods=["GET", "POST", "PUT", "DELETE"], - allow_headers=["Content-Type", "Authorization", "X-CSRF-Token"], - max_age=600, # Cache preflight 10 minutes -) -``` - -### Rate limiting is mandatory for auth endpoints - -```python -from slowapi import Limiter -from slowapi.util import get_remote_address - -limiter = Limiter( - key_func=get_remote_address, - storage_uri="redis://localhost:6379" -) - -@app.post("/auth/login") -@limiter.limit("5/minute") -async def login(request: Request): ... - -@app.post("/auth/password-reset") -@limiter.limit("3/hour") -async def password_reset(request: Request): ... -``` - -Implement **progressive lockout**: 5 failed attempts → 1 minute lock, 10 → 5 minutes, 15 → 30 minutes. 
- -### Timing attack prevention requires dummy operations - -```python -# Pre-compute at startup -DUMMY_HASH = ph.hash("dummy_password_for_timing") - -async def authenticate(username: str, password: str) -> User | None: - user = await get_user_by_email(username) - - if user is None: - # Perform dummy hash to prevent timing attack - await asyncio.to_thread(ph.verify, password, DUMMY_HASH) - return None - - valid, _ = await verify_password(user.hashed_password, password) - return user if valid else None -``` - -Use `secrets.compare_digest()` for all token comparisons—never `==`. - -### RS256 over HS256 for production multi-service architectures - -For microservices, **RS256 (asymmetric)** allows distributing public keys for verification while keeping private keys isolated. Key rotation becomes simpler—only public keys need updating across services. - -```python -# Key rotation with kid (key ID) in header -def sign_token(payload: dict, private_key: str, kid: str) -> str: - return jwt.encode(payload, private_key, algorithm="RS256", headers={"kid": kid}) - -def verify_token(token: str, public_keys: dict) -> dict: - header = jwt.get_unverified_header(token) - kid = header.get("kid") - return jwt.decode(token, public_keys[kid], algorithms=["RS256"]) -``` - ---- - -## Production deployment checklist - -### Secrets management with pydantic-settings - -```python -from pydantic_settings import BaseSettings, SettingsConfigDict -from pydantic import SecretStr - -class Settings(BaseSettings): - model_config = SettingsConfigDict( - env_file=".env", - secrets_dir="/run/secrets" # Docker secrets - ) - - JWT_SECRET_KEY: SecretStr - DATABASE_URL: SecretStr - REDIS_URL: str - - ACCESS_TOKEN_EXPIRE_MINUTES: int = 15 - REFRESH_TOKEN_EXPIRE_DAYS: int = 7 -``` - -### Auth event logging (GDPR-compliant) - -**Log**: login attempts/success/failure, password changes, token refresh, session invalidation -**Never log**: passwords, full tokens, session IDs, unnecessary PII - -```python 
-logger.info("auth_event", - event_type="login_success", - user_id=user.id, # Pseudonymized ID, not email - ip_address=mask_ip(request.client.host), # Mask last octet - timestamp=datetime.utcnow().isoformat() -) -``` - -### Docker security essentials - -```dockerfile -# Non-root user -RUN useradd -r -g appgroup appuser -USER appuser - -# Multi-stage build -FROM python:3.12-slim AS runtime -COPY --from=builder /app/requirements.txt . -``` - -Use Docker secrets (`/run/secrets/`) instead of environment variables for sensitive data in production. - ---- - -## Critical anti-patterns to avoid - -- **Storing sensitive data in JWT payload** - tokens are base64, not encrypted -- **Not explicitly specifying algorithms** on decode - enables algorithm confusion attacks -- **Using passlib in new projects** - unmaintained, breaks on Python 3.13+ -- **Trusting `alg` header from token** - always validate with explicit `algorithms=["HS256"]` -- **Using `["*"]` with `allow_credentials=True`** in CORS - browsers reject this -- **Comparing tokens with `==`** instead of `secrets.compare_digest()` -- **Not running password hashing in thread pool** - blocks async event loop -- **Storing raw refresh tokens** - always hash before storage -- **Missing token family tracking** - enables silent replay attacks - -## Conclusion - -Building production-ready JWT authentication in FastAPI requires careful attention to the shifting ecosystem (PyJWT over python-jose, argon2-cffi over passlib), proper token rotation with family tracking, and defense-in-depth security measures. The patterns outlined here—dependency-based auth, hashed token storage, timing attack prevention, and progressive rate limiting—form a robust foundation that scales from single applications to distributed microservices architectures. 
diff --git a/docs/research/NGINX.md b/docs/research/NGINX.md deleted file mode 100644 index 5c16f5d..0000000 --- a/docs/research/NGINX.md +++ /dev/null @@ -1,979 +0,0 @@ -# Production-Grade Nginx Configuration: 2025 Best Practices - -**Complete guide for modern full-stack applications (FastAPI + Frontend) with emphasis on WebSocket support, configuration organization, and dev/prod architectures.** - -## Table of Contents -1. [Configuration File Organization](#configuration-file-organization) -2. [WebSocket Proxying](#websocket-proxying) -3. [Shared Configuration (http.conf)](#shared-configuration-httpconf) -4. [Development Configuration](#development-configuration-devnginx) -5. [Production Configuration](#production-configuration-prodnginx) -6. [Performance Optimization](#performance-optimization) -7. [Security Headers](#security-headers) -8. [Static File Serving](#static-file-serving-production) -9. [Vite Dev Server Integration](#vite-dev-server-integration-development) -10. [Load Balancing & Upstreams](#load-balancing--upstreams) -11. [Rate Limiting](#rate-limiting) -12. [Logging & Monitoring](#logging--monitoring) -13. 
[Complete Configuration Examples](#complete-configuration-examples) - ---- - -## Configuration File Organization - -### Recommended Structure - -``` -conf/nginx/ -├── http.conf # Shared: upstreams, maps, global http settings -├── dev.nginx # Full nginx.conf for development -└── prod.nginx # Full nginx.conf for production -``` - -### What Goes Where - -**http.conf (Shared Configurations)**: -- Upstream definitions (backend, frontend servers) -- Map directives for WebSocket connection upgrades -- Shared rate limit zones -- Common proxy settings -- MIME type definitions -- Log formats - -**dev.nginx (Development-Specific)**: -- Full nginx.conf structure -- Includes http.conf -- Proxies to Vite dev server (port 5173) -- Proxies API/WebSocket to backend (port 8000) -- Verbose logging -- CORS permissive settings -- No caching -- No SSL (typically) - -**prod.nginx (Production-Specific)**: -- Full nginx.conf structure -- Includes http.conf -- Serves static files from `/usr/share/nginx/html` -- Proxies API/WebSocket to Gunicorn workers -- SSL/TLS configuration -- Security headers -- Gzip/Brotli compression -- Static file caching -- Error logging only - ---- - -## WebSocket Proxying - -WebSocket connections require explicit handling because the "Upgrade" and "Connection" headers are hop-by-hop and not automatically passed to the proxied server. The modern approach uses a `map` directive to handle connections conditionally. - -### Core WebSocket Configuration Pattern - -```nginx -# In http.conf (shared configuration) -map $http_upgrade $connection_upgrade { - default upgrade; - '' close; -} -``` - -This sophisticated approach sets the Connection header to "close" when there's no Upgrade header, and to "upgrade" when WebSocket upgrade is requested. 
- -### WebSocket Location Block - -```nginx -location /ws/ { - proxy_pass http://backend; - proxy_http_version 1.1; - - # WebSocket-specific headers - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - - # Standard proxy headers - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - # Timeout settings for long-lived connections - proxy_read_timeout 3600s; - proxy_send_timeout 3600s; - proxy_connect_timeout 75s; - - # Disable buffering for real-time data - proxy_buffering off; - proxy_cache_bypass $http_upgrade; -} -``` - -### Critical WebSocket Settings Explained - -For WebSocket session persistence with multiple backend servers, use `ip_hash` to ensure clients always connect to the same backend: - -```nginx -upstream websocket_backend { - ip_hash; # Session persistence - server backend1:8000; - server backend2:8000; - server backend3:8000; -} -``` - -**Timeout Configuration**: By default, connections close if the proxied server doesn't transmit data within 60 seconds. For WebSockets: -- `proxy_read_timeout`: Set to 3600s (1 hour) or higher -- `proxy_send_timeout`: Set to 3600s (1 hour) or higher -- `proxy_connect_timeout`: Usually 75s is sufficient - ---- - -## Shared Configuration (http.conf) - -This file contains settings used by both dev and prod environments. 
- -```nginx -# conf/nginx/http.conf - -# WebSocket upgrade handling -map $http_upgrade $connection_upgrade { - default upgrade; - '' close; -} - -# Upstream definitions -upstream backend { - # Development: single uvicorn instance - # Production: multiple gunicorn workers (override in prod.nginx) - server backend:8000 max_fails=3 fail_timeout=30s; - keepalive 32; # Connection pooling -} - -upstream frontend_dev { - # Only used in development - server frontend:5173; -} - -# Custom log format with timing information -log_format main_timed '$remote_addr - $remote_user [$time_local] ' - '"$request" $status $body_bytes_sent ' - '"$http_referer" "$http_user_agent" ' - 'rt=$request_time uct="$upstream_connect_time" ' - 'uht="$upstream_header_time" urt="$upstream_response_time"'; - -# Rate limit zones -limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s; -limit_req_zone $binary_remote_addr zone=auth_limit:10m rate=1r/s; - -# Connection limits -limit_conn_zone $binary_remote_addr zone=conn_limit:10m; - -# Common proxy settings -proxy_set_header Host $host; -proxy_set_header X-Real-IP $remote_addr; -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -``` - ---- - -## Development Configuration (dev.nginx) - -Full nginx.conf optimized for local development with hot module replacement. 
- -```nginx -# conf/nginx/dev.nginx - -user nginx; -worker_processes 1; # Single worker sufficient for dev -error_log /var/log/nginx/error.log debug; # Verbose logging -pid /var/run/nginx.pid; - -events { - worker_connections 1024; - use epoll; -} - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - # Include shared configuration - include /etc/nginx/http.conf; - - # Development-specific settings - access_log /var/log/nginx/access.log main_timed; - sendfile off; # Disable for file system changes - tcp_nopush off; - tcp_nodelay on; - keepalive_timeout 65; - - # Disable caching in development - add_header Cache-Control "no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0"; - - # CORS permissive for development - add_header 'Access-Control-Allow-Origin' '*' always; - add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, PATCH, OPTIONS' always; - add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization' always; - - server { - listen 80; - server_name localhost; - - # Handle preflight requests - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Origin' '*'; - add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, PATCH, OPTIONS'; - add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization'; - add_header 'Access-Control-Max-Age' 1728000; - add_header 'Content-Type' 'text/plain; charset=utf-8'; - add_header 'Content-Length' 0; - return 204; - } - - # API routes to backend - location /api/ { - limit_req zone=api_limit burst=20 nodelay; - limit_conn conn_limit 10; - - proxy_pass http://backend; - proxy_http_version 1.1; - proxy_set_header Connection ""; - - # Proxy buffering settings - proxy_buffering off; - proxy_request_buffering off; - } - - # WebSocket route - location /ws/ { - proxy_pass http://backend; - 
proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - proxy_read_timeout 3600s; - proxy_send_timeout 3600s; - proxy_buffering off; - } - - # Vite dev server (with HMR WebSocket support) - location / { - proxy_pass http://frontend_dev; - proxy_http_version 1.1; - - # Required for Vite HMR - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - proxy_set_header Host $host; - - # Vite-specific timeout - proxy_read_timeout 60s; - proxy_buffering off; - } - - # Health check endpoint - location /health { - access_log off; - return 200 "healthy\n"; - add_header Content-Type text/plain; - } - } -} -``` - -### Key Development Features: - -1. **Verbose Logging**: `error_log debug` for troubleshooting -2. **No Caching**: Ensures fresh content on every request -3. **Permissive CORS**: Allows frontend to call backend freely -4. **HMR Support**: WebSocket headers (Upgrade and Connection) are required for Vite's Hot Module Replacement to function properly -5. **Disabled Optimizations**: `sendfile off` to catch file changes immediately - ---- - -## Production Configuration (prod.nginx) - -Optimized for performance, security, and reliability. 
- -```nginx -# conf/nginx/prod.nginx - -user nginx; -worker_processes auto; # One per CPU core -worker_rlimit_nofile 100000; -error_log /var/log/nginx/error.log warn; # Only warnings and errors -pid /var/run/nginx.pid; - -events { - worker_connections 4096; # High concurrency support - use epoll; # Efficient connection handling on Linux - multi_accept on; -} - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - # Include shared configuration - include /etc/nginx/http.conf; - - # Logging with buffering - access_log /var/log/nginx/access.log main_timed buffer=32k flush=5s; - - # Performance optimizations - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - keepalive_requests 100; - types_hash_max_size 2048; - server_tokens off; # Hide nginx version - - # File cache - open_file_cache max=10000 inactive=20s; - open_file_cache_valid 30s; - open_file_cache_min_uses 2; - open_file_cache_errors on; - - # Buffer sizes - client_body_buffer_size 128k; - client_header_buffer_size 16k; - client_max_body_size 10m; - large_client_header_buffers 4 16k; - - # Timeouts - client_body_timeout 12s; - client_header_timeout 12s; - send_timeout 10s; - - # Gzip compression - gzip on; - gzip_vary on; - gzip_proxied any; - gzip_comp_level 6; - gzip_types - text/plain - text/css - text/xml - text/javascript - application/json - application/javascript - application/xml+rss - application/atom+xml - image/svg+xml; - gzip_disable "msie6"; - gzip_min_length 256; - - # Redirect HTTP to HTTPS - server { - listen 80; - server_name yourdomain.com; - return 301 https://$host$request_uri; - } - - # Main HTTPS server - server { - listen 443 ssl http2; - server_name yourdomain.com; - - # SSL configuration - ssl_certificate /etc/nginx/ssl/fullchain.pem; - ssl_certificate_key /etc/nginx/ssl/privkey.pem; - ssl_protocols TLSv1.2 TLSv1.3; - ssl_ciphers 
'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384'; - ssl_prefer_server_ciphers off; - ssl_session_cache shared:SSL:50m; - ssl_session_timeout 1d; - ssl_session_tickets off; - ssl_stapling on; - ssl_stapling_verify on; - - # Security headers - add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; - add_header X-Frame-Options "SAMEORIGIN" always; - add_header X-Content-Type-Options "nosniff" always; - add_header X-XSS-Protection "1; mode=block" always; - add_header Referrer-Policy "strict-origin-when-cross-origin" always; - add_header Permissions-Policy "geolocation=(), microphone=(), camera=()" always; - - # Content Security Policy - add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval'; style-src 'self' 'unsafe-inline'; img-src 'self' data: https:; font-src 'self' data:; connect-src 'self' wss://yourdomain.com" always; - - # Root directory for static files - root /usr/share/nginx/html; - index index.html; - - # API routes with rate limiting - location /api/ { - limit_req zone=api_limit burst=20 nodelay; - limit_conn conn_limit 20; - - proxy_pass http://backend; - proxy_http_version 1.1; - proxy_set_header Connection ""; - - # Proxy buffering optimized for API - proxy_buffering on; - proxy_buffers 8 24k; - proxy_buffer_size 2k; - - # Timeouts - proxy_connect_timeout 75s; - proxy_send_timeout 30s; - proxy_read_timeout 30s; - } - - # Auth endpoints with stricter rate limiting - location /api/auth/ { - limit_req zone=auth_limit burst=5 nodelay; - - proxy_pass http://backend; - proxy_http_version 1.1; - proxy_set_header Connection ""; - } - - # WebSocket route - location /ws/ { - proxy_pass http://backend; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header 
X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - proxy_read_timeout 3600s; - proxy_send_timeout 3600s; - proxy_connect_timeout 75s; - proxy_buffering off; - } - - # Static files with aggressive caching - location ~* \.(jpg|jpeg|png|gif|ico|svg|webp)$ { - expires 1y; - add_header Cache-Control "public, immutable"; - access_log off; - } - - location ~* \.(css|js)$ { - expires 1y; - add_header Cache-Control "public, immutable"; - access_log off; - } - - location ~* \.(woff|woff2|ttf|eot|otf)$ { - expires 1y; - add_header Cache-Control "public, immutable"; - access_log off; - } - - # SPA fallback - all non-matching routes to index.html - location / { - try_files $uri $uri/ /index.html; - add_header Cache-Control "no-cache, must-revalidate"; - } - - # Health check - location /health { - access_log off; - return 200 "healthy\n"; - add_header Content-Type text/plain; - } - - # Deny access to hidden files - location ~ /\. { - deny all; - access_log off; - log_not_found off; - } - } -} -``` - ---- - -## Performance Optimization - -### Worker Configuration - -Running one worker process per CPU core works well in most cases, and setting worker_processes to auto achieves this: - -```nginx -worker_processes auto; -worker_rlimit_nofile 100000; # File descriptor limit - -events { - worker_connections 4096; - use epoll; # Most efficient on Linux - multi_accept on; # Accept multiple connections at once -} -``` - -### Connection and Request Handling - -The default worker_connections is 512, but most systems can support higher values. The optimal setting depends on server resources and traffic patterns. 
- -**Formula**: `max_clients = worker_processes * worker_connections` - -### Buffer Optimization - -If buffer sizes are too low, Nginx will write to temporary files, causing excessive disk I/O: - -```nginx -client_body_buffer_size 128k; -client_header_buffer_size 16k; -client_max_body_size 10m; -large_client_header_buffers 4 16k; -``` - -### Keepalive Connections - -Keepalive connections reduce CPU and network overhead by keeping connections open longer: - -```nginx -keepalive_timeout 65; -keepalive_requests 100; - -upstream backend { - server backend:8000; - keepalive 32; # Idle connections to upstream -} -``` - -### Sendfile and TCP Optimizations - -The sendfile() system call enables zero-copy data transfer, speeding up TCP transmissions without consuming CPU cycles: - -```nginx -sendfile on; -tcp_nopush on; # Send headers in one packet -tcp_nodelay on; # Disable Nagle's algorithm -``` - -### File Caching - -```nginx -open_file_cache max=10000 inactive=20s; -open_file_cache_valid 30s; -open_file_cache_min_uses 2; -open_file_cache_errors on; -``` - -### Compression - -```nginx -gzip on; -gzip_vary on; -gzip_proxied any; -gzip_comp_level 6; # Balance between CPU and compression -gzip_min_length 256; -gzip_types - text/plain - text/css - text/xml - text/javascript - application/json - application/javascript - application/xml+rss - application/atom+xml - image/svg+xml; -``` - -**Important**: Don't increase compression level too high, as it costs CPU effort without proportional throughput gains. - ---- - -## Security Headers - -### Essential Security Headers (2025) - -Security headers are levers that slash XSS risk, lock browsers to HTTPS, tame third-party scripts, and protect users without touching app code. 
- -```nginx -# HTTP Strict Transport Security (HSTS) -add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; - -# Prevent clickjacking -add_header X-Frame-Options "SAMEORIGIN" always; - -# Prevent MIME sniffing -add_header X-Content-Type-Options "nosniff" always; - -# XSS Protection (legacy but still useful) -add_header X-XSS-Protection "1; mode=block" always; - -# Referrer Policy -add_header Referrer-Policy "strict-origin-when-cross-origin" always; - -# Disable dangerous browser features -add_header Permissions-Policy "geolocation=(), microphone=(), camera=()" always; -``` - -### Content Security Policy (CSP) - -CSP controls where scripts, styles, images, frames, and connections can load from. Start with a restrictive policy: - -```nginx -add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval'; style-src 'self' 'unsafe-inline'; img-src 'self' data: https:; font-src 'self' data:; connect-src 'self' wss://yourdomain.com" always; -``` - -**Best Practice**: Roll out CSP in Report-Only mode first, sending violation reports to an endpoint you control: - -```nginx -# Test mode -add_header Content-Security-Policy-Report-Only "default-src 'self'; report-uri /csp-report" always; -``` - -### HSTS Explained - -HSTS forces browsers to only use HTTPS by caching this policy for the max-age period. The `includeSubDomains` directive applies the policy to all subdomains. - -**Preloading**: To add your domain to the browser preload list, include "preload" in the header and submit to hstspreload.org. This is a one-way decision—removal is difficult. 
- -```nginx -add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; -``` - -### SSL/TLS Best Practices - -```nginx -ssl_protocols TLSv1.2 TLSv1.3; -ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384'; -ssl_prefer_server_ciphers off; -ssl_session_cache shared:SSL:50m; -ssl_session_timeout 1d; -ssl_session_tickets off; -ssl_stapling on; -ssl_stapling_verify on; -``` - ---- - -## Static File Serving (Production) - -### Cache Headers Strategy - -Use Cache-Control on static files so that CDNs and browsers can cache them effectively. The modern approach favors `Cache-Control` over `Expires`. - -```nginx -# Images, fonts, media - long cache -location ~* \.(jpg|jpeg|png|gif|ico|svg|webp|avif|woff|woff2|ttf|eot|otf|mp4|mp3|ogg|webm)$ { - expires 1y; - add_header Cache-Control "public, immutable"; - access_log off; -} - -# CSS and JavaScript - long cache with versioning -location ~* \.(css|js)$ { - expires 1y; - add_header Cache-Control "public, immutable"; - access_log off; -} - -# HTML files - no caching -location ~* \.html$ { - expires -1; - add_header Cache-Control "no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0"; -} -``` - -### Cache Busting - -When files are updated, use cache-busting by appending version numbers to filenames (e.g., `style.css?v=2` or `style.v2.css`). 
- -### SPA Routing - -For Single Page Applications, route all non-file requests to index.html: - -```nginx -location / { - try_files $uri $uri/ /index.html; - add_header Cache-Control "no-cache, must-revalidate"; -} -``` - ---- - -## Vite Dev Server Integration (Development) - -### HMR WebSocket Support - -Vite's HMR requires WebSocket support with Upgrade and Connection headers set for the WebSocket connection to function: - -```nginx -location / { - proxy_pass http://frontend:5173; - proxy_http_version 1.1; - - # Critical for Vite HMR - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - proxy_set_header Host $host; - - # Timeouts - proxy_read_timeout 60s; - proxy_buffering off; -} -``` - -### Vite HMR Path Handling - -If Vite uses a custom HMR path: - -```nginx -location ~* /__vite_hmr { - proxy_pass http://frontend:5173; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header Host $host; -} -``` - -### Common Vite + Nginx Issues - -1. **HMR not working**: Ensure WebSocket headers are set correctly -2. **Too many redirects**: Use `proxy_pass http://host;` without trailing slash to avoid path manipulation -3. 
**Connection timeout**: Default proxy timeouts may be too short; increase to 30-60 seconds for WebSocket connections - ---- - -## Load Balancing & Upstreams - -### Upstream Configuration - -```nginx -upstream backend { - least_conn; # Route to server with fewest connections - - server backend1:8000 max_fails=3 fail_timeout=30s; - server backend2:8000 max_fails=3 fail_timeout=30s; - server backend3:8000 backup; # Fallback server - - keepalive 32; # Connection pooling - keepalive_requests 100; - keepalive_timeout 60s; -} -``` - -### Load Balancing Methods - -- `round_robin` (default): Distribute requests evenly -- `least_conn`: Route to server with fewest active connections -- `ip_hash`: Consistent routing based on client IP (session persistence) -- `hash $request_uri consistent`: Route based on URI - -### Health Checks (Nginx Plus) - -```nginx -upstream backend { - zone backend 64k; - - server backend1:8000; - server backend2:8000; - - health_check interval=5s fails=3 passes=2; -} -``` - ---- - -## Rate Limiting - -### Zone Definitions - -```nginx -# In http block -limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s; -limit_req_zone $binary_remote_addr zone=auth_limit:10m rate=1r/s; -limit_conn_zone $binary_remote_addr zone=conn_limit:10m; -``` - -### Application in Locations - -```nginx -# General API rate limiting -location /api/ { - limit_req zone=api_limit burst=20 nodelay; - limit_conn conn_limit 20; - proxy_pass http://backend; -} - -# Stricter for authentication -location /api/auth/ { - limit_req zone=auth_limit burst=5 nodelay; - proxy_pass http://backend; -} -``` - -**Parameters**: -- `rate`: Requests per second (or `r/m` for per minute) -- `burst`: Allow temporary burst above rate -- `nodelay`: Process burst requests immediately -- `limit_conn`: Max simultaneous connections - ---- - -## Logging & Monitoring - -### Custom Log Format with Timing - -```nginx -log_format main_timed '$remote_addr - $remote_user [$time_local] ' - '"$request" $status 
$body_bytes_sent ' - '"$http_referer" "$http_user_agent" ' - 'rt=$request_time uct="$upstream_connect_time" ' - 'uht="$upstream_header_time" urt="$upstream_response_time"'; -``` - -### Buffered Logging for Performance - -Logging every request directly to disk is expensive; buffering reduces write operations: - -```nginx -access_log /var/log/nginx/access.log main_timed buffer=32k flush=5s; -``` - -### Conditional Logging - -```nginx -# Don't log health checks -location /health { - access_log off; - return 200 "healthy\n"; -} - -# Log only errors for static files -location ~* \.(jpg|png|css|js)$ { - access_log off; - error_log /var/log/nginx/static_error.log; -} -``` - ---- - -## Complete Configuration Examples - -### Docker Compose Integration - -```yaml -# docker-compose.prod.yml -version: '3.8' - -services: - nginx: - image: nginx:alpine - ports: - - "80:80" - - "443:443" - volumes: - - ./conf/nginx/http.conf:/etc/nginx/http.conf:ro - - ./conf/nginx/prod.nginx:/etc/nginx/nginx.conf:ro - - ./ssl:/etc/nginx/ssl:ro - - nginx_cache:/var/cache/nginx - depends_on: - - backend - networks: - - app_network - - backend: - build: - context: ./backend - dockerfile: Dockerfile.prod - expose: - - "8000" - networks: - - app_network - -volumes: - nginx_cache: - -networks: - app_network: -``` - -### Minimal Dev Configuration - -```nginx -# Absolute minimum for development -http { - include mime.types; - - map $http_upgrade $connection_upgrade { - default upgrade; - '' close; - } - - upstream backend { - server backend:8000; - } - - server { - listen 80; - - location /api/ { - proxy_pass http://backend; - } - - location /ws/ { - proxy_pass http://backend; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - proxy_read_timeout 3600s; - } - - location / { - proxy_pass http://frontend:5173; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection $connection_upgrade; - } - } -} 
-``` - ---- - -## Key Takeaways - -### WebSocket Requirements -1. Map `$http_upgrade` to `$connection_upgrade` -2. Set `proxy_http_version 1.1` -3. Set headers: `Upgrade $http_upgrade` and `Connection $connection_upgrade` -4. Increase timeouts: `proxy_read_timeout 3600s` -5. Disable buffering: `proxy_buffering off` - -### Dev vs Prod Differences -- **Dev**: Verbose logging, no caching, permissive CORS, proxy to Vite -- **Prod**: Error logging only, aggressive caching, strict security headers, serve static files - -### File Organization -- **http.conf**: Upstreams, maps, shared settings -- **dev.nginx**: Full config optimized for development -- **prod.nginx**: Full config optimized for production - -### Performance Priorities -1. Worker processes = CPU cores (`worker_processes auto`) -2. High worker connections (4096+) -3. Enable `sendfile`, `tcp_nopush`, `tcp_nodelay` -4. Buffer optimization to avoid disk I/O -5. Keepalive connections for upstreams - -### Security Essentials -1. HSTS with preload for HTTPS enforcement -2. Comprehensive CSP to prevent XSS -3. X-Frame-Options to prevent clickjacking -4. Rate limiting on API endpoints -5. Hide server version (`server_tokens off`) - -### Static File Serving -1. Long cache for assets (1 year with `immutable`) -2. No cache for HTML files -3. Use cache busting with versioned filenames -4. SPA fallback with `try_files $uri /index.html` diff --git a/docs/research/POSTRESQL.MD b/docs/research/POSTRESQL.MD deleted file mode 100644 index cae299c..0000000 --- a/docs/research/POSTRESQL.MD +++ /dev/null @@ -1,666 +0,0 @@ -# Production FastAPI + SQLAlchemy + PostgreSQL Template Guide (2025) - -Modern Python web development in 2025 demands async-first patterns with SQLAlchemy 2.0's native async syntax, PostgreSQL-specific optimizations, and tooling like **uv** and **Ruff** that have become the new standard. 
This guide covers production-ready configurations, senior-level patterns, and critical anti-patterns to avoid—focusing on what's changed in 2025 rather than rehashing tutorials. - ---- - -## SQLAlchemy 2.0+ async patterns have fundamentally changed - -The transition from SQLAlchemy 1.4 to 2.0+ introduced breaking changes that remain misunderstood. **Use `async_sessionmaker` directly**—not the legacy `sessionmaker(class_=AsyncSession)` pattern still found in older tutorials. - -```python -from sqlalchemy.ext.asyncio import ( - create_async_engine, - async_sessionmaker, - AsyncSession, - AsyncAttrs, -) -from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column - -# Single engine per service, created once at startup -engine = create_async_engine( - "postgresql+asyncpg://user:pass@host/db", - pool_size=25, - max_overflow=15, - pool_timeout=30, - pool_pre_ping=True, # Critical for production - connect_args={ - "command_timeout": 30, - "statement_cache_size": 200, - }, -) - -# Create sessionmaker factory -SessionLocal = async_sessionmaker( - bind=engine, - class_=AsyncSession, - expire_on_commit=False, # CRITICAL: prevents MissingGreenlet errors - autoflush=False, -) -``` - -The **`expire_on_commit=False`** setting is non-negotiable for async—without it, accessing any attribute after commit triggers implicit I/O, causing `MissingGreenlet` errors that crash your application. - -### Model definition uses typed syntax exclusively - -The old `Column(Integer)` style is deprecated. 
SQLAlchemy 2.0+ infers types from Python annotations: - -```python -class Base(AsyncAttrs, DeclarativeBase): - """Include AsyncAttrs for awaitable_attrs access.""" - pass - -class User(Base): - __tablename__ = "users" - - id: Mapped[int] = mapped_column(primary_key=True) - email: Mapped[str] = mapped_column(String(255), unique=True) - nickname: Mapped[str | None] = mapped_column(String(50)) # Optional = nullable - created_at: Mapped[datetime] = mapped_column(server_default=func.now()) - - # Relationships with proper async loading - posts: Mapped[list["Post"]] = relationship( - back_populates="author", - lazy="selectin", # NOT "select" which triggers implicit I/O - ) -``` - -**Key anti-pattern to avoid**: Using `lazy="select"` (the default) or `lazy="dynamic"` with async sessions. Both trigger implicit I/O. Use `lazy="selectin"` for collections or `lazy="raise"` to enforce explicit loading. - -### Relationship loading requires upfront decisions - -For async contexts, you must explicitly load relationships—there's no safe lazy loading: - -```python -from sqlalchemy.orm import selectinload, joinedload - -# Collections: use selectinload (emits SELECT with IN clause) -stmt = select(User).options(selectinload(User.posts)) - -# Scalar relationships: use joinedload -stmt = select(Post).options(joinedload(Post.author)) - -# Chaining for nested relationships -stmt = select(User).options( - selectinload(User.posts).joinedload(Post.category) -) -``` - -When you need on-demand loading, use the `AsyncAttrs` mixin and `awaitable_attrs`: - -```python -async with session: - user = await session.get(User, user_id) - posts = await user.awaitable_attrs.posts # Explicit async load -``` - ---- - -## PostgreSQL configuration for async workloads - -### Connection pool sizing follows a formula - -PostgreSQL's wiki recommends: **connections ≈ (CPU cores × 2) + effective_spindle_count**. 
For async FastAPI with 100+ concurrent users: - -| Scale | pool_size | max_overflow | Total Max | -|-------|-----------|--------------|-----------| -| Medium (100-500 users) | 20-25 | 10-15 | 40 | -| Large (500+ users) | 25-50 | 20-30 | 80 | - -**Critical consideration**: Total connections across all workers must not exceed PostgreSQL's `max_connections`. With 4 uvicorn workers at `pool_size=25`, you're consuming 100 connections before overflow. - -```python -engine = create_async_engine( - DATABASE_URL, - pool_size=25, - max_overflow=15, - pool_timeout=30, - pool_recycle=1800, # Recycle every 30 minutes - pool_pre_ping=True, # Test connections before use - connect_args={ - "command_timeout": 30, # Query timeout in seconds - "statement_cache_size": 200, - "ssl": "require", - }, -) -``` - -### PgBouncer requires special configuration - -When using PgBouncer in transaction mode, disable asyncpg's prepared statement cache: - -```python -from sqlalchemy.pool import NullPool - -engine = create_async_engine( - DATABASE_URL, - poolclass=NullPool, # Let PgBouncer handle pooling - connect_args={ - "statement_cache_size": 0, # Disable prepared statements - }, -) -``` - -### UUID v7 is now the recommended primary key strategy - -PostgreSQL 18 (September 2025) added native `uuidv7()` function. 
UUID v7 stores a timestamp in the first 48 bits, providing: -- **Sequential ordering**: Optimal for B-tree indexes (unlike UUID v4's random fragmentation) -- **Distributed generation**: No database coordination required -- **Time-sortable**: Natural chronological ordering - -```python -from sqlalchemy.dialects.postgresql import UUID - -class Entity(Base): - id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - primary_key=True, - server_default=text("uuidv7()"), # PostgreSQL 18+ - ) -``` - -For PostgreSQL < 18, use the `uuid6` Python library: - -```python -import uuid6 -id: Mapped[uuid.UUID] = mapped_column(primary_key=True, default=uuid6.uuid7) -``` - -### PostgreSQL-specific indexes you should know - -**GIN indexes** for JSONB containment queries: -```python -Index('ix_metadata_gin', 'metadata', postgresql_using='gin') -``` - -**Partial indexes** for frequently filtered subsets: -```python -Index('ix_active_users', 'email', postgresql_where=(User.is_active == True)) -``` - -**BRIN indexes** for large, naturally-ordered tables (logs, time-series): -```python -Index('ix_created_at_brin', 'created_at', postgresql_using='brin') -``` - ---- - -## FastAPI architecture for scalable applications - -### Domain-driven project structure scales better - -The 2025 consensus favors **module-based organization** over file-type organization, inspired by Netflix's Dispatch project: - -``` -src/ -├── auth/ -│ ├── router.py -│ ├── schemas.py -│ ├── models.py -│ ├── service.py -│ └── dependencies.py -├── posts/ -│ ├── router.py -│ ├── schemas.py -│ ├── models.py -│ ├── service.py -│ └── dependencies.py -├── config.py # Global configuration -├── database.py # Connection setup -└── main.py -``` - -Each domain package owns its complete vertical slice. 
Cross-domain imports use explicit module references to prevent circular imports: - -```python -from src.auth import constants as auth_constants -from src.posts.service import PostService -``` - -### Modern dependency injection uses Annotated types - -FastAPI's documentation now emphasizes `Annotated` for cleaner, reusable dependencies: - -```python -from typing import Annotated, AsyncIterator -from fastapi import Depends - -async def get_session() -> AsyncIterator[AsyncSession]: - async with SessionLocal() as session: - try: - yield session - await session.commit() - except Exception: - await session.rollback() - raise - -# Reusable type alias -SessionDep = Annotated[AsyncSession, Depends(get_session)] - -# Usage is clean -@router.post("/users") -async def create_user(data: UserCreate, session: SessionDep): - user = User(**data.model_dump()) - session.add(user) - return user -``` - -### Lifespan context manager replaces startup/shutdown events - -The `@app.on_event("startup")` decorator is deprecated. 
Use the lifespan context manager: - -```python -from contextlib import asynccontextmanager - -@asynccontextmanager -async def lifespan(app: FastAPI): - # STARTUP - app.state.db_engine = create_async_engine(DATABASE_URL) - app.state.session_factory = async_sessionmaker( - app.state.db_engine, expire_on_commit=False - ) - - # Warm the connection pool - async with app.state.db_engine.begin() as conn: - await conn.execute(text("SELECT 1")) - - yield # Application runs - - # SHUTDOWN - await app.state.db_engine.dispose() - -app = FastAPI(lifespan=lifespan) -``` - -### Repository pattern versus direct ORM is contextual - -The 2025 senior developer consensus: - -**Use repository pattern when:** -- Complex business logic requiring transaction coordination -- Multiple data sources or potential database migrations -- Large teams needing clear boundaries -- Comprehensive unit testing requirements - -**Direct ORM is acceptable when:** -- Simple CRUD operations -- Rapid prototyping or small services -- Using SQLModel where DTO/DAO are unified - -A pragmatic repository implementation: - -```python -from typing import Generic, TypeVar - -T = TypeVar("T", bound=Base) - -class BaseRepository(Generic[T]): - def __init__(self, model: type[T], session: AsyncSession): - self.model = model - self.session = session - - async def get(self, id: int) -> T | None: - return await self.session.get(self.model, id) - - async def create(self, data: dict) -> T: - obj = self.model(**data) - self.session.add(obj) - await self.session.flush() - await self.session.refresh(obj) - return obj -``` - -### Background tasks need their own sessions - -A critical anti-pattern: **never pass a database session to BackgroundTasks**. The session closes when the request completes, before the background task runs: - -```python -# WRONG - session will be closed -@router.post("/") -async def endpoint(session: SessionDep, tasks: BackgroundTasks): - tasks.add_task(some_task, session) # ❌ Session already closed! 
- -# CORRECT - create new session inside task -def background_db_task(user_id: int): - with SessionLocal() as session: - user = session.get(User, user_id) - # Process... - session.commit() - -@router.post("/") -async def endpoint(user_id: int, tasks: BackgroundTasks): - tasks.add_task(background_db_task, user_id) # ✅ Pass data, not session -``` - -For heavy workloads, use **ARQ** (async Redis queue) or **Celery**—both create their own sessions. - ---- - -## Alembic migrations for async environments - -### Initialize with the async template - -```bash -alembic init -t async migrations -``` - -The `env.py` requires careful configuration for SQLAlchemy 2.0+: - -```python -# alembic/env.py -from sqlalchemy.ext.asyncio import async_engine_from_config -from sqlalchemy import pool -from app.db import Base -from app.models import * # Import ALL models for autogenerate - -target_metadata = Base.metadata - -def do_run_migrations(connection): - context.configure( - connection=connection, - target_metadata=target_metadata, - compare_type=True, # Detect column type changes - compare_server_default=True, - ) - with context.begin_transaction(): - context.run_migrations() - -async def run_async_migrations(): - connectable = async_engine_from_config( - config.get_section(config.config_ini_section), - prefix="sqlalchemy.", - poolclass=pool.NullPool, - ) - async with connectable.connect() as connection: - await connection.run_sync(do_run_migrations) - await connectable.dispose() -``` - -### Naming conventions prevent migration headaches - -Define naming conventions in your Base class—this ensures consistent, predictable constraint names that Alembic can track: - -```python -from sqlalchemy import MetaData - -naming_convention = { - "ix": "ix_%(column_0_label)s", - "uq": "uq_%(table_name)s_%(column_0_name)s", - "ck": "ck_%(table_name)s_%(constraint_name)s", - "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", - "pk": "pk_%(table_name)s", -} - -class 
Base(DeclarativeBase): - metadata = MetaData(naming_convention=naming_convention) -``` - -### Zero-downtime migrations use expand-contract pattern - -**Phase 1 (Expand)**: Add new columns as nullable, deploy application that writes to both -**Phase 2 (Migrate)**: Backfill data in batches -**Phase 3 (Contract)**: Add constraints, remove old columns - -```python -# Phase 1: Add nullable column with server_default -def upgrade(): - op.add_column('users', - sa.Column('email_verified', sa.Boolean(), - server_default='false', nullable=False) - ) -``` - -Using `server_default` avoids table locks on large tables. - ---- - -## Docker containerization for production - -### Multi-stage builds reduce image size by 70%+ - -```dockerfile -# Stage 1: Builder -FROM python:3.12-slim AS builder -WORKDIR /app - -RUN apt-get update && apt-get install -y --no-install-recommends \ - build-essential gcc libpq-dev \ - && rm -rf /var/lib/apt/lists/* - -RUN python -m venv /app/.venv -ENV PATH="/app/.venv/bin:$PATH" - -COPY requirements.txt . -RUN pip install --no-cache-dir -r requirements.txt - -# Stage 2: Production -FROM python:3.12-slim AS production - -RUN groupadd -r appuser && useradd -r -g appuser appuser -WORKDIR /app - -RUN apt-get update && apt-get install -y --no-install-recommends \ - libpq5 curl && rm -rf /var/lib/apt/lists/* - -COPY --from=builder /app/.venv /app/.venv -ENV PATH="/app/.venv/bin:$PATH" - -COPY --chown=appuser:appuser ./app ./app -USER appuser - -HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ - CMD curl -f http://localhost:8000/health || exit 1 - -CMD ["gunicorn", "app.main:app", "-w", "4", "-k", "uvicorn.workers.UvicornWorker", "-b", "0.0.0.0:8000"] -``` - -### Docker Compose with proper health checks - -```yaml -services: - api: - build: - context: . 
- target: production - depends_on: - db: - condition: service_healthy - environment: - - DATABASE_URL=postgresql+asyncpg://postgres:postgres@db:5432/app - - db: - image: postgres:16-alpine - volumes: - - postgres_data:/var/lib/postgresql/data - healthcheck: - test: ["CMD-SHELL", "pg_isready -U postgres"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 10s - - migrate: - build: . - command: ["alembic", "upgrade", "head"] - depends_on: - db: - condition: service_healthy - profiles: ["migrate"] -``` - -Run migrations separately: `docker compose --profile migrate up migrate` - ---- - -## Authentication uses PyJWT and Argon2 in 2025 - -### python-jose is deprecated—use PyJWT - -FastAPI's official documentation has switched to **PyJWT**. The `python-jose` library had Python 3.10+ compatibility issues and hasn't been updated since 2021: - -```python -import jwt -from datetime import datetime, timedelta, timezone - -SECRET_KEY = "your-256-bit-secret" # openssl rand -hex 32 -ALGORITHM = "HS256" - -def create_access_token(data: dict, expires_delta: timedelta | None = None) -> str: - to_encode = data.copy() - expire = datetime.now(timezone.utc) + (expires_delta or timedelta(minutes=15)) - to_encode.update({"exp": expire, "iat": datetime.now(timezone.utc)}) - return jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM) -``` - -### Argon2id replaces bcrypt for password hashing - -OWASP and the Password Hashing Competition recommend **Argon2id**. FastAPI's docs now use `pwdlib[argon2]`: - -```python -from pwdlib import PasswordHash - -password_hash = PasswordHash.recommended() # Uses Argon2id - -def verify_password(plain: str, hashed: str) -> bool: - return password_hash.verify(plain, hashed) - -def hash_password(password: str) -> str: - return password_hash.hash(password) -``` - -Argon2id is memory-hard and GPU-resistant—bcrypt uses a fixed 4KB of memory, making it more vulnerable to parallel attacks. 
- -### Token patterns for production - -```python -# Access tokens: short-lived (15-30 min), stateless -# Refresh tokens: long-lived (7-30 days), stored in DB for revocation - -class RefreshToken(Base): - __tablename__ = "refresh_tokens" - id: Mapped[uuid.UUID] = mapped_column(primary_key=True) - user_id: Mapped[uuid.UUID] = mapped_column(ForeignKey("users.id")) - token_hash: Mapped[str] # Store hashed, never plain - expires_at: Mapped[datetime] - revoked: Mapped[bool] = mapped_column(default=False) -``` - -Implement **token rotation**: issue a new refresh token on each use and invalidate the old one. - ---- - -## Python tooling has consolidated around uv and Ruff - -### uv is replacing pip, Poetry, and pyenv - -The Astral team's **uv** tool is 10-100x faster than alternatives and handles package management, virtual environments, and Python version management: - -```bash -# Install uv -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Create project -uv init my-project -uv add fastapi uvicorn sqlalchemy - -# Sync dependencies -uv sync -``` - -### Modern pyproject.toml configuration - -```toml -[project] -name = "my-fastapi-app" -version = "0.1.0" -requires-python = ">=3.12" -dependencies = [ - "fastapi>=0.115.0", - "uvicorn[standard]>=0.32.0", - "sqlalchemy>=2.0.0", - "asyncpg>=0.30.0", - "pydantic-settings>=2.6.0", - "pyjwt>=2.9.0", - "pwdlib[argon2]>=0.2.0", -] - -[dependency-groups] -dev = ["pytest>=8.0", "ruff>=0.8.0", "mypy>=1.13.0", "pre-commit>=4.0"] - -[tool.ruff] -target-version = "py312" -line-length = 88 - -[tool.ruff.lint] -select = ["E", "F", "I", "UP", "B", "SIM", "C4", "DTZ", "T20", "RUF"] -``` - -### Ruff replaces Flake8, Black, and isort - -A single tool for linting and formatting: - -```bash -ruff check --fix . # Lint with auto-fix -ruff format . 
# Format code -``` - -### Pydantic Settings v2 for configuration - -```python -from pydantic import SecretStr, PostgresDsn -from pydantic_settings import BaseSettings, SettingsConfigDict - -class Settings(BaseSettings): - model_config = SettingsConfigDict( - env_file=".env", - env_nested_delimiter="__", - secrets_dir="/run/secrets", # Docker secrets - ) - - database_url: PostgresDsn - secret_key: SecretStr - debug: bool = False - -settings = Settings() -``` - ---- - -## Critical anti-patterns to avoid - -**Never share AsyncSession between concurrent tasks**: -```python -# WRONG -await asyncio.gather(process(session, a), process(session, b)) - -# CORRECT - each task gets its own session -async def process_item(item): - async with SessionLocal() as session: - ... -await asyncio.gather(process_item(a), process_item(b)) -``` - -**Don't use legacy Query API**: -```python -# DEPRECATED -session.query(User).filter(User.id == 1).first() - -# CORRECT -await session.execute(select(User).where(User.id == 1)) -result.scalars().first() -``` - -**Always dispose engine on shutdown**: -```python -await engine.dispose() # In lifespan shutdown -``` - -**Don't forget pool_pre_ping in production**—without it, your app will crash when PostgreSQL restarts and connections go stale. - -## Conclusion - -Building production FastAPI applications in 2025 requires embracing SQLAlchemy 2.0's typed, async-native syntax while avoiding legacy patterns that still pollute many tutorials. The tooling landscape has consolidated: **uv** for package management, **Ruff** for linting, **PyJWT** for tokens, and **Argon2id** for passwords. - -The most impactful changes are architectural: using domain-driven project structure, implementing proper connection pool sizing for your scale, and understanding that async sessions cannot be shared or lazily loaded. 
UUID v7's addition to PostgreSQL 18 finally makes distributed primary keys performant, and the expand-contract migration pattern enables zero-downtime deployments. - -Focus on these production fundamentals rather than chasing every new library—the core stack of FastAPI, SQLAlchemy 2.0+, asyncpg, and PostgreSQL is mature and battle-tested. diff --git a/docs/research/PYDANTIC.md b/docs/research/PYDANTIC.md deleted file mode 100644 index 34d0232..0000000 --- a/docs/research/PYDANTIC.md +++ /dev/null @@ -1,895 +0,0 @@ -# Production Pydantic v2 Guide for FastAPI (2025) - -Pydantic v2 represents a complete architectural overhaul from v1, with a Rust-powered core (`pydantic-core`) that delivers 5-17x performance improvements. The API has been redesigned around `Annotated` types, new validator patterns, and explicit serialization modes. This guide focuses on 2025 best practices—not tutorial basics—with emphasis on patterns that scale in production FastAPI applications. - ---- - -## Core v2 Changes That Actually Matter - -### The Rust Core Changes Everything - -Pydantic v2's validation engine is written in Rust and exposed via PyO3. 
This isn't just "faster validation"—it fundamentally changes how you should think about data processing: - -```python -# v1 pattern (deprecated) -from pydantic import BaseModel, validator - -class User(BaseModel): - age: int - - @validator('age') - def check_age(cls, v): - if v < 18: - raise ValueError('too young') - return v - -# v2 pattern (correct) -from pydantic import BaseModel, field_validator - -class User(BaseModel): - age: int - - @field_validator('age') - @classmethod - def check_age(cls, v: int) -> int: - if v < 18: - raise ValueError('too young') - return v -``` - -**Key differences:** -- `@field_validator` replaces `@validator`—the signature is now type-safe -- Validators are explicitly `@classmethod` decorated -- No more `each_item` keyword; use `Annotated` for collection items - -### Method Name Changes You Must Know - -| v1 Method | v2 Replacement | Purpose | -|-----------|----------------|---------| -| `.dict()` | `.model_dump()` | Serialize to dict | -| `.json()` | `.model_dump_json()` | Serialize to JSON string | -| `parse_obj()` | `.model_validate()` | Validate dict/object | -| `parse_raw()` | `.model_validate_json()` | Validate JSON string | -| `from_orm()` | `.model_validate()` + `from_attributes=True` | Load from ORM models | - -**Critical:** The old v1 methods still exist but are deprecated. Using them in new code is a mistake. - ---- - -## Annotated: The New Foundation - -`Annotated` is now the primary way to attach metadata to fields. 
This creates reusable, composable type aliases: - -```python -from typing import Annotated -from pydantic import BaseModel, Field, AfterValidator - -# Reusable validated types -PositiveInt = Annotated[int, Field(gt=0)] -NonEmptyStr = Annotated[str, Field(min_length=1)] -EmailStr = Annotated[str, Field(pattern=r'^[\w\.-]+@[\w\.-]+\.\w+$')] - -# Composable validation -def check_even(v: int) -> int: - if v % 2 != 0: - raise ValueError('must be even') - return v - -EvenPositiveInt = Annotated[int, Field(gt=0), AfterValidator(check_even)] - -class Product(BaseModel): - quantity: PositiveInt - sku: NonEmptyStr - batch_size: EvenPositiveInt -``` - -**Why this matters:** Type aliases centralize validation logic. Change `PositiveInt` once, and every field using it updates automatically. - -### Field Metadata vs Validation - -```python -from pydantic import Field - -class Article(BaseModel): - # Metadata: describes the field, doesn't validate - title: str = Field( - description="Article title", - examples=["How to Scale FastAPI"], - json_schema_extra={"maxLength": 200} - ) - - # Constraints: actually validate - word_count: int = Field(gt=0, le=10000) - tags: list[str] = Field(min_length=1, max_length=10) -``` - -**Metadata** appears in OpenAPI/JSON schema but doesn't enforce anything. **Constraints** raise `ValidationError` on violation. - ---- - -## Validators: The Four Modes - -Pydantic v2 has four validator types with explicit execution order: - -### 1. Before Validators - -Run **before** Pydantic's type coercion. 
Handle raw input: - -```python -from pydantic import BaseModel, field_validator - -class Event(BaseModel): - timestamp: datetime - - @field_validator('timestamp', mode='before') - @classmethod - def parse_timestamp(cls, v): - # v could be str, int, datetime, anything - if isinstance(v, str): - return datetime.fromisoformat(v) - if isinstance(v, int): - return datetime.fromtimestamp(v) - return v # Let Pydantic handle it -``` - -**Use when:** You need to preprocess raw data before type checking. - -### 2. After Validators (default) - -Run **after** Pydantic validates the type. Input is guaranteed to match the type annotation: - -```python -from pydantic import field_validator - -class User(BaseModel): - username: str - - @field_validator('username') # mode='after' is default - @classmethod - def normalize_username(cls, v: str) -> str: - return v.lower().strip() -``` - -**Use when:** You need to transform already-validated data. - -### 3. Wrap Validators - -Run **around** Pydantic's validation—you control whether to call the handler: - -```python -from pydantic import WrapValidator, ValidationError -from typing import Annotated - -def truncate_or_fail(v, handler): - try: - return handler(v) # Try normal validation - except ValidationError as e: - if e.errors()[0]['type'] == 'string_too_long': - return v[:100] # Truncate instead of failing - raise - -LenientStr = Annotated[str, Field(max_length=100), WrapValidator(truncate_or_fail)] - -class Post(BaseModel): - title: LenientStr -``` - -**Use when:** You need to catch validation errors and recover. - -### 4. Plain Validators - -Replace Pydantic's validation entirely. 
**Dangerous** but sometimes necessary: - -```python -from pydantic import PlainValidator -from typing import Annotated - -def custom_validation(v): - # No type checking happens—you're responsible for everything - return v * 2 - -WeirdInt = Annotated[int, PlainValidator(custom_validation)] - -class Model(BaseModel): - number: WeirdInt - -print(Model(number='5').number) # '55' (a str, despite the int annotation!) -print(Model(number='invalid').number) # 'invalidinvalid' (no error!) -``` - -**Use when:** Pydantic's type system can't express your validation logic. - -### Annotated Validators vs Decorator Syntax - -Both styles work; choose based on reusability: - -```python -# Annotated: reusable across models -AgeInt = Annotated[int, AfterValidator(lambda v: v if v >= 18 else 18)] - -class User(BaseModel): - age: AgeInt - -# Decorator: model-specific logic -class User(BaseModel): - age: int - - @field_validator('age') - @classmethod - def min_age(cls, v: int) -> int: - return v if v >= 18 else 18 -``` - -### Accessing Other Fields in Validators - -Use `ValidationInfo` to access previously validated fields: - -```python -from pydantic import field_validator, ValidationInfo - -class PasswordReset(BaseModel): - password: str - password_confirm: str - - @field_validator('password_confirm') - @classmethod - def passwords_match(cls, v: str, info: ValidationInfo) -> str: - if 'password' in info.data and v != info.data['password']: - raise ValueError('passwords do not match') - return v -``` - -**Critical:** Field order matters. `password` must be defined before `password_confirm`. 
- ---- - -## Model Serialization: Python vs JSON Mode - -Pydantic v2 has two serialization modes with different outputs: - -```python -from datetime import datetime -from pydantic import BaseModel - -class Event(BaseModel): - name: str - occurred_at: datetime - tags: set[str] - -event = Event(name='Launch', occurred_at='2025-01-01', tags={'python', 'api'}) - -# Python mode: preserves Python types -print(event.model_dump()) -# {'name': 'Launch', -# 'occurred_at': datetime.datetime(2025, 1, 1, 0, 0), -# 'tags': {'python', 'api'}} - -# JSON mode: only JSON-compatible types -print(event.model_dump(mode='json')) -# {'name': 'Launch', -# 'occurred_at': '2025-01-01T00:00:00', -# 'tags': ['api', 'python']} -``` - -**Use JSON mode when:** -- Sending data to FastAPI response models -- Storing in Redis/DynamoDB (requires JSON) -- Passing to `json.dumps()` yourself - -**Use Python mode when:** -- Manipulating data in Python -- Passing to other Python functions -- Need access to Python objects (datetime, UUID, etc.) - -### The serialize_as_any Gotcha - -In v2, nested subclass fields are serialized according to the annotated type, **not the runtime type**: - -```python -class User(BaseModel): - name: str - -class AdminUser(User): - permissions: list[str] - -class Company(BaseModel): - employees: list[User] - -admin = AdminUser(name='Alice', permissions=['admin']) -company = Company(employees=[admin]) - -# v2 default: only User fields serialized -print(company.model_dump()) -# {'employees': [{'name': 'Alice'}]} # permissions missing! 
- -# To get v1 behavior, use serialize_as_any -print(company.model_dump(serialize_as_any=True)) -# {'employees': [{'name': 'Alice', 'permissions': ['admin']}]} -``` - -**Fix:** Annotate with `SerializeAsAny` if you need duck-typing: - -```python -from pydantic import SerializeAsAny - -class Company(BaseModel): - employees: list[SerializeAsAny[User]] # Now includes subclass fields -``` - ---- - -## Computed Fields: Properties That Serialize - -Computed fields are properties that automatically appear in `model_dump()`: - -```python -from pydantic import BaseModel, computed_field - -class Rectangle(BaseModel): - width: float - height: float - - @computed_field - @property - def area(self) -> float: - return self.width * self.height - -rect = Rectangle(width=10, height=5) -print(rect.area) # 50.0 -print(rect.model_dump()) # {'width': 10.0, 'height': 5.0, 'area': 50.0} -``` - -**Critical differences from regular properties:** -- Included in `model_dump()` and `model_dump_json()` -- Appear in JSON schema (marked `readOnly`) -- Cannot be set during initialization -- Only support `alias`, not `validation_alias`/`serialization_alias` - -### Computed Fields with Setters - -```python -from pydantic import computed_field - -class Square(BaseModel): - width: float - - @computed_field - @property - def area(self) -> float: - return self.width ** 2 - - @area.setter - def area(self, value: float): - self.width = value ** 0.5 - -square = Square(width=4) -square.area = 25 # Sets width to 5.0 -print(square.model_dump()) # {'width': 5.0, 'area': 25.0} -``` - -**Use case:** Exposing derived data in APIs without storing it. 
- ---- - -## ConfigDict: Model-Level Configuration - -`ConfigDict` replaces the v1 `Config` class: - -```python -from pydantic import BaseModel, ConfigDict, Field - -class StrictModel(BaseModel): - model_config = ConfigDict( - strict=True, # No type coercion - frozen=True, # Immutable (replaces allow_mutation=False) - validate_assignment=True, # Validate on attribute assignment - extra='forbid', # Reject extra fields - from_attributes=True, # Enable ORM mode - str_strip_whitespace=True, # Strip strings automatically - use_attribute_docstrings=True, # Use docstrings as descriptions - ) - - age: int - """User's age in years""" -``` - -### Strict Mode Explained - -By default, Pydantic coerces types (lax mode): - -```python -class User(BaseModel): - age: int - -print(User(age='25').age) # 25 (int, coerced from str) -``` - -Strict mode disables coercion: - -```python -class User(BaseModel): - model_config = ConfigDict(strict=True) - age: int - -User(age='25') # ValidationError: Input should be a valid integer -``` - -**Per-field override:** - -```python -class User(BaseModel): - model_config = ConfigDict(strict=True) - age: int - name: str = Field(strict=False) # Only this field allows coercion -``` - -### from_attributes: The New ORM Mode - -Replaces v1's `orm_mode=True`: - -```python -from pydantic import ConfigDict -from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column - -class User(DeclarativeBase): - __tablename__ = 'users' - id: Mapped[int] = mapped_column(primary_key=True) - email: Mapped[str] - -class UserSchema(BaseModel): - model_config = ConfigDict(from_attributes=True) - id: int - email: str - -# Load from SQLAlchemy model -db_user = session.get(User, 1) -user_data = UserSchema.model_validate(db_user) -``` - -**Critical:** Without `from_attributes=True`, you can only validate dicts. 
- ---- -## TypeAdapter: Validation for Non-Model Types - -`TypeAdapter` validates any type, not just `BaseModel`: - -```python -from pydantic import TypeAdapter, ValidationError -from typing import TypedDict - -class UserDict(TypedDict): - name: str - age: int - -UserListAdapter = TypeAdapter(list[UserDict]) - -# Validate and coerce -data = [{'name': 'Alice', 'age': '30'}] -users = UserListAdapter.validate_python(data) -print(users) # [{'name': 'Alice', 'age': 30}] - -# Serialize -json_bytes = UserListAdapter.dump_json(users) -``` - -**Use cases:** -- Validating API responses (don't want full `BaseModel`) -- Working with `TypedDict` or dataclasses -- Generic container types (`list[int]`, `dict[str, float]`) - -### TypeAdapter for Constrained Types - -```python -from pydantic import Field -from typing import Annotated - -PositiveInt = Annotated[int, Field(gt=0)] -adapter = TypeAdapter(PositiveInt) - -adapter.validate_python(5) # OK -adapter.validate_python(-5) # ValidationError -``` - ---- - -## Discriminated Unions: Type-Safe Polymorphism - -Discriminated unions are FastAPI's secret weapon for handling polymorphic request/response data: - -```python -from typing import Literal, Union -from pydantic import BaseModel, Field - -class Cat(BaseModel): - type: Literal['cat'] - meows: int - -class Dog(BaseModel): - type: Literal['dog'] - barks: int - -class Pet(BaseModel): - pet: Union[Cat, Dog] = Field(discriminator='type') - -# Efficient validation—checks 'type' field first -pet = Pet.model_validate({'pet': {'type': 'cat', 'meows': 5}}) -print(type(pet.pet)) # <class '__main__.Cat'> -``` - -### How Discriminators Improve Performance - -Without discriminator, Pydantic tries each union member sequentially: - -```python -# Slow: tries Cat, fails, tries Dog -pet: Union[Cat, Dog] -``` - -With discriminator, Pydantic jumps directly to the correct type: - -```python -# Fast: reads 'type' field, validates only Dog -pet: Union[Cat, Dog] = Field(discriminator='type') -``` - -**Production win:** On 
unions with 5+ members, discriminators are 10x+ faster. - -### Nested Discriminated Unions - -```python -class BlackCat(BaseModel): - species: Literal['cat'] - color: Literal['black'] - black_name: str - -class WhiteCat(BaseModel): - species: Literal['cat'] - color: Literal['white'] - white_name: str - -Cat = Annotated[Union[BlackCat, WhiteCat], Field(discriminator='color')] - -class Dog(BaseModel): - species: Literal['dog'] - name: str - -Pet = Annotated[Union[Cat, Dog], Field(discriminator='species')] -``` - -### Fallback Pattern for Unknown Types - -Perfect for webhook handlers that must not crash: - -```python -from typing import Annotated - -class GenericEvent(BaseModel): - type: str - data: dict - -class UserCreated(BaseModel): - type: Literal['user.created'] - user_id: int - -KnownEvents = Annotated[Union[UserCreated], Field(discriminator='type')] - -# Outer union tries discriminated first, falls back to generic -Event = Annotated[ - Union[KnownEvents, GenericEvent], - Field(union_mode='left_to_right') -] - -# Unknown type → GenericEvent (doesn't crash) -Event.model_validate({'type': 'unknown', 'data': {}}) -``` - -**Use case:** Third-party APIs that add new event types without notice. 
- ---- - -## Alias Patterns: validation_alias vs serialization_alias - -Separate aliases for input and output: - -```python -from pydantic import BaseModel, Field - -class User(BaseModel): - # Accept both snake_case and camelCase on input - first_name: str = Field(validation_alias='firstName') - - # Always output as camelCase - last_name: str = Field( - validation_alias='lastName', - serialization_alias='lastName' - ) - -# Validate with camelCase -user = User(firstName='John', lastName='Doe') - -# Serialize with snake_case (default) or camelCase -print(user.model_dump()) # {'first_name': 'John', 'last_name': 'Doe'} -print(user.model_dump(by_alias=True)) # {'first_name': 'John', 'lastName': 'Doe'} -``` - -### AliasPath: Extract Nested Fields - -```python -from pydantic import AliasPath - -class User(BaseModel): - first_name: str = Field(validation_alias=AliasPath('name', 'first')) - last_name: str = Field(validation_alias=AliasPath('name', 'last')) - city: str = Field(validation_alias=AliasPath('address', 'city')) - -# Validate nested structure -user = User.model_validate({ - 'name': {'first': 'Jane', 'last': 'Doe'}, - 'address': {'city': 'NYC', 'zip': '10001'} -}) -print(user.model_dump()) -# {'first_name': 'Jane', 'last_name': 'Doe', 'city': 'NYC'} -``` - -**Use case:** Flattening deeply nested API responses. - -### AliasChoices: Multiple Valid Aliases - -```python -from pydantic import AliasChoices - -class User(BaseModel): - username: str = Field( - validation_alias=AliasChoices('username', 'user', 'login') - ) - -# All of these work -User(username='alice') -User(user='alice') -User(login='alice') -``` - -**Use case:** Supporting legacy API versions during migration. 
- -### Global Alias Generators - -```python -from pydantic import ConfigDict, AliasGenerator - -def to_camel(field: str) -> str: - components = field.split('_') - return components[0] + ''.join(x.title() for x in components[1:]) - -class User(BaseModel): - model_config = ConfigDict( - alias_generator=AliasGenerator( - validation_alias=str.lower, # Accept any case - serialization_alias=to_camel # Output camelCase - ) - ) - - first_name: str - last_name: str - -user = User(FIRST_NAME='John', last_name='Doe') -print(user.model_dump(by_alias=True)) # {'firstName': 'John', 'lastName': 'Doe'} -``` - ---- - -## FastAPI Integration Patterns - -### Request/Response Schema Separation - -**Anti-pattern:** Using the same model for input and output: - -```python -# DON'T DO THIS -class User(BaseModel): - id: int # Who sets this on creation? - email: str - password: str # Leaked in responses! -``` - -**Correct pattern:** - -```python -class UserCreate(BaseModel): - email: str - password: str = Field(min_length=8) - -class UserUpdate(BaseModel): - email: str | None = None - password: str | None = Field(None, min_length=8) - -class UserResponse(BaseModel): - model_config = ConfigDict(from_attributes=True) - id: int - email: str - created_at: datetime - -@router.post('/users', response_model=UserResponse) -async def create_user(data: UserCreate): - user = User(email=data.email, password=hash_password(data.password)) - session.add(user) - await session.commit() - return user # FastAPI uses response_model to serialize -``` - -### The response_model Double Validation Trap - -FastAPI validates your return value twice: - -```python -@router.get('/', response_model=UserResponse) -def get_user(): - user = UserResponse(id=1, email='test@example.com') # Validation #1 - return user # Validation #2 (FastAPI converts to dict, validates again) -``` - -**This is intentional:** FastAPI guarantees `response_model` matches output, even if you return the wrong type. 
- -**Performance note:** If you return the exact `response_model` type, the cost is minimal. If you return a dict/ORM object, full validation runs. - -### Depend on Pydantic for FastAPI Dependencies - -```python -from pydantic import BaseModel, Field -from typing import Annotated -from fastapi import Depends, Query - -class Pagination(BaseModel): - page: int = Field(1, ge=1) - size: int = Field(20, ge=1, le=100) - -def get_pagination( - page: Annotated[int, Query(ge=1)] = 1, - size: Annotated[int, Query(ge=1, le=100)] = 20 -) -> Pagination: - return Pagination(page=page, size=size) - -@router.get('/items') -def list_items(pagination: Annotated[Pagination, Depends(get_pagination)]): - # pagination is validated - return {'page': pagination.page, 'size': pagination.size} -``` - ---- - -## Anti-Patterns to Avoid - -### Don't Use mutable Defaults - -```python -# WRONG -class Team(BaseModel): - members: list[str] = [] # Shared between instances! - -# CORRECT -class Team(BaseModel): - members: list[str] = Field(default_factory=list) -``` - -### Don't Validate in __init__ - -```python -# WRONG -class User(BaseModel): - email: str - - def __init__(self, **data): - super().__init__(**data) - # Custom logic here breaks validation context - self.email = self.email.lower() - -# CORRECT -class User(BaseModel): - email: str - - @field_validator('email') - @classmethod - def normalize_email(cls, v: str) -> str: - return v.lower() -``` - -### Don't Forget mode='json' for API Responses - -```python -# WRONG - datetime not JSON-serializable -return user.model_dump() # Returns datetime objects - -# CORRECT -return user.model_dump(mode='json') # Returns ISO strings -``` - -### Don't Use Optional Without Default - -```python -# WRONG - Optional doesn't add a default! -class User(BaseModel): - nickname: Optional[str] # Still required! 
- -# CORRECT -class User(BaseModel): - nickname: str | None = None # Now optional -``` - ---- - -## Performance Optimization - -### Precompile Models with Large Field Counts - -For models with 50+ fields, validation overhead stacks up: - -```python -# Reduce overhead by disabling extra checks -class LargeModel(BaseModel): - model_config = ConfigDict( - validate_assignment=False, # Don't re-validate on setattr - validate_default=False, # Skip default value validation - ) -``` - -### Use model_construct for Trusted Data - -Skip validation entirely when data is already validated (e.g., from database): - -```python -# Slow: full validation -user = UserResponse(**db_row) - -# Fast: no validation -user = UserResponse.model_construct(**db_row) -``` - -**Warning:** Only use with 100% trusted data. One bad field corrupts your model. - -### Cache TypeAdapters - -Creating `TypeAdapter` instances is expensive. Cache them: - -```python -from functools import lru_cache - -@lru_cache -def get_adapter(type_: type) -> TypeAdapter: - return TypeAdapter(type_) - -# Reuse adapter -adapter = get_adapter(list[int]) -adapter.validate_python([1, 2, 3]) -``` - ---- - -## Migration Checklist from v1 - -If you're migrating existing code: - -- [ ] Replace `.dict()` with `.model_dump()` -- [ ] Replace `.json()` with `.model_dump_json()` -- [ ] Replace `@validator` with `@field_validator` -- [ ] Replace `Config` class with `ConfigDict` -- [ ] Replace `orm_mode=True` with `from_attributes=True` -- [ ] Update `root_validator` to `@model_validator` -- [ ] Check for `Optional` without defaults -- [ ] Add `mode='json'` where needed for serialization -- [ ] Review discriminated unions for performance -- [ ] Test strict mode on critical endpoints - ---- - -## Conclusion - -Pydantic v2's Rust core and redesigned API make it the definitive choice for FastAPI data validation in 2025. 
The migration from v1 requires learning new patterns—`Annotated` types, validator modes, explicit serialization—but the performance gains and improved type safety justify the investment. - -Focus on these production-critical patterns: -1. Use `Annotated` for reusable validation logic -2. Separate request/response schemas (never share models) -3. Apply discriminated unions to speed up polymorphic validation -4. Choose `mode='json'` for API responses, `mode='python'` for internal processing -5. Use `from_attributes=True` when working with ORMs - -The v1 → v2 transition is a one-way door. The old patterns won't receive new features, and some are already deprecated. Build new applications on v2 patterns from day one. diff --git a/docs/research/PYPROJECT.md b/docs/research/PYPROJECT.md deleted file mode 100644 index d736de2..0000000 --- a/docs/research/PYPROJECT.md +++ /dev/null @@ -1,57 +0,0 @@ -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[project] -name = "spam-eggs" -version = "2020.0.0" -dependencies = [ - "httpx", - "gidgethub[httpx]>4.0.0", - "django>2.1; os_name != 'nt'", - "django>2.0; os_name == 'nt'", -] -requires-python = ">=3.8" -authors = [ - {name = "Pradyun Gedam", email = "pradyun@example.com"}, - {name = "Tzu-Ping Chung", email = "tzu-ping@example.com"}, - {name = "Another person"}, - {email = "different.person@example.com"}, -] -maintainers = [ - {name = "Brett Cannon", email = "brett@example.com"} -] -description = "Lovely Spam! Wonderful Spam!" 
-readme = "README.rst" -license = "MIT" -license-files = ["LICEN[CS]E.*"] -keywords = ["egg", "bacon", "sausage", "tomatoes", "Lobster Thermidor"] -classifiers = [ - "Development Status :: 4 - Beta", - "Programming Language :: Python" -] - -[project.optional-dependencies] -gui = ["PyQt5"] -cli = [ - "rich", - "click", -] - -[project.urls] -Homepage = "https://example.com" -Documentation = "https://readthedocs.org" -Repository = "https://github.com/me/spam.git" -"Bug Tracker" = "https://github.com/me/spam/issues" -Changelog = "https://github.com/me/spam/blob/master/CHANGELOG.md" - -[project.scripts] -spam-cli = "spam:main_cli" - -[project.gui-scripts] -spam-gui = "spam:main_gui" - -[project.entry-points."spam.magical"] -tomatoes = "spam:main_tomatoes" - ---- diff --git a/docs/research/PYTEST.md b/docs/research/PYTEST.md deleted file mode 100644 index 4f110db..0000000 --- a/docs/research/PYTEST.md +++ /dev/null @@ -1,389 +0,0 @@ -# Pytest Production Patterns for FastAPI + Async SQLAlchemy (2025) - -**The definitive testing architecture for async-first Python applications combines session-scoped database engines, transaction-based isolation via SQLAlchemy 2.0's `join_transaction_mode`, Polyfactory for type-safe data generation, and pytest-asyncio in auto mode.** This approach delivers sub-second test isolation without recreating tables, handles explicit commits in application code gracefully, and scales to parallel execution with pytest-xdist. The key insight: structure fixtures as a hierarchy where expensive resources (engines, containers) live at session scope while per-test sessions use savepoint rollbacks for isolation. - ---- - -## Pytest 9.x arrives with native TOML and strict mode - -Pytest 9.0 (November 2025) introduces significant improvements over the 8.x series. The headline feature is **native TOML configuration** via `[tool.pytest]` instead of the legacy `[tool.pytest.ini_options]` INI-compatibility mode. 
This enables proper TOML arrays and typed configuration: - -```toml -# pyproject.toml (pytest 9.0+) -[tool.pytest] -minversion = "9.0" -testpaths = ["tests"] -addopts = ["-ra", "--strict-markers", "--import-mode=importlib"] -asyncio_mode = "auto" -asyncio_default_fixture_loop_scope = "function" -markers = [ - "slow: marks tests as slow", - "integration: integration tests requiring database", -] -strict = true # Enables all strictness options -``` - -The new **`strict = true`** option activates `strict_config`, `strict_markers`, `strict_parametrization_ids`, and `strict_xfail` simultaneously—essential for catching configuration errors in CI. Pytest 9.0 also adds **built-in subtests** (`pytest.Subtests`) for dynamic test generation when values aren't known at collection time, and **`pytest.RaisesGroup`** for testing Python 3.11+ `ExceptionGroup` exceptions. - -Breaking changes to note: Python **3.9 support dropped** in 9.0 (3.8 was dropped in 8.4), and test functions returning non-None or containing `yield` now fail explicitly rather than warning. The async behavior changed in 8.4—async tests without a plugin now fail immediately instead of being silently skipped. - ---- - -## pytest-asyncio configuration requires matching loop scopes - -The pytest-asyncio ecosystem underwent a major API revision from 0.23 through 1.0 (May 2025). The critical configuration decision is **asyncio_mode**: use `"auto"` for asyncio-only projects to avoid decorating every test and fixture; use `"strict"` only when coexisting with other async frameworks like trio. - -```toml -[tool.pytest.ini_options] -asyncio_mode = "auto" -asyncio_default_fixture_loop_scope = "function" -asyncio_default_test_loop_scope = "function" -``` - -The most common pitfall involves **scope mismatches**. 
Session-scoped async fixtures require session-scoped event loops: - -```python -# ❌ WRONG: Session fixture with function-scoped loop -@pytest_asyncio.fixture(scope="session") -async def db_engine(): # Will fail with "attached to different loop" - pass - -# ✅ CORRECT: Matching scopes -@pytest_asyncio.fixture(scope="session", loop_scope="session") -async def db_engine(): - engine = create_async_engine(DB_URL, poolclass=NullPool) - yield engine - await engine.dispose() -``` - -Note that **pytest-asyncio 1.0 removed the `event_loop` fixture entirely**—use `loop_scope` parameters instead. For fixtures, choose between `@pytest.fixture` (works in auto mode) and `@pytest_asyncio.fixture` (required in strict mode, explicit in either). Always use `NullPool` for async engines in tests to prevent connection leakage between tests. - ---- - -## Conftest architecture balances DRY principles with navigability - -The "fat conftest" approach works well when organized thoughtfully. **Root conftest.py should contain cross-cutting fixtures** (database engine, async client, authentication tokens) while **directory-specific conftest files handle overrides and specialized fixtures**. - -``` -tests/ -├── conftest.py # Root: engine, base client, auth fixtures -├── fixtures/ -│ ├── database.py # Complex DB setup logic -│ └── factories.py # Polyfactory definitions -├── unit/ -│ ├── conftest.py # Mocked DB, isolated fixtures -│ └── test_services.py -└── integration/ - ├── conftest.py # Real DB session override - └── test_api.py -``` - -Import shared fixture modules via `pytest_plugins` for explicit control: - -```python -# tests/conftest.py -pytest_plugins = [ - "tests.fixtures.database", - "tests.fixtures.factories", -] -``` - -**Fixture scopes should follow a clear hierarchy**: session scope for expensive resources (engines, Docker containers), function scope for test isolation. 
The key pattern is **session-scoped engine with function-scoped transactional sessions**: - -```python -@pytest.fixture(scope="session") -async def db_engine(): - engine = create_async_engine(DATABASE_URL, poolclass=NullPool) - async with engine.begin() as conn: - await conn.run_sync(Base.metadata.create_all) - yield engine - await engine.dispose() - -@pytest.fixture(scope="function") -async def db_session(db_engine): - async with db_engine.connect() as conn: - async with conn.begin() as trans: - session = AsyncSession( - bind=conn, - expire_on_commit=False, - join_transaction_mode="create_savepoint" # Critical! - ) - yield session - await session.close() - await trans.rollback() -``` - -The **`join_transaction_mode="create_savepoint"`** setting is the SQLAlchemy 2.0 solution for handling tested code that calls `session.commit()`—commits become savepoints within the outer transaction, which rolls back completely after the test. - ---- - -## Polyfactory outperforms Factory Boy for async stacks - -For FastAPI + async SQLAlchemy + Pydantic v2, **Polyfactory is the clear winner**. It provides native async support, automatic Pydantic constraint validation, and type-safe generics. Factory Boy requires third-party extensions (`async-factory-boy`) and manual workarounds for async operations. 
- -```python -from polyfactory.factories.sqlalchemy_factory import SQLAlchemyFactory - -class UserFactory(SQLAlchemyFactory[User]): - __model__ = User - __set_relationships__ = True - __async_session__ = None # Injected via fixture - -# Pydantic schema factory (respects constraints automatically) -from polyfactory.factories.pydantic_factory import ModelFactory - -class UserCreateFactory(ModelFactory[UserCreate]): - __model__ = UserCreate - __random_seed__ = 12345 # Deterministic output -``` - -Configure factories via a fixture to inject the async session: - -```python -@pytest.fixture(autouse=True) -def configure_factories(db_session): - UserFactory.__async_session__ = db_session - PostFactory.__async_session__ = db_session - -# Usage in tests -async def test_create_user(db_session): - user = await UserFactory.create_async() - assert user.id is not None - - # Batch creation - users = await UserFactory.create_batch_async(10) -``` - -For maximum performance with large datasets, bypass ORM and use SQLAlchemy Core: - -```python -from sqlalchemy import insert - -async def bulk_create_users(session: AsyncSession, count: int): - users_data = [UserFactory.build() for _ in range(count)] - values = [{"name": u.name, "email": u.email} for u in users_data] - await session.execute(insert(User), values) - await session.commit() -``` - -**Seed Faker for reproducible tests**—non-deterministic test data causes flaky tests: - -```python -@pytest.fixture(scope="session", autouse=True) -def faker_seed(): - return 12345 -``` - ---- - -## Database isolation through transactions beats recreation - -The production-ready pattern uses **testcontainers for ephemeral PostgreSQL** and **transaction rollback for per-test isolation**. Never use SQLite as a PostgreSQL substitute—JSONB operators, array types, and savepoint semantics differ fundamentally. 
- -```python -from testcontainers.postgres import PostgresContainer - -@pytest.fixture(scope="session") -def postgres_container(): - container = PostgresContainer("postgres:16-alpine") - container.start() - yield container - container.stop() - -@pytest.fixture(scope="session") -async def async_engine(postgres_container): - url = postgres_container.get_connection_url() - async_url = url.replace("postgresql://", "postgresql+asyncpg://") - - engine = create_async_engine(async_url, poolclass=NullPool) - async with engine.begin() as conn: - await conn.run_sync(Base.metadata.create_all) - yield engine - await engine.dispose() -``` - -For **Alembic migration testing**, use pytest-alembic with dedicated tests rather than running migrations for every test: - -```python -# conftest.py -@pytest.fixture -def alembic_config(): - return {"script_location": "alembic"} - -# Unit tests: use create_all() for speed -# Migration tests: use pytest-alembic's built-in tests -# - test_single_head_revision -# - test_upgrade (base→head) -# - test_up_down_consistency -``` - ---- - -## FastAPI testing combines async clients with dependency overrides - -Use `httpx.AsyncClient` with `ASGITransport` for async endpoint testing: - -```python -from httpx import ASGITransport, AsyncClient - -@pytest_asyncio.fixture -async def async_client(db_session): - def get_db_override(): - yield db_session - - app.dependency_overrides[get_db] = get_db_override - - async with AsyncClient( - transport=ASGITransport(app=app), - base_url="http://test" - ) as client: - yield client - - app.dependency_overrides.clear() -``` - -**Authentication fixtures** should cover valid, expired, and invalid tokens: - -```python -@pytest.fixture -def access_token(test_user): - return create_access_token(user_id=test_user.id, expires_delta=timedelta(hours=1)) - -@pytest.fixture -def expired_token(test_user): - return create_access_token(user_id=test_user.id, expires_delta=timedelta(seconds=-1)) - -@pytest.fixture -def 
authenticated_client(async_client, access_token): - async_client.headers["Authorization"] = f"Bearer {access_token}" - return async_client -``` - ---- - -## Essential plugins and parallel execution strategy - -The 2025 production stack requires these versions: - -```toml -[project.optional-dependencies] -test = [ - "pytest>=9.0.0", - "pytest-asyncio>=1.0.0", - "pytest-cov>=7.0.0", - "pytest-xdist>=3.8.0", - "pytest-mock>=3.12.0", - "httpx>=0.27.0", - "polyfactory>=2.0.0", - "testcontainers>=4.0.0", -] -``` - -For **parallel execution with pytest-xdist**, use the `worksteal` scheduler for tests with varying durations: - -```bash -pytest -n auto --dist=worksteal -``` - -When running parallel tests against databases, each worker needs isolation. With testcontainers, **create separate containers per worker**: - -```python -@pytest.fixture(scope="session") -def database_url(worker_id): - if worker_id == "master": - return create_single_container() - return create_container_for_worker(worker_id) -``` - ---- - -## Complete conftest.py reference implementation - -```python -# tests/conftest.py -import asyncio -import pytest -from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker -from sqlalchemy.pool import NullPool -from httpx import ASGITransport, AsyncClient -from testcontainers.postgres import PostgresContainer - -from app.main import app -from app.database import Base, get_db - -pytest_plugins = ["tests.fixtures.factories"] - -# Event loop (session-scoped for session fixtures) -@pytest.fixture(scope="session") -def event_loop(): - loop = asyncio.get_event_loop_policy().new_event_loop() - yield loop - loop.close() - -# PostgreSQL container -@pytest.fixture(scope="session") -def postgres_container(): - container = PostgresContainer("postgres:16-alpine") - container.start() - yield container - container.stop() - -# Async engine (session-scoped) -@pytest.fixture(scope="session") -async def async_engine(postgres_container): - url = 
postgres_container.get_connection_url() - async_url = url.replace("postgresql://", "postgresql+asyncpg://") - async_url = async_url.replace("psycopg2", "asyncpg") - - engine = create_async_engine(async_url, poolclass=NullPool, echo=False) - async with engine.begin() as conn: - await conn.run_sync(Base.metadata.create_all) - yield engine - await engine.dispose() - -# Per-test session with transaction rollback -@pytest.fixture(scope="function") -async def db_session(async_engine): - async with async_engine.connect() as conn: - async with conn.begin() as trans: - session = AsyncSession( - bind=conn, - expire_on_commit=False, - join_transaction_mode="create_savepoint" - ) - yield session - await session.close() - await trans.rollback() - -# Async test client -@pytest.fixture -async def async_client(db_session): - def get_db_override(): - yield db_session - - app.dependency_overrides[get_db] = get_db_override - async with AsyncClient( - transport=ASGITransport(app=app), - base_url="http://test" - ) as client: - yield client - app.dependency_overrides.clear() - -# Factory configuration -@pytest.fixture(autouse=True) -def configure_factories(db_session): - from tests.fixtures.factories import UserFactory, PostFactory - UserFactory.__async_session__ = db_session - PostFactory.__async_session__ = db_session - -# Faker seed for reproducibility -@pytest.fixture(scope="session", autouse=True) -def faker_seed(): - return 12345 -``` - ---- - -## Conclusion - -The modern pytest architecture for async FastAPI applications centers on **three key patterns**: transactional isolation via `join_transaction_mode="create_savepoint"`, Polyfactory for type-safe async data generation, and testcontainers for production-parity database testing. Configure pytest-asyncio in auto mode with matching loop scopes, structure fixtures hierarchically (session engine → function session), and embrace pytest 9.0's strict mode for early error detection. 
This architecture scales from single-threaded development to parallel CI execution while maintaining sub-second test isolation—the foundation for a productive TDD workflow with FastAPI. diff --git a/docs/research/REACT-19.md b/docs/research/REACT-19.md deleted file mode 100644 index 85ac773..0000000 --- a/docs/research/REACT-19.md +++ /dev/null @@ -1,734 +0,0 @@ -# React 19 Production Architecture Guide for Vite SPA (2025) - -Building a modern React 19 application with Vite, TanStack Query, Zustand, and React Router requires understanding significant new features and architectural patterns that have matured in 2025. This guide provides senior-level recommendations for your FastAPI template based on comprehensive research of current best practices. - -## React 19 delivers production-ready improvements - -React 19 (stable since December 2024) introduces **Actions** for async data mutations, new hooks (`useActionState`, `useOptimistic`, `use()`), and refs as regular props without `forwardRef`. The React Compiler reached v1.0 in October 2025 and is now production-ready, automatically handling memoization that developers previously managed with `useMemo` and `useCallback`. - -The most impactful changes for your SPA architecture are the new form handling primitives and the elimination of `forwardRef` boilerplate. Server Components remain framework-only (Next.js, Remix) and aren't relevant for Vite SPAs. - -### Key React 19 features to adopt immediately - -**Actions and async transitions** fundamentally change mutation handling. 
The `useTransition` hook now supports async functions directly: - -```tsx -function UpdateProfile() { - const [isPending, startTransition] = useTransition(); - - const handleSubmit = () => { - startTransition(async () => { - const error = await updateProfile(formData); - if (error) setError(error); - }); - }; -} -``` - -**`useActionState`** (renamed from `useFormState`) provides built-in pending states and error handling for forms: - -```tsx -const [error, submitAction, isPending] = useActionState( - async (prevState, formData) => { - const result = await createItem(formData.get('name')); - return result.error ?? null; - }, - null -); - -return ( -
- - - {error &&

{error}

} -
-); -``` - -**`useOptimistic`** enables instant UI feedback while async operations complete—React automatically reverts on failure: - -```tsx -const [optimisticItems, addOptimistic] = useOptimistic(items, - (state, newItem) => [...state, { ...newItem, pending: true }] -); -``` - -**Refs as props** eliminates `forwardRef` boilerplate entirely: - -```tsx -// React 19 - ref is just a prop now -function MyInput({ placeholder, ref }: { placeholder: string; ref?: Ref }) { - return ; -} -``` - -### React Compiler is production-ready - -The React Compiler v1.0 (October 2025) automatically adds memoization at build time, eliminating most manual `useMemo`, `useCallback`, and `React.memo` usage. For Vite, enable it via Babel: - -```ts -// vite.config.ts -export default defineConfig({ - plugins: [ - react({ - babel: { - plugins: ['babel-plugin-react-compiler'], - }, - }), - ], -}); -``` - -Run `npx react-compiler-healthcheck@latest` before enabling—your code must follow React's rules (pure components, hooks rules). **For new projects, enable the compiler. For existing code with manual memoization, keep existing optimizations temporarily** while the compiler handles new code. - ---- - -## Entry point and provider architecture - -### main.tsx best practices - -StrictMode has **zero performance impact in production**—it only runs development checks. Always enable it. 
React 19 adds new error handling callbacks to `createRoot`: - -```tsx -// main.tsx -import { StrictMode } from 'react'; -import { createRoot } from 'react-dom/client'; -import { ErrorBoundary } from 'react-error-boundary'; -import App from './App'; -import './styles.css'; - -// Initialize monitoring BEFORE React renders -import { initMonitoring } from './lib/monitoring'; -initMonitoring(); - -const root = createRoot(document.getElementById('root')!, { - // React 19: new error callbacks - onUncaughtError: (error, info) => { - monitoring.captureError(error, { componentStack: info.componentStack }); - }, - onCaughtError: (error, info) => { - monitoring.captureError(error, { severity: 'warning' }); - }, -}); - -root.render( - - - - - -); - -// Service worker registration after render -if ('serviceWorker' in navigator && import.meta.env.PROD) { - import('virtual:pwa-register').then(({ registerSW }) => { - registerSW({ immediate: true }); - }); -} -``` - -### Provider ordering matters - -Providers can only access context from providers **above** them in the tree. The recommended order (outside → inside): QueryClient → Router → Auth → Theme → Error Boundary → Suspense. - -```tsx -// App.tsx -import { QueryClient, QueryClientProvider } from '@tanstack/react-query'; -import { RouterProvider, createBrowserRouter } from 'react-router-dom'; -import { Toaster } from 'sonner'; -import { routes } from './routes'; - -const queryClient = new QueryClient({ - defaultOptions: { - queries: { staleTime: 60_000 }, - }, -}); - -const router = createBrowserRouter(routes); - -export default function App() { - return ( - - - - - ); -} -``` - -**QueryClientProvider wraps Router** so route loaders can access the query client for prefetching. Toast components stay outside error boundaries so they remain functional when errors occur. 
- -### Theme and sidebar state belongs in Zustand - -For frequently-changing UI state like theme or sidebar collapse, **Zustand outperforms Context** with selective re-renders and built-in persistence: - -```tsx -// stores/ui.store.ts -import { create } from 'zustand'; -import { persist } from 'zustand/middleware'; - -interface UIStore { - theme: 'light' | 'dark' | 'system'; - sidebarCollapsed: boolean; - setTheme: (theme: UIStore['theme']) => void; - toggleSidebar: () => void; -} - -export const useUIStore = create()( - persist( - (set) => ({ - theme: 'system', - sidebarCollapsed: false, - setTheme: (theme) => set({ theme }), - toggleSidebar: () => set((s) => ({ sidebarCollapsed: !s.sidebarCollapsed })), - }), - { name: 'ui-storage' } - ) -); -``` - ---- - -## Shell architecture with React Router layout routes - -Use React Router v6+ **layout routes** (pathless routes with `element` and `children`) rather than manual shell wrapper components. This integrates with data loading, provides automatic `Outlet` rendering, and enables multiple distinct layouts. - -```tsx -// routes/index.tsx -import { createBrowserRouter } from 'react-router-dom'; - -export const router = createBrowserRouter([ - // Auth layout (centered, no sidebar) - { - element: , - children: [ - { path: '/login', lazy: () => import('@/pages/auth/Login') }, - { path: '/register', lazy: () => import('@/pages/auth/Register') }, - ], - }, - - // Protected app layout (sidebar + header) - { - element: , - children: [{ - element: , - children: [ - { path: '/', lazy: () => import('@/pages/Dashboard') }, - { path: '/settings', lazy: () => import('@/pages/Settings') }, - { path: '*', element: }, - ], - }], - }, -]); -``` - -### The AppShell component - -```tsx -// layouts/AppShell.tsx -import { Outlet, ScrollRestoration } from 'react-router-dom'; -import { Suspense } from 'react'; -import { ErrorBoundary } from 'react-error-boundary'; - -export function AppShell() { - return ( -
- -
-
-
- - }> - - - -
-
- location.pathname} /> -
- ); -} -``` - -**Shell is inside routes** (as a layout route element) so it can access router context, use `Outlet` for children, and different routes can have different shells. - ---- - -## Protected routes with auth state - -The **wrapper component pattern** provides cleaner loading state handling and return-URL preservation than route loaders: - -```tsx -// routes/guards/ProtectedRoute.tsx -import { Navigate, Outlet, useLocation } from 'react-router-dom'; -import { useAuthStore } from '@/stores/auth.store'; - -export function ProtectedRoute() { - const { isAuthenticated, isLoading } = useAuthStore(); - const location = useLocation(); - - if (isLoading) { - return ; - } - - if (!isAuthenticated) { - return ( - - ); - } - - return ; -} -``` - -### Return-to-URL handling in Login - -```tsx -function Login() { - const navigate = useNavigate(); - const location = useLocation(); - const from = location.state?.from || '/'; - - const handleLogin = async (credentials: Credentials) => { - await login(credentials); - navigate(from, { replace: true }); - }; -} -``` - -### Role-based access control extension - -```tsx -interface ProtectedRouteProps { - allowedRoles?: string[]; -} - -export function ProtectedRoute({ allowedRoles }: ProtectedRouteProps) { - const { user, isAuthenticated, isLoading } = useAuthStore(); - - if (isLoading) return ; - if (!isAuthenticated) return ; - - if (allowedRoles && !allowedRoles.includes(user.role)) { - return ; - } - - return ; -} -``` - ---- - -## Routing patterns with TanStack Query integration - -Use `createBrowserRouter` (Data Router) for all new projects—it enables loaders, actions, and per-route error boundaries. **Combine route loaders with TanStack Query**: loaders initiate prefetches early, TanStack Query manages caching and refetching. 
- -```tsx -// Shared query options -const dashboardQueryOptions = queryOptions({ - queryKey: ['dashboard'], - queryFn: fetchDashboard, - staleTime: 5 * 60 * 1000, -}); - -// Route definition with loader prefetch -{ - path: '/dashboard', - loader: ({ context: { queryClient } }) => { - queryClient.ensureQueryData(dashboardQueryOptions); - return null; - }, - lazy: () => import('./pages/Dashboard'), -} - -// Component uses TanStack Query for full benefits -function Dashboard() { - const { data } = useSuspenseQuery(dashboardQueryOptions); - return ; -} -``` - -### Route-based code splitting with `lazy()` - -React Router's `lazy()` is superior to `React.lazy()` for routes—it loads component, loader, and error boundary in parallel: - -```tsx -// Routes load all exports in parallel -{ - path: '/analytics', - lazy: () => import('./routes/analytics'), -} - -// routes/analytics.tsx -export async function loader() { return fetchAnalytics(); } -export function Component() { /* ... */ } -export function ErrorBoundary() { return ; } -``` - ---- - -## Error boundaries and Suspense strategy - -Error boundaries remain **class components only** in React 19. Use `react-error-boundary` library for functional wrapper: - -```tsx -import { ErrorBoundary } from 'react-error-boundary'; -import { QueryErrorResetBoundary } from '@tanstack/react-query'; - -// Combined pattern for data fetching - - {({ reset }) => ( - ( -
-

Error: {error.message}

- -
- )} - > - }> - - -
- )} -
-``` - -### Layered error boundary strategy - -- **Global** (in main.tsx): Catches catastrophic failures, shows full-page error -- **Route-level** (in Shell): Isolates page failures, allows navigation to continue -- **Component-level**: Isolates non-critical features (widgets, charts) - -### Suspense with useSuspenseQuery - -`useSuspenseQuery` in TanStack Query v5 is **production-ready** and guarantees data is defined: - -```tsx -function UserProfile({ userId }: { userId: string }) { - // data is always defined - TypeScript knows this! - const { data } = useSuspenseQuery({ - queryKey: ['user', userId], - queryFn: () => fetchUser(userId), - }); - - return

{data.name}

; -} -``` - -**Avoid waterfall loading** by using `useSuspenseQueries` for parallel fetches or prefetching in route loaders. - ---- - -## Performance optimization with React 19 - -### When to still use manual memoization - -With React Compiler enabled, most memoization is automatic. **Still use manual memoization for**: - -- Values used as effect dependencies where you need precise control -- External library integrations requiring reference stability -- Expensive calculations the compiler can't detect - -```tsx -// Still useful: effect dependency with specific identity -const chartOptions = useMemo(() => ({ - responsive: true, - plugins: { legend: { position: 'top' } } -}), []); - -useEffect(() => { - chart.update(chartOptions); -}, [chartOptions]); -``` - -### useTransition for non-urgent updates - -```tsx -function SearchWithTransition() { - const [query, setQuery] = useState(''); - const [results, setResults] = useState([]); - const [isPending, startTransition] = useTransition(); - - const handleSearch = (value: string) => { - setQuery(value); // Urgent: update input immediately - startTransition(() => { - setResults(filterLargeDataset(value)); // Non-urgent - }); - }; - - return ( - <> - handleSearch(e.target.value)} /> - - - ); -} -``` - -### Virtual scrolling for large lists - -For lists over **100 items**, use TanStack Virtual (~5.5M weekly downloads) or react-virtuoso (easiest API for dynamic heights): - -```tsx -import { useVirtualizer } from '@tanstack/react-virtual'; - -function VirtualList({ items }: { items: Item[] }) { - const parentRef = useRef(null); - - const virtualizer = useVirtualizer({ - count: items.length, - getScrollElement: () => parentRef.current, - estimateSize: () => 50, - overscan: 5, - }); - - return ( -
-
- {virtualizer.getVirtualItems().map((virtualItem) => ( -
- {items[virtualItem.index].name} -
- ))} -
-
- ); -} -``` - ---- - -## State management philosophy - -### Decision tree for state location - -| State Type | Where to Store | -|------------|----------------| -| Server/async data | TanStack Query | -| Global UI state (theme, sidebar) | Zustand | -| Local UI state (dropdown open) | useState | -| URL-shareable state (filters, pagination) | URL search params | -| Form state | React Hook Form | -| Auth tokens | Zustand + persist middleware | - -### Zustand TypeScript patterns - -```tsx -import { create } from 'zustand'; -import { devtools, persist } from 'zustand/middleware'; - -interface AuthState { - user: User | null; - token: string | null; - isAuthenticated: boolean; - isLoading: boolean; - login: (user: User, token: string) => void; - logout: () => void; -} - -// Curried create()() required for proper inference with middleware -export const useAuthStore = create()( - devtools( - persist( - (set) => ({ - user: null, - token: null, - isAuthenticated: false, - isLoading: true, - login: (user, token) => set({ user, token, isAuthenticated: true, isLoading: false }), - logout: () => set({ user: null, token: null, isAuthenticated: false }), - }), - { name: 'auth-storage' } - ) - ) -); - -// Selectors for optimized re-renders -export const useUser = () => useAuthStore((s) => s.user); -export const useIsAuthenticated = () => useAuthStore((s) => s.isAuthenticated); -``` - ---- - -## TypeScript patterns for React 19 - -### Component props typing - -**Plain functions with typed props are recommended** over `React.FC`: - -```tsx -// ✅ Recommended pattern -interface ButtonProps { - variant: 'primary' | 'secondary'; - size?: 'sm' | 'md' | 'lg'; - children: React.ReactNode; - onClick?: () => void; -} - -function Button({ variant, size = 'md', children, onClick }: ButtonProps) { - return ; -} -``` - -### Generic components - -```tsx -interface ListProps { - items: T[]; - renderItem: (item: T) => React.ReactNode; - keyExtractor: (item: T) => string; -} - -function List({ 
items, renderItem, keyExtractor }: ListProps) { - return ( -
    - {items.map((item) => ( -
  • {renderItem(item)}
  • - ))} -
- ); -} - -// Usage - TypeScript infers T - {user.name}} - keyExtractor={(user) => user.id} -/> -``` - -### Event handler typing - -```tsx -const handleSubmit = (e: React.FormEvent) => { - e.preventDefault(); - const formData = new FormData(e.currentTarget); -}; - -const handleChange = (e: React.ChangeEvent) => { - setValue(e.target.value); -}; -``` - ---- - -## Forms with React Hook Form and Zod - -```tsx -import { useForm } from 'react-hook-form'; -import { zodResolver } from '@hookform/resolvers/zod'; -import { z } from 'zod'; - -const schema = z.object({ - email: z.string().email('Invalid email'), - password: z.string().min(8, 'Password must be at least 8 characters'), -}); - -type FormData = z.infer; - -function LoginForm() { - const { register, handleSubmit, formState: { errors, isSubmitting } } = useForm({ - resolver: zodResolver(schema), - }); - - const onSubmit = async (data: FormData) => { - await login(data); - }; - - return ( -
- - {errors.email && {errors.email.message}} - - - {errors.password && {errors.password.message}} - - -
- ); -} -``` - ---- - -## Vite build optimization - -```ts -// vite.config.ts -import { defineConfig, splitVendorChunkPlugin } from 'vite'; -import react from '@vitejs/plugin-react'; - -export default defineConfig({ - plugins: [ - react({ - babel: { - plugins: ['babel-plugin-react-compiler'], - }, - }), - splitVendorChunkPlugin(), - ], - build: { - rollupOptions: { - output: { - manualChunks: { - 'vendor-react': ['react', 'react-dom', 'react-router-dom'], - 'vendor-query': ['@tanstack/react-query'], - }, - }, - }, - chunkSizeWarningLimit: 500, - }, -}); -``` - ---- - -## Migration checklist from React 18 - -```bash -# 1. Update packages -npm install --save-exact react@^19.0.0 react-dom@^19.0.0 -npm install --save-exact @types/react@^19 @types/react-dom@^19 - -# 2. Run codemods -npx codemod@latest react/19/migration-recipe -npx types-react-codemod@latest preset-19 ./src - -# 3. Check for compiler compatibility -npx react-compiler-healthcheck@latest - -# 4. Add React Compiler -npm install --save-dev babel-plugin-react-compiler@latest -``` - -**Key breaking changes**: `useRef` requires an argument (`useRef(undefined)` not `useRef()`), ref callbacks can't have implicit returns, `ReactDOM.render` replaced by `createRoot`, `findDOMNode` removed. 
- ---- - -## Anti-patterns to avoid - -- **Don't drill props** through many levels—use Zustand or Context for truly global state -- **Don't overuse Context** for frequently changing state—it triggers full subtree re-renders -- **Don't use array indices as keys** in lists that can reorder or have deletions -- **Don't forget cleanup functions** in useEffect for subscriptions and abort controllers -- **Don't ignore `exhaustive-deps`** ESLint rule—use functional state updates to avoid stale closures -- **Don't create promises in render** when using `use()`—cache them in parent components or use TanStack Query - -## Conclusion - -For your React 19 + Vite + FastAPI template, adopt the **React Compiler** for automatic memoization, use **layout routes** for shell architecture, combine **route loaders with TanStack Query** for optimal data loading, and keep **Zustand for UI state** while TanStack Query handles server state. The new `useActionState` and `useOptimistic` hooks provide excellent form UX patterns, and `ref` as a prop eliminates forwardRef boilerplate. Layer error boundaries at global, route, and component levels with Suspense boundaries at route transitions for the best user experience. diff --git a/docs/research/SCSS.md b/docs/research/SCSS.md deleted file mode 100644 index c8bdd0f..0000000 --- a/docs/research/SCSS.md +++ /dev/null @@ -1,696 +0,0 @@ -# Production-Ready SCSS Architecture & Responsive Design for 2025 - -Modern CSS has fundamentally changed since 2023. **Container queries are now production-ready** with 90%+ browser support, making component-level responsiveness a reality. OKLCH colors enable perceptually uniform palettes, `@layer` provides cascade control, and `text-wrap: balance` eliminates awkward heading breaks. Meanwhile, SCSS's `@import` is officially deprecated—`@use` and `@forward` are mandatory. This report provides senior-level patterns for building a bulletproof design system that works flawlessly from 300px to 1800px+. 
- -## Modern CSS reset for 2025 browsers - -The reset landscape has shifted from "erase everything" to "minimal opinionated baseline." Browser consistency is now excellent, so resets focus on improving authoring experience and accessibility rather than fixing bugs. - -**Essential reset components for production:** - -```scss -// _reset.scss - Modern 2025 Reset - -*, *::before, *::after { - box-sizing: border-box; -} - -* { - margin: 0; -} - -html { - -moz-text-size-adjust: none; - -webkit-text-size-adjust: none; - text-size-adjust: none; -} - -// Enable animations to auto/fit-content (Chrome/Edge 2025) -@media (prefers-reduced-motion: no-preference) { - html { - interpolate-size: allow-keywords; - scroll-behavior: smooth; - } -} - -body { - min-height: 100vh; - line-height: 1.5; - -webkit-font-smoothing: antialiased; -} - -h1, h2, h3, h4, h5, h6 { - line-height: 1.1; - text-wrap: balance; - overflow-wrap: break-word; -} - -p { - text-wrap: pretty; - overflow-wrap: break-word; -} - -img, picture, video, canvas, svg { - display: block; - max-width: 100%; -} - -input, button, textarea, select { - font: inherit; -} - -textarea:not([rows]) { - min-height: 10em; -} - -// Safari VoiceOver list semantics fix -ul[role='list'], ol[role='list'] { - list-style: none; -} - -:target { - scroll-margin-block: 5ex; -} - -// SPA root stacking context -#root, #__next { - isolation: isolate; -} - -// Safe area for notched devices -@supports(padding: max(0px)) { - body { - padding-left: max(1rem, env(safe-area-inset-left)); - padding-right: max(1rem, env(safe-area-inset-right)); - } -} - -// Dark mode initialization -html { - color-scheme: dark light; -} -``` - -### What to add vs remove in 2025 - -The **new additions** that matter: `interpolate-size: allow-keywords` enables smooth animations to `auto` and `fit-content`; `text-wrap: balance` creates visually balanced headings (max **6 lines** in Chrome); `text-wrap: pretty` prevents orphans in paragraphs. 
Safe area insets using `env()` with `max()` fallback handle iPhone notches properly. - -**Remove these obsolete hacks:** All `-ms-` prefixes (IE11 is completely dead since June 2022), the old `-webkit-appearance: button` form control fixes, monospace font-family double declaration hack, `constant()` for notch (replaced by `env()` since iOS 11.2), and any `height: 100%` on html/body—dynamic viewport units (`dvh`, `svh`) are well-supported now. - -### Controversial decisions resolved - -**Box-sizing globally**: Still best practice. Performance impact is negligible—Paul Irish confirmed it's "as fast as h1 as a selector." Modern browsers optimize `*` selectors efficiently. The Microsoft Edge team notes "there are more strategic parts to optimize." - -**Scroll-behavior smooth**: Only with motion preference check. Apply inside `@media (prefers-reduced-motion: no-preference)` to avoid triggering vestibular disorders. Never apply globally without this check. - -**Line-height 1.5**: Apply to body text, but reduce headings to **1.1**. WCAG requires ≥1.5 for body text accessibility, but headings look better with tighter leading. - -## Comprehensive design token system - -### Spacing scale: 8px base with rem output - -The **8px base unit** dominates modern design systems (Atlassian, Material Design, IBM Carbon). Use 4px half-steps for fine control in dense interfaces. 
- -```scss -// _tokens.scss - -// Spacing (8px base, rem output) -$space-1: 0.25rem; // 4px -$space-2: 0.5rem; // 8px -$space-3: 0.75rem; // 12px -$space-4: 1rem; // 16px -$space-5: 1.25rem; // 20px -$space-6: 1.5rem; // 24px -$space-8: 2rem; // 32px -$space-10: 2.5rem; // 40px -$space-12: 3rem; // 48px -$space-16: 4rem; // 64px - -// CSS custom properties for runtime flexibility -:root { - --space-1: #{$space-1}; - --space-2: #{$space-2}; - --space-3: #{$space-3}; - --space-4: #{$space-4}; - --space-6: #{$space-6}; - --space-8: #{$space-8}; - --space-12: #{$space-12}; - --space-16: #{$space-16}; -} -``` - -Use **numeric naming** (`space-1, space-2, space-4`) rather than t-shirt sizes—it maps directly to multipliers and scales infinitely. Always use `rem` for spacing to respect user font-size preferences (accessibility requirement). - -### Typography scale: Major Third ratio - -The **1.25 (Major Third) ratio** is the most versatile default. It provides clear hierarchy without excessive jumps between sizes. - -```scss -// Typography scale (1.25 ratio) -:root { - --font-size-xs: 0.64rem; // ~10px - --font-size-sm: 0.8rem; // ~13px - --font-size-base: 1rem; // 16px - --font-size-md: 1.25rem; // 20px - --font-size-lg: 1.563rem; // 25px - --font-size-xl: 1.953rem; // 31px - --font-size-2xl: 2.441rem; // 39px - --font-size-3xl: 3.052rem; // 49px - - // Font weights - --font-weight-regular: 400; - --font-weight-medium: 500; - --font-weight-semibold: 600; - --font-weight-bold: 700; - - // Line heights (paired with sizes) - --line-height-tight: 1.1; - --line-height-snug: 1.25; - --line-height-normal: 1.5; - --line-height-relaxed: 1.625; - - // Tracking - --tracking-tight: -0.025em; - --tracking-normal: 0; - --tracking-wide: 0.025em; -} -``` - -### Color system: OKLCH is production-ready - -**OKLCH is the 2025 standard** with support in Chrome 111+, Safari 15.4+, Firefox 113+. 
It provides perceptually uniform lightness, better color manipulation (no muddy gradients), wide-gamut P3 support, and native `color-mix()` compatibility. - -```scss -// Color tokens using OKLCH -:root { - // Primitives - --color-blue-500: oklch(60% 0.2 250); - --color-blue-600: oklch(50% 0.2 250); - --color-gray-100: oklch(95% 0.01 250); - --color-gray-900: oklch(20% 0.01 250); - - // Semantic tokens (light mode) - --color-primary: var(--color-blue-500); - --color-primary-hover: var(--color-blue-600); - --color-text: var(--color-gray-900); - --color-text-muted: oklch(45% 0 0); - --color-bg: oklch(98% 0 0); - --color-surface: oklch(100% 0 0); - - // State variants using relative color syntax - --color-primary-disabled: oklch(from var(--color-primary) l calc(c * 0.3) h / 0.5); -} - -// Dark mode overrides -[data-theme="dark"] { - --color-text: oklch(95% 0 0); - --color-text-muted: oklch(70% 0 0); - --color-bg: oklch(15% 0 0); - --color-surface: oklch(22% 0 0); - --color-primary: oklch(65% 0.18 250); -} -``` - -### CSS custom properties vs SCSS variables - -**Use both strategically:** SCSS variables for build-time values (breakpoints, calculations, media queries), CSS custom properties for runtime theming. SCSS variables can't be used in media queries—`@media (min-width: $breakpoint-md)` works, but `@media (min-width: var(--breakpoint-md))` does not. - -Performance difference is **~0.8% slower** with CSS custom properties—negligible for most applications. The real performance concern is recalculating descendants when changing variables at a parent level. 
- -### Other essential tokens - -```scss -:root { - // Border radius - --radius-sm: 0.125rem; // 2px - --radius-md: 0.25rem; // 4px - --radius-lg: 0.5rem; // 8px - --radius-xl: 1rem; // 16px - --radius-full: 9999px; // pill - - // Shadows (4-6 levels) - --shadow-sm: 0 1px 2px 0 oklch(0% 0 0 / 0.05); - --shadow-md: 0 4px 6px -1px oklch(0% 0 0 / 0.1); - --shadow-lg: 0 10px 15px -3px oklch(0% 0 0 / 0.1); - --shadow-xl: 0 20px 25px -5px oklch(0% 0 0 / 0.1); - - // Z-index scale - --z-dropdown: 100; - --z-sticky: 200; - --z-fixed: 300; - --z-modal: 500; - --z-tooltip: 800; - - // Motion - --duration-fast: 100ms; - --duration-normal: 200ms; - --duration-slow: 300ms; - --ease-default: cubic-bezier(0.4, 0, 0.2, 1); - --ease-spring: cubic-bezier(0.34, 1.56, 0.64, 1); -} - -// Breakpoints (SCSS only—can't use CSS vars in media queries) -$breakpoint-sm: 640px; -$breakpoint-md: 768px; -$breakpoint-lg: 1024px; -$breakpoint-xl: 1280px; -$breakpoint-2xl: 1536px; -``` - -## Essential production mixins - -### Responsive breakpoint mixin - -```scss -// _mixins.scss - -$breakpoints: ( - 'sm': 640px, - 'md': 768px, - 'lg': 1024px, - 'xl': 1280px, - '2xl': 1536px -); - -@mixin breakpoint($size) { - @media (min-width: map-get($breakpoints, $size)) { - @content; - } -} - -// Usage -.card { - padding: 1rem; - @include breakpoint('md') { - padding: 2rem; - } -} -``` - -### Fluid typography mixin - -```scss -@function fluid-type($min-size, $max-size, $min-vw: 320px, $max-vw: 1200px) { - $slope: math.div($max-size - $min-size, $max-vw - $min-vw); - $intercept: $min-size - ($slope * $min-vw); - @return clamp(#{$min-size}, #{$intercept} + #{$slope * 100}vw, #{$max-size}); -} - -// Usage -h1 { - font-size: fluid-type(1.75rem, 3rem); -} -``` - -### Container query mixin - -```scss -@mixin container-query($name, $min-width) { - @container #{$name} (min-width: #{$min-width}) { - @content; - } -} - -// Usage -.card-container { - container: card / inline-size; -} - -.card-content { - @include 
container-query(card, 400px) { - flex-direction: row; - } -} -``` - -### Accessibility mixins - -```scss -// Screen reader only -@mixin sr-only { - clip: rect(0 0 0 0); - clip-path: inset(50%); - height: 1px; - width: 1px; - margin: -1px; - overflow: hidden; - padding: 0; - position: absolute; - white-space: nowrap; -} - -// Modern focus state (WCAG compliant) -@mixin focus-visible-ring($color: currentColor, $offset: 2px) { - &:focus-visible { - outline: 2px solid $color; - outline-offset: $offset; - } - &:focus:not(:focus-visible) { - outline: none; - } -} - -// Button reset -@mixin reset-button { - appearance: none; - background: none; - border: none; - padding: 0; - font: inherit; - color: inherit; - cursor: pointer; -} - -// Truncation -@mixin truncate-single { - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; -} - -@mixin truncate-multiline($lines: 3) { - display: -webkit-box; - -webkit-line-clamp: $lines; - -webkit-box-orient: vertical; - overflow: hidden; -} -``` - -### What NOT to include in mixins - -**Vendor prefix mixins are obsolete.** Autoprefixer handles this at build time—manual prefix mixins waste code. Configure `browserslist` and let PostCSS do the work: - -```json -{ - "browserslist": [ - "> 0.5%", - "last 2 versions", - "not dead", - "not op_mini all" - ] -} -``` - -Remove any clearfix hacks (use `display: flow-root` or Grid/Flexbox), aspect ratio mixins (native `aspect-ratio` works everywhere), and hardware acceleration mixins (`will-change` should be applied sparingly and dynamically, not via blanket mixins). - -## Flawless responsive design at every pixel width - -### Container queries are production-ready - -With **90%+ browser support** (Chrome 105+, Firefox 110+, Safari 16+), container queries are ready for production. They enable true component-level responsiveness—components adapt to their container rather than the viewport. 
- -```scss -// Container query pattern -.card-wrapper { - container-type: inline-size; - container-name: card; -} - -.card { - display: flex; - flex-direction: column; - gap: 1rem; -} - -@container card (min-width: 400px) { - .card { - flex-direction: row; - } -} -``` - -**When to use each approach:** - -- **Container queries:** Reusable components (cards, widgets), elements appearing in different contexts (main content vs sidebar) -- **Media queries:** Global page layout, navigation changes, user preference queries (`prefers-reduced-motion`) - -### Fluid typography that scales flawlessly - -Every font-size should be fluid, not just headings. Use **clamp()** with rem + vw for accessibility compliance—pure `vw` units don't respond to browser zoom (WCAG violation). - -```scss -:root { - // Fluid scale from Utopia.fyi - --step-0: clamp(1.13rem, 1.08rem + 0.22vw, 1.25rem); - --step-1: clamp(1.35rem, 1.24rem + 0.55vw, 1.67rem); - --step-2: clamp(1.62rem, 1.41rem + 1.05vw, 2.22rem); - --step-3: clamp(1.94rem, 1.59rem + 1.77vw, 2.96rem); - --step-4: clamp(2.33rem, 1.77rem + 2.81vw, 3.95rem); - - // Fluid spacing - --space-s: clamp(1rem, 0.92rem + 0.39vw, 1.25rem); - --space-m: clamp(1.5rem, 1.38rem + 0.58vw, 1.875rem); - --space-l: clamp(2rem, 1.85rem + 0.77vw, 2.5rem); -} -``` - -### Layouts that never break - -The key to avoiding awkward in-between layouts is using **intrinsic sizing patterns** that work at any width: - -```scss -// Holy grail grid pattern -.grid-layout { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(min(100%, 250px), 1fr)); - gap: 1rem; -} - -// Sidebar pattern that never breaks -.with-sidebar { - display: flex; - flex-wrap: wrap; - gap: 1rem; -} - -.with-sidebar > :first-child { - flex-basis: 20rem; - flex-grow: 1; -} - -.with-sidebar > :last-child { - flex-basis: 0; - flex-grow: 999; - min-inline-size: 50%; -} -``` - -The `min(100%, 250px)` trick prevents overflow on small viewports. 
Using `auto-fit` with `minmax()` lets the grid automatically adjust column count based on available space—no media queries needed. - -### Solving the in-between problem - -**More fluid, fewer breakpoints** is the 2025 approach. Use `clamp()` for sizing, let intrinsic sizing work, and reserve breakpoints for major layout shifts. **3-5 breakpoints are optimal** for most sites: - -```scss -// Modern breakpoint values -$breakpoints: ( - 'mobile': 360px, // Small phones - 'tablet': 768px, // Tablets portrait - 'laptop': 1024px, // Small laptops - 'desktop': 1280px, // Standard desktop - 'wide': 1440px // Large screens -); -``` - -**Mobile-first remains the consensus** with 58%+ of traffic being mobile. Content-based breakpoints are preferred over device-based—"The moment your layout looks stretched or awkward—that's your first breakpoint." - -## Modern CSS features ready for production - -### Production-ready now (use freely) - -| Feature | Support | Notes | -|---------|---------|-------| -| `:has()` selector | 90%+ | The "parent selector" everyone wanted | -| `@layer` | All modern | Essential for cascade control | -| Container queries | 90%+ | Component-level responsiveness | -| CSS Nesting | All modern | Native syntax matches Sass | -| Subgrid | 78%+ | Safe with graceful degradation | -| `text-wrap: balance` | 87%+ | Limit: 6 lines Chrome, 10 Firefox | -| `text-wrap: pretty` | 72%+ | Falls back gracefully | -| `color-mix()` | All modern | Use with OKLCH | -| `oklch()` / `oklab()` | All modern | Recommended color format | - -### Use with caution (progressive enhancement) - -**`@scope`** has no Firefox support (Nightly only). Use with fallbacks. **View Transitions** work in Chrome/Safari 18+ but Firefox is pending. **Anchor positioning** is Chromium-only (Chrome 125+, Edge 125+)—Firefox and Safari are still in development. - -### Browser baseline for 2025 - -Target **Chrome 111+, Safari 16.4+, Firefox 128+**. This aligns with Tailwind CSS v4's baseline. 
IE11 is completely dead—Microsoft ended support in June 2022. No production sites should support IE11. - -## Cascade layers for design systems - -`@layer` provides explicit cascade control, eliminating specificity wars with third-party CSS: - -```scss -// Declare layer order first—this controls priority -@layer reset, base, tokens, components, utilities, overrides; - -@layer reset { - // Your reset styles (lowest priority) -} - -@layer components { - .button { /* component styles */ } -} - -@layer utilities { - .sr-only { /* highest priority utility */ } -} - -// Import third-party CSS into low-priority layer -@import url('library.css') layer(third-party); -``` - -Key principle: **layer order equals priority order** (last declared wins). Unlayered styles beat all layered styles—be intentional. - -## Performance optimization - -### content-visibility for long pages - -```scss -.below-fold-section { - content-visibility: auto; - contain-intrinsic-size: auto 600px; // Prevent layout shift -} -``` - -Real-world results show **50-80% reduction in initial rendering time** on content-heavy pages. Apply to off-screen, complex content sections—never above-the-fold content (delays LCP). - -### will-change anti-patterns - -Never apply `will-change` broadly—it causes performance problems: - -```scss -// ❌ Never do this -* { will-change: transform; } - -// ✅ Apply dynamically before animation -.element:hover { will-change: transform; } -``` - -Use as last resort for existing performance problems, apply to few elements, and remove it when animation completes. 
- -### Efficient animation properties - -```scss -// ✅ GPU-accelerated, no layout reflow -.efficient-animation { - transform: translateX(100px); - opacity: 0.8; - filter: blur(2px); -} - -// ❌ Avoid animating these (cause reflow) -.slow-animation { - width: 200px; // triggers layout - margin: 1rem; // triggers layout -} -``` - -## Accessibility at every screen size - -### Touch targets: updated guidelines - -WCAG 2.2 introduced a new AA minimum of **24×24 CSS pixels** (SC 2.5.8), with **44×44px remaining the AAA/best practice** recommendation (SC 2.5.5). Aim for 44×44px on all interactive elements. - -### Reduced motion implementation - -Use the **motion-reduce-first approach**—default to no motion, add motion only for users who haven't requested reduced motion: - -```scss -// Default: no motion -.modal-enter { - opacity: 0; -} - -// Add motion for users without preference -@media (prefers-reduced-motion: no-preference) { - .modal-enter { - transform: scale(0.7); - transition: opacity 0.3s, transform 0.3s; - } -} -``` - -Disable **parallax, large-scale transforms, and zooming completely** for reduced-motion users (vestibular triggers). Micro-interactions can be reduced rather than disabled. - -### Dark mode without FOUC - -Prevent Flash of Unstyled Content with an **inline blocking script in ``**: - -```html - - - - -``` - -### Zoom and reflow requirements - -Content must work at **200% zoom** (WCAG 2.1). At 400% zoom (equivalent to 320px viewport), content should reflow to single column without horizontal scrolling: - -```scss -@media (max-width: 320px) { - .grid { display: block; } -} -``` - -## SCSS module system migration - -### @import is officially deprecated - -Dart Sass deprecated `@import` in version 1.80.0 (2024), with removal scheduled for Dart Sass 3.0.0. Use the migration tool: `sass-migrator module --migrate-deps `. 
- -### @use and @forward patterns - -```scss -// tokens/_colors.scss -$primary: oklch(60% 0.2 250); -$secondary: oklch(55% 0.1 250); - -// tokens/_index.scss (barrel file) -@forward 'colors'; -@forward 'spacing'; -@forward 'typography'; - -// components/button.scss -@use '../tokens'; - -.button { - background: tokens.$primary; - padding: tokens.$space-4; -} -``` - -### Configuring defaults - -```scss -// _theme.scss -$primary-color: blue !default; -$border-radius: 4px !default; - -// main.scss - Override before loading -@use 'theme' with ( - $primary-color: purple, - $border-radius: 8px -); -``` - -Private members use underscore or dash prefix: `$-private-value` is not accessible externally. - -## Conclusion - -Senior-level SCSS architecture in 2025 combines **fluid intrinsic layouts** that work at every pixel width, **container queries** for component-level responsiveness, **OKLCH colors** for perceptually uniform palettes, and **cascade layers** for explicit style priority. The shift is clear: fewer media queries, more fluid techniques; fewer hacks, more native CSS features; fewer SCSS variables, more CSS custom properties for theming. - -The key insight is that **flawless responsive design comes from intrinsic sizing, not more breakpoints**. Use `repeat(auto-fit, minmax(min(100%, 250px), 1fr))` grids, `clamp()` for typography and spacing, and container queries for component adaptation. Reserve the 3-5 breakpoints for major layout shifts, not incremental adjustments. - -Test continuously between breakpoints, not just at them. Use Chrome DevTools' drag-to-resize, test at 200% zoom for accessibility, and validate touch targets are at least 44×44px on mobile. With these patterns, layouts scale smoothly from 300px to 1800px+ without awkward in-between states. 
diff --git a/docs/research/TANSTACK-QUERY.md b/docs/research/TANSTACK-QUERY.md deleted file mode 100644 index 5258d9b..0000000 --- a/docs/research/TANSTACK-QUERY.md +++ /dev/null @@ -1,593 +0,0 @@ -# TanStack Query v5 production architecture for React + FastAPI in 2025 - -**TanStack Query v5 represents a significant evolution** with unified API signatures, stable Suspense support, and improved TypeScript inference. For a production React + Vite + FastAPI template, the architecture centers on centralized QueryClient configuration, feature-based hook organization using `queryOptions`, Zod runtime validation, Axios interceptors for JWT refresh, and layered error handling. The key insight: v5's `queryOptions` helper eliminates the need for custom wrapper hooks in most cases, while the removal of `onSuccess`/`onError` from queries pushes error handling to global QueryCache callbacks—a cleaner separation of concerns for production applications. - -## V5 breaking changes demand immediate attention - -The most impactful v5 change is the **unified object syntax**—all hooks now accept only a single object parameter. The old `useQuery(key, fn, options)` pattern is gone: - -```typescript -// v4 (multiple signatures) → v5 (single object only) -useQuery(['todos'], fetchTodos, { staleTime: 5000 }) // ❌ Removed -useQuery({ queryKey: ['todos'], queryFn: fetchTodos, staleTime: 5000 }) // ✅ Required -``` - -**Critical renames** affect every v4 codebase: `cacheTime` becomes `gcTime` (garbage collection time), `isLoading` becomes `isPending` for status checks, and the new `isLoading` now equals `isPending && isFetching`. The `useErrorBoundary` option is now `throwOnError`, and `keepPreviousData` migrates to `placeholderData: keepPreviousData` import. - -**Removed features** include query-level `onSuccess`, `onError`, and `onSettled` callbacks. 
Use global QueryCache callbacks instead: - -```typescript -const queryClient = new QueryClient({ - queryCache: new QueryCache({ - onError: (error, query) => { - if (query.state.data !== undefined) { - toast.error(`Background update failed: ${error.message}`) - } - } - }) -}) -``` - -Infinite queries now **require `initialPageParam`**—it's no longer optional. A codemod exists for migration via `npx jscodeshift` with the `@tanstack/react-query` transform. - -## QueryClient configuration for production workloads - -The optimal production configuration balances freshness against network efficiency. The **5-minute staleTime default** suits most dashboard-style data, while static reference data like countries or categories should use `Infinity`: - -```typescript -// src/core/api/query.config.ts -import { QueryClient, QueryCache, MutationCache } from '@tanstack/react-query' -import { ApiError, ApiErrorCode } from './errors' - -export const queryClient = new QueryClient({ - defaultOptions: { - queries: { - staleTime: 1000 * 60 * 5, // 5 minutes - gcTime: 1000 * 60 * 30, // 30 minutes - retry: (failureCount, error) => { - if (error instanceof ApiError) { - // Don't retry auth or not-found errors - if ([ApiErrorCode.AUTHENTICATION_ERROR, - ApiErrorCode.NOT_FOUND, - ApiErrorCode.VALIDATION_ERROR].includes(error.code)) { - return false - } - } - return failureCount < 3 - }, - retryDelay: (attemptIndex) => Math.min(1000 * 2 ** attemptIndex, 30000), - refetchOnWindowFocus: true, // Essential for data freshness - refetchOnMount: true, - refetchOnReconnect: true, - networkMode: 'online', - structuralSharing: true, // Maintains referential equality - }, - mutations: { - retry: 0, // Mutations shouldn't auto-retry - networkMode: 'online', - }, - }, - queryCache: new QueryCache({ - onError: (error, query) => { - // Only toast for background refetch failures - if (query.state.data !== undefined) { - toast.error(`Update failed: ${error.message}`) - } - Sentry.captureException(error, { 
extra: { queryKey: query.queryKey } }) - }, - }), - mutationCache: new MutationCache({ - onError: (error, _variables, _context, mutation) => { - if (!mutation.options.onError) { - toast.error(`Operation failed: ${error.message}`) - } - }, - }), -}) -``` - -**staleTime recommendations by data type**: Static data (Infinity), semi-static like user profile (**5-30 minutes**), frequently changing dashboard data (**30s-2 minutes**), real-time feeds (**0** with `refetchInterval`). The `gcTime` must exceed `staleTime` to enable instant rendering from cache while background refetch occurs. - -## Axios remains the production choice for API configuration - -Despite native fetch improvements, **Axios still leads** for production applications due to built-in interceptors, automatic JSON handling, and superior TypeScript support with `AxiosError` type narrowing. For bundle-conscious apps, consider `ky` (~3KB) or `wretch` (~2KB). - -```typescript -// src/core/api/api.config.ts -import axios, { AxiosInstance, AxiosError, InternalAxiosRequestConfig } from 'axios' - -const getBaseURL = (): string => { - const env = import.meta.env.MODE - return import.meta.env.VITE_API_BASE_URL || ({ - development: '/api/v1', // Uses Vite proxy - staging: 'https://staging-api.example.com/api/v1', - production: 'https://api.example.com/api/v1', - }[env] || '/api/v1') -} - -export const apiClient: AxiosInstance = axios.create({ - baseURL: getBaseURL(), - timeout: 15000, - headers: { 'Content-Type': 'application/json' }, - withCredentials: true, -}) -``` - -The **Vite proxy configuration** eliminates CORS issues in development: - -```typescript -// vite.config.ts -export default defineConfig({ - server: { - proxy: { - '/api': { - target: 'http://localhost:8000', - changeOrigin: true, - rewrite: (path) => path.replace(/^\/api/, ''), - }, - }, - }, -}) -``` - -## JWT refresh flow belongs in Axios interceptors - -The **hybrid approach** is optimal: token injection and refresh in Axios interceptors, 
network retry in TanStack Query. This keeps auth concerns centralized while leveraging TanStack Query's built-in retry for transient failures: - -```typescript -// src/core/api/interceptors.ts -let isRefreshing = false -let refreshSubscribers: ((token: string) => void)[] = [] - -apiClient.interceptors.request.use((config) => { - const token = tokenStorage.getAccessToken() - if (token) config.headers.Authorization = `Bearer ${token}` - return config -}) - -apiClient.interceptors.response.use( - (response) => response, - async (error: AxiosError) => { - const originalRequest = error.config as InternalAxiosRequestConfig & { _retry?: boolean } - - if (error.response?.status === 401 && - !originalRequest._retry && - !originalRequest.url?.includes('/auth/refresh')) { - - if (isRefreshing) { - // Queue requests during refresh - return new Promise((resolve) => { - refreshSubscribers.push((newToken) => { - originalRequest.headers.Authorization = `Bearer ${newToken}` - resolve(apiClient(originalRequest)) - }) - }) - } - - originalRequest._retry = true - isRefreshing = true - - try { - const { access_token, refresh_token } = await refreshTokens() - tokenStorage.setAccessToken(access_token) - if (refresh_token) tokenStorage.setRefreshToken(refresh_token) - - // Retry queued requests - refreshSubscribers.forEach((cb) => cb(access_token)) - refreshSubscribers = [] - - originalRequest.headers.Authorization = `Bearer ${access_token}` - return apiClient(originalRequest) - } catch { - tokenStorage.clearTokens() - window.location.href = '/login' - return Promise.reject(error) - } finally { - isRefreshing = false - } - } - return Promise.reject(transformAxiosError(error)) - } -) -``` - -## Query key factories with queryOptions define the modern pattern - -The v5 `queryOptions` helper provides **type-safe query definitions** that work everywhere—`useQuery`, `useSuspenseQuery`, `prefetchQuery`, and cache operations. 
This replaces the need for custom hooks in many cases: - -```typescript -// src/api/hooks/useUsers.ts -import { queryOptions, useQuery, useMutation, useQueryClient } from '@tanstack/react-query' -import { userService } from '../services/users' - -// Query key factory with embedded queryOptions -export const userQueries = { - all: () => ['users'] as const, - lists: () => [...userQueries.all(), 'list'] as const, - list: (filters: { page?: number; limit?: number }) => - queryOptions({ - queryKey: [...userQueries.lists(), filters] as const, - queryFn: () => userService.getAll(filters), - staleTime: 1000 * 60 * 5, - }), - details: () => [...userQueries.all(), 'detail'] as const, - detail: (id: number) => - queryOptions({ - queryKey: [...userQueries.details(), id] as const, - queryFn: () => userService.getById(id), - staleTime: 1000 * 60 * 2, - }), -} - -// Usage - type safety flows automatically -export const useUsers = (page = 1, limit = 10) => - useQuery(userQueries.list({ page, limit })) - -export const useUser = (id: number) => - useQuery({ ...userQueries.detail(id), enabled: !!id }) - -// Cache operations are type-safe -const queryClient = useQueryClient() -queryClient.setQueryData(userQueries.detail(5).queryKey, updatedUser) // Typed! -``` - -**Hook organization follows feature-based structure**: queries and mutations co-located in feature folders, not a global `queryKeys.ts`. The pattern `todoKeys.all → todoKeys.lists() → todoKeys.list(filters)` enables hierarchical invalidation. - -## Mutations require explicit callback separation - -TanStack Query recommends **separating logic callbacks from UI callbacks**. 
Logic in `useMutation` runs even if the component unmounts; UI actions in `mutate()` call don't: - -```typescript -export function useUpdateUser() { - const queryClient = useQueryClient() - - return useMutation({ - mutationFn: userService.update, - // Logic: always runs - onMutate: async (newData) => { - await queryClient.cancelQueries({ queryKey: userQueries.detail(newData.id).queryKey }) - const previousUser = queryClient.getQueryData(userQueries.detail(newData.id).queryKey) - queryClient.setQueryData(userQueries.detail(newData.id).queryKey, (old) => ({ ...old, ...newData })) - return { previousUser } - }, - onError: (err, newData, context) => { - queryClient.setQueryData(userQueries.detail(newData.id).queryKey, context?.previousUser) - }, - onSettled: (data, error, variables) => { - queryClient.invalidateQueries({ queryKey: userQueries.detail(variables.id).queryKey }) - }, - }) -} - -// Component usage -const updateUser = useUpdateUser() -updateUser.mutate(userData, { - // UI: only runs if component still mounted - onSuccess: () => navigate('/users'), -}) -``` - -**Optimistic updates via UI** (using `mutation.variables` in render) is simpler than cache manipulation for many cases, with automatic cleanup on error. - -## Zod validates at the API boundary for type safety - -**Validate in the query function**, not in components. 
Zod remains the recommended choice for 2025 due to ecosystem maturity, though Valibot offers 90%+ smaller bundles: - -```typescript -// src/api/types/user.types.ts -import { z } from 'zod' - -export const userSchema = z.object({ - id: z.number(), - email: z.string().email(), - firstName: z.string(), - lastName: z.string(), - role: z.enum(['admin', 'user', 'guest']), - createdAt: z.string().datetime(), -}) - -export const usersResponseSchema = z.object({ - data: z.array(userSchema), - total: z.number(), - page: z.number(), -}) - -export type User = z.infer -export type UsersResponse = z.infer - -// src/api/services/users.ts -export const userService = { - getAll: async (params: { page?: number; limit?: number }): Promise => { - const response = await apiClient.get('/users', { params }) - return usersResponseSchema.parse(response.data) // Runtime validation - }, - getById: async (id: number): Promise => { - const response = await apiClient.get(`/users/${id}`) - return userSchema.parse(response.data) - }, -} -``` - -**Validation errors should be caught and transformed** into actionable API errors, not allowed to crash the application. Use `safeParse` when you need graceful handling. 
- -## Error handling flows through three layers - -The production pattern establishes **three error handling layers**: Axios interceptor transforms errors, QueryCache handles global concerns, and component-level handles specific UI: - -```typescript -// src/core/api/errors.ts -export enum ApiErrorCode { - NETWORK_ERROR = 'NETWORK_ERROR', - VALIDATION_ERROR = 'VALIDATION_ERROR', - AUTHENTICATION_ERROR = 'AUTHENTICATION_ERROR', - NOT_FOUND = 'NOT_FOUND', - SERVER_ERROR = 'SERVER_ERROR', -} - -export class ApiError extends Error { - constructor( - message: string, - public readonly code: ApiErrorCode, - public readonly statusCode: number, - public readonly details?: Record - ) { - super(message) - this.name = 'ApiError' - } - - getUserMessage(): string { - const messages: Record = { - [ApiErrorCode.NETWORK_ERROR]: 'Unable to connect. Check your internet.', - [ApiErrorCode.VALIDATION_ERROR]: 'Please check your input.', - [ApiErrorCode.AUTHENTICATION_ERROR]: 'Session expired. Please log in.', - [ApiErrorCode.NOT_FOUND]: 'Resource not found.', - [ApiErrorCode.SERVER_ERROR]: 'Something went wrong. Try again.', - } - return messages[this.code] || this.message - } -} - -// Register global error type -declare module '@tanstack/react-query' { - interface Register { - defaultError: ApiError - } -} -``` - -**throwOnError configuration** determines Error Boundary behavior. Use a function for granular control: `throwOnError: (error) => error.statusCode >= 500` sends only server errors to boundaries. - -## Loading states changed significantly in v5 - -The **naming changes** affect every component: v4's `isLoading` (no data yet) is now `isPending`, while v5's `isLoading` means `isPending && isFetching` (first fetch in flight). 
Use `isRefetching` (equals `isFetching && !isPending`) for background update indicators: - -```typescript -function UserList() { - const { data, isPending, isFetching, isRefetching } = useQuery(userQueries.list({})) - - if (isPending) return // Initial load - - return ( -
- {data.map(user => <UserCard key={user.id} user={user} />)} - {isRefetching && <Spinner />}
- ) -} -``` - -**placeholderData with keepPreviousData** prevents loading flicker during pagination: - -```typescript -import { keepPreviousData } from '@tanstack/react-query' - -const { data, isPlaceholderData } = useQuery({ - queryKey: ['users', page], - queryFn: () => fetchUsers(page), - placeholderData: keepPreviousData, -}) -``` - -## Suspense is production-ready with dedicated hooks - -**useSuspenseQuery is now stable** in v5 and guarantees `data` is never undefined. The key difference: `enabled`, `placeholderData`, and error callbacks aren't available—use component composition for conditional queries: - -```typescript -import { useSuspenseQuery, QueryErrorResetBoundary } from '@tanstack/react-query' -import { ErrorBoundary } from 'react-error-boundary' - -function UserProfile({ userId }: { userId: number }) { - const { data } = useSuspenseQuery(userQueries.detail(userId)) - // data is User, never undefined - return
<div>{data.firstName}</div>
-} - -// Parent provides Suspense and Error boundaries -function UserProfilePage({ userId }: { userId: number }) { - return ( - - {({ reset }) => ( - ( - - )}> - }> - - - - )} - - ) -} -``` - -**Avoid waterfall requests** with `useSuspenseQueries` for parallel data fetching within Suspense boundaries. - -## Caching strategies vary by data volatility - -| Data Type | staleTime | gcTime | Strategy | -|-----------|-----------|--------|----------| -| Static reference (countries) | `Infinity` | `Infinity` | Fetch once, cache forever | -| User profile | 5-30 min | 1 hour | Background refresh on focus | -| Dashboard metrics | 30s-2 min | 10 min | Frequent background updates | -| Real-time (prices) | 0 | 1-5 min | Use `refetchInterval` | - -**Prefetching on hover** improves perceived performance dramatically: - -```typescript -function UserLink({ userId }: { userId: number }) { - const queryClient = useQueryClient() - - const prefetch = () => { - queryClient.prefetchQuery(userQueries.detail(userId)) - } - - return ( - - View User - - ) -} -``` - -## Offline support uses persistence plugins - -For offline-first applications, combine `PersistQueryClientProvider` with LocalStorage (small caches) or IndexedDB (large datasets): - -```typescript -import { PersistQueryClientProvider } from '@tanstack/react-query-persist-client' -import { createSyncStoragePersister } from '@tanstack/query-sync-storage-persister' - -const persister = createSyncStoragePersister({ - storage: window.localStorage, - key: 'QUERY_CACHE', - throttleTime: 1000, -}) - -// gcTime must match or exceed persistence maxAge -const queryClient = new QueryClient({ - defaultOptions: { - queries: { gcTime: 1000 * 60 * 60 * 24 }, // 24 hours - }, -}) - -function App() { - return ( - - - - ) -} -``` - -## WebSocket integration updates cache directly - -For real-time features, **update the query cache from WebSocket events**. 
Use `setQueryData` for frequent small updates, `invalidateQueries` for complex state changes: - -```typescript -useEffect(() => { - const socket = io(WS_URL) - - socket.on('user:updated', (user: User) => { - queryClient.setQueryData(userQueries.detail(user.id).queryKey, user) - queryClient.setQueryData(userQueries.list({}).queryKey, (old) => - old?.data.map(u => u.id === user.id ? user : u) - ) - }) - - socket.on('user:created', () => { - queryClient.invalidateQueries({ queryKey: userQueries.lists() }) - }) - - return () => socket.disconnect() -}, [queryClient]) -``` - -## Testing patterns use QueryClient wrapper - -```typescript -// test-utils.tsx -const createTestQueryClient = () => new QueryClient({ - defaultOptions: { - queries: { retry: false, gcTime: Infinity }, - }, -}) - -export function renderWithClient(ui: React.ReactElement) { - const testQueryClient = createTestQueryClient() - return render( - - {ui} - - ) -} - -// With MSW for API mocking -import { setupServer } from 'msw/node' -import { http, HttpResponse } from 'msw' - -const server = setupServer( - http.get('/api/users/:id', ({ params }) => - HttpResponse.json({ id: params.id, name: 'Test User' }) - ) -) -``` - -## DevTools load conditionally in production - -DevTools are **automatically excluded in production builds**. 
For on-demand production debugging, lazy-load from the production bundle: - -```typescript -const ReactQueryDevtoolsProduction = lazy(() => - import('@tanstack/react-query-devtools/production').then((d) => ({ - default: d.ReactQueryDevtools, - })) -) - -// Toggle with window.toggleDevtools() in console -``` - -## Recommended file structure synthesizes all patterns - -``` -src/ -├── core/ -│ └── api/ -│ ├── query.config.ts # QueryClient with defaults -│ ├── api.config.ts # Axios instance + base config -│ ├── interceptors.ts # Auth + error interceptors -│ └── errors.ts # ApiError class + transformer -├── api/ -│ ├── hooks/ -│ │ ├── useUsers.ts # Query factories + hooks + mutations -│ │ ├── usePosts.ts -│ │ └── useAuth.ts -│ ├── services/ -│ │ ├── users.ts # Type-safe API functions with Zod -│ │ ├── posts.ts -│ │ └── auth.ts -│ └── types/ -│ ├── user.types.ts # Zod schemas + inferred types -│ ├── post.types.ts -│ └── common.types.ts # Shared schemas (pagination, etc.) -├── components/ -│ └── providers/ -│ └── QueryProvider.tsx # QueryClientProvider + DevTools -└── App.tsx -``` - -## Conclusion - -Building a production TanStack Query v5 architecture requires embracing the **unified object API**, leveraging `queryOptions` for type-safe query definitions, and establishing clear boundaries between API configuration (Axios interceptors), caching behavior (QueryClient defaults), and UI concerns (component-level error handling). The removal of per-query callbacks in v5 isn't a limitation—it's a forcing function toward cleaner global error handling via QueryCache. - -The key anti-patterns to avoid: storing JWTs in localStorage (use HttpOnly cookies + memory), over-using custom hooks when `queryOptions` suffices, handling auth refresh in TanStack Query's retry logic (interceptors are cleaner), and neglecting `staleTime` configuration (the default `0` causes excessive refetching). 
For FastAPI backends, the Axios + Zod combination provides the strongest type safety from API response to component render. diff --git a/docs/research/TY.md b/docs/research/TY.md deleted file mode 100644 index 1f05aee..0000000 --- a/docs/research/TY.md +++ /dev/null @@ -1,570 +0,0 @@ -# ty - Extremely Fast Python Type Checker - -**Official Docs**: https://docs.astral.sh/ty - -## What is ty? - -ty is an **extremely fast** Python type checker written in Rust by Astral (the creators of uv and Ruff). It's designed to be: -- **10-100x faster** than mypy and pyright -- **Zero configuration** to get started -- **Compatible** with existing type annotations -- **Production-ready** for large codebases - -Think: "Ruff for type checking" - blazing fast, modern, and built for scale. - ---- - -## Installation - -### Add to your project dependencies: - -```bash -# With uv (recommended) -uv add --dev ty - -# With pip -pip install ty -``` - -Or run it directly without installing: -```bash -uvx ty check -``` - ---- - -## Quick Start - -### 1. Basic Usage - -```bash -# Check entire project -ty check - -# Check specific files/directories -ty check src/ -ty check src/models/User.py - -# Watch mode (recheck on file changes) -ty check --watch -``` - -### 2. Exit Codes - -- `0` - No errors -- `1` - Type errors found -- `2` - Invalid config/CLI options -- `101` - Internal error - -### 3. 
Output Formats - -```bash -# Default verbose output with context -ty check - -# Concise (one per line) -ty check --output-format concise - -# GitHub Actions annotations -ty check --output-format github - -# GitLab Code Quality JSON -ty check --output-format gitlab -``` - ---- - -## Configuration - -### Option 1: pyproject.toml (Recommended) - -```toml -[tool.ty] -# Python version (auto-detected from requires-python if not set) -python-version = "3.12" - -# Source directories -[tool.ty.src] -include = ["src", "tests"] -exclude = ["src/generated/**", "*.proto"] - -# Python environment (auto-detected from .venv if not set) -[tool.ty.environment] -root = ["./src"] -python = "./.venv" - -# Rule severity configuration -[tool.ty.rules] -# Make warnings errors -possibly-missing-attribute = "error" -possibly-missing-import = "error" - -# Downgrade errors to warnings -division-by-zero = "warn" - -# Disable specific rules -redundant-cast = "ignore" -unused-ignore-comment = "ignore" - -# Override rules for specific files -[[tool.ty.overrides]] -include = ["tests/**"] -[tool.ty.overrides.rules] -unresolved-reference = "warn" - -# Terminal output -[tool.ty.terminal] -error-on-warning = false # exit code 1 if warnings exist -output-format = "full" # full | concise | github | gitlab -``` - -### Option 2: ty.toml (Alternative) - -Create `backend/ty.toml` (same structure, no `[tool.ty]` prefix): - -```toml -python-version = "3.12" - -[src] -include = ["src", "tests"] - -[rules] -possibly-unresolved-reference = "warn" -``` - ---- - -## Important Rules - -### Error-Level (Default) - -These **will fail** your CI/CD: - -| Rule | What it catches | -|------|----------------| -| `call-non-callable` | Calling non-callable objects: `4()` | -| `division-by-zero` | Division by zero: `5 / 0` | -| `unresolved-import` | Missing modules: `import nonexistent` | -| `unresolved-reference` | Undefined variables: `print(undefined_var)` | -| `unresolved-attribute` | Missing attributes: 
`obj.missing_attr` | -| `invalid-argument-type` | Wrong arg types: `func(x: int)` called with `func("str")` | -| `invalid-return-type` | Return type mismatch | -| `missing-argument` | Missing required args: `func(x: int)` called as `func()` | -| `unknown-argument` | Unknown kwargs: `func(x=1, unknown=2)` | -| `unsupported-operator` | Bad operators: `"string" + 123` | -| `invalid-assignment` | Type mismatch: `x: int = "string"` | - -### Warning-Level (Default) - -Won't fail CI unless you enable `--error-on-warning`: - -| Rule | What it catches | -|------|----------------| -| `possibly-unresolved-reference` | Variables that **might** not be defined (conditional) | -| `possibly-missing-attribute` | Attributes that **might** not exist (conditional) | -| `possibly-missing-import` | Imports that **might** be missing (conditional) | -| `redundant-cast` | Unnecessary `cast()` calls | -| `deprecated` | Usage of deprecated APIs | -| `undefined-reveal` | `reveal_type()` without importing it | - -### Ignore-Level (Disabled by Default) - -Must explicitly enable: - -| Rule | What it catches | -|------|----------------| -| `unused-ignore-comment` | Unused `# type: ignore` or `# ty: ignore` | -| `possibly-unresolved-reference` | Possibly undefined refs in conditional code | -| `division-by-zero` | Preview rule - division by zero | - ---- - -## Suppression Comments - -### ty-specific suppression - -```python -# Suppress specific rule -result = unsafe_operation() # ty: ignore[invalid-argument-type] - -# Suppress multiple rules -value = risky() # ty: ignore[unresolved-attribute, invalid-return-type] - -# Multi-line expressions (comment on first OR last line) -result = long_function( # ty: ignore[missing-argument] - arg1, - arg2 -) - -# Combine with other tools -x = 1 # ty: ignore[division-by-zero] # fmt: skip -``` - -### Standard type: ignore (PEP 484) - -```python -# ty respects standard type: ignore -result = something() # type: ignore - -# But ty: ignore is preferred for 
specificity -result = something() # ty: ignore[invalid-return-type] -``` - -### Disable all checking in a function - -```python -from typing import no_type_check - -@no_type_check -def untyped_function(): - return "anything" + 123 # no errors -``` - -### Check for unused suppressions - -```toml -[tool.ty.rules] -unused-ignore-comment = "warn" # warn about unused suppressions -``` - ---- - -## Common Configurations for Production - -### Strict Mode (Recommended) - -```toml -[tool.ty.rules] -# Treat all "possibly" rules as errors -possibly-missing-attribute = "error" -possibly-missing-import = "error" -possibly-unresolved-reference = "error" - -# Catch unused suppressions -unused-ignore-comment = "warn" - -# Stricter terminal behavior -[tool.ty.terminal] -error-on-warning = true -``` - -### Gradual Adoption (Recommended for existing codebases) - -```toml -[tool.ty.rules] -# Downgrade strict rules to warnings -unresolved-attribute = "warn" -invalid-argument-type = "warn" - -# Focus on critical errors only -[tool.ty.terminal] -error-on-warning = false -``` - -### FastAPI-Specific - -```toml -[[tool.ty.overrides]] -include = ["src/routes/**", "src/dependencies/**"] - -[tool.ty.overrides.rules] -# FastAPI uses runtime dependency injection -unresolved-reference = "warn" # for Depends() params -``` - ---- - -## CLI Flags Reference - -### Rule Control - -```bash -# Override rule severity -ty check --error possibly-unresolved-reference -ty check --warn division-by-zero -ty check --ignore redundant-cast - -# Can combine multiple -ty check --error rule1 --warn rule2 --ignore rule3 -``` - -### Environment - -```bash -# Specify Python environment -ty check --python .venv - -# Python version -ty check --python-version 3.12 - -# Platform -ty check --python-platform linux -ty check --python-platform all # no platform assumptions -``` - -### Output Control - -```bash -# Verbosity -ty check -v # verbose -ty check -vv # very verbose -ty check -q # quiet -ty check -qq # silent - -# 
Exit codes -ty check --exit-zero # always exit 0 -ty check --error-on-warning # warnings = exit 1 -``` - ---- - -## Environment Variables - -```bash -# Log level (for debugging ty itself) -TY_LOG=debug ty check -TY_LOG=trace ty check - -# Parallelism limit -TY_MAX_PARALLELISM=4 ty check - -# Profile performance -TY_LOG_PROFILE=1 ty check # creates tracing.folded - -# Python path (additional search paths) -PYTHONPATH=/extra/path ty check - -# Virtual environment -VIRTUAL_ENV=/path/to/.venv ty check -``` - ---- - -## Integration - -### CI/CD (GitHub Actions) - -```yaml -name: Type Check - -on: [push, pull_request] - -jobs: - typecheck: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - uses: astral-sh/setup-uv@v5 - - - name: Install dependencies - run: uv sync --all-extras - - - name: Type check - run: uv run ty check --output-format github -``` - -### Pre-commit Hook - -```yaml -# .pre-commit-config.yaml -repos: - - repo: https://github.com/astral-sh/ty - rev: v0.0.1 # use latest version - hooks: - - id: ty -``` - -### VS Code - -```json -// .vscode/settings.json -{ - "python.linting.enabled": true, - "python.linting.tyEnabled": true, - "python.linting.tyArgs": ["check"], -} -``` - -### Just/Makefile - -```make -# Makefile -.PHONY: typecheck -typecheck: - ty check - -.PHONY: typecheck-watch -typecheck-watch: - ty check --watch -``` - ---- - -## ty vs mypy vs pyright - -| Feature | ty | mypy | pyright | -|---------|----|----|---------| -| **Speed** | 🚀 10-100x faster | Baseline | Fast (but slower than ty) | -| **Language** | Rust | Python | TypeScript | -| **Config** | Minimal (auto-detects) | Verbose | Verbose | -| **Strictness** | Configurable | Very strict | Very strict | -| **IDE Support** | Growing | Excellent | Excellent (VSCode) | -| **Ecosystem** | New (2024) | Mature (2012) | Mature (2019) | -| **Plugin Support** | Limited | Extensive | Limited | -| **Adoption** | Early | Industry standard | Microsoft standard | - -### Migration from mypy 
- -ty is **mostly compatible** with mypy. You can run both in parallel: - -```toml -[tool.ty.rules] -# Map mypy behavior to ty -invalid-argument-type = "error" # mypy: arg-type -invalid-return-type = "error" # mypy: return-value -unresolved-attribute = "error" # mypy: attr-defined -``` - -Key differences: -- ty is **faster** but **less mature** -- mypy has more plugins (e.g., sqlalchemy, django) -- ty auto-detects more (less config needed) -- ty focuses on speed, mypy on completeness - -**Recommendation**: Use ty in dev for **fast feedback**, keep mypy in CI for **comprehensive checks** (for now). - ---- - -## Troubleshooting - -### ty can't find my virtual environment - -```bash -# Explicitly specify -ty check --python .venv - -# Or in pyproject.toml -[tool.ty.environment] -python = "./.venv" -``` - -### False positives in generated code - -```toml -[tool.ty.src] -exclude = ["src/generated/**", "alembic/versions/**"] -``` - -### ty is too strict - -```toml -# Downgrade specific rules -[tool.ty.rules] -possibly-missing-attribute = "warn" -possibly-unresolved-reference = "warn" -``` - -### Performance profiling - -```bash -# Generate flamegraph -TY_LOG_PROFILE=1 ty check - -# View with flamegraph.pl or speedscope.app -``` - ---- - -## Best Practices for This Project - -### 1. Use ty for fast local development - -```bash -# Quick checks while coding -ty check --watch -``` - -### 2. Keep mypy for CI completeness - -```yaml -# Both in CI -- run: ty check # fast, catches most issues -- run: mypy src/ # thorough, catches edge cases -``` - -### 3. Suppress intentional violations - -```python -# FastAPI dependency injection -async def get_db(db: Annotated[AsyncSession, Depends(get_db_session)]): - # ty might not understand Depends() - return db # ty: ignore[invalid-return-type] -``` - -### 4. 
Configure for async/SQLAlchemy - -```toml -[[tool.ty.overrides]] -include = ["src/repositories/**", "src/services/**"] - -[tool.ty.overrides.rules] -# Async/SQLAlchemy patterns ty might not understand yet -unresolved-attribute = "warn" -``` - ---- - -## Key Takeaways - -✅ **DO**: -- Use `ty check --watch` during development -- Configure `pyproject.toml` for your project -- Enable `unused-ignore-comment` to keep suppressions clean -- Use `--error-on-warning` in CI for strictness - -❌ **DON'T**: -- Blindly suppress errors (investigate first) -- Use `# type: ignore` without rule codes -- Disable important rules globally (use overrides) -- Expect feature parity with mypy (yet) - ---- - -## Quick Reference Card - -```bash -# Development -ty check # check everything -ty check --watch # watch mode -ty check src/models/ # specific directory - -# CI/CD -ty check --error-on-warning # warnings = errors -ty check --output-format github # GitHub annotations - -# Debugging -ty check -vv # very verbose -TY_LOG=debug ty check # ty internal logs - -# Configuration -ty check --python .venv # specify venv -ty check --python-version 3.12 # specify version -ty check --error rule-name # override rule severity -``` - ---- - -## Resources - -- **Official Docs**: https://docs.astral.sh/ty -- **GitHub**: https://github.com/astral-sh/ty -- **Changelog**: https://github.com/astral-sh/ty/releases -- **Rule Reference**: https://docs.astral.sh/ty/reference/rules -- **Astral Blog**: https://astral.sh/blog - ---- - -**Last Updated**: 2025-12-06 -**ty Version**: 0.0.1-alpha.30+ -**Maintained By**: Astral (creators of uv, Ruff) diff --git a/docs/research/VITE.md b/docs/research/VITE.md deleted file mode 100644 index 60d8221..0000000 --- a/docs/research/VITE.md +++ /dev/null @@ -1,274 +0,0 @@ -# Production-ready React + Vite 6 template for 2025 - -A senior frontend engineer building a reusable FastAPI + React + TypeScript template in 2025 should use **Vite 6** with **pnpm**, **Biome** for 
linting/formatting, **Zustand** for client state, **TanStack Query** for server state, and **Tailwind CSS v4** for styling. This configuration prioritizes developer experience, build performance, and production reliability while avoiding over-engineering. - -The most significant shift in 2025 is the consolidation of tooling: Biome replaces ESLint + Prettier with **35x faster performance**, ESLint's flat config (`eslint.config.js`) is now mandatory, and Vite 6's Environment API enables better SSR handling. TypeScript's `moduleResolution: "bundler"` is the correct setting for Vite projects, and the `splitVendorChunkPlugin` has been deprecated in favor of manual chunks. - -## Vite 6 brings breaking changes that matter - -Vite 6 introduced several breaking changes from Vite 5 that affect production templates. The `resolve.conditions` default now explicitly includes `['module', 'browser', 'development|production']`, affecting how packages resolve. JSON stringify behavior changed to `'auto'` mode, and Sass now uses the modern API by default—the legacy API was removed entirely in Vite 7. - -A production-ready `vite.config.ts` should handle environment-specific builds, FastAPI proxy setup, and proper chunk splitting: - -```typescript -import { defineConfig, loadEnv, type PluginOption } from 'vite' -import react from '@vitejs/plugin-react' -import tsconfigPaths from 'vite-tsconfig-paths' -import { visualizer } from 'rollup-plugin-visualizer' - -export default defineConfig(({ mode }) => { - const env = loadEnv(mode, process.cwd(), '') - const isProduction = mode === 'production' - - return { - base: env.VITE_BASE_URL || '/', - plugins: [ - react(), - tsconfigPaths(), - isProduction && visualizer({ - open: true, - gzipSize: true, - brotliSize: true, - }) as PluginOption, - ].filter(Boolean), - - build: { - target: 'ES2022', - sourcemap: isProduction ? 
'hidden' : true, - minify: 'esbuild', - rollupOptions: { - output: { - manualChunks: { - 'react-vendor': ['react', 'react-dom'], - 'router': ['react-router-dom'], - }, - }, - }, - }, - - server: { - port: 3000, - proxy: { - '/api': { - target: 'http://localhost:8000', - changeOrigin: true, - rewrite: (path) => path.replace(/^\/api/, ''), - }, - }, - }, - } -}) -``` - -The `splitVendorChunkPlugin` was deprecated and removed in Vite 7—use `manualChunks` for vendor splitting. Source maps should be `'hidden'` in production to enable error tracking while preventing source code exposure. - -## TypeScript configuration requires the bundler resolution strategy - -Vite projects require `moduleResolution: "bundler"` rather than `node16`, enabling extensionless imports and proper handling of package.json exports. The multi-file tsconfig approach separates browser (app) and Node.js (config) environments: - -```json -// tsconfig.app.json -{ - "compilerOptions": { - "target": "ES2022", - "lib": ["ES2022", "DOM", "DOM.Iterable"], - "module": "ESNext", - "moduleResolution": "bundler", - "jsx": "react-jsx", - "strict": true, - "noEmit": true, - "verbatimModuleSyntax": true, - "erasableSyntaxOnly": true, - "baseUrl": ".", - "paths": { "@/*": ["./src/*"] } - }, - "include": ["src"] -} -``` - -Key TypeScript 5.x features worth enabling include **verbatimModuleSyntax** (enforces explicit type imports), **erasableSyntaxOnly** (ensures transpiler compatibility), and **noUncheckedSideEffectImports** (catches missing side-effect imports). The `vite-tsconfig-paths` plugin automatically syncs path aliases between TypeScript and Vite. 
- -Type-safe environment variables require a declaration file: - -```typescript -// src/vite-env.d.ts -interface ImportMetaEnv { - readonly VITE_API_URL: string - readonly VITE_APP_TITLE: string -} -``` - -## Biome replaces ESLint and Prettier with dramatic speed gains - -**Biome is production-ready in 2025** with 800,000+ weekly npm downloads, 97% Prettier compatibility, and 35x faster performance than ESLint + Prettier combined. Major companies including Shopify, Airbnb, and Mercedes-Benz use it in production. - -```json -// biome.json -{ - "$schema": "https://biomejs.dev/schemas/1.0.0/schema.json", - "formatter": { - "indentStyle": "space", - "indentWidth": 2, - "lineWidth": 100 - }, - "linter": { - "rules": { - "recommended": true, - "correctness": { "noUnusedVariables": "error" } - } - }, - "organizeImports": { "enabled": true } -} -``` - -Migration from ESLint is straightforward: `npx @biomejs/biome migrate eslint --write`. For teams not ready to switch, ESLint's **flat config** (`eslint.config.js`) is now mandatory—the legacy `.eslintrc` format will be removed in ESLint 10. Use `typescript-eslint` with `eslint-plugin-react`, `eslint-plugin-react-hooks`, and `eslint-plugin-jsx-a11y` for accessibility. - -**Oxlint** (50-100x faster than ESLint, written in Rust) reached 1.0 stability and can complement ESLint using `eslint-plugin-oxlint` to disable overlapping rules. 
- -## Project structure should be feature-based for scalability - -Feature-based organization groups related code by domain, enabling independent team workflows: - -``` -src/ -├── features/ # Domain modules -│ ├── auth/ -│ │ ├── components/ -│ │ ├── hooks/ -│ │ ├── api/ -│ │ └── types.ts -│ └── posts/ -├── components/ui/ # Shared primitives (Button, Modal) -├── hooks/ # Shared custom hooks -├── lib/ -│ ├── api/client.ts # Axios instance with interceptors -│ └── query/ # TanStack Query config -├── stores/ # Zustand stores -└── types/ # Global TypeScript types -``` - -**State management hierarchy for 2025**: Use **TanStack Query** for all server state (API data caching), **Zustand** (~2.5KB) for global client state, and **React Context** only for simple shared state like themes. Never store fetched API data in Zustand—let React Query handle caching. - -The Zustand pattern with persistence: - -```typescript -import { create } from 'zustand' -import { persist } from 'zustand/middleware' - -export const useAuthStore = create()( - persist( - (set) => ({ - user: null, - token: null, - login: (user, token) => set({ user, token }), - logout: () => set({ user: null, token: null }), - }), - { name: 'auth-storage', partialize: (s) => ({ token: s.token }) } - ) -) -``` - -## Package managers and enterprise tooling choices - -**pnpm** is the recommended package manager for production: 70% disk space savings, strict dependency resolution preventing phantom dependencies, and excellent monorepo support. Benchmarks show pnpm with cache/lockfile completing installs in **761ms** versus npm's 1.3s. 
- -| Tool | Recommendation | -|------|----------------| -| Package manager | **pnpm** (or Bun for new projects) | -| Git hooks | **Lefthook** (parallel execution, Go binary) | -| Formatting/linting | **Biome** (or ESLint flat config + Prettier) | -| Testing | **Vitest** with React Testing Library | -| API mocking | **MSW 2.0** for dev and testing | -| Dep updates | **Renovate** (superior to Dependabot for monorepos) | - -Essential config files for a production template: -- `.editorconfig` — Cross-IDE consistency, still relevant -- `.nvmrc` — Node version pinning (critical for CI/CD) -- `browserslist` in package.json — Target browser specification -- `lefthook.yml` — Pre-commit linting and type checking - -Skip Stylelint when using Tailwind CSS—the Tailwind IntelliSense VS Code extension provides sufficient class ordering. - -## Production hardening requires deliberate security measures - -**Error tracking** with Sentry requires the `@sentry/vite-plugin` to upload source maps, with `filesToDeleteAfterUpload` to prevent source code leakage: - -```typescript -sentryVitePlugin({ - sourcemaps: { - filesToDeleteAfterUpload: ["./**/*.map"], - }, -}) -``` - -**Critical security practices**: -- Never put secrets in `VITE_` environment variables—they're embedded in the client bundle -- Use `sourcemap: 'hidden'` in production (creates maps for error tracking without exposing them) -- Implement CSP headers via your server/CDN, not meta tags -- React auto-escapes JSX, but never use `dangerouslySetInnerHTML` with user input - -**Web Vitals tracking** is essential. The core metrics for 2025 are LCP (<2.5s), INP (<200ms, replaced FID), and CLS (<0.1). Use the attribution build (`web-vitals/attribution`) for debugging performance issues. - -For bundle optimization, use `rollup-plugin-visualizer` to identify bloat, implement route-based lazy loading with `React.lazy()`, and preload routes on hover for faster transitions. 
- -## Cutting-edge tools: what's actually production-ready - -| Tool | Status | Recommendation | -|------|--------|----------------| -| **Biome** | ✅ Production-ready | Use it—replaces ESLint + Prettier | -| **Bun** | ✅ Stable for package management | Viable alternative to pnpm | -| **Oxlint** | ✅ 1.0 stable | Complement ESLint for speed | -| **Lightning CSS** | ✅ Stable in Vite | Skip if using Tailwind (requires PostCSS) | -| **Rspack** | ✅ 1.0 production-ready | Drop-in Webpack replacement | -| **Turbopack** | ⚠️ Next.js only, alpha for prod | Wait for broader ecosystem support | - -Lightning CSS provides 100x faster CSS processing than PostCSS but isn't compatible with Tailwind CSS. Use it only for vanilla CSS workflows. - -## CI/CD pipeline essentials - -A production CI pipeline should include type checking, linting, bundle size monitoring, and Lighthouse audits: - -```yaml -- run: pnpm install --frozen-lockfile -- run: pnpm typecheck # tsc --noEmit -- run: pnpm lint # biome ci -- run: pnpm build -- run: lhci autorun # Lighthouse CI -``` - -Use **compressed-size-action** or **size-limit** for bundle size monitoring to catch regressions before deployment. Lighthouse CI with performance budgets (`categories:performance > 0.9`) prevents performance degradation. - -**Renovate** is superior to Dependabot for monorepos, offering advanced grouping, a dependency dashboard, and support for 90+ package managers versus Dependabot's 14. - -## Essential plugins for a production Vite template - -```typescript -plugins: [ - react(), - tsconfigPaths(), // Path alias sync - svgr({ include: '**/*.svg?react' }), // SVG as components - checker({ typescript: true }), // Dev-time type errors - VitePWA({ registerType: 'autoUpdate' }), // If PWA needed - isProduction && visualizer(), // Bundle analysis -] -``` - -Include PWA support only when offline capability or installability adds genuine user value—it introduces complexity that many applications don't need. 
- -## The complete recommended stack - -For a senior-level, production-ready React + Vite 6 + FastAPI template in 2025: - -- **Build tool**: Vite 6 with `manualChunks` for vendor splitting -- **Package manager**: pnpm with strict lockfile -- **TypeScript**: Strict mode, `moduleResolution: "bundler"`, path aliases -- **Linting/formatting**: Biome (or ESLint flat config + Prettier) -- **Styling**: Tailwind CSS v4 with CSS-first configuration -- **State**: TanStack Query (server) + Zustand (client) -- **Routing**: React Router v6 with lazy loading -- **Testing**: Vitest + React Testing Library + MSW -- **Error tracking**: Sentry with hidden source maps -- **Git hooks**: Lefthook + lint-staged -- **CI/CD**: Type check → Lint → Build → Lighthouse - -This configuration balances modern tooling with production stability, avoiding bleeding-edge tools that haven't proven enterprise reliability while embracing genuinely superior alternatives like Biome that have earned industry trust. diff --git a/docs/research/ZUSTAND.md b/docs/research/ZUSTAND.md deleted file mode 100644 index 8e061a2..0000000 --- a/docs/research/ZUSTAND.md +++ /dev/null @@ -1,2962 +0,0 @@ -# Zustand Production Patterns: JWT Auth + UI State Management (2025) - -**Comprehensive Research for React + Vite + FastAPI Production Template** - ---- - -## Table of Contents - -1. [Executive Summary](#executive-summary) -2. [Zustand v4+ Fundamentals](#zustand-v4-fundamentals) -3. [JWT Token Storage: The 2025 Consensus](#jwt-token-storage-the-2025-consensus) -4. [Auth Store Architecture](#auth-store-architecture) -5. [Token Refresh Patterns](#token-refresh-patterns) -6. [UI State Stores (Per-Page Patterns)](#ui-state-stores-per-page-patterns) -7. [Persistence Middleware Deep Dive](#persistence-middleware-deep-dive) -8. [Selectors & Performance Optimization](#selectors--performance-optimization) -9. [Form State Management](#form-state-management) -10. [Cross-Tab Synchronization](#cross-tab-synchronization) -11. 
[Protected Routes Integration](#protected-routes-integration) -12. [TypeScript Patterns](#typescript-patterns) -13. [Middleware Combinations](#middleware-combinations) -14. [Testing Strategies](#testing-strategies) -15. [Security Best Practices](#security-best-practices) -16. [Anti-Patterns to Avoid](#anti-patterns-to-avoid) -17. [Production Checklist](#production-checklist) - ---- - -## Executive Summary - -### The 2025 Consensus for JWT Storage in SPAs - -**Short Answer:** Access token in **memory** (React state/Zustand), refresh token in **httpOnly cookie**. - -**Why this matters:** -- Access tokens in localStorage = XSS vulnerable -- Both tokens in httpOnly cookies = CSRF vulnerable + can't set `Authorization` header -- Memory-only = lost on refresh (bad UX) -- **The hybrid approach** balances security and UX - -### Key Architectural Decisions - -1. **Auth Store**: Store access token in Zustand (memory), use httpOnly cookies for refresh token -2. **UI Stores**: Per-page stores for UI state with selective persistence -3. **Form State**: Use Zustand for drafts, React Hook Form for validation -4. **Token Refresh**: Axios interceptors on 401, with request queue pattern -5. **Persistence**: `partialize` to exclude sensitive data, version for migrations -6. **Selectors**: Use `useShallow` for multiple selections, atomic selectors for primitives -7. 
**Cross-Tab**: BroadcastChannel or localStorage events for sync - ---- - -## Zustand v4+ Fundamentals - -### What's New in v4+ - -**Major Changes from v3:** -- **Curried create syntax**: `create()(...)` for better TypeScript inference -- **Improved middleware typing**: Automatic type inference for middleware chains -- **`useShallow` hook**: Replaces `shallow` import for React components -- **Vanilla store separation**: `createStore` for vanilla JS, `create` for React -- **Better devtools integration**: Enhanced Redux DevTools support - -### Basic Store Creation (v4 Pattern) - -```typescript -// src/core/lib/auth.store.ts -import { create } from 'zustand' - -interface AuthState { - accessToken: string | null - user: User | null - isAuthenticated: boolean -} - -interface AuthActions { - setTokens: (token: string, user: User) => void - clearAuth: () => void -} - -type AuthStore = AuthState & AuthActions - -// v4 curried syntax for better TypeScript inference -export const useAuthStore = create()((set) => ({ - // State - accessToken: null, - user: null, - isAuthenticated: false, - - // Actions - setTokens: (token, user) => set({ - accessToken: token, - user, - isAuthenticated: true - }), - clearAuth: () => set({ - accessToken: null, - user: null, - isAuthenticated: false - }), -})) -``` - -### Slice Pattern for Large Stores - -```typescript -// src/pages/Dashboard/stores/slices/dashboard-ui.slice.ts -import { StateCreator } from 'zustand' - -interface DashboardUISlice { - isSidebarOpen: boolean - activeModal: 'create' | 'edit' | null - toggleSidebar: () => void - openModal: (type: 'create' | 'edit') => void - closeModal: () => void -} - -export const createDashboardUISlice: StateCreator< - DashboardUISlice, - [], - [], - DashboardUISlice -> = (set) => ({ - isSidebarOpen: true, - activeModal: null, - - toggleSidebar: () => set((state) => ({ - isSidebarOpen: !state.isSidebarOpen - })), - openModal: (type) => set({ activeModal: type }), - closeModal: () => set({ 
activeModal: null }), -}) - -// Combine slices -import { create } from 'zustand' -import { createDashboardUISlice } from './slices/dashboard-ui.slice' - -export const useDashboardStore = create()((...a) => ({ - ...createDashboardUISlice(...a), -})) -``` - ---- - -## JWT Token Storage: The 2025 Consensus - -### The Security Landscape - -**localStorage/sessionStorage:** -- ❌ **Vulnerable to XSS** - Any malicious script can read tokens -- ✅ Easy to implement -- ✅ Persists across refreshes - -**httpOnly Cookies:** -- ✅ **Immune to XSS** - JavaScript cannot read the cookie -- ❌ Vulnerable to CSRF (mitigated with SameSite=Strict) -- ❌ Cannot set `Authorization: Bearer` header from client -- ✅ Automatically sent with requests - -**Memory Only (React State/Zustand):** -- ✅ **Immune to XSS** - No persistence layer to attack -- ❌ Lost on page refresh (bad UX) -- ❌ Lost on new tab (bad UX) - -### The 2025 Best Practice: Hybrid Approach - -``` -┌─────────────────────────────────────────┐ -│ CLIENT (Browser) │ -│ │ -│ ┌─────────────────┐ ┌──────────────┐ │ -│ │ Zustand Store │ │ httpOnly │ │ -│ │ (Memory) │ │ Cookie │ │ -│ │ │ │ │ │ -│ │ accessToken ✓ │ │ refreshToken │ │ -│ │ user data ✓ │ │ (server-set) │ │ -│ └─────────────────┘ └──────────────┘ │ -│ │ -└─────────────────────────────────────────┘ - │ │ - │ Bearer {access} │ Cookie (auto) - ▼ ▼ -┌─────────────────────────────────────────┐ -│ SERVER (FastAPI) │ -│ │ -│ POST /auth/login │ -│ → Returns: { accessToken, user } │ -│ → Sets: refreshToken in httpOnly cookie│ -│ │ -│ GET /auth/refresh │ -│ → Reads: refreshToken from cookie │ -│ → Returns: { accessToken } │ -└─────────────────────────────────────────┘ -``` - -### Implementation: Auth Store - -```typescript -// src/core/lib/auth.store.ts -import { create } from 'zustand' -import { devtools } from 'zustand/middleware' - -interface User { - id: string - email: string - name: string - role: string -} - -interface AuthState { - // Access token stored in memory - 
accessToken: string | null - user: User | null - isAuthenticated: boolean - isLoading: boolean -} - -interface AuthActions { - setAuth: (token: string, user: User) => void - clearAuth: () => void - refreshAccessToken: () => Promise - setLoading: (loading: boolean) => void -} - -type AuthStore = AuthState & AuthActions - -export const useAuthStore = create()( - devtools( - (set, get) => ({ - // State - accessToken: null, - user: null, - isAuthenticated: false, - isLoading: true, - - // Actions - setAuth: (token, user) => set({ - accessToken: token, - user, - isAuthenticated: true, - isLoading: false - }, false, 'auth/setAuth'), - - clearAuth: () => set({ - accessToken: null, - user: null, - isAuthenticated: false, - isLoading: false - }, false, 'auth/clearAuth'), - - refreshAccessToken: async () => { - try { - // Call refresh endpoint (refresh token sent via httpOnly cookie) - const response = await fetch('/api/auth/refresh', { - method: 'POST', - credentials: 'include', // Important: sends cookies - }) - - if (!response.ok) { - throw new Error('Refresh failed') - } - - const { accessToken, user } = await response.json() - get().setAuth(accessToken, user) - } catch (error) { - get().clearAuth() - throw error - } - }, - - setLoading: (loading) => set({ isLoading: loading }), - }), - { name: 'AuthStore' } - ) -) - -// Convenient selectors -export const selectAccessToken = (state: AuthStore) => state.accessToken -export const selectUser = (state: AuthStore) => state.user -export const selectIsAuthenticated = (state: AuthStore) => state.isAuthenticated -export const selectIsLoading = (state: AuthStore) => state.isLoading -``` - -### Why This Works - -1. **Access Token in Memory**: - - Used for API requests via `Authorization: Bearer {token}` - - XSS attacks can't steal it from localStorage - - Lost on refresh, but we use refresh token to get a new one - -2. 
**Refresh Token in httpOnly Cookie**: - - Set by server: `Set-Cookie: refreshToken=...; HttpOnly; Secure; SameSite=Strict` - - JavaScript cannot read it (XSS protection) - - CSRF mitigated by SameSite=Strict - - Automatically sent on `/auth/refresh` requests - -3. **Page Refresh Flow**: - ``` - User refreshes page - → Access token lost (Zustand memory cleared) - → App calls /auth/refresh on mount - → Browser automatically sends refreshToken cookie - → Server returns new accessToken - → Store in Zustand - ``` - -### Security Considerations - -**XSS Protection:** -- ✅ Access token not in localStorage = safe from XSS -- ✅ Refresh token not readable by JS = safe from XSS -- ⚠️ Still need CSP headers and input sanitization - -**CSRF Protection:** -- ✅ SameSite=Strict prevents cross-origin requests with cookies -- ✅ Access token uses `Authorization` header (not cookies) = CSRF immune -- ⚠️ For older browsers, implement CSRF token pattern - -**Token Expiry:** -- Access token: Short-lived (5-15 minutes) -- Refresh token: Longer-lived (7-30 days) -- Implement token rotation on refresh - ---- - -## Auth Store Architecture - -### Full Production Auth Store - -```typescript -// src/core/lib/auth.store.ts -import { create } from 'zustand' -import { devtools } from 'zustand/middleware' -import { api } from '@/core/api/client' - -interface User { - id: string - email: string - name: string - role: 'admin' | 'user' - permissions: string[] -} - -interface AuthState { - accessToken: string | null - user: User | null - isAuthenticated: boolean - isLoading: boolean - error: string | null -} - -interface AuthActions { - // Core auth actions - setAuth: (token: string, user: User) => void - clearAuth: () => void - - // Login/Logout - login: (email: string, password: string) => Promise - logout: () => Promise - - // Token management - refreshAccessToken: () => Promise - - // Utility - setLoading: (loading: boolean) => void - setError: (error: string | null) => void - checkAuth: () => 
Promise -} - -type AuthStore = AuthState & AuthActions - -export const useAuthStore = create()( - devtools( - (set, get) => ({ - // Initial State - accessToken: null, - user: null, - isAuthenticated: false, - isLoading: true, - error: null, - - // Core Actions - setAuth: (token, user) => set( - { - accessToken: token, - user, - isAuthenticated: true, - isLoading: false, - error: null - }, - false, - 'auth/setAuth' - ), - - clearAuth: () => set( - { - accessToken: null, - user: null, - isAuthenticated: false, - isLoading: false, - error: null - }, - false, - 'auth/clearAuth' - ), - - // Login - login: async (email, password) => { - set({ isLoading: true, error: null }) - try { - // Server sets refreshToken as httpOnly cookie - const { accessToken, user } = await api.post('/auth/login', { - email, - password - }) - get().setAuth(accessToken, user) - } catch (error) { - const message = error instanceof Error ? error.message : 'Login failed' - set({ error: message, isLoading: false }) - throw error - } - }, - - // Logout - logout: async () => { - try { - // Server clears refreshToken cookie - await api.post('/auth/logout') - } finally { - get().clearAuth() - } - }, - - // Refresh Token - refreshAccessToken: async () => { - try { - // refreshToken sent automatically via httpOnly cookie - const { accessToken, user } = await api.post('/auth/refresh') - get().setAuth(accessToken, user) - } catch (error) { - get().clearAuth() - throw error - } - }, - - // Check Auth on App Mount - checkAuth: async () => { - set({ isLoading: true }) - try { - await get().refreshAccessToken() - } catch { - get().clearAuth() - } - }, - - // Utility - setLoading: (loading) => set({ isLoading: loading }), - setError: (error) => set({ error }), - }), - { name: 'AuthStore' } - ) -) - -// Selectors -export const selectAuth = (state: AuthStore) => ({ - accessToken: state.accessToken, - user: state.user, - isAuthenticated: state.isAuthenticated, -}) - -export const selectIsLoading = (state: 
AuthStore) => state.isLoading -export const selectUser = (state: AuthStore) => state.user -export const selectHasRole = (role: string) => (state: AuthStore) => - state.user?.role === role -export const selectHasPermission = (permission: string) => (state: AuthStore) => - state.user?.permissions.includes(permission) -``` - -### FastAPI Backend Integration - -```python -# backend/app/routes/auth.py -from fastapi import APIRouter, Response, Depends, HTTPException -from fastapi.security import HTTPBearer -from datetime import datetime, timedelta -import jwt - -router = APIRouter(prefix="/auth", tags=["auth"]) -security = HTTPBearer() - -@router.post("/login") -async def login( - credentials: LoginCredentials, - response: Response -): - # Validate credentials - user = await authenticate_user(credentials.email, credentials.password) - if not user: - raise HTTPException(401, "Invalid credentials") - - # Generate tokens - access_token = create_access_token(user.id, expires_delta=timedelta(minutes=15)) - refresh_token = create_refresh_token(user.id, expires_delta=timedelta(days=30)) - - # Set refresh token as httpOnly cookie - response.set_cookie( - key="refreshToken", - value=refresh_token, - httponly=True, - secure=True, # HTTPS only - samesite="strict", # CSRF protection - max_age=30 * 24 * 60 * 60 # 30 days - ) - - # Return access token in response body - return { - "accessToken": access_token, - "user": user.to_dict() - } - -@router.post("/refresh") -async def refresh(request: Request): - # Extract refresh token from cookie - refresh_token = request.cookies.get("refreshToken") - if not refresh_token: - raise HTTPException(401, "No refresh token") - - try: - # Validate refresh token - payload = jwt.decode(refresh_token, SECRET_KEY, algorithms=["HS256"]) - user_id = payload["sub"] - user = await get_user(user_id) - - # Generate new access token - access_token = create_access_token(user.id, expires_delta=timedelta(minutes=15)) - - return { - "accessToken": access_token, - 
"user": user.to_dict() - } - except jwt.ExpiredSignatureError: - raise HTTPException(401, "Refresh token expired") - except jwt.InvalidTokenError: - raise HTTPException(401, "Invalid refresh token") - -@router.post("/logout") -async def logout(response: Response): - # Clear refresh token cookie - response.delete_cookie( - key="refreshToken", - httponly=True, - secure=True, - samesite="strict" - ) - return {"message": "Logged out"} -``` - ---- - -## Token Refresh Patterns - -### Axios Interceptor Setup - -```typescript -// src/core/api/client.ts -import axios from 'axios' -import { useAuthStore } from '@/core/lib/auth.store' - -// Create axios instance -export const api = axios.create({ - baseURL: import.meta.env.VITE_API_URL, - withCredentials: true, // Important: sends cookies -}) - -// Request queue for handling concurrent requests during token refresh -let isRefreshing = false -let failedQueue: Array<{ - resolve: (value?: any) => void - reject: (reason?: any) => void -}> = [] - -const processQueue = (error: any, token: string | null = null) => { - failedQueue.forEach(prom => { - if (error) { - prom.reject(error) - } else { - prom.resolve(token) - } - }) - failedQueue = [] -} - -// Request interceptor: Add access token to headers -api.interceptors.request.use( - (config) => { - const token = useAuthStore.getState().accessToken - - if (token && config.headers) { - config.headers.Authorization = `Bearer ${token}` - } - - return config - }, - (error) => Promise.reject(error) -) - -// Response interceptor: Handle 401 and refresh token -api.interceptors.response.use( - (response) => response.data, // Return data directly - async (error) => { - const originalRequest = error.config - - // If error is not 401 or request already retried, reject - if (error.response?.status !== 401 || originalRequest._retry) { - return Promise.reject(error) - } - - // If token refresh is already in progress, queue this request - if (isRefreshing) { - return new Promise((resolve, reject) => 
{ - failedQueue.push({ resolve, reject }) - }) - .then((token) => { - originalRequest.headers.Authorization = `Bearer ${token}` - return api(originalRequest) - }) - .catch((err) => Promise.reject(err)) - } - - // Mark request as retried - originalRequest._retry = true - isRefreshing = true - - try { - // Attempt to refresh token - await useAuthStore.getState().refreshAccessToken() - const newToken = useAuthStore.getState().accessToken - - // Process queued requests - processQueue(null, newToken) - - // Retry original request with new token - originalRequest.headers.Authorization = `Bearer ${newToken}` - return api(originalRequest) - } catch (refreshError) { - // Refresh failed - clear auth and reject all queued requests - processQueue(refreshError, null) - useAuthStore.getState().clearAuth() - - // Redirect to login - window.location.href = '/login' - - return Promise.reject(refreshError) - } finally { - isRefreshing = false - } - } -) -``` - -### Alternative: Proactive Token Refresh - -```typescript -// src/core/api/token-refresh.ts -import { useAuthStore } from '@/core/lib/auth.store' -import { jwtDecode } from 'jwt-decode' - -interface JwtPayload { - exp: number - iat: number - sub: string -} - -// Start background token refresh -export function startTokenRefreshTimer() { - const checkAndRefresh = async () => { - const { accessToken, refreshAccessToken } = useAuthStore.getState() - - if (!accessToken) return - - try { - const decoded = jwtDecode(accessToken) - const expiresAt = decoded.exp * 1000 // Convert to ms - const now = Date.now() - const timeUntilExpiry = expiresAt - now - - // Refresh if token expires in less than 2 minutes - if (timeUntilExpiry < 2 * 60 * 1000) { - await refreshAccessToken() - } - } catch (error) { - console.error('Token refresh check failed:', error) - } - } - - // Check every minute - setInterval(checkAndRefresh, 60 * 1000) - - // Also check immediately - checkAndRefresh() -} - -// In App.tsx -import { startTokenRefreshTimer } from 
'@/core/api/token-refresh' - -function App() { - useEffect(() => { - const timer = startTokenRefreshTimer() - return () => clearInterval(timer) - }, []) - - // ... -} -``` - -### Request Queue Pattern Explained - -**Why we need it:** -When an access token expires, multiple API requests might fail with 401 simultaneously. Without a queue, each request would try to refresh the token, causing race conditions. - -**How it works:** -1. First 401 triggers refresh, sets `isRefreshing = true` -2. Subsequent 401s are queued in `failedQueue` -3. After successful refresh, all queued requests are retried with new token -4. If refresh fails, all queued requests are rejected - -**Visual Flow:** -``` -Request 1 (401) ─┐ -Request 2 (401) ─┼─→ Queued ─→ Wait for refresh -Request 3 (401) ─┘ - │ - ├─ isRefreshing = true - ├─ Call /auth/refresh - ├─ Get new accessToken - ├─ Process queue with new token - └─ isRefreshing = false - │ - ├─→ Request 1 retried ✓ - ├─→ Request 2 retried ✓ - └─→ Request 3 retried ✓ -``` - ---- - -## UI State Stores (Per-Page Patterns) - -### When to Create a UI Store - -**Create a per-page UI store when:** -- UI state needs persistence across refreshes (sidebar open/closed, view mode) -- State is shared across multiple components in that page -- State is complex (modals, multi-step forms, filters) - -**Don't create a store when:** -- State is local to one component (use `useState`) -- State doesn't need persistence -- State is server-derived (use TanStack Query) - -### Dashboard UI Store Example - -```typescript -// src/pages/Dashboard/stores/dashboard-ui.store.ts -import { create } from 'zustand' -import { persist, createJSONStorage } from 'zustand/middleware' -import { devtools } from 'zustand/middleware' - -interface DashboardUIState { - // Sidebar - isSidebarOpen: boolean - sidebarWidth: number - - // Modals - activeModal: 'create' | 'edit' | 'delete' | null - modalData: any | null - - // View preferences - viewMode: 'grid' | 'list' - sortBy: 'name' | 
'date' | 'size' - sortOrder: 'asc' | 'desc' - - // Filters - filters: { - status: string[] - tags: string[] - dateRange: { start: string; end: string } | null - } - - // Selection - selectedItems: Set -} - -interface DashboardUIActions { - // Sidebar - toggleSidebar: () => void - setSidebarWidth: (width: number) => void - - // Modals - openModal: (type: 'create' | 'edit' | 'delete', data?: any) => void - closeModal: () => void - - // View - setViewMode: (mode: 'grid' | 'list') => void - setSortBy: (sortBy: string, order: 'asc' | 'desc') => void - - // Filters - setFilter: (key: keyof DashboardUIState['filters'], value: any) => void - clearFilters: () => void - - // Selection - selectItem: (id: string) => void - deselectItem: (id: string) => void - clearSelection: () => void - selectAll: (ids: string[]) => void -} - -type DashboardUIStore = DashboardUIState & DashboardUIActions - -export const useDashboardUIStore = create()( - devtools( - persist( - (set, get) => ({ - // State - isSidebarOpen: true, - sidebarWidth: 280, - activeModal: null, - modalData: null, - viewMode: 'grid', - sortBy: 'date', - sortOrder: 'desc', - filters: { - status: [], - tags: [], - dateRange: null, - }, - selectedItems: new Set(), - - // Actions - toggleSidebar: () => set((state) => ({ - isSidebarOpen: !state.isSidebarOpen - })), - - setSidebarWidth: (width) => set({ sidebarWidth: width }), - - openModal: (type, data = null) => set({ - activeModal: type, - modalData: data - }), - - closeModal: () => set({ - activeModal: null, - modalData: null - }), - - setViewMode: (mode) => set({ viewMode: mode }), - - setSortBy: (sortBy, order) => set({ - sortBy: sortBy as any, - sortOrder: order - }), - - setFilter: (key, value) => set((state) => ({ - filters: { ...state.filters, [key]: value } - })), - - clearFilters: () => set({ - filters: { - status: [], - tags: [], - dateRange: null, - } - }), - - selectItem: (id) => set((state) => { - const newSet = new Set(state.selectedItems) - newSet.add(id) - 
return { selectedItems: newSet } - }), - - deselectItem: (id) => set((state) => { - const newSet = new Set(state.selectedItems) - newSet.delete(id) - return { selectedItems: newSet } - }), - - clearSelection: () => set({ selectedItems: new Set() }), - - selectAll: (ids) => set({ selectedItems: new Set(ids) }), - }), - { - name: 'dashboard-ui-storage', - storage: createJSONStorage(() => localStorage), - - // Only persist certain fields - partialize: (state) => ({ - isSidebarOpen: state.isSidebarOpen, - sidebarWidth: state.sidebarWidth, - viewMode: state.viewMode, - sortBy: state.sortBy, - sortOrder: state.sortOrder, - // Don't persist: modals, selection, filters (ephemeral) - }), - - // Custom serialization for Set - serialize: (state) => { - return JSON.stringify({ - state: { - ...state.state, - selectedItems: Array.from(state.state.selectedItems) - } - }) - }, - - deserialize: (str) => { - const parsed = JSON.parse(str) - return { - state: { - ...parsed.state, - selectedItems: new Set(parsed.state.selectedItems || []) - } - } - }, - } - ), - { name: 'DashboardUI' } - ) -) - -// Selectors -export const selectSidebar = (state: DashboardUIStore) => ({ - isOpen: state.isSidebarOpen, - width: state.sidebarWidth, -}) - -export const selectModal = (state: DashboardUIStore) => ({ - type: state.activeModal, - data: state.modalData, -}) - -export const selectView = (state: DashboardUIStore) => ({ - mode: state.viewMode, - sortBy: state.sortBy, - sortOrder: state.sortOrder, -}) -``` - -### Usage in Components - -```typescript -// src/pages/Dashboard/components/Sidebar.tsx -import { useDashboardUIStore } from '../stores/dashboard-ui.store' -import { useShallow } from 'zustand/react/shallow' - -function Sidebar() { - // Efficient: Only re-renders when these values change - const { isOpen, width } = useDashboardUIStore( - useShallow((state) => ({ - isOpen: state.isSidebarOpen, - width: state.sidebarWidth, - })) - ) - - const toggleSidebar = useDashboardUIStore((state) => 
state.toggleSidebar) - - return ( - - ) -} -``` - ---- - -## Persistence Middleware Deep Dive - -### partialize: Selective Persistence - -**Rule:** Never persist functions, only persist necessary state. - -```typescript -import { create } from 'zustand' -import { persist, createJSONStorage } from 'zustand/middleware' - -interface StoreState { - // Persist - theme: 'light' | 'dark' - preferences: { fontSize: number } - - // Don't persist (ephemeral) - isModalOpen: boolean - currentPage: number -} - -const useStore = create()( - persist( - (set) => ({ - theme: 'light', - preferences: { fontSize: 14 }, - isModalOpen: false, - currentPage: 1, - // actions... - }), - { - name: 'app-settings', - - // Method 1: Whitelist specific keys - partialize: (state) => ({ - theme: state.theme, - preferences: state.preferences, - }), - - // Method 2: Blacklist keys (filter out unwanted) - // partialize: (state) => - // Object.fromEntries( - // Object.entries(state).filter(([key]) => - // !['isModalOpen', 'currentPage'].includes(key) - // ) - // ), - } - ) -) -``` - -### Version Management & Migration - -```typescript -import { create } from 'zustand' -import { persist } from 'zustand/middleware' - -interface StoreV2 { - version: 2 - settings: { - theme: 'light' | 'dark' | 'auto' // Added 'auto' - locale: string // Added locale - } -} - -const useStore = create()( - persist( - (set) => ({ - version: 2, - settings: { - theme: 'light', - locale: 'en', - }, - }), - { - name: 'app-store', - version: 2, // Current version - - // Migration function - migrate: (persistedState: any, version: number) => { - if (version === 1) { - // Migrate from v1 to v2 - return { - version: 2, - settings: { - theme: persistedState.theme || 'light', - locale: 'en', // New field with default - }, - } - } - - return persistedState - }, - - // Called if migration fails - onRehydrateStorage: () => (state, error) => { - if (error) { - console.error('Hydration failed:', error) - // Could reset to defaults here - } 
- }, - } - ) -) -``` - -### Storage Options - -```typescript -// localStorage (default) - persists forever -storage: createJSONStorage(() => localStorage) - -// sessionStorage - cleared on tab close -storage: createJSONStorage(() => sessionStorage) - -// IndexedDB - for large data -import { get, set, del } from 'idb-keyval' - -storage: { - getItem: async (name) => { - return (await get(name)) || null - }, - setItem: async (name, value) => { - await set(name, value) - }, - removeItem: async (name) => { - await del(name) - }, -} - -// Custom encryption -import CryptoJS from 'crypto-js' - -const SECRET_KEY = import.meta.env.VITE_STORAGE_KEY - -storage: { - getItem: (name) => { - const encrypted = localStorage.getItem(name) - if (!encrypted) return null - - try { - const decrypted = CryptoJS.AES.decrypt(encrypted, SECRET_KEY).toString( - CryptoJS.enc.Utf8 - ) - return decrypted - } catch { - return null - } - }, - setItem: (name, value) => { - const encrypted = CryptoJS.AES.encrypt(value, SECRET_KEY).toString() - localStorage.setItem(name, encrypted) - }, - removeItem: (name) => localStorage.removeItem(name), -} -``` - -### Handling Storage Quota - -```typescript -import { persist } from 'zustand/middleware' - -persist( - (set) => ({ /* store */ }), - { - name: 'app-store', - - onRehydrateStorage: () => (state, error) => { - if (error) { - // Check if quota exceeded - if (error.name === 'QuotaExceededError') { - console.error('Storage quota exceeded') - - // Strategy 1: Clear old data - const storeNames = ['old-store-1', 'old-store-2'] - storeNames.forEach(name => localStorage.removeItem(name)) - - // Strategy 2: Compress data - // Strategy 3: Move to IndexedDB - } - } - }, - } -) -``` - ---- - -## Selectors & Performance Optimization - -### The Golden Rule - -**Atomic selectors** for primitives = ✅ Efficient -**Object selectors** without `useShallow` = ❌ Re-renders on every store update - -### Atomic Selectors (Recommended) - -```typescript -// ✅ GOOD: Atomic 
selectors -function Component() { - // Only re-renders when count changes - const count = useStore((state) => state.count) - - // Only re-renders when increment changes (never, it's a function) - const increment = useStore((state) => state.increment) - - return -} -``` - -### Object Selectors with useShallow - -```typescript -import { useShallow } from 'zustand/react/shallow' - -// ✅ GOOD: Object selector with useShallow -function Component() { - const { count, text, increment } = useStore( - useShallow((state) => ({ - count: state.count, - text: state.text, - increment: state.increment, - })) - ) - - return ( -
-    <div>
-      <p>{count}</p>
-      <p>{text}</p>
-      <button onClick={increment}>Increment</button>
-    </div>
- ) -} -``` - -### Array Selectors - -```typescript -// ✅ GOOD: Array selector with useShallow -const [nuts, honey] = useStore( - useShallow((state) => [state.nuts, state.honey]) -) - -// ✅ GOOD: Computed array with useShallow -const names = useStore( - useShallow((state) => Object.keys(state.users)) -) -``` - -### Anti-Pattern: Subscribing to Entire Store - -```typescript -// ❌ BAD: Re-renders on ANY state change -const state = useStore() - -// ❌ BAD: Same problem -const { count, text, increment } = useStore((state) => ({ - count: state.count, - text: state.text, - increment: state.increment, -})) -// Missing useShallow means new object every render! -``` - -### Custom Selectors for Reusability - -```typescript -// src/pages/Dashboard/stores/dashboard-ui.store.ts - -// Export selectors with the store -export const selectSidebar = (state: DashboardUIStore) => ({ - isOpen: state.isSidebarOpen, - width: state.sidebarWidth, -}) - -export const selectFilters = (state: DashboardUIStore) => state.filters - -export const selectHasSelection = (state: DashboardUIStore) => - state.selectedItems.size > 0 - -// Usage -import { useDashboardUIStore, selectSidebar } from '../stores/dashboard-ui.store' -import { useShallow } from 'zustand/react/shallow' - -function Sidebar() { - const sidebar = useDashboardUIStore(useShallow(selectSidebar)) - // ... 
-} -``` - -### Derived State / Computed Values - -```typescript -// Method 1: Compute in selector -const totalPrice = useStore((state) => - state.cart.reduce((sum, item) => sum + item.price * item.quantity, 0) -) - -// Method 2: Memoize with useMemo -const totalPrice = useMemo( - () => cart.reduce((sum, item) => sum + item.price * item.quantity, 0), - [cart] -) - -// Method 3: Store computed values (if expensive) -interface StoreState { - cart: CartItem[] - _totalPrice: number // Cached computed value - - addToCart: (item: CartItem) => void -} - -const useStore = create((set) => ({ - cart: [], - _totalPrice: 0, - - addToCart: (item) => set((state) => { - const newCart = [...state.cart, item] - const newTotal = newCart.reduce( - (sum, i) => sum + i.price * i.quantity, - 0 - ) - return { - cart: newCart, - _totalPrice: newTotal, - } - }), -})) -``` - -### Performance Monitoring - -```typescript -// Detect unnecessary re-renders -function Component() { - const renderCount = useRef(0) - renderCount.current++ - - console.log(`Component rendered ${renderCount.current} times`) - - const count = useStore((state) => state.count) - - return
<div>{count}</div>
-} - -// React DevTools Profiler -// Use React DevTools to identify components that re-render too often -``` - ---- - -## Form State Management - -### When to Use Zustand for Forms - -**Use Zustand when:** -- ✅ Draft persistence across refreshes -- ✅ Multi-step forms with state across pages -- ✅ Form state shared across components -- ✅ Auto-save functionality - -**Use React Hook Form when:** -- ✅ Form validation -- ✅ Field-level errors -- ✅ Controlled inputs -- ✅ Schema validation (Zod, Yup) - -### The Hybrid Approach (Recommended) - -```typescript -// src/pages/Dashboard/stores/form-drafts.store.ts -import { create } from 'zustand' -import { persist, createJSONStorage } from 'zustand/middleware' - -interface FormDraft { - formId: string - data: Record - lastSaved: number -} - -interface FormDraftsState { - drafts: Record -} - -interface FormDraftsActions { - saveDraft: (formId: string, data: Record) => void - loadDraft: (formId: string) => FormDraft | null - deleteDraft: (formId: string) => void - clearOldDrafts: (maxAgeMs: number) => void -} - -type FormDraftsStore = FormDraftsState & FormDraftsActions - -export const useFormDraftsStore = create()( - persist( - (set, get) => ({ - drafts: {}, - - saveDraft: (formId, data) => set((state) => ({ - drafts: { - ...state.drafts, - [formId]: { - formId, - data, - lastSaved: Date.now(), - }, - }, - })), - - loadDraft: (formId) => { - return get().drafts[formId] || null - }, - - deleteDraft: (formId) => set((state) => { - const newDrafts = { ...state.drafts } - delete newDrafts[formId] - return { drafts: newDrafts } - }), - - clearOldDrafts: (maxAgeMs) => set((state) => { - const now = Date.now() - const newDrafts = Object.fromEntries( - Object.entries(state.drafts).filter( - ([_, draft]) => now - draft.lastSaved < maxAgeMs - ) - ) - return { drafts: newDrafts } - }), - }), - { - name: 'form-drafts-storage', - storage: createJSONStorage(() => localStorage), - } - ) -) -``` - -### Form Component with React Hook Form + 
Zustand - -```typescript -// src/pages/Dashboard/components/CreateForm.tsx -import { useForm } from 'react-hook-form' -import { zodResolver } from '@hookform/resolvers/zod' -import { z } from 'zod' -import { useFormDraftsStore } from '../stores/form-drafts.store' -import { useEffect } from 'react' -import { useDebouncedCallback } from 'use-debounce' - -const formSchema = z.object({ - title: z.string().min(1, 'Title required'), - description: z.string().min(10, 'Description too short'), - tags: z.array(z.string()), -}) - -type FormData = z.infer - -function CreateForm() { - const FORM_ID = 'create-item-form' - const { saveDraft, loadDraft, deleteDraft } = useFormDraftsStore() - - const { - register, - handleSubmit, - watch, - reset, - formState: { errors }, - } = useForm({ - resolver: zodResolver(formSchema), - defaultValues: () => { - // Load draft on mount - const draft = loadDraft(FORM_ID) - return draft?.data || { - title: '', - description: '', - tags: [], - } - }, - }) - - // Auto-save draft on form changes (debounced) - const formData = watch() - - const saveDraftDebounced = useDebouncedCallback( - (data: FormData) => { - saveDraft(FORM_ID, data) - console.log('Draft saved') - }, - 1000 // Save after 1 second of no typing - ) - - useEffect(() => { - saveDraftDebounced(formData) - }, [formData, saveDraftDebounced]) - - // Submit form - const onSubmit = async (data: FormData) => { - try { - await api.post('/items', data) - - // Clear draft on successful submit - deleteDraft(FORM_ID) - reset() - - toast.success('Item created!') - } catch (error) { - toast.error('Failed to create item') - } - } - - return ( -
- - {errors.title && {errors.title.message}} - -