From 5220c31628ca6547d930e6768e3a1ac39d0efd51 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 8 Jan 2026 05:58:49 +0000 Subject: [PATCH 1/7] Initial plan From aa179accf910b274a9ffbe0c11c4bf08974674d5 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 8 Jan 2026 06:01:59 +0000 Subject: [PATCH 2/7] Apply Black and isort formatting to fix CI formatting checks Co-authored-by: Lexicoding-systems <234111021+Lexicoding-systems@users.noreply.github.com> --- src/lexecon/api/server.py | 655 ++++++++++-------- src/lexecon/audit_export/service.py | 284 ++++---- .../eu_ai_act/article_11_technical_docs.py | 172 +++-- .../eu_ai_act/article_12_records.py | 142 ++-- .../eu_ai_act/article_14_oversight.py | 117 ++-- src/lexecon/compliance/eu_ai_act/storage.py | 91 ++- src/lexecon/compliance_mapping/service.py | 129 ++-- src/lexecon/decision/service.py | 5 +- src/lexecon/escalation/service.py | 33 +- src/lexecon/evidence/append_only_store.py | 18 +- src/lexecon/evidence/service.py | 14 +- src/lexecon/identity/signing.py | 1 + src/lexecon/override/service.py | 37 +- src/lexecon/responsibility/__init__.py | 9 +- src/lexecon/responsibility/storage.py | 136 ++-- src/lexecon/responsibility/tracker.py | 61 +- src/lexecon/risk/service.py | 14 +- src/lexecon/security/audit_service.py | 246 ++++--- src/lexecon/security/auth_service.py | 313 ++++++--- src/lexecon/security/middleware.py | 33 +- src/lexecon/security/signature_service.py | 73 +- src/lexecon/storage/persistence.py | 99 ++- src/lexecon/tools/audit_verify.py | 5 +- tests/test_api.py | 15 +- tests/test_api_additional.py | 47 +- tests/test_append_only_store.py | 10 +- tests/test_article_12_records.py | 4 +- tests/test_audit_export.py | 184 ++--- tests/test_audit_verify.py | 50 +- tests/test_capability_tokens.py | 4 +- tests/test_cli.py | 3 +- tests/test_compliance_mapping.py | 73 +- tests/test_decision_service.py | 34 +- tests/test_escalation_service.py | 45 +- tests/test_evidence_service.py | 16 +- tests/test_export_determinism.py | 14 +- tests/test_governance_api.py | 3 +- tests/test_governance_models.py | 36 +- tests/test_identity.py | 6 +- tests/test_ledger.py | 3 + tests/test_logging.py | 4 +- tests/test_metrics.py | 46 +- tests/test_middleware.py | 5 +- tests/test_override_service.py | 63 +- tests/test_policy.py | 20 +- tests/test_risk_service.py | 25 +- tests/test_security.py | 10 +- tests/test_storage_persistence.py | 116 ++-- tests/test_tracing.py | 64 +- 49 files changed, 1844 insertions(+), 1743 deletions(-) diff --git a/src/lexecon/api/server.py b/src/lexecon/api/server.py index 371c09d..18823d1 100644 --- a/src/lexecon/api/server.py +++ b/src/lexecon/api/server.py @@ -6,70 +6,63 @@ """ import json +import os +import secrets import time import uuid -import secrets from datetime import datetime, timezone from typing import Any, Dict, List, Optional -from fastapi import FastAPI, HTTPException, status, Request +from fastapi import FastAPI, HTTPException, Request, status from fastapi.middleware.cors import CORSMiddleware -from fastapi.responses import HTMLResponse, FileResponse, JSONResponse, PlainTextResponse +from fastapi.responses import FileResponse, HTMLResponse, JSONResponse, PlainTextResponse from pydantic import BaseModel, Field -import os +from lexecon.audit_export.service import AuditExportService, ExportFormat, ExportScope +from lexecon.compliance_mapping.service import ( + ComplianceMappingService, + ControlStatus, + 
GovernancePrimitive, + RegulatoryFramework, +) from lexecon.decision.service import DecisionRequest, DecisionService +from lexecon.escalation.service import EscalationService +from lexecon.evidence.service import EvidenceService from lexecon.identity.signing import KeyManager from lexecon.ledger.chain import LedgerChain +from lexecon.override.service import OverrideService from lexecon.policy.engine import PolicyEngine, PolicyMode -from lexecon.storage.persistence import LedgerStorage -from lexecon.responsibility.tracker import ( - ResponsibilityTracker, - DecisionMaker, - ResponsibilityLevel -) from lexecon.responsibility.storage import ResponsibilityStorage +from lexecon.responsibility.tracker import DecisionMaker, ResponsibilityLevel, ResponsibilityTracker -# Security imports -from lexecon.security.auth_service import AuthService, Role, Permission, User, Session +# Governance service imports +from lexecon.risk.service import RiskScoringEngine, RiskService from lexecon.security.audit_service import AuditService, ExportStatus -from lexecon.security.signature_service import SignatureService -# Governance service imports -from lexecon.risk.service import RiskService, RiskScoringEngine -from lexecon.escalation.service import EscalationService -from lexecon.override.service import OverrideService -from lexecon.evidence.service import EvidenceService -from lexecon.compliance_mapping.service import ( - ComplianceMappingService, - RegulatoryFramework, - GovernancePrimitive, - ControlStatus -) -from lexecon.audit_export.service import ( - AuditExportService, - ExportFormat, - ExportScope -) +# Security imports +from lexecon.security.auth_service import AuthService, Permission, Role, Session, User +from lexecon.security.signature_service import SignatureService +from lexecon.storage.persistence import LedgerStorage # Import governance models for type hints try: from model_governance_pack.models import ( - Risk, - RiskLevel, - RiskDimensions, + ArtifactType, + DigitalSignature, Escalation, EscalationPriority, EscalationStatus, - Override, - OverrideType, - OriginalOutcome, + EvidenceArtifact, NewOutcome, + OriginalOutcome, + Override, OverrideScope, - EvidenceArtifact, - ArtifactType, - DigitalSignature, + OverrideType, + Risk, + RiskDimensions, + RiskLevel, ) + GOVERNANCE_MODELS_AVAILABLE = True except ImportError: GOVERNANCE_MODELS_AVAILABLE = False @@ -95,26 +88,36 @@ class PolicyLoadModel(BaseModel): """Model for loading policy - supports both wrapped and direct formats.""" # Support wrapped format {"policy": {...}} - policy: Optional[Dict[str, Any]] = Field(default=None, description="Policy data (wrapped format)") + policy: Optional[Dict[str, Any]] = Field( + default=None, description="Policy data (wrapped format)" + ) # Support direct format name: Optional[str] = Field(default=None, description="Policy name") version: Optional[str] = Field(default="1.0", description="Policy version") mode: Optional[str] = Field(default=None, description="Policy mode") terms: Optional[List[Dict[str, Any]]] = Field(default=None, description="Policy terms") relations: Optional[List[Dict[str, Any]]] = Field(default=None, description="Policy relations") - constraints: Optional[List[Dict[str, Any]]] = Field(default=None, description="Policy constraints") + constraints: Optional[List[Dict[str, Any]]] = Field( + default=None, description="Policy constraints" + ) class InterventionModel(BaseModel): """Model for human intervention request.""" intervention_type: str = Field(..., description="Type of intervention") - 
ai_recommendation: Dict[str, Any] = Field(..., description="AI's recommendation (must include 'confidence' key)") + ai_recommendation: Dict[str, Any] = Field( + ..., description="AI's recommendation (must include 'confidence' key)" + ) human_decision: Dict[str, Any] = Field(..., description="Human's decision") human_role: str = Field(..., description="Human's role") reason: str = Field(..., description="Reason for intervention") - request_context: Optional[Dict[str, Any]] = Field(default=None, description="Additional request context") - response_time_ms: Optional[int] = Field(default=None, description="Response time in milliseconds") + request_context: Optional[Dict[str, Any]] = Field( + default=None, description="Additional request context" + ) + response_time_ms: Optional[int] = Field( + default=None, description="Response time in milliseconds" + ) class HealthResponse(BaseModel): @@ -142,12 +145,14 @@ class StatusResponse(BaseModel): # Security models class LoginRequest(BaseModel): """Login request.""" + username: str password: str class LoginResponse(BaseModel): """Login response.""" + success: bool session_id: Optional[str] = None user: Optional[Dict[str, Any]] = None @@ -156,6 +161,7 @@ class LoginResponse(BaseModel): class CreateUserRequest(BaseModel): """Create user request.""" + username: str email: str password: str @@ -165,6 +171,7 @@ class CreateUserRequest(BaseModel): class ExportRequestModel(BaseModel): """Audit packet export request with attestation.""" + # Step 1: Metadata requester_name: str requester_email: str @@ -187,9 +194,11 @@ class ExportRequestModel(BaseModel): # ========== Governance API Models (Phase 5) ========== + # Risk Service Models class RiskDimensionsModel(BaseModel): """Risk dimensions for assessment.""" + security: Optional[int] = Field(None, ge=0, le=100, description="Security risk score") privacy: Optional[int] = Field(None, ge=0, le=100, description="Privacy risk score") compliance: Optional[int] = Field(None, ge=0, le=100, description="Compliance risk score") @@ -200,6 +209,7 @@ class RiskDimensionsModel(BaseModel): class RiskAssessmentRequest(BaseModel): """Request to assess risk for a decision.""" + decision_id: str = Field(..., description="Decision ID to assess") dimensions: RiskDimensionsModel = Field(..., description="Risk dimensions") context: Optional[Dict[str, Any]] = Field(default=None, description="Additional context") @@ -208,16 +218,23 @@ class RiskAssessmentRequest(BaseModel): # Escalation Service Models class EscalationCreateRequest(BaseModel): """Request to create an escalation.""" + decision_id: str = Field(..., description="Decision ID being escalated") - trigger: str = Field(..., description="What triggered the escalation (risk_threshold/policy_conflict/explicit_rule/actor_request/anomaly_detected)") + trigger: str = Field( + ..., + description="What triggered the escalation (risk_threshold/policy_conflict/explicit_rule/actor_request/anomaly_detected)", + ) escalated_to: List[str] = Field(..., description="List of actor IDs to escalate to") - priority: Optional[str] = Field(default=None, description="Escalation priority (critical/high/medium/low)") + priority: Optional[str] = Field( + default=None, description="Escalation priority (critical/high/medium/low)" + ) context_summary: Optional[str] = Field(default=None, description="Summary for reviewers") metadata: Optional[Dict[str, Any]] = Field(default=None, description="Additional metadata") class EscalationResolveRequest(BaseModel): """Request to resolve an escalation.""" + 
resolved_by: str = Field(..., description="Actor ID resolving escalation") outcome: str = Field(..., description="Resolution outcome") notes: str = Field(..., description="Resolution notes") @@ -226,6 +243,7 @@ class EscalationResolveRequest(BaseModel): # Override Service Models class OverrideCreateRequest(BaseModel): """Request to create an override.""" + decision_id: str = Field(..., description="Decision ID to override") override_type: str = Field(..., description="Type of override") authorized_by: str = Field(..., description="Actor ID authorizing override") @@ -240,11 +258,16 @@ class OverrideCreateRequest(BaseModel): # Evidence Service Models class EvidenceStoreRequest(BaseModel): """Request to store an evidence artifact.""" + artifact_type: str = Field(..., description="Type of artifact") content: str = Field(..., description="Artifact content") source: str = Field(..., description="System/process that created artifact") - related_decision_ids: Optional[List[str]] = Field(default=None, description="Related decision IDs") - related_control_ids: Optional[List[str]] = Field(default=None, description="Related control IDs") + related_decision_ids: Optional[List[str]] = Field( + default=None, description="Related decision IDs" + ) + related_control_ids: Optional[List[str]] = Field( + default=None, description="Related control IDs" + ) content_type: Optional[str] = Field(default=None, description="MIME type") storage_uri: Optional[str] = Field(default=None, description="External storage location") retention_days: Optional[int] = Field(default=None, description="Custom retention period") @@ -253,11 +276,13 @@ class EvidenceStoreRequest(BaseModel): class EvidenceVerifyRequest(BaseModel): """Request to verify artifact integrity.""" + content: str = Field(..., description="Content to verify against stored hash") class EvidenceSignRequest(BaseModel): """Request to sign an artifact.""" + signer_id: str = Field(..., description="Actor ID of signer") signature: str = Field(..., description="Signature value (base64 encoded)") algorithm: str = Field(default="RSA-SHA256", description="Signature algorithm") @@ -265,8 +290,10 @@ class EvidenceSignRequest(BaseModel): # ========== Compliance Mapping API Models (Phase 7) ========== + class ComplianceMappingRequest(BaseModel): """Request to map a primitive to compliance controls.""" + primitive_type: str = Field(..., description="Type of governance primitive") primitive_id: str = Field(..., description="ID of primitive instance") framework: str = Field(..., description="Regulatory framework") @@ -275,21 +302,28 @@ class ComplianceMappingRequest(BaseModel): class ComplianceLinkEvidenceRequest(BaseModel): """Request to link evidence to a control.""" + evidence_artifact_id: str = Field(..., description="Evidence artifact ID") class ComplianceVerifyControlRequest(BaseModel): """Request to verify a control.""" + notes: Optional[str] = Field(default=None, description="Verification notes") # ========== Audit Export API Models (Phase 8) ========== + class AuditExportCreateRequest(BaseModel): """Request to create an audit export.""" + requester: str = Field(..., min_length=1, description="User or system requesting export") purpose: str = Field(..., min_length=10, description="Purpose of the export (min 10 chars)") - scope: str = Field(default="all", description="Scope of data: all, risk_only, escalation_only, override_only, evidence_only, compliance_only, decision_log_only") + scope: str = Field( + default="all", + description="Scope of data: all, risk_only, 
escalation_only, override_only, evidence_only, compliance_only, decision_log_only", + ) format: str = Field(default="json", description="Output format: json, csv, markdown, html") start_date: Optional[str] = Field(default=None, description="Start date filter (ISO 8601)") end_date: Optional[str] = Field(default=None, description="End date filter (ISO 8601)") @@ -306,7 +340,10 @@ class AuditExportCreateRequest(BaseModel): # Configure CORS import os -allowed_origins = os.getenv("LEXECON_CORS_ORIGINS", "http://localhost:3000,http://localhost:5173").split(",") + +allowed_origins = os.getenv( + "LEXECON_CORS_ORIGINS", "http://localhost:3000,http://localhost:5173" +).split(",") app.add_middleware( CORSMiddleware, allow_origins=allowed_origins if allowed_origins != ["*"] else ["*"], @@ -321,7 +358,9 @@ class AuditExportCreateRequest(BaseModel): storage: LedgerStorage = LedgerStorage("lexecon_ledger.db") ledger: LedgerChain = LedgerChain(storage=storage) responsibility_storage: ResponsibilityStorage = ResponsibilityStorage("lexecon_responsibility.db") -responsibility_tracker: ResponsibilityTracker = ResponsibilityTracker(storage=responsibility_storage) +responsibility_tracker: ResponsibilityTracker = ResponsibilityTracker( + storage=responsibility_storage +) key_manager: Optional[KeyManager] = None oversight_system = None # HumanOversightEvidence - initialized after key_manager intervention_storage = None # InterventionStorage - initialized with oversight_system @@ -364,14 +403,15 @@ def initialize_services(): # Initialize intervention storage if intervention_storage is None: from lexecon.compliance.eu_ai_act.storage import InterventionStorage + intervention_storage = InterventionStorage("lexecon_interventions.db") # Initialize oversight system after key_manager and storage are available if oversight_system is None: from lexecon.compliance.eu_ai_act.article_14_oversight import HumanOversightEvidence + oversight_system = HumanOversightEvidence( - key_manager=key_manager, - storage=intervention_storage + key_manager=key_manager, storage=intervention_storage ) # Initialize governance services (Phase 1-4) @@ -436,15 +476,9 @@ async def get_status(): async def serve_dashboard(): """Serve the compliance dashboard UI.""" # Dashboard is at project root, go up from src/lexecon/api/ - dashboard_path = os.path.join( - os.path.dirname(__file__), - "../../../dashboard.html" - ) + dashboard_path = os.path.join(os.path.dirname(__file__), "../../../dashboard.html") if not os.path.exists(dashboard_path): - raise HTTPException( - status_code=404, - detail=f"Dashboard not found at {dashboard_path}" - ) + raise HTTPException(status_code=404, detail=f"Dashboard not found at {dashboard_path}") return FileResponse(dashboard_path) @@ -452,14 +486,10 @@ async def serve_dashboard(): async def serve_governance_dashboard(): """Serve the governance dashboard UI (Phase 6).""" # Governance dashboard is at project root - dashboard_path = os.path.join( - os.path.dirname(__file__), - "../../../governance_dashboard.html" - ) + dashboard_path = os.path.join(os.path.dirname(__file__), "../../../governance_dashboard.html") if not os.path.exists(dashboard_path): raise HTTPException( - status_code=404, - detail=f"Governance dashboard not found at {dashboard_path}" + status_code=404, detail=f"Governance dashboard not found at {dashboard_path}" ) return FileResponse(dashboard_path) @@ -567,8 +597,8 @@ async def make_decision(request_model: DecisionRequestModel): responsible_party="system", role="AI Decision System", 
reasoning=response.reasoning, - confidence=response.confidence if hasattr(response, 'confidence') else 1.0, - responsibility_level=ResponsibilityLevel.AUTOMATED + confidence=response.confidence if hasattr(response, "confidence") else 1.0, + responsibility_level=ResponsibilityLevel.AUTOMATED, ) return response.to_dict() @@ -624,9 +654,7 @@ async def get_audit_report(): @app.get("/ledger/entries") async def get_ledger_entries( - event_type: Optional[str] = None, - limit: Optional[int] = None, - offset: int = 0 + event_type: Optional[str] = None, limit: Optional[int] = None, offset: int = 0 ): """Get ledger entries with optional filtering.""" entries = ledger.entries @@ -638,7 +666,7 @@ async def get_ledger_entries( # Apply pagination total = len(entries) if limit: - entries = entries[offset:offset + limit] + entries = entries[offset : offset + limit] else: entries = entries[offset:] @@ -657,36 +685,26 @@ async def get_storage_statistics(): raise HTTPException(status_code=503, detail="Persistence not configured") stats = storage.get_statistics() - return { - "storage_enabled": True, - **stats - } + return {"storage_enabled": True, **stats} @app.get("/compliance/eu-ai-act/article-11") async def generate_article_11_documentation(format: str = "json"): """Generate EU AI Act Article 11 technical documentation.""" - from lexecon.compliance.eu_ai_act.article_11_technical_docs import TechnicalDocumentationGenerator + from lexecon.compliance.eu_ai_act.article_11_technical_docs import ( + TechnicalDocumentationGenerator, + ) initialize_services() - generator = TechnicalDocumentationGenerator( - policy_engine=policy_engine, - ledger=ledger - ) + generator = TechnicalDocumentationGenerator(policy_engine=policy_engine, ledger=ledger) doc = generator.generate() if format == "markdown" or format == "md": - return { - "format": "markdown", - "content": generator.export_markdown(doc) - } + return {"format": "markdown", "content": generator.export_markdown(doc)} - return { - "format": "json", - "content": json.loads(generator.export_json(doc)) - } + return {"format": "json", "content": json.loads(generator.export_json(doc))} @app.get("/compliance/eu-ai-act/article-12/status") @@ -700,9 +718,7 @@ async def get_retention_status(): @app.get("/compliance/eu-ai-act/article-12/regulatory-package") async def generate_regulatory_package( - format: str = "json", - start_date: Optional[str] = None, - end_date: Optional[str] = None + format: str = "json", start_date: Optional[str] = None, end_date: Optional[str] = None ): """Generate complete regulatory response package.""" from lexecon.compliance.eu_ai_act.article_12_records import RecordKeepingSystem @@ -713,9 +729,7 @@ async def generate_regulatory_package( raise HTTPException(status_code=400, detail=f"Unsupported format: {format}") content = record_system.export_for_regulator( - format=format, - start_date=start_date, - end_date=end_date + format=format, start_date=start_date, end_date=end_date ) if format == "json": @@ -726,20 +740,14 @@ async def generate_regulatory_package( @app.post("/compliance/eu-ai-act/article-12/legal-hold") async def apply_legal_hold( - hold_id: str, - reason: str, - requester: str = "system", - entry_ids: Optional[List[str]] = None + hold_id: str, reason: str, requester: str = "system", entry_ids: Optional[List[str]] = None ): """Apply legal hold to records.""" from lexecon.compliance.eu_ai_act.article_12_records import RecordKeepingSystem record_system = RecordKeepingSystem(ledger=ledger) return record_system.apply_legal_hold( - 
hold_id=hold_id, - reason=reason, - entry_ids=entry_ids, - requester=requester + hold_id=hold_id, reason=reason, entry_ids=entry_ids, requester=requester ) @@ -762,14 +770,14 @@ async def log_human_intervention(request: InterventionModel): human_role=role_enum, reason=request.reason, request_context=request.request_context, - response_time_ms=request.response_time_ms + response_time_ms=request.response_time_ms, ) return { "status": "success", "intervention_id": intervention.intervention_id, "timestamp": intervention.timestamp, - "signature": intervention.signature + "signature": intervention.signature, } except ValueError as e: raise HTTPException(status_code=400, detail=f"Invalid parameter: {str(e)}") @@ -778,9 +786,7 @@ async def log_human_intervention(request: InterventionModel): @app.get("/compliance/eu-ai-act/article-14/effectiveness") -async def get_oversight_effectiveness( - time_period_days: int = 30 -): +async def get_oversight_effectiveness(time_period_days: int = 30): """Get Article 14 human oversight effectiveness report.""" initialize_services() @@ -792,11 +798,13 @@ async def get_oversight_effectiveness( @app.post("/compliance/eu-ai-act/article-14/verify") -async def verify_intervention( - intervention_data: Dict[str, Any] -): +async def verify_intervention(intervention_data: Dict[str, Any]): """Verify a human intervention's cryptographic signature.""" - from lexecon.compliance.eu_ai_act.article_14_oversight import HumanIntervention, InterventionType, OversightRole + from lexecon.compliance.eu_ai_act.article_14_oversight import ( + HumanIntervention, + InterventionType, + OversightRole, + ) initialize_services() @@ -811,7 +819,7 @@ async def verify_intervention( human_role=OversightRole(intervention_data["human_role"]), reason=intervention_data["reason"], signature=intervention_data.get("signature"), - response_time_ms=intervention_data.get("response_time_ms") + response_time_ms=intervention_data.get("response_time_ms"), ) is_valid = oversight_system.verify_intervention(intervention) @@ -819,7 +827,7 @@ async def verify_intervention( return { "verified": is_valid, "intervention_id": intervention.intervention_id, - "timestamp": intervention.timestamp + "timestamp": intervention.timestamp, } except KeyError as e: raise HTTPException(status_code=400, detail=f"Missing field: {str(e)}") @@ -829,20 +837,17 @@ async def verify_intervention( @app.get("/compliance/eu-ai-act/article-14/evidence-package") async def get_evidence_package( - format: str = "json", - start_date: Optional[str] = None, - end_date: Optional[str] = None + format: str = "json", start_date: Optional[str] = None, end_date: Optional[str] = None ): """Generate Article 14 evidence package for regulatory submission.""" initialize_services() if format not in ["json", "markdown"]: - raise HTTPException(status_code=400, detail=f"Unsupported format: {format}. Use 'json' or 'markdown'") + raise HTTPException( + status_code=400, detail=f"Unsupported format: {format}. 
Use 'json' or 'markdown'" + ) - package = oversight_system.export_evidence_package( - start_date=start_date, - end_date=end_date - ) + package = oversight_system.export_evidence_package(start_date=start_date, end_date=end_date) if format == "markdown": content = oversight_system.export_markdown(package) @@ -852,10 +857,7 @@ async def get_evidence_package( @app.post("/compliance/eu-ai-act/article-14/escalation") -async def simulate_escalation( - decision_class: str, - current_role: str -): +async def simulate_escalation(decision_class: str, current_role: str): """Simulate escalation path for a decision.""" from lexecon.compliance.eu_ai_act.article_14_oversight import OversightRole @@ -865,8 +867,7 @@ async def simulate_escalation( current_role_enum = OversightRole(current_role) escalation = oversight_system.simulate_escalation( - decision_class=decision_class, - current_role=current_role_enum + decision_class=decision_class, current_role=current_role_enum ) return escalation @@ -885,17 +886,11 @@ async def get_intervention_storage_stats(): raise HTTPException(status_code=503, detail="Intervention persistence not configured") stats = intervention_storage.get_statistics() - return { - "storage_enabled": True, - **stats - } + return {"storage_enabled": True, **stats} @app.get("/compliance/eu-ai-act/audit-packet") -async def generate_audit_packet( - time_window: Optional[str] = "all", - format: Optional[str] = "json" -): +async def generate_audit_packet(time_window: Optional[str] = "all", format: Optional[str] = "json"): """ Generate comprehensive audit packet for EU AI Act compliance. @@ -923,7 +918,7 @@ async def generate_audit_packet( "24h": timedelta(hours=24), "7d": timedelta(days=7), "30d": timedelta(days=30), - "all": None + "all": None, } cutoff = None @@ -931,13 +926,13 @@ async def generate_audit_packet( cutoff = now - window_map[time_window] # Filter helper - def filter_by_time(items, timestamp_key='timestamp'): + def filter_by_time(items, timestamp_key="timestamp"): if not cutoff: return items filtered = [] for item in items: try: - ts_str = item[timestamp_key].replace('Z', '+00:00') + ts_str = item[timestamp_key].replace("Z", "+00:00") ts = datetime.fromisoformat(ts_str) # Make timezone-aware if naive if ts.tzinfo is None: @@ -957,7 +952,7 @@ def filter_by_time(items, timestamp_key='timestamp'): "system_info": { "node_id": node_id, "system": "Lexecon Governance System", - "version": "0.1.0" + "version": "0.1.0", }, "compliance_status": { "overall": "COMPLIANT", @@ -965,84 +960,98 @@ def filter_by_time(items, timestamp_key='timestamp'): "article_11_technical_docs": { "status": "COMPLIANT", "description": "Technical documentation for high-risk AI systems", - "evidence": "Documentation generator active" + "evidence": "Documentation generator active", }, "article_12_record_keeping": { "status": "COMPLIANT", "description": "Automatic logging enabled for high-risk AI systems", - "evidence": f"{len(ledger.entries)} cryptographically chained ledger entries" + "evidence": f"{len(ledger.entries)} cryptographically chained ledger entries", }, "article_14_human_oversight": { "status": "COMPLIANT", "description": "Human oversight and intervention capabilities", - "evidence": f"{intervention_storage.get_statistics()['total_interventions'] if intervention_storage else 0} human interventions logged" - } - } - } + "evidence": f"{intervention_storage.get_statistics()['total_interventions'] if intervention_storage else 0} human interventions logged", + }, + }, + }, } # 2. 
DECISION LOG decision_entries = [e for e in ledger.entries if e.event_type == "decision"] - filtered_decisions = filter_by_time([ - { - "entry_id": e.entry_id, - "timestamp": e.timestamp, - "event_type": e.event_type, - "data": e.data, - "entry_hash": e.entry_hash, - "previous_hash": e.previous_hash - } - for e in decision_entries - ]) + filtered_decisions = filter_by_time( + [ + { + "entry_id": e.entry_id, + "timestamp": e.timestamp, + "event_type": e.event_type, + "data": e.data, + "entry_hash": e.entry_hash, + "previous_hash": e.previous_hash, + } + for e in decision_entries + ] + ) decision_log = { "total_decisions": len(filtered_decisions), "time_window": time_window, - "decisions": filtered_decisions[-100:] # Last 100 decisions + "decisions": filtered_decisions[-100:], # Last 100 decisions } # 3. HUMAN OVERSIGHT LOG (Article 14) oversight_log = { "article": "Article 14 - Human Oversight", "total_interventions": 0, - "interventions": [] + "interventions": [], } if oversight_system: all_interventions = oversight_system.interventions - filtered_interventions = filter_by_time([ - { - "intervention_id": i.intervention_id, - "timestamp": i.timestamp, - "intervention_type": i.intervention_type.value, - "ai_recommendation": i.ai_recommendation, - "ai_confidence": i.ai_confidence, - "human_decision": i.human_decision, - "human_role": i.human_role.value, - "reason": i.reason, - "request_context": i.request_context, - "signature": i.signature, - "response_time_ms": i.response_time_ms - } - for i in all_interventions - ]) + filtered_interventions = filter_by_time( + [ + { + "intervention_id": i.intervention_id, + "timestamp": i.timestamp, + "intervention_type": i.intervention_type.value, + "ai_recommendation": i.ai_recommendation, + "ai_confidence": i.ai_confidence, + "human_decision": i.human_decision, + "human_role": i.human_role.value, + "reason": i.reason, + "request_context": i.request_context, + "signature": i.signature, + "response_time_ms": i.response_time_ms, + } + for i in all_interventions + ] + ) oversight_log["total_interventions"] = len(filtered_interventions) oversight_log["interventions"] = filtered_interventions # Calculate oversight metrics if filtered_interventions: - override_count = sum(1 for i in filtered_interventions if i["intervention_type"] == "override") + override_count = sum( + 1 for i in filtered_interventions if i["intervention_type"] == "override" + ) # Calculate average response time, handling None values response_times = [i.get("response_time_ms", 0) or 0 for i in filtered_interventions] avg_response_time = sum(response_times) / len(response_times) if response_times else 0 oversight_log["metrics"] = { "override_count": override_count, - "override_rate": (override_count / len(filtered_interventions) * 100) if filtered_interventions else 0, - "approval_count": sum(1 for i in filtered_interventions if i["intervention_type"] == "approval"), - "escalation_count": sum(1 for i in filtered_interventions if i["intervention_type"] == "escalation"), - "average_response_time_ms": avg_response_time + "override_rate": ( + (override_count / len(filtered_interventions) * 100) + if filtered_interventions + else 0 + ), + "approval_count": sum( + 1 for i in filtered_interventions if i["intervention_type"] == "approval" + ), + "escalation_count": sum( + 1 for i in filtered_interventions if i["intervention_type"] == "escalation" + ), + "average_response_time_ms": avg_response_time, } # 4. 
CRYPTOGRAPHIC VERIFICATION REPORT @@ -1055,8 +1064,10 @@ def filter_by_time(items, timestamp_key='timestamp'): "storage_stats": { "ledger": storage.get_statistics() if storage else {}, "interventions": intervention_storage.get_statistics() if intervention_storage else {}, - "responsibility": responsibility_storage.get_statistics() if responsibility_storage else {} - } + "responsibility": ( + responsibility_storage.get_statistics() if responsibility_storage else {} + ), + }, } # Verify chain integrity @@ -1064,7 +1075,9 @@ def filter_by_time(items, timestamp_key='timestamp'): integrity_result = ledger.verify_integrity() is_valid = integrity_result.get("valid", False) verification_report["chain_integrity"] = "VALID" if is_valid else "INVALID" - verification_report["verification_details"] = integrity_result.get("message", "Chain verification completed") + verification_report["verification_details"] = integrity_result.get( + "message", "Chain verification completed" + ) except Exception as e: verification_report["chain_integrity"] = "ERROR" verification_report["verification_details"] = str(e) @@ -1083,8 +1096,8 @@ def filter_by_time(items, timestamp_key='timestamp'): "signature_info": { "packet_generated_at": now.isoformat(), "packet_generator": "Lexecon Compliance System", - "regulatory_framework": "EU AI Act (Regulation 2024/1689)" - } + "regulatory_framework": "EU AI Act (Regulation 2024/1689)", + }, } # Return as JSON or formatted text @@ -1178,13 +1191,11 @@ def filter_by_time(items, timestamp_key='timestamp'): @app.get("/responsibility/report") async def get_accountability_report( - start_date: Optional[str] = None, - end_date: Optional[str] = None + start_date: Optional[str] = None, end_date: Optional[str] = None ): """Generate accountability report showing who made decisions.""" return responsibility_tracker.generate_accountability_report( - start_date=start_date, - end_date=end_date + start_date=start_date, end_date=end_date ) @@ -1199,7 +1210,7 @@ async def get_responsibility_chain(decision_id: str): return { "decision_id": decision_id, "chain_length": len(chain), - "records": [r.to_dict() for r in chain] + "records": [r.to_dict() for r in chain], } @@ -1211,7 +1222,7 @@ async def get_by_party(party: str): return { "responsible_party": party, "decision_count": len(records), - "records": [r.to_dict() for r in records] + "records": [r.to_dict() for r in records], } @@ -1220,10 +1231,7 @@ async def get_ai_overrides(): """Get all decisions where humans overrode AI recommendations.""" overrides = responsibility_tracker.get_ai_overrides() - return { - "override_count": len(overrides), - "records": [r.to_dict() for r in overrides] - } + return {"override_count": len(overrides), "records": [r.to_dict() for r in overrides]} @app.get("/responsibility/pending-reviews") @@ -1231,10 +1239,7 @@ async def get_pending_reviews(): """Get decisions awaiting human review.""" pending = responsibility_tracker.get_pending_reviews() - return { - "pending_count": len(pending), - "records": [r.to_dict() for r in pending] - } + return {"pending_count": len(pending), "records": [r.to_dict() for r in pending]} @app.get("/responsibility/legal/{decision_id}") @@ -1250,10 +1255,7 @@ async def get_responsibility_storage_stats(): raise HTTPException(status_code=503, detail="Responsibility persistence not configured") stats = responsibility_storage.get_statistics() - return { - "storage_enabled": True, - **stats - } + return {"storage_enabled": True, **stats} # 
============================================================================ @@ -1266,11 +1268,7 @@ async def login(request: Request, login_req: LoginRequest): """Authenticate user and create session.""" ip_address = request.client.host if request.client else None - user, error = auth_service.authenticate( - login_req.username, - login_req.password, - ip_address - ) + user, error = auth_service.authenticate(login_req.username, login_req.password, ip_address) if not user: return LoginResponse(success=False, error=error) @@ -1285,7 +1283,7 @@ async def login(request: Request, login_req: LoginRequest): status_code=200, user_id=user.user_id, username=user.username, - ip_address=ip_address + ip_address=ip_address, ) return LoginResponse( @@ -1296,8 +1294,8 @@ async def login(request: Request, login_req: LoginRequest): "username": user.username, "email": user.email, "role": user.role.value, - "full_name": user.full_name - } + "full_name": user.full_name, + }, ) @@ -1338,14 +1336,16 @@ async def get_current_user_info(request: Request): "email": user.email, "role": user.role.value, "full_name": user.full_name, - "last_login": user.last_login + "last_login": user.last_login, } @app.post("/auth/users") async def create_user(request: Request, user_req: CreateUserRequest): """Create a new user (admin only).""" - session_id = request.cookies.get("session_id") or request.headers.get("Authorization", "").replace("Bearer ", "") + session_id = request.cookies.get("session_id") or request.headers.get( + "Authorization", "" + ).replace("Bearer ", "") if not session_id: raise HTTPException(status_code=401, detail="Not authenticated") @@ -1362,7 +1362,7 @@ async def create_user(request: Request, user_req: CreateUserRequest): email=user_req.email, password=user_req.password, role=Role(user_req.role), - full_name=user_req.full_name + full_name=user_req.full_name, ) return { @@ -1372,8 +1372,8 @@ async def create_user(request: Request, user_req: CreateUserRequest): "username": user.username, "email": user.email, "role": user.role.value, - "full_name": user.full_name - } + "full_name": user.full_name, + }, } except ValueError as e: raise HTTPException(status_code=400, detail=str(e)) @@ -1382,7 +1382,9 @@ async def create_user(request: Request, user_req: CreateUserRequest): @app.get("/auth/users") async def list_users(request: Request): """List all users (admin only).""" - session_id = request.cookies.get("session_id") or request.headers.get("Authorization", "").replace("Bearer ", "") + session_id = request.cookies.get("session_id") or request.headers.get( + "Authorization", "" + ).replace("Bearer ", "") if not session_id: raise HTTPException(status_code=401, detail="Not authenticated") @@ -1404,7 +1406,7 @@ async def list_users(request: Request): "role": u.role.value, "full_name": u.full_name, "last_login": u.last_login, - "is_active": u.is_active + "is_active": u.is_active, } for u in users ] @@ -1424,7 +1426,7 @@ async def verify_signature(packet: Dict[str, Any]): return { "valid": is_valid, "message": message, - "timestamp": datetime.now(timezone.utc).isoformat() + "timestamp": datetime.now(timezone.utc).isoformat(), } @@ -1435,7 +1437,7 @@ async def get_public_key(): "public_key_pem": signature_service.get_public_key_pem(), "fingerprint": signature_service.get_public_key_fingerprint(), "algorithm": "RSA-PSS-SHA256", - "key_size": 4096 + "key_size": 4096, } @@ -1447,7 +1449,9 @@ async def get_public_key(): @app.get("/compliance/export-requests") async def list_export_requests(request: Request, limit: int = 
100): """List export requests (compliance officer+ only).""" - session_id = request.cookies.get("session_id") or request.headers.get("Authorization", "").replace("Bearer ", "") + session_id = request.cookies.get("session_id") or request.headers.get( + "Authorization", "" + ).replace("Bearer ", "") if not session_id: raise HTTPException(status_code=401, detail="Not authenticated") @@ -1470,7 +1474,7 @@ async def list_export_requests(request: Request, limit: int = 100): "requested_at": r.requested_at, "export_status": r.export_status.value, "approval_status": r.approval_status.value, - "completed_at": r.completed_at + "completed_at": r.completed_at, } for r in requests ] @@ -1480,7 +1484,9 @@ async def list_export_requests(request: Request, limit: int = 100): @app.get("/compliance/audit-chain-verification") async def verify_audit_chain(request: Request): """Verify integrity of export audit chain.""" - session_id = request.cookies.get("session_id") or request.headers.get("Authorization", "").replace("Bearer ", "") + session_id = request.cookies.get("session_id") or request.headers.get( + "Authorization", "" + ).replace("Bearer ", "") if not session_id: raise HTTPException(status_code=401, detail="Not authenticated") @@ -1499,6 +1505,7 @@ async def verify_audit_chain(request: Request): # ---------- Risk Service Endpoints ---------- + @app.post("/api/governance/risk/assess") async def assess_risk(request: RiskAssessmentRequest): """Assess risk for a decision.""" @@ -1514,9 +1521,7 @@ async def assess_risk(request: RiskAssessmentRequest): # Assess risk risk = risk_service.assess_risk( - decision_id=request.decision_id, - dimensions=dimensions, - metadata=request.context + decision_id=request.decision_id, dimensions=dimensions, metadata=request.context ) # Convert to dict for response @@ -1604,6 +1609,7 @@ async def get_risk_for_decision(decision_id: str): # ---------- Escalation Service Endpoints ---------- + @app.post("/api/governance/escalation") async def create_escalation(request: EscalationCreateRequest): """Create an escalation.""" @@ -1626,7 +1632,7 @@ async def create_escalation(request: EscalationCreateRequest): escalated_to=request.escalated_to, priority=priority, context_summary=request.context_summary, - metadata=request.metadata + metadata=request.metadata, ) return { @@ -1638,7 +1644,9 @@ async def create_escalation(request: EscalationCreateRequest): "escalated_to": escalation.escalated_to, "context_summary": escalation.context_summary, "created_at": escalation.created_at.isoformat(), - "sla_deadline": escalation.sla_deadline.isoformat() if escalation.sla_deadline else None, + "sla_deadline": ( + escalation.sla_deadline.isoformat() if escalation.sla_deadline else None + ), } except ValueError as e: raise HTTPException(status_code=400, detail=str(e)) @@ -1665,13 +1673,13 @@ async def resolve_escalation(escalation_id: str, request: EscalationResolveReque escalation_id=escalation_id, resolved_by=request.resolved_by, outcome=outcome_enum, - notes=request.notes + notes=request.notes, ) # Extract outcome value - handle both enum and string outcome_value = None if escalation.resolution and escalation.resolution.outcome: - if hasattr(escalation.resolution.outcome, 'value'): + if hasattr(escalation.resolution.outcome, "value"): outcome_value = escalation.resolution.outcome.value else: outcome_value = escalation.resolution.outcome @@ -1755,15 +1763,10 @@ async def get_sla_violations(): # We need to list pending escalations that are past their SLA deadline from datetime import datetime, 
timezone - pending_escalations = escalation_service.list_escalations( - status=EscalationStatus.PENDING - ) + pending_escalations = escalation_service.list_escalations(status=EscalationStatus.PENDING) now = datetime.now(timezone.utc) - violations = [ - e for e in pending_escalations - if e.sla_deadline and now > e.sla_deadline - ] + violations = [e for e in pending_escalations if e.sla_deadline and now > e.sla_deadline] return { "count": len(violations), @@ -1782,6 +1785,7 @@ async def get_sla_violations(): # ---------- Override Service Endpoints ---------- + @app.post("/api/governance/override") async def create_override(request: OverrideCreateRequest): """Create an override.""" @@ -1793,14 +1797,17 @@ async def create_override(request: OverrideCreateRequest): try: # Convert string enums to proper types (values are lowercase) override_type = OverrideType(request.override_type.lower()) - original_outcome = OriginalOutcome(request.original_outcome.lower()) if request.original_outcome else None + original_outcome = ( + OriginalOutcome(request.original_outcome.lower()) if request.original_outcome else None + ) new_outcome = NewOutcome(request.new_outcome.lower()) if request.new_outcome else None # Parse expires_at if provided expires_at = None if request.expires_at: from datetime import datetime - expires_at = datetime.fromisoformat(request.expires_at.replace('Z', '+00:00')) + + expires_at = datetime.fromisoformat(request.expires_at.replace("Z", "+00:00")) # Create scope if provided scope = None @@ -1816,7 +1823,7 @@ async def create_override(request: OverrideCreateRequest): new_outcome=new_outcome, expires_at=expires_at, scope=scope, - metadata=request.metadata + metadata=request.metadata, ) return { @@ -1909,7 +1916,9 @@ async def check_active_override(decision_id: str): "override_type": active_override.override_type.value, "authorized_by": active_override.authorized_by, "timestamp": active_override.timestamp.isoformat(), - "expires_at": active_override.expires_at.isoformat() if active_override.expires_at else None, + "expires_at": ( + active_override.expires_at.isoformat() if active_override.expires_at else None + ), } return result @@ -1935,6 +1944,7 @@ async def get_decision_with_override_status(decision_id: str): # ---------- Evidence Service Endpoints ---------- + @app.post("/api/governance/evidence") async def store_evidence_artifact(request: EvidenceStoreRequest): """Store an evidence artifact.""" @@ -1956,7 +1966,7 @@ async def store_evidence_artifact(request: EvidenceStoreRequest): content_type=request.content_type, storage_uri=request.storage_uri, retention_days=request.retention_days, - metadata=request.metadata + metadata=request.metadata, ) return { @@ -1966,7 +1976,9 @@ async def store_evidence_artifact(request: EvidenceStoreRequest): "created_at": artifact.created_at.isoformat(), "source": artifact.source, "size_bytes": artifact.size_bytes, - "retention_until": artifact.retention_until.isoformat() if artifact.retention_until else None, + "retention_until": ( + artifact.retention_until.isoformat() if artifact.retention_until else None + ), "is_immutable": artifact.is_immutable, } except ValueError as e: @@ -2011,7 +2023,9 @@ async def get_evidence_artifact(artifact_id: str): "storage_uri": artifact.storage_uri, "related_decision_ids": artifact.related_decision_ids, "related_control_ids": artifact.related_control_ids, - "retention_until": artifact.retention_until.isoformat() if artifact.retention_until else None, + "retention_until": ( + artifact.retention_until.isoformat() if 
artifact.retention_until else None + ), "is_immutable": artifact.is_immutable, "has_signature": artifact.digital_signature is not None, } @@ -2094,15 +2108,23 @@ async def sign_evidence_artifact(artifact_id: str, request: EvidenceSignRequest) artifact_id=artifact_id, signer_id=request.signer_id, signature=request.signature, - algorithm=request.algorithm + algorithm=request.algorithm, ) return { "artifact_id": artifact.artifact_id, "signed": True, - "signer_id": artifact.digital_signature.signer_id if artifact.digital_signature else None, - "signed_at": artifact.digital_signature.signed_at.isoformat() if artifact.digital_signature else None, - "algorithm": artifact.digital_signature.algorithm if artifact.digital_signature else None, + "signer_id": ( + artifact.digital_signature.signer_id if artifact.digital_signature else None + ), + "signed_at": ( + artifact.digital_signature.signed_at.isoformat() + if artifact.digital_signature + else None + ), + "algorithm": ( + artifact.digital_signature.algorithm if artifact.digital_signature else None + ), } except ValueError as e: raise HTTPException(status_code=400, detail=str(e)) @@ -2112,6 +2134,7 @@ async def sign_evidence_artifact(artifact_id: str, request: EvidenceSignRequest) # ---------- Compliance Mapping Service Endpoints (Phase 7) ---------- + @app.post("/api/governance/compliance/map") async def map_primitive_to_controls(request: ComplianceMappingRequest): """Map a governance primitive to compliance controls.""" @@ -2129,7 +2152,7 @@ async def map_primitive_to_controls(request: ComplianceMappingRequest): primitive_type=primitive_type, primitive_id=request.primitive_id, framework=framework, - metadata=request.metadata + metadata=request.metadata, ) return { @@ -2150,9 +2173,7 @@ async def map_primitive_to_controls(request: ComplianceMappingRequest): @app.get("/api/governance/compliance/{framework}/controls") async def list_compliance_controls( - framework: str, - status: Optional[str] = None, - category: Optional[str] = None + framework: str, status: Optional[str] = None, category: Optional[str] = None ): """List compliance controls for a framework with optional filtering.""" initialize_services() @@ -2165,9 +2186,7 @@ async def list_compliance_controls( status_enum = ControlStatus(status.lower()) if status else None controls = compliance_mapping_service.list_controls( - framework=framework_enum, - status=status_enum, - category=category + framework=framework_enum, status=status_enum, category=category ) return { @@ -2211,8 +2230,7 @@ async def get_control_status(framework: str, control_id: str): if not control: raise HTTPException( - status_code=404, - detail=f"Control {control_id} not found in framework {framework}" + status_code=404, detail=f"Control {control_id} not found in framework {framework}" ) return { @@ -2246,15 +2264,12 @@ async def verify_control(framework: str, control_id: str, request: ComplianceVer framework_enum = RegulatoryFramework(framework.lower()) success = compliance_mapping_service.verify_control( - control_id=control_id, - framework=framework_enum, - notes=request.notes + control_id=control_id, framework=framework_enum, notes=request.notes ) if not success: raise HTTPException( - status_code=404, - detail=f"Control {control_id} not found in framework {framework}" + status_code=404, detail=f"Control {control_id} not found in framework {framework}" ) # Get updated control @@ -2276,9 +2291,7 @@ async def verify_control(framework: str, control_id: str, request: ComplianceVer 
@app.post("/api/governance/compliance/{framework}/{control_id}/link-evidence") async def link_evidence_to_control( - framework: str, - control_id: str, - request: ComplianceLinkEvidenceRequest + framework: str, control_id: str, request: ComplianceLinkEvidenceRequest ): """Link an evidence artifact to a compliance control.""" initialize_services() @@ -2292,13 +2305,12 @@ async def link_evidence_to_control( success = compliance_mapping_service.link_evidence_to_control( control_id=control_id, framework=framework_enum, - evidence_artifact_id=request.evidence_artifact_id + evidence_artifact_id=request.evidence_artifact_id, ) if not success: raise HTTPException( - status_code=404, - detail=f"Control {control_id} not found in framework {framework}" + status_code=404, detail=f"Control {control_id} not found in framework {framework}" ) # Get updated control @@ -2441,6 +2453,7 @@ async def get_compliance_statistics(): # ---------- Audit Export Service Endpoints (Phase 8) ---------- + @app.post("/api/governance/audit-export/request") async def create_audit_export_request(request: AuditExportCreateRequest, http_request: Request): """Create a new audit export request.""" @@ -2450,11 +2463,15 @@ async def create_audit_export_request(request: AuditExportCreateRequest, http_re raise HTTPException(status_code=500, detail="Audit export service not initialized") # Authentication check - session_id = http_request.cookies.get("session_id") or http_request.headers.get("Authorization", "").replace("Bearer ", "") + session_id = http_request.cookies.get("session_id") or http_request.headers.get( + "Authorization", "" + ).replace("Bearer ", "") if session_id: session, error = auth_service.validate_session(session_id) if session and not auth_service.has_permission(session.role, Permission.VIEW_AUDIT_LOGS): - raise HTTPException(status_code=403, detail="Insufficient permissions for audit exports") + raise HTTPException( + status_code=403, detail="Insufficient permissions for audit exports" + ) try: # Convert string enums to service enums @@ -2464,13 +2481,13 @@ async def create_audit_export_request(request: AuditExportCreateRequest, http_re # Parse date strings if provided start_date = None if request.start_date: - start_date = datetime.fromisoformat(request.start_date.replace('Z', '+00:00')) + start_date = datetime.fromisoformat(request.start_date.replace("Z", "+00:00")) if start_date.tzinfo is None: start_date = start_date.replace(tzinfo=timezone.utc) end_date = None if request.end_date: - end_date = datetime.fromisoformat(request.end_date.replace('Z', '+00:00')) + end_date = datetime.fromisoformat(request.end_date.replace("Z", "+00:00")) if end_date.tzinfo is None: end_date = end_date.replace(tzinfo=timezone.utc) @@ -2483,7 +2500,7 @@ async def create_audit_export_request(request: AuditExportCreateRequest, http_re start_date=start_date, end_date=end_date, include_deleted=request.include_deleted, - metadata=request.metadata + metadata=request.metadata, ) return { @@ -2492,7 +2509,9 @@ async def create_audit_export_request(request: AuditExportCreateRequest, http_re "purpose": export_request.purpose, "scope": export_request.scope.value, "format": export_request.format.value, - "start_date": export_request.start_date.isoformat() if export_request.start_date else None, + "start_date": ( + export_request.start_date.isoformat() if export_request.start_date else None + ), "end_date": export_request.end_date.isoformat() if export_request.end_date else None, "include_deleted": export_request.include_deleted, "requested_at": 
export_request.requested_at.isoformat(), @@ -2514,11 +2533,15 @@ async def generate_audit_export(export_id: str, http_request: Request): raise HTTPException(status_code=500, detail="Audit export service not initialized") # Authentication check - session_id = http_request.cookies.get("session_id") or http_request.headers.get("Authorization", "").replace("Bearer ", "") + session_id = http_request.cookies.get("session_id") or http_request.headers.get( + "Authorization", "" + ).replace("Bearer ", "") if session_id: session, error = auth_service.validate_session(session_id) if session and not auth_service.has_permission(session.role, Permission.VIEW_AUDIT_LOGS): - raise HTTPException(status_code=403, detail="Insufficient permissions for audit exports") + raise HTTPException( + status_code=403, detail="Insufficient permissions for audit exports" + ) try: # Get export request @@ -2539,7 +2562,7 @@ async def generate_audit_export(export_id: str, http_request: Request): override_service=override_service, evidence_service=evidence_service, compliance_service=compliance_mapping_service, - ledger=ledger + ledger=ledger, ) # Log audit event @@ -2548,7 +2571,7 @@ async def generate_audit_export(export_id: str, http_request: Request): method="POST", status_code=200, user_id=session.user_id if session_id and session else None, - ip_address=http_request.client.host if http_request.client else None + ip_address=http_request.client.host if http_request.client else None, ) return { @@ -2574,7 +2597,9 @@ async def generate_audit_export(export_id: str, http_request: Request): @app.get("/api/governance/audit-export/{export_id}") -async def get_audit_export(export_id: str, include_content: bool = False, http_request: Request = None): +async def get_audit_export( + export_id: str, include_content: bool = False, http_request: Request = None +): """Retrieve a completed export by ID.""" initialize_services() @@ -2583,11 +2608,17 @@ async def get_audit_export(export_id: str, include_content: bool = False, http_r # Authentication check if http_request: - session_id = http_request.cookies.get("session_id") or http_request.headers.get("Authorization", "").replace("Bearer ", "") + session_id = http_request.cookies.get("session_id") or http_request.headers.get( + "Authorization", "" + ).replace("Bearer ", "") if session_id: session, error = auth_service.validate_session(session_id) - if session and not auth_service.has_permission(session.role, Permission.VIEW_AUDIT_LOGS): - raise HTTPException(status_code=403, detail="Insufficient permissions for audit exports") + if session and not auth_service.has_permission( + session.role, Permission.VIEW_AUDIT_LOGS + ): + raise HTTPException( + status_code=403, detail="Insufficient permissions for audit exports" + ) try: package = audit_export_service.get_export(export_id) @@ -2619,7 +2650,9 @@ async def get_audit_export(export_id: str, include_content: bool = False, http_r response["content"] = package.content else: # Include preview (first 500 chars) - response["content_preview"] = package.content[:500] if len(package.content) > 500 else package.content + response["content_preview"] = ( + package.content[:500] if len(package.content) > 500 else package.content + ) return response except HTTPException: @@ -2637,11 +2670,15 @@ async def download_audit_export(export_id: str, http_request: Request): raise HTTPException(status_code=500, detail="Audit export service not initialized") # Authentication check - session_id = http_request.cookies.get("session_id") or 
http_request.headers.get("Authorization", "").replace("Bearer ", "") + session_id = http_request.cookies.get("session_id") or http_request.headers.get( + "Authorization", "" + ).replace("Bearer ", "") if session_id: session, error = auth_service.validate_session(session_id) if session and not auth_service.has_permission(session.role, Permission.VIEW_AUDIT_LOGS): - raise HTTPException(status_code=403, detail="Insufficient permissions for audit exports") + raise HTTPException( + status_code=403, detail="Insufficient permissions for audit exports" + ) try: package = audit_export_service.get_export(export_id) @@ -2654,7 +2691,7 @@ async def download_audit_export(export_id: str, http_request: Request): method="GET", status_code=200, user_id=session.user_id if session_id and session else None, - ip_address=http_request.client.host if http_request.client else None + ip_address=http_request.client.host if http_request.client else None, ) # Determine Content-Type and filename based on format @@ -2680,7 +2717,7 @@ async def download_audit_export(export_id: str, http_request: Request): return PlainTextResponse( content=package.content, media_type=content_type, - headers={"Content-Disposition": f'attachment; filename="{filename}"'} + headers={"Content-Disposition": f'attachment; filename="{filename}"'}, ) except HTTPException: raise @@ -2695,7 +2732,7 @@ async def list_audit_exports( format: Optional[str] = None, status: Optional[str] = None, limit: int = 100, - http_request: Request = None + http_request: Request = None, ): """List export packages with filtering.""" initialize_services() @@ -2705,11 +2742,17 @@ async def list_audit_exports( # Authentication check if http_request: - session_id = http_request.cookies.get("session_id") or http_request.headers.get("Authorization", "").replace("Bearer ", "") + session_id = http_request.cookies.get("session_id") or http_request.headers.get( + "Authorization", "" + ).replace("Bearer ", "") if session_id: session, error = auth_service.validate_session(session_id) - if session and not auth_service.has_permission(session.role, Permission.VIEW_AUDIT_LOGS): - raise HTTPException(status_code=403, detail="Insufficient permissions for audit exports") + if session and not auth_service.has_permission( + session.role, Permission.VIEW_AUDIT_LOGS + ): + raise HTTPException( + status_code=403, detail="Insufficient permissions for audit exports" + ) try: # Validate limit @@ -2736,6 +2779,7 @@ async def list_audit_exports( if status: from lexecon.audit_export.service import ExportStatus + try: status_enum = ExportStatus(status.upper()) exports = [e for e in exports if e.request.status == status_enum] @@ -2775,9 +2819,7 @@ async def list_audit_exports( @app.get("/api/governance/audit-export/requests") async def list_audit_export_requests( - status: Optional[str] = None, - limit: int = 100, - http_request: Request = None + status: Optional[str] = None, limit: int = 100, http_request: Request = None ): """List export requests (including pending ones).""" initialize_services() @@ -2787,11 +2829,17 @@ async def list_audit_export_requests( # Authentication check if http_request: - session_id = http_request.cookies.get("session_id") or http_request.headers.get("Authorization", "").replace("Bearer ", "") + session_id = http_request.cookies.get("session_id") or http_request.headers.get( + "Authorization", "" + ).replace("Bearer ", "") if session_id: session, error = auth_service.validate_session(session_id) - if session and not auth_service.has_permission(session.role, 
Permission.VIEW_AUDIT_LOGS): - raise HTTPException(status_code=403, detail="Insufficient permissions for audit exports") + if session and not auth_service.has_permission( + session.role, Permission.VIEW_AUDIT_LOGS + ): + raise HTTPException( + status_code=403, detail="Insufficient permissions for audit exports" + ) try: # Validate limit @@ -2804,6 +2852,7 @@ async def list_audit_export_requests( # Filter by status if provided if status: from lexecon.audit_export.service import ExportStatus + try: status_enum = ExportStatus(status.upper()) requests = [r for r in requests if r.status == status_enum] @@ -2849,11 +2898,17 @@ async def get_audit_export_statistics(http_request: Request = None): # Authentication check if http_request: - session_id = http_request.cookies.get("session_id") or http_request.headers.get("Authorization", "").replace("Bearer ", "") + session_id = http_request.cookies.get("session_id") or http_request.headers.get( + "Authorization", "" + ).replace("Bearer ", "") if session_id: session, error = auth_service.validate_session(session_id) - if session and not auth_service.has_permission(session.role, Permission.VIEW_AUDIT_LOGS): - raise HTTPException(status_code=403, detail="Insufficient permissions for audit exports") + if session and not auth_service.has_permission( + session.role, Permission.VIEW_AUDIT_LOGS + ): + raise HTTPException( + status_code=403, detail="Insufficient permissions for audit exports" + ) try: stats = audit_export_service.get_export_statistics() diff --git a/src/lexecon/audit_export/service.py b/src/lexecon/audit_export/service.py index 118d7f1..b5e56a2 100644 --- a/src/lexecon/audit_export/service.py +++ b/src/lexecon/audit_export/service.py @@ -11,19 +11,20 @@ - Cryptographic verification """ -from dataclasses import dataclass, field -from datetime import datetime, timezone, timedelta -from enum import Enum -from typing import Any, Dict, List, Optional, Set -import json import csv +import hashlib import io +import json import uuid -import hashlib +from dataclasses import dataclass, field +from datetime import datetime, timedelta, timezone +from enum import Enum +from typing import Any, Dict, List, Optional, Set class ExportFormat(Enum): """Supported export formats.""" + JSON = "json" CSV = "csv" MARKDOWN = "markdown" @@ -32,6 +33,7 @@ class ExportFormat(Enum): class ExportScope(Enum): """Scope of data to export.""" + ALL = "all" RISK_ONLY = "risk_only" ESCALATION_ONLY = "escalation_only" @@ -43,6 +45,7 @@ class ExportScope(Enum): class ExportStatus(Enum): """Status of export operation.""" + PENDING = "pending" IN_PROGRESS = "in_progress" COMPLETED = "completed" @@ -52,6 +55,7 @@ class ExportStatus(Enum): @dataclass class ExportRequest: """Request for audit export.""" + export_id: str requester: str purpose: str @@ -69,6 +73,7 @@ class ExportRequest: @dataclass class ExportPackage: """Complete audit export package.""" + export_id: str request: ExportRequest generated_at: datetime @@ -85,6 +90,7 @@ class ExportPackage: @dataclass class ExportStatistics: """Statistics about exported data.""" + total_risks: int = 0 total_escalations: int = 0 total_overrides: int = 0 @@ -119,7 +125,7 @@ def create_export_request( start_date: Optional[datetime] = None, end_date: Optional[datetime] = None, include_deleted: bool = False, - metadata: Optional[Dict[str, Any]] = None + metadata: Optional[Dict[str, Any]] = None, ) -> ExportRequest: """ Create a new export request. 
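For orientation, a minimal usage sketch of the request-creation API reformatted above. Only start_date, end_date, include_deleted and metadata are visible in this hunk, so the no-argument constructor and the requester/purpose/format/scope keyword names are assumptions inferred from the ExportRequest fields and the ExportFormat/ExportScope enums shown earlier in this diff:

from datetime import datetime, timedelta, timezone

from lexecon.audit_export.service import AuditExportService, ExportFormat, ExportScope

service = AuditExportService()  # assumption: constructor takes no required arguments
end = datetime.now(timezone.utc)
request = service.create_export_request(
    requester="compliance-officer",      # assumed keyword name
    purpose="quarterly audit evidence",  # assumed keyword name
    format=ExportFormat.JSON,            # assumed keyword name
    scope=ExportScope.ALL,               # assumed keyword name
    start_date=end - timedelta(days=90),
    end_date=end,
    include_deleted=False,
)
print(request.export_id, request.status)
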
@@ -148,7 +154,7 @@ def create_export_request( start_date=start_date, end_date=end_date, include_deleted=include_deleted, - metadata=metadata + metadata=metadata, ) self._requests[export_id] = request @@ -162,7 +168,7 @@ def generate_export( override_service=None, evidence_service=None, compliance_service=None, - ledger=None + ledger=None, ) -> ExportPackage: """ Generate audit export package from governance services. @@ -193,7 +199,7 @@ def generate_export( "date_range": { "start": request.start_date.isoformat() if request.start_date else None, "end": request.end_date.isoformat() if request.end_date else None, - } + }, } } @@ -204,17 +210,23 @@ def generate_export( # Collect escalations if request.scope in [ExportScope.ALL, ExportScope.ESCALATION_ONLY] and escalation_service: - escalations = self._collect_escalations(escalation_service, request.start_date, request.end_date) + escalations = self._collect_escalations( + escalation_service, request.start_date, request.end_date + ) data["escalations"] = escalations # Collect overrides if request.scope in [ExportScope.ALL, ExportScope.OVERRIDE_ONLY] and override_service: - overrides = self._collect_overrides(override_service, request.start_date, request.end_date) + overrides = self._collect_overrides( + override_service, request.start_date, request.end_date + ) data["overrides"] = overrides # Collect evidence artifacts if request.scope in [ExportScope.ALL, ExportScope.EVIDENCE_ONLY] and evidence_service: - evidence = self._collect_evidence(evidence_service, request.start_date, request.end_date) + evidence = self._collect_evidence( + evidence_service, request.start_date, request.end_date + ) data["evidence"] = evidence # Collect compliance mappings @@ -248,7 +260,7 @@ def generate_export( checksum=checksum, size_bytes=len(content.encode()), record_count=self._count_records(data), - metadata=request.metadata + metadata=request.metadata, ) request.status = ExportStatus.COMPLETED @@ -257,16 +269,13 @@ def generate_export( return package def _collect_risks( - self, - risk_service, - start_date: Optional[datetime], - end_date: Optional[datetime] + self, risk_service, start_date: Optional[datetime], end_date: Optional[datetime] ) -> List[Dict[str, Any]]: """Collect risk assessments from service.""" risks = [] # Get all risks from service - all_risks = risk_service.list_risks() if hasattr(risk_service, 'list_risks') else [] + all_risks = risk_service.list_risks() if hasattr(risk_service, "list_risks") else [] for risk in all_risks: # Filter by date if specified @@ -275,36 +284,39 @@ def _collect_risks( if end_date and risk.timestamp > end_date: continue - risks.append({ - "risk_id": risk.risk_id, - "decision_id": risk.decision_id, - "overall_score": risk.overall_score, - "risk_level": risk.risk_level.value, - "dimensions": { - "security": risk.dimensions.security, - "privacy": risk.dimensions.privacy, - "compliance": risk.dimensions.compliance, - "operational": risk.dimensions.operational, - "reputational": risk.dimensions.reputational, - "financial": risk.dimensions.financial, - }, - "timestamp": risk.timestamp.isoformat(), - "factors": risk.factors, - }) + risks.append( + { + "risk_id": risk.risk_id, + "decision_id": risk.decision_id, + "overall_score": risk.overall_score, + "risk_level": risk.risk_level.value, + "dimensions": { + "security": risk.dimensions.security, + "privacy": risk.dimensions.privacy, + "compliance": risk.dimensions.compliance, + "operational": risk.dimensions.operational, + "reputational": risk.dimensions.reputational, + 
"financial": risk.dimensions.financial, + }, + "timestamp": risk.timestamp.isoformat(), + "factors": risk.factors, + } + ) return risks def _collect_escalations( - self, - escalation_service, - start_date: Optional[datetime], - end_date: Optional[datetime] + self, escalation_service, start_date: Optional[datetime], end_date: Optional[datetime] ) -> List[Dict[str, Any]]: """Collect escalations from service.""" escalations = [] # Get all escalations - all_escalations = escalation_service.list_escalations() if hasattr(escalation_service, 'list_escalations') else [] + all_escalations = ( + escalation_service.list_escalations() + if hasattr(escalation_service, "list_escalations") + else [] + ) for esc in all_escalations: # Filter by date @@ -324,13 +336,13 @@ def _collect_escalations( } # Add optional fields if they exist - if hasattr(esc, 'sla_deadline') and esc.sla_deadline: + if hasattr(esc, "sla_deadline") and esc.sla_deadline: esc_data["sla_deadline"] = esc.sla_deadline.isoformat() - if hasattr(esc, 'resolved_at') and esc.resolved_at: + if hasattr(esc, "resolved_at") and esc.resolved_at: esc_data["resolved_at"] = esc.resolved_at.isoformat() - if hasattr(esc, 'resolved_by'): + if hasattr(esc, "resolved_by"): esc_data["resolved_by"] = esc.resolved_by - if hasattr(esc, 'outcome'): + if hasattr(esc, "outcome"): esc_data["outcome"] = esc.outcome escalations.append(esc_data) @@ -338,16 +350,15 @@ def _collect_escalations( return escalations def _collect_overrides( - self, - override_service, - start_date: Optional[datetime], - end_date: Optional[datetime] + self, override_service, start_date: Optional[datetime], end_date: Optional[datetime] ) -> List[Dict[str, Any]]: """Collect overrides from service.""" overrides = [] # Get all overrides - all_overrides = override_service.list_overrides() if hasattr(override_service, 'list_overrides') else [] + all_overrides = ( + override_service.list_overrides() if hasattr(override_service, "list_overrides") else [] + ) for ovr in all_overrides: # Filter by date @@ -356,32 +367,35 @@ def _collect_overrides( if end_date and ovr.timestamp > end_date: continue - overrides.append({ - "override_id": ovr.override_id, - "decision_id": ovr.decision_id, - "override_type": ovr.override_type.value, - "authorized_by": ovr.authorized_by, - "justification": ovr.justification, - "timestamp": ovr.timestamp.isoformat(), - "original_outcome": ovr.original_outcome.value if ovr.original_outcome else None, - "new_outcome": ovr.new_outcome.value if ovr.new_outcome else None, - "expires_at": ovr.expires_at.isoformat() if ovr.expires_at else None, - "evidence_ids": ovr.evidence_ids, - }) + overrides.append( + { + "override_id": ovr.override_id, + "decision_id": ovr.decision_id, + "override_type": ovr.override_type.value, + "authorized_by": ovr.authorized_by, + "justification": ovr.justification, + "timestamp": ovr.timestamp.isoformat(), + "original_outcome": ( + ovr.original_outcome.value if ovr.original_outcome else None + ), + "new_outcome": ovr.new_outcome.value if ovr.new_outcome else None, + "expires_at": ovr.expires_at.isoformat() if ovr.expires_at else None, + "evidence_ids": ovr.evidence_ids, + } + ) return overrides def _collect_evidence( - self, - evidence_service, - start_date: Optional[datetime], - end_date: Optional[datetime] + self, evidence_service, start_date: Optional[datetime], end_date: Optional[datetime] ) -> List[Dict[str, Any]]: """Collect evidence artifacts from service.""" evidence = [] # Get all artifacts - all_artifacts = evidence_service.list_artifacts() 
if hasattr(evidence_service, 'list_artifacts') else [] + all_artifacts = ( + evidence_service.list_artifacts() if hasattr(evidence_service, "list_artifacts") else [] + ) for artifact in all_artifacts: # Filter by date @@ -390,34 +404,39 @@ def _collect_evidence( if end_date and artifact.created_at > end_date: continue - evidence.append({ - "artifact_id": artifact.artifact_id, - "artifact_type": artifact.artifact_type.value, - "sha256_hash": artifact.sha256_hash, - "created_at": artifact.created_at.isoformat(), - "source": artifact.source, - "content_type": artifact.content_type, - "size_bytes": artifact.size_bytes, - "related_decision_ids": artifact.related_decision_ids, - "related_control_ids": artifact.related_control_ids, - "retention_until": artifact.retention_until.isoformat() if artifact.retention_until else None, - "is_immutable": artifact.is_immutable, - "has_signature": artifact.digital_signature is not None, - }) + evidence.append( + { + "artifact_id": artifact.artifact_id, + "artifact_type": artifact.artifact_type.value, + "sha256_hash": artifact.sha256_hash, + "created_at": artifact.created_at.isoformat(), + "source": artifact.source, + "content_type": artifact.content_type, + "size_bytes": artifact.size_bytes, + "related_decision_ids": artifact.related_decision_ids, + "related_control_ids": artifact.related_control_ids, + "retention_until": ( + artifact.retention_until.isoformat() if artifact.retention_until else None + ), + "is_immutable": artifact.is_immutable, + "has_signature": artifact.digital_signature is not None, + } + ) return evidence def _collect_compliance(self, compliance_service) -> Dict[str, Any]: """Collect compliance mappings from service.""" - compliance_data = { - "statistics": compliance_service.get_statistics(), - "frameworks": {} - } + compliance_data = {"statistics": compliance_service.get_statistics(), "frameworks": {}} # Get data for each framework from lexecon.compliance_mapping.service import RegulatoryFramework - for framework in [RegulatoryFramework.SOC2, RegulatoryFramework.ISO27001, RegulatoryFramework.GDPR]: + for framework in [ + RegulatoryFramework.SOC2, + RegulatoryFramework.ISO27001, + RegulatoryFramework.GDPR, + ]: controls = compliance_service.list_controls(framework) coverage = compliance_service.get_framework_coverage(framework) gaps = compliance_service.analyze_gaps(framework) @@ -441,10 +460,7 @@ def _collect_compliance(self, compliance_service) -> Dict[str, Any]: return compliance_data def _collect_decisions( - self, - ledger, - start_date: Optional[datetime], - end_date: Optional[datetime] + self, ledger, start_date: Optional[datetime], end_date: Optional[datetime] ) -> List[Dict[str, Any]]: """Collect decision log from ledger.""" decisions = [] @@ -456,7 +472,7 @@ def _collect_decisions( # Parse timestamp try: - entry_time = datetime.fromisoformat(entry.timestamp.replace('Z', '+00:00')) + entry_time = datetime.fromisoformat(entry.timestamp.replace("Z", "+00:00")) if entry_time.tzinfo is None: entry_time = entry_time.replace(tzinfo=timezone.utc) except: @@ -468,13 +484,15 @@ def _collect_decisions( if end_date and entry_time > end_date: continue - decisions.append({ - "entry_id": entry.entry_id, - "timestamp": entry.timestamp, - "entry_hash": entry.entry_hash, - "previous_hash": entry.previous_hash, - "data": entry.data, - }) + decisions.append( + { + "entry_id": entry.entry_id, + "timestamp": entry.timestamp, + "entry_hash": entry.entry_hash, + "previous_hash": entry.previous_hash, + "data": entry.data, + } + ) return decisions @@ 
-533,17 +551,19 @@ def _format_csv(self, data: Dict[str, Any]) -> str: output.write("=== RISK ASSESSMENTS ===\n") writer = csv.DictWriter( output, - fieldnames=["risk_id", "decision_id", "overall_score", "risk_level", "timestamp"] + fieldnames=["risk_id", "decision_id", "overall_score", "risk_level", "timestamp"], ) writer.writeheader() for risk in data["risks"]: - writer.writerow({ - "risk_id": risk["risk_id"], - "decision_id": risk["decision_id"], - "overall_score": risk["overall_score"], - "risk_level": risk["risk_level"], - "timestamp": risk["timestamp"], - }) + writer.writerow( + { + "risk_id": risk["risk_id"], + "decision_id": risk["decision_id"], + "overall_score": risk["overall_score"], + "risk_level": risk["risk_level"], + "timestamp": risk["timestamp"], + } + ) output.write("\n") # Escalations section @@ -551,17 +571,19 @@ def _format_csv(self, data: Dict[str, Any]) -> str: output.write("=== ESCALATIONS ===\n") writer = csv.DictWriter( output, - fieldnames=["escalation_id", "decision_id", "status", "priority", "created_at"] + fieldnames=["escalation_id", "decision_id", "status", "priority", "created_at"], ) writer.writeheader() for esc in data["escalations"]: - writer.writerow({ - "escalation_id": esc["escalation_id"], - "decision_id": esc["decision_id"], - "status": esc["status"], - "priority": esc["priority"], - "created_at": esc["created_at"], - }) + writer.writerow( + { + "escalation_id": esc["escalation_id"], + "decision_id": esc["decision_id"], + "status": esc["status"], + "priority": esc["priority"], + "created_at": esc["created_at"], + } + ) output.write("\n") # Overrides section @@ -569,17 +591,25 @@ def _format_csv(self, data: Dict[str, Any]) -> str: output.write("=== OVERRIDES ===\n") writer = csv.DictWriter( output, - fieldnames=["override_id", "decision_id", "override_type", "authorized_by", "timestamp"] + fieldnames=[ + "override_id", + "decision_id", + "override_type", + "authorized_by", + "timestamp", + ], ) writer.writeheader() for ovr in data["overrides"]: - writer.writerow({ - "override_id": ovr["override_id"], - "decision_id": ovr["decision_id"], - "override_type": ovr["override_type"], - "authorized_by": ovr["authorized_by"], - "timestamp": ovr["timestamp"], - }) + writer.writerow( + { + "override_id": ovr["override_id"], + "decision_id": ovr["decision_id"], + "override_type": ovr["override_type"], + "authorized_by": ovr["authorized_by"], + "timestamp": ovr["timestamp"], + } + ) output.write("\n") return output.getvalue() @@ -674,9 +704,13 @@ def _format_html(self, data: Dict[str, Any]) -> str: html.append("MetricCount") stats = data["statistics"] html.append(f"Risk Assessments{stats.get('total_risks', 0)}") - html.append(f"Escalations{stats.get('total_escalations', 0)}") + html.append( + f"Escalations{stats.get('total_escalations', 0)}" + ) html.append(f"Overrides{stats.get('total_overrides', 0)}") - html.append(f"Evidence Artifacts{stats.get('total_evidence', 0)}") + html.append( + f"Evidence Artifacts{stats.get('total_evidence', 0)}" + ) html.append("") html.append("") @@ -688,9 +722,7 @@ def get_export(self, export_id: str) -> Optional[ExportPackage]: return self._exports.get(export_id) def list_exports( - self, - requester: Optional[str] = None, - limit: int = 100 + self, requester: Optional[str] = None, limit: int = 100 ) -> List[ExportPackage]: """List export packages with optional filtering.""" exports = list(self._exports.values()) diff --git a/src/lexecon/compliance/eu_ai_act/article_11_technical_docs.py 
b/src/lexecon/compliance/eu_ai_act/article_11_technical_docs.py index e4822b7..ee5efd0 100644 --- a/src/lexecon/compliance/eu_ai_act/article_11_technical_docs.py +++ b/src/lexecon/compliance/eu_ai_act/article_11_technical_docs.py @@ -15,12 +15,12 @@ import hashlib import json +from dataclasses import asdict, dataclass from datetime import datetime from typing import Any, Dict, List, Optional -from dataclasses import dataclass, asdict -from lexecon.policy.engine import PolicyEngine from lexecon.ledger.chain import LedgerChain +from lexecon.policy.engine import PolicyEngine @dataclass @@ -71,17 +71,12 @@ class TechnicalDocumentationGenerator: """ def __init__( - self, - policy_engine: Optional[PolicyEngine] = None, - ledger: Optional[LedgerChain] = None + self, policy_engine: Optional[PolicyEngine] = None, ledger: Optional[LedgerChain] = None ): self.policy_engine = policy_engine self.ledger = ledger - def generate( - self, - system_info: Optional[Dict[str, Any]] = None - ) -> TechnicalDocumentation: + def generate(self, system_info: Optional[Dict[str, Any]] = None) -> TechnicalDocumentation: """ Generate complete Article 11 documentation. @@ -114,8 +109,7 @@ def generate( return doc def _generate_general_description( - self, - system_info: Optional[Dict[str, Any]] = None + self, system_info: Optional[Dict[str, Any]] = None ) -> Dict[str, Any]: """Article 11(1) - General characteristics, capabilities and limitations.""" return { @@ -134,18 +128,18 @@ def _generate_general_description( "Immutable audit trail generation with hash-chaining", "Real-time decision gating and risk assessment", "Multi-model governance across AI providers", - "Automated compliance documentation generation" + "Automated compliance documentation generation", ], "technical_architecture": { "policy_engine": "Rule-based decision system with cryptographic verification", "ledger": "Hash-chained immutable audit log", "capability_tokens": "Cryptographic permissions with scope limitations", - "decision_service": "Runtime enforcement with sub-200ms latency" + "decision_service": "Runtime enforcement with sub-200ms latency", }, "deployment_modes": [ "Cloud-hosted SaaS", "Self-hosted enterprise", - "Hybrid deployment" + "Hybrid deployment", ], "integration_points": [ "OpenAI API wrapper", @@ -153,8 +147,8 @@ def _generate_general_description( "Generic model adapter", "Enterprise SSO (SAML/OIDC)", "SIEM integration", - "GRC platform integration" - ] + "GRC platform integration", + ], } def _generate_intended_purpose(self) -> Dict[str, Any]: @@ -173,15 +167,15 @@ def _generate_intended_purpose(self) -> Dict[str, Any]: "Enforce data retention policies on AI-generated content", "Limit AI access to sensitive systems based on risk level", "Ensure human oversight for high-risk AI decisions", - "Generate compliance evidence for regulatory audits" + "Generate compliance evidence for regulatory audits", ], "target_users": [ "Compliance officers", "Security teams (CISOs)", "Legal counsel", "AI operations teams", - "Risk management" - ] + "Risk management", + ], }, "current_policy_coverage": { "active_policies": policy_count, @@ -190,34 +184,34 @@ def _generate_intended_purpose(self) -> Dict[str, Any]: "Access control", "Risk thresholds", "Human oversight requirements", - "Audit requirements" - ] + "Audit requirements", + ], }, "reasonably_foreseeable_misuse": { "identified_risks": [ { "risk": "Over-restrictive policies blocking legitimate AI usage", "mitigation": "Policy testing and simulation mode before production deployment", - 
"residual_risk": "Low - policies are version-controlled with rollback capability" + "residual_risk": "Low - policies are version-controlled with rollback capability", }, { "risk": "Incorrect policy configuration allowing prohibited operations", "mitigation": "Policy validation on load, immutable audit trail of all decisions", - "residual_risk": "Medium - requires trained policy administrators" + "residual_risk": "Medium - requires trained policy administrators", }, { "risk": "System bypass through direct API access", "mitigation": "Cryptographic token verification, all API calls logged", - "residual_risk": "Low - bypass attempts are detectable and logged" - } + "residual_risk": "Low - bypass attempts are detectable and logged", + }, ], "prohibited_uses": [ "Circumventing other AI safety systems", "Surveillance without proper authorization", "Automated decision-making without human oversight where required", - "Processing special category data without appropriate safeguards" - ] - } + "Processing special category data without appropriate safeguards", + ], + }, } def _generate_design_specifications(self) -> Dict[str, Any]: @@ -230,38 +224,38 @@ def _generate_design_specifications(self) -> Dict[str, Any]: "name": "Policy Engine", "function": "Evaluates AI requests against organizational policies", "technology": "Rule-based system with lexicoding semantics", - "performance": "Sub-200ms evaluation latency" + "performance": "Sub-200ms evaluation latency", }, { "name": "Decision Service", "function": "Runtime gating of AI operations", "technology": "FastAPI service with async request handling", - "performance": "1000+ requests/minute throughput" + "performance": "1000+ requests/minute throughput", }, { "name": "Capability Token System", "function": "Cryptographic proof of authorized operations", "technology": "Ed25519 signatures with scope-limited tokens", - "performance": "Token generation <10ms" + "performance": "Token generation <10ms", }, { "name": "Audit Ledger", "function": "Immutable record of all governance decisions", "technology": "Hash-chained ledger with SHA-256", - "performance": "100% tamper-evident verification" - } + "performance": "100% tamper-evident verification", + }, ], "cryptographic_measures": { "signing": "Ed25519 for decision signatures", "hashing": "SHA-256 for audit chain integrity", "key_management": "HSM-compatible key storage", - "token_format": "JWS-compatible capability tokens" + "token_format": "JWS-compatible capability tokens", }, "data_flows": [ "AI request → Policy Engine → Decision Service → AI model", "All decisions → Audit Ledger", - "Evidence generation → Compliance Reports" - ] + "Evidence generation → Compliance Reports", + ], }, "security_specifications": { "authentication": "Multi-factor with SSO integration", @@ -269,16 +263,16 @@ def _generate_design_specifications(self) -> Dict[str, Any]: "encryption": { "at_rest": "AES-256", "in_transit": "TLS 1.3", - "key_rotation": "90-day mandatory rotation" + "key_rotation": "90-day mandatory rotation", }, "audit_logging": "All operations logged with cryptographic integrity", - "vulnerability_management": "Automated scanning, 30-day patch SLA" + "vulnerability_management": "Automated scanning, 30-day patch SLA", }, "interoperability": { "standards": ["OAuth 2.0", "SAML 2.0", "OpenAPI 3.0"], "integrations": ["OpenAI", "Anthropic", "Generic REST APIs"], - "export_formats": ["JSON", "PDF", "CSV", "XML"] - } + "export_formats": ["JSON", "PDF", "CSV", "XML"], + }, } def _generate_development_methodology(self) -> 
Dict[str, Any]: @@ -292,33 +286,33 @@ def _generate_development_methodology(self) -> Dict[str, Any]: "unit_tests": "Pytest framework, >80% coverage requirement", "integration_tests": "API contract testing", "security_tests": "SAST, DAST, dependency scanning", - "compliance_tests": "Policy validation test suite" + "compliance_tests": "Policy validation test suite", }, "deployment_process": { "ci_cd": "GitHub Actions with automated tests", "staging_environment": "Mandatory pre-production testing", "rollback_capability": "Immediate rollback on failure", - "deployment_frequency": "Continuous deployment to staging, weekly to production" - } + "deployment_frequency": "Continuous deployment to staging, weekly to production", + }, }, "quality_assurance": { "code_review": "Mandatory peer review for all changes", "security_review": "Quarterly third-party security audits", "compliance_review": "Legal review for EU AI Act changes", - "penetration_testing": "Annual third-party penetration tests" + "penetration_testing": "Annual third-party penetration tests", }, "training_data": { "applicability": "Not applicable - rule-based system, not ML-based", "note": ( "Lexecon does not use machine learning models. " "Policies are explicitly defined rules, not learned behaviors." - ) + ), }, "change_management": { "policy_changes": "Version-controlled with approval workflow", "system_updates": "Automated deployment with rollback capability", - "emergency_procedures": "Documented incident response plan" - } + "emergency_procedures": "Documented incident response plan", + }, } def _generate_data_requirements(self) -> Dict[str, Any]: @@ -331,36 +325,36 @@ def _generate_data_requirements(self) -> Dict[str, Any]: "User intent descriptions", "Policy evaluation results", "Decision timestamps and identifiers", - "Cryptographic hashes and signatures" + "Cryptographic hashes and signatures", ], "personal_data": { "categories": [ "User identifiers (role-based, not individual names)", "Request context (may contain user intent descriptions)", - "Audit trail metadata (timestamps, IP addresses)" + "Audit trail metadata (timestamps, IP addresses)", ], "special_categories": "None processed by Lexecon core system", "legal_basis": "Legitimate interest - fraud prevention and security monitoring", - "data_minimization": "Only metadata required for governance decisions stored" - } + "data_minimization": "Only metadata required for governance decisions stored", + }, }, "data_quality": { "accuracy": "Policy rules are deterministic - 100% reproducible decisions", "completeness": "All required fields validated before decision processing", "consistency": "Hash-chain integrity ensures consistent audit trail", - "timeliness": "Real-time decision processing with <200ms latency" + "timeliness": "Real-time decision processing with <200ms latency", }, "data_governance": { "retention": "Minimum 10 years for high-risk decisions per Article 12", "anonymization": "Automatic anonymization after retention period", "deletion_rights": "GDPR-compliant data subject access and deletion", - "cross_border": "Data residency options for EU-only storage" + "cross_border": "Data residency options for EU-only storage", }, "data_sources": { "primary": "Direct input from AI model API calls", "secondary": "Policy definitions loaded by administrators", - "external": "None - all data generated internally or provided by client systems" - } + "external": "None - all data generated internally or provided by client systems", + }, } def _generate_human_oversight(self) -> 
Dict[str, Any]: @@ -377,58 +371,58 @@ def _generate_human_oversight(self) -> Dict[str, Any]: "level": "Policy Definition", "actors": "Compliance officers, legal counsel", "control": "All policies require human approval before activation", - "verification": "Policy hash signed by authorized administrator" + "verification": "Policy hash signed by authorized administrator", }, { "level": "High-Risk Decision Approval", "actors": "Security teams, risk managers", "control": "High-risk AI requests require human approval before execution", - "verification": "Approval signed and logged in audit trail" + "verification": "Approval signed and logged in audit trail", }, { "level": "Continuous Monitoring", "actors": "SOC analysts, compliance monitors", "control": "Real-time dashboard with alerting on anomalies", - "verification": "All monitoring actions logged" + "verification": "All monitoring actions logged", }, { "level": "Audit Review", "actors": "Internal audit, external auditors", "control": "Periodic review of decision patterns and policy effectiveness", - "verification": "Audit reports with cryptographic integrity verification" - } - ] + "verification": "Audit reports with cryptographic integrity verification", + }, + ], }, "intervention_capabilities": { "real_time": [ "Manual override of AI decisions", "Emergency policy activation", "System pause (circuit breaker)", - "Rate limiting adjustment" + "Rate limiting adjustment", ], "configuration": [ "Policy modification", "Risk threshold adjustment", "Escalation path definition", - "Approval workflow configuration" - ] + "Approval workflow configuration", + ], }, "response_times": { "automated_alert_to_human": "<60 seconds", "human_decision_to_implementation": "<2 minutes", "emergency_override": "Immediate (synchronous)", - "policy_update_deployment": "<5 minutes" + "policy_update_deployment": "<5 minutes", }, "oversight_effectiveness": { "metrics_tracked": [ "Override frequency", "Human intervention rate", "Response time distribution", - "Policy exception approval rate" + "Policy exception approval rate", ], "reporting": "Quarterly oversight effectiveness reports", - "continuous_improvement": "Metrics inform policy refinement" - } + "continuous_improvement": "Metrics inform policy refinement", + }, } def _generate_accuracy_metrics(self) -> Dict[str, Any]: @@ -444,7 +438,7 @@ def _generate_accuracy_metrics(self) -> Dict[str, Any]: ), "correctness": "Policy rules are deterministic - 100% reproducible", "precision": "Policy matching is exact string/pattern matching", - "recall": "All AI requests evaluated - 0% false negatives" + "recall": "All AI requests evaluated - 0% false negatives", }, "performance_metrics": { "policy_evaluation_correctness": "100% - deterministic rule execution", @@ -452,15 +446,15 @@ def _generate_accuracy_metrics(self) -> Dict[str, Any]: "false_positive_rate": { "value": "Depends on policy configuration", "measurement": "Tracked as 'policy exception requests'", - "improvement": "Policies refined based on exception patterns" + "improvement": "Policies refined based on exception patterns", }, "uptime": "99.9% SLA target", - "latency": "p99 <200ms for decision evaluation" + "latency": "p99 <200ms for decision evaluation", }, "validation": { "pre_deployment": "Policy simulation with historical data", "post_deployment": "Continuous monitoring of decision patterns", - "verification": f"Total decisions processed: {decisions_count}" + "verification": f"Total decisions processed: {decisions_count}", }, "limitations": { 
"policy_completeness": ( @@ -470,8 +464,8 @@ def _generate_accuracy_metrics(self) -> Dict[str, Any]: "context_understanding": ( "System evaluates explicit policy rules. " "Cannot infer implicit intent or novel risk scenarios." - ) - } + ), + }, } def _generate_known_limitations(self) -> Dict[str, Any]: @@ -486,7 +480,7 @@ def _generate_known_limitations(self) -> Dict[str, Any]: "Novel AI behaviors not covered by existing rules may pass through." ), "mitigation": "Default-deny mode available, continuous policy refinement", - "residual_risk": "Medium - requires ongoing policy maintenance" + "residual_risk": "Medium - requires ongoing policy maintenance", }, { "limitation": "Content inspection depth", @@ -495,7 +489,7 @@ def _generate_known_limitations(self) -> Dict[str, Any]: "Malicious content in natural language may not be detected." ), "mitigation": "Integration with content filtering systems recommended", - "residual_risk": "Medium - depends on integration completeness" + "residual_risk": "Medium - depends on integration completeness", }, { "limitation": "Performance vs. complexity tradeoff", @@ -504,7 +498,7 @@ def _generate_known_limitations(self) -> Dict[str, Any]: "Sub-200ms latency requires policy optimization." ), "mitigation": "Policy complexity monitoring and optimization tools", - "residual_risk": "Low - latency monitored in real-time" + "residual_risk": "Low - latency monitored in real-time", }, { "limitation": "Distributed system consistency", @@ -512,42 +506,42 @@ def _generate_known_limitations(self) -> Dict[str, Any]: "In multi-node deployments, policy updates may have brief propagation delay." ), "mitigation": "Policy versioning and staged rollout procedures", - "residual_risk": "Low - propagation typically <30 seconds" - } + "residual_risk": "Low - propagation typically <30 seconds", + }, ], "operational_constraints": [ { "constraint": "Requires explicit policy definition", "impact": "Organizations must invest in policy creation and maintenance", - "recommendation": "Use provided policy templates and continuous refinement" + "recommendation": "Use provided policy templates and continuous refinement", }, { "constraint": "Administrator expertise required", "impact": "Effective use requires understanding of governance principles", - "recommendation": "Training and certification program recommended" + "recommendation": "Training and certification program recommended", }, { "constraint": "Integration overhead", "impact": "AI systems must route through Lexecon for governance", - "recommendation": "Use provided API adapters for transparent integration" - } + "recommendation": "Use provided API adapters for transparent integration", + }, ], "regulatory_scope": { "in_scope": [ "EU AI Act Article 11-14 compliance", "GDPR audit trail requirements", - "SOC 2 access control and logging" + "SOC 2 access control and logging", ], "out_of_scope": [ "AI model training data compliance (handled by AI provider)", "Model bias detection (not a predictive system)", - "Content moderation (recommend integrating dedicated tools)" + "Content moderation (recommend integrating dedicated tools)", ], "clarification": ( "Lexecon governs AI behavior and usage, not AI model development. " "Model-level compliance remains responsibility of AI provider." 
- ) - } + ), + }, } def _build_evidence_chain(self) -> List[str]: @@ -563,8 +557,8 @@ def _calculate_hash(self, doc: TechnicalDocumentation) -> str: """Calculate deterministic hash of documentation.""" doc_dict = asdict(doc) # Remove hash field before hashing - doc_dict.pop('document_hash', None) - canonical_json = json.dumps(doc_dict, sort_keys=True, separators=(',', ':')) + doc_dict.pop("document_hash", None) + canonical_json = json.dumps(doc_dict, sort_keys=True, separators=(",", ":")) return hashlib.sha256(canonical_json.encode()).hexdigest() def export_json(self, doc: TechnicalDocumentation) -> str: diff --git a/src/lexecon/compliance/eu_ai_act/article_12_records.py b/src/lexecon/compliance/eu_ai_act/article_12_records.py index 408c942..1e1ec62 100644 --- a/src/lexecon/compliance/eu_ai_act/article_12_records.py +++ b/src/lexecon/compliance/eu_ai_act/article_12_records.py @@ -12,16 +12,17 @@ """ import json +from dataclasses import asdict, dataclass from datetime import datetime, timedelta from enum import Enum from typing import Any, Dict, List, Optional -from dataclasses import dataclass, asdict from lexecon.ledger.chain import LedgerChain, LedgerEntry class RetentionClass(Enum): """Retention classification per EU AI Act Article 12.""" + HIGH_RISK = "high_risk" # 10 years minimum STANDARD = "standard" # 6 months minimum GDPR_INTERSECT = "gdpr_intersect" # Subject to data subject rights @@ -29,6 +30,7 @@ class RetentionClass(Enum): class RecordStatus(Enum): """Status of records in retention system.""" + ACTIVE = "active" # Within retention period EXPIRING = "expiring" # Approaching retention deadline LEGAL_HOLD = "legal_hold" # Frozen for investigation @@ -39,6 +41,7 @@ class RecordStatus(Enum): @dataclass class RetentionPolicy: """Retention policy for a class of records.""" + classification: RetentionClass retention_days: int auto_anonymize: bool = True @@ -49,6 +52,7 @@ class RetentionPolicy: @dataclass class ComplianceRecord: """Article 12 compliant record wrapper.""" + record_id: str original_entry: Dict[str, Any] retention_class: RetentionClass @@ -78,22 +82,22 @@ def __init__(self, ledger: LedgerChain): retention_days=3650, # 10 years auto_anonymize=True, legal_basis="EU AI Act Article 12 - high-risk system monitoring", - data_subject_rights=False # Exception for regulatory compliance + data_subject_rights=False, # Exception for regulatory compliance ), RetentionClass.STANDARD: RetentionPolicy( classification=RetentionClass.STANDARD, retention_days=180, # 6 months auto_anonymize=True, legal_basis="Legitimate interest - security monitoring", - data_subject_rights=True + data_subject_rights=True, ), RetentionClass.GDPR_INTERSECT: RetentionPolicy( classification=RetentionClass.GDPR_INTERSECT, retention_days=90, # 90 days default auto_anonymize=True, legal_basis="Consent - user data processing", - data_subject_rights=True - ) + data_subject_rights=True, + ), } def classify_entry(self, entry: LedgerEntry) -> RetentionClass: @@ -134,8 +138,14 @@ def classify_entry(self, entry: LedgerEntry) -> RetentionClass: def _contains_personal_data(self, data: Dict[str, Any]) -> bool: """Check if data contains personal information.""" personal_indicators = [ - "email", "name", "user_id", "ip_address", - "phone", "address", "ssn", "passport" + "email", + "name", + "user_id", + "ip_address", + "phone", + "address", + "ssn", + "passport", ] data_str = json.dumps(data).lower() return any(indicator in data_str for indicator in personal_indicators) @@ -145,12 +155,13 @@ def wrap_entry(self, entry: 
LedgerEntry) -> ComplianceRecord: classification = self.classify_entry(entry) policy = self.policies[classification] - created_at = datetime.fromisoformat(entry.timestamp.replace('Z', '+00:00')) + created_at = datetime.fromisoformat(entry.timestamp.replace("Z", "+00:00")) expires_at = created_at + timedelta(days=policy.retention_days) # Check if under legal hold legal_holds = [ - hold_id for hold_id, hold in self.legal_holds.items() + hold_id + for hold_id, hold in self.legal_holds.items() if entry.entry_id in hold.get("entry_ids", []) ] @@ -163,7 +174,7 @@ def wrap_entry(self, entry: LedgerEntry) -> ComplianceRecord: created_at=entry.timestamp, expires_at=expires_at.isoformat(), status=status, - legal_holds=legal_holds + legal_holds=legal_holds, ) def get_retention_status(self) -> Dict[str, Any]: @@ -181,22 +192,18 @@ def get_retention_status(self) -> Dict[str, Any]: by_class[record.retention_class] += 1 by_status[record.status] += 1 - expires = datetime.fromisoformat(record.expires_at.replace('Z', '+00:00')) + expires = datetime.fromisoformat(record.expires_at.replace("Z", "+00:00")) if (expires - now).days <= 30: expiring_soon += 1 return { "total_records": total, - "by_classification": { - cls.value: count for cls, count in by_class.items() - }, - "by_status": { - status.value: count for status, count in by_status.items() - }, + "by_classification": {cls.value: count for cls, count in by_class.items()}, + "by_status": {status.value: count for status, count in by_status.items()}, "expiring_within_30_days": expiring_soon, "legal_holds_active": len(self.legal_holds), "oldest_record": self.ledger.entries[0].timestamp if self.ledger.entries else None, - "newest_record": self.ledger.entries[-1].timestamp if self.ledger.entries else None + "newest_record": self.ledger.entries[-1].timestamp if self.ledger.entries else None, } def apply_legal_hold( @@ -204,7 +211,7 @@ def apply_legal_hold( hold_id: str, entry_ids: Optional[List[str]] = None, reason: str = "", - requester: str = "system" + requester: str = "system", ) -> Dict[str, Any]: """ Apply legal hold to records. 
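The retention handling behind wrap_entry and get_retention_status above reduces to simple date arithmetic; a standalone sketch using the default retention periods defined in this file (10 years for high-risk, 6 months standard, 90 days for GDPR-intersecting records):

from datetime import datetime, timedelta, timezone

RETENTION_DAYS = {
    "high_risk": 3650,      # 10 years minimum, EU AI Act Article 12
    "standard": 180,        # 6 months minimum
    "gdpr_intersect": 90,   # 90 days default
}

def expires_at(created_at: datetime, retention_class: str) -> datetime:
    """Expiry date as computed in wrap_entry: created_at + retention period."""
    return created_at + timedelta(days=RETENTION_DAYS[retention_class])

def expiring_within_30_days(created_at: datetime, retention_class: str) -> bool:
    """The 30-day warning window used by get_retention_status."""
    remaining = expires_at(created_at, retention_class) - datetime.now(timezone.utc)
    return remaining.days <= 30

created = datetime(2016, 3, 1, tzinfo=timezone.utc)
print(expires_at(created, "high_risk"))  # 2026-02-27 00:00:00+00:00
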
@@ -221,14 +228,10 @@ def apply_legal_hold( "applied_at": datetime.utcnow().isoformat(), "requester": requester, "entry_ids": entry_ids, - "status": "active" + "status": "active", } - return { - "hold_id": hold_id, - "records_affected": len(entry_ids), - "status": "applied" - } + return {"hold_id": hold_id, "records_affected": len(entry_ids), "status": "applied"} def release_legal_hold(self, hold_id: str, releaser: str = "system") -> Dict[str, Any]: """Release a legal hold.""" @@ -242,25 +245,21 @@ def release_legal_hold(self, hold_id: str, releaser: str = "system") -> Dict[str affected_count = len(hold["entry_ids"]) - return { - "hold_id": hold_id, - "records_affected": affected_count, - "status": "released" - } + return {"hold_id": hold_id, "records_affected": affected_count, "status": "released"} def _record_to_dict(self, record: ComplianceRecord) -> Dict[str, Any]: """Convert ComplianceRecord to dict with enum values as strings.""" record_dict = asdict(record) # Convert enums to their string values - record_dict['retention_class'] = record.retention_class.value - record_dict['status'] = record.status.value + record_dict["retention_class"] = record.retention_class.value + record_dict["status"] = record.status.value return record_dict def generate_regulatory_package( self, start_date: Optional[str] = None, end_date: Optional[str] = None, - entry_types: Optional[List[str]] = None + entry_types: Optional[List[str]] = None, ) -> Dict[str, Any]: """ Generate complete package for regulatory requests. @@ -297,21 +296,18 @@ def generate_regulatory_package( package = { "package_type": "EU_AI_ACT_ARTICLE_12_REGULATORY_RESPONSE", "generated_at": datetime.utcnow().isoformat(), - "period": { - "start": start_date or "inception", - "end": end_date or "present" - }, + "period": {"start": start_date or "inception", "end": end_date or "present"}, "summary": { "total_records": len(records), "decisions": len(decisions), "policy_changes": len(policy_changes), "decision_outcomes": decision_outcomes, - "retention_status": self.get_retention_status() + "retention_status": self.get_retention_status(), }, "integrity_verification": { "ledger_valid": self.ledger.verify_integrity()["valid"], "chain_intact": self.ledger.verify_integrity()["chain_intact"], - "root_hash": self.ledger.entries[-1].entry_hash if self.ledger.entries else None + "root_hash": self.ledger.entries[-1].entry_hash if self.ledger.entries else None, }, "records": [self._record_to_dict(r) for r in records], "compliance_attestation": { @@ -319,17 +315,13 @@ def generate_regulatory_package( "retention_policies_applied": True, "audit_trail_integrity": True, "legal_holds_documented": len(self.legal_holds) > 0, - "generated_by": "Lexecon Compliance System" - } + "generated_by": "Lexecon Compliance System", + }, } return package - def export_for_regulator( - self, - format: str = "json", - **kwargs - ) -> str: + def export_for_regulator(self, format: str = "json", **kwargs) -> str: """ Export regulatory package in requested format. 
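The CSV branch of export_for_regulator appears to delegate to _format_csv_package (next hunk); a standalone reduction of that flattening, with field names taken from the hunk and an illustrative record:

import csv
import io

FIELDNAMES = [
    "record_id", "event_type", "timestamp", "retention_class",
    "expires_at", "status", "decision", "actor", "action",
]

def records_to_csv(records):
    output = io.StringIO()
    writer = csv.DictWriter(output, fieldnames=FIELDNAMES)
    writer.writeheader()
    for record in records:
        entry = record.get("original_entry", {})
        data = entry.get("data", {})
        writer.writerow({
            "record_id": record.get("record_id", ""),
            "event_type": entry.get("event_type", ""),
            "timestamp": record.get("created_at", ""),
            "retention_class": record.get("retention_class", ""),
            "expires_at": record.get("expires_at", ""),
            "status": record.get("status", ""),
            "decision": data.get("decision", ""),
            "actor": data.get("actor", ""),
            "action": data.get("action", ""),
        })
    return output.getvalue()

print(records_to_csv([{
    "record_id": "rec-001",
    "original_entry": {"event_type": "decision", "data": {"decision": "allow"}},
    "created_at": "2026-01-08T06:00:00+00:00",
    "retention_class": "high_risk",
    "expires_at": "2036-01-05T06:00:00+00:00",
    "status": "active",
}]))
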
@@ -401,33 +393,42 @@ def _format_csv_package(self, package: Dict[str, Any]) -> str: import io output = io.StringIO() - if not package['records']: + if not package["records"]: return "No records found" # CSV headers fieldnames = [ - "record_id", "event_type", "timestamp", "retention_class", - "expires_at", "status", "decision", "actor", "action" + "record_id", + "event_type", + "timestamp", + "retention_class", + "expires_at", + "status", + "decision", + "actor", + "action", ] writer = csv.DictWriter(output, fieldnames=fieldnames) writer.writeheader() - for record in package['records']: - entry = record['original_entry'] - data = entry.get('data', {}) - - writer.writerow({ - "record_id": record['record_id'], - "event_type": entry['event_type'], - "timestamp": record['created_at'], - "retention_class": record['retention_class'], # Already converted to string - "expires_at": record['expires_at'], - "status": record['status'], # Already converted to string - "decision": data.get('decision', ''), - "actor": data.get('actor', ''), - "action": data.get('action', '') - }) + for record in package["records"]: + entry = record["original_entry"] + data = entry.get("data", {}) + + writer.writerow( + { + "record_id": record["record_id"], + "event_type": entry["event_type"], + "timestamp": record["created_at"], + "retention_class": record["retention_class"], # Already converted to string + "expires_at": record["expires_at"], + "status": record["status"], # Already converted to string + "decision": data.get("decision", ""), + "actor": data.get("actor", ""), + "action": data.get("action", ""), + } + ) return output.getvalue() @@ -453,7 +454,7 @@ def anonymize_record(self, entry_id: str) -> Dict[str, Any]: return { "error": "Cannot anonymize - under legal hold", "entry_id": entry_id, - "legal_holds": record.legal_holds + "legal_holds": record.legal_holds, } # Anonymize personal data fields @@ -464,7 +465,7 @@ def anonymize_record(self, entry_id: str) -> Dict[str, Any]: "status": "anonymized", "anonymized_at": datetime.utcnow().isoformat(), "original_hash": entry.entry_hash, - "note": "Personal data removed, decision metadata retained for compliance" + "note": "Personal data removed, decision metadata retained for compliance", } def _anonymize_data(self, data: Dict[str, Any]) -> Dict[str, Any]: @@ -473,8 +474,13 @@ def _anonymize_data(self, data: Dict[str, Any]) -> Dict[str, Any]: # Fields to anonymize personal_fields = [ - "actor", "user_intent", "request_id", - "email", "name", "user_id", "ip_address" + "actor", + "user_intent", + "request_id", + "email", + "name", + "user_id", + "ip_address", ] for field in personal_fields: diff --git a/src/lexecon/compliance/eu_ai_act/article_14_oversight.py b/src/lexecon/compliance/eu_ai_act/article_14_oversight.py index ee0b7bd..d9c43de 100644 --- a/src/lexecon/compliance/eu_ai_act/article_14_oversight.py +++ b/src/lexecon/compliance/eu_ai_act/article_14_oversight.py @@ -12,16 +12,17 @@ import hashlib import json +from dataclasses import asdict, dataclass from datetime import datetime, timedelta, timezone from enum import Enum from typing import Any, Dict, List, Optional -from dataclasses import dataclass, asdict from lexecon.identity.signing import KeyManager class InterventionType(Enum): """Types of human oversight interventions.""" + APPROVAL = "approval" # Human approved AI recommendation OVERRIDE = "override" # Human overrode AI decision ESCALATION = "escalation" # Escalated to higher authority @@ -32,6 +33,7 @@ class InterventionType(Enum): class 
OversightRole(Enum): """Roles with oversight authority.""" + COMPLIANCE_OFFICER = "compliance_officer" SECURITY_LEAD = "security_lead" LEGAL_COUNSEL = "legal_counsel" @@ -43,6 +45,7 @@ class OversightRole(Enum): @dataclass class HumanIntervention: """Record of human oversight intervention.""" + intervention_id: str timestamp: str intervention_type: InterventionType @@ -71,6 +74,7 @@ class HumanIntervention: @dataclass class EscalationPath: """Defines escalation chain for decision types.""" + decision_class: str # e.g., "financial", "safety", "operational" roles: List[OversightRole] # Ordered list of escalation roles max_response_time_minutes: int @@ -113,40 +117,37 @@ def _initialize_default_escalations(self): roles=[ OversightRole.SOC_ANALYST, OversightRole.SECURITY_LEAD, - OversightRole.EXECUTIVE + OversightRole.EXECUTIVE, ], max_response_time_minutes=15, - requires_approval_from=OversightRole.SECURITY_LEAD + requires_approval_from=OversightRole.SECURITY_LEAD, ), "financial": EscalationPath( decision_class="financial", roles=[ OversightRole.RISK_MANAGER, OversightRole.COMPLIANCE_OFFICER, - OversightRole.EXECUTIVE + OversightRole.EXECUTIVE, ], max_response_time_minutes=30, - requires_approval_from=OversightRole.RISK_MANAGER + requires_approval_from=OversightRole.RISK_MANAGER, ), "legal": EscalationPath( decision_class="legal", roles=[ OversightRole.COMPLIANCE_OFFICER, OversightRole.LEGAL_COUNSEL, - OversightRole.EXECUTIVE + OversightRole.EXECUTIVE, ], max_response_time_minutes=60, - requires_approval_from=OversightRole.LEGAL_COUNSEL + requires_approval_from=OversightRole.LEGAL_COUNSEL, ), "operational": EscalationPath( decision_class="operational", - roles=[ - OversightRole.SOC_ANALYST, - OversightRole.SECURITY_LEAD - ], + roles=[OversightRole.SOC_ANALYST, OversightRole.SECURITY_LEAD], max_response_time_minutes=5, - requires_approval_from=OversightRole.SOC_ANALYST - ) + requires_approval_from=OversightRole.SOC_ANALYST, + ), } def log_intervention( @@ -158,7 +159,7 @@ def log_intervention( reason: str, request_context: Optional[Dict[str, Any]] = None, response_time_ms: Optional[int] = None, - sign: bool = True + sign: bool = True, ) -> HumanIntervention: """ Log a human oversight intervention. 
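A hedged usage sketch of log_intervention as reformatted above; the oversight-manager instance is assumed to be constructed elsewhere, and the intervention_type / ai_recommendation / human_decision keyword names are inferred from the HumanIntervention fields rather than taken from a visible hunk:

from lexecon.compliance.eu_ai_act.article_14_oversight import (
    InterventionType,
    OversightRole,
)

def record_security_override(oversight) -> None:
    """Log a human override of an AI recommendation (illustrative values only)."""
    intervention = oversight.log_intervention(
        intervention_type=InterventionType.OVERRIDE,  # assumed keyword name
        ai_recommendation={"decision": "allow"},      # assumed keyword name
        human_decision={"decision": "deny"},          # assumed keyword name
        human_role=OversightRole.SECURITY_LEAD,
        reason="Conflicts with data-residency policy",
        response_time_ms=4200,
    )
    # Interventions are signed; the KeyManager-backed check in a later hunk verifies them.
    assert oversight.verify_intervention(intervention)
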
@@ -176,7 +177,7 @@ def log_intervention( human_role=human_role, reason=reason, request_context=request_context or {}, - response_time_ms=response_time_ms + response_time_ms=response_time_ms, ) # Sign the intervention @@ -201,7 +202,7 @@ def _sign_intervention(self, intervention: HumanIntervention) -> str: "ai_recommendation": intervention.ai_recommendation, "human_decision": intervention.human_decision, "role": intervention.human_role.value, - "reason": intervention.reason + "reason": intervention.reason, } # KeyManager.sign() expects a dict and returns base64-encoded signature @@ -221,20 +222,18 @@ def verify_intervention(self, intervention: HumanIntervention) -> bool: "ai_recommendation": intervention.ai_recommendation, "human_decision": intervention.human_decision, "role": intervention.human_role.value, - "reason": intervention.reason + "reason": intervention.reason, } # Use KeyManager.verify static method from lexecon.identity.signing import KeyManager + try: return KeyManager.verify(data, intervention.signature, self.key_manager.public_key) except Exception: return False - def generate_oversight_effectiveness_report( - self, - time_period_days: int = 30 - ) -> Dict[str, Any]: + def generate_oversight_effectiveness_report(self, time_period_days: int = 30) -> Dict[str, Any]: """ Generate oversight effectiveness report. @@ -243,15 +242,16 @@ def generate_oversight_effectiveness_report( """ cutoff = datetime.now(timezone.utc) - timedelta(days=time_period_days) recent = [ - i for i in self.interventions - if datetime.fromisoformat(i.timestamp.replace('Z', '+00:00')) >= cutoff + i + for i in self.interventions + if datetime.fromisoformat(i.timestamp.replace("Z", "+00:00")) >= cutoff ] if not recent: return { "period_days": time_period_days, "total_interventions": 0, - "message": "No interventions in period" + "message": "No interventions in period", } # Calculate statistics @@ -286,7 +286,11 @@ def generate_oversight_effectiveness_report( response_times.append(intervention.response_time_ms) # Calculate override rate (key effectiveness metric!) 
- override_rate = (overrides / (overrides + rubber_stamps) * 100) if (overrides + rubber_stamps) > 0 else 0 + override_rate = ( + (overrides / (overrides + rubber_stamps) * 100) + if (overrides + rubber_stamps) > 0 + else 0 + ) # Response time statistics avg_response_time = sum(response_times) / len(response_times) if response_times else 0 @@ -302,37 +306,29 @@ def generate_oversight_effectiveness_report( "period_start": cutoff.isoformat(), "period_end": datetime.utcnow().isoformat(), "total_interventions": total, - - "intervention_breakdown": { - "by_type": by_type, - "by_role": by_role - }, - + "intervention_breakdown": {"by_type": by_type, "by_role": by_role}, "effectiveness_metrics": { "total_overrides": overrides, "total_approvals": rubber_stamps, "override_rate_percent": round(override_rate, 2), - "interpretation": self._interpret_override_rate(override_rate) + "interpretation": self._interpret_override_rate(override_rate), }, - "response_time_metrics": { "average_ms": round(avg_response_time, 2), "minimum_ms": min_response_time, "maximum_ms": max_response_time, "average_seconds": round(avg_response_time / 1000, 2), "compliance_target_seconds": 60, - "meets_target": avg_response_time / 1000 < 60 + "meets_target": avg_response_time / 1000 < 60, }, - "compliance_assessment": compliance_status, - "evidence_integrity": { "all_signed": all(i.signature for i in recent), "signatures_verified": sum(1 for i in recent if self.verify_intervention(i)), "verification_rate": round( sum(1 for i in recent if self.verify_intervention(i)) / total * 100, 2 - ) - } + ), + }, } return report @@ -368,7 +364,7 @@ def _assess_compliance(self, override_rate: float, avg_response_ms: float) -> Di return { "compliant": compliant, "status": "COMPLIANT" if compliant else "NEEDS_ATTENTION", - "issues": issues if issues else ["None - oversight is effective"] + "issues": issues if issues else ["None - oversight is effective"], } def get_escalation_path(self, decision_class: str) -> Optional[EscalationPath]: @@ -376,9 +372,7 @@ def get_escalation_path(self, decision_class: str) -> Optional[EscalationPath]: return self.escalation_paths.get(decision_class) def simulate_escalation( - self, - decision_class: str, - current_role: OversightRole + self, decision_class: str, current_role: OversightRole ) -> Dict[str, Any]: """ Simulate escalation chain for a decision. @@ -397,7 +391,9 @@ def simulate_escalation( # Determine next escalation level can_approve = current_role == path.requires_approval_from - next_escalation = path.roles[current_index + 1] if current_index + 1 < len(path.roles) else None + next_escalation = ( + path.roles[current_index + 1] if current_index + 1 < len(path.roles) else None + ) return { "decision_class": decision_class, @@ -406,13 +402,11 @@ def simulate_escalation( "requires_approval_from": path.requires_approval_from.value, "next_escalation": next_escalation.value if next_escalation else None, "max_response_time_minutes": path.max_response_time_minutes, - "full_escalation_chain": [r.value for r in path.roles] + "full_escalation_chain": [r.value for r in path.roles], } def export_evidence_package( - self, - start_date: Optional[str] = None, - end_date: Optional[str] = None + self, start_date: Optional[str] = None, end_date: Optional[str] = None ) -> Dict[str, Any]: """ Export complete evidence package for Article 14 compliance. 
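The effectiveness report above tracks an override rate with a divide-by-zero guard; restated as a standalone helper:

def override_rate_percent(overrides: int, approvals: int) -> float:
    """Share of human-reviewed decisions where the human overrode the AI."""
    reviewed = overrides + approvals
    return round(overrides / reviewed * 100, 2) if reviewed > 0 else 0.0

print(override_rate_percent(3, 97))  # 3.0
print(override_rate_percent(0, 0))   # 0.0
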
@@ -425,28 +419,33 @@ def export_evidence_package( if start_date: start = datetime.fromisoformat(start_date) interventions = [ - i for i in interventions - if datetime.fromisoformat(i.timestamp.replace('Z', '+00:00')) >= start + i + for i in interventions + if datetime.fromisoformat(i.timestamp.replace("Z", "+00:00")) >= start ] if end_date: end = datetime.fromisoformat(end_date) interventions = [ - i for i in interventions - if datetime.fromisoformat(i.timestamp.replace('Z', '+00:00')) <= end + i + for i in interventions + if datetime.fromisoformat(i.timestamp.replace("Z", "+00:00")) <= end ] return { "package_type": "EU_AI_ACT_ARTICLE_14_OVERSIGHT_EVIDENCE", "generated_at": datetime.utcnow().isoformat(), - "period": { - "start": start_date or "inception", - "end": end_date or "present" - }, + "period": {"start": start_date or "inception", "end": end_date or "present"}, "summary": { "total_interventions": len(interventions), "all_signed": all(i.signature for i in interventions), - "verification_rate": sum(1 for i in interventions if self.verify_intervention(i)) / len(interventions) * 100 if interventions else 0 + "verification_rate": ( + sum(1 for i in interventions if self.verify_intervention(i)) + / len(interventions) + * 100 + if interventions + else 0 + ), }, "effectiveness_report": self.generate_oversight_effectiveness_report(), "interventions": [asdict(i) for i in interventions], @@ -455,7 +454,7 @@ def export_evidence_package( "decision_class": v.decision_class, "roles": [r.value for r in v.roles], "max_response_time_minutes": v.max_response_time_minutes, - "requires_approval_from": v.requires_approval_from.value + "requires_approval_from": v.requires_approval_from.value, } for k, v in self.escalation_paths.items() }, @@ -464,8 +463,8 @@ def export_evidence_package( "human_oversight_documented": True, "intervention_capability_proven": len(interventions) > 0, "cryptographic_signatures": True, - "generated_by": "Lexecon Human Oversight Evidence System" - } + "generated_by": "Lexecon Human Oversight Evidence System", + }, } def export_markdown(self, evidence_package: Dict[str, Any]) -> str: diff --git a/src/lexecon/compliance/eu_ai_act/storage.py b/src/lexecon/compliance/eu_ai_act/storage.py index 3f4f140..9296afd 100644 --- a/src/lexecon/compliance/eu_ai_act/storage.py +++ b/src/lexecon/compliance/eu_ai_act/storage.py @@ -38,7 +38,8 @@ def _init_database(self): cursor = conn.cursor() # Create interventions table - cursor.execute(""" + cursor.execute( + """ CREATE TABLE IF NOT EXISTS interventions ( intervention_id TEXT PRIMARY KEY, timestamp TEXT NOT NULL, @@ -53,23 +54,30 @@ def _init_database(self): response_time_ms INTEGER, created_at TEXT NOT NULL ) - """) + """ + ) # Create indexes for efficient querying - cursor.execute(""" + cursor.execute( + """ CREATE INDEX IF NOT EXISTS idx_timestamp ON interventions(timestamp) - """) + """ + ) - cursor.execute(""" + cursor.execute( + """ CREATE INDEX IF NOT EXISTS idx_intervention_type ON interventions(intervention_type) - """) + """ + ) - cursor.execute(""" + cursor.execute( + """ CREATE INDEX IF NOT EXISTS idx_human_role ON interventions(human_role) - """) + """ + ) conn.commit() conn.close() @@ -84,27 +92,30 @@ def save_intervention(self, intervention: HumanIntervention) -> None: conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ INSERT OR REPLACE INTO interventions ( intervention_id, timestamp, intervention_type, ai_recommendation, ai_confidence, human_decision, human_role, reason, 
request_context, signature, response_time_ms, created_at ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) - """, ( - intervention.intervention_id, - intervention.timestamp, - intervention.intervention_type.value, - json.dumps(intervention.ai_recommendation), - intervention.ai_confidence, - json.dumps(intervention.human_decision), - intervention.human_role.value, - intervention.reason, - json.dumps(intervention.request_context), - intervention.signature, - intervention.response_time_ms, - datetime.utcnow().isoformat() - )) + """, + ( + intervention.intervention_id, + intervention.timestamp, + intervention.intervention_type.value, + json.dumps(intervention.ai_recommendation), + intervention.ai_confidence, + json.dumps(intervention.human_decision), + intervention.human_role.value, + intervention.reason, + json.dumps(intervention.request_context), + intervention.signature, + intervention.response_time_ms, + datetime.utcnow().isoformat(), + ), + ) conn.commit() conn.close() @@ -119,7 +130,8 @@ def load_all_interventions(self) -> List[HumanIntervention]: conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ SELECT intervention_id, timestamp, intervention_type, ai_recommendation, ai_confidence, human_decision, @@ -127,7 +139,8 @@ def load_all_interventions(self) -> List[HumanIntervention]: response_time_ms FROM interventions ORDER BY timestamp ASC - """) + """ + ) interventions = [] for row in cursor.fetchall(): @@ -142,7 +155,7 @@ def load_all_interventions(self) -> List[HumanIntervention]: reason=row[7], request_context=json.loads(row[8]), signature=row[9], - response_time_ms=row[10] + response_time_ms=row[10], ) interventions.append(intervention) @@ -150,9 +163,7 @@ def load_all_interventions(self) -> List[HumanIntervention]: return interventions def get_by_timerange( - self, - start_date: Optional[str] = None, - end_date: Optional[str] = None + self, start_date: Optional[str] = None, end_date: Optional[str] = None ) -> List[HumanIntervention]: """ Get interventions within a time range. 
@@ -203,7 +214,7 @@ def get_by_timerange( reason=row[7], request_context=json.loads(row[8]), signature=row[9], - response_time_ms=row[10] + response_time_ms=row[10], ) interventions.append(intervention) @@ -236,26 +247,32 @@ def get_statistics(self) -> dict: newest = cursor.fetchone()[0] # By intervention type - cursor.execute(""" + cursor.execute( + """ SELECT intervention_type, COUNT(*) FROM interventions GROUP BY intervention_type - """) + """ + ) by_type = {row[0]: row[1] for row in cursor.fetchall()} # By human role - cursor.execute(""" + cursor.execute( + """ SELECT human_role, COUNT(*) FROM interventions GROUP BY human_role - """) + """ + ) by_role = {row[0]: row[1] for row in cursor.fetchall()} # Override count - cursor.execute(""" + cursor.execute( + """ SELECT COUNT(*) FROM interventions WHERE intervention_type = 'override' - """) + """ + ) overrides = cursor.fetchone()[0] conn.close() @@ -269,7 +286,7 @@ def get_statistics(self) -> dict: "by_intervention_type": by_type, "by_human_role": by_role, "override_count": overrides, - "override_rate": (overrides / total * 100) if total > 0 else 0 + "override_rate": (overrides / total * 100) if total > 0 else 0, } def count_interventions(self) -> int: diff --git a/src/lexecon/compliance_mapping/service.py b/src/lexecon/compliance_mapping/service.py index 1ad5dda..6f49bb4 100644 --- a/src/lexecon/compliance_mapping/service.py +++ b/src/lexecon/compliance_mapping/service.py @@ -9,16 +9,17 @@ - Automated compliance reporting """ +import copy +import uuid from dataclasses import dataclass from datetime import datetime, timezone from enum import Enum from typing import Any, Dict, List, Optional, Set -import uuid -import copy class RegulatoryFramework(Enum): """Supported regulatory frameworks.""" + SOC2 = "soc2" ISO27001 = "iso27001" GDPR = "gdpr" @@ -29,6 +30,7 @@ class RegulatoryFramework(Enum): class ControlStatus(Enum): """Compliance control status.""" + NOT_IMPLEMENTED = "not_implemented" PARTIALLY_IMPLEMENTED = "partially_implemented" IMPLEMENTED = "implemented" @@ -38,6 +40,7 @@ class ControlStatus(Enum): class GovernancePrimitive(Enum): """Governance primitives from Phases 1-4.""" + RISK_ASSESSMENT = "risk_assessment" ESCALATION = "escalation" OVERRIDE = "override" @@ -48,6 +51,7 @@ class GovernancePrimitive(Enum): @dataclass class ComplianceControl: """Represents a compliance control requirement.""" + control_id: str framework: RegulatoryFramework title: str @@ -68,6 +72,7 @@ def __post_init__(self): @dataclass class ControlMapping: """Maps a governance primitive to compliance controls.""" + mapping_id: str primitive_type: GovernancePrimitive primitive_id: str # e.g., risk_id, escalation_id @@ -81,6 +86,7 @@ class ControlMapping: @dataclass class ComplianceReport: """Compliance status report for a framework.""" + report_id: str framework: RegulatoryFramework generated_at: datetime @@ -113,9 +119,9 @@ class ComplianceMappingService: mapped_primitives=[ GovernancePrimitive.OVERRIDE, GovernancePrimitive.EVIDENCE_ARTIFACT, - GovernancePrimitive.DECISION_LOG + GovernancePrimitive.DECISION_LOG, ], - status=ControlStatus.NOT_IMPLEMENTED + status=ControlStatus.NOT_IMPLEMENTED, ), "CC7.2": ComplianceControl( control_id="CC7.2", @@ -127,9 +133,9 @@ class ComplianceMappingService: mapped_primitives=[ GovernancePrimitive.RISK_ASSESSMENT, GovernancePrimitive.ESCALATION, - GovernancePrimitive.EVIDENCE_ARTIFACT + GovernancePrimitive.EVIDENCE_ARTIFACT, ], - status=ControlStatus.NOT_IMPLEMENTED + status=ControlStatus.NOT_IMPLEMENTED, ), "CC9.1": 
ComplianceControl( control_id="CC9.1", @@ -141,9 +147,9 @@ class ComplianceMappingService: mapped_primitives=[ GovernancePrimitive.RISK_ASSESSMENT, GovernancePrimitive.ESCALATION, - GovernancePrimitive.OVERRIDE + GovernancePrimitive.OVERRIDE, ], - status=ControlStatus.NOT_IMPLEMENTED + status=ControlStatus.NOT_IMPLEMENTED, ), } @@ -158,9 +164,9 @@ class ComplianceMappingService: required_evidence_types=["decision_log", "audit_trail"], mapped_primitives=[ GovernancePrimitive.DECISION_LOG, - GovernancePrimitive.EVIDENCE_ARTIFACT + GovernancePrimitive.EVIDENCE_ARTIFACT, ], - status=ControlStatus.NOT_IMPLEMENTED + status=ControlStatus.NOT_IMPLEMENTED, ), "A.12.6.1": ComplianceControl( control_id="A.12.6.1", @@ -171,9 +177,9 @@ class ComplianceMappingService: required_evidence_types=["risk_assessment", "evidence_artifact"], mapped_primitives=[ GovernancePrimitive.RISK_ASSESSMENT, - GovernancePrimitive.EVIDENCE_ARTIFACT + GovernancePrimitive.EVIDENCE_ARTIFACT, ], - status=ControlStatus.NOT_IMPLEMENTED + status=ControlStatus.NOT_IMPLEMENTED, ), "A.16.1.4": ComplianceControl( control_id="A.16.1.4", @@ -185,9 +191,9 @@ class ComplianceMappingService: mapped_primitives=[ GovernancePrimitive.RISK_ASSESSMENT, GovernancePrimitive.ESCALATION, - GovernancePrimitive.DECISION_LOG + GovernancePrimitive.DECISION_LOG, ], - status=ControlStatus.NOT_IMPLEMENTED + status=ControlStatus.NOT_IMPLEMENTED, ), } @@ -202,9 +208,9 @@ class ComplianceMappingService: required_evidence_types=["risk_assessment", "evidence_artifact", "audit_trail"], mapped_primitives=[ GovernancePrimitive.RISK_ASSESSMENT, - GovernancePrimitive.EVIDENCE_ARTIFACT + GovernancePrimitive.EVIDENCE_ARTIFACT, ], - status=ControlStatus.NOT_IMPLEMENTED + status=ControlStatus.NOT_IMPLEMENTED, ), "Art.33": ComplianceControl( control_id="Art.33", @@ -216,9 +222,9 @@ class ComplianceMappingService: mapped_primitives=[ GovernancePrimitive.ESCALATION, GovernancePrimitive.EVIDENCE_ARTIFACT, - GovernancePrimitive.DECISION_LOG + GovernancePrimitive.DECISION_LOG, ], - status=ControlStatus.NOT_IMPLEMENTED + status=ControlStatus.NOT_IMPLEMENTED, ), "Art.35": ComplianceControl( control_id="Art.35", @@ -229,9 +235,9 @@ class ComplianceMappingService: required_evidence_types=["risk_assessment", "evidence_artifact"], mapped_primitives=[ GovernancePrimitive.RISK_ASSESSMENT, - GovernancePrimitive.EVIDENCE_ARTIFACT + GovernancePrimitive.EVIDENCE_ARTIFACT, ], - status=ControlStatus.NOT_IMPLEMENTED + status=ControlStatus.NOT_IMPLEMENTED, ), } @@ -250,7 +256,7 @@ def map_primitive_to_controls( primitive_type: GovernancePrimitive, primitive_id: str, framework: RegulatoryFramework, - metadata: Optional[Dict[str, Any]] = None + metadata: Optional[Dict[str, Any]] = None, ) -> ControlMapping: """ Map a governance primitive to relevant compliance controls. @@ -280,17 +286,14 @@ def map_primitive_to_controls( framework=framework, mapped_at=datetime.now(timezone.utc), verification_status="pending", - metadata=metadata + metadata=metadata, ) self._mappings[mapping_id] = mapping return mapping def link_evidence_to_control( - self, - control_id: str, - framework: RegulatoryFramework, - evidence_artifact_id: str + self, control_id: str, framework: RegulatoryFramework, evidence_artifact_id: str ) -> bool: """ Link an evidence artifact to a compliance control. 
@@ -316,10 +319,7 @@ def link_evidence_to_control( return True def verify_control( - self, - control_id: str, - framework: RegulatoryFramework, - notes: Optional[str] = None + self, control_id: str, framework: RegulatoryFramework, notes: Optional[str] = None ) -> bool: """ Mark a control as verified. @@ -346,9 +346,7 @@ def verify_control( return True def get_control_status( - self, - control_id: str, - framework: RegulatoryFramework + self, control_id: str, framework: RegulatoryFramework ) -> Optional[ComplianceControl]: """ Get the current status of a compliance control. @@ -369,7 +367,7 @@ def list_controls( self, framework: RegulatoryFramework, status: Optional[ControlStatus] = None, - category: Optional[str] = None + category: Optional[str] = None, ) -> List[ComplianceControl]: """ List compliance controls with optional filtering. @@ -395,10 +393,7 @@ def list_controls( return controls - def analyze_gaps( - self, - framework: RegulatoryFramework - ) -> List[Dict[str, Any]]: + def analyze_gaps(self, framework: RegulatoryFramework) -> List[Dict[str, Any]]: """ Analyze compliance gaps for a framework. @@ -414,22 +409,23 @@ def analyze_gaps( gaps = [] for control_id, control in self._control_registry[framework].items(): if control.status in [ControlStatus.NOT_IMPLEMENTED, ControlStatus.NON_COMPLIANT]: - gaps.append({ - "control_id": control_id, - "title": control.title, - "category": control.category, - "status": control.status.value, - "required_evidence_types": control.required_evidence_types, - "mapped_primitives": [p.value for p in control.mapped_primitives], - "severity": "high" if control.status == ControlStatus.NON_COMPLIANT else "medium" - }) + gaps.append( + { + "control_id": control_id, + "title": control.title, + "category": control.category, + "status": control.status.value, + "required_evidence_types": control.required_evidence_types, + "mapped_primitives": [p.value for p in control.mapped_primitives], + "severity": ( + "high" if control.status == ControlStatus.NON_COMPLIANT else "medium" + ), + } + ) return gaps - def generate_compliance_report( - self, - framework: RegulatoryFramework - ) -> ComplianceReport: + def generate_compliance_report(self, framework: RegulatoryFramework) -> ComplianceReport: """ Generate comprehensive compliance report for a framework. @@ -456,7 +452,9 @@ def generate_compliance_report( if non_compliant > 0: recommendations.append(f"Address {non_compliant} non-compliant controls immediately") if compliance_percentage < 80: - recommendations.append(f"Increase compliance coverage from {compliance_percentage:.1f}% to at least 80%") + recommendations.append( + f"Increase compliance coverage from {compliance_percentage:.1f}% to at least 80%" + ) if verified < implemented: recommendations.append(f"Verify {implemented - verified} implemented controls") @@ -471,16 +469,13 @@ def generate_compliance_report( non_compliant_controls=non_compliant, compliance_percentage=compliance_percentage, gaps=gaps, - recommendations=recommendations + recommendations=recommendations, ) self._reports[report_id] = report return report - def get_framework_coverage( - self, - framework: RegulatoryFramework - ) -> Dict[str, Any]: + def get_framework_coverage(self, framework: RegulatoryFramework) -> Dict[str, Any]: """ Get coverage statistics for a framework. 
@@ -515,13 +510,12 @@ def get_framework_coverage( "total_controls": total, "status_breakdown": status_counts, "categories": categories, - "overall_compliance": (status_counts.get("verified", 0) / total * 100) if total > 0 else 0 + "overall_compliance": ( + (status_counts.get("verified", 0) / total * 100) if total > 0 else 0 + ), } - def get_primitive_mappings( - self, - primitive_id: str - ) -> List[ControlMapping]: + def get_primitive_mappings(self, primitive_id: str) -> List[ControlMapping]: """ Get all control mappings for a primitive. @@ -532,8 +526,7 @@ def get_primitive_mappings( List of ControlMapping objects """ return [ - mapping for mapping in self._mappings.values() - if mapping.primitive_id == primitive_id + mapping for mapping in self._mappings.values() if mapping.primitive_id == primitive_id ] def get_statistics(self) -> Dict[str, Any]: @@ -546,9 +539,7 @@ def get_statistics(self) -> Dict[str, Any]: total_mappings = len(self._mappings) frameworks_tracked = len(self._control_registry) - total_controls = sum( - len(controls) for controls in self._control_registry.values() - ) + total_controls = sum(len(controls) for controls in self._control_registry.values()) verified_controls = sum( len([c for c in controls.values() if c.status == ControlStatus.VERIFIED]) @@ -561,5 +552,7 @@ def get_statistics(self) -> Dict[str, Any]: "total_controls": total_controls, "verified_controls": verified_controls, "frameworks": list(self._control_registry.keys()), - "overall_verification_rate": (verified_controls / total_controls * 100) if total_controls > 0 else 0 + "overall_verification_rate": ( + (verified_controls / total_controls * 100) if total_controls > 0 else 0 + ), } diff --git a/src/lexecon/decision/service.py b/src/lexecon/decision/service.py index d99b3d6..5c16dd3 100644 --- a/src/lexecon/decision/service.py +++ b/src/lexecon/decision/service.py @@ -18,12 +18,13 @@ # Import canonical governance models try: + from model_governance_pack.models import Decision as CanonicalDecision from model_governance_pack.models import ( - Decision as CanonicalDecision, DecisionOutcome, Risk, RiskLevel, ) + GOVERNANCE_MODELS_AVAILABLE = True except ImportError: GOVERNANCE_MODELS_AVAILABLE = False @@ -40,9 +41,9 @@ def generate_ulid() -> str: Format: 26 uppercase alphanumeric characters. Uses timestamp + random component for sortability and uniqueness. """ - import time import random import string + import time # ULID encoding alphabet (Crockford's Base32) alphabet = "0123456789ABCDEFGHJKMNPQRSTVWXYZ" diff --git a/src/lexecon/escalation/service.py b/src/lexecon/escalation/service.py index 44b17e6..858f183 100644 --- a/src/lexecon/escalation/service.py +++ b/src/lexecon/escalation/service.py @@ -16,14 +16,14 @@ # Import canonical governance models try: from model_governance_pack.models import ( + ArtifactType, Escalation, - EscalationTrigger, - EscalationStatus, EscalationPriority, + EscalationStatus, + EscalationTrigger, + EvidenceArtifact, Resolution, ResolutionOutcome, - EvidenceArtifact, - ArtifactType, Risk, RiskLevel, ) @@ -131,9 +131,7 @@ def __init__( store_evidence: Whether to generate evidence artifacts """ if not GOVERNANCE_MODELS_AVAILABLE: - raise RuntimeError( - "Governance models not available. Install model_governance_pack." - ) + raise RuntimeError("Governance models not available. 
Install model_governance_pack.") self.config = config or EscalationConfig() self.emit_notifications = emit_notifications @@ -162,10 +160,7 @@ def should_auto_escalate(self, risk: Optional["Risk"]) -> bool: return True # Check risk level threshold - if ( - risk.risk_level - and risk.risk_level == self.config.AUTO_ESCALATE_RISK_LEVEL - ): + if risk.risk_level and risk.risk_level == self.config.AUTO_ESCALATE_RISK_LEVEL: return True return False @@ -327,9 +322,7 @@ def acknowledge_escalation( raise ValueError(f"Escalation {escalation_id} not found") if escalation.status in [EscalationStatus.RESOLVED, EscalationStatus.EXPIRED]: - raise ValueError( - f"Cannot acknowledge escalation in {escalation.status.value} state" - ) + raise ValueError(f"Cannot acknowledge escalation in {escalation.status.value} state") # Create updated escalation (immutable pattern) updated = escalation.model_copy( @@ -394,9 +387,7 @@ def resolve_escalation( if resolved_by not in escalation.escalated_to: # Allow if resolver acknowledged it if resolved_by != escalation.acknowledged_by: - raise ValueError( - f"Resolver {resolved_by} not authorized for this escalation" - ) + raise ValueError(f"Resolver {resolved_by} not authorized for this escalation") # Create resolution resolution = Resolution( @@ -537,9 +528,7 @@ def check_sla_status(self) -> List[NotificationEvent]: return notifications - def _infer_priority_from_trigger( - self, trigger: "EscalationTrigger" - ) -> "EscalationPriority": + def _infer_priority_from_trigger(self, trigger: "EscalationTrigger") -> "EscalationPriority": """Infer escalation priority from trigger type.""" priority_map = { EscalationTrigger.RISK_THRESHOLD: EscalationPriority.CRITICAL, @@ -635,9 +624,7 @@ def _create_evidence_artifact( return artifact - def _create_notification_artifact( - self, notification: NotificationEvent - ) -> "EvidenceArtifact": + def _create_notification_artifact(self, notification: NotificationEvent) -> "EvidenceArtifact": """Create evidence artifact for notification.""" # Serialize notification notification_data = { diff --git a/src/lexecon/evidence/append_only_store.py b/src/lexecon/evidence/append_only_store.py index 335f5ff..55a6c4c 100644 --- a/src/lexecon/evidence/append_only_store.py +++ b/src/lexecon/evidence/append_only_store.py @@ -13,6 +13,7 @@ class AppendOnlyViolationError(Exception): """Raised when attempting to modify or delete in append-only mode.""" + pass @@ -92,9 +93,7 @@ def __delitem__(self, key: str): AppendOnlyViolationError: If append-only enabled """ if self._enabled: - raise AppendOnlyViolationError( - f"Cannot delete artifact '{key}' in append-only mode" - ) + raise AppendOnlyViolationError(f"Cannot delete artifact '{key}' in append-only mode") del self._store[key] def __contains__(self, key: str) -> bool: @@ -170,10 +169,7 @@ def __init__(self, evidence_service, enabled: bool = False): # Wrap the internal storage if enabled if enabled: # Replace internal dict with append-only wrapper - self.service._artifacts = AppendOnlyStore( - self.service._artifacts, - enabled=True - ) + self.service._artifacts = AppendOnlyStore(self.service._artifacts, enabled=True) @property def enabled(self) -> bool: @@ -185,10 +181,7 @@ def enable(self): if not self._enabled: self._enabled = True if not isinstance(self.service._artifacts, AppendOnlyStore): - self.service._artifacts = AppendOnlyStore( - self.service._artifacts, - enabled=True - ) + self.service._artifacts = AppendOnlyStore(self.service._artifacts, enabled=True) else: self.service._artifacts.enable() @@ 
-214,8 +207,9 @@ def verify_integrity(self) -> bool: for artifact_id, artifact in artifacts.items(): # Recompute hash - if hasattr(artifact, 'content') and hasattr(artifact, 'sha256_hash'): + if hasattr(artifact, "content") and hasattr(artifact, "sha256_hash"): from .service import compute_sha256 + actual_hash = compute_sha256(artifact.content) if actual_hash != artifact.sha256_hash: return False diff --git a/src/lexecon/evidence/service.py b/src/lexecon/evidence/service.py index 051db68..780833c 100644 --- a/src/lexecon/evidence/service.py +++ b/src/lexecon/evidence/service.py @@ -18,9 +18,9 @@ # Import canonical governance models try: from model_governance_pack.models import ( - EvidenceArtifact, ArtifactType, DigitalSignature, + EvidenceArtifact, ) GOVERNANCE_MODELS_AVAILABLE = True @@ -100,9 +100,7 @@ def __init__( enable_signatures: Whether to support digital signatures """ if not GOVERNANCE_MODELS_AVAILABLE: - raise RuntimeError( - "Governance models not available. Install model_governance_pack." - ) + raise RuntimeError("Governance models not available. Install model_governance_pack.") self.config = config or EvidenceConfig() self.enable_signatures = enable_signatures @@ -430,9 +428,7 @@ def sign_artifact( ) # Create new artifact with signature (immutability preserved) - signed_artifact = artifact.model_copy( - update={"digital_signature": digital_signature} - ) + signed_artifact = artifact.model_copy(update={"digital_signature": digital_signature}) # Replace in storage (allowed because it's adding signature) self._artifacts[artifact_id] = signed_artifact @@ -455,9 +451,7 @@ def get_statistics(self) -> Dict[str, Any]: if count > 0: type_counts[artifact_type.value] = count - signed_count = sum( - 1 for a in self._artifacts.values() if a.digital_signature is not None - ) + signed_count = sum(1 for a in self._artifacts.values() if a.digital_signature is not None) return { "total_artifacts": total_artifacts, diff --git a/src/lexecon/identity/signing.py b/src/lexecon/identity/signing.py index ce1c7bd..38b7954 100644 --- a/src/lexecon/identity/signing.py +++ b/src/lexecon/identity/signing.py @@ -51,6 +51,7 @@ def verify_signature(self, data: str, signature: str) -> bool: # For string data (like hashes), we need to verify against the string directly try: import base64 + signature_bytes = base64.b64decode(signature) message = data.encode() if isinstance(data, str) else str(data).encode() self.key_manager.public_key.verify(signature_bytes, message) diff --git a/src/lexecon/override/service.py b/src/lexecon/override/service.py index eeb67d2..738587b 100644 --- a/src/lexecon/override/service.py +++ b/src/lexecon/override/service.py @@ -17,13 +17,13 @@ # Import canonical governance models try: from model_governance_pack.models import ( - Override, - OverrideType, - OriginalOutcome, + ArtifactType, + EvidenceArtifact, NewOutcome, + OriginalOutcome, + Override, OverrideScope, - EvidenceArtifact, - ArtifactType, + OverrideType, ) GOVERNANCE_MODELS_AVAILABLE = True @@ -105,9 +105,7 @@ def __init__( store_evidence: Whether to generate evidence artifacts """ if not GOVERNANCE_MODELS_AVAILABLE: - raise RuntimeError( - "Governance models not available. Install model_governance_pack." - ) + raise RuntimeError("Governance models not available. 
Install model_governance_pack.") self.config = config or OverrideConfig() self.store_evidence = store_evidence @@ -134,8 +132,7 @@ def is_authorized( """ # Check if actor is in authorized roles is_in_authorized_roles = any( - actor_id.startswith(role) or actor_id == role - for role in self.config.AUTHORIZED_ROLES + actor_id.startswith(role) or actor_id == role for role in self.config.AUTHORIZED_ROLES ) if not is_in_authorized_roles: @@ -344,14 +341,10 @@ def list_overrides( now = datetime.now(timezone.utc) if expired: # Get expired overrides - overrides = [ - o for o in overrides if o.expires_at and now > o.expires_at - ] + overrides = [o for o in overrides if o.expires_at and now > o.expires_at] else: # Get non-expired overrides - overrides = [ - o for o in overrides if not o.expires_at or now <= o.expires_at - ] + overrides = [o for o in overrides if not o.expires_at or now <= o.expires_at] # Sort by timestamp descending (most recent first) overrides.sort(key=lambda o: o.timestamp, reverse=True) @@ -412,14 +405,10 @@ def get_decision_with_override_status( else None ), "new_outcome": ( - active_override.new_outcome.value - if active_override.new_outcome - else None + active_override.new_outcome.value if active_override.new_outcome else None ), "expires_at": ( - active_override.expires_at.isoformat() - if active_override.expires_at - else None + active_override.expires_at.isoformat() if active_override.expires_at else None ), } else: @@ -496,9 +485,7 @@ def list_evidence_artifacts( if override_id: artifacts = [ - a - for a in artifacts - if a.metadata and a.metadata.get("override_id") == override_id + a for a in artifacts if a.metadata and a.metadata.get("override_id") == override_id ] if decision_id: diff --git a/src/lexecon/responsibility/__init__.py b/src/lexecon/responsibility/__init__.py index 50380be..e6fb365 100644 --- a/src/lexecon/responsibility/__init__.py +++ b/src/lexecon/responsibility/__init__.py @@ -5,17 +5,12 @@ """ from .storage import ResponsibilityStorage -from .tracker import ( - DecisionMaker, - ResponsibilityLevel, - ResponsibilityRecord, - ResponsibilityTracker -) +from .tracker import DecisionMaker, ResponsibilityLevel, ResponsibilityRecord, ResponsibilityTracker __all__ = [ "DecisionMaker", "ResponsibilityLevel", "ResponsibilityRecord", "ResponsibilityTracker", - "ResponsibilityStorage" + "ResponsibilityStorage", ] diff --git a/src/lexecon/responsibility/storage.py b/src/lexecon/responsibility/storage.py index 021da7b..c88be4b 100644 --- a/src/lexecon/responsibility/storage.py +++ b/src/lexecon/responsibility/storage.py @@ -11,7 +11,7 @@ from pathlib import Path from typing import List, Optional -from .tracker import ResponsibilityRecord, DecisionMaker, ResponsibilityLevel +from .tracker import DecisionMaker, ResponsibilityLevel, ResponsibilityRecord class ResponsibilityStorage: @@ -38,7 +38,8 @@ def _init_database(self): cursor = conn.cursor() # Create responsibility_records table - cursor.execute(""" + cursor.execute( + """ CREATE TABLE IF NOT EXISTS responsibility_records ( record_id TEXT PRIMARY KEY, decision_id TEXT NOT NULL, @@ -60,38 +61,51 @@ def _init_database(self): liability_signature TEXT, created_at TEXT NOT NULL ) - """) + """ + ) # Create indexes for efficient querying - cursor.execute(""" + cursor.execute( + """ CREATE INDEX IF NOT EXISTS idx_decision_id ON responsibility_records(decision_id) - """) + """ + ) - cursor.execute(""" + cursor.execute( + """ CREATE INDEX IF NOT EXISTS idx_responsible_party ON responsibility_records(responsible_party) - 
""") + """ + ) - cursor.execute(""" + cursor.execute( + """ CREATE INDEX IF NOT EXISTS idx_decision_maker ON responsibility_records(decision_maker) - """) + """ + ) - cursor.execute(""" + cursor.execute( + """ CREATE INDEX IF NOT EXISTS idx_timestamp ON responsibility_records(timestamp) - """) + """ + ) - cursor.execute(""" + cursor.execute( + """ CREATE INDEX IF NOT EXISTS idx_override_ai ON responsibility_records(override_ai) - """) + """ + ) - cursor.execute(""" + cursor.execute( + """ CREATE INDEX IF NOT EXISTS idx_review_required ON responsibility_records(review_required, reviewed_by) - """) + """ + ) conn.commit() conn.close() @@ -106,7 +120,8 @@ def save_record(self, record: ResponsibilityRecord) -> None: conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ INSERT OR REPLACE INTO responsibility_records ( record_id, decision_id, timestamp, decision_maker, responsible_party, role, reasoning, confidence, @@ -115,27 +130,29 @@ def save_record(self, record: ResponsibilityRecord) -> None: reviewed_by, reviewed_at, liability_accepted, liability_signature, created_at ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) - """, ( - record.record_id, - record.decision_id, - record.timestamp, - record.decision_maker.value, - record.responsible_party, - record.role, - record.reasoning, - record.confidence, - record.responsibility_level.value, - record.delegated_from, - record.escalated_to, - 1 if record.override_ai else 0, - record.ai_recommendation, - 1 if record.review_required else 0, - record.reviewed_by, - record.reviewed_at, - 1 if record.liability_accepted else 0, - record.liability_signature, - datetime.utcnow().isoformat() - )) + """, + ( + record.record_id, + record.decision_id, + record.timestamp, + record.decision_maker.value, + record.responsible_party, + record.role, + record.reasoning, + record.confidence, + record.responsibility_level.value, + record.delegated_from, + record.escalated_to, + 1 if record.override_ai else 0, + record.ai_recommendation, + 1 if record.review_required else 0, + record.reviewed_by, + record.reviewed_at, + 1 if record.liability_accepted else 0, + record.liability_signature, + datetime.utcnow().isoformat(), + ), + ) conn.commit() conn.close() @@ -150,7 +167,8 @@ def load_all_records(self) -> List[ResponsibilityRecord]: conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ SELECT record_id, decision_id, timestamp, decision_maker, responsible_party, role, reasoning, confidence, @@ -160,7 +178,8 @@ def load_all_records(self) -> List[ResponsibilityRecord]: liability_signature FROM responsibility_records ORDER BY timestamp ASC - """) + """ + ) records = [] for row in cursor.fetchall(): @@ -182,7 +201,7 @@ def load_all_records(self) -> List[ResponsibilityRecord]: reviewed_by=row[14], reviewed_at=row[15], liability_accepted=bool(row[16]), - liability_signature=row[17] + liability_signature=row[17], ) records.append(record) @@ -208,7 +227,7 @@ def update_record(self, record_id: str, **updates) -> bool: values = [] for key, value in updates.items(): - if key in ['reviewed_by', 'reviewed_at']: + if key in ["reviewed_by", "reviewed_at"]: update_fields.append(f"{key} = ?") values.append(value) @@ -249,29 +268,37 @@ def get_statistics(self) -> dict: db_size = Path(self.db_path).stat().st_size if Path(self.db_path).exists() else 0 # Oldest record - cursor.execute(""" + cursor.execute( + """ SELECT MIN(timestamp) FROM responsibility_records - """) + """ + 
) oldest = cursor.fetchone()[0] # Newest record - cursor.execute(""" + cursor.execute( + """ SELECT MAX(timestamp) FROM responsibility_records - """) + """ + ) newest = cursor.fetchone()[0] # By decision maker - cursor.execute(""" + cursor.execute( + """ SELECT decision_maker, COUNT(*) FROM responsibility_records GROUP BY decision_maker - """) + """ + ) by_maker = {row[0]: row[1] for row in cursor.fetchall()} # Override count - cursor.execute(""" + cursor.execute( + """ SELECT COUNT(*) FROM responsibility_records WHERE override_ai = 1 - """) + """ + ) overrides = cursor.fetchone()[0] conn.close() @@ -283,7 +310,7 @@ def get_statistics(self) -> dict: "oldest_record": oldest, "newest_record": newest, "by_decision_maker": by_maker, - "override_count": overrides + "override_count": overrides, } def get_by_decision_id(self, decision_id: str) -> List[ResponsibilityRecord]: @@ -299,7 +326,8 @@ def get_by_decision_id(self, decision_id: str) -> List[ResponsibilityRecord]: conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ SELECT record_id, decision_id, timestamp, decision_maker, responsible_party, role, reasoning, confidence, @@ -310,7 +338,9 @@ def get_by_decision_id(self, decision_id: str) -> List[ResponsibilityRecord]: FROM responsibility_records WHERE decision_id = ? ORDER BY timestamp ASC - """, (decision_id,)) + """, + (decision_id,), + ) records = [] for row in cursor.fetchall(): @@ -332,7 +362,7 @@ def get_by_decision_id(self, decision_id: str) -> List[ResponsibilityRecord]: reviewed_by=row[14], reviewed_at=row[15], liability_accepted=bool(row[16]), - liability_signature=row[17] + liability_signature=row[17], ) records.append(record) diff --git a/src/lexecon/responsibility/tracker.py b/src/lexecon/responsibility/tracker.py index 77d288e..6721a4f 100644 --- a/src/lexecon/responsibility/tracker.py +++ b/src/lexecon/responsibility/tracker.py @@ -6,7 +6,7 @@ """ import json -from dataclasses import dataclass, asdict +from dataclasses import asdict, dataclass from datetime import datetime from enum import Enum from typing import Any, Dict, List, Optional @@ -14,6 +14,7 @@ class DecisionMaker(Enum): """Who made the decision.""" + AI_SYSTEM = "ai_system" # Fully automated HUMAN_OPERATOR = "human_operator" # Human with AI assistance HUMAN_SUPERVISOR = "human_supervisor" # Supervisory override @@ -24,6 +25,7 @@ class DecisionMaker(Enum): class ResponsibilityLevel(Enum): """Level of responsibility for the decision.""" + FULL = "full" # Full legal responsibility SHARED = "shared" # Shared between human and AI SUPERVISED = "supervised" # AI decision with human oversight @@ -37,6 +39,7 @@ class ResponsibilityRecord: Critical for legal liability and compliance audits. """ + record_id: str decision_id: str timestamp: str @@ -71,7 +74,7 @@ def to_dict(self) -> Dict[str, Any]: return { **asdict(self), "decision_maker": self.decision_maker.value, - "responsibility_level": self.responsibility_level.value + "responsibility_level": self.responsibility_level.value, } @@ -115,7 +118,7 @@ def record_decision( escalated_to: Optional[str] = None, review_required: bool = False, liability_accepted: bool = False, - liability_signature: Optional[str] = None + liability_signature: Optional[str] = None, ) -> ResponsibilityRecord: """ Record who is responsible for a decision. 
@@ -138,7 +141,7 @@ def record_decision( escalated_to=escalated_to, review_required=review_required, liability_accepted=liability_accepted, - liability_signature=liability_signature + liability_signature=liability_signature, ) self.records.append(record) @@ -149,12 +152,7 @@ def record_decision( return record - def mark_reviewed( - self, - record_id: str, - reviewed_by: str, - notes: Optional[str] = None - ) -> bool: + def mark_reviewed(self, record_id: str, reviewed_by: str, notes: Optional[str] = None) -> bool: """Mark a decision as reviewed.""" for record in self.records: if record.record_id == record_id: @@ -164,9 +162,7 @@ def mark_reviewed( # Auto-save update to storage if available if self.storage: self.storage.update_record( - record_id, - reviewed_by=reviewed_by, - reviewed_at=record.reviewed_at + record_id, reviewed_by=reviewed_by, reviewed_at=record.reviewed_at ) return True @@ -193,9 +189,7 @@ def get_pending_reviews(self) -> List[ResponsibilityRecord]: return [r for r in self.records if r.review_required and not r.reviewed_by] def generate_accountability_report( - self, - start_date: Optional[str] = None, - end_date: Optional[str] = None + self, start_date: Optional[str] = None, end_date: Optional[str] = None ) -> Dict[str, Any]: """ Generate accountability report. @@ -242,17 +236,16 @@ def generate_accountability_report( # Identify most responsible parties party_counts = {} for record in records: - party_counts[record.responsible_party] = party_counts.get(record.responsible_party, 0) + 1 + party_counts[record.responsible_party] = ( + party_counts.get(record.responsible_party, 0) + 1 + ) top_parties = sorted(party_counts.items(), key=lambda x: x[1], reverse=True)[:10] return { "report_type": "ACCOUNTABILITY_REPORT", "generated_at": datetime.utcnow().isoformat(), - "period": { - "start": start_date or "inception", - "end": end_date or "present" - }, + "period": {"start": start_date or "inception", "end": end_date or "present"}, "summary": { "total_decisions": total, "by_decision_maker": by_maker, @@ -261,20 +254,18 @@ def generate_accountability_report( "override_rate": (overrides / total * 100) if total > 0 else 0, "pending_reviews": pending_reviews, "liability_accepted_count": liability_accepted, - "liability_acceptance_rate": (liability_accepted / total * 100) if total > 0 else 0 + "liability_acceptance_rate": (liability_accepted / total * 100) if total > 0 else 0, }, "top_responsible_parties": [ - {"party": party, "decision_count": count} - for party, count in top_parties + {"party": party, "decision_count": count} for party, count in top_parties ], "compliance_indicators": { - "human_oversight_active": overrides > 0 or any( - r.decision_maker != DecisionMaker.AI_SYSTEM for r in records - ), + "human_oversight_active": overrides > 0 + or any(r.decision_maker != DecisionMaker.AI_SYSTEM for r in records), "review_process_active": any(r.reviewed_by for r in records), "liability_framework_active": liability_accepted > 0, - "delegation_chain_documented": any(r.delegated_from for r in records) - } + "delegation_chain_documented": any(r.delegated_from for r in records), + }, } def export_for_legal(self, decision_id: str) -> Dict[str, Any]: @@ -293,20 +284,16 @@ def export_for_legal(self, decision_id: str) -> Dict[str, Any]: "responsibility_records": [r.to_dict() for r in chain], "final_responsible_party": chain[-1].responsible_party if chain else None, "human_in_loop": any(r.decision_maker != DecisionMaker.AI_SYSTEM for r in chain), - "signatures_present": [ - r.record_id for r in 
chain if r.liability_signature - ], + "signatures_present": [r.record_id for r in chain if r.liability_signature], "legal_attestation": { "accountability_established": len(chain) > 0, "human_oversight_documented": any( r.decision_maker != DecisionMaker.AI_SYSTEM for r in chain ), - "liability_accepted": any(r.liability_accepted for r in chain) - } + "liability_accepted": any(r.liability_accepted for r in chain), + }, } def to_dict(self) -> Dict[str, Any]: """Serialize all records.""" - return { - "records": [r.to_dict() for r in self.records] - } + return {"records": [r.to_dict() for r in self.records]} diff --git a/src/lexecon/risk/service.py b/src/lexecon/risk/service.py index 37cfeed..891262b 100644 --- a/src/lexecon/risk/service.py +++ b/src/lexecon/risk/service.py @@ -16,8 +16,8 @@ # Import canonical governance models try: from model_governance_pack.models import ( - EvidenceArtifact, ArtifactType, + EvidenceArtifact, Risk, RiskDimensions, RiskFactor, @@ -141,9 +141,7 @@ def calculate_overall_score( # Calculate weighted average using only populated dimensions total_weight = sum(self.weights[k] for k in populated_dims.keys()) - weighted_sum = sum( - populated_dims[k] * self.weights[k] for k in populated_dims.keys() - ) + weighted_sum = sum(populated_dims[k] * self.weights[k] for k in populated_dims.keys()) overall_score = int(weighted_sum / total_weight) @@ -172,9 +170,7 @@ def determine_risk_level(self, overall_score: int) -> "RiskLevel": else: return RiskLevel.LOW - def calculate_risk_factors( - self, dimensions: "RiskDimensions" - ) -> List["RiskFactor"]: + def calculate_risk_factors(self, dimensions: "RiskDimensions") -> List["RiskFactor"]: """ Generate explainable risk factors from dimensions. @@ -233,9 +229,7 @@ def __init__( store_evidence: Whether to generate evidence artifacts """ if not GOVERNANCE_MODELS_AVAILABLE: - raise RuntimeError( - "Governance models not available. Install model_governance_pack." - ) + raise RuntimeError("Governance models not available. 
Install model_governance_pack.") self.scoring_engine = scoring_engine or RiskScoringEngine() self.store_evidence = store_evidence diff --git a/src/lexecon/security/audit_service.py b/src/lexecon/security/audit_service.py index 39584ac..3b2167b 100644 --- a/src/lexecon/security/audit_service.py +++ b/src/lexecon/security/audit_service.py @@ -11,14 +11,15 @@ import hashlib import json import sqlite3 -from datetime import datetime, timezone -from typing import Optional, List, Dict, Any from dataclasses import dataclass +from datetime import datetime, timezone from enum import Enum +from typing import Any, Dict, List, Optional class ExportStatus(str, Enum): """Export request status.""" + PENDING = "pending" # Awaiting approval APPROVED = "approved" # Approved, generation in progress COMPLETED = "completed" # Successfully generated and downloaded @@ -28,6 +29,7 @@ class ExportStatus(str, Enum): class ApprovalStatus(str, Enum): """Approval status for multi-party authorization.""" + NOT_REQUIRED = "not_required" # Auto-approved PENDING = "pending" # Awaiting approval APPROVED = "approved" # Approved by authorized user @@ -37,6 +39,7 @@ class ApprovalStatus(str, Enum): @dataclass class ExportRequest: """Export request audit record.""" + request_id: str user_id: str username: str @@ -99,7 +102,8 @@ def _init_database(self): cursor = conn.cursor() # Export requests audit log - cursor.execute(""" + cursor.execute( + """ CREATE TABLE IF NOT EXISTS export_requests ( request_id TEXT PRIMARY KEY, user_id TEXT NOT NULL, @@ -141,10 +145,12 @@ def _init_database(self): ip_address TEXT, user_agent TEXT ) - """) + """ + ) # Approval workflow table - cursor.execute(""" + cursor.execute( + """ CREATE TABLE IF NOT EXISTS approval_workflow ( approval_id TEXT PRIMARY KEY, request_id TEXT NOT NULL, @@ -155,10 +161,12 @@ def _init_database(self): timestamp TEXT NOT NULL, FOREIGN KEY (request_id) REFERENCES export_requests(request_id) ) - """) + """ + ) # Access attempts log (all API calls) - cursor.execute(""" + cursor.execute( + """ CREATE TABLE IF NOT EXISTS access_log ( log_id TEXT PRIMARY KEY, user_id TEXT, @@ -170,7 +178,8 @@ def _init_database(self): user_agent TEXT, timestamp TEXT NOT NULL ) - """) + """ + ) conn.commit() conn.close() @@ -185,12 +194,14 @@ def _get_latest_hash(self) -> str: conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ SELECT entry_hash FROM export_requests ORDER BY requested_at DESC LIMIT 1 - """) + """ + ) row = cursor.fetchone() conn.close() @@ -221,7 +232,7 @@ def log_export_request( attestation_ip_address: Optional[str], approval_required: bool, ip_address: Optional[str] = None, - user_agent: Optional[str] = None + user_agent: Optional[str] = None, ) -> ExportRequest: """ Log an export request to the audit trail. @@ -241,14 +252,15 @@ def log_export_request( "time_window": time_window, "formats": sorted(formats), "attestation_accepted": attestation_accepted, - "previous_hash": previous_hash + "previous_hash": previous_hash, } entry_hash = self._compute_hash(hash_data) conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ INSERT INTO export_requests ( request_id, user_id, username, user_email, user_role, purpose, case_id, notes, requested_at, @@ -262,19 +274,46 @@ def log_export_request( ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
) - """, ( - request_id, user_id, username, user_email, user_role, - purpose, case_id, notes, now, - time_window, json.dumps(formats), - int(include_decisions), int(include_interventions), - int(include_ledger), int(include_responsibility), - int(attestation_accepted), now, attestation_ip_address, - ApprovalStatus.PENDING.value if approval_required else ApprovalStatus.NOT_REQUIRED.value, - int(approval_required), None, None, None, None, - ExportStatus.PENDING.value if approval_required else ExportStatus.APPROVED.value, - None, None, None, - previous_hash, entry_hash, ip_address, user_agent - )) + """, + ( + request_id, + user_id, + username, + user_email, + user_role, + purpose, + case_id, + notes, + now, + time_window, + json.dumps(formats), + int(include_decisions), + int(include_interventions), + int(include_ledger), + int(include_responsibility), + int(attestation_accepted), + now, + attestation_ip_address, + ( + ApprovalStatus.PENDING.value + if approval_required + else ApprovalStatus.NOT_REQUIRED.value + ), + int(approval_required), + None, + None, + None, + None, + ExportStatus.PENDING.value if approval_required else ExportStatus.APPROVED.value, + None, + None, + None, + previous_hash, + entry_hash, + ip_address, + user_agent, + ), + ) conn.commit() conn.close() @@ -298,7 +337,9 @@ def log_export_request( attestation_accepted=attestation_accepted, attestation_timestamp=now, attestation_ip_address=attestation_ip_address, - approval_status=ApprovalStatus.PENDING if approval_required else ApprovalStatus.NOT_REQUIRED, + approval_status=( + ApprovalStatus.PENDING if approval_required else ApprovalStatus.NOT_REQUIRED + ), approval_required=approval_required, approved_by_user_id=None, approved_by_username=None, @@ -311,7 +352,7 @@ def log_export_request( previous_hash=previous_hash, entry_hash=entry_hash, ip_address=ip_address, - user_agent=user_agent + user_agent=user_agent, ) def approve_export( @@ -319,7 +360,7 @@ def approve_export( request_id: str, approver_user_id: str, approver_username: str, - reason: Optional[str] = None + reason: Optional[str] = None, ): """Approve an export request.""" now = datetime.now(timezone.utc).isoformat() @@ -327,7 +368,8 @@ def approve_export( conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ UPDATE export_requests SET approval_status = ?, approved_by_user_id = ?, @@ -335,29 +377,36 @@ def approve_export( approved_at = ?, export_status = ? WHERE request_id = ? - """, (ApprovalStatus.APPROVED.value, approver_user_id, approver_username, - now, ExportStatus.APPROVED.value, request_id)) + """, + ( + ApprovalStatus.APPROVED.value, + approver_user_id, + approver_username, + now, + ExportStatus.APPROVED.value, + request_id, + ), + ) # Log approval action import secrets + approval_id = f"approval_{secrets.token_hex(16)}" - cursor.execute(""" + cursor.execute( + """ INSERT INTO approval_workflow ( approval_id, request_id, reviewer_user_id, reviewer_username, action, reason, timestamp ) VALUES (?, ?, ?, ?, ?, ?, ?) 
- """, (approval_id, request_id, approver_user_id, approver_username, - "approved", reason, now)) + """, + (approval_id, request_id, approver_user_id, approver_username, "approved", reason, now), + ) conn.commit() conn.close() def reject_export( - self, - request_id: str, - reviewer_user_id: str, - reviewer_username: str, - reason: str + self, request_id: str, reviewer_user_id: str, reviewer_username: str, reason: str ): """Reject an export request.""" now = datetime.now(timezone.utc).isoformat() @@ -365,33 +414,36 @@ def reject_export( conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ UPDATE export_requests SET approval_status = ?, rejection_reason = ?, export_status = ? WHERE request_id = ? - """, (ApprovalStatus.REJECTED.value, reason, ExportStatus.REJECTED.value, request_id)) + """, + (ApprovalStatus.REJECTED.value, reason, ExportStatus.REJECTED.value, request_id), + ) # Log rejection action import secrets + approval_id = f"approval_{secrets.token_hex(16)}" - cursor.execute(""" + cursor.execute( + """ INSERT INTO approval_workflow ( approval_id, request_id, reviewer_user_id, reviewer_username, action, reason, timestamp ) VALUES (?, ?, ?, ?, ?, ?, ?) - """, (approval_id, request_id, reviewer_user_id, reviewer_username, - "rejected", reason, now)) + """, + (approval_id, request_id, reviewer_user_id, reviewer_username, "rejected", reason, now), + ) conn.commit() conn.close() def complete_export( - self, - request_id: str, - packet_hashes: Dict[str, str], - packet_size_bytes: int + self, request_id: str, packet_hashes: Dict[str, str], packet_size_bytes: int ): """Mark export as completed.""" now = datetime.now(timezone.utc).isoformat() @@ -399,15 +451,23 @@ def complete_export( conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ UPDATE export_requests SET export_status = ?, completed_at = ?, packet_hashes = ?, packet_size_bytes = ? WHERE request_id = ? - """, (ExportStatus.COMPLETED.value, now, json.dumps(packet_hashes), - packet_size_bytes, request_id)) + """, + ( + ExportStatus.COMPLETED.value, + now, + json.dumps(packet_hashes), + packet_size_bytes, + request_id, + ), + ) conn.commit() conn.close() @@ -419,12 +479,15 @@ def fail_export(self, request_id: str, error_message: str): conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ UPDATE export_requests SET export_status = ?, rejection_reason = ? WHERE request_id = ? - """, (ExportStatus.FAILED.value, error_message, request_id)) + """, + (ExportStatus.FAILED.value, error_message, request_id), + ) conn.commit() conn.close() @@ -434,10 +497,13 @@ def get_export_request(self, request_id: str) -> Optional[ExportRequest]: conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ SELECT * FROM export_requests WHERE request_id = ? 
- """, (request_id,)) + """, + (request_id,), + ) row = cursor.fetchone() conn.close() @@ -448,10 +514,7 @@ def get_export_request(self, request_id: str) -> Optional[ExportRequest]: return self._row_to_export_request(row) def list_export_requests( - self, - user_id: Optional[str] = None, - status: Optional[ExportStatus] = None, - limit: int = 100 + self, user_id: Optional[str] = None, status: Optional[ExportStatus] = None, limit: int = 100 ) -> List[ExportRequest]: """List export requests with optional filters.""" conn = sqlite3.connect(self.db_path) @@ -516,7 +579,7 @@ def _row_to_export_request(self, row) -> ExportRequest: previous_hash=row[29], entry_hash=row[30], ip_address=row[31], - user_agent=row[32] + user_agent=row[32], ) def log_access( @@ -527,23 +590,36 @@ def log_access( user_id: Optional[str] = None, username: Optional[str] = None, ip_address: Optional[str] = None, - user_agent: Optional[str] = None + user_agent: Optional[str] = None, ): """Log API access for security monitoring.""" import secrets + log_id = f"log_{secrets.token_hex(16)}" timestamp = datetime.now(timezone.utc).isoformat() conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ INSERT INTO access_log ( log_id, user_id, username, endpoint, method, status_code, ip_address, user_agent, timestamp ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) - """, (log_id, user_id, username, endpoint, method, - status_code, ip_address, user_agent, timestamp)) + """, + ( + log_id, + user_id, + username, + endpoint, + method, + status_code, + ip_address, + user_agent, + timestamp, + ), + ) conn.commit() conn.close() @@ -553,13 +629,15 @@ def verify_audit_chain(self) -> Dict[str, Any]: conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ SELECT request_id, user_id, purpose, case_id, requested_at, time_window, formats, attestation_accepted, previous_hash, entry_hash FROM export_requests ORDER BY requested_at ASC - """) + """ + ) valid = True invalid_entries = [] @@ -573,12 +651,14 @@ def verify_audit_chain(self) -> Dict[str, Any]: # Verify previous hash links correctly if stored_prev_hash != prev_hash: valid = False - invalid_entries.append({ - "request_id": request_id, - "reason": "broken_chain", - "expected_prev_hash": prev_hash, - "actual_prev_hash": stored_prev_hash - }) + invalid_entries.append( + { + "request_id": request_id, + "reason": "broken_chain", + "expected_prev_hash": prev_hash, + "actual_prev_hash": stored_prev_hash, + } + ) # Verify entry hash is correct hash_data = { @@ -590,18 +670,20 @@ def verify_audit_chain(self) -> Dict[str, Any]: "time_window": row[5], "formats": sorted(json.loads(row[6])), "attestation_accepted": bool(row[7]), - "previous_hash": row[8] + "previous_hash": row[8], } computed_hash = self._compute_hash(hash_data) if computed_hash != stored_entry_hash: valid = False - invalid_entries.append({ - "request_id": request_id, - "reason": "hash_mismatch", - "expected_hash": computed_hash, - "actual_hash": stored_entry_hash - }) + invalid_entries.append( + { + "request_id": request_id, + "reason": "hash_mismatch", + "expected_hash": computed_hash, + "actual_hash": stored_entry_hash, + } + ) prev_hash = stored_entry_hash @@ -610,5 +692,7 @@ def verify_audit_chain(self) -> Dict[str, Any]: return { "valid": valid, "invalid_entries": invalid_entries, - "message": "Audit chain is valid" if valid else f"Found {len(invalid_entries)} invalid entries" + "message": ( + "Audit chain is valid" if valid else f"Found 
{len(invalid_entries)} invalid entries" + ), } diff --git a/src/lexecon/security/auth_service.py b/src/lexecon/security/auth_service.py index 6271f4f..96d3905 100644 --- a/src/lexecon/security/auth_service.py +++ b/src/lexecon/security/auth_service.py @@ -13,14 +13,15 @@ import secrets import sqlite3 import time +from dataclasses import dataclass from datetime import datetime, timedelta, timezone from enum import Enum -from typing import Optional, Dict, List, Tuple -from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple class Role(str, Enum): """User roles with hierarchical permissions.""" + VIEWER = "viewer" # Can view dashboard only AUDITOR = "auditor" # Can generate audit packets (needs approval) COMPLIANCE_OFFICER = "compliance_officer" # Can approve audit requests @@ -29,6 +30,7 @@ class Role(str, Enum): class Permission(str, Enum): """Granular permissions.""" + VIEW_DASHBOARD = "view_dashboard" REQUEST_AUDIT_PACKET = "request_audit_packet" APPROVE_AUDIT_PACKET = "approve_audit_packet" @@ -68,6 +70,7 @@ class Permission(str, Enum): @dataclass class User: """User entity.""" + user_id: str username: str email: str @@ -83,6 +86,7 @@ class User: @dataclass class Session: """User session.""" + session_id: str user_id: str username: str @@ -110,7 +114,8 @@ def _init_database(self): cursor = conn.cursor() # Users table - cursor.execute(""" + cursor.execute( + """ CREATE TABLE IF NOT EXISTS users ( user_id TEXT PRIMARY KEY, username TEXT UNIQUE NOT NULL, @@ -125,10 +130,12 @@ def _init_database(self): failed_login_attempts INTEGER DEFAULT 0, locked_until TEXT ) - """) + """ + ) # Sessions table - cursor.execute(""" + cursor.execute( + """ CREATE TABLE IF NOT EXISTS sessions ( session_id TEXT PRIMARY KEY, user_id TEXT NOT NULL, @@ -139,10 +146,12 @@ def _init_database(self): revoked INTEGER DEFAULT 0, FOREIGN KEY (user_id) REFERENCES users(user_id) ) - """) + """ + ) # Login attempts log (for security monitoring) - cursor.execute(""" + cursor.execute( + """ CREATE TABLE IF NOT EXISTS login_attempts ( attempt_id TEXT PRIMARY KEY, username TEXT NOT NULL, @@ -152,7 +161,8 @@ def _init_database(self): timestamp TEXT NOT NULL, failure_reason TEXT ) - """) + """ + ) conn.commit() conn.close() @@ -160,19 +170,11 @@ def _init_database(self): def _hash_password(self, password: str, salt: str) -> str: """Hash password with salt using PBKDF2.""" return hashlib.pbkdf2_hmac( - 'sha256', - password.encode('utf-8'), - salt.encode('utf-8'), - 100000 # iterations + "sha256", password.encode("utf-8"), salt.encode("utf-8"), 100000 # iterations ).hex() def create_user( - self, - username: str, - email: str, - password: str, - role: Role, - full_name: str + self, username: str, email: str, password: str, role: Role, full_name: str ) -> User: """Create a new user.""" user_id = f"user_{secrets.token_hex(16)}" @@ -184,13 +186,15 @@ def create_user( cursor = conn.cursor() try: - cursor.execute(""" + cursor.execute( + """ INSERT INTO users ( user_id, username, email, password_hash, salt, role, full_name, created_at ) VALUES (?, ?, ?, ?, ?, ?, ?, ?) 
- """, (user_id, username, email, password_hash, salt, - role.value, full_name, created_at)) + """, + (user_id, username, email, password_hash, salt, role.value, full_name, created_at), + ) conn.commit() except sqlite3.IntegrityError as e: conn.close() @@ -204,14 +208,11 @@ def create_user( email=email, role=role, full_name=full_name, - created_at=created_at + created_at=created_at, ) def authenticate( - self, - username: str, - password: str, - ip_address: Optional[str] = None + self, username: str, password: str, ip_address: Optional[str] = None ) -> Tuple[Optional[User], Optional[str]]: """ Authenticate user and return (User, error_message). @@ -227,60 +228,86 @@ def authenticate( cursor = conn.cursor() # Get user - cursor.execute(""" + cursor.execute( + """ SELECT user_id, username, email, password_hash, salt, role, full_name, created_at, last_login, is_active, failed_login_attempts, locked_until FROM users WHERE username = ? - """, (username,)) + """, + (username,), + ) row = cursor.fetchone() if not row: # Log failed attempt - cursor.execute(""" + cursor.execute( + """ INSERT INTO login_attempts ( attempt_id, username, success, ip_address, timestamp, failure_reason ) VALUES (?, ?, 0, ?, ?, ?) - """, (attempt_id, username, ip_address, timestamp, "user_not_found")) + """, + (attempt_id, username, ip_address, timestamp, "user_not_found"), + ) conn.commit() conn.close() return None, "Invalid username or password" - (user_id, username, email, password_hash, salt, role_str, - full_name, created_at, last_login, is_active, - failed_attempts, locked_until) = row + ( + user_id, + username, + email, + password_hash, + salt, + role_str, + full_name, + created_at, + last_login, + is_active, + failed_attempts, + locked_until, + ) = row # Check if account is locked if locked_until: - lock_time = datetime.fromisoformat(locked_until.replace('Z', '+00:00')) + lock_time = datetime.fromisoformat(locked_until.replace("Z", "+00:00")) if datetime.now(timezone.utc) < lock_time: remaining = (lock_time - datetime.now(timezone.utc)).seconds // 60 - cursor.execute(""" + cursor.execute( + """ INSERT INTO login_attempts ( attempt_id, username, success, ip_address, timestamp, failure_reason ) VALUES (?, ?, 0, ?, ?, ?) - """, (attempt_id, username, ip_address, timestamp, f"account_locked_{remaining}min")) + """, + (attempt_id, username, ip_address, timestamp, f"account_locked_{remaining}min"), + ) conn.commit() conn.close() return None, f"Account locked. Try again in {remaining} minutes." else: # Unlock account - cursor.execute(""" + cursor.execute( + """ UPDATE users SET locked_until = NULL, failed_login_attempts = 0 WHERE user_id = ? - """, (user_id,)) + """, + (user_id,), + ) conn.commit() # Check if account is active if not is_active: - cursor.execute(""" + cursor.execute( + """ INSERT INTO login_attempts ( attempt_id, username, success, ip_address, timestamp, failure_reason ) VALUES (?, ?, 0, ?, ?, ?) - """, (attempt_id, username, ip_address, timestamp, "account_disabled")) + """, + (attempt_id, username, ip_address, timestamp, "account_disabled"), + ) conn.commit() conn.close() return None, "Account is disabled" @@ -290,30 +317,40 @@ def authenticate( if computed_hash != password_hash: # Increment failed attempts new_failed_attempts = failed_attempts + 1 - cursor.execute(""" + cursor.execute( + """ UPDATE users SET failed_login_attempts = ? WHERE user_id = ? 
- """, (new_failed_attempts, user_id)) + """, + (new_failed_attempts, user_id), + ) # Lock account if too many failed attempts if new_failed_attempts >= self.max_failed_attempts: - lock_until = (datetime.now(timezone.utc) + - timedelta(minutes=self.lockout_duration_minutes)).isoformat() - cursor.execute(""" + lock_until = ( + datetime.now(timezone.utc) + timedelta(minutes=self.lockout_duration_minutes) + ).isoformat() + cursor.execute( + """ UPDATE users SET locked_until = ? WHERE user_id = ? - """, (lock_until, user_id)) + """, + (lock_until, user_id), + ) failure_reason = "invalid_password_account_locked" else: failure_reason = "invalid_password" - cursor.execute(""" + cursor.execute( + """ INSERT INTO login_attempts ( attempt_id, username, success, ip_address, timestamp, failure_reason ) VALUES (?, ?, 0, ?, ?, ?) - """, (attempt_id, username, ip_address, timestamp, failure_reason)) + """, + (attempt_id, username, ip_address, timestamp, failure_reason), + ) conn.commit() conn.close() @@ -321,22 +358,31 @@ def authenticate( if remaining_attempts > 0: return None, f"Invalid password. {remaining_attempts} attempts remaining." else: - return None, f"Account locked for {self.lockout_duration_minutes} minutes due to too many failed attempts." + return ( + None, + f"Account locked for {self.lockout_duration_minutes} minutes due to too many failed attempts.", + ) # Successful login # Reset failed attempts and update last login - cursor.execute(""" + cursor.execute( + """ UPDATE users SET failed_login_attempts = 0, last_login = ? WHERE user_id = ? - """, (timestamp, user_id)) + """, + (timestamp, user_id), + ) # Log successful attempt - cursor.execute(""" + cursor.execute( + """ INSERT INTO login_attempts ( attempt_id, username, success, ip_address, timestamp, failure_reason ) VALUES (?, ?, 1, ?, ?, NULL) - """, (attempt_id, username, ip_address, timestamp)) + """, + (attempt_id, username, ip_address, timestamp), + ) conn.commit() conn.close() @@ -350,16 +396,12 @@ def authenticate( created_at=created_at, last_login=timestamp, is_active=bool(is_active), - failed_login_attempts=0 + failed_login_attempts=0, ) return user, None - def create_session( - self, - user: User, - ip_address: Optional[str] = None - ) -> Session: + def create_session(self, user: User, ip_address: Optional[str] = None) -> Session: """Create a new session for authenticated user.""" session_id = secrets.token_urlsafe(32) now = datetime.now(timezone.utc) @@ -368,12 +410,21 @@ def create_session( conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ INSERT INTO sessions ( session_id, user_id, created_at, expires_at, last_activity, ip_address ) VALUES (?, ?, ?, ?, ?, ?) 
- """, (session_id, user.user_id, now.isoformat(), expires_at.isoformat(), - now.isoformat(), ip_address)) + """, + ( + session_id, + user.user_id, + now.isoformat(), + expires_at.isoformat(), + now.isoformat(), + ip_address, + ), + ) conn.commit() conn.close() @@ -386,7 +437,7 @@ def create_session( created_at=now.isoformat(), expires_at=expires_at.isoformat(), last_activity=now.isoformat(), - ip_address=ip_address + ip_address=ip_address, ) def validate_session(self, session_id: str) -> Tuple[Optional[Session], Optional[str]]: @@ -400,14 +451,17 @@ def validate_session(self, session_id: str) -> Tuple[Optional[Session], Optional conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ SELECT s.session_id, s.user_id, s.created_at, s.expires_at, s.last_activity, s.ip_address, s.revoked, u.username, u.role FROM sessions s JOIN users u ON s.user_id = u.user_id WHERE s.session_id = ? - """, (session_id,)) + """, + (session_id,), + ) row = cursor.fetchone() @@ -415,15 +469,24 @@ def validate_session(self, session_id: str) -> Tuple[Optional[Session], Optional conn.close() return None, "Invalid session" - (sid, user_id, created_at, expires_at, last_activity, - ip_address, revoked, username, role_str) = row + ( + sid, + user_id, + created_at, + expires_at, + last_activity, + ip_address, + revoked, + username, + role_str, + ) = row if revoked: conn.close() return None, "Session revoked" # Check expiration - expires = datetime.fromisoformat(expires_at.replace('Z', '+00:00')) + expires = datetime.fromisoformat(expires_at.replace("Z", "+00:00")) if datetime.now(timezone.utc) > expires: conn.close() return None, "Session expired" @@ -432,36 +495,45 @@ def validate_session(self, session_id: str) -> Tuple[Optional[Session], Optional now = datetime.now(timezone.utc) new_expires = now + timedelta(minutes=self.session_timeout_minutes) - cursor.execute(""" + cursor.execute( + """ UPDATE sessions SET last_activity = ?, expires_at = ? WHERE session_id = ? - """, (now.isoformat(), new_expires.isoformat(), session_id)) + """, + (now.isoformat(), new_expires.isoformat(), session_id), + ) conn.commit() conn.close() - return Session( - session_id=sid, - user_id=user_id, - username=username, - role=Role(role_str), - created_at=created_at, - expires_at=new_expires.isoformat(), - last_activity=now.isoformat(), - ip_address=ip_address - ), None + return ( + Session( + session_id=sid, + user_id=user_id, + username=username, + role=Role(role_str), + created_at=created_at, + expires_at=new_expires.isoformat(), + last_activity=now.isoformat(), + ip_address=ip_address, + ), + None, + ) def revoke_session(self, session_id: str): """Revoke a session (logout).""" conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ UPDATE sessions SET revoked = 1 WHERE session_id = ? - """, (session_id,)) + """, + (session_id,), + ) conn.commit() conn.close() @@ -475,12 +547,15 @@ def get_user_by_id(self, user_id: str) -> Optional[User]: conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ SELECT user_id, username, email, role, full_name, created_at, last_login, is_active, failed_login_attempts, locked_until FROM users WHERE user_id = ? 
- """, (user_id,)) + """, + (user_id,), + ) row = cursor.fetchone() conn.close() @@ -498,7 +573,7 @@ def get_user_by_id(self, user_id: str) -> Optional[User]: last_login=row[6], is_active=bool(row[7]), failed_login_attempts=row[8], - locked_until=row[9] + locked_until=row[9], ) def list_users(self) -> List[User]: @@ -506,27 +581,31 @@ def list_users(self) -> List[User]: conn = sqlite3.connect(self.db_path) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ SELECT user_id, username, email, role, full_name, created_at, last_login, is_active, failed_login_attempts, locked_until FROM users ORDER BY created_at DESC - """) + """ + ) users = [] for row in cursor.fetchall(): - users.append(User( - user_id=row[0], - username=row[1], - email=row[2], - role=Role(row[3]), - full_name=row[4], - created_at=row[5], - last_login=row[6], - is_active=bool(row[7]), - failed_login_attempts=row[8], - locked_until=row[9] - )) + users.append( + User( + user_id=row[0], + username=row[1], + email=row[2], + role=Role(row[3]), + full_name=row[4], + created_at=row[5], + last_login=row[6], + is_active=bool(row[7]), + failed_login_attempts=row[8], + locked_until=row[9], + ) + ) conn.close() return users @@ -537,36 +616,44 @@ def get_active_sessions(self, user_id: Optional[str] = None) -> List[Session]: cursor = conn.cursor() if user_id: - cursor.execute(""" + cursor.execute( + """ SELECT s.session_id, s.user_id, s.created_at, s.expires_at, s.last_activity, s.ip_address, u.username, u.role FROM sessions s JOIN users u ON s.user_id = u.user_id WHERE s.user_id = ? AND s.revoked = 0 AND s.expires_at > ? ORDER BY s.last_activity DESC - """, (user_id, datetime.now(timezone.utc).isoformat())) + """, + (user_id, datetime.now(timezone.utc).isoformat()), + ) else: - cursor.execute(""" + cursor.execute( + """ SELECT s.session_id, s.user_id, s.created_at, s.expires_at, s.last_activity, s.ip_address, u.username, u.role FROM sessions s JOIN users u ON s.user_id = u.user_id WHERE s.revoked = 0 AND s.expires_at > ? ORDER BY s.last_activity DESC - """, (datetime.now(timezone.utc).isoformat(),)) + """, + (datetime.now(timezone.utc).isoformat(),), + ) sessions = [] for row in cursor.fetchall(): - sessions.append(Session( - session_id=row[0], - user_id=row[1], - created_at=row[2], - expires_at=row[3], - last_activity=row[4], - ip_address=row[5], - username=row[6], - role=Role(row[7]) - )) + sessions.append( + Session( + session_id=row[0], + user_id=row[1], + created_at=row[2], + expires_at=row[3], + last_activity=row[4], + ip_address=row[5], + username=row[6], + role=Role(row[7]), + ) + ) conn.close() return sessions diff --git a/src/lexecon/security/middleware.py b/src/lexecon/security/middleware.py index 8e6d23c..31434d8 100644 --- a/src/lexecon/security/middleware.py +++ b/src/lexecon/security/middleware.py @@ -2,10 +2,11 @@ FastAPI middleware for authentication and authorization. 
""" -from fastapi import Request, HTTPException, status -from fastapi.responses import JSONResponse -from typing import Optional, Callable from functools import wraps +from typing import Callable, Optional + +from fastapi import HTTPException, Request, status +from fastapi.responses import JSONResponse from lexecon.security.auth_service import AuthService, Permission, Session @@ -19,13 +20,7 @@ def __init__(self, auth_service: AuthService): async def __call__(self, request: Request, call_next): """Middleware to validate session on protected endpoints.""" # Skip authentication for public endpoints - public_endpoints = [ - "/health", - "/login", - "/docs", - "/openapi.json", - "/redoc" - ] + public_endpoints = ["/health", "/login", "/docs", "/openapi.json", "/redoc"] if any(request.url.path.startswith(endpoint) for endpoint in public_endpoints): return await call_next(request) @@ -38,7 +33,7 @@ async def __call__(self, request: Request, call_next): if not session_id: return JSONResponse( status_code=status.HTTP_401_UNAUTHORIZED, - content={"error": "Not authenticated", "message": "No session token provided"} + content={"error": "Not authenticated", "message": "No session token provided"}, ) # Validate session @@ -46,7 +41,7 @@ async def __call__(self, request: Request, call_next): if not session: return JSONResponse( status_code=status.HTTP_401_UNAUTHORIZED, - content={"error": "Invalid session", "message": error} + content={"error": "Invalid session", "message": error}, ) # Attach session to request state @@ -67,13 +62,13 @@ def require_permission(permission: Permission): async def some_endpoint(request: Request): ... """ + def decorator(func: Callable): @wraps(func) async def wrapper(request: Request, *args, **kwargs): if not hasattr(request.state, "session"): raise HTTPException( - status_code=status.HTTP_401_UNAUTHORIZED, - detail="Not authenticated" + status_code=status.HTTP_401_UNAUTHORIZED, detail="Not authenticated" ) session: Session = request.state.session @@ -82,11 +77,13 @@ async def wrapper(request: Request, *args, **kwargs): if not auth_service.has_permission(session.role, permission): raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, - detail=f"Insufficient permissions. Required: {permission.value}" + detail=f"Insufficient permissions. 
Required: {permission.value}", ) return await func(request, *args, **kwargs) + return wrapper + return decorator @@ -98,5 +95,9 @@ def get_current_user(request: Request) -> Optional[dict]: return { "user_id": request.state.user_id, "username": request.state.username, - "role": request.state.role.value if hasattr(request.state.role, 'value') else str(request.state.role) + "role": ( + request.state.role.value + if hasattr(request.state.role, "value") + else str(request.state.role) + ), } diff --git a/src/lexecon/security/signature_service.py b/src/lexecon/security/signature_service.py index 392d029..1be2166 100644 --- a/src/lexecon/security/signature_service.py +++ b/src/lexecon/security/signature_service.py @@ -12,11 +12,12 @@ import json import os from datetime import datetime, timezone -from typing import Dict, Any, Tuple, Optional -from cryptography.hazmat.primitives.asymmetric import rsa, padding -from cryptography.hazmat.primitives import hashes, serialization -from cryptography.hazmat.backends import default_backend +from typing import Any, Dict, Optional, Tuple + from cryptography.exceptions import InvalidSignature +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import padding, rsa class SignatureService: @@ -52,9 +53,7 @@ def _generate_keys(self): """Generate new RSA key pair.""" # Generate private key (4096 bits for high security) self.private_key = rsa.generate_private_key( - public_exponent=65537, - key_size=4096, - backend=default_backend() + public_exponent=65537, key_size=4096, backend=default_backend() ) # Derive public key @@ -64,19 +63,19 @@ def _generate_keys(self): private_pem = self.private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, - encryption_algorithm=serialization.NoEncryption() # For demo; use BestAvailableEncryption in production + encryption_algorithm=serialization.NoEncryption(), # For demo; use BestAvailableEncryption in production ) - with open(self.private_key_path, 'wb') as f: + with open(self.private_key_path, "wb") as f: f.write(private_pem) # Save public key public_pem = self.public_key.public_bytes( encoding=serialization.Encoding.PEM, - format=serialization.PublicFormat.SubjectPublicKeyInfo + format=serialization.PublicFormat.SubjectPublicKeyInfo, ) - with open(self.public_key_path, 'wb') as f: + with open(self.public_key_path, "wb") as f: f.write(public_pem) # Set restrictive permissions @@ -85,18 +84,13 @@ def _generate_keys(self): def _load_keys(self): """Load existing keys from disk.""" - with open(self.private_key_path, 'rb') as f: + with open(self.private_key_path, "rb") as f: self.private_key = serialization.load_pem_private_key( - f.read(), - password=None, # Use password in production - backend=default_backend() + f.read(), password=None, backend=default_backend() # Use password in production ) - with open(self.public_key_path, 'rb') as f: - self.public_key = serialization.load_pem_public_key( - f.read(), - backend=default_backend() - ) + with open(self.public_key_path, "rb") as f: + self.public_key = serialization.load_pem_public_key(f.read(), backend=default_backend()) def sign_packet(self, packet_data: Dict[str, Any]) -> Dict[str, Any]: """ @@ -118,11 +112,8 @@ def sign_packet(self, packet_data: Dict[str, Any]) -> Dict[str, Any]: # Sign the hash with RSA private key signature = self.private_key.sign( packet_hash.encode(), - padding.PSS( - 
mgf=padding.MGF1(hashes.SHA256()), - salt_length=padding.PSS.MAX_LENGTH - ), - hashes.SHA256() + padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH), + hashes.SHA256(), ) # Encode signature as hex @@ -131,7 +122,7 @@ def sign_packet(self, packet_data: Dict[str, Any]) -> Dict[str, Any]: # Get public key fingerprint public_pem = self.public_key.public_bytes( encoding=serialization.Encoding.PEM, - format=serialization.PublicFormat.SubjectPublicKeyInfo + format=serialization.PublicFormat.SubjectPublicKeyInfo, ) public_key_fingerprint = hashlib.sha256(public_pem).hexdigest()[:16] @@ -145,16 +136,12 @@ def sign_packet(self, packet_data: Dict[str, Any]) -> Dict[str, Any]: "public_key_fingerprint": public_key_fingerprint, "signed_at": datetime.now(timezone.utc).isoformat(), "signed_by": "Lexecon Governance System", - "verification_instructions": "Use /compliance/verify-signature endpoint with packet and signature" + "verification_instructions": "Use /compliance/verify-signature endpoint with packet and signature", } return signature_info - def verify_signature( - self, - packet_data: Dict[str, Any], - signature_hex: str - ) -> Tuple[bool, str]: + def verify_signature(self, packet_data: Dict[str, Any], signature_hex: str) -> Tuple[bool, str]: """ Verify a signature on an audit packet. @@ -180,11 +167,8 @@ def verify_signature( self.public_key.verify( signature, packet_hash.encode(), - padding.PSS( - mgf=padding.MGF1(hashes.SHA256()), - salt_length=padding.PSS.MAX_LENGTH - ), - hashes.SHA256() + padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH), + hashes.SHA256(), ) return True, "Signature is valid" @@ -201,10 +185,10 @@ def get_public_key_pem(self) -> str: public_pem = self.public_key.public_bytes( encoding=serialization.Encoding.PEM, - format=serialization.PublicFormat.SubjectPublicKeyInfo + format=serialization.PublicFormat.SubjectPublicKeyInfo, ) - return public_pem.decode('utf-8') + return public_pem.decode("utf-8") def get_public_key_fingerprint(self) -> str: """Get SHA-256 fingerprint of public key.""" @@ -213,7 +197,7 @@ def get_public_key_fingerprint(self) -> str: public_pem = self.public_key.public_bytes( encoding=serialization.Encoding.PEM, - format=serialization.PublicFormat.SubjectPublicKeyInfo + format=serialization.PublicFormat.SubjectPublicKeyInfo, ) return hashlib.sha256(public_pem).hexdigest() @@ -229,16 +213,13 @@ def sign_and_enrich_packet(self, packet_data: Dict[str, Any]) -> Dict[str, Any]: Enriched packet with signature_info field """ # Create a copy without signature_info (if it exists) - packet_to_sign = {k: v for k, v in packet_data.items() if k != 'signature_info'} + packet_to_sign = {k: v for k, v in packet_data.items() if k != "signature_info"} # Generate signature signature_info = self.sign_packet(packet_to_sign) # Add signature to packet - enriched_packet = { - **packet_data, - "signature_info": signature_info - } + enriched_packet = {**packet_data, "signature_info": signature_info} return enriched_packet @@ -261,7 +242,7 @@ def verify_packet_signature(self, packet_data: Dict[str, Any]) -> Tuple[bool, st return False, "No signature found in signature_info" # Extract packet without signature - packet_to_verify = {k: v for k, v in packet_data.items() if k != 'signature_info'} + packet_to_verify = {k: v for k, v in packet_data.items() if k != "signature_info"} # Verify return self.verify_signature(packet_to_verify, signature_info["signature"]) diff --git a/src/lexecon/storage/persistence.py 
b/src/lexecon/storage/persistence.py index e27c85d..7385f38 100644 --- a/src/lexecon/storage/persistence.py +++ b/src/lexecon/storage/persistence.py @@ -36,7 +36,8 @@ def _init_database(self): cursor = conn.cursor() # Main ledger entries table - cursor.execute(""" + cursor.execute( + """ CREATE TABLE IF NOT EXISTS ledger_entries ( entry_id TEXT PRIMARY KEY, event_type TEXT NOT NULL, @@ -46,25 +47,32 @@ def _init_database(self): entry_hash TEXT NOT NULL, created_at TEXT NOT NULL ) - """) + """ + ) # Create indexes separately (SQLite syntax) - cursor.execute(""" + cursor.execute( + """ CREATE INDEX IF NOT EXISTS idx_event_type ON ledger_entries(event_type) - """) + """ + ) - cursor.execute(""" + cursor.execute( + """ CREATE INDEX IF NOT EXISTS idx_timestamp ON ledger_entries(timestamp) - """) + """ + ) # Metadata table for chain verification - cursor.execute(""" + cursor.execute( + """ CREATE TABLE IF NOT EXISTS ledger_metadata ( key TEXT PRIMARY KEY, value TEXT NOT NULL, updated_at TEXT NOT NULL ) - """) + """ + ) conn.commit() conn.close() @@ -75,25 +83,31 @@ def save_entry(self, entry: LedgerEntry) -> None: cursor = conn.cursor() try: - cursor.execute(""" + cursor.execute( + """ INSERT OR REPLACE INTO ledger_entries (entry_id, event_type, timestamp, data, previous_hash, entry_hash, created_at) VALUES (?, ?, ?, ?, ?, ?, ?) - """, ( - entry.entry_id, - entry.event_type, - entry.timestamp, - json.dumps(entry.data), - entry.previous_hash, - entry.entry_hash, - datetime.utcnow().isoformat() - )) + """, + ( + entry.entry_id, + entry.event_type, + entry.timestamp, + json.dumps(entry.data), + entry.previous_hash, + entry.entry_hash, + datetime.utcnow().isoformat(), + ), + ) # Update metadata with latest hash - cursor.execute(""" + cursor.execute( + """ INSERT OR REPLACE INTO ledger_metadata (key, value, updated_at) VALUES ('latest_hash', ?, ?) - """, (entry.entry_hash, datetime.utcnow().isoformat())) + """, + (entry.entry_hash, datetime.utcnow().isoformat()), + ) conn.commit() finally: @@ -105,11 +119,13 @@ def load_all_entries(self) -> List[LedgerEntry]: cursor = conn.cursor() try: - cursor.execute(""" + cursor.execute( + """ SELECT entry_id, event_type, timestamp, data, previous_hash, entry_hash FROM ledger_entries ORDER BY timestamp ASC - """) + """ + ) entries = [] for row in cursor.fetchall(): @@ -121,12 +137,14 @@ def load_all_entries(self) -> List[LedgerEntry]: event_type=row[1], timestamp=row[2], data=json.loads(row[3]), - previous_hash=row[4] + previous_hash=row[4], ) # Verify stored hash matches calculated hash if entry.entry_hash != stored_hash: - raise ValueError(f"Hash mismatch for entry {entry.entry_id}: stored={stored_hash}, calculated={entry.entry_hash}") + raise ValueError( + f"Hash mismatch for entry {entry.entry_id}: stored={stored_hash}, calculated={entry.entry_hash}" + ) entries.append(entry) @@ -140,12 +158,15 @@ def get_entries_by_type(self, event_type: str) -> List[LedgerEntry]: cursor = conn.cursor() try: - cursor.execute(""" + cursor.execute( + """ SELECT entry_id, event_type, timestamp, data, previous_hash, entry_hash FROM ledger_entries WHERE event_type = ? 
ORDER BY timestamp ASC - """, (event_type,)) + """, + (event_type,), + ) entries = [] for row in cursor.fetchall(): @@ -156,7 +177,7 @@ def get_entries_by_type(self, event_type: str) -> List[LedgerEntry]: event_type=row[1], timestamp=row[2], data=json.loads(row[3]), - previous_hash=row[4] + previous_hash=row[4], ) # Verify hash integrity @@ -186,9 +207,11 @@ def get_latest_hash(self) -> Optional[str]: cursor = conn.cursor() try: - cursor.execute(""" + cursor.execute( + """ SELECT value FROM ledger_metadata WHERE key = 'latest_hash' - """) + """ + ) result = cursor.fetchone() return result[0] if result else None finally: @@ -221,10 +244,10 @@ def export_to_json(self, output_path: str) -> None: data = { "exported_at": datetime.utcnow().isoformat(), "total_entries": len(entries), - "entries": [entry.to_dict() for entry in entries] + "entries": [entry.to_dict() for entry in entries], } - with open(output_path, 'w') as f: + with open(output_path, "w") as f: json.dump(data, f, indent=2) def get_statistics(self) -> dict: @@ -238,18 +261,22 @@ def get_statistics(self) -> dict: total = cursor.fetchone()[0] # Entries by type - cursor.execute(""" + cursor.execute( + """ SELECT event_type, COUNT(*) FROM ledger_entries GROUP BY event_type - """) + """ + ) by_type = dict(cursor.fetchall()) # Date range - cursor.execute(""" + cursor.execute( + """ SELECT MIN(timestamp), MAX(timestamp) FROM ledger_entries - """) + """ + ) date_range = cursor.fetchone() # Database file size @@ -262,7 +289,7 @@ def get_statistics(self) -> dict: "newest_entry": date_range[1], "database_size_bytes": db_size, "database_path": self.db_path, - "chain_integrity": self.verify_chain_integrity() + "chain_integrity": self.verify_chain_integrity(), } finally: conn.close() diff --git a/src/lexecon/tools/audit_verify.py b/src/lexecon/tools/audit_verify.py index 18c577b..177f024 100644 --- a/src/lexecon/tools/audit_verify.py +++ b/src/lexecon/tools/audit_verify.py @@ -15,6 +15,7 @@ class AuditVerificationError(Exception): """Base exception for verification failures.""" + pass @@ -192,9 +193,7 @@ def _verify_root_checksum(self) -> None: if self.packet_path.is_file(): # Note: This is a simplified check. 
Full verification would # require canonical JSON serialization - self.warnings.append( - "Root checksum verification for single-file packet is simplified" - ) + self.warnings.append("Root checksum verification for single-file packet is simplified") return # For directory packets, compute root hash diff --git a/tests/test_api.py b/tests/test_api.py index beb1850..ca8e601 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -495,8 +495,8 @@ def test_create_user(self, client): "email": "test@example.com", "password": "SecurePass123!", "role": "viewer", - "full_name": "Test User" - } + "full_name": "Test User", + }, ) # May succeed, require auth, or fail depending on setup assert response.status_code in [200, 201, 401, 403, 500] @@ -522,11 +522,8 @@ def test_assess_risk(self, client): "/api/governance/risk/assess", json={ "decision_id": "dec_test_123", - "risk_factors": { - "data_sensitivity": "high", - "action_reversibility": "low" - } - } + "risk_factors": {"data_sensitivity": "high", "action_reversibility": "low"}, + }, ) # Endpoint may not be fully functional without setup, or require validation assert response.status_code in [200, 201, 404, 422, 500] @@ -543,8 +540,8 @@ def test_store_evidence(self, client): "artifact_type": "decision_log", "content": {"test": "data"}, "source": "test_source", - "decision_id": "dec_test" - } + "decision_id": "dec_test", + }, ) # Endpoint behavior depends on setup assert response.status_code in [200, 201, 422, 500] diff --git a/tests/test_api_additional.py b/tests/test_api_additional.py index 876103b..f8a759c 100644 --- a/tests/test_api_additional.py +++ b/tests/test_api_additional.py @@ -64,8 +64,8 @@ def test_map_to_framework(self, client): json={ "framework": "EU_AI_ACT", "primitive_id": "dec_test_123", - "primitive_type": "decision" - } + "primitive_type": "decision", + }, ) assert response.status_code in [200, 201, 400, 404, 422, 500] @@ -83,7 +83,7 @@ def test_verify_control_compliance(self, client): """Test verifying control compliance.""" response = client.post( "/api/governance/compliance/EU_AI_ACT/article_12/verify", - json={"decision_id": "dec_test"} + json={"decision_id": "dec_test"}, ) assert response.status_code in [200, 400, 404, 422, 500] @@ -91,10 +91,7 @@ def test_link_evidence_to_control(self, client): """Test linking evidence to control.""" response = client.post( "/api/governance/compliance/EU_AI_ACT/article_12/link-evidence", - json={ - "evidence_id": "evd_test_123", - "decision_id": "dec_test" - } + json={"evidence_id": "evd_test_123", "decision_id": "dec_test"}, ) assert response.status_code in [200, 201, 400, 404, 422, 500] @@ -132,22 +129,16 @@ def test_request_audit_export(self, client): response = client.post( "/api/governance/audit-export/request", json={ - "scope": { - "start_date": "2024-01-01", - "end_date": "2024-12-31" - }, + "scope": {"start_date": "2024-01-01", "end_date": "2024-12-31"}, "requestor": "test_user", - "purpose": "regulatory_audit" - } + "purpose": "regulatory_audit", + }, ) assert response.status_code in [200, 201, 400, 404, 422, 500] def test_generate_audit_export(self, client): """Test generating audit export.""" - response = client.post( - "/api/governance/audit-export/exp_test_123/generate", - json={} - ) + response = client.post("/api/governance/audit-export/exp_test_123/generate", json={}) # Export may not exist assert response.status_code in [200, 400, 404, 422, 500] @@ -203,10 +194,7 @@ def test_article_12_legal_hold(self, client): """Test Article 12 legal hold.""" response = client.post( 
"/compliance/eu-ai-act/article-12/legal-hold", - json={ - "decision_id": "dec_test", - "reason": "regulatory_investigation" - } + json={"decision_id": "dec_test", "reason": "regulatory_investigation"}, ) assert response.status_code in [200, 201, 400, 404, 422, 500] @@ -218,8 +206,8 @@ def test_article_14_intervention(self, client): "decision_id": "dec_test", "intervention_type": "override", "human_role": "auditor", - "reason": "safety_concern" - } + "reason": "safety_concern", + }, ) assert response.status_code in [200, 201, 400, 404, 422, 500] @@ -231,8 +219,7 @@ def test_article_14_effectiveness(self, client): def test_article_14_verify(self, client): """Test Article 14 intervention verification.""" response = client.post( - "/compliance/eu-ai-act/article-14/verify", - json={"intervention_id": "int_test_123"} + "/compliance/eu-ai-act/article-14/verify", json={"intervention_id": "int_test_123"} ) assert response.status_code in [200, 400, 404, 422, 500] @@ -245,10 +232,7 @@ def test_article_14_escalation(self, client): """Test Article 14 escalation.""" response = client.post( "/compliance/eu-ai-act/article-14/escalation", - json={ - "intervention_id": "int_test", - "reason": "policy_violation" - } + json={"intervention_id": "int_test", "reason": "policy_violation"}, ) assert response.status_code in [200, 201, 400, 404, 422, 500] @@ -270,10 +254,7 @@ def test_verify_signature(self, client): """Test packet signature verification.""" response = client.post( "/compliance/verify-signature", - json={ - "packet": {"data": "test"}, - "signature": "test_signature" - } + json={"packet": {"data": "test"}, "signature": "test_signature"}, ) assert response.status_code in [200, 422, 500] diff --git a/tests/test_append_only_store.py b/tests/test_append_only_store.py index 5fc1fd7..13fd25a 100644 --- a/tests/test_append_only_store.py +++ b/tests/test_append_only_store.py @@ -3,10 +3,11 @@ """ import pytest + from src.lexecon.evidence.append_only_store import ( + AppendOnlyEvidenceStore, AppendOnlyStore, AppendOnlyViolationError, - AppendOnlyEvidenceStore, ) @@ -157,6 +158,7 @@ class TestAppendOnlyEvidenceStore: def test_wrap_evidence_service(self): """Can wrap an EvidenceService.""" + # Mock a simple service class MockEvidenceService: def __init__(self): @@ -170,6 +172,7 @@ def __init__(self): def test_enabled_wraps_storage(self): """When enabled, wraps service's internal storage.""" + class MockEvidenceService: def __init__(self): self._artifacts = {} @@ -182,6 +185,7 @@ def __init__(self): def test_enable_after_init(self): """Can enable append-only mode after initialization.""" + class MockEvidenceService: def __init__(self): self._artifacts = {"existing": "artifact"} @@ -203,6 +207,7 @@ def __init__(self): def test_disable_after_enable(self): """Can disable append-only mode after enabling.""" + class MockEvidenceService: def __init__(self): self._artifacts = {} @@ -247,8 +252,7 @@ def __init__(self): # Add artifact with incorrect hash service._artifacts._store["artifact2"] = MockArtifact( - "different content", - "0" * 64 # Wrong hash + "different content", "0" * 64 # Wrong hash ) # Integrity should fail diff --git a/tests/test_article_12_records.py b/tests/test_article_12_records.py index 5899e6b..5bc82f5 100644 --- a/tests/test_article_12_records.py +++ b/tests/test_article_12_records.py @@ -282,9 +282,7 @@ def test_export_for_regulator_csv(self, record_system, ledger): def test_anonymize_record(self, record_system, ledger): """Test anonymizing record with personal data.""" - entry = ledger.append( - 
"decision", {"user_email": "test@example.com", "action": "search"} - ) + entry = ledger.append("decision", {"user_email": "test@example.com", "action": "search"}) result = record_system.anonymize_record(entry.entry_id) diff --git a/tests/test_audit_export.py b/tests/test_audit_export.py index 28198a0..0e1baa8 100644 --- a/tests/test_audit_export.py +++ b/tests/test_audit_export.py @@ -4,8 +4,10 @@ Tests comprehensive governance data export functionality. """ +from datetime import datetime, timedelta, timezone + import pytest -from datetime import datetime, timezone, timedelta + from lexecon.audit_export.service import ( AuditExportService, ExportFormat, @@ -23,18 +25,16 @@ def export_service(): @pytest.fixture def mock_risk_service(): """Create a mock risk service with test data.""" - from lexecon.risk.service import RiskService, RiskDimensions + from lexecon.risk.service import RiskDimensions, RiskService service = RiskService() # Add test risks service.assess_risk( - decision_id="dec_001", - dimensions=RiskDimensions(security=80, privacy=60, compliance=40) + decision_id="dec_001", dimensions=RiskDimensions(security=80, privacy=60, compliance=40) ) service.assess_risk( - decision_id="dec_002", - dimensions=RiskDimensions(security=30, privacy=20, compliance=10) + decision_id="dec_002", dimensions=RiskDimensions(security=30, privacy=20, compliance=10) ) return service @@ -44,7 +44,7 @@ def mock_risk_service(): def mock_escalation_service(): """Create a mock escalation service with test data.""" from lexecon.escalation.service import EscalationService - from model_governance_pack.models import EscalationTrigger, EscalationPriority + from model_governance_pack.models import EscalationPriority, EscalationTrigger service = EscalationService() @@ -53,7 +53,7 @@ def mock_escalation_service(): decision_id="dec_001", trigger=EscalationTrigger.RISK_THRESHOLD, escalated_to=["manager@example.com"], - priority=EscalationPriority.HIGH + priority=EscalationPriority.HIGH, ) return service @@ -62,10 +62,11 @@ def mock_escalation_service(): @pytest.fixture def mock_override_service(): """Create a mock override service with test data.""" + import uuid + from datetime import datetime, timezone + from lexecon.override.service import OverrideService from model_governance_pack.models import Override, OverrideType - from datetime import datetime, timezone - import uuid service = OverrideService() @@ -77,7 +78,7 @@ def mock_override_service(): authorized_by="admin@example.com", justification="Emergency override due to critical business need", timestamp=datetime.now(timezone.utc), - evidence_ids=[] + evidence_ids=[], ) # Add to service's internal storage @@ -100,7 +101,7 @@ def mock_evidence_service(): artifact_type=ArtifactType.DECISION_LOG, content="Test decision log content", source="test_system", - related_decision_ids=["dec_001"] + related_decision_ids=["dec_001"], ) return service @@ -123,12 +124,10 @@ def mock_ledger(): ledger = LedgerChain() # Add test decisions - ledger.append("decision", { - "request_id": "req_001", - "decision": "allow", - "actor": "system", - "action": "test_action" - }) + ledger.append( + "decision", + {"request_id": "req_001", "decision": "allow", "actor": "system", "action": "test_action"}, + ) return ledger @@ -146,7 +145,7 @@ def test_create_export_request(self, export_service): requester="auditor@example.com", purpose="Regulatory compliance audit", scope=ExportScope.ALL, - format=ExportFormat.JSON + format=ExportFormat.JSON, ) assert request is not None @@ -165,14 +164,14 @@ def 
test_generate_export_all_data( mock_override_service, mock_evidence_service, mock_compliance_service, - mock_ledger + mock_ledger, ): """Test generating complete export with all data.""" request = export_service.create_export_request( requester="auditor@example.com", purpose="Complete audit", scope=ExportScope.ALL, - format=ExportFormat.JSON + format=ExportFormat.JSON, ) package = export_service.generate_export( @@ -182,7 +181,7 @@ def test_generate_export_all_data( override_service=mock_override_service, evidence_service=mock_evidence_service, compliance_service=mock_compliance_service, - ledger=mock_ledger + ledger=mock_ledger, ) assert package is not None @@ -207,13 +206,10 @@ def test_export_risk_only(self, export_service, mock_risk_service): requester="risk_analyst@example.com", purpose="Risk analysis", scope=ExportScope.RISK_ONLY, - format=ExportFormat.JSON + format=ExportFormat.JSON, ) - package = export_service.generate_export( - request=request, - risk_service=mock_risk_service - ) + package = export_service.generate_export(request=request, risk_service=mock_risk_service) assert package is not None assert "risks" in package.data @@ -222,12 +218,7 @@ def test_export_risk_only(self, export_service, mock_risk_service): assert "escalations" not in package.data assert "overrides" not in package.data - def test_export_with_date_filter( - self, - export_service, - mock_risk_service, - mock_ledger - ): + def test_export_with_date_filter(self, export_service, mock_risk_service, mock_ledger): """Test exporting with date range filter.""" now = datetime.now(timezone.utc) start_date = now - timedelta(days=7) @@ -239,13 +230,11 @@ def test_export_with_date_filter( scope=ExportScope.ALL, format=ExportFormat.JSON, start_date=start_date, - end_date=end_date + end_date=end_date, ) package = export_service.generate_export( - request=request, - risk_service=mock_risk_service, - ledger=mock_ledger + request=request, risk_service=mock_risk_service, ledger=mock_ledger ) assert package is not None @@ -258,13 +247,10 @@ def test_export_json_format(self, export_service, mock_risk_service): requester="dev@example.com", purpose="Testing", scope=ExportScope.RISK_ONLY, - format=ExportFormat.JSON + format=ExportFormat.JSON, ) - package = export_service.generate_export( - request=request, - risk_service=mock_risk_service - ) + package = export_service.generate_export(request=request, risk_service=mock_risk_service) assert package.format == ExportFormat.JSON assert package.content.startswith("{") @@ -276,13 +262,10 @@ def test_export_csv_format(self, export_service, mock_risk_service): requester="analyst@example.com", purpose="Data analysis", scope=ExportScope.RISK_ONLY, - format=ExportFormat.CSV + format=ExportFormat.CSV, ) - package = export_service.generate_export( - request=request, - risk_service=mock_risk_service - ) + package = export_service.generate_export(request=request, risk_service=mock_risk_service) assert package.format == ExportFormat.CSV assert "RISK ASSESSMENTS" in package.content @@ -294,13 +277,10 @@ def test_export_markdown_format(self, export_service, mock_risk_service): requester="doc_writer@example.com", purpose="Documentation", scope=ExportScope.RISK_ONLY, - format=ExportFormat.MARKDOWN + format=ExportFormat.MARKDOWN, ) - package = export_service.generate_export( - request=request, - risk_service=mock_risk_service - ) + package = export_service.generate_export(request=request, risk_service=mock_risk_service) assert package.format == ExportFormat.MARKDOWN assert "# Governance Audit Export" 
in package.content @@ -312,38 +292,31 @@ def test_export_html_format(self, export_service, mock_risk_service): requester="reporter@example.com", purpose="Report generation", scope=ExportScope.RISK_ONLY, - format=ExportFormat.HTML + format=ExportFormat.HTML, ) - package = export_service.generate_export( - request=request, - risk_service=mock_risk_service - ) + package = export_service.generate_export(request=request, risk_service=mock_risk_service) assert package.format == ExportFormat.HTML assert "" in package.content assert "
<h1>Governance Audit Export</h1>
" in package.content def test_export_statistics_calculation( - self, - export_service, - mock_risk_service, - mock_escalation_service, - mock_override_service + self, export_service, mock_risk_service, mock_escalation_service, mock_override_service ): """Test statistics calculation in export.""" request = export_service.create_export_request( requester="auditor@example.com", purpose="Statistics test", scope=ExportScope.ALL, - format=ExportFormat.JSON + format=ExportFormat.JSON, ) package = export_service.generate_export( request=request, risk_service=mock_risk_service, escalation_service=mock_escalation_service, - override_service=mock_override_service + override_service=mock_override_service, ) stats = package.data["statistics"] @@ -357,19 +330,17 @@ def test_export_checksum_generation(self, export_service, mock_risk_service): requester="security@example.com", purpose="Integrity verification", scope=ExportScope.RISK_ONLY, - format=ExportFormat.JSON + format=ExportFormat.JSON, ) - package = export_service.generate_export( - request=request, - risk_service=mock_risk_service - ) + package = export_service.generate_export(request=request, risk_service=mock_risk_service) assert package.checksum is not None assert len(package.checksum) == 64 # SHA-256 hex digest # Verify checksum is correct import hashlib + expected_checksum = hashlib.sha256(package.content.encode()).hexdigest() assert package.checksum == expected_checksum @@ -379,13 +350,10 @@ def test_get_export(self, export_service, mock_risk_service): requester="user@example.com", purpose="Test", scope=ExportScope.RISK_ONLY, - format=ExportFormat.JSON + format=ExportFormat.JSON, ) - package = export_service.generate_export( - request=request, - risk_service=mock_risk_service - ) + package = export_service.generate_export(request=request, risk_service=mock_risk_service) retrieved = export_service.get_export(package.export_id) assert retrieved is not None @@ -404,12 +372,9 @@ def test_list_exports(self, export_service, mock_risk_service): requester=f"user{i}@example.com", purpose=f"Test {i}", scope=ExportScope.RISK_ONLY, - format=ExportFormat.JSON - ) - export_service.generate_export( - request=request, - risk_service=mock_risk_service + format=ExportFormat.JSON, ) + export_service.generate_export(request=request, risk_service=mock_risk_service) exports = export_service.list_exports() assert len(exports) == 3 @@ -421,7 +386,7 @@ def test_list_exports_by_requester(self, export_service, mock_risk_service): requester="alice@example.com", purpose="Test 1", scope=ExportScope.RISK_ONLY, - format=ExportFormat.JSON + format=ExportFormat.JSON, ) export_service.generate_export(request=request1, risk_service=mock_risk_service) @@ -429,7 +394,7 @@ def test_list_exports_by_requester(self, export_service, mock_risk_service): requester="bob@example.com", purpose="Test 2", scope=ExportScope.RISK_ONLY, - format=ExportFormat.JSON + format=ExportFormat.JSON, ) export_service.generate_export(request=request2, risk_service=mock_risk_service) @@ -445,12 +410,9 @@ def test_export_statistics(self, export_service, mock_risk_service): requester=f"user{i}@example.com", purpose=f"Test {i}", scope=ExportScope.RISK_ONLY, - format=ExportFormat.JSON if i % 2 == 0 else ExportFormat.CSV - ) - export_service.generate_export( - request=request, - risk_service=mock_risk_service + format=ExportFormat.JSON if i % 2 == 0 else ExportFormat.CSV, ) + export_service.generate_export(request=request, risk_service=mock_risk_service) stats = export_service.get_export_statistics() assert 
stats["total_exports"] == 3 @@ -460,25 +422,21 @@ def test_export_statistics(self, export_service, mock_risk_service): assert stats["format_breakdown"]["csv"] == 1 def test_export_record_count( - self, - export_service, - mock_risk_service, - mock_escalation_service, - mock_override_service + self, export_service, mock_risk_service, mock_escalation_service, mock_override_service ): """Test record count in export.""" request = export_service.create_export_request( requester="auditor@example.com", purpose="Record count test", scope=ExportScope.ALL, - format=ExportFormat.JSON + format=ExportFormat.JSON, ) package = export_service.generate_export( request=request, risk_service=mock_risk_service, escalation_service=mock_escalation_service, - override_service=mock_override_service + override_service=mock_override_service, ) assert package.record_count > 0 @@ -490,7 +448,7 @@ def test_export_with_metadata(self, export_service, mock_risk_service): metadata = { "compliance_framework": "SOC2", "audit_period": "Q4 2023", - "auditor_name": "John Doe" + "auditor_name": "John Doe", } request = export_service.create_export_request( @@ -498,13 +456,10 @@ def test_export_with_metadata(self, export_service, mock_risk_service): purpose="Compliance audit", scope=ExportScope.RISK_ONLY, format=ExportFormat.JSON, - metadata=metadata + metadata=metadata, ) - package = export_service.generate_export( - request=request, - risk_service=mock_risk_service - ) + package = export_service.generate_export(request=request, risk_service=mock_risk_service) assert package.metadata == metadata @@ -514,12 +469,11 @@ def test_export_compliance_data(self, export_service, mock_compliance_service): requester="compliance@example.com", purpose="Compliance review", scope=ExportScope.COMPLIANCE_ONLY, - format=ExportFormat.JSON + format=ExportFormat.JSON, ) package = export_service.generate_export( - request=request, - compliance_service=mock_compliance_service + request=request, compliance_service=mock_compliance_service ) assert "compliance" in package.data @@ -536,13 +490,10 @@ def test_export_decision_log(self, export_service, mock_ledger): requester="auditor@example.com", purpose="Decision log review", scope=ExportScope.DECISION_LOG_ONLY, - format=ExportFormat.JSON + format=ExportFormat.JSON, ) - package = export_service.generate_export( - request=request, - ledger=mock_ledger - ) + package = export_service.generate_export(request=request, ledger=mock_ledger) assert "decisions" in package.data assert len(package.data["decisions"]) > 0 @@ -561,11 +512,10 @@ def test_multiple_export_formats_same_data(self, export_service, mock_risk_servi requester="tester@example.com", purpose=f"Test {fmt.value} format", scope=ExportScope.RISK_ONLY, - format=fmt + format=fmt, ) package = export_service.generate_export( - request=request, - risk_service=mock_risk_service + request=request, risk_service=mock_risk_service ) packages.append(package) @@ -581,13 +531,10 @@ def test_export_size_calculation(self, export_service, mock_risk_service): requester="admin@example.com", purpose="Size test", scope=ExportScope.RISK_ONLY, - format=ExportFormat.JSON + format=ExportFormat.JSON, ) - package = export_service.generate_export( - request=request, - risk_service=mock_risk_service - ) + package = export_service.generate_export(request=request, risk_service=mock_risk_service) assert package.size_bytes == len(package.content.encode()) @@ -601,13 +548,10 @@ def test_export_empty_services(self, export_service): requester="tester@example.com", purpose="Empty data 
test", scope=ExportScope.RISK_ONLY, - format=ExportFormat.JSON + format=ExportFormat.JSON, ) - package = export_service.generate_export( - request=request, - risk_service=empty_risk_service - ) + package = export_service.generate_export(request=request, risk_service=empty_risk_service) assert package is not None assert len(package.data.get("risks", [])) == 0 diff --git a/tests/test_audit_verify.py b/tests/test_audit_verify.py index db3ab43..7614339 100644 --- a/tests/test_audit_verify.py +++ b/tests/test_audit_verify.py @@ -24,30 +24,22 @@ def valid_audit_packet_file(temp_dir): "packet_version": "1.0", "export_id": "exp_test_123", "generated_at": "2024-01-01T00:00:00Z", - "generator": { - "name": "lexecon", - "version": "0.1.0" - }, - "scope": { - "start_time": "2024-01-01T00:00:00Z", - "end_time": "2024-01-02T00:00:00Z" - }, + "generator": {"name": "lexecon", "version": "0.1.0"}, + "scope": {"start_time": "2024-01-01T00:00:00Z", "end_time": "2024-01-02T00:00:00Z"}, "contents": { "decision_count": 1, "evidence_count": 0, "risk_count": 0, "escalation_count": 0, - "override_count": 0 + "override_count": 0, }, "integrity": { "algorithm": "SHA-256", "root_checksum": "abc123def456", - "artifact_checksums": {} - } + "artifact_checksums": {}, + }, }, - "decisions": [ - {"decision_id": "dec_1", "actor": "model", "action": "read"} - ] + "decisions": [{"decision_id": "dec_1", "actor": "model", "action": "read"}], } packet_file = Path(temp_dir) / "audit_packet.json" @@ -66,23 +58,14 @@ def valid_audit_packet_dir(temp_dir): "packet_version": "1.0", "export_id": "exp_dir_123", "generated_at": "2024-01-01T00:00:00Z", - "generator": { - "name": "lexecon", - "version": "0.1.0" - }, - "scope": { - "start_time": "2024-01-01T00:00:00Z", - "end_time": "2024-01-02T00:00:00Z" - }, - "contents": { - "decision_count": 3, - "evidence_count": 0 - }, + "generator": {"name": "lexecon", "version": "0.1.0"}, + "scope": {"start_time": "2024-01-01T00:00:00Z", "end_time": "2024-01-02T00:00:00Z"}, + "contents": {"decision_count": 3, "evidence_count": 0}, "integrity": { "algorithm": "SHA-256", "root_checksum": "dir_checksum", - "artifact_checksums": {} - } + "artifact_checksums": {}, + }, } manifest_file = packet_dir / "manifest.json" @@ -90,11 +73,7 @@ def valid_audit_packet_dir(temp_dir): # Create decisions file decisions_file = packet_dir / "decisions.json" - decisions = [ - {"decision_id": "dec_1"}, - {"decision_id": "dec_2"}, - {"decision_id": "dec_3"} - ] + decisions = [{"decision_id": "dec_1"}, {"decision_id": "dec_2"}, {"decision_id": "dec_3"}] decisions_file.write_text(json.dumps(decisions, indent=2)) return str(packet_dir) @@ -220,10 +199,7 @@ def test_verify_full_valid_packet(self, capsys): "generator": {"name": "test", "version": "1.0"}, "scope": {}, "contents": {"decision_count": 0}, - "integrity": { - "algorithm": "SHA-256", - "root_checksum": "test" - } + "integrity": {"algorithm": "SHA-256", "root_checksum": "test"}, } } diff --git a/tests/test_capability_tokens.py b/tests/test_capability_tokens.py index efb65bf..43159cf 100644 --- a/tests/test_capability_tokens.py +++ b/tests/test_capability_tokens.py @@ -170,9 +170,7 @@ def test_token_deserialization_without_signature(self): def test_different_tokens_have_unique_ids(self): """Test that multiple tokens get unique IDs.""" - tokens = [ - CapabilityToken.create("action", "tool", "hash") for _ in range(100) - ] + tokens = [CapabilityToken.create("action", "tool", "hash") for _ in range(100)] token_ids = [t.token_id for t in tokens] assert len(token_ids) == 
len(set(token_ids)) # All unique diff --git a/tests/test_cli.py b/tests/test_cli.py index 7bdae87..6864179 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -331,11 +331,12 @@ def test_verify_ledger_validation_failed(self, runner, temp_dir, monkeypatch): # Mock verify_integrity to return failure original_verify = LedgerChain.verify_integrity + def mock_verify(self): return { "valid": False, "error": "Simulated verification failure", - "entries_verified": 0 + "entries_verified": 0, } monkeypatch.setattr(LedgerChain, "verify_integrity", mock_verify) diff --git a/tests/test_compliance_mapping.py b/tests/test_compliance_mapping.py index e525db8..8af0d60 100644 --- a/tests/test_compliance_mapping.py +++ b/tests/test_compliance_mapping.py @@ -5,11 +5,12 @@ """ import pytest + from lexecon.compliance_mapping.service import ( ComplianceMappingService, - RegulatoryFramework, - GovernancePrimitive, ControlStatus, + GovernancePrimitive, + RegulatoryFramework, ) @@ -36,7 +37,7 @@ def test_map_risk_assessment_to_soc2(self, compliance_service): mapping = compliance_service.map_primitive_to_controls( primitive_type=GovernancePrimitive.RISK_ASSESSMENT, primitive_id="rsk_dec_test_001", - framework=RegulatoryFramework.SOC2 + framework=RegulatoryFramework.SOC2, ) assert mapping is not None @@ -51,7 +52,7 @@ def test_map_escalation_to_iso27001(self, compliance_service): mapping = compliance_service.map_primitive_to_controls( primitive_type=GovernancePrimitive.ESCALATION, primitive_id="esc_test_001", - framework=RegulatoryFramework.ISO27001 + framework=RegulatoryFramework.ISO27001, ) assert mapping is not None @@ -66,7 +67,7 @@ def test_map_override_to_gdpr(self, compliance_service): mapping = compliance_service.map_primitive_to_controls( primitive_type=GovernancePrimitive.OVERRIDE, primitive_id="ovr_test_001", - framework=RegulatoryFramework.GDPR + framework=RegulatoryFramework.GDPR, ) assert mapping is not None @@ -79,7 +80,7 @@ def test_link_evidence_to_control(self, compliance_service): result = compliance_service.link_evidence_to_control( control_id="CC6.1", framework=RegulatoryFramework.SOC2, - evidence_artifact_id="evd_decisionlog_abc123" + evidence_artifact_id="evd_decisionlog_abc123", ) assert result is True @@ -94,7 +95,7 @@ def test_link_evidence_to_invalid_control(self, compliance_service): result = compliance_service.link_evidence_to_control( control_id="INVALID", framework=RegulatoryFramework.SOC2, - evidence_artifact_id="evd_test_123" + evidence_artifact_id="evd_test_123", ) assert result is False @@ -104,7 +105,7 @@ def test_verify_control(self, compliance_service): result = compliance_service.verify_control( control_id="CC7.2", framework=RegulatoryFramework.SOC2, - notes="Verified through risk assessment process" + notes="Verified through risk assessment process", ) assert result is True @@ -119,8 +120,7 @@ def test_verify_control(self, compliance_service): def test_verify_invalid_control(self, compliance_service): """Test verifying non-existent control.""" result = compliance_service.verify_control( - control_id="INVALID", - framework=RegulatoryFramework.SOC2 + control_id="INVALID", framework=RegulatoryFramework.SOC2 ) assert result is False @@ -149,8 +149,7 @@ def test_list_controls_by_status(self, compliance_service): # List verified controls verified = compliance_service.list_controls( - RegulatoryFramework.SOC2, - status=ControlStatus.VERIFIED + RegulatoryFramework.SOC2, status=ControlStatus.VERIFIED ) assert len(verified) == 1 @@ -158,8 +157,7 @@ def 
test_list_controls_by_status(self, compliance_service): # List not implemented controls not_implemented = compliance_service.list_controls( - RegulatoryFramework.SOC2, - status=ControlStatus.NOT_IMPLEMENTED + RegulatoryFramework.SOC2, status=ControlStatus.NOT_IMPLEMENTED ) assert len(not_implemented) == 2 # Remaining controls @@ -168,8 +166,7 @@ def test_list_controls_by_category(self, compliance_service): """Test filtering controls by category.""" # All SOC 2 controls we defined are in "Common Criteria" category controls = compliance_service.list_controls( - RegulatoryFramework.SOC2, - category="Common Criteria" + RegulatoryFramework.SOC2, category="Common Criteria" ) assert len(controls) == 3 @@ -238,13 +235,13 @@ def test_get_primitive_mappings(self, compliance_service): compliance_service.map_primitive_to_controls( primitive_type=GovernancePrimitive.RISK_ASSESSMENT, primitive_id="rsk_test_123", - framework=RegulatoryFramework.SOC2 + framework=RegulatoryFramework.SOC2, ) compliance_service.map_primitive_to_controls( primitive_type=GovernancePrimitive.RISK_ASSESSMENT, primitive_id="rsk_test_123", - framework=RegulatoryFramework.ISO27001 + framework=RegulatoryFramework.ISO27001, ) # Get mappings @@ -254,7 +251,7 @@ def test_get_primitive_mappings(self, compliance_service): assert all(m.primitive_id == "rsk_test_123" for m in mappings) assert {m.framework for m in mappings} == { RegulatoryFramework.SOC2, - RegulatoryFramework.ISO27001 + RegulatoryFramework.ISO27001, } def test_get_statistics(self, compliance_service): @@ -263,13 +260,13 @@ def test_get_statistics(self, compliance_service): compliance_service.map_primitive_to_controls( primitive_type=GovernancePrimitive.RISK_ASSESSMENT, primitive_id="rsk_test_001", - framework=RegulatoryFramework.SOC2 + framework=RegulatoryFramework.SOC2, ) compliance_service.map_primitive_to_controls( primitive_type=GovernancePrimitive.ESCALATION, primitive_id="esc_test_001", - framework=RegulatoryFramework.ISO27001 + framework=RegulatoryFramework.ISO27001, ) stats = compliance_service.get_statistics() @@ -285,7 +282,7 @@ def test_all_frameworks_have_controls(self, compliance_service): for framework in [ RegulatoryFramework.SOC2, RegulatoryFramework.ISO27001, - RegulatoryFramework.GDPR + RegulatoryFramework.GDPR, ]: controls = compliance_service.list_controls(framework) assert len(controls) > 0, f"No controls defined for {framework.value}" @@ -331,19 +328,19 @@ def test_multiple_frameworks_coverage(self, compliance_service): soc2_mapping = compliance_service.map_primitive_to_controls( primitive_type=GovernancePrimitive.RISK_ASSESSMENT, primitive_id="rsk_multi_test", - framework=RegulatoryFramework.SOC2 + framework=RegulatoryFramework.SOC2, ) iso_mapping = compliance_service.map_primitive_to_controls( primitive_type=GovernancePrimitive.RISK_ASSESSMENT, primitive_id="rsk_multi_test", - framework=RegulatoryFramework.ISO27001 + framework=RegulatoryFramework.ISO27001, ) gdpr_mapping = compliance_service.map_primitive_to_controls( primitive_type=GovernancePrimitive.RISK_ASSESSMENT, primitive_id="rsk_multi_test", - framework=RegulatoryFramework.GDPR + framework=RegulatoryFramework.GDPR, ) assert len(soc2_mapping.control_ids) > 0 @@ -368,9 +365,7 @@ class FakeFramework(Enum): # This should return False for invalid framework result = compliance_service.link_evidence_to_control( - control_id="fake_control", - framework=FakeFramework.FAKE, - evidence_artifact_id="art_fake" + control_id="fake_control", framework=FakeFramework.FAKE, evidence_artifact_id="art_fake" ) 
assert result is False @@ -382,8 +377,7 @@ class FakeFramework(Enum): FAKE = "fake" result = compliance_service.verify_control( - control_id="fake_control", - framework=FakeFramework.FAKE + control_id="fake_control", framework=FakeFramework.FAKE ) assert result is False @@ -395,8 +389,7 @@ class FakeFramework(Enum): FAKE = "fake" result = compliance_service.get_control_status( - control_id="fake_control", - framework=FakeFramework.FAKE + control_id="fake_control", framework=FakeFramework.FAKE ) assert result is None @@ -442,13 +435,17 @@ class FakeFramework(Enum): def test_generate_report_with_non_compliant_controls(self, compliance_service): """Test report generation recommendations for non-compliant controls.""" - from lexecon.compliance_mapping.service import RegulatoryFramework, GovernancePrimitive, ControlStatus + from lexecon.compliance_mapping.service import ( + ControlStatus, + GovernancePrimitive, + RegulatoryFramework, + ) # Map a primitive compliance_service.map_primitive_to_controls( primitive_type=GovernancePrimitive.RISK_ASSESSMENT, primitive_id="rsk_test_123", - framework=RegulatoryFramework.SOC2 + framework=RegulatoryFramework.SOC2, ) # Get a control and mark it as non-compliant @@ -466,13 +463,17 @@ def test_generate_report_with_non_compliant_controls(self, compliance_service): def test_generate_report_with_unverified_controls(self, compliance_service): """Test report generation recommendations for unverified controls.""" - from lexecon.compliance_mapping.service import RegulatoryFramework, GovernancePrimitive, ControlStatus + from lexecon.compliance_mapping.service import ( + ControlStatus, + GovernancePrimitive, + RegulatoryFramework, + ) # Map a primitive compliance_service.map_primitive_to_controls( primitive_type=GovernancePrimitive.ESCALATION, primitive_id="esc_verify_test", - framework=RegulatoryFramework.ISO27001 + framework=RegulatoryFramework.ISO27001, ) # Get controls and mark some as implemented but not verified diff --git a/tests/test_decision_service.py b/tests/test_decision_service.py index 76b30b8..e9205d9 100644 --- a/tests/test_decision_service.py +++ b/tests/test_decision_service.py @@ -710,8 +710,10 @@ def test_decision_id_format(self, service_with_canonical): # Format: dec_<26 uppercase alphanumeric> assert decision_id.startswith("dec_") assert len(decision_id) == 30 # dec_ (4) + 26 chars - assert decision_id[4:].isupper() or decision_id[4:].isdigit() or all( - c in "0123456789ABCDEFGHJKMNPQRSTVWXYZ" for c in decision_id[4:] + assert ( + decision_id[4:].isupper() + or decision_id[4:].isdigit() + or all(c in "0123456789ABCDEFGHJKMNPQRSTVWXYZ" for c in decision_id[4:]) ) def test_response_has_decision_id(self, service_with_canonical): @@ -744,17 +746,13 @@ def test_canonical_actor_id_conversion(self): def test_canonical_actor_id_ai_agent(self): """Test AI agent actor conversion.""" for actor in ["model", "ai", "assistant"]: - request = DecisionRequest( - actor=actor, proposed_action="x", tool="t", user_intent="u" - ) + request = DecisionRequest(actor=actor, proposed_action="x", tool="t", user_intent="u") assert request.to_canonical_actor_id().startswith("act_ai_agent:") def test_canonical_actor_id_human(self): """Test human actor conversion.""" for actor in ["user", "human"]: - request = DecisionRequest( - actor=actor, proposed_action="x", tool="t", user_intent="u" - ) + request = DecisionRequest(actor=actor, proposed_action="x", tool="t", user_intent="u") assert request.to_canonical_actor_id().startswith("act_human_user:") def 
test_canonical_actor_id_already_formatted(self): @@ -863,6 +861,7 @@ def test_unique_decision_ids(self, service_with_canonical): def test_ulid_sortability(self): """Test that generated ULIDs are chronologically sortable.""" import time + from lexecon.decision.service import generate_decision_id ids = [] @@ -1075,7 +1074,7 @@ def test_to_canonical_dict_without_canonical(self): request_id="req_123", decision="allowed", reasoning="Test decision", - policy_version_hash="abc123" + policy_version_hash="abc123", ) # Don't set _canonical_decision @@ -1090,6 +1089,7 @@ class TestDecisionServiceFiltering: def policy_engine(self): """Create a policy engine.""" from lexecon.policy.engine import PolicyEngine, PolicyMode + return PolicyEngine(mode=PolicyMode.PERMISSIVE) @pytest.fixture @@ -1106,14 +1106,14 @@ def test_list_canonical_decisions_with_outcome_filter(self, service): actor="user", proposed_action="read", tool="database", - user_intent="Read data" + user_intent="Read data", ) request2 = DecisionRequest( request_id="req_2", actor="user", proposed_action="delete", tool="database", - user_intent="Delete data" + user_intent="Delete data", ) response1 = service.evaluate_request(request1) @@ -1122,6 +1122,7 @@ def test_list_canonical_decisions_with_outcome_filter(self, service): # Get decisions filtered by outcome (if governance models available) try: from model_governance_pack.models import DecisionOutcome + approved_decisions = service.list_canonical_decisions( limit=10, outcome=DecisionOutcome.APPROVED ) @@ -1141,7 +1142,7 @@ def test_export_decisions_with_time_filters(self, service): actor="user", proposed_action="read", tool="database", - user_intent="Read data" + user_intent="Read data", ) service.evaluate_request(request) @@ -1150,10 +1151,7 @@ def test_export_decisions_with_time_filters(self, service): start_time = now - timedelta(hours=1) end_time = now + timedelta(hours=1) - decisions = service.export_decisions_for_audit( - start_time=start_time, - end_time=end_time - ) + decisions = service.export_decisions_for_audit(start_time=start_time, end_time=end_time) # Should have the decision we just made assert len(decisions) > 0 @@ -1168,7 +1166,7 @@ def test_export_decisions_with_start_time_only(self, service): actor="user", proposed_action="read", tool="database", - user_intent="Read data" + user_intent="Read data", ) service.evaluate_request(request) @@ -1187,7 +1185,7 @@ def test_export_decisions_with_end_time_only(self, service): actor="user", proposed_action="read", tool="database", - user_intent="Read data" + user_intent="Read data", ) service.evaluate_request(request) diff --git a/tests/test_escalation_service.py b/tests/test_escalation_service.py index 9cefe3b..2223c47 100644 --- a/tests/test_escalation_service.py +++ b/tests/test_escalation_service.py @@ -1,28 +1,29 @@ """Tests for escalation service.""" -import pytest from datetime import datetime, timedelta, timezone +import pytest + from lexecon.escalation.service import ( - EscalationService, EscalationConfig, - generate_escalation_id, + EscalationService, NotificationEvent, + generate_escalation_id, ) # Import canonical governance models try: from model_governance_pack.models import ( Escalation, - EscalationTrigger, - EscalationStatus, EscalationPriority, + EscalationStatus, + EscalationTrigger, + EvidenceArtifact, Resolution, ResolutionOutcome, Risk, RiskDimensions, RiskLevel, - EvidenceArtifact, ) GOVERNANCE_MODELS_AVAILABLE = True @@ -214,13 +215,9 @@ def test_create_escalation_generates_evidence(self, service): 
escalated_to=["act_human_user:reviewer1"], ) - artifacts = service.get_evidence_artifacts( - escalation_id=escalation.escalation_id - ) + artifacts = service.get_evidence_artifacts(escalation_id=escalation.escalation_id) assert len(artifacts) >= 1 - assert any( - a.metadata.get("event_type") == "escalation_created" for a in artifacts - ) + assert any(a.metadata.get("event_type") == "escalation_created" for a in artifacts) def test_auto_escalate_for_high_risk(self, service, high_risk): """Test auto-escalation for high-risk decision.""" @@ -602,9 +599,7 @@ def test_evidence_for_escalation_created(self, service): escalated_to=["act_human_user:reviewer1"], ) - artifacts = service.get_evidence_artifacts( - escalation_id=escalation.escalation_id - ) + artifacts = service.get_evidence_artifacts(escalation_id=escalation.escalation_id) created_artifacts = [ a for a in artifacts if a.metadata.get("event_type") == "escalation_created" @@ -639,9 +634,7 @@ def test_evidence_for_all_lifecycle_events(self, service): ) # Check artifacts - artifacts = service.get_evidence_artifacts( - escalation_id=escalation.escalation_id - ) + artifacts = service.get_evidence_artifacts(escalation_id=escalation.escalation_id) event_types = {a.metadata.get("event_type") for a in artifacts} assert "escalation_created" in event_types @@ -660,9 +653,7 @@ def test_notification_artifacts_created(self, service): artifacts = service.get_evidence_artifacts(decision_id=escalation.decision_id) # Should have artifacts for notifications - notification_artifacts = [ - a for a in artifacts if a.metadata.get("notification_type") - ] + notification_artifacts = [a for a in artifacts if a.metadata.get("notification_type")] assert len(notification_artifacts) >= 1 def test_evidence_disabled(self): @@ -677,9 +668,7 @@ def test_evidence_disabled(self): # Escalation should exist but no evidence assert escalation is not None - artifacts = service.get_evidence_artifacts( - escalation_id=escalation.escalation_id - ) + artifacts = service.get_evidence_artifacts(escalation_id=escalation.escalation_id) assert len(artifacts) == 0 @@ -725,9 +714,7 @@ def test_complete_escalation_workflow(self): assert resolved.resolution.outcome == ResolutionOutcome.APPROVED # 4. 
Verify evidence trail - artifacts = service.get_evidence_artifacts( - escalation_id=escalation.escalation_id - ) + artifacts = service.get_evidence_artifacts(escalation_id=escalation.escalation_id) assert len(artifacts) >= 3 # Created, acknowledged, resolved def test_auto_escalation_workflow(self): @@ -760,7 +747,5 @@ def test_auto_escalation_workflow(self): assert escalation.metadata["auto_escalated"] is True # Verify notifications - notifications = service.get_notifications( - escalation_id=escalation.escalation_id - ) + notifications = service.get_notifications(escalation_id=escalation.escalation_id) assert len(notifications) >= 1 diff --git a/tests/test_evidence_service.py b/tests/test_evidence_service.py index 380aab2..bf2142c 100644 --- a/tests/test_evidence_service.py +++ b/tests/test_evidence_service.py @@ -1,22 +1,23 @@ """Tests for evidence service.""" -import pytest from datetime import datetime, timedelta, timezone +import pytest + from lexecon.evidence.service import ( - EvidenceService, - EvidenceConfig, ArtifactBuilder, - generate_artifact_id, + EvidenceConfig, + EvidenceService, compute_sha256, + generate_artifact_id, ) # Import canonical governance models try: from model_governance_pack.models import ( - EvidenceArtifact, ArtifactType, DigitalSignature, + EvidenceArtifact, ) GOVERNANCE_MODELS_AVAILABLE = True @@ -600,9 +601,7 @@ def test_store_artifact_with_bytes_content(self): # Store with bytes content (not string) content_bytes = b"Binary content data" artifact = service.store_artifact( - artifact_type=ArtifactType.SCREENSHOT, - content=content_bytes, - source="screenshot_tool" + artifact_type=ArtifactType.SCREENSHOT, content=content_bytes, source="screenshot_tool" ) assert artifact.artifact_id.startswith("evd_screenshot_") @@ -736,5 +735,6 @@ def test_enable_on_already_wrapped_store(self): # Verify append-only enforcement works from lexecon.evidence.append_only_store import AppendOnlyViolationError + with pytest.raises(AppendOnlyViolationError): service._artifacts[artifact.artifact_id] = artifact # Try to update diff --git a/tests/test_export_determinism.py b/tests/test_export_determinism.py index 4170e9d..dde37bc 100644 --- a/tests/test_export_determinism.py +++ b/tests/test_export_determinism.py @@ -5,14 +5,16 @@ and parameters produce byte-identical outputs. """ -import json import hashlib -import pytest +import json from datetime import datetime, timezone + +import pytest + from lexecon.audit_export.service import ( AuditExportService, - ExportScope, ExportFormat, + ExportScope, ExportStatus, ) @@ -109,9 +111,9 @@ def test_timestamp_format_consistent(self, export_service): if generated_at: assert "T" in generated_at # Has time component # Has timezone (Z or +/-offset) - assert (generated_at.endswith("Z") or - "+" in generated_at or - generated_at.endswith("+00:00")) + assert ( + generated_at.endswith("Z") or "+" in generated_at or generated_at.endswith("+00:00") + ) def test_empty_export_reproducible(self, export_service): """ diff --git a/tests/test_governance_api.py b/tests/test_governance_api.py index a1dbef4..d46566d 100644 --- a/tests/test_governance_api.py +++ b/tests/test_governance_api.py @@ -4,8 +4,9 @@ Tests REST API endpoints for Risk, Escalation, Override, and Evidence services. 
""" -import pytest from datetime import datetime, timedelta, timezone + +import pytest from fastapi.testclient import TestClient from lexecon.api.server import app diff --git a/tests/test_governance_models.py b/tests/test_governance_models.py index d624130..143bfdd 100644 --- a/tests/test_governance_models.py +++ b/tests/test_governance_models.py @@ -10,62 +10,50 @@ import pytest from pydantic import ValidationError -from model_governance_pack.models import ( - # Action +from model_governance_pack.models import ( # Action; Actor; Compliance Control; Context; Decision; Escalation; Evidence Artifact; Override; Policy; Resource; Risk Action, ActionCategory, - # Actor Actor, ActorType, - # Compliance Control + ArtifactType, + Behavioral, ComplianceControl, ComplianceFramework, - # Context - Behavioral, + Constraint, Context, - DeploymentEnvironment, - Environment, - Temporal, - # Decision Decision, DecisionOutcome, - # Escalation + DeploymentEnvironment, + DigitalSignature, + Environment, Escalation, EscalationPriority, EscalationStatus, EscalationTrigger, - Resolution, - ResolutionOutcome, - # Evidence Artifact - ArtifactType, - DigitalSignature, EvidenceArtifact, - # Override NewOutcome, OriginalOutcome, Override, OverrideScope, OverrideType, - # Policy - Constraint, Policy, PolicyMode, Relation, RelationType, - Term, - TermType, - # Resource + Resolution, + ResolutionOutcome, Resource, ResourceClassification, ResourceType, - # Risk Risk, RiskDimensions, RiskFactor, RiskLevel, + Temporal, + Term, + TermType, ) - # ============================================================================= # Decision Model Tests # ============================================================================= diff --git a/tests/test_identity.py b/tests/test_identity.py index 8af89d9..903db3c 100644 --- a/tests/test_identity.py +++ b/tests/test_identity.py @@ -76,8 +76,7 @@ def test_load_keys_from_disk(self): # Fingerprints should match assert ( - km_loaded.get_public_key_fingerprint() - == km_original.get_public_key_fingerprint() + km_loaded.get_public_key_fingerprint() == km_original.get_public_key_fingerprint() ) def test_load_public_key_from_disk(self): @@ -341,6 +340,7 @@ def test_verify_signature_with_string_data(self): # Sign it manually through key manager import base64 + message = hash_string.encode() signature_bytes = node.key_manager.private_key.sign(message) signature = base64.b64encode(signature_bytes).decode() @@ -359,6 +359,7 @@ def test_verify_signature_fails_with_wrong_data(self): # Sign original import base64 + message = original_data.encode() signature_bytes = node.key_manager.private_key.sign(message) signature = base64.b64encode(signature_bytes).decode() @@ -410,6 +411,7 @@ def test_node_can_verify_own_signature(self): # Convert dict to canonical JSON for verification import json + canonical = json.dumps(data, sort_keys=True, separators=(",", ":")) # This should work with the node's verify_signature method diff --git a/tests/test_ledger.py b/tests/test_ledger.py index 249588a..fbec8c5 100644 --- a/tests/test_ledger.py +++ b/tests/test_ledger.py @@ -154,6 +154,7 @@ def test_serialization(self): def test_storage_with_empty_entries(self): """Test ledger initialization with storage that returns empty list.""" + # Mock storage that returns empty list class MockEmptyStorage: def __init__(self): @@ -236,6 +237,7 @@ def save_entry(self, entry): def test_storage_saves_on_append(self): """Test that storage saves entry when appending.""" + class MockStorage: def __init__(self): self.saved_entries = 
[] @@ -270,5 +272,6 @@ def test_from_dict_hash_mismatch(self): # Should raise ValueError import pytest + with pytest.raises(ValueError, match="Hash mismatch"): LedgerChain.from_dict(data) diff --git a/tests/test_logging.py b/tests/test_logging.py index 4980b9f..61daa9f 100644 --- a/tests/test_logging.py +++ b/tests/test_logging.py @@ -411,9 +411,7 @@ def test_adapter_preserves_existing_extra(self): base_logger = logging.getLogger("test") adapter = LoggerAdapter(base_logger, {}) - msg, kwargs = adapter.process( - "Test message", {"extra": {"custom": "field"}} - ) + msg, kwargs = adapter.process("Test message", {"extra": {"custom": "field"}}) assert kwargs["extra"]["custom"] == "field" assert kwargs["extra"]["request_id"] == "req_777" diff --git a/tests/test_metrics.py b/tests/test_metrics.py index 8721bf9..315da45 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -176,7 +176,7 @@ def test_export_metrics(self): assert len(output) > 0 # Should contain Prometheus format markers - decoded = output.decode('utf-8') + decoded = output.decode("utf-8") assert "# HELP" in decoded or "# TYPE" in decoded @@ -190,15 +190,11 @@ def test_global_metrics_exists(self): def test_global_metrics_record_decision(self): """Test using global metrics instance.""" - initial = decisions_total.labels( - allowed="False", actor="user", risk_level="3" - )._value.get() + initial = decisions_total.labels(allowed="False", actor="user", risk_level="3")._value.get() metrics.record_decision(allowed=False, actor="user", risk_level=3, duration=0.2) - final = decisions_total.labels( - allowed="False", actor="user", risk_level="3" - )._value.get() + final = decisions_total.labels(allowed="False", actor="user", risk_level="3")._value.get() assert final > initial @@ -208,15 +204,11 @@ class TestConvenienceFunctions: def test_record_decision_function(self): """Test record_decision convenience function.""" - initial = decisions_total.labels( - allowed="True", actor="bot", risk_level="2" - )._value.get() + initial = decisions_total.labels(allowed="True", actor="bot", risk_level="2")._value.get() record_decision(allowed=True, actor="bot", risk_level=2, duration=0.15) - final = decisions_total.labels( - allowed="True", actor="bot", risk_level="2" - )._value.get() + final = decisions_total.labels(allowed="True", actor="bot", risk_level="2")._value.get() assert final > initial @@ -244,9 +236,7 @@ def test_counter_increments(self): for _ in range(5): metrics.record_request("GET", "/test", 200, 0.1) - final = http_requests_total.labels( - method="GET", endpoint="/test", status=200 - )._value.get() + final = http_requests_total.labels(method="GET", endpoint="/test", status=200)._value.get() # Should have incremented by 5 assert final >= initial + 5 @@ -384,15 +374,15 @@ class TestPrometheusExport: def test_export_format(self): """Test Prometheus export format.""" output = metrics.export_metrics() - decoded = output.decode('utf-8') + decoded = output.decode("utf-8") # Should contain metric definitions assert "lexecon_" in decoded # Should contain HELP and TYPE comments - lines = decoded.split('\n') - help_lines = [l for l in lines if l.startswith('# HELP')] - type_lines = [l for l in lines if l.startswith('# TYPE')] + lines = decoded.split("\n") + help_lines = [l for l in lines if l.startswith("# HELP")] + type_lines = [l for l in lines if l.startswith("# TYPE")] assert len(help_lines) > 0 assert len(type_lines) > 0 @@ -404,7 +394,7 @@ def test_export_includes_values(self): metrics.record_ledger_entry() output = 
metrics.export_metrics() - decoded = output.decode('utf-8') + decoded = output.decode("utf-8") # Should contain metric values (numbers) assert any(char.isdigit() for char in decoded) @@ -412,19 +402,19 @@ def test_export_includes_values(self): def test_export_is_valid_prometheus_format(self): """Test that export is valid Prometheus format.""" output = metrics.export_metrics() - decoded = output.decode('utf-8') + decoded = output.decode("utf-8") - lines = decoded.split('\n') + lines = decoded.split("\n") # Each metric line should have format: metric_name{labels} value - metric_lines = [l for l in lines if l and not l.startswith('#')] + metric_lines = [l for l in lines if l and not l.startswith("#")] for line in metric_lines[:10]: # Check first 10 - if '{' in line: + if "{" in line: # Has labels - assert '}' in line - assert ' ' in line # Space before value - elif ' ' in line and line.strip(): + assert "}" in line + assert " " in line # Space before value + elif " " in line and line.strip(): # No labels, just name and value parts = line.split() assert len(parts) >= 2 diff --git a/tests/test_middleware.py b/tests/test_middleware.py index 2e11a30..265660c 100644 --- a/tests/test_middleware.py +++ b/tests/test_middleware.py @@ -1,11 +1,12 @@ """Tests for authentication middleware.""" -import pytest from unittest.mock import Mock + +import pytest from fastapi import Request +from lexecon.security.auth_service import Permission, Role, Session from lexecon.security.middleware import get_current_user, require_permission -from lexecon.security.auth_service import Role, Permission, Session class TestGetCurrentUser: diff --git a/tests/test_override_service.py b/tests/test_override_service.py index 0647f9d..26d558f 100644 --- a/tests/test_override_service.py +++ b/tests/test_override_service.py @@ -1,11 +1,12 @@ """Tests for override service.""" -import pytest from datetime import datetime, timedelta, timezone +import pytest + from lexecon.override.service import ( - OverrideService, OverrideConfig, + OverrideService, OverrideValidator, generate_override_id, ) @@ -13,12 +14,12 @@ # Import canonical governance models try: from model_governance_pack.models import ( - Override, - OverrideType, - OriginalOutcome, + EvidenceArtifact, NewOutcome, + OriginalOutcome, + Override, OverrideScope, - EvidenceArtifact, + OverrideType, ) GOVERNANCE_MODELS_AVAILABLE = True @@ -116,23 +117,15 @@ def test_is_authorized_executive(self, service, executive_actor): def test_is_authorized_governance_lead(self, service, governance_actor): """Test that governance lead is authorized for non-executive overrides.""" assert service.is_authorized(governance_actor, OverrideType.RISK_ACCEPTED) - assert service.is_authorized( - governance_actor, OverrideType.TIME_LIMITED_EXCEPTION - ) + assert service.is_authorized(governance_actor, OverrideType.TIME_LIMITED_EXCEPTION) # But not for executive-only types - assert not service.is_authorized( - governance_actor, OverrideType.EMERGENCY_BYPASS - ) - assert not service.is_authorized( - governance_actor, OverrideType.EXECUTIVE_OVERRIDE - ) + assert not service.is_authorized(governance_actor, OverrideType.EMERGENCY_BYPASS) + assert not service.is_authorized(governance_actor, OverrideType.EXECUTIVE_OVERRIDE) def test_is_authorized_unauthorized_actor(self, service, unauthorized_actor): """Test that unauthorized actors cannot override.""" - assert not service.is_authorized( - unauthorized_actor, OverrideType.RISK_ACCEPTED - ) + assert not service.is_authorized(unauthorized_actor, 
OverrideType.RISK_ACCEPTED) def test_create_override_valid(self, service, executive_actor): """Test creating a valid override.""" @@ -466,9 +459,7 @@ def test_get_decision_with_override_status_not_overridden(self, service): assert enriched["decision"] == "deny" # Original preserved assert enriched["override_status"]["is_overridden"] is False - def test_get_decision_with_override_status_overridden( - self, service, executive_actor - ): + def test_get_decision_with_override_status_overridden(self, service, executive_actor): """Test enriching decision data when overridden.""" decision_id = "dec_01JQXYZ1234567890ABCDEFGH" decision_data = { @@ -496,10 +487,7 @@ def test_get_decision_with_override_status_overridden( # Override status added assert enriched["override_status"]["is_overridden"] is True assert enriched["override_status"]["override_id"] == override.override_id - assert ( - enriched["override_status"]["override_type"] - == OverrideType.EXECUTIVE_OVERRIDE.value - ) + assert enriched["override_status"]["override_type"] == OverrideType.EXECUTIVE_OVERRIDE.value assert enriched["override_status"]["original_outcome"] == "denied" assert enriched["override_status"]["new_outcome"] == "approved" @@ -559,34 +547,26 @@ def test_validate_time_limit_valid(self): """Test validating valid time limit.""" future = datetime.now(timezone.utc) + timedelta(hours=24) assert ( - OverrideValidator.validate_time_limit( - OverrideType.TIME_LIMITED_EXCEPTION, future - ) + OverrideValidator.validate_time_limit(OverrideType.TIME_LIMITED_EXCEPTION, future) is True ) def test_validate_time_limit_missing(self): """Test that time-limited exception requires expiration.""" with pytest.raises(ValueError, match="must have expiration"): - OverrideValidator.validate_time_limit( - OverrideType.TIME_LIMITED_EXCEPTION, None - ) + OverrideValidator.validate_time_limit(OverrideType.TIME_LIMITED_EXCEPTION, None) def test_validate_time_limit_past(self): """Test that expiration must be in future.""" past = datetime.now(timezone.utc) - timedelta(hours=1) with pytest.raises(ValueError, match="must be in the future"): - OverrideValidator.validate_time_limit( - OverrideType.TIME_LIMITED_EXCEPTION, past - ) + OverrideValidator.validate_time_limit(OverrideType.TIME_LIMITED_EXCEPTION, past) def test_validate_time_limit_too_long(self): """Test that time limit cannot exceed maximum.""" too_far = datetime.now(timezone.utc) + timedelta(days=100) with pytest.raises(ValueError, match="cannot exceed 90 days"): - OverrideValidator.validate_time_limit( - OverrideType.TIME_LIMITED_EXCEPTION, too_far - ) + OverrideValidator.validate_time_limit(OverrideType.TIME_LIMITED_EXCEPTION, too_far) def test_validate_scope_emergency_bypass(self): """Test that emergency bypass must be one-time.""" @@ -601,10 +581,7 @@ def test_validate_scope_emergency_bypass(self): # Valid scope valid_scope = OverrideScope(is_one_time=True) - assert ( - OverrideValidator.validate_scope(OverrideType.EMERGENCY_BYPASS, valid_scope) - is True - ) + assert OverrideValidator.validate_scope(OverrideType.EMERGENCY_BYPASS, valid_scope) is True class TestIntegrationWorkflows: @@ -730,9 +707,7 @@ def test_decision_integrity_preserved(self): ) # Get enriched view - enriched = service.get_decision_with_override_status( - decision_id, stored_decision - ) + enriched = service.get_decision_with_override_status(decision_id, stored_decision) # Original decision unchanged assert stored_decision == original_decision diff --git a/tests/test_policy.py b/tests/test_policy.py index 12c9e8a..8a66e00 
100644 --- a/tests/test_policy.py +++ b/tests/test_policy.py @@ -113,34 +113,26 @@ def test_post_init_with_none_conditions(self): relation_type=RelationType.PERMITS, source="actor:user", target="action:read", - conditions=None # Explicitly None + conditions=None, # Explicitly None ) assert relation.conditions == [] assert relation.metadata == {} def test_from_dict_missing_relation_type(self): """Test from_dict raises ValueError when relation_type is missing.""" - data = { - "source": "actor:user", - "target": "action:read" - } + data = {"source": "actor:user", "target": "action:read"} with pytest.raises(ValueError, match="Missing relation_type or type field"): PolicyRelation.from_dict(data) def test_from_dict_missing_source_and_target(self): """Test from_dict raises ValueError when both source and target are missing.""" - data = { - "relation_type": "permits" - } + data = {"relation_type": "permits"} with pytest.raises(ValueError, match="Missing source/target"): PolicyRelation.from_dict(data) def test_from_dict_only_source(self): """Test from_dict uses source for target when target is missing.""" - data = { - "relation_type": "permits", - "source": "actor:user" - } + data = {"relation_type": "permits", "source": "actor:user"} relation = PolicyRelation.from_dict(data) assert relation.source == "actor:user" assert relation.target == "actor:user" # Should copy source to target @@ -149,7 +141,7 @@ def test_from_dict_only_target(self): """Test from_dict uses target for source when source is missing.""" data = { "relation_type": "permits", - "action": "action:read" # Using 'action' as alias for 'target' + "action": "action:read", # Using 'action' as alias for 'target' } relation = PolicyRelation.from_dict(data) assert relation.source == "action:read" # Should copy target to source @@ -163,7 +155,7 @@ def test_from_dict_with_object_and_justification(self): "target": "action:read", "object": "data:pii", "justification": "User has clearance", - "condition": "during_business_hours" + "condition": "during_business_hours", } relation = PolicyRelation.from_dict(data) assert relation.metadata["object"] == "data:pii" diff --git a/tests/test_risk_service.py b/tests/test_risk_service.py index 2ec26e2..5679dde 100644 --- a/tests/test_risk_service.py +++ b/tests/test_risk_service.py @@ -1,23 +1,24 @@ """Tests for risk assessment service.""" -import pytest from datetime import datetime +import pytest + from lexecon.risk.service import ( - RiskService, RiskScoringEngine, - generate_risk_id, + RiskService, generate_evidence_id, + generate_risk_id, ) # Import canonical governance models try: from model_governance_pack.models import ( + EvidenceArtifact, Risk, RiskDimensions, RiskFactor, RiskLevel, - EvidenceArtifact, ) GOVERNANCE_MODELS_AVAILABLE = True @@ -313,9 +314,7 @@ def test_list_risks_no_filter(self, service): # Create multiple risk assessments for i in range(5): dimensions = RiskDimensions(security=20 * i) - service.assess_risk( - decision_id=f"dec_01JQXYZ{i:022d}", dimensions=dimensions - ) + service.assess_risk(decision_id=f"dec_01JQXYZ{i:022d}", dimensions=dimensions) risks = service.list_risks() assert len(risks) == 5 @@ -375,9 +374,7 @@ def test_evidence_artifact_generation(self, service): def test_evidence_artifact_immutability(self, service): """Test that evidence artifacts have SHA-256 hashes.""" dimensions = RiskDimensions(security=70) - service.assess_risk( - decision_id="dec_01JQXYZ1234567890ABCDEFGH", dimensions=dimensions - ) + 
service.assess_risk(decision_id="dec_01JQXYZ1234567890ABCDEFGH", dimensions=dimensions) artifacts = service.list_evidence_artifacts() artifact = artifacts[0] @@ -422,9 +419,7 @@ def test_list_evidence_artifacts_by_decision(self, service): service.assess_risk("dec_01JQXYZ2222222222222222222", RiskDimensions(security=70)) # Filter by specific decision - artifacts = service.list_evidence_artifacts( - decision_id="dec_01JQXYZ1111111111111111111" - ) + artifacts = service.list_evidence_artifacts(decision_id="dec_01JQXYZ1111111111111111111") assert len(artifacts) == 1 assert "dec_01JQXYZ1111111111111111111" in artifacts[0].related_decision_ids @@ -469,9 +464,7 @@ def test_complete_risk_assessment_workflow(self): assert retrieved_risk.risk_id == risk.risk_id # 4. Verify evidence artifact - artifacts = service.list_evidence_artifacts( - decision_id="dec_01JQXYZ1234567890ABCDEFGH" - ) + artifacts = service.list_evidence_artifacts(decision_id="dec_01JQXYZ1234567890ABCDEFGH") assert len(artifacts) == 1 assert artifacts[0].is_immutable is True diff --git a/tests/test_security.py b/tests/test_security.py index cb6c571..485cdbf 100644 --- a/tests/test_security.py +++ b/tests/test_security.py @@ -6,8 +6,9 @@ """ import os -import tempfile import shutil +import tempfile + import pytest from lexecon.security.auth_service import AuthService, Role @@ -117,10 +118,7 @@ def test_create_session(self, auth_service): full_name="Session User", ) - session = auth_service.create_session( - user=user, - ip_address="192.168.1.1" - ) + session = auth_service.create_session(user=user, ip_address="192.168.1.1") assert session is not None assert session.username == "sessionuser" @@ -363,7 +361,7 @@ def test_verify_packet_with_missing_signature(self, signature_service): "signature_info": { "algorithm": "RSA-PSS-SHA256" # Missing "signature" field - } + }, } is_valid, message = signature_service.verify_packet_signature(packet_data) diff --git a/tests/test_storage_persistence.py b/tests/test_storage_persistence.py index eea90a5..bbf619d 100644 --- a/tests/test_storage_persistence.py +++ b/tests/test_storage_persistence.py @@ -36,7 +36,7 @@ def sample_entry(): event_type="test_event", data={"key": "value"}, previous_hash="prev_hash_abc", - timestamp=datetime.now(timezone.utc).isoformat() + timestamp=datetime.now(timezone.utc).isoformat(), ) @@ -52,6 +52,7 @@ def test_init_creates_database(self, temp_db): # Tables should be created import sqlite3 + conn = sqlite3.connect(temp_db) cursor = conn.cursor() @@ -70,6 +71,7 @@ def test_save_entry(self, storage, sample_entry): # Entry should be saved import sqlite3 + conn = sqlite3.connect(storage.db_path) cursor = conn.cursor() @@ -91,7 +93,7 @@ def test_load_all_entries(self, storage): event_type="test", data={"index": i}, previous_hash=f"prev_{i}", - timestamp=datetime.now(timezone.utc).isoformat() + timestamp=datetime.now(timezone.utc).isoformat(), ) storage.save_entry(entry) @@ -103,27 +105,33 @@ def test_load_all_entries(self, storage): def test_get_entries_by_type(self, storage): """Test filtering entries by event type.""" # Create entries with different types - storage.save_entry(LedgerEntry( - entry_id="e1", - event_type="decision", - data={}, - previous_hash="prev", - timestamp=datetime.now(timezone.utc).isoformat() - )) - storage.save_entry(LedgerEntry( - entry_id="e2", - event_type="policy", - data={}, - previous_hash="prev", - timestamp=datetime.now(timezone.utc).isoformat() - )) - storage.save_entry(LedgerEntry( - entry_id="e3", - event_type="decision", - data={}, - 
previous_hash="prev", - timestamp=datetime.now(timezone.utc).isoformat() - )) + storage.save_entry( + LedgerEntry( + entry_id="e1", + event_type="decision", + data={}, + previous_hash="prev", + timestamp=datetime.now(timezone.utc).isoformat(), + ) + ) + storage.save_entry( + LedgerEntry( + entry_id="e2", + event_type="policy", + data={}, + previous_hash="prev", + timestamp=datetime.now(timezone.utc).isoformat(), + ) + ) + storage.save_entry( + LedgerEntry( + entry_id="e3", + event_type="decision", + data={}, + previous_hash="prev", + timestamp=datetime.now(timezone.utc).isoformat(), + ) + ) decisions = storage.get_entries_by_type("decision") @@ -136,13 +144,15 @@ def test_get_entry_count(self, storage): # Add some entries for i in range(5): - storage.save_entry(LedgerEntry( - entry_id=f"e{i}", - event_type="test", - data={}, - previous_hash="prev", - timestamp=datetime.now(timezone.utc).isoformat() - )) + storage.save_entry( + LedgerEntry( + entry_id=f"e{i}", + event_type="test", + data={}, + previous_hash="prev", + timestamp=datetime.now(timezone.utc).isoformat(), + ) + ) assert storage.get_entry_count() == 5 @@ -167,13 +177,15 @@ def test_get_statistics(self, storage): """Test getting storage statistics.""" # Add some entries for i in range(5): - storage.save_entry(LedgerEntry( - entry_id=f"e{i}", - event_type="test" if i % 2 == 0 else "other", - data={}, - previous_hash="prev", - timestamp=datetime.now(timezone.utc).isoformat() - )) + storage.save_entry( + LedgerEntry( + entry_id=f"e{i}", + event_type="test" if i % 2 == 0 else "other", + data={}, + previous_hash="prev", + timestamp=datetime.now(timezone.utc).isoformat(), + ) + ) stats = storage.get_statistics() @@ -216,7 +228,7 @@ def test_hash_mismatch_detection_on_load(self, storage, temp_db): event_type="test", data={"test": "data"}, previous_hash="prev", - timestamp=datetime.now(timezone.utc).isoformat() + timestamp=datetime.now(timezone.utc).isoformat(), ) # Save entry normally @@ -225,11 +237,14 @@ def test_hash_mismatch_detection_on_load(self, storage, temp_db): # Tamper with the stored hash directly in database conn = sqlite3.connect(temp_db) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ UPDATE ledger_entries SET entry_hash = 'tampered_hash' WHERE entry_id = ? - """, (entry.entry_id,)) + """, + (entry.entry_id,), + ) conn.commit() conn.close() @@ -247,7 +262,7 @@ def test_hash_mismatch_detection_by_type(self, storage, temp_db): event_type="testtype", data={"test": "data"}, previous_hash="prev", - timestamp=datetime.now(timezone.utc).isoformat() + timestamp=datetime.now(timezone.utc).isoformat(), ) # Save entry normally @@ -256,11 +271,14 @@ def test_hash_mismatch_detection_by_type(self, storage, temp_db): # Tamper with the stored hash conn = sqlite3.connect(temp_db) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ UPDATE ledger_entries SET entry_hash = 'bad_hash' WHERE entry_id = ? 
- """, (entry.entry_id,)) + """, + (entry.entry_id,), + ) conn.commit() conn.close() @@ -279,18 +297,20 @@ def test_verify_chain_integrity_with_corruption(self, storage, temp_db): event_type="test", data={"index": i}, previous_hash=f"prev_{i}", - timestamp=datetime.now(timezone.utc).isoformat() + timestamp=datetime.now(timezone.utc).isoformat(), ) storage.save_entry(entry) # Corrupt one entry's hash conn = sqlite3.connect(temp_db) cursor = conn.cursor() - cursor.execute(""" + cursor.execute( + """ UPDATE ledger_entries SET entry_hash = 'corrupted' WHERE entry_id = 'entry_1' - """) + """ + ) conn.commit() conn.close() @@ -310,7 +330,7 @@ def test_export_to_json(self, storage, temp_db): event_type="test", data={"index": i}, previous_hash=f"prev_{i}", - timestamp=datetime.now(timezone.utc).isoformat() + timestamp=datetime.now(timezone.utc).isoformat(), ) storage.save_entry(entry) @@ -321,7 +341,7 @@ def test_export_to_json(self, storage, temp_db): # Verify export file exists and has correct structure assert os.path.exists(output_path) - with open(output_path, 'r') as f: + with open(output_path, "r") as f: data = json.load(f) assert "exported_at" in data diff --git a/tests/test_tracing.py b/tests/test_tracing.py index 603226c..54e4470 100644 --- a/tests/test_tracing.py +++ b/tests/test_tracing.py @@ -33,8 +33,8 @@ def test_initialization(self): manager = TracingManager() assert manager is not None - assert hasattr(manager, 'enabled') - assert hasattr(manager, 'tracer') + assert hasattr(manager, "enabled") + assert hasattr(manager, "tracer") def test_enabled_status(self): """Test that enabled status matches availability.""" @@ -62,8 +62,8 @@ def test_start_span_when_enabled(self): assert span is not None # Span should be context manager - assert hasattr(span, '__enter__') - assert hasattr(span, '__exit__') + assert hasattr(span, "__enter__") + assert hasattr(span, "__exit__") def test_start_span_when_disabled(self): """Test starting a span when tracing is disabled.""" @@ -78,12 +78,7 @@ def test_span_with_attributes(self): """Test creating span with multiple attributes.""" manager = TracingManager() - span = manager.start_span( - "attributed_span", - attr1="value1", - attr2=42, - attr3=True - ) + span = manager.start_span("attributed_span", attr1="value1", attr2=42, attr3=True) assert span is not None @@ -104,6 +99,7 @@ class TestTraceFunctionDecorator: def test_decorator_without_name(self): """Test decorator without explicit name.""" + @trace_function() def test_func(): return "result" @@ -113,6 +109,7 @@ def test_func(): def test_decorator_with_name(self): """Test decorator with explicit span name.""" + @trace_function(name="custom_span_name") def test_func(): return "result" @@ -122,6 +119,7 @@ def test_func(): def test_decorator_preserves_function_behavior(self): """Test that decorator doesn't change function behavior.""" + @trace_function() def add(a, b): return a + b @@ -131,24 +129,21 @@ def add(a, b): def test_decorator_with_arguments(self): """Test decorating function with various arguments.""" + @trace_function() def complex_func(x, y, *args, **kwargs): - return { - 'x': x, - 'y': y, - 'args': args, - 'kwargs': kwargs - } + return {"x": x, "y": y, "args": args, "kwargs": kwargs} result = complex_func(1, 2, 3, 4, key="value") - assert result['x'] == 1 - assert result['y'] == 2 - assert result['args'] == (3, 4) - assert result['kwargs'] == {'key': 'value'} + assert result["x"] == 1 + assert result["y"] == 2 + assert result["args"] == (3, 4) + assert result["kwargs"] == {"key": "value"} 
def test_decorator_with_exception(self): """Test decorator handles exceptions properly.""" + @trace_function() def failing_func(): raise ValueError("Test error") @@ -158,6 +153,7 @@ def failing_func(): def test_decorator_exception_still_raises(self): """Test that exceptions are still raised after tracing.""" + @trace_function() def error_func(should_error): if should_error: @@ -173,6 +169,7 @@ def error_func(should_error): def test_decorator_on_class_method(self): """Test decorator on class methods.""" + class TestClass: @trace_function() def method(self, value): @@ -183,6 +180,7 @@ def method(self, value): def test_decorator_on_static_method(self): """Test decorator on static methods.""" + class TestClass: @staticmethod @trace_function() @@ -193,6 +191,7 @@ def static_method(value): def test_decorator_preserves_docstring(self): """Test that decorator preserves function docstring.""" + @trace_function() def documented_func(): """This is a docstring.""" @@ -202,6 +201,7 @@ def documented_func(): def test_decorator_preserves_function_name(self): """Test that decorator preserves function name.""" + @trace_function() def named_function(): return "result" @@ -211,6 +211,7 @@ def named_function(): @pytest.mark.skipif(not TRACING_AVAILABLE, reason="OpenTelemetry not installed") def test_decorator_records_duration(self): """Test that decorator records execution duration.""" + @trace_function() def slow_func(): time.sleep(0.1) @@ -225,6 +226,7 @@ def slow_func(): def test_decorator_when_tracing_disabled(self): """Test decorator works even when tracing is disabled.""" + # This should work regardless of TRACING_AVAILABLE @trace_function() def normal_func(): @@ -262,6 +264,7 @@ def test_multiple_sequential_spans(self): def test_decorated_functions_call_chain(self): """Test call chain of decorated functions.""" + @trace_function() def func_a(): return func_b() @@ -279,6 +282,7 @@ def func_c(): def test_decorator_with_multiple_returns(self): """Test decorator on function with multiple return paths.""" + @trace_function() def multi_return(value): if value > 0: @@ -320,6 +324,7 @@ def test_disabled_tracer_returns_none(self): def test_disabled_tracing_no_errors(self): """Test that disabled tracing doesn't cause errors.""" + @trace_function() def test_func(): return "result" @@ -330,6 +335,7 @@ def test_func(): def test_decorator_overhead_minimal_when_disabled(self): """Test that decorator has minimal overhead when disabled.""" + @trace_function() def fast_func(): return 42 @@ -350,6 +356,7 @@ class TestEdgeCases: def test_decorator_on_generator(self): """Test decorator on generator function.""" + @trace_function() def gen_func(): yield 1 @@ -361,15 +368,17 @@ def gen_func(): def test_decorator_on_async_function(self): """Test decorator on async function (should still work).""" + @trace_function() async def async_func(): return "async_result" # We can't easily await this in sync tests, but decorator should apply - assert hasattr(async_func, '__name__') + assert hasattr(async_func, "__name__") def test_decorator_with_none_return(self): """Test decorator on function returning None.""" + @trace_function() def none_func(): return None @@ -379,6 +388,7 @@ def none_func(): def test_decorator_with_no_return(self): """Test decorator on function with no explicit return.""" + @trace_function() def no_return_func(): pass @@ -410,6 +420,7 @@ def test_func(): def test_multiple_decorators(self): """Test function with multiple decorators.""" + @trace_function(name="outer") @trace_function(name="inner") def 
double_traced(): @@ -420,6 +431,7 @@ def double_traced(): def test_decorator_with_recursive_function(self): """Test decorator on recursive function.""" + @trace_function() def factorial(n): if n <= 1: @@ -436,11 +448,7 @@ def test_span_attributes_with_complex_types(self): # Different attribute types span = manager.start_span( - "complex_attrs", - string_attr="value", - int_attr=42, - float_attr=3.14, - bool_attr=True + "complex_attrs", string_attr="value", int_attr=42, float_attr=3.14, bool_attr=True ) assert span is not None @@ -451,6 +459,7 @@ class TestTracingPerformance: def test_decorator_minimal_overhead(self): """Test that decorator has minimal overhead.""" + # Baseline function def baseline(): return 42 @@ -489,6 +498,7 @@ def test_many_spans_no_memory_leak(self): def test_concurrent_tracing(self): """Test that concurrent tracing works.""" + @trace_function() def concurrent_func(n): time.sleep(0.001) @@ -515,7 +525,7 @@ def test_tracing_manager_setup(self): manager = TracingManager() # Should have setup method called - assert hasattr(manager, '_setup_tracing') + assert hasattr(manager, "_setup_tracing") def test_graceful_degradation(self): """Test graceful degradation when tracing unavailable.""" From d98b0a2e876e6844d2968a08450a739d993e6aa4 Mon Sep 17 00:00:00 2001 From: Lexicoding <234111021+Lexicoding-systems@users.noreply.github.com> Date: Sat, 10 Jan 2026 01:01:14 -0500 Subject: [PATCH 3/7] Update auth_service.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: Lexicoding <234111021+Lexicoding-systems@users.noreply.github.com> --- src/lexecon/security/auth_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lexecon/security/auth_service.py b/src/lexecon/security/auth_service.py index 96d3905..181ffc8 100644 --- a/src/lexecon/security/auth_service.py +++ b/src/lexecon/security/auth_service.py @@ -16,7 +16,7 @@ from dataclasses import dataclass from datetime import datetime, timedelta, timezone from enum import Enum -from typing import Dict, List, Optional, Tuple +from typing import List, Optional, Tuple class Role(str, Enum): From 71644a72e54ed675137d915a7aaab04066e4d2a9 Mon Sep 17 00:00:00 2001 From: Lexicoding <234111021+Lexicoding-systems@users.noreply.github.com> Date: Sat, 10 Jan 2026 01:01:34 -0500 Subject: [PATCH 4/7] Update service.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: Lexicoding <234111021+Lexicoding-systems@users.noreply.github.com> --- src/lexecon/audit_export/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lexecon/audit_export/service.py b/src/lexecon/audit_export/service.py index b5e56a2..6b87015 100644 --- a/src/lexecon/audit_export/service.py +++ b/src/lexecon/audit_export/service.py @@ -19,7 +19,7 @@ from dataclasses import dataclass, field from datetime import datetime, timedelta, timezone from enum import Enum -from typing import Any, Dict, List, Optional, Set +from typing import Any, Dict, List, Optional class ExportFormat(Enum): From fd2fe5144ecee6e8c459f723ed1063b213dc3ed2 Mon Sep 17 00:00:00 2001 From: Lexicoding <234111021+Lexicoding-systems@users.noreply.github.com> Date: Sat, 10 Jan 2026 01:01:39 -0500 Subject: [PATCH 5/7] Update server.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: Lexicoding <234111021+Lexicoding-systems@users.noreply.github.com> --- src/lexecon/api/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lexecon/api/server.py 
b/src/lexecon/api/server.py index 18823d1..74e62f8 100644 --- a/src/lexecon/api/server.py +++ b/src/lexecon/api/server.py @@ -36,7 +36,7 @@ from lexecon.responsibility.tracker import DecisionMaker, ResponsibilityLevel, ResponsibilityTracker # Governance service imports -from lexecon.risk.service import RiskScoringEngine, RiskService +from lexecon.risk.service import RiskService from lexecon.security.audit_service import AuditService, ExportStatus # Security imports From 10032151bf29d4b78e87a86f1c87d1ceb07fb376 Mon Sep 17 00:00:00 2001 From: Lexicoding <234111021+Lexicoding-systems@users.noreply.github.com> Date: Sat, 10 Jan 2026 01:01:45 -0500 Subject: [PATCH 6/7] Update server.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: Lexicoding <234111021+Lexicoding-systems@users.noreply.github.com> --- src/lexecon/api/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lexecon/api/server.py b/src/lexecon/api/server.py index 74e62f8..add77b8 100644 --- a/src/lexecon/api/server.py +++ b/src/lexecon/api/server.py @@ -40,7 +40,7 @@ from lexecon.security.audit_service import AuditService, ExportStatus # Security imports -from lexecon.security.auth_service import AuthService, Permission, Role, Session, User +from lexecon.security.auth_service import AuthService, Permission, Role from lexecon.security.signature_service import SignatureService from lexecon.storage.persistence import LedgerStorage From 624b84ef076176bb29fb9aace9c575c43de1fc04 Mon Sep 17 00:00:00 2001 From: Lexicoding <234111021+Lexicoding-systems@users.noreply.github.com> Date: Sat, 10 Jan 2026 01:01:53 -0500 Subject: [PATCH 7/7] Update signature_service.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Signed-off-by: Lexicoding <234111021+Lexicoding-systems@users.noreply.github.com> --- src/lexecon/security/signature_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lexecon/security/signature_service.py b/src/lexecon/security/signature_service.py index 1be2166..4159206 100644 --- a/src/lexecon/security/signature_service.py +++ b/src/lexecon/security/signature_service.py @@ -12,7 +12,7 @@ import json import os from datetime import datetime, timezone -from typing import Any, Dict, Optional, Tuple +from typing import Any, Dict, Tuple from cryptography.exceptions import InvalidSignature from cryptography.hazmat.backends import default_backend