diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..5d908ff
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,40 @@
+# Build output
+build/output/
+*.exe
+
+# OS files
+.DS_Store
+Thumbs.db
+desktop.ini
+
+# Node
+node_modules/
+npm-debug.log
+
+# Plugin data (runtime, not shipped)
+bates-core/plugins/*/data/
+bates-core/plugins/*/node_modules/
+bates-enhance/integrations/*/node_modules/
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+
+# Secrets (should never be committed)
+*.env
+.env.*
+credentials.json
+auth-profiles.json
+*.key
+*.pem
+
+# Temporary
+*.tmp
+*.bak
+*.orig
+
+# Search index data
+bates-enhance/integrations/search/search-index/db/
+bates-enhance/integrations/search/search-index/venv/
diff --git a/DISCLAIMER.txt b/DISCLAIMER.txt
new file mode 100644
index 0000000..fd9c6c8
--- /dev/null
+++ b/DISCLAIMER.txt
@@ -0,0 +1,49 @@
+BATES AI ASSISTANT -- IMPORTANT DISCLAIMER
+
+PLEASE READ CAREFULLY BEFORE PROCEEDING WITH INSTALLATION.
+
+This software is provided "AS IS", without warranty of any kind, express
+or implied. This is an EXPERIMENTAL, PRE-RELEASE PROJECT under active
+development.
+
+By installing and using this software, you acknowledge and accept the
+following:
+
+1. USE AT YOUR OWN RISK. The authors, contributors, and maintainers of
+   this project accept no responsibility or liability for any damage,
+   data loss, system instability, security incidents, unexpected costs,
+   or any other harm resulting from the use or misuse of this software.
+
+2. SYSTEM MODIFICATIONS. This installer modifies your system
+   configuration, including enabling WSL2, installing packages, creating
+   systemd services, setting up cron jobs, and configuring network
+   services. These changes may affect your system's stability, security,
+   and performance.
+
+3. THIRD-PARTY SERVICES. This software interacts with third-party APIs
+   and services (Anthropic, OpenAI, Google, Telegram, Twilio, Microsoft
+   365, ElevenLabs, and others). You are solely responsible for any
+   costs, terms of service violations, or consequences arising from the
+   use of these services.
+
+4. NO WARRANTY. No guarantee of correctness, security, or fitness for
+   any particular purpose. The installer scripts have been tested on
+   specific hardware and software configurations. Your results may vary.
+
+5. AUTONOMOUS AI AGENTS. This software manages AI agents that can take
+   autonomous actions including sending messages, making API calls,
+   reading and writing files, and executing commands. You are responsible
+   for supervising and configuring these agents appropriately.
+
+6. BACK UP YOUR DATA before running the installer. We strongly recommend
+   testing on a dedicated or non-critical machine first.
+
+7. NO AFFILIATION. This project is not affiliated with, endorsed by, or
+   supported by OpenClaw, Anthropic, OpenAI, Google, Microsoft, Telegram,
+   Twilio, ElevenLabs, or any other third-party service mentioned herein.
+
+This software is licensed under the Apache License, Version 2.0. See the
+LICENSE file for the full license text.
+
+BY PROCEEDING WITH THE INSTALLATION, YOU ACCEPT FULL RESPONSIBILITY FOR
+ANY AND ALL CONSEQUENCES.
diff --git a/README.md b/README.md
index 5def77f..d056a89 100644
--- a/README.md
+++ b/README.md
@@ -11,6 +11,22 @@ Built on [OpenClaw](https://openclaw.ai) · Open Source · Apache 2.0
 
 ---
 
+> **DISCLAIMER -- PLEASE READ BEFORE PROCEEDING**
+>
+> This software is provided **"AS IS"**, without warranty of any kind, express or implied.
+> This is an **experimental, pre-release project** under active development. By using this software, you acknowledge and accept the following:
+>
+> - **USE AT YOUR OWN RISK.** The authors, contributors, and maintainers of this project accept **no responsibility or liability** for any damage, data loss, system instability, security incidents, unexpected costs, or any other harm resulting from the use or misuse of this software.
+> - This installer **modifies your system configuration**, including enabling WSL2, installing packages, creating systemd services, setting up cron jobs, and configuring network services. These changes may affect your system's stability, security, and performance.
+> - This software interacts with **third-party APIs and services** (Anthropic, OpenAI, Google, Telegram, Twilio, Microsoft 365, etc.). You are solely responsible for any costs, terms of service violations, or consequences arising from the use of these services.
+> - **No guarantee of correctness, security, or fitness for any particular purpose.** The installer scripts have been tested on specific hardware and software configurations. Your results may vary.
+> - This software manages **AI agents that can take autonomous actions** including sending messages, making API calls, reading and writing files, and executing commands. You are responsible for supervising and configuring these agents appropriately.
+> - **Back up your data before running the installer.** We strongly recommend testing on a dedicated or non-critical machine first.
+> - This project is **not affiliated with, endorsed by, or supported by** OpenClaw, Anthropic, OpenAI, Google, Microsoft, Telegram, Twilio, ElevenLabs, or any other third-party service mentioned herein.
+>
+> **BY PROCEEDING WITH THE INSTALLATION, YOU ACCEPT FULL RESPONSIBILITY FOR ANY AND ALL CONSEQUENCES.**
+
+---
+
 ## What Bates Does
 
 Bates runs 24/7 on your Windows PC and handles your operational workflow autonomously:
diff --git a/bates-core/BatesCore.iss b/bates-core/BatesCore.iss
new file mode 100644
index 0000000..9256df8
--- /dev/null
+++ b/bates-core/BatesCore.iss
@@ -0,0 +1,184 @@
+; BatesCore.iss -- Inno Setup script for Bates AI Assistant
+; Compiles to BatesCore-2.0.0.exe
+;
+; Prerequisites handled by this installer:
+;   - Windows 10/11 Pro (build 19041+)
+;   - 8GB RAM minimum
+;   - 20GB free disk space
+;   - Internet connectivity
+;   - Admin rights (for WSL2 enablement)
+
+#define MyAppName "Bates AI Assistant"
+#define MyAppVersion "2.0.0"
+#define MyAppPublisher "getBates"
+#define MyAppURL "https://github.com/getBates/Bates"
+
+[Setup]
+AppId={{A7E3B4C1-8F9D-4E6A-B2C5-1D0F3E7A9B8C}
+AppName={#MyAppName}
+AppVersion={#MyAppVersion}
+AppPublisher={#MyAppPublisher}
+AppPublisherURL={#MyAppURL}
+AppSupportURL={#MyAppURL}/issues
+DefaultDirName={localappdata}\BatesInstaller
+DefaultGroupName={#MyAppName}
+OutputDir=..\build\output
+OutputBaseFilename=BatesCore-{#MyAppVersion}
+Compression=lzma2/ultra64
+SolidCompression=yes
+PrivilegesRequired=admin
+AllowNoIcons=yes
+DisableProgramGroupPage=yes
+LicenseFile=..\DISCLAIMER.txt
+InfoBeforeFile=..\LICENSE
+SetupIconFile=assets\bates-icon.ico
+WizardSmallImageFile=assets\installer-logo.bmp
+WizardImageFile=assets\installer-banner.bmp
+WizardStyle=modern
+ArchitecturesInstallIn64BitMode=x64compatible
+MinVersion=10.0.19041
+
+[Languages]
+Name: "english"; MessagesFile: "compiler:Default.isl"
+
+[Files]
+; Disclaimer (also shown by install.ps1 and core-setup.sh)
+Source: "..\DISCLAIMER.txt"; DestDir: "{app}"; Flags: ignoreversion
+
+; Core setup scripts
+Source: "core-setup.sh"; DestDir: "{app}"; Flags: ignoreversion
+Source: "core-configure.sh"; DestDir: "{app}"; Flags: ignoreversion
+Source: "core-verify.sh"; DestDir: "{app}"; Flags: ignoreversion
+Source: "install.ps1"; DestDir: "{app}"; Flags: ignoreversion
+
+; Libraries
+Source: "lib\*"; DestDir: "{app}\lib"; Flags: ignoreversion recursesubdirs
+
+; Workspace templates
+Source: "workspace-core\*"; DestDir: "{app}\workspace-core"; Flags: ignoreversion recursesubdirs
+
+; Scripts
+Source: "scripts-core\*"; DestDir: "{app}\scripts-core"; Flags: ignoreversion
+
+; Plugins
+Source: "plugins\*"; DestDir: "{app}\plugins"; Flags: ignoreversion recursesubdirs
+
+; Systemd templates
+Source: "systemd\*"; DestDir: "{app}\systemd"; Flags: ignoreversion
+
+; Config templates
+Source: "templates\*"; DestDir: "{app}\templates"; Flags: ignoreversion
+
+; Crontab template
+Source: "crontab\*"; DestDir: "{app}\crontab"; Flags: ignoreversion
+
+; Brand assets
+Source: "assets\*"; DestDir: "{app}\assets"; Flags: ignoreversion
+
+[Run]
+; Launch the PowerShell bootstrap after installation
+Filename: "powershell.exe"; \
+  Parameters: "-ExecutionPolicy Bypass -File ""{app}\install.ps1"" -InstallDir ""{app}"""; \
+  StatusMsg: "Setting up Bates AI Assistant..."; \
+  Flags: runascurrentuser waituntilterminated
+
+[UninstallRun]
+; Run uninstall script if it exists
+Filename: "wsl.exe"; \
+  Parameters: "-d Ubuntu-24.04 -- bash -c ""~/.openclaw/scripts/uninstall.sh --auto 2>/dev/null || true"""; \
+  RunOnceId: "BatesWslUninstall"; \
+  Flags: runhidden waituntilterminated
+
+[UninstallDelete]
+Type: filesandordirs; Name: "{app}"
+
+[Code]
+// Pascal Script for prerequisite validation
+
+function IsWindows10ProOrLater(): Boolean;
+var
+  Version: TWindowsVersion;
+begin
+  GetWindowsVersionEx(Version);
+  // Windows 10 = 10.0, build 19041+ (version 2004)
+  Result := (Version.Major >= 10) and (Version.Build >= 19041);
+end;
+
+// Note: GlobalMemoryStatus caps dwTotalPhys at 4GB, which would make the
+// 8GB check below fail on every modern machine. Import GlobalMemoryStatusEx
+// from kernel32 instead, which reports 64-bit sizes.
+type
+  TMemoryStatusEx = record
+    dwLength: DWORD;
+    dwMemoryLoad: DWORD;
+    ullTotalPhys: Int64;
+    ullAvailPhys: Int64;
+    ullTotalPageFile: Int64;
+    ullAvailPageFile: Int64;
+    ullTotalVirtual: Int64;
+    ullAvailVirtual: Int64;
+    ullAvailExtendedVirtual: Int64;
+  end;
+
+function GlobalMemoryStatusEx(var lpBuffer: TMemoryStatusEx): BOOL;
+  external 'GlobalMemoryStatusEx@kernel32.dll stdcall';
+
+function GetTotalRAM(): Integer;
+var
+  MemStatus: TMemoryStatusEx;
+begin
+  MemStatus.dwLength := SizeOf(MemStatus);
+  if GlobalMemoryStatusEx(MemStatus) then
+    Result := MemStatus.ullTotalPhys div (1024 * 1024 * 1024)
+  else
+    Result := 0;
+end;
+
+function GetFreeDiskSpace(): Integer;
+var
+  FreeBytes: Int64;
+  TotalBytes: Int64;
+begin
+  // GetSpaceOnDisk64 is Inno Setup's support function for 64-bit disk sizes
+  if GetSpaceOnDisk64(ExpandConstant('{sd}\'), FreeBytes, TotalBytes) then
+    Result := FreeBytes div (1024 * 1024 * 1024)
+  else
+    Result := 0;
+end;
+
+function CheckInternetConnection(): Boolean;
+var
+  WinHttpReq: Variant;
+begin
+  Result := False;
+  try
+    WinHttpReq := CreateOleObject('WinHttp.WinHttpRequest.5.1');
+    WinHttpReq.Open('GET', 'https://github.com', False);
+    WinHttpReq.SetTimeouts(5000, 5000, 5000, 5000);
+    WinHttpReq.Send('');
+    Result := (WinHttpReq.Status = 200);
+  except
+    Result := False;
+  end;
+end;
+
+function InitializeSetup(): Boolean;
+var
+  RAM: Integer;
+  Disk: Integer;
+  ErrorMsg: String;
+begin
+  Result := True;
+  ErrorMsg := '';
+
+  // Check Windows version
+  if not IsWindows10ProOrLater() then
+  begin
+    ErrorMsg := ErrorMsg + '- WSL2 requires Windows 10 Pro version 2004 (build 19041) or later.' + #13#10;
+  end;
+
+  // Check RAM
+  RAM := GetTotalRAM();
+  if RAM < 7 then // Use 7 as threshold (8GB reports as ~7.x)
+  begin
+    ErrorMsg := ErrorMsg + '- Bates needs at least 8GB RAM. Detected: ' + IntToStr(RAM) + 'GB.' + #13#10;
+  end;
+
+  // Check disk space
+  Disk := GetFreeDiskSpace();
+  if Disk < 20 then
+  begin
+    ErrorMsg := ErrorMsg + '- At least 20GB free disk space required. Available: ' + IntToStr(Disk) + 'GB.' + #13#10;
+  end;
+
+  // Check internet
+  if not CheckInternetConnection() then
+  begin
+    ErrorMsg := ErrorMsg + '- Internet connection required for installation.' + #13#10;
+  end;
+
+  if ErrorMsg <> '' then
+  begin
+    MsgBox('Prerequisites not met:' + #13#10 + #13#10 + ErrorMsg + #13#10 +
+           'Please fix these issues and try again.', mbError, MB_OK);
+    Result := False;
+  end;
+end;
diff --git a/bates-core/assets/bates-icon.ico b/bates-core/assets/bates-icon.ico
new file mode 100644
index 0000000..fa93e8d
Binary files /dev/null and b/bates-core/assets/bates-icon.ico differ
diff --git a/bates-core/assets/installer-banner.bmp b/bates-core/assets/installer-banner.bmp
new file mode 100644
index 0000000..2fb15a9
Binary files /dev/null and b/bates-core/assets/installer-banner.bmp differ
diff --git a/bates-core/assets/installer-logo.bmp b/bates-core/assets/installer-logo.bmp
new file mode 100644
index 0000000..d5f39f3
Binary files /dev/null and b/bates-core/assets/installer-logo.bmp differ
diff --git a/bates-core/assets/installer-splash.png b/bates-core/assets/installer-splash.png
new file mode 100644
index 0000000..a702bbf
Binary files /dev/null and b/bates-core/assets/installer-splash.png differ
diff --git a/bates-core/core-configure.sh b/bates-core/core-configure.sh
new file mode 100755
index 0000000..559f2e5
--- /dev/null
+++ b/bates-core/core-configure.sh
@@ -0,0 +1,292 @@
+#!/usr/bin/env bash
+# core-configure.sh -- Phase 3: AI auth + personalization + Telegram
+# Called after core-setup.sh has installed all dependencies.
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+source "$SCRIPT_DIR/lib/common.sh"
+source "$SCRIPT_DIR/lib/template-engine.sh"
+
+export PATH="$HOME/.npm-global/bin:$PATH"
+
+echo ""
+echo "==========================================="
+echo "  Bates Core -- Configuration"
+echo "==========================================="
+
+# ============================================================
+# AI Provider Selection
+# ============================================================
+echo ""
+echo "Choose your AI subscription:"
+echo "  1) Anthropic (Claude Max)   -- Best quality, Opus 4.6"
+echo "  2) OpenAI (ChatGPT Pro)     -- GPT-4o"
+echo "  3) Google (Gemini Advanced) -- Gemini 2.0 Pro"
+echo ""
+read -rp "Selection [1]: " PROVIDER_CHOICE
+PROVIDER_CHOICE="${PROVIDER_CHOICE:-1}"
+
+case "$PROVIDER_CHOICE" in
+  1)
+    export PROVIDER="anthropic"
+    export PRIMARY_MODEL="anthropic/claude-opus-4-6"
+    export PRIMARY_MODEL_SHORT="Opus 4.6"
+    echo ""
+    echo "Anthropic subscription auth requires a token from Claude Code."
+    echo ""
+    echo "In another terminal, run:"
+    echo "  claude setup-token"
+    echo ""
+    echo "Then paste the token here."
+    echo ""
+    read -rp "Subscription token: " SUB_TOKEN
+    if [[ -z "$SUB_TOKEN" ]]; then
+      fatal "Subscription token is required."
+    fi
+    # Try the interactive openclaw CLI first; fall back to manual credential
+    # storage if no TTY is available (e.g. piped input, automation).
+    if openclaw models auth setup-token --provider anthropic <<< "$SUB_TOKEN" 2>/dev/null; then
+      success "Anthropic subscription configured."
+    else
+      warn "openclaw CLI auth requires an interactive terminal. Storing token manually..."
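+      # Fallback path (this installer's own convention, not an openclaw
+      # standard location): persist the raw token with owner-only
+      # permissions so the interactive auth flow can adopt it later.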
+ mkdir -p "$HOME/.openclaw/credentials" + chmod 700 "$HOME/.openclaw/credentials" + echo -n "$SUB_TOKEN" > "$HOME/.openclaw/credentials/anthropic-token" + chmod 600 "$HOME/.openclaw/credentials/anthropic-token" + success "Anthropic token stored manually. Run 'openclaw models auth setup-token --provider anthropic' later to complete interactive setup." + fi + + echo "" + read -rp "Optional: API key as fallback (or Enter to skip): " API_KEY + if [[ -n "$API_KEY" ]]; then + # Store API key in systemd drop-in for gateway + cat > "$HOME/.config/systemd/user/openclaw-gateway.service.d/api-key.conf" << EOF +[Service] +Environment="ANTHROPIC_API_KEY=$API_KEY" +EOF + chmod 600 "$HOME/.config/systemd/user/openclaw-gateway.service.d/api-key.conf" + success "API key fallback configured." + fi + ;; + 2) + export PROVIDER="openai" + export PRIMARY_MODEL="openai/gpt-4o" + export PRIMARY_MODEL_SHORT="GPT-4o" + echo "" + echo "Starting OpenAI auth flow..." + openclaw models auth --provider openai + ;; + 3) + export PROVIDER="google" + export PRIMARY_MODEL="google/gemini-2.0-pro" + export PRIMARY_MODEL_SHORT="Gemini 2.0 Pro" + echo "" + echo "Starting Google auth flow..." + openclaw models auth --provider google + ;; + *) + fatal "Invalid selection: $PROVIDER_CHOICE" + ;; +esac + +# ============================================================ +# Personalization +# ============================================================ +echo "" +echo "--- Personalization ---" +read -rp "Assistant name [Bates]: " ASSISTANT_NAME +export ASSISTANT_NAME="${ASSISTANT_NAME:-Bates}" + +read -rp "Your name: " USER_NAME +if [[ -z "$USER_NAME" ]]; then + fatal "Your name is required." +fi +export USER_NAME + +read -rp "Your timezone [Europe/Lisbon]: " USER_TZ +export USER_TZ="${USER_TZ:-Europe/Lisbon}" + +# ============================================================ +# Telegram Setup +# ============================================================ +echo "" +echo "===========================================" +echo " Telegram Setup (your first messaging channel)" +echo "===========================================" +echo "" +echo "Telegram lets you talk to $ASSISTANT_NAME from your phone, anywhere." +echo "" +echo "Step 1: Create a bot" +echo " Open Telegram and message @BotFather:" +echo " /newbot -> follow the prompts -> copy the bot token" +echo "" +read -rp "Bot token (e.g., 7123456789:AAF...): " TELEGRAM_BOT_TOKEN +if [[ -z "$TELEGRAM_BOT_TOKEN" ]]; then + fatal "Telegram bot token is required." +fi +export TELEGRAM_BOT_TOKEN + +echo "" +echo "Step 2: Your Telegram user ID" +echo " Message @userinfobot in Telegram to get your numeric ID." +echo "" +read -rp "Your Telegram user ID (numeric): " TELEGRAM_USER_ID +if [[ -z "$TELEGRAM_USER_ID" ]]; then + fatal "Telegram user ID is required." +fi +export TELEGRAM_USER_ID + +# ============================================================ +# Generate Configuration +# ============================================================ +step "Generating configuration..." 
+
+# Render openclaw.json
+template_render "$SCRIPT_DIR/templates/openclaw.json.template" \
+  "$HOME/.openclaw/openclaw.json"
+chmod 600 "$HOME/.openclaw/openclaw.json"
+success "openclaw.json generated"
+
+# Render auth profiles
+mkdir -p "$HOME/.openclaw/agents/main/agent"
+template_render "$SCRIPT_DIR/templates/auth-profiles.json.template" \
+  "$HOME/.openclaw/agents/main/agent/auth-profiles.json"
+chmod 600 "$HOME/.openclaw/agents/main/agent/auth-profiles.json"
+success "Auth profiles generated"
+
+# ============================================================
+# Deploy Workspace
+# ============================================================
+step "Deploying workspace..."
+
+# Render template files
+for f in "$SCRIPT_DIR"/workspace-core/*.template; do
+  [[ -f "$f" ]] || continue
+  basename_full="$(basename "$f")"
+  basename_no_ext="${basename_full%.template}"
+  target="$HOME/.openclaw/workspace/$basename_no_ext"
+  template_render "$f" "$target"
+  echo "  Rendered: $basename_no_ext"
+done
+
+# Copy non-template files
+for f in "$SCRIPT_DIR"/workspace-core/*.md; do
+  [[ -f "$f" ]] || continue
+  basename_full="$(basename "$f")"
+  # Skip if a .template version exists (already rendered above)
+  if [[ -f "$SCRIPT_DIR/workspace-core/${basename_full}.template" ]]; then
+    continue
+  fi
+  cp "$f" "$HOME/.openclaw/workspace/$basename_full"
+  echo "  Copied: $basename_full"
+done
+
+# Copy rules
+if [[ -d "$SCRIPT_DIR/workspace-core/rules" ]]; then
+  cp "$SCRIPT_DIR"/workspace-core/rules/*.md "$HOME/.openclaw/workspace/rules/" 2>/dev/null || true
+  echo "  Copied: rules/"
+fi
+
+# Copy skills
+if [[ -d "$SCRIPT_DIR/workspace-core/skills" ]]; then
+  cp -r "$SCRIPT_DIR"/workspace-core/skills/* "$HOME/.openclaw/workspace/skills/" 2>/dev/null || true
+  echo "  Copied: skills/"
+fi
+
+# Copy observations
+if [[ -d "$SCRIPT_DIR/workspace-core/observations" ]]; then
+  cp "$SCRIPT_DIR"/workspace-core/observations/*.md "$HOME/.openclaw/workspace/observations/" 2>/dev/null || true
+  echo "  Copied: observations/"
+fi
+
+success "Workspace deployed"
+
+# ============================================================
+# Deploy Scripts
+# ============================================================
+step "Installing scripts..."
+cp "$SCRIPT_DIR"/scripts-core/*.sh "$HOME/.openclaw/scripts/" 2>/dev/null || true
+chmod +x "$HOME/.openclaw/scripts/"*.sh 2>/dev/null || true
+success "Scripts installed"
+
+# ============================================================
+# Deploy Plugins
+# ============================================================
+step "Installing plugins..."
+
+# Cost tracker
+if [[ -d "$SCRIPT_DIR/plugins/cost-tracker" ]]; then
+  mkdir -p "$HOME/.openclaw/extensions/cost-tracker"
+  cp -r "$SCRIPT_DIR/plugins/cost-tracker/"* "$HOME/.openclaw/extensions/cost-tracker/"
+  success "Cost tracker plugin installed"
+fi
+
+# Dashboard
+if [[ -d "$SCRIPT_DIR/plugins/dashboard" ]]; then
+  mkdir -p "$HOME/.openclaw/extensions/dashboard"
+  cp -r "$SCRIPT_DIR/plugins/dashboard/"* "$HOME/.openclaw/extensions/dashboard/"
+  # Install dashboard dependencies if package.json exists
+  if [[ -f "$HOME/.openclaw/extensions/dashboard/package.json" ]]; then
+    (cd "$HOME/.openclaw/extensions/dashboard" && npm install --production 2>/dev/null) || true
+  fi
+  success "Dashboard plugin installed"
+fi
+
+# ============================================================
+# Core Cron Jobs (via OpenClaw)
+# ============================================================
+step "Setting up cron jobs..."
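+# Registrations below are keyed by --name, so re-running this script makes
+# the duplicate adds fail harmlessly; we downgrade that to a warning. To
+# inspect what is currently registered, run:
+#   openclaw cron list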
+
+openclaw cron add --name "health-log" \
+  --schedule "0 */6 * * *" --tz "$USER_TZ" \
+  --message "Run health check: check gateway status, disk space, memory. Save to observations/health.json." \
+  2>/dev/null || warn "health-log cron already exists"
+
+openclaw cron add --name "context-watchdog" \
+  --schedule "0 * * * *" --tz "$USER_TZ" \
+  --message "Check context token usage. If approaching limit, trigger compaction." \
+  2>/dev/null || warn "context-watchdog cron already exists"
+
+openclaw cron add --name "proactive-checkin" \
+  --schedule "0 2,9,12,16,20 * * *" --tz "$USER_TZ" \
+  --message "Proactive check-in. Review available local data. Score changes. Only message if actionable." \
+  2>/dev/null || warn "proactive-checkin cron already exists"
+
+success "3 core cron jobs configured"
+
+# ============================================================
+# System Crontab
+# ============================================================
+step "Installing system crontab..."
+
+CRONTAB_CONTENT="$(cat <<EOF
+*/2 * * * * $HOME/.openclaw/scripts/watchdog-bates.sh >> /tmp/watchdog-bates.log 2>&1
+*/30 * * * * $HOME/.openclaw/scripts/archive-sessions.sh >> /tmp/archive-sessions.log 2>&1
+0 2 * * * rm -f $HOME/.openclaw/sessions.json && systemctl --user restart openclaw-gateway >> /tmp/session-cleanup.log 2>&1
+EOF
+)"
+
+# Merge with existing crontab (don't overwrite user entries)
+(crontab -l 2>/dev/null | grep -v 'watchdog-bates\|archive-sessions\|session-cleanup'; echo "$CRONTAB_CONTENT") | crontab -
+success "System crontab installed"
+
+# ============================================================
+# File Permissions
+# ============================================================
+step "Securing configuration..."
+chmod 600 "$HOME/.openclaw/openclaw.json"
+chmod 600 "$HOME/.openclaw/agents/main/agent/auth-profiles.json" 2>/dev/null || true
+chmod -R 700 "$HOME/.config/systemd/user/openclaw-gateway.service.d/" 2>/dev/null || true
+success "Permissions set"
+
+# ============================================================
+# OpenClaw Onboard
+# ============================================================
+step "Running OpenClaw onboard..."
+openclaw onboard --install-daemon 2>/dev/null || warn "Onboard may have already run"
+
+echo ""
+success "Configuration complete!"
+echo ""
+echo "Next: Run core-verify.sh to start the gateway and verify everything works."
diff --git a/bates-core/core-setup.sh b/bates-core/core-setup.sh
new file mode 100755
index 0000000..e57acf6
--- /dev/null
+++ b/bates-core/core-setup.sh
@@ -0,0 +1,142 @@
+#!/usr/bin/env bash
+# core-setup.sh -- Phase 2: Linux environment setup
+# Called by install.ps1 after WSL2 + Ubuntu are ready.
+# Installs all system dependencies and prepares the environment.
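+# Can also be re-run standalone inside WSL2 if a step fails, e.g.:
+#   wsl -d Ubuntu-24.04 -- bash /tmp/bates-installer/core-setup.sh
+# (install.ps1 copies the installer files to /tmp/bates-installer)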
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+source "$SCRIPT_DIR/lib/common.sh"
+source "$SCRIPT_DIR/lib/prerequisites.sh"
+
+echo ""
+echo "==========================================="
+echo "  Bates Core -- Linux Environment Setup"
+echo "==========================================="
+echo ""
+
+# --- Disclaimer ---
+DISCLAIMER_FILE="$SCRIPT_DIR/../DISCLAIMER.txt"
+if [[ -f "$DISCLAIMER_FILE" ]]; then
+  echo -e "${YELLOW}${BOLD}"
+  echo "============================================"
+  echo "  IMPORTANT -- PLEASE READ BEFORE CONTINUING"
+  echo "============================================"
+  echo -e "${NC}"
+  cat "$DISCLAIMER_FILE"
+  echo ""
+  echo -e "${YELLOW}${BOLD}============================================${NC}"
+  echo ""
+  if [[ "${BATES_ACCEPT_DISCLAIMER:-}" == "yes" ]]; then
+    info "Disclaimer accepted via BATES_ACCEPT_DISCLAIMER=yes"
+  else
+    echo -e "${BOLD}You must accept this disclaimer to continue.${NC}"
+    echo ""
+    read -rp "Type 'I ACCEPT' to proceed (or anything else to abort): " DISCLAIMER_REPLY
+    if [[ "$DISCLAIMER_REPLY" != "I ACCEPT" ]]; then
+      echo ""
+      error "Installation aborted. You must accept the disclaimer to proceed."
+      exit 1
+    fi
+    echo ""
+    success "Disclaimer accepted."
+  fi
+  echo ""
+fi
+
+# --- Prerequisite Checks ---
+run_all_checks
+
+# --- System Packages ---
+step "Updating system packages..."
+sudo apt-get update -qq
+sudo apt-get install -y -qq \
+  build-essential curl git jq ntpdate poppler-utils tmux \
+  python3 python3-pip python3-venv
+
+# --- Node.js 22 ---
+step "Installing Node.js 22..."
+if command -v node &>/dev/null && [[ "$(node -v)" == v22.* ]]; then
+  success "Node.js $(node -v) already installed"
+else
+  curl -fsSL https://deb.nodesource.com/setup_22.x | sudo -E bash -
+  sudo apt-get install -y -qq nodejs
+  success "Node.js $(node -v) installed"
+fi
+
+# --- npm global prefix ---
+step "Configuring npm global prefix..."
+mkdir -p "$HOME/.npm-global"
+npm config set prefix "$HOME/.npm-global"
+if ! grep -q '.npm-global/bin' "$HOME/.bashrc" 2>/dev/null; then
+  echo 'export PATH="$HOME/.npm-global/bin:$PATH"' >> "$HOME/.bashrc"
+fi
+export PATH="$HOME/.npm-global/bin:$PATH"
+
+# --- OpenClaw ---
+step "Installing OpenClaw..."
+if command -v openclaw &>/dev/null; then
+  success "OpenClaw already installed ($(openclaw --version 2>/dev/null || echo 'unknown version'))"
+else
+  npm install -g openclaw
+  success "OpenClaw installed"
+fi
+
+# --- mcporter ---
+step "Installing mcporter..."
+if command -v mcporter &>/dev/null; then
+  success "mcporter already installed"
+else
+  npm install -g mcporter
+  success "mcporter installed"
+fi
+
+# --- Claude Code ---
+step "Installing Claude Code..."
+if command -v claude &>/dev/null; then
+  success "Claude Code already installed ($(claude --version 2>/dev/null || echo 'unknown version'))"
+else
+  npm install -g @anthropic-ai/claude-code
+  success "Claude Code installed"
+fi
+
+# --- systemd linger ---
+step "Enabling systemd linger..."
+if loginctl show-user "$(whoami)" 2>/dev/null | grep -q "Linger=yes"; then
+  success "Linger already enabled"
+else
+  sudo loginctl enable-linger "$(whoami)"
+  success "Linger enabled"
+fi
+
+# --- Directory structure ---
+step "Creating directory structure..."
+mkdir -p "$HOME/.openclaw"/{workspace/{rules,refs,skills,observations},scripts,extensions,cron,agents/main/{sessions,archive},enhance}
+mkdir -p "$HOME/.config/systemd/user"
+
+# --- Clock sync timer ---
+step "Installing clock-sync timer..."
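+# WSL2 clocks are known to drift after host sleep/hibernate; this user-level
+# timer periodically resyncs time (the service is assumed to wrap ntpdate,
+# which is installed with the system packages above).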
+cp "$SCRIPT_DIR/systemd/clock-sync.service" "$HOME/.config/systemd/user/" +cp "$SCRIPT_DIR/systemd/clock-sync.timer" "$HOME/.config/systemd/user/" +systemctl --user daemon-reload +systemctl --user enable clock-sync.timer 2>/dev/null || true + +# --- Gateway service --- +step "Installing gateway service..." +cp "$SCRIPT_DIR/systemd/openclaw-gateway.service.template" \ + "$HOME/.config/systemd/user/openclaw-gateway.service" +# Replace %h with actual home dir (systemd user units support %h, but template needs it) +sed -i "s|%h|$HOME|g" "$HOME/.config/systemd/user/openclaw-gateway.service" + +# NODE_PATH drop-in for npm-global plugin resolution +mkdir -p "$HOME/.config/systemd/user/openclaw-gateway.service.d" +cat > "$HOME/.config/systemd/user/openclaw-gateway.service.d/node-path.conf" << EOF +[Service] +Environment="NODE_PATH=$HOME/.npm-global/lib/node_modules" +EOF + +systemctl --user daemon-reload + +echo "" +success "Linux environment setup complete." +echo "" +echo "Next: Run core-configure.sh to set up AI auth and personalization." diff --git a/bates-core/core-verify.sh b/bates-core/core-verify.sh new file mode 100755 index 0000000..4f2ccc3 --- /dev/null +++ b/bates-core/core-verify.sh @@ -0,0 +1,110 @@ +#!/usr/bin/env bash +# core-verify.sh -- Phase 4: Health check + open dashboard +# Called after core-configure.sh to verify everything works. +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$SCRIPT_DIR/lib/common.sh" + +export PATH="$HOME/.npm-global/bin:$PATH" + +echo "" +echo "===========================================" +echo " Bates Core -- Verification" +echo "===========================================" +echo "" + +PASS=0 +FAIL=0 + +check() { + local name="$1" + shift + if "$@" &>/dev/null 2>&1; then + echo -e " ${GREEN}[PASS]${NC} $name" + ((PASS++)) + else + echo -e " ${RED}[FAIL]${NC} $name" + ((FAIL++)) + fi +} + +# --- Start Gateway --- +step "Starting gateway service..." +systemctl --user daemon-reload +systemctl --user enable --now openclaw-gateway 2>/dev/null || true + +echo "Waiting for gateway to start..." +sleep 8 + +# --- Run Checks --- +step "Running verification checks..." +echo "" + +check "Gateway service running" systemctl --user is-active openclaw-gateway +check "Dashboard accessible" curl -sf --max-time 5 http://localhost:18789/dashboard +check "Cost tracker API" curl -sf --max-time 5 http://localhost:18789/cost-tracker/api/today +check "Cron jobs configured" bash -c "openclaw cron list 2>/dev/null | grep -q health-log" +check "Scripts installed" test -x "$HOME/.openclaw/scripts/watchdog-bates.sh" +check "Workspace deployed" test -f "$HOME/.openclaw/workspace/SOUL.md" + +# Check Telegram channel +check "Telegram channel configured" bash -c "python3 -c \"import json; c=json.load(open('$HOME/.openclaw/openclaw.json')); assert c['channels']['telegram']['enabled']\"" + +echo "" +echo "===========================================" +echo " Results: $PASS passed, $FAIL failed" +echo "===========================================" +echo "" + +if [[ $FAIL -eq 0 ]]; then + echo "All checks passed! Your assistant is ready." + echo "" + echo "Dashboard: http://localhost:18789/dashboard" + echo "" + + # Read assistant name from config + ASSISTANT_NAME=$(python3 -c " +import json +c = json.load(open('$HOME/.openclaw/openclaw.json')) +name = c.get('agents', {}).get('definitions', {}).get('main', {}).get('name', 'Bates') +print(name.split(' (')[0]) +" 2>/dev/null || echo "Bates") + + echo "$ASSISTANT_NAME is now running and ready to chat!" 
+ echo "" + echo "Talk to $ASSISTANT_NAME:" + echo " - Web dashboard: http://localhost:18789/dashboard" + echo " - Telegram: open the bot you created and send a message" + echo "" + echo "To add more integrations later:" + echo " bates-enhance.sh" + echo "" + + # Activate Telegram pairing + echo "===========================================" + echo " Telegram Activation" + echo "===========================================" + echo "" + echo "Open your Telegram bot and send any message to start the pairing." + echo "The gateway will prompt you to approve the pairing." + echo "" + echo "Check gateway logs for pairing status:" + echo " journalctl --user -u openclaw-gateway -n 20 --no-pager" + echo "" + + # Try to open browser on Windows + cmd.exe /c start http://localhost:18789/dashboard 2>/dev/null || true +else + echo "Some checks failed. Review the errors above." + echo "" + echo "Troubleshooting:" + echo " Gateway logs: journalctl --user -u openclaw-gateway -n 30 --no-pager" + echo " Service status: systemctl --user status openclaw-gateway" + echo " Config file: cat ~/.openclaw/openclaw.json" + echo "" + echo "Common issues:" + echo " - Gateway not starting: check Node.js version (need v22+)" + echo " - Dashboard not accessible: check port 18789 is not in use" + echo " - Auth failure: re-run 'claude setup-token' and update via openclaw models auth" +fi diff --git a/bates-core/crontab/core-crontab.template b/bates-core/crontab/core-crontab.template new file mode 100644 index 0000000..1d4cdae --- /dev/null +++ b/bates-core/crontab/core-crontab.template @@ -0,0 +1,11 @@ +# Bates Core system cron +# Installed by core-configure.sh + +# Process watchdog: restart gateway if it dies +*/2 * * * * {{HOME}}/.openclaw/scripts/watchdog-bates.sh >> /tmp/watchdog-bates.log 2>&1 + +# Session archival: move old .jsonl files to archive/ +*/30 * * * * {{HOME}}/.openclaw/scripts/archive-sessions.sh >> /tmp/archive-sessions.log 2>&1 + +# Daily session cleanup: clear stale session state at 2 AM +0 2 * * * rm -f {{HOME}}/.openclaw/sessions.json && systemctl --user restart openclaw-gateway >> /tmp/session-cleanup.log 2>&1 diff --git a/bates-core/install.ps1 b/bates-core/install.ps1 new file mode 100644 index 0000000..e4160b6 --- /dev/null +++ b/bates-core/install.ps1 @@ -0,0 +1,303 @@ +# install.ps1 -- Phase 1: Windows Bootstrap for Bates AI Assistant +# Run by Inno Setup after prerequisite checks pass, or standalone. +# +# This script: +# 1. Enables WSL2 if not already enabled +# 2. Installs Ubuntu 24.04 +# 3. Configures .wslconfig +# 4. Creates a Windows Scheduled Task for WSL2 auto-start +# 5. Handles reboot if needed (auto-resume via Scheduled Task) +# 6. 
Launches core-setup.sh inside WSL2 + +param( + [string]$InstallDir = "$env:LOCALAPPDATA\BatesInstaller" +) + +$ErrorActionPreference = "Stop" + +function Write-Step($msg) { + Write-Host "" + Write-Host "==> $msg" -ForegroundColor Cyan +} + +function Write-Success($msg) { + Write-Host "[OK] $msg" -ForegroundColor Green +} + +function Write-Warn($msg) { + Write-Host "[WARN] $msg" -ForegroundColor Yellow +} + +function Write-Fail($msg) { + Write-Host "[ERROR] $msg" -ForegroundColor Red +} + +# ============================================================ +# Banner +# ============================================================ +Write-Host "" +Write-Host "==========================================" -ForegroundColor Cyan +Write-Host " Bates AI Assistant -- Windows Setup" -ForegroundColor Cyan +Write-Host "==========================================" -ForegroundColor Cyan +Write-Host "" + +# ============================================================ +# Disclaimer acceptance +# ============================================================ +$disclaimerPath = Join-Path $InstallDir "DISCLAIMER.txt" +if (-not (Test-Path (Join-Path $InstallDir ".disclaimer-accepted"))) { + if (Test-Path $disclaimerPath) { + Write-Host "==========================================" -ForegroundColor Yellow + Write-Host " IMPORTANT -- PLEASE READ CAREFULLY" -ForegroundColor Yellow + Write-Host "==========================================" -ForegroundColor Yellow + Write-Host "" + Get-Content $disclaimerPath | Write-Host + Write-Host "" + Write-Host "==========================================" -ForegroundColor Yellow + Write-Host "" + + if ($env:BATES_ACCEPT_DISCLAIMER -eq "yes") { + Write-Success "Disclaimer accepted via BATES_ACCEPT_DISCLAIMER=yes" + } else { + Write-Host "You must accept this disclaimer to continue." -ForegroundColor White + Write-Host "" + $reply = Read-Host "Type 'I ACCEPT' to proceed (or anything else to abort)" + if ($reply -ne "I ACCEPT") { + Write-Host "" + Write-Fail "Installation aborted. You must accept the disclaimer to proceed." + exit 1 + } + Write-Host "" + Write-Success "Disclaimer accepted." + } + + # Mark as accepted so we don't re-prompt after reboot + "accepted" | Out-File (Join-Path $InstallDir ".disclaimer-accepted") -Force + Write-Host "" + } +} + +# ============================================================ +# Check if resuming after reboot +# ============================================================ +$resumeMarker = Join-Path $InstallDir ".resume-after-reboot" +if (Test-Path $resumeMarker) { + Write-Step "Resuming after reboot..." + Remove-Item $resumeMarker -Force + + # Remove the resume scheduled task + Unregister-ScheduledTask -TaskName "BatesInstallResume" -Confirm:$false -ErrorAction SilentlyContinue + + # Jump straight to WSL2 setup + goto_wsl_setup + exit 0 +} + +# ============================================================ +# Step 1: Check and Enable WSL2 +# ============================================================ +Write-Step "Checking WSL2..." + +$needsReboot = $false + +# Check WSL feature +$wslFeature = Get-WindowsOptionalFeature -Online -FeatureName Microsoft-Windows-Subsystem-Linux -ErrorAction SilentlyContinue +if ($null -eq $wslFeature -or $wslFeature.State -ne "Enabled") { + Write-Step "Enabling Windows Subsystem for Linux..." 
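+    # WSL2 needs both optional features: the WSL feature itself and
+    # VirtualMachinePlatform (checked next). /norestart defers the reboot,
+    # which is handled in Step 2 below.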
+    dism.exe /online /enable-feature /featurename:Microsoft-Windows-Subsystem-Linux /all /norestart | Out-Null
+    $needsReboot = $true
+}
+
+# Check Virtual Machine Platform
+$vmFeature = Get-WindowsOptionalFeature -Online -FeatureName VirtualMachinePlatform -ErrorAction SilentlyContinue
+if ($null -eq $vmFeature -or $vmFeature.State -ne "Enabled") {
+    Write-Step "Enabling Virtual Machine Platform..."
+    dism.exe /online /enable-feature /featurename:VirtualMachinePlatform /all /norestart | Out-Null
+    $needsReboot = $true
+}
+
+# Set WSL2 as default version
+try {
+    wsl --set-default-version 2 2>$null | Out-Null
+} catch {
+    # May fail if WSL not fully installed yet (needs reboot)
+}
+
+Write-Success "WSL2 features enabled"
+
+# ============================================================
+# Step 2: Handle Reboot if Needed
+# ============================================================
+if ($needsReboot) {
+    Write-Step "WSL2 requires a system reboot to complete installation."
+    Write-Host ""
+    Write-Host "After reboot, the installer will resume automatically." -ForegroundColor Yellow
+    Write-Host ""
+
+    # Create resume marker
+    New-Item -Path $resumeMarker -ItemType File -Force | Out-Null
+
+    # Create scheduled task to resume after reboot
+    $action = New-ScheduledTaskAction -Execute "powershell.exe" `
+        -Argument "-ExecutionPolicy Bypass -File `"$InstallDir\install.ps1`" -InstallDir `"$InstallDir`""
+    $trigger = New-ScheduledTaskTrigger -AtLogOn -User $env:USERNAME
+    $settings = New-ScheduledTaskSettingsSet -AllowStartIfOnBatteries -DontStopIfGoingOnBatteries
+    $principal = New-ScheduledTaskPrincipal -UserId $env:USERNAME -RunLevel Highest
+    Register-ScheduledTask -TaskName "BatesInstallResume" `
+        -Action $action -Trigger $trigger -Settings $settings -Principal $principal -Force | Out-Null
+
+    Write-Success "Resume task created"
+
+    $answer = Read-Host "Reboot now? (y/n)"
+    if ($answer -match "^[Yy]") {
+        Restart-Computer -Force
+    } else {
+        Write-Host ""
+        Write-Host "Please reboot manually, then the installer will resume." -ForegroundColor Yellow
+        exit 0
+    }
+}
+
+# ============================================================
+# Step 3: Install Ubuntu 24.04
+# ============================================================
+function Invoke-WslSetup {
+    Write-Step "Checking Ubuntu 24.04..."
+
+    # WSL_UTF8 avoids the UTF-16LE output of wsl.exe that otherwise breaks
+    # string matching on the distro list.
+    $env:WSL_UTF8 = "1"
+
+    # Check if Ubuntu-24.04 is already installed
+    $distros = wsl --list --quiet 2>$null
+    if ($distros -match "Ubuntu-24.04") {
+        Write-Success "Ubuntu 24.04 already installed"
+    } else {
+        Write-Step "Installing Ubuntu 24.04 (this may take a few minutes)..."
+        wsl --install -d Ubuntu-24.04 --no-launch 2>$null
+
+        if ($LASTEXITCODE -ne 0) {
+            # Try alternative method
+            wsl --install Ubuntu-24.04 2>$null
+        }
+
+        Write-Success "Ubuntu 24.04 installed"
+    }
+
+    # Set as default distribution
+    wsl --set-default Ubuntu-24.04 2>$null
+
+    # ============================================================
+    # Step 4: Configure .wslconfig
+    # ============================================================
+    Write-Step "Configuring WSL2..."
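+    # .wslconfig keys used here (assumed intent): memory= caps the VM's RAM
+    # at 12GB; vmIdleTimeout=-1 keeps the VM from idling out so the gateway
+    # stays reachable; [boot] systemd=true is required for the user services
+    # that core-setup.sh installs.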
+    $wslConfigPath = Join-Path $env:USERPROFILE ".wslconfig"
+    $wslConfigSource = Join-Path $InstallDir "templates\wslconfig.template"
+
+    if (Test-Path $wslConfigSource) {
+        Copy-Item $wslConfigSource $wslConfigPath -Force
+    } else {
+        # Fallback: write config directly
+        @"
+[wsl2]
+memory=12GB
+vmIdleTimeout=-1
+
+[boot]
+systemd=true
+"@ | Set-Content $wslConfigPath
+    }
+    Write-Success ".wslconfig configured"
+
+    # Restart WSL to apply config
+    wsl --shutdown 2>$null
+    Start-Sleep -Seconds 3
+
+    # ============================================================
+    # Step 5: Create WSL2 Auto-Start Scheduled Task
+    # ============================================================
+    Write-Step "Setting up WSL2 auto-start..."
+
+    $wslAction = New-ScheduledTaskAction -Execute "wsl.exe" `
+        -Argument "-d Ubuntu-24.04 -- bash -c 'sleep 5'"
+    $wslTrigger = New-ScheduledTaskTrigger -AtLogOn -User $env:USERNAME
+    $wslSettings = New-ScheduledTaskSettingsSet -AllowStartIfOnBatteries -DontStopIfGoingOnBatteries
+    Register-ScheduledTask -TaskName "BatesWSLAutoStart" `
+        -Action $wslAction -Trigger $wslTrigger -Settings $wslSettings -Force | Out-Null
+
+    Write-Success "WSL2 auto-start configured"
+
+    # ============================================================
+    # Step 6: Copy installer files into WSL2
+    # ============================================================
+    Write-Step "Copying installer files to WSL2..."
+
+    # Convert the Windows path to its WSL mount path. Lowercase only the
+    # drive letter; lowercasing the whole path would break on case-sensitive
+    # mounts.
+    $drive = $InstallDir.Substring(0, 1).ToLower()
+    $wslInstallDir = "/mnt/$drive" + $InstallDir.Substring(2).Replace("\", "/")
+    # Copy into the VM filesystem rather than running from the /mnt mount
+    $wslTargetDir = "/tmp/bates-installer"
+
+    # Create target directory and copy files
+    wsl -d Ubuntu-24.04 -- bash -c "rm -rf $wslTargetDir && mkdir -p $wslTargetDir"
+    wsl -d Ubuntu-24.04 -- bash -c "cp -r '$wslInstallDir/'* '$wslTargetDir/' 2>/dev/null || true"
+
+    # Make scripts executable
+    wsl -d Ubuntu-24.04 -- bash -c "chmod +x '$wslTargetDir/'*.sh '$wslTargetDir/scripts-core/'*.sh 2>/dev/null || true"
+
+    Write-Success "Files copied to WSL2"
+
+    # ============================================================
+    # Step 7: Run Linux Setup
+    # ============================================================
+    Write-Step "Starting Linux environment setup..."
+    Write-Host ""
+    Write-Host "This will install Node.js, OpenClaw, and system packages inside WSL2." -ForegroundColor Yellow
+    Write-Host "You may be prompted for your WSL2 user password (sudo)." -ForegroundColor Yellow
+    Write-Host ""
+
+    # Run core-setup.sh
+    wsl -d Ubuntu-24.04 -- bash "$wslTargetDir/core-setup.sh"
+
+    if ($LASTEXITCODE -eq 0) {
+        Write-Success "Linux setup complete"
+
+        # ============================================================
+        # Step 8: Run Configuration (interactive)
+        # ============================================================
+        Write-Step "Starting AI configuration..."
+        Write-Host ""
+
+        wsl -d Ubuntu-24.04 -- bash "$wslTargetDir/core-configure.sh"
+
+        if ($LASTEXITCODE -eq 0) {
+            Write-Success "Configuration complete"
+
+            # ============================================================
+            # Step 9: Verify Installation
+            # ============================================================
+            wsl -d Ubuntu-24.04 -- bash "$wslTargetDir/core-verify.sh"
+        } else {
+            Write-Fail "Configuration failed. Check the output above."
+            Write-Host "You can retry: wsl -d Ubuntu-24.04 -- bash $wslTargetDir/core-configure.sh"
+        }
+    } else {
+        Write-Fail "Linux setup failed. Check the output above."
+ Write-Host "You can retry: wsl -d Ubuntu-24.04 -- bash $wslTargetDir/core-setup.sh" + } +} + +# Call the setup function (when not resuming) +goto_wsl_setup + +# ============================================================ +# Final Message +# ============================================================ +Write-Host "" +Write-Host "==========================================" -ForegroundColor Green +Write-Host " Installation Complete!" -ForegroundColor Green +Write-Host "==========================================" -ForegroundColor Green +Write-Host "" +Write-Host "Your AI assistant is running at: http://localhost:18789/dashboard" +Write-Host "" +Write-Host "To add more integrations later, run in WSL2:" +Write-Host " bates-enhance.sh" +Write-Host "" +Write-Host "Press any key to exit..." +$null = $Host.UI.RawUI.ReadKey("NoEcho,IncludeKeyDown") diff --git a/bates-core/lib/common.sh b/bates-core/lib/common.sh new file mode 100755 index 0000000..41d73ca --- /dev/null +++ b/bates-core/lib/common.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash +# common.sh -- Shared functions for Bates installer scripts +# Provides logging, colors, prompts, and step tracking + +set -euo pipefail + +# Colors (only if terminal supports them) +if [[ -t 1 ]]; then + RED='\033[0;31m' + GREEN='\033[0;32m' + YELLOW='\033[1;33m' + CYAN='\033[0;36m' + BOLD='\033[1m' + NC='\033[0m' +else + RED='' GREEN='' YELLOW='' CYAN='' BOLD='' NC='' +fi + +# Step counter +_STEP_NUM=0 + +step() { + ((_STEP_NUM++)) || true + echo -e "\n${CYAN}==> Step ${_STEP_NUM}: $1${NC}" +} + +info() { + echo -e "${CYAN}[INFO]${NC} $1" +} + +success() { + echo -e "${GREEN}[OK]${NC} $1" +} + +warn() { + echo -e "${YELLOW}[WARN]${NC} $1" +} + +error() { + echo -e "${RED}[ERROR]${NC} $1" >&2 +} + +fatal() { + error "$1" + exit 1 +} + +# Prompt with default value +prompt_default() { + local prompt="$1" + local default="$2" + local varname="$3" + local input + + if [[ -n "$default" ]]; then + read -rp "$prompt [$default]: " input + eval "$varname=\"${input:-$default}\"" + else + read -rp "$prompt: " input + eval "$varname=\"$input\"" + fi +} + +# Yes/No prompt (returns 0 for yes, 1 for no) +confirm() { + local prompt="${1:-Continue?}" + local reply + read -rp "$prompt (y/n): " reply + [[ "$reply" =~ ^[Yy] ]] +} + +# Check if a command exists +require_cmd() { + local cmd="$1" + local msg="${2:-$cmd is required but not installed}" + if ! command -v "$cmd" &>/dev/null; then + fatal "$msg" + fi +} + +# Spinner for long-running commands +spinner() { + local pid=$1 + local msg="${2:-Working...}" + local spin='⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏' + local i=0 + while kill -0 "$pid" 2>/dev/null; do + printf "\r${CYAN}%s${NC} %s" "${spin:i++%${#spin}:1}" "$msg" + sleep 0.1 + done + printf "\r" +} + +# Run a command with spinner +run_with_spinner() { + local msg="$1" + shift + "$@" &>/dev/null & + local pid=$! + spinner "$pid" "$msg" + wait "$pid" + local rc=$? 
+  if [[ $rc -eq 0 ]]; then
+    success "$msg"
+  else
+    error "$msg (exit code $rc)"
+    return $rc
+  fi
+}
+
+# Get the install directory (where bates-core/ scripts live)
+get_install_dir() {
+  local script_dir
+  script_dir="$(cd "$(dirname "${BASH_SOURCE[1]:-${BASH_SOURCE[0]}}")" && pwd)"
+  # If called from lib/, go up one level
+  if [[ "$(basename "$script_dir")" == "lib" ]]; then
+    echo "$(dirname "$script_dir")"
+  else
+    echo "$script_dir"
+  fi
+}
+
+INSTALL_DIR="$(get_install_dir)"
diff --git a/bates-core/lib/prerequisites.sh b/bates-core/lib/prerequisites.sh
new file mode 100755
index 0000000..d7e4299
--- /dev/null
+++ b/bates-core/lib/prerequisites.sh
@@ -0,0 +1,105 @@
+#!/usr/bin/env bash
+# prerequisites.sh -- System prerequisite checks for Bates Core
+# Called from core-setup.sh to verify the environment is suitable
+
+source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
+
+check_wsl2() {
+  if [[ -f /proc/version ]] && grep -qi microsoft /proc/version; then
+    success "Running inside WSL2"
+    return 0
+  else
+    error "Not running inside WSL2"
+    return 1
+  fi
+}
+
+check_ubuntu() {
+  if [[ -f /etc/os-release ]]; then
+    local version
+    version=$(grep VERSION_ID /etc/os-release | cut -d'"' -f2)
+    if [[ "$version" == "24.04" ]]; then
+      success "Ubuntu 24.04 detected"
+      return 0
+    else
+      warn "Ubuntu $version detected (24.04 recommended)"
+      return 0
+    fi
+  else
+    error "Cannot determine Linux distribution"
+    return 1
+  fi
+}
+
+check_ram() {
+  local min_gb="${1:-8}"
+  local total_kb
+  total_kb=$(grep MemTotal /proc/meminfo | awk '{print $2}')
+  local total_gb=$(( total_kb / 1048576 ))
+
+  if [[ $total_gb -ge $min_gb ]]; then
+    success "RAM: ${total_gb}GB (minimum ${min_gb}GB)"
+    return 0
+  else
+    error "Insufficient RAM: ${total_gb}GB (minimum ${min_gb}GB)"
+    return 1
+  fi
+}
+
+check_disk() {
+  local min_gb="${1:-20}"
+  local avail_kb
+  avail_kb=$(df -k "$HOME" | tail -1 | awk '{print $4}')
+  local avail_gb=$(( avail_kb / 1048576 ))
+
+  if [[ $avail_gb -ge $min_gb ]]; then
+    success "Disk space: ${avail_gb}GB free (minimum ${min_gb}GB)"
+    return 0
+  else
+    error "Insufficient disk space: ${avail_gb}GB (minimum ${min_gb}GB)"
+    return 1
+  fi
+}
+
+check_internet() {
+  if curl -sf --max-time 10 https://github.com &>/dev/null; then
+    success "Internet connection OK"
+    return 0
+  else
+    error "No internet connection (cannot reach github.com)"
+    return 1
+  fi
+}
+
+check_systemd() {
+  if systemctl --user status &>/dev/null; then
+    success "systemd user session available"
+    return 0
+  else
+    error "systemd user session not available"
+    echo "  This may require WSL2 with systemd enabled."
+    echo "  Add [boot] systemd=true to /etc/wsl.conf and restart WSL2."
+    return 1
+  fi
+}
+
+# Run all prerequisite checks, fail if any critical check fails
+run_all_checks() {
+  local failures=0
+
+  info "Checking prerequisites..."
+  echo ""
+
+  # Use 'failures=$((failures + 1))' rather than '((failures++))': the
+  # post-increment evaluates to 0 on the first failure, which set -e would
+  # treat as an error and abort the script.
+  check_wsl2 || failures=$((failures + 1))
+  check_ubuntu || true  # non-critical
+  check_ram 8 || failures=$((failures + 1))
+  check_disk 20 || failures=$((failures + 1))
+  check_internet || failures=$((failures + 1))
+  check_systemd || failures=$((failures + 1))
+
+  echo ""
+  if [[ $failures -gt 0 ]]; then
+    fatal "$failures prerequisite check(s) failed. Fix the issues above and try again."
+  fi
+  success "All prerequisite checks passed."
+}
diff --git a/bates-core/lib/template-engine.sh b/bates-core/lib/template-engine.sh
new file mode 100755
index 0000000..9f74275
--- /dev/null
+++ b/bates-core/lib/template-engine.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+# template-engine.sh -- Replace {{PLACEHOLDER}} variables in template files
+#
+# Usage:
+#   source lib/template-engine.sh
+#   export ASSISTANT_NAME="Bates" USER_NAME="Robert"
+#   template_render "input.template" "output.conf"
+#
+# Placeholders use the format {{VAR_NAME}} where VAR_NAME matches
+# an exported environment variable. Unset variables are left as-is.
+
+template_render() {
+  local template="$1"
+  local output="$2"
+
+  if [[ ! -f "$template" ]]; then
+    echo "ERROR: Template not found: $template" >&2
+    return 1
+  fi
+
+  cp "$template" "$output"
+
+  # Find all {{VAR}} placeholders in the output file
+  local vars
+  vars=$(grep -oP '\{\{[A-Z_][A-Z0-9_]*\}\}' "$output" 2>/dev/null | sort -u) || true
+
+  for var_with_braces in $vars; do
+    # Strip {{ and }}
+    local var_name="${var_with_braces#\{\{}"
+    var_name="${var_name%\}\}}"
+
+    # Get the value from the environment
+    local var_value="${!var_name:-}"
+
+    if [[ -n "$var_value" ]]; then
+      # Escape characters that are special in the sed replacement:
+      # backslash, ampersand, and the '|' delimiter used below.
+      local escaped_value
+      escaped_value=$(printf '%s' "$var_value" | sed 's/[&|\]/\\&/g')
+      sed -i "s|{{${var_name}}}|${escaped_value}|g" "$output"
+    fi
+  done
+}
+
+# Render a template string (stdin) to stdout
+template_render_string() {
+  local content
+  content=$(cat)
+
+  local vars
+  vars=$(echo "$content" | grep -oP '\{\{[A-Z_][A-Z0-9_]*\}\}' 2>/dev/null | sort -u) || true
+
+  for var_with_braces in $vars; do
+    local var_name="${var_with_braces#\{\{}"
+    var_name="${var_name%\}\}}"
+    local var_value="${!var_name:-}"
+    if [[ -n "$var_value" ]]; then
+      local escaped_value
+      escaped_value=$(printf '%s' "$var_value" | sed 's/[&|\]/\\&/g')
+      content=$(echo "$content" | sed "s|{{${var_name}}}|${escaped_value}|g")
+    fi
+  done
+
+  echo "$content"
+}
diff --git a/bates-core/plugins/cost-tracker/index.ts b/bates-core/plugins/cost-tracker/index.ts
new file mode 100644
index 0000000..cd6b7af
--- /dev/null
+++ b/bates-core/plugins/cost-tracker/index.ts
@@ -0,0 +1,773 @@
+import { readFileSync, writeFileSync, existsSync, mkdirSync, readdirSync, statSync } from "fs";
+import { join, dirname } from "path";
+import { fileURLToPath } from "url";
+import { homedir } from "os";
+import {
+  emptyPluginConfigSchema,
+  onDiagnosticEvent,
+} from "openclaw/plugin-sdk";
+import type {
+  OpenClawPluginApi,
+  DiagnosticUsageEvent,
+  DiagnosticEventPayload,
+} from "openclaw/plugin-sdk";
+
+// ---------------------------------------------------------------------------
+// globalThis bridge for diagnostic events (future-proofing)
+// ---------------------------------------------------------------------------
+// BUG: onDiagnosticEvent from "openclaw/plugin-sdk" registers on a separate
+// `listeners` Set (in dist/plugin-sdk/index.js) from where the gateway emits
+// events (dist/extensionAPI.js has its own `listeners$3`). Both are inlined
+// copies of src/infra/diagnostic-events.ts with no shared state. Plugins
+// loaded via jiti therefore never receive model.usage events.
+//
+// WORKAROUND: We scan session transcript JSONL files to extract usage data.
+// The globalThis bridge is registered for forward-compat if the gateway
+// core adds dispatch to globalThis.__openclawDiagnosticListeners.
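+//
+// Hypothetical shape of the gateway-side dispatch this bridge anticipates
+// (not in openclaw core today, per the bug note above):
+//   for (const fn of globalThis.__openclawDiagnosticListeners ?? []) fn(evt);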
+declare global {
+  // eslint-disable-next-line no-var
+  var __openclawDiagnosticListeners: Set<(evt: DiagnosticEventPayload) => void> | undefined;
+  // eslint-disable-next-line no-var
+  var __openclawMessageTransform: ((text: string, meta: { channel: string; to: string }) => string) | undefined;
+}
+if (!globalThis.__openclawDiagnosticListeners) {
+  globalThis.__openclawDiagnosticListeners = new Set();
+}
+
+// ---------------------------------------------------------------------------
+// Constants
+// ---------------------------------------------------------------------------
+const PLUGIN_DIR = dirname(fileURLToPath(import.meta.url));
+const DATA_DIR = join(PLUGIN_DIR, "data");
+const DAILY_FILE = join(DATA_DIR, "daily-costs.json");
+const OFFSETS_FILE = join(DATA_DIR, "scan-offsets.json");
+const OPENCLAW_DIR = join(homedir(), ".openclaw");
+const AGENTS_DIR = join(OPENCLAW_DIR, "agents");
+const AUTH_PROFILES_FILE = join(AGENTS_DIR, "main", "agent", "auth-profiles.json");
+
+// How often to scan session files (ms)
+const SCAN_INTERVAL_MS = 60_000;
+
+// Anthropic model prefix for zero-cost detection under subscription
+const ANTHROPIC_MODEL_PREFIXES = ["claude-"];
+
+// Cost per million tokens (fallback when transcript has no cost field)
+const MODEL_COSTS: Record<
+  string,
+  { input: number; output: number; cacheRead: number; cacheWrite: number }
+> = {
+  "claude-sonnet-4-5-20250929": { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
+  "claude-haiku-4-5-20251001": { input: 1, output: 5, cacheRead: 0.1, cacheWrite: 1.25 },
+  "claude-opus-4-5-20251101": { input: 15, output: 75, cacheRead: 1.5, cacheWrite: 18.75 },
+  "claude-opus-4-6": { input: 15, output: 75, cacheRead: 1.5, cacheWrite: 18.75 },
+  "gemini-2.5-flash": { input: 0.15, output: 0.6, cacheRead: 0, cacheWrite: 0 },
+  "deepseek-chat": { input: 0.27, output: 1.1, cacheRead: 0, cacheWrite: 0 },
+  "sonar-pro": { input: 3, output: 15, cacheRead: 0, cacheWrite: 0 },
+};
+
+// ---------------------------------------------------------------------------
+// Subscription (token) profile detection
+// ---------------------------------------------------------------------------
+// When the active Anthropic auth profile is a "token" type (e.g. Claude Max
+// subscription), per-token costs are $0 since they're covered by the flat fee.
+// We cache this check and refresh it periodically (the file rarely changes).
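+//
+// Assumed auth-profiles.json shape, inferred from the reads below
+// ("subMax" is a placeholder profile name):
+//   { "lastGood": { "anthropic": "subMax" },
+//     "profiles": { "subMax": { "type": "token" } } }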
+let _isAnthropicSubscription: boolean | null = null;
+let _subscriptionCheckAt = 0;
+const SUBSCRIPTION_CHECK_INTERVAL_MS = 300_000; // re-check every 5 minutes
+
+function isAnthropicSubscription(): boolean {
+  const now = Date.now();
+  if (_isAnthropicSubscription !== null && now - _subscriptionCheckAt < SUBSCRIPTION_CHECK_INTERVAL_MS) {
+    return _isAnthropicSubscription;
+  }
+  _subscriptionCheckAt = now;
+  try {
+    if (!existsSync(AUTH_PROFILES_FILE)) {
+      _isAnthropicSubscription = false;
+      return false;
+    }
+    const data = JSON.parse(readFileSync(AUTH_PROFILES_FILE, "utf-8"));
+    const activeProfile = data?.lastGood?.anthropic;
+    if (!activeProfile) {
+      _isAnthropicSubscription = false;
+      return false;
+    }
+    const profileDef = data?.profiles?.[activeProfile];
+    _isAnthropicSubscription = profileDef?.type === "token";
+    return _isAnthropicSubscription;
+  } catch {
+    _isAnthropicSubscription = false;
+    return false;
+  }
+}
+
+function isAnthropicModel(model: string | undefined): boolean {
+  if (!model) return false;
+  return ANTHROPIC_MODEL_PREFIXES.some((prefix) => model.startsWith(prefix));
+}
+
+// ---------------------------------------------------------------------------
+// Types
+// ---------------------------------------------------------------------------
+interface InteractionCost {
+  timestamp: number;
+  model?: string;
+  provider?: string;
+  inputTokens: number;
+  outputTokens: number;
+  cacheReadTokens: number;
+  cacheWriteTokens: number;
+  totalTokens: number;
+  costUsd: number;
+  sessionKey?: string;
+}
+
+interface DailyCosts {
+  [dateKey: string]: {
+    totalCost: number;
+    totalTokens: number;
+    interactions: number;
+    byModel: Record<
+      string,
+      {
+        cost: number;
+        tokens: number;
+        count: number;
+      }
+    >;
+  };
+}
+
+interface SessionAccumulator {
+  totalCost: number;
+  totalTokens: number;
+  interactions: number;
+  lastInteractionCost: number;
+  lastInteractionTokens: number;
+  lastModel?: string;
+  startedAt: number;
+}
+
+// ---------------------------------------------------------------------------
+// State
+// ---------------------------------------------------------------------------
+let dailyCosts: DailyCosts = {};
+const sessionAccumulators = new Map<string, SessionAccumulator>();
+let globalAccumulator: SessionAccumulator = {
+  totalCost: 0,
+  totalTokens: 0,
+  interactions: 0,
+  lastInteractionCost: 0,
+  lastInteractionTokens: 0,
+  startedAt: Date.now(),
+};
+
+let lastInteraction: InteractionCost | null = null;
+let diagnosticEventsReceived = 0; // track if onDiagnosticEvent works
+let scanTimer: ReturnType<typeof setInterval> | null = null;
+
+// Track which JSONL lines we've already processed (by file + byte offset)
+// Persisted to disk to survive gateway restarts and prevent double-counting.
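+// Example scan-offsets.json payload: transcript path -> last scanned byte
+// offset (the concrete path below is illustrative):
+//   { "/home/user/.openclaw/agents/main/sessions/abc123.jsonl": 48213 }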
+let scannedOffsets = new Map<string, number>();
+
+function loadScannedOffsets(): Map<string, number> {
+  try {
+    if (existsSync(OFFSETS_FILE)) {
+      const data = JSON.parse(readFileSync(OFFSETS_FILE, "utf-8"));
+      return new Map<string, number>(Object.entries(data));
+    }
+  } catch {}
+  return new Map();
+}
+
+function saveScannedOffsets(): void {
+  try {
+    if (!existsSync(DATA_DIR)) {
+      mkdirSync(DATA_DIR, { recursive: true });
+    }
+    const obj: Record<string, number> = {};
+    for (const [k, v] of scannedOffsets) obj[k] = v;
+    writeFileSync(OFFSETS_FILE, JSON.stringify(obj));
+  } catch {}
+}
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+// "en-CA" formats dates as YYYY-MM-DD in the host's local time zone.
+function todayKey(): string {
+  return new Date().toLocaleDateString("en-CA", {
+    timeZone: Intl.DateTimeFormat().resolvedOptions().timeZone,
+  });
+}
+
+function dateKeyFromTimestamp(ts: number): string {
+  return new Date(ts).toLocaleDateString("en-CA", {
+    timeZone: Intl.DateTimeFormat().resolvedOptions().timeZone,
+  });
+}
+
+function formatUsd(value: number): string {
+  if (value >= 0.01) return `$${value.toFixed(2)}`;
+  if (value >= 0.001) return `$${value.toFixed(3)}`;
+  return `$${value.toFixed(4)}`;
+}
+
+function formatTokens(value: number): string {
+  if (value >= 1_000_000) return `${(value / 1_000_000).toFixed(1)}M`;
+  if (value >= 10_000) return `${Math.round(value / 1_000)}k`;
+  if (value >= 1_000) return `${(value / 1_000).toFixed(1)}k`;
+  return String(Math.round(value));
+}
+
+// Example (illustrative): 1,000 input + 500 output tokens on
+// claude-sonnet-4-5-20250929 -> (1000 * 3 + 500 * 15) / 1e6 = $0.0105.
+function estimateCost(
+  model: string | undefined,
+  input: number,
+  output: number,
+  cacheRead: number,
+  cacheWrite: number
+): number {
+  // Anthropic models are free under Claude Max subscription (token profile)
+  if (isAnthropicModel(model) && isAnthropicSubscription()) return 0;
+
+  const costs = model ? MODEL_COSTS[model] : undefined;
+  if (!costs) return 0;
+  return (
+    (input * costs.input +
+      output * costs.output +
+      cacheRead * costs.cacheRead +
+      cacheWrite * costs.cacheWrite) /
+    1_000_000
+  );
+}
+
+function loadDailyCosts(): DailyCosts {
+  try {
+    if (existsSync(DAILY_FILE)) {
+      return JSON.parse(readFileSync(DAILY_FILE, "utf-8"));
+    }
+  } catch {}
+  return {};
+}
+
+function saveDailyCosts(): void {
+  try {
+    if (!existsSync(DATA_DIR)) {
+      mkdirSync(DATA_DIR, { recursive: true });
+    }
+    writeFileSync(DAILY_FILE, JSON.stringify(dailyCosts, null, 2));
+  } catch {}
+}
+
+function getOrCreateSession(sessionKey: string): SessionAccumulator {
+  let acc = sessionAccumulators.get(sessionKey);
+  if (!acc) {
+    acc = {
+      totalCost: 0,
+      totalTokens: 0,
+      interactions: 0,
+      lastInteractionCost: 0,
+      lastInteractionTokens: 0,
+      startedAt: Date.now(),
+    };
+    sessionAccumulators.set(sessionKey, acc);
+  }
+  return acc;
+}
+
+function recordUsage(event: DiagnosticUsageEvent): void {
+  const input = event.usage?.input ?? 0;
+  const output = event.usage?.output ?? 0;
+  const cacheRead = event.usage?.cacheRead ?? 0;
+  const cacheWrite = event.usage?.cacheWrite ?? 0;
+  const totalTokens =
+    event.usage?.total ?? input + output + cacheRead + cacheWrite;
+
+  // Anthropic models are free under Claude Max subscription (token profile)
+  const subscriptionZero = isAnthropicModel(event.model) && isAnthropicSubscription();
+  const costUsd = subscriptionZero ? 0 :
+    (event.costUsd ?? estimateCost(event.model, input, output, cacheRead, cacheWrite));
+
+  const interaction: InteractionCost = {
+    timestamp: event.ts ??
Date.now(), + model: event.model, + provider: event.provider, + inputTokens: input, + outputTokens: output, + cacheReadTokens: cacheRead, + cacheWriteTokens: cacheWrite, + totalTokens, + costUsd, + sessionKey: event.sessionKey, + }; + + lastInteraction = interaction; + + const sessionKey = event.sessionKey ?? "__global__"; + const session = getOrCreateSession(sessionKey); + session.totalCost += costUsd; + session.totalTokens += totalTokens; + session.interactions += 1; + session.lastInteractionCost = costUsd; + session.lastInteractionTokens = totalTokens; + session.lastModel = event.model; + + globalAccumulator.totalCost += costUsd; + globalAccumulator.totalTokens += totalTokens; + globalAccumulator.interactions += 1; + globalAccumulator.lastInteractionCost = costUsd; + globalAccumulator.lastInteractionTokens = totalTokens; + globalAccumulator.lastModel = event.model; + + const day = dateKeyFromTimestamp(interaction.timestamp); + if (!dailyCosts[day]) { + dailyCosts[day] = { + totalCost: 0, + totalTokens: 0, + interactions: 0, + byModel: {}, + }; + } + const dayBucket = dailyCosts[day]; + dayBucket.totalCost += costUsd; + dayBucket.totalTokens += totalTokens; + dayBucket.interactions += 1; + + const modelKey = event.model ?? "unknown"; + if (!dayBucket.byModel[modelKey]) { + dayBucket.byModel[modelKey] = { cost: 0, tokens: 0, count: 0 }; + } + dayBucket.byModel[modelKey].cost += costUsd; + dayBucket.byModel[modelKey].tokens += totalTokens; + dayBucket.byModel[modelKey].count += 1; + + if (globalAccumulator.interactions % 10 === 0) { + saveDailyCosts(); + } +} + +// --------------------------------------------------------------------------- +// Session transcript JSONL scanner (fallback for broken diagnostic events) +// --------------------------------------------------------------------------- +// Scans session JSONL files for assistant messages with usage data. +// Only processes new lines since the last scan (tracked by byte offset). +function recordFromTranscript( + model: string | undefined, + usage: { input?: number; output?: number; cacheRead?: number; cacheWrite?: number; totalTokens?: number; cost?: { total?: number } }, + timestamp: number, + provider?: string, +): void { + const input = usage.input ?? 0; + const output = usage.output ?? 0; + const cacheRead = usage.cacheRead ?? 0; + const cacheWrite = usage.cacheWrite ?? 0; + const totalTokens = usage.totalTokens ?? (input + output + cacheRead + cacheWrite); + + // Anthropic models are free under Claude Max subscription (token profile). + // Override any cost from the API response since it still reports billing rates. + const subscriptionZero = isAnthropicModel(model) && isAnthropicSubscription(); + const costUsd = subscriptionZero ? 0 : (usage.cost?.total ?? 
estimateCost(model, input, output, cacheRead, cacheWrite));
+
+  const interaction: InteractionCost = {
+    timestamp,
+    model,
+    provider,
+    inputTokens: input,
+    outputTokens: output,
+    cacheReadTokens: cacheRead,
+    cacheWriteTokens: cacheWrite,
+    totalTokens,
+    costUsd,
+  };
+
+  lastInteraction = interaction;
+
+  globalAccumulator.totalCost += costUsd;
+  globalAccumulator.totalTokens += totalTokens;
+  globalAccumulator.interactions += 1;
+  globalAccumulator.lastInteractionCost = costUsd;
+  globalAccumulator.lastInteractionTokens = totalTokens;
+  globalAccumulator.lastModel = model;
+
+  const day = dateKeyFromTimestamp(timestamp);
+  if (!dailyCosts[day]) {
+    dailyCosts[day] = { totalCost: 0, totalTokens: 0, interactions: 0, byModel: {} };
+  }
+  const bucket = dailyCosts[day];
+  bucket.totalCost += costUsd;
+  bucket.totalTokens += totalTokens;
+  bucket.interactions += 1;
+
+  const mk = model ?? "unknown";
+  if (!bucket.byModel[mk]) {
+    bucket.byModel[mk] = { cost: 0, tokens: 0, count: 0 };
+  }
+  bucket.byModel[mk].cost += costUsd;
+  bucket.byModel[mk].tokens += totalTokens;
+  bucket.byModel[mk].count += 1;
+}
+
+function scanSessionFiles(logger?: { debug: (...args: unknown[]) => void }): void {
+  // Skip if onDiagnosticEvent is actually working
+  if (diagnosticEventsReceived > 0) return;
+
+  let newEntries = 0;
+  try {
+    if (!existsSync(AGENTS_DIR)) return;
+    const agentDirs = readdirSync(AGENTS_DIR, { withFileTypes: true })
+      .filter((d) => d.isDirectory());
+
+    for (const agentDir of agentDirs) {
+      const sessionsDir = join(AGENTS_DIR, agentDir.name, "sessions");
+      if (!existsSync(sessionsDir)) continue;
+
+      let files: string[];
+      try {
+        files = readdirSync(sessionsDir).filter((f) => f.endsWith(".jsonl"));
+      } catch {
+        continue;
+      }
+
+      for (const file of files) {
+        const filePath = join(sessionsDir, file);
+        let fileSize: number;
+        try {
+          fileSize = statSync(filePath).size;
+        } catch {
+          continue;
+        }
+
+        const prevOffset = scannedOffsets.get(filePath) ?? 0;
+        if (fileSize <= prevOffset) continue;
+
+        // Keep only the bytes past the last processed offset. readFileSync +
+        // subarray is used because require() is not available in this ESM module.
+        let newContent: string;
+        try {
+          newContent = readFileSync(filePath).subarray(prevOffset).toString("utf-8");
+        } catch {
+          continue;
+        }
+
+        scannedOffsets.set(filePath, fileSize);
+
+        // Parse each line
+        const lines = newContent.split("\n");
+        for (const line of lines) {
+          if (!line.trim()) continue;
+          try {
+            const entry = JSON.parse(line);
+            // Look for assistant messages with usage data
+            if (
+              entry.type === "message" &&
+              entry.message?.role === "assistant" &&
+              entry.message?.usage
+            ) {
+              const msg = entry.message;
+              const ts = msg.timestamp ?? (entry.timestamp ? new Date(entry.timestamp).getTime() : Date.now());
+              recordFromTranscript(msg.model, msg.usage, ts, msg.provider);
+              newEntries++;
+            }
+          } catch {
+            // Skip malformed lines
+          }
+        }
+      }
+    }
+  } catch {
+    // Silently handle scan errors
+  }
+
+  if (newEntries > 0) {
+    saveDailyCosts();
+    saveScannedOffsets();
+    logger?.debug(`cost-tracker: scanned ${newEntries} new usage entries from session transcripts`);
+  }
+}
+
+// Example footers (illustrative): "_turn: $0.01 · today: $1.23_" with API
+// billing, or "_turn: 1.2k tokens · today: 48k tokens_" under a subscription.
+function buildCostFooter(turnCostSnapshot?: { cost: number; tokens: number }): string {
+  const day = todayKey();
+  const dayData = dailyCosts[day];
+  const dailyTotal = dayData?.totalCost ?? 0;
+  const dailyTokens = dayData?.totalTokens ??
0; + + // Use snapshot if provided (captures cost delta for this specific turn) + const turnCost = turnCostSnapshot?.cost ?? lastInteraction?.costUsd ?? 0; + const turnTokens = turnCostSnapshot?.tokens ?? lastInteraction?.totalTokens ?? 0; + + if (turnCost === 0 && dailyTotal === 0 && turnTokens === 0 && dailyTokens === 0) return ""; + + // Show tokens when costs are $0 (subscription), costs when > $0, or both + if (dailyTotal === 0 && turnCost === 0) { + // Subscription mode: show token counts only + const turnPart = turnTokens > 0 ? formatTokens(turnTokens) : "0"; + return `\n\n_turn: ${turnPart} tokens · today: ${formatTokens(dailyTokens)} tokens_`; + } + + return `\n\n_turn: ${formatUsd(turnCost)} · today: ${formatUsd(dailyTotal)}_`; +} + +// --------------------------------------------------------------------------- +// Plugin +// --------------------------------------------------------------------------- +const plugin = { + id: "cost-tracker", + name: "Cost Tracker", + description: "Tracks per-interaction API costs and appends cost footer to responses", + configSchema: emptyPluginConfigSchema(), + + register(api: OpenClawPluginApi) { + dailyCosts = loadDailyCosts(); + scannedOffsets = loadScannedOffsets(); + + // ----------------------------------------------------------------------- + // 1. Listen to diagnostic usage events (broken due to module isolation, + // but kept for forward-compat if core fix lands) + // ----------------------------------------------------------------------- + const diagnosticHandler = (evt: DiagnosticEventPayload) => { + if (evt.type === "model.usage") { + diagnosticEventsReceived++; + recordUsage(evt as DiagnosticUsageEvent); + } + }; + const unsubscribe = onDiagnosticEvent(diagnosticHandler); + + // Also register on globalThis bridge for future gateway versions + globalThis.__openclawDiagnosticListeners!.add(diagnosticHandler); + + // ----------------------------------------------------------------------- + // 2. Session transcript scanner (primary data source until bug is fixed) + // ----------------------------------------------------------------------- + // Initial scan on startup: catch up with any usage since last restart + scanSessionFiles(api.logger as any); + + // Periodic scan every 60s + scanTimer = setInterval(() => { + scanSessionFiles(api.logger as any); + }, SCAN_INTERVAL_MS); + + // Also scan on agent_end to capture the latest interaction quickly + api.on("agent_end", () => { + // Small delay to let the transcript file flush + setTimeout(() => scanSessionFiles(api.logger as any), 2000); + }); + + // ----------------------------------------------------------------------- + // 3. Append cost footer via globalThis bridge + // ----------------------------------------------------------------------- + // The message_sending hook is defined in the gateway but never invoked. + // Instead, we use a globalThis.__openclawMessageTransform bridge that + // is called from a small patch in the gateway deliver module. + // Initialize preTurnDailyTotal from persisted data so first turn after + // restart doesn't show turn == today (was starting at 0). + const initDayData = dailyCosts[todayKey()]; + let preTurnDailyTotal = initDayData?.totalCost ?? 0; + let preTurnDailyTokens = initDayData?.totalTokens ?? 0; + api.on("message_received", () => { + scanSessionFiles(api.logger as any); + const dayData = dailyCosts[todayKey()]; + preTurnDailyTotal = dayData?.totalCost ?? 0; + preTurnDailyTokens = dayData?.totalTokens ?? 
0; + }); + + globalThis.__openclawMessageTransform = (text: string, _meta: { channel: string; to: string }) => { + if (!text || !text.trim()) return text; + if (text.startsWith("[Tool:")) return text; + + // Scan transcripts to capture this turn's usage + scanSessionFiles(api.logger as any); + + const dayData = dailyCosts[todayKey()]; + const currentDailyTotal = dayData?.totalCost ?? 0; + const currentDailyTokens = dayData?.totalTokens ?? 0; + const turnCost = currentDailyTotal - preTurnDailyTotal; + const turnTokens = currentDailyTokens - preTurnDailyTokens; + + const footer = buildCostFooter( + (turnCost > 0 || turnTokens > 0) ? { cost: turnCost, tokens: turnTokens } : undefined + ); + if (!footer) return text; + + return text + footer; + }; + + // ----------------------------------------------------------------------- + // 4. Hook into session_end to persist costs and clean up + // ----------------------------------------------------------------------- + api.on("session_end", (event, ctx) => { + saveDailyCosts(); + if (ctx.sessionId) { + sessionAccumulators.delete(ctx.sessionId); + } + }); + + // ----------------------------------------------------------------------- + // 5. Hook into gateway_stop to persist costs and clean up timer + // ----------------------------------------------------------------------- + api.on("gateway_stop", () => { + saveDailyCosts(); + saveScannedOffsets(); + if (scanTimer) { + clearInterval(scanTimer); + scanTimer = null; + } + globalThis.__openclawDiagnosticListeners?.delete(diagnosticHandler); + delete (globalThis as any).__openclawMessageTransform; + }); + + // ----------------------------------------------------------------------- + // 6. Register /cost command for on-demand cost report + // ----------------------------------------------------------------------- + api.registerCommand({ + name: "cost", + description: "Show current cost summary (today, session, per-model breakdown)", + acceptsArgs: true, + requireAuth: true, + handler: (ctx) => { + // Trigger a scan before reporting to get fresh data + scanSessionFiles(api.logger as any); + + const day = todayKey(); + const dayData = dailyCosts[day]; + + const lines: string[] = []; + lines.push("--- Cost Report ---"); + lines.push(""); + + if (dayData) { + lines.push( + `Today (${day}): ${formatUsd(dayData.totalCost)} | ${formatTokens(dayData.totalTokens)} tokens | ${dayData.interactions} API calls` + ); + lines.push(""); + lines.push("By model:"); + const sortedModels = Object.entries(dayData.byModel).sort( + ([, a], [, b]) => b.cost - a.cost + ); + for (const [model, data] of sortedModels) { + const pct = + dayData.totalCost > 0 + ? ((data.cost / dayData.totalCost) * 100).toFixed(0) + : "0"; + lines.push( + ` ${model}: ${formatUsd(data.cost)} (${pct}%) | ${formatTokens(data.tokens)} tok | ${data.count} calls` + ); + } + } else { + lines.push(`Today (${day}): No usage recorded yet.`); + } + + lines.push(""); + lines.push( + `Since gateway start: ${formatUsd(globalAccumulator.totalCost)} | ${formatTokens(globalAccumulator.totalTokens)} tokens | ${globalAccumulator.interactions} calls` + ); + lines.push( + `Data source: ${diagnosticEventsReceived > 0 ? 
"diagnostic events (real-time)" : "session transcript scan (60s interval)"}` + ); + + const last7 = []; + for (let i = 0; i < 7; i++) { + const d = new Date(); + d.setDate(d.getDate() - i); + const key = d.toLocaleDateString("en-CA", { + timeZone: Intl.DateTimeFormat().resolvedOptions().timeZone, + }); + const data = dailyCosts[key]; + if (data) { + last7.push({ date: key, ...data }); + } + } + + if (last7.length > 1) { + lines.push(""); + lines.push("Last 7 days:"); + let weekTotal = 0; + for (const d of last7) { + lines.push( + ` ${d.date}: ${formatUsd(d.totalCost)} | ${d.interactions} calls` + ); + weekTotal += d.totalCost; + } + lines.push(` Total: ${formatUsd(weekTotal)}`); + } + + return { text: lines.join("\n") }; + }, + }); + + // ----------------------------------------------------------------------- + // 7. Register HTTP API endpoint for cost data + // ----------------------------------------------------------------------- + api.registerHttpHandler(async (req: any, res: any): Promise => { + const url = new URL( + req.url ?? "/", + `http://${req.headers.host || "localhost"}` + ); + const pathname = url.pathname; + + if (pathname === "/cost-tracker/api/summary") { + // Trigger scan for fresh data + scanSessionFiles(api.logger as any); + + const day = todayKey(); + const dayData = dailyCosts[day]; + + const response = { + today: dayData ?? { + totalCost: 0, + totalTokens: 0, + interactions: 0, + byModel: {}, + }, + gatewaySession: { + totalCost: globalAccumulator.totalCost, + totalTokens: globalAccumulator.totalTokens, + interactions: globalAccumulator.interactions, + startedAt: globalAccumulator.startedAt, + }, + lastInteraction: lastInteraction + ? { + model: lastInteraction.model, + costUsd: lastInteraction.costUsd, + totalTokens: lastInteraction.totalTokens, + timestamp: lastInteraction.timestamp, + } + : null, + daily: dailyCosts, + dataSource: diagnosticEventsReceived > 0 ? "diagnostic-events" : "transcript-scan", + }; + + res.setHeader("Content-Type", "application/json"); + res.setHeader("Cache-Control", "no-cache"); + res.end(JSON.stringify(response)); + return true; + } + + if (pathname === "/cost-tracker/api/today") { + scanSessionFiles(api.logger as any); + const day = todayKey(); + const dayData = dailyCosts[day] ?? 
{ + totalCost: 0, + totalTokens: 0, + interactions: 0, + byModel: {}, + }; + + res.setHeader("Content-Type", "application/json"); + res.setHeader("Cache-Control", "no-cache"); + res.end(JSON.stringify({ date: day, ...dayData })); + return true; + } + + return false; + }); + + api.logger.info( + "Cost tracker plugin registered: /cost command, globalThis message transform, transcript scanner (60s), HTTP API at /cost-tracker/api/*" + ); + }, +}; + +export default plugin; diff --git a/bates-core/plugins/cost-tracker/openclaw.plugin.json b/bates-core/plugins/cost-tracker/openclaw.plugin.json new file mode 100644 index 0000000..e6f420c --- /dev/null +++ b/bates-core/plugins/cost-tracker/openclaw.plugin.json @@ -0,0 +1,10 @@ +{ + "id": "cost-tracker", + "name": "Cost Tracker", + "description": "Tracks per-interaction and daily API costs, appends cost footer to every Bates response", + "configSchema": { + "type": "object", + "additionalProperties": false, + "properties": {} + } +} diff --git a/bates-core/plugins/dashboard/index.ts b/bates-core/plugins/dashboard/index.ts new file mode 100644 index 0000000..ca29d20 --- /dev/null +++ b/bates-core/plugins/dashboard/index.ts @@ -0,0 +1,874 @@ +import { readFileSync, writeFileSync, readdirSync, statSync, existsSync, openSync, readSync, closeSync, mkdirSync, unlinkSync } from "fs"; +import { execSync, execFileSync } from "child_process"; +import { join, resolve, extname, dirname } from "path"; +import { fileURLToPath } from "url"; +import { homedir } from "os"; +import { emptyPluginConfigSchema } from "openclaw/plugin-sdk"; +import type { OpenClawPluginApi } from "openclaw/plugin-sdk"; + +const PLUGIN_DIR = dirname(fileURLToPath(import.meta.url)); +const STATIC_DIR = join(PLUGIN_DIR, "static"); +const OPENCLAW_HOME = join(homedir(), ".openclaw"); +const WORKSPACE = join(OPENCLAW_HOME, "workspace"); + +// Resolve the Control UI directory from the openclaw package installation +// This path may vary depending on how openclaw is installed (global npm, local, etc.) 
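+// A sketch of a more robust lookup (assumes openclaw is resolvable from this
+// plugin's location; createRequire and require.resolve are standard Node APIs):
+//   import { createRequire } from "module";
+//   const req = createRequire(import.meta.url);
+//   const uiDir = join(dirname(req.resolve("openclaw/package.json")), "dist", "control-ui");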
+function findControlUiDir(): string { + // Try common locations + const candidates = [ + join(homedir(), ".npm-global/lib/node_modules/openclaw/dist/control-ui"), + join("/usr/local/lib/node_modules/openclaw/dist/control-ui"), + join("/usr/lib/node_modules/openclaw/dist/control-ui"), + ]; + for (const c of candidates) { + if (existsSync(c)) return c; + } + return candidates[0]; // fallback +} +const CONTROL_UI_DIR = findControlUiDir(); + +// Injected into Control UI: sidebar nav item + top banner for mobile +const DASHBOARD_LINK_SNIPPET = ` + + + + + + + + + + + + + + + + + + + + + diff --git a/bates-core/plugins/dashboard/static/js/app.js b/bates-core/plugins/dashboard/static/js/app.js new file mode 100644 index 0000000..2542e70 --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/app.js @@ -0,0 +1,379 @@ +/** + * Bates Command Center — App Controller v4 + * 5 tabs · persistent chat drawer · glassmorphism + */ +(function () { + const panels = {}; + let gateway = null; + let currentView = 'overview'; + + const VIEW_PANELS = { + overview: ['ceo', 'tasks', 'status', 'agents', 'files', 'crons'], + agents: ['agents'], + operations: ['crons', 'delegations', 'integrations', 'costs', 'settings'], + standup: ['standup'], + memory: ['memory'], + }; + + const DASH_API_BASE = ''; + + window.Dashboard = { + DASH_API: DASH_API_BASE, + registerPanel(id, mod) { panels[id] = mod; }, + getGateway() { return gateway; }, + + async fetchApi(ep) { + try { + const headers = {}; + const token = window.__GATEWAY_CONFIG?.token; + if (token) headers['Authorization'] = 'Bearer ' + token; + return await (await fetch(`/dashboard/api/${ep}`, { headers })).json(); + } + catch (e) { console.error(`API ${ep}:`, e); return null; } + }, + + // Compact task row for project detail modals (spreadsheet-dense) + renderTaskRowCompact(t) { + const done = t.completed; + const overdue = !done && t.dueDate && t.dueDate < new Date().toISOString().slice(0, 10); + const PRI_COLORS = { urgent: '#ff4757', important: '#ffa502', medium: '#00d4ff', low: '#747d8c' }; + const taskUrl = t.source === 'To Do' + ? `https://to-do.office.com/tasks/id/${t.id}/details` + : `https://tasks.office.com/TENANT.example.com/Home/Task/${t.id}`; + return ` + + + ${Dashboard.esc(t.title || '—')} + ${t.dueDate || ''} + `; + }, + + // Shared task row renderer used by panel-tasks.js and project detail modals + renderTaskRow(t, opts) { + opts = opts || {}; + const done = t.completed; + const overdue = !done && t.dueDate && t.dueDate < new Date().toISOString().slice(0, 10); + const PRI_COLORS = { urgent: '#ff4757', important: '#ffa502', medium: '#00d4ff', low: '#747d8c' }; + const taskUrl = t.source === 'To Do' + ? `https://to-do.office.com/tasks/id/${t.id}/details` + : `https://tasks.office.com/TENANT.example.com/Home/Task/${t.id}`; + return `
+ + +
+
${Dashboard.esc(t.title || '—')}
+
+ ${t.dueDate ? '📅 ' + t.dueDate : ''} + ${Dashboard.esc(t.planName || '')} + ${Dashboard.esc(t.source || '')} + ${t.checklistTotal ? `☑ ${t.checklistDone}/${t.checklistTotal}` : ''} + ${t.percentComplete > 0 && t.percentComplete < 100 ? `${t.percentComplete}%` : ''} +
+
+
`; + }, + + // Wire click and complete handlers on task rows within a container + wireTaskRows(container, onComplete) { + if (!container) return; + container.querySelectorAll('.task-row-clickable').forEach(el => { + el.style.cursor = 'pointer'; + el.addEventListener('click', (e) => { + e.stopPropagation(); + const url = el.dataset.url; + if (url) window.open(url, '_blank'); + }); + }); + container.querySelectorAll('.task-complete-btn').forEach(btn => { + btn.addEventListener('click', async (e) => { + e.stopPropagation(); + const row = btn.closest('.task-row-shared'); + if (!row || row.classList.contains('done')) return; + btn.disabled = true; + btn.textContent = '⏳'; + try { + const headers = { 'Content-Type': 'application/json' }; + const token = window.__GATEWAY_CONFIG?.token; + if (token) headers['Authorization'] = 'Bearer ' + token; + const resp = await fetch('/dashboard/api/tasks/complete', { + method: 'POST', headers, + body: JSON.stringify({ taskId: row.dataset.taskId, source: row.dataset.source, project: row.dataset.project }) + }); + const result = await resp.json(); + if (result.success) { + row.classList.add('done'); + btn.textContent = '✓'; + btn.style.background = 'var(--green)'; + btn.style.borderColor = 'var(--green)'; + btn.style.color = '#fff'; + if (onComplete) onComplete(); + } else { + btn.textContent = '✗'; + btn.style.color = 'var(--red)'; + setTimeout(() => { btn.textContent = '✓'; btn.style.color = ''; btn.disabled = false; }, 2000); + } + } catch { + btn.textContent = '✗'; + setTimeout(() => { btn.textContent = '✓'; btn.disabled = false; }, 2000); + } + }); + }); + }, + + timeAgo(d) { + if (!d) return 'never'; + const ms = Date.now() - new Date(d).getTime(); + if (ms < 0) { const a = -ms; return a < 60e3 ? `in ${(a/1e3)|0}s` : a < 36e5 ? `in ${(a/6e4)|0}m` : a < 864e5 ? `in ${(a/36e5)|0}h` : `in ${(a/864e5)|0}d`; } + return ms < 60e3 ? `${(ms/1e3)|0}s ago` : ms < 36e5 ? `${(ms/6e4)|0}m ago` : ms < 864e5 ? `${(ms/36e5)|0}h ago` : `${(ms/864e5)|0}d ago`; + }, + formatSize(b) { return b < 1024 ? b+'B' : b < 1048576 ? (b/1024).toFixed(1)+'KB' : (b/1048576).toFixed(1)+'MB'; }, + esc(s) { const d = document.createElement('div'); d.textContent = s; return d.innerHTML; }, + }; + + // ─── Navigation ─── + function switchView(id) { + if (!VIEW_PANELS[id]) return; + currentView = id; + document.querySelectorAll('.view').forEach(v => v.classList.remove('active')); + document.getElementById('view-' + id)?.classList.add('active'); + document.querySelectorAll('.nav-tab').forEach(n => n.classList.remove('active')); + document.querySelectorAll(`.nav-tab[data-view="${id}"]`).forEach(n => n.classList.add('active')); + for (const pid of VIEW_PANELS[id]) { + try { panels[pid]?.refresh?.(gateway); } catch (e) { console.error(`Refresh ${pid}:`, e); } + } + } + + // ─── Chat Drawer ─── + function setupChatDrawer() { + const drawer = document.getElementById('chat-drawer'); + const toggle = document.getElementById('chat-toggle-btn'); + const close = document.getElementById('chat-drawer-close'); + if (!drawer || !toggle) return; + + function setOpen(open) { + drawer.classList.toggle('open', open); + toggle.classList.toggle('active', open); + localStorage.setItem('bates-chat-open', open ? 
'1' : '0'); + } + toggle.addEventListener('click', () => setOpen(!drawer.classList.contains('open'))); + close?.addEventListener('click', () => setOpen(false)); + + const saved = localStorage.getItem('bates-chat-open'); + setOpen(saved !== '0'); + } + + // ─── Clock ─── + function updateClock() { + const el = document.getElementById('clock'); + if (!el) return; + el.textContent = new Date().toLocaleTimeString('en-GB', { timeZone: 'Europe/Lisbon', hour: '2-digit', minute: '2-digit' }); + } + + // ─── Connection ─── + function updateConn(status) { + const dot = document.getElementById('conn-dot'); + const lbl = document.getElementById('conn-label'); + if (dot) dot.className = 'conn-dot ' + status; + if (lbl) lbl.textContent = status === 'connected' ? 'LIVE' : status.toUpperCase(); + } + + // ─── Refresh buttons ─── + function setupRefresh() { + document.querySelectorAll('.panel-refresh').forEach(btn => { + btn.addEventListener('click', () => { + const pid = (btn.dataset.action || '').replace('refresh-', ''); + try { panels[pid]?.refresh?.(gateway); } catch {} + }); + }); + } + + // ─── Overview metrics ─── + window._updateOverviewMetrics = function(d) { + if (!d) return; + const set = (id, v) => { const el = document.getElementById(id); if (el) el.textContent = v; }; + if (d.activeAgents !== undefined) set('metric-agents-val', d.activeAgents); + if (d.emails !== undefined) set('metric-emails-val', d.emails); + if (d.tasks !== undefined) set('metric-tasks-val', d.tasks); + if (d.nextCron !== undefined) set('metric-cron-val', d.nextCron); + }; + + // ─── Agents summary hook ─── + const _origReg = window.Dashboard.registerPanel; + window.Dashboard.registerPanel = function(id, mod) { + if (id === 'agents') { + const oRefresh = mod.refresh, oInit = mod.init; + mod.refresh = async gw => { await oRefresh(gw); updateAgentsSummary(); }; + mod.init = async gw => { await oInit(gw); updateAgentsSummary(); }; + } + _origReg(id, mod); + }; + + function updateAgentsSummary() { + const el = document.getElementById('panel-agents-summary'); + if (!el) return; + const cards = document.querySelectorAll('#panel-agents .acard, #panel-agents .agent-card'); + if (!cards.length) { el.innerHTML = '
No agents online
'; return; } + let html = '
'; + let n = 0; + cards.forEach(c => { + if (n >= 6) return; + const name = c.querySelector('.aname, .agent-name'); + const role = c.querySelector('.arole, .agent-role'); + const dot = c.querySelector('.status-dot'); + if (!name) return; + html += `
+ + ${name.textContent} + ${role ? `${role.textContent}` : ''} +
`; + n++; + }); + if (cards.length > 6) html += `
View all ${cards.length} →
`; + html += '
'; + el.innerHTML = html; + } + + // ─── Rollout panel (standalone, not injected into project card) ─── + + // ─── Init ─── + async function init() { + updateClock(); + setInterval(updateClock, 1000); + + document.querySelectorAll('.nav-tab').forEach(b => b.addEventListener('click', () => switchView(b.dataset.view))); + setupChatDrawer(); + setupRefresh(); + + const ov = document.getElementById('soul-modal-overlay'); + const cl = document.getElementById('soul-modal-close'); + if (ov) ov.addEventListener('click', e => { if (e.target === ov) ov.classList.remove('visible'); }); + if (cl) cl.addEventListener('click', () => ov.classList.remove('visible')); + + const config = window.__GATEWAY_CONFIG || {}; + gateway = new GatewayClient(config); + gateway.onStatusChange = updateConn; + updateConn('reconnecting'); + + for (const [id, p] of Object.entries(panels)) { + try { await p.init?.(gateway); } catch (e) { console.error(`Init ${id}:`, e); } + } + + gateway.connect().then(() => { + for (const pid of VIEW_PANELS[currentView]) { + try { panels[pid]?.refresh?.(gateway); } catch {} + } + // Refresh chat panel after auth is confirmed + if (panels.chat?.refresh) try { panels.chat.refresh(gateway); } catch {} + }).catch(e => { console.error('WS failed:', e); updateConn('disconnected'); }); + + // Project boxes clickable + setupProjectBoxes(); + + setInterval(() => { + for (const pid of VIEW_PANELS[currentView]) { + try { panels[pid]?.refresh?.(gateway); } catch {} + } + }, 30000); + } + + // ─── Project Detail Views ─── + // Configure your projects here. Update planUrl with your own Planner/To-Do URLs. + const PROJECT_DATA = { + project_a: { name: 'Project A', icon: '🏦', desc: 'Example project A', agent: 'conrad', agentName: 'Conrad', planUrl: 'https://tasks.office.com/TENANT.example.com/Home/PlanViews/PLAN_ID_A' }, + project_b: { name: 'Project B', icon: '⚡', desc: 'Example project B', agent: 'soren', agentName: 'Soren', planUrl: 'https://tasks.office.com/TENANT.example.com/Home/PlanViews/PLAN_ID_B' }, + project_c: { name: 'Project C', icon: '🏫', desc: 'Example project C', agent: 'amara', agentName: 'Amara', planUrl: 'https://tasks.office.com/TENANT.example.com/Home/PlanViews/PLAN_ID_C' }, + bates: { name: 'Bates', icon: '🐧', desc: 'AI operations platform — agent orchestration & rollout', agent: 'dash', agentName: 'Dash', planUrl: 'https://tasks.office.com/TENANT.example.com/Home/PlanViews/PLAN_ID_D' }, + private: { name: 'Private', icon: '🏠', desc: 'Personal & family affairs', agent: 'jules', agentName: 'Jules', planUrl: 'https://to-do.office.com/tasks' }, + }; + + function setupProjectBoxes() { + document.querySelectorAll('.project-box').forEach(box => { + const pid = box.dataset.project; + if (!pid || !PROJECT_DATA[pid]) return; + box.addEventListener('click', (e) => { + e.stopPropagation(); + openProjectDetail(pid); + }); + }); + } + + function openProjectDetail(pid) { + const p = PROJECT_DATA[pid]; + if (!p) return; + const ov = document.getElementById('soul-modal-overlay'); + if (!ov) return; + const titleEl = document.getElementById('soul-modal-title'); + const bodyEl = document.getElementById('soul-modal-body'); + titleEl.textContent = p.icon + ' ' + p.name; + bodyEl.innerHTML = ` +
+
${Dashboard.esc(p.desc)}
+ +
+
Planner Tasks
+
📋 Loading…
+
+
+
Recent Files
+
📁 Loading...
+
+
`; + ov.classList.add('visible'); + + // Load project tasks using shared task row component + (function loadProjectTasks() { + const tel = document.getElementById('project-detail-tasks-' + pid); + if (!tel) return; + + function renderProjectTaskRows(tasks) { + const incomplete = tasks.filter(t => !t.completed && !t.error); + const done = tasks.filter(t => t.completed); + if (!incomplete.length && !done.length) { tel.textContent = '📋 No tasks'; return; } + let h = ''; + for (const t of incomplete.slice(0, 20)) h += Dashboard.renderTaskRowCompact(t); + h += '
'; + if (done.length) h += `
✓ ${done.length} completed
`; + if (incomplete.length > 20) { const planLink = PROJECT_DATA[pid]?.planUrl; h += `+ ${incomplete.length - 20} more → Open in Planner`; } + tel.innerHTML = h; + Dashboard.wireTaskRows(tel); + } + + const pt = window._getProjectTasks?.(pid); + if (pt && pt.tasks?.length) { + renderProjectTaskRows(pt.tasks); + } else if (pt && pt.tasks?.length === 0) { + tel.textContent = '📋 No tasks in this plan'; + } else { + Dashboard.fetchApi('tasks').then(data => { + if (data?.byProject?.[pid]?.tasks) { + renderProjectTaskRows(data.byProject[pid].tasks); + } else { + tel.textContent = '📋 No plan configured'; + } + }).catch(() => { tel.textContent = '📋 Could not load tasks'; }); + } + })(); + + // Try to load filtered files + Dashboard.fetchApi('files').then(files => { + const el = document.getElementById('project-detail-files-' + pid); + if (!el) return; + const all = Array.isArray(files) ? files : []; + const kw = pid; + const filtered = all.filter(f => (f.path || '').toLowerCase().includes(kw)).slice(0, 5); + if (!filtered.length) { el.textContent = '📁 No recent files for this project'; return; } + el.innerHTML = filtered.map(f => `
${Dashboard.esc(f.name)} ${Dashboard.timeAgo(f.modified)}
`).join(''); + }).catch(() => { + const el = document.getElementById('project-detail-files-' + pid); + if (el) el.textContent = '📁 Could not load files'; + }); + } + + document.readyState === 'loading' ? document.addEventListener('DOMContentLoaded', init) : init(); +})(); diff --git a/bates-core/plugins/dashboard/static/js/gateway.js b/bates-core/plugins/dashboard/static/js/gateway.js new file mode 100644 index 0000000..b439cb2 --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/gateway.js @@ -0,0 +1,685 @@ +/** + * OpenClaw Gateway WebSocket Client + * Protocol v3 — typed frames { type: "req"|"res"|"event" } + * Includes Ed25519 device auth for operator scopes. + */ + +// ─── Ed25519 (minimal, browser-only via noble-ed25519-style inline) ─── +// We use SubtleCrypto SHA-512 + a tiny Ed25519 sign implementation. +// For brevity we import the same device-identity approach as Control UI: +// generate keypair, store in localStorage, sign connect payload. + +const DEVICE_STORAGE_KEY = "openclaw-device-identity-v1"; +const DEVICE_AUTH_TOKEN_KEY = "openclaw.device.auth.v1"; + +// ─── Helpers ─── +function b64url(bytes) { + let s = ""; + for (const b of bytes) s += String.fromCharCode(b); + return btoa(s).replace(/\+/g, "-").replace(/\//g, "_").replace(/=+$/g, ""); +} +function b64urlDecode(str) { + const s = str.replace(/-/g, "+").replace(/_/g, "/"); + const padded = s + "=".repeat((4 - s.length % 4) % 4); + const bin = atob(padded); + const bytes = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; i++) bytes[i] = bin.charCodeAt(i); + return bytes; +} +function hexFromBytes(bytes) { + return Array.from(bytes).map(b => b.toString(16).padStart(2, "0")).join(""); +} +async function sha256Hex(bytes) { + const hash = await crypto.subtle.digest("SHA-256", bytes.buffer); + return hexFromBytes(new Uint8Array(hash)); +} + +// ─── Ed25519 via noble-ed25519 approach (reuse Control UI's stored keys) ─── +// We need to sign payloads. The Control UI stores keys as base64url-encoded +// Ed25519 seed (private) and public key. We'll use the Web Crypto Ed25519 API +// if available (Chrome 113+, Firefox 128+), or fall back to importing the +// existing noble-ed25519 implementation pattern. + +// Try native Ed25519 first (available in modern browsers) +async function ed25519Sign(privateKeyBytes, message) { + // Try native Web Crypto Ed25519 + try { + const key = await crypto.subtle.importKey( + "pkcs8", + ed25519SeedToPkcs8(privateKeyBytes), + { name: "Ed25519" }, + false, + ["sign"] + ); + const sig = await crypto.subtle.sign("Ed25519", key, new TextEncoder().encode(message)); + return new Uint8Array(sig); + } catch (e) { + // Native Ed25519 not available, fall back to noble implementation + return ed25519SignNoble(privateKeyBytes, new TextEncoder().encode(message)); + } +} + +// Convert 32-byte Ed25519 seed to PKCS#8 format for Web Crypto +function ed25519SeedToPkcs8(seed) { + // PKCS#8 wrapper for Ed25519 private key (seed) + const prefix = new Uint8Array([ + 0x30, 0x2e, 0x02, 0x01, 0x00, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, + 0x04, 0x22, 0x04, 0x20 + ]); + const result = new Uint8Array(prefix.length + seed.length); + result.set(prefix); + result.set(seed, prefix.length); + return result.buffer; +} + +// Minimal noble-ed25519 sign (synchronous-style using SHA-512 from SubtleCrypto) +async function sha512(data) { + const hash = await crypto.subtle.digest("SHA-512", data instanceof Uint8Array ? 
data.buffer : data); + return new Uint8Array(hash); +} + +// We'll use a simplified approach: if native Ed25519 fails, we load the +// noble-ed25519 micro library dynamically. For now, store a minimal implementation. +// This is the same Ed25519 implementation used by Control UI (inlined). + +// ─── Modular arithmetic for Ed25519 ─── +const P = 2n ** 255n - 19n; +const N = 2n ** 252n + 27742317777372353535851937790883648493n; +const Gx = 15112221349535807912866137220509078750507884956996801397894129974371384098553n; +const Gy = 46316835694926478169428394003475163141307993866256225615783033890098355573289n; +const D_CONST = 37095705934669439343138083508754565189542113879843219016388785533085940283555n; + +function mod(a, m = P) { let r = a % m; return r >= 0n ? r : m + r; } +function modInv(a, m = P) { + let [old_r, r] = [mod(a, m), m]; + let [old_s, s] = [1n, 0n]; + while (r !== 0n) { + const q = old_r / r; + [old_r, r] = [r, old_r - q * r]; + [old_s, s] = [s, old_s - q * s]; + } + return mod(old_s, m); +} +function modN(a) { return mod(a, N); } + +class EdPoint { + constructor(X, Y, Z, T) { this.X = X; this.Y = Y; this.Z = Z; this.T = T; } + static ZERO = new EdPoint(0n, 1n, 1n, 0n); + static BASE = new EdPoint(Gx, Gy, 1n, mod(Gx * Gy)); + + add(other) { + const a = -1n; // Ed25519 a = -1 + const { X: X1, Y: Y1, Z: Z1, T: T1 } = this; + const { X: X2, Y: Y2, Z: Z2, T: T2 } = other; + const A = mod(X1 * X2); + const B = mod(Y1 * Y2); + const C = mod(T1 * D_CONST * T2); + const DD = mod(Z1 * Z2); + const E = mod((X1 + Y1) * (X2 + Y2) - A - B); + const F = mod(DD - C); + const G = mod(DD + C); + const H = mod(B - a * A); + return new EdPoint(mod(E * F), mod(G * H), mod(F * G), mod(E * H)); + } + + double() { + const a = -1n; + const { X, Y, Z } = this; + const A = mod(X * X); + const B = mod(Y * Y); + const C = mod(2n * mod(Z * Z)); + const D2 = mod(a * A); + const E = mod(mod((X + Y) * (X + Y)) - A - B); + const G = mod(D2 + B); + const F = mod(G - C); + const H = mod(D2 - B); + return new EdPoint(mod(E * F), mod(G * H), mod(F * G), mod(E * H)); + } + + multiply(scalar) { + let result = EdPoint.ZERO; + let base = this; + let s = scalar; + while (s > 0n) { + if (s & 1n) result = result.add(base); + base = base.double(); + s >>= 1n; + } + return result; + } + + toAffine() { + const inv = modInv(this.Z); + return { x: mod(this.X * inv), y: mod(this.Y * inv) }; + } + + toBytes() { + const { x, y } = this.toAffine(); + const bytes = numberToLEBytes(y, 32); + if (x & 1n) bytes[31] |= 0x80; + return bytes; + } +} + +function numberToLEBytes(n, len) { + const bytes = new Uint8Array(len); + let v = n; + for (let i = 0; i < len; i++) { bytes[i] = Number(v & 0xffn); v >>= 8n; } + return bytes; +} +function bytesToNumberLE(bytes) { + let n = 0n; + for (let i = bytes.length - 1; i >= 0; i--) n = (n << 8n) | BigInt(bytes[i]); + return n; +} + +async function ed25519SignNoble(seed, message) { + // Hash seed to get (scalar, prefix) + const h = await sha512(seed); + const scalar_bytes = h.slice(0, 32); + scalar_bytes[0] &= 248; + scalar_bytes[31] &= 127; + scalar_bytes[31] |= 64; + const scalar = bytesToNumberLE(scalar_bytes); + const prefix = h.slice(32, 64); + + // Public key + const pubPoint = EdPoint.BASE.multiply(scalar); + const pubBytes = pubPoint.toBytes(); + + // r = SHA-512(prefix || message) mod N + const rHash = await sha512(concat(prefix, message)); + const r = modN(bytesToNumberLE(rHash)); + + // R = r * G + const R = EdPoint.BASE.multiply(r); + const RBytes = R.toBytes(); + + // S = (r + 
SHA-512(R || pubKey || message) * scalar) mod N + const kHash = await sha512(concat(RBytes, pubBytes, message)); + const k = modN(bytesToNumberLE(kHash)); + const S = modN(r + k * scalar); + const SBytes = numberToLEBytes(S, 32); + + // Signature = R || S + return concat(RBytes, SBytes); +} + +function concat(...arrays) { + const len = arrays.reduce((s, a) => s + a.length, 0); + const result = new Uint8Array(len); + let offset = 0; + for (const a of arrays) { result.set(a, offset); offset += a.length; } + return result; +} + +// ─── Device Identity Management ─── +async function getOrCreateDeviceIdentity() { + if (!crypto.subtle) return null; + try { + const stored = localStorage.getItem(DEVICE_STORAGE_KEY); + if (stored) { + const parsed = JSON.parse(stored); + if (parsed?.version === 1 && parsed.deviceId && parsed.publicKey && parsed.privateKey) { + // Verify deviceId matches publicKey + const computedId = await sha256Hex(b64urlDecode(parsed.publicKey)); + if (computedId !== parsed.deviceId) { + parsed.deviceId = computedId; + localStorage.setItem(DEVICE_STORAGE_KEY, JSON.stringify(parsed)); + } + return { deviceId: parsed.deviceId, publicKey: parsed.publicKey, privateKey: parsed.privateKey }; + } + } + } catch {} + + // Generate new keypair using our Ed25519 implementation + const seed = crypto.getRandomValues(new Uint8Array(32)); + const h = await sha512(seed); + const scalar_bytes = h.slice(0, 32); + scalar_bytes[0] &= 248; + scalar_bytes[31] &= 127; + scalar_bytes[31] |= 64; + const scalar = bytesToNumberLE(scalar_bytes); + const pubPoint = EdPoint.BASE.multiply(scalar); + const pubBytes = pubPoint.toBytes(); + const deviceId = await sha256Hex(pubBytes); + + const identity = { + version: 1, + deviceId, + publicKey: b64url(pubBytes), + privateKey: b64url(seed), + createdAtMs: Date.now() + }; + localStorage.setItem(DEVICE_STORAGE_KEY, JSON.stringify(identity)); + return { deviceId, publicKey: identity.publicKey, privateKey: identity.privateKey }; +} + +function getStoredDeviceToken(deviceId, role) { + try { + const stored = localStorage.getItem(DEVICE_AUTH_TOKEN_KEY); + if (!stored) return null; + const parsed = JSON.parse(stored); + if (!parsed || parsed.version !== 1 || parsed.deviceId !== deviceId) return null; + const entry = parsed.tokens[role.trim()]; + return entry?.token || null; + } catch { return null; } +} + +function storeDeviceToken(deviceId, role, token, scopes) { + const key = role.trim(); + let data = { version: 1, deviceId, tokens: {} }; + try { + const existing = JSON.parse(localStorage.getItem(DEVICE_AUTH_TOKEN_KEY)); + if (existing?.version === 1 && existing.deviceId === deviceId) { + data.tokens = { ...existing.tokens }; + } + } catch {} + data.tokens[key] = { token, role: key, scopes, updatedAtMs: Date.now() }; + localStorage.setItem(DEVICE_AUTH_TOKEN_KEY, JSON.stringify(data)); +} + +function clearDeviceToken(deviceId, role) { + try { + const existing = JSON.parse(localStorage.getItem(DEVICE_AUTH_TOKEN_KEY)); + if (!existing || existing.version !== 1 || existing.deviceId !== deviceId) return; + const tokens = { ...existing.tokens }; + delete tokens[role.trim()]; + localStorage.setItem(DEVICE_AUTH_TOKEN_KEY, JSON.stringify({ ...existing, tokens })); + } catch {} +} + +function buildDeviceAuthPayload(opts) { + const version = opts.version || (opts.nonce ? 
"v2" : "v1"); + const scopeStr = (opts.scopes || []).join(","); + const tokenStr = opts.token || ""; + const parts = [version, opts.deviceId, opts.clientId, opts.clientMode, opts.role, scopeStr, String(opts.signedAtMs), tokenStr]; + if (version === "v2" && opts.nonce) parts.push(opts.nonce); + return parts.join("|"); +} + +async function signPayload(privateKeyB64, payload) { + const seed = b64urlDecode(privateKeyB64); + const msg = new TextEncoder().encode(payload); + // Try native Web Crypto Ed25519 first (Chrome 113+, Firefox 128+) + try { + const pkcs8 = ed25519SeedToPkcs8(seed); + const key = await crypto.subtle.importKey("pkcs8", pkcs8, { name: "Ed25519" }, false, ["sign"]); + const sig = await crypto.subtle.sign("Ed25519", key, msg); + return b64url(new Uint8Array(sig)); + } catch { + // Fall back to noble implementation + const sig = await ed25519SignNoble(seed, msg); + return b64url(sig); + } +} + +function generateUUID() { + if (crypto.randomUUID) return crypto.randomUUID(); + const bytes = crypto.getRandomValues(new Uint8Array(16)); + bytes[6] = (bytes[6] & 0x0f) | 0x40; + bytes[8] = (bytes[8] & 0x3f) | 0x80; + const hex = hexFromBytes(bytes); + return `${hex.slice(0,8)}-${hex.slice(8,12)}-${hex.slice(12,16)}-${hex.slice(16,20)}-${hex.slice(20)}`; +} + +// ─── Gateway Client ─── +class GatewayClient { + constructor(config) { + this.wsUrl = config.wsUrl; + this.token = config.token; + this.ws = null; + this.connected = false; + this.authenticated = false; + this.pendingRpc = new Map(); + this.subscribers = new Map(); + this.rpcIdCounter = 0; + this.reconnectDelay = 2000; + this.maxReconnectDelay = 30000; + this.onStatusChange = null; + this._shouldReconnect = true; + this._connectResolve = null; + this._connectReject = null; + this.serverInfo = null; + this.features = null; + this._connectNonce = null; + this._connectSent = false; + this._authFailed = false; + this._retryCount = 0; + this._maxRetries = 5; + this._retryDelays = [2000, 4000, 8000, 16000, 30000]; + this.lastError = null; + } + + connect() { + return new Promise((resolve, reject) => { + this._setStatus("reconnecting"); + this._connectResolve = resolve; + this._connectReject = reject; + this._connectNonce = null; + this._connectSent = false; + + try { + this.ws = new WebSocket(this.wsUrl); + } catch (e) { + this._setStatus("disconnected"); + this._connectResolve = null; + this._connectReject = null; + reject(e); + return; + } + + this.ws.onopen = () => { + console.log("[GW] WebSocket open"); + this.connected = true; + this._authFailed = false; + // If server doesn't send a challenge within 2s, send connect request anyway + this._challengeTimer = setTimeout(() => { + if (!this._connectSent && this.connected) { + console.log("[GW] No challenge received, sending connect without nonce"); + this._sendConnectRequest(null); + } + }, 2000); + }; + + this.ws.onmessage = (event) => { + let msg; + try { msg = JSON.parse(event.data); } catch { return; } + this._handleMessage(msg); + }; + + this.ws.onerror = () => { + if (!this.authenticated && this._connectReject) { + const rej = this._connectReject; + this._connectResolve = null; + this._connectReject = null; + rej(new Error("WebSocket error")); + } + }; + + this.ws.onclose = (ev) => { + this.connected = false; + const wasAuthenticated = this.authenticated; + this.authenticated = false; + + if (this._challengeTimer) { clearTimeout(this._challengeTimer); this._challengeTimer = null; } + + for (const [, { reject: rej }] of this.pendingRpc) { + rej(new Error("Connection closed")); + 
} + this.pendingRpc.clear(); + + if (this._connectReject) { + const rej = this._connectReject; + this._connectResolve = null; + this._connectReject = null; + rej(new Error("Connection closed before auth")); + } + + // Don't reconnect on explicit auth rejection + const noReconnectCodes = [4001, 4003, 4008, 4009]; + if (noReconnectCodes.includes(ev.code) || this._authFailed) { + console.warn(`[GW] Close code=${ev.code}, auth failed — NOT reconnecting`); + this._shouldReconnect = false; + this._setStatus("auth_failed"); + return; + } + + // Cap retries at _maxRetries + if (!wasAuthenticated) { + this._retryCount++; + if (this._retryCount >= this._maxRetries) { + console.warn(`[GW] Max retries (${this._maxRetries}) reached, stopping`); + this._shouldReconnect = false; + this._setStatus("max_retries"); + return; + } + } else { + // Successful connection was lost — reset retry count + this._retryCount = 0; + } + + this._setStatus("disconnected"); + + if (this._shouldReconnect) { + const delay = wasAuthenticated ? 2000 : (this._retryDelays[this._retryCount - 1] || 30000); + console.log(`[GW] Reconnecting in ${delay}ms (attempt ${this._retryCount}/${this._maxRetries}, code=${ev.code})`); + setTimeout(() => this._reconnect(), delay); + } else { + console.log(`[GW] Not reconnecting (code=${ev.code})`); + this._setStatus("disconnected"); + } + }; + }); + } + + async _handleMessage(msg) { + // Step 1: Challenge — build and send connect request with device auth + if (msg.type === "event" && msg.event === "connect.challenge") { + if (this._challengeTimer) { clearTimeout(this._challengeTimer); this._challengeTimer = null; } + const nonce = msg.payload?.nonce || null; + this._connectNonce = nonce; + await this._sendConnectRequest(nonce); + return; + } + + // Step 2: Connect response + if (msg.type === "res" && msg.id === "connect") { + if (msg.ok) { + console.log("[GW] Authenticated successfully"); + this.authenticated = true; + this.reconnectDelay = 1000; + this._authFailed = false; + this._setStatus("connected"); + const payload = msg.payload || {}; + this.serverInfo = payload.server; + this.features = payload.features; + + // Store device token if provided + if (payload.auth?.deviceToken) { + try { + const identity = await getOrCreateDeviceIdentity(); + if (identity) { + storeDeviceToken(identity.deviceId, "operator", payload.auth.deviceToken, payload.auth.scopes || []); + } + } catch {} + } + + if (this._connectResolve) { + const res = this._connectResolve; + this._connectResolve = null; + this._connectReject = null; + res(this); + } + } else { + console.error("[GW] Connect REJECTED:", msg.error); + this._authFailed = true; + this.lastError = msg.error?.message || "Connect rejected"; + + // Clear device token on auth failure + try { + const identity = await getOrCreateDeviceIdentity(); + if (identity) clearDeviceToken(identity.deviceId, "operator"); + } catch {} + + if (this._connectReject) { + const rej = this._connectReject; + this._connectResolve = null; + this._connectReject = null; + rej(new Error(this.lastError)); + } + + // Close WebSocket explicitly to prevent lingering connection + try { this.ws?.close(); } catch {} + } + return; + } + + // RPC response + if (msg.type === "res" && msg.id && this.pendingRpc.has(msg.id)) { + const { resolve, reject } = this.pendingRpc.get(msg.id); + this.pendingRpc.delete(msg.id); + if (!msg.ok || msg.error) { + reject(new Error(msg.error?.message || JSON.stringify(msg.error))); + } else { + resolve(msg.payload ?? 
msg); + } + return; + } + + // Event frames + if (msg.type === "event" && msg.event) { + if (msg.event === "tick") return; + + const listeners = this.subscribers.get(msg.event) || []; + for (const cb of listeners) { + try { cb(msg.payload ?? msg); } catch {} + } + const wildcardListeners = this.subscribers.get("*") || []; + for (const cb of wildcardListeners) { + try { cb({ event: msg.event, ...(msg.payload ?? {}) }); } catch {} + } + } + } + + async _sendConnectRequest(nonce) { + if (this._connectSent) return; + this._connectSent = true; + + const role = "operator"; + const scopes = ["operator.admin", "operator.read", "operator.write", "operator.approvals", "operator.pairing"]; + const clientId = "webchat-ui"; + const clientMode = "webchat"; + let authToken = this.token; + + // Build device identity for Ed25519 auth (required for scopes) + let deviceObj = null; + const hasSubtleCrypto = typeof crypto !== "undefined" && !!crypto.subtle; + + if (hasSubtleCrypto) { + try { + const identity = await getOrCreateDeviceIdentity(); + if (identity) { + // Try stored device token first (faster reconnect) + const storedToken = getStoredDeviceToken(identity.deviceId, role); + if (storedToken && this.token) { + // Prefer stored device token over shared gateway token + authToken = storedToken; + } + + const signedAtMs = Date.now(); + const payload = buildDeviceAuthPayload({ + deviceId: identity.deviceId, + clientId, + clientMode, + role, + scopes, + signedAtMs, + token: authToken || null, + nonce: nonce || undefined, + version: nonce ? "v2" : "v1", + }); + const signature = await signPayload(identity.privateKey, payload); + + deviceObj = { + id: identity.deviceId, + publicKey: identity.publicKey, + signature, + signedAt: signedAtMs, + nonce: nonce || undefined, + }; + } + } catch (e) { + console.warn("[GW] Device auth setup failed, falling back to token-only:", e); + } + } + + console.log("[GW] Sending connect request, token present:", !!authToken, "nonce:", !!nonce, "device:", !!deviceObj); + + this._send({ + type: "req", + id: "connect", + method: "connect", + params: { + minProtocol: 3, + maxProtocol: 3, + client: { + id: clientId, + version: "1.0.0", + platform: navigator?.platform || "web", + mode: clientMode, + displayName: "Bates Command Center", + instanceId: generateUUID(), + }, + role, + scopes, + device: deviceObj, + auth: { + token: authToken, + }, + caps: ["tool-events"], + userAgent: navigator?.userAgent, + }, + }); + } + + rpc(method, params = {}) { + return new Promise((resolve, reject) => { + if (!this.authenticated) { + reject(new Error("Not authenticated")); + return; + } + const id = `rpc-${++this.rpcIdCounter}`; + this.pendingRpc.set(id, { resolve, reject }); + this._send({ type: "req", id, method, params }); + + setTimeout(() => { + if (this.pendingRpc.has(id)) { + this.pendingRpc.delete(id); + reject(new Error(`RPC timeout: ${method}`)); + } + }, 15000); + }); + } + + subscribe(eventType, callback) { + if (!this.subscribers.has(eventType)) { + this.subscribers.set(eventType, []); + } + this.subscribers.get(eventType).push(callback); + return () => { + const list = this.subscribers.get(eventType); + if (list) { + const idx = list.indexOf(callback); + if (idx >= 0) list.splice(idx, 1); + } + }; + } + + _send(obj) { + if (this.ws && this.ws.readyState === WebSocket.OPEN) { + this.ws.send(JSON.stringify(obj)); + } + } + + _setStatus(status) { + if (this.onStatusChange) { + this.onStatusChange(status); + } + } + + _reconnect() { + if (!this._shouldReconnect) return; + 
this._setStatus("reconnecting"); + this.connect().catch(() => {}); + } + + disconnect() { + this._shouldReconnect = false; + if (this.ws) { + this.ws.close(); + } + } +} + +window.GatewayClient = GatewayClient; diff --git a/bates-core/plugins/dashboard/static/js/panel-agents.js b/bates-core/plugins/dashboard/static/js/panel-agents.js new file mode 100644 index 0000000..3ecce25 --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/panel-agents.js @@ -0,0 +1,230 @@ +/** + * Agents Panel — Org Chart Layout (v4 glassmorphism) + */ +(function () { + const D = window.Dashboard; + let sessionData = [], subagentData = [], agentFleetData = []; + let fastRefreshInterval = null; + + const TIERS = { + coo: [{ id: 'bates', name: 'Bates', role: 'Chief Operating Officer' }], + deputies: [ + { id: 'conrad', name: 'Conrad', role: 'Deputy Director, Project A Operations' }, + { id: 'soren', name: 'Soren', role: 'Deputy Director, Project B Operations' }, + { id: 'amara', name: 'Amara', role: 'Deputy Director, Project C Operations' }, + { id: 'jules', name: 'Jules', role: 'Deputy Director, Private Affairs' }, + { id: 'dash', name: 'Dash', role: 'Deputy Director, OpenClaw/Bates Rollout' }, + { id: 'mira', name: 'Mira', role: 'Deputy Director, Technology & Infrastructure' }, + ], + specialists: [ + { id: 'mercer', name: 'Mercer', role: 'Legal & Compliance Specialist' }, + { id: 'kira', name: 'Kira', role: 'Social Media & Content Specialist' }, + { id: 'nova', name: 'Nova', role: 'Research & Discovery Specialist' }, + { id: 'paige', name: 'Paige', role: 'Finance Specialist' }, + { id: 'quinn', name: 'Quinn', role: 'HR & People Specialist' }, + { id: 'archer', name: 'Archer', role: 'Documentation Specialist' }, + ], + }; + + async function fetchWithFallback(gatewayPath) { + return fetch(gatewayPath); + } + + window.AGENT_AVATARS = { + bates: '/dashboard/assets/avatar-transparent.png', + conrad: '/dashboard/assets/agent-baby_bolt.png', + soren: '/dashboard/assets/agent-baby_core.png', + amara: '/dashboard/assets/agent-baby_aqua.png', + jules: '/dashboard/assets/agent-baby_frost.png', + dash: '/dashboard/assets/agent-baby_Ember.png', + mercer: '/dashboard/assets/agent-baby_Dark.png', + kira: '/dashboard/assets/agent-baby_pixel.png', + nova: '/dashboard/assets/agent-baby_nova.png', + paige: '/dashboard/assets/agent-baby_Sage.png', + quinn: '/dashboard/assets/agent-baby_sky.png', + mira: '/dashboard/assets/agent-baby_Sage.png', + archer: '/dashboard/assets/agent-baby_sky.png', + }; + + const MODEL_FALLBACK = { + bates: 'Sonnet 4.6', conrad: 'Sonnet 4.6', mira: 'Opus 4.6', mercer: 'Opus 4.6', + soren: 'Sonnet 4.6', amara: 'Sonnet 4.6', jules: 'Sonnet 4.6', dash: 'Sonnet 4.6', kira: 'Sonnet 4.6', paige: 'Sonnet 4.6', + nova: 'Gemini Flash', quinn: 'Gemini Flash', archer: 'Gemini Flash', + }; + function mbClass(m) { if (!m) return 'other'; const l = m.toLowerCase(); return l.includes('opus') ? 'opus' : l.includes('sonnet') ? 'sonnet' : l.includes('gemini') ? 
'gemini' : 'other'; } + function ago(ep) { if (!ep) return 'never'; const d = Date.now()/1000-ep; return d<0?'now':d<60?((d|0)+'s ago'):d<3600?((d/60|0)+'m ago'):d<86400?((d/3600|0)+'h ago'):((d/86400|0)+'d ago'); } + function find(n) { return agentFleetData.find(a => a.name?.toLowerCase() === n.toLowerCase()); } + + // Map TIERS id to filesystem/API id + const API_ID_MAP = { bates: 'main' }; + function apiId(id) { return API_ID_MAP[id] || id; } + + function openAgentDetail(id, name) { + const ov = document.getElementById('soul-modal-overlay'); if (!ov) return; + const def = [...TIERS.coo, ...TIERS.deputies, ...TIERS.specialists].find(a => a.id === id) || {}; + const fleetAgent = find(name) || {}; + const avatarSrc = window.AGENT_AVATARS[id] || ''; + const m = fleetAgent.model || MODEL_FALLBACK[id] || '', cls = mbClass(m); + const st = fleetAgent.status || 'idle'; + + // Build modal header + body + const titleEl = document.getElementById('soul-modal-title'); + const bodyEl = document.getElementById('soul-modal-body'); + titleEl.textContent = name + ' — Agent Detail'; + + bodyEl.innerHTML = ` +
+      <!-- reconstructed markup: the original tags were lost in transit; the
+           agent-detail-* ids are the only load-bearing names (the fetch
+           handlers below write into them), everything else is minimal -->
+      <div>
+        ${avatarSrc ? `<img src="${avatarSrc}" alt="">` : ''}
+        <div>${D.esc(name)}</div>
+        <div>${D.esc(def.role || fleetAgent.role || '')}</div>
+        <div>
+          ${m ? `<span class="${cls}">${D.esc(m.split('/').pop())}</span>` : ''}
+          <span>${D.esc(st)}</span>
+          · ${D.esc(ago(fleetAgent.last_activity_epoch))}
+        </div>
+        <div>📥 ${fleetAgent.inbox_count||0}   📤 ${fleetAgent.outbox_count||0}</div>
+      </div>
+      <div>
+        <div>Recent Activity</div>
+        <div id="agent-detail-activity">Loading...</div>
+      </div>
+      <div>
+        <div>Recent Memory</div>
+        <div id="agent-detail-memory">Loading...</div>
+      </div>
+      <div>
+        <div>SOUL.md</div>
+        <div id="agent-detail-soul">Loading...</div>
+      </div>
`; + + ov.classList.add('visible'); + + // Fetch agent API data + fetchWithFallback('/dashboard/api/agents') + .then(r => r.ok ? r.json() : null) + .then(agents => { + const agents2 = Array.isArray(agents) ? agents : (agents?.agents || []); + const aid = apiId(id); + const a = agents2.find(x => x.id === aid || x.id === id || x.name?.toLowerCase() === name.toLowerCase()); + const el = document.getElementById('agent-detail-activity'); + if (a && el) { + const hb = a.heartbeat_interval || '—'; + const lastAct = a.last_activity ? new Date(a.last_activity).toLocaleString() : 'never'; + el.innerHTML = `
+            <div>Last activity: ${D.esc(lastAct)}</div>
+            <div>Heartbeat interval: ${D.esc(hb)}</div>
+            <div>Layer: ${a.layer || '—'}</div>
`; + } + }).catch(() => { const el = document.getElementById('agent-detail-activity'); if (el) el.textContent = 'Could not load'; }); + + // Fetch SOUL.md + fetchWithFallback(`/dashboard/api/agents/${encodeURIComponent(apiId(id))}/soul`) + .then(r => r.ok ? r.json() : { content: null }) + .then(d => { const el = document.getElementById('agent-detail-soul'); if (el) el.textContent = d.content || 'No SOUL.md found.'; }) + .catch(() => { const el = document.getElementById('agent-detail-soul'); if (el) el.textContent = 'Error loading SOUL.md'; }); + + // Fetch today's memory + const today = new Date().toISOString().slice(0, 10); + fetchWithFallback(`/dashboard/api/agents/${encodeURIComponent(apiId(id))}/memory?date=${today}`) + .then(r => r.ok ? r.json() : {}) + .then(d => { + const el = document.getElementById('agent-detail-memory'); + if (el) { + const content = d.content || d.text || ''; + if (content) { + const lines = content.split('\n'); + el.textContent = lines.slice(-5).join('\n') || 'No entries today.'; + } else { el.textContent = 'No memory entries today.'; } + } + }) + .catch(() => { const el = document.getElementById('agent-detail-memory'); if (el) el.textContent = 'No memory available.'; }); + } + window._openSoulModal = openAgentDetail; + + function card(def, isCoo) { + const d = find(def.name), st = d?.status || 'idle', m = d?.model || MODEL_FALLBACK[def.id] || '', cls = mbClass(m); + const avatarSrc = window.AGENT_AVATARS[def.id] || ''; + const avatarHtml = avatarSrc ? `` : ''; + return `
+    <div class="${st}${isCoo ? ' coo' : ''}" onclick="window._openSoulModal('${def.id}', '${def.name}')">
+      ${avatarHtml}
+      <div>${D.esc(def.name)}</div>
+      <div>${D.esc(d?.role || def.role)}</div>
+      ${m ? `<span class="${cls}">${D.esc(m.split('/').pop())}</span>` : ''}
+      <div>${D.esc(ago(d?.last_activity_epoch))}</div>
+      <div>📥 ${d?.inbox_count||0} 📤 ${d?.outbox_count||0}</div>
+      <div>💓 ${D.esc(d?.heartbeat_interval||'—')}</div>
+    </div>
`; + } + + function render() { + const el = document.getElementById('panel-agents'); if (!el) return; + let h = ''; + h += '
Layer 1 — COO
' + TIERS.coo.map(a => card(a, true)).join('') + '
'; + h += '
'; + h += '
Layer 2 — Deputies
' + TIERS.deputies.map(a => card(a, false)).join('') + '
'; + h += '
'; + h += '
Layer 3 — Specialists
' + TIERS.specialists.map(a => card(a, false)).join('') + '
'; + + el.innerHTML = h; + } + + async function refreshSub() { try { const d = await D.fetchApi('sessions'); if (Array.isArray(d)) subagentData = d; } catch {} } + async function refreshFleet() { try { const r = await fetchWithFallback('/dashboard/api/agents'); if (r.ok) { const d = await r.json(); agentFleetData = Array.isArray(d) ? d : (d.agents || []); } } catch {} } + + async function refresh(gw) { + if (gw?.authenticated) try { const r = await gw.rpc('sessions.list', {}); sessionData = r?.sessions || r?.items || (Array.isArray(r) ? r : []); } catch { sessionData = []; } + await Promise.all([refreshSub(), refreshFleet()]); + const a = agentFleetData.filter(x => x.status === 'active').length; + const ready = agentFleetData.filter(x => x.status === 'ready' || x.status === 'active').length; + window._updateOverviewMetrics?.({ activeAgents: a + '/' + ready }); + render(); + } + + async function init(gw) { + render(); + if (gw?.authenticated) await refresh(gw); else await Promise.all([refreshSub(), refreshFleet()]); + render(); + gw?.subscribe('agent', () => refresh(gw)); + gw?.subscribe('agent.lifecycle', () => refresh(gw)); + } + + let _refreshInterval = null; + let _lastUpdated = null; + + function updateTimestamp() { + const el = document.getElementById('panel-agents'); + if (!el) return; + let ts = el.querySelector('.panel-last-updated'); + if (!ts) { ts = document.createElement('div'); ts.className = 'panel-last-updated'; el.appendChild(ts); } + if (_lastUpdated) { + const s = ((Date.now() - _lastUpdated) / 1000) | 0; + ts.textContent = `last updated: ${s}s ago`; + } + } + + const _origRefresh = refresh; + refresh = async function(gw) { + await _origRefresh(gw); + _lastUpdated = Date.now(); + updateTimestamp(); + }; + + function startAutoRefresh(gw) { + stopAutoRefresh(); + _refreshInterval = setInterval(() => { refresh(gw); }, 60000); + setInterval(updateTimestamp, 10000); + } + function stopAutoRefresh() { if (_refreshInterval) { clearInterval(_refreshInterval); _refreshInterval = null; } } + + const _origInit = init; + init = async function(gw) { + await _origInit(gw); + startAutoRefresh(gw); + }; + + D.registerPanel('agents', { init, refresh, stopAutoRefresh }); +})(); diff --git a/bates-core/plugins/dashboard/static/js/panel-ceo.js b/bates-core/plugins/dashboard/static/js/panel-ceo.js new file mode 100644 index 0000000..bb0b90c --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/panel-ceo.js @@ -0,0 +1,91 @@ +/** + * CEO Dashboard Panel — Tasks + project data + metrics (v4) + */ +(function () { + const D = window.Dashboard; + + function priClass(p) { return p === 'high' || p === 1 ? 'high' : p === 'medium' || p === 5 ? 'medium' : p === 'low' || p === 9 ? 'low' : 'none'; } + + function renderTasks(tasks) { + const el = document.getElementById('panel-ceo-tasks'); + if (!el) return; + if (!tasks?.length) { + el.innerHTML = '
+      <div>No tasks found</div><div>No tasks loaded</div>
'; + return; + } + let h = ''; + for (const t of tasks) { + const done = t.status === 'completed' || t.completed; + h += `
+      <div class="${done ? 'done' : ''}">
+        <div>
+          <div>${D.esc(t.title || t.subject || '—')}</div>
+          <div>
+            ${t.dueDate ? `<span>Due: ${D.esc(t.dueDate)}</span>` : ''}
+            ${t.planName ? `<span>${D.esc(t.planName)}</span>` : ''}
+            ${t.source ? `<span>${D.esc(t.source)}</span>` : ''}
+          </div>
+        </div>
+      </div>
`; + } + el.innerHTML = h; + const pending = tasks.filter(t => !t.completed && t.status !== 'completed').length; + window._updateOverviewMetrics?.({ tasks: pending }); + } + + function renderProjectBodies(agents, tasksData) { + const projects = [ + { el: 'project-project_a', agent: 'conrad', key: 'project_a' }, + { el: 'project-project_b', agent: 'soren', key: 'project_b' }, + { el: 'project-private', agent: 'jules', key: 'private' }, + { el: 'project-project_c', agent: 'amara', key: 'project_c' }, + { el: 'project-bates', agent: 'dash', key: 'bates' }, + ]; + const byProject = tasksData?.byProject || {}; + for (const p of projects) { + const container = document.getElementById(p.el); + if (!container) continue; + const a = agents?.find(x => x.name?.toLowerCase() === p.agent); + const proj = byProject[p.key]; + let html = ''; + if (a) { + html += ` ${D.esc(a.status||'idle')} · Last: ${D.esc(D.timeAgo(a.lastHeartbeat||a.last_heartbeat||a.last_activity))}`; + } + if (proj) { + const pending = (proj.tasks || []).filter(t => !t.completed).length; + html += `
📋 ${proj.count || 0} tasks (${pending} pending)
`; + } + container.innerHTML = html || 'No data'; + } + } + + async function refresh() { + let tasks = null, status = null, agents = null; + try { + const [tR, sR, aR] = await Promise.allSettled([ + D.fetchApi('tasks'), + D.fetchApi('status'), + D.fetchApi('agents'), + ]); + tasks = tR.status === 'fulfilled' ? tR.value : null; + status = sR.status === 'fulfilled' ? sR.value : null; + agents = aR.status === 'fulfilled' ? aR.value : null; + } catch {} + + let list = tasks ? (Array.isArray(tasks) ? tasks : (tasks.tasks || tasks.items || [])) : []; + // Only render in CEO panel if tasks panel isn't handling it + if (list.length) { + // Update metrics from real data + const pending = list.filter(t => !t.completed && t.status !== 'completed').length; + window._updateOverviewMetrics?.({ tasks: pending }); + } + + if (status?.unread_emails !== undefined) window._updateOverviewMetrics?.({ emails: status.unread_emails }); + + const agentList = agents ? (Array.isArray(agents) ? agents : (agents.agents || [])) : []; + renderProjectBodies(agentList, tasks); + } + + D.registerPanel('ceo', { init: refresh, refresh }); +})(); diff --git a/bates-core/plugins/dashboard/static/js/panel-chat.js b/bates-core/plugins/dashboard/static/js/panel-chat.js new file mode 100644 index 0000000..2824ed4 --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/panel-chat.js @@ -0,0 +1,421 @@ +/** + * Chat Panel + * Interactive chat with agent sessions via WebSocket RPC + */ +(function () { + const D = window.Dashboard; + + let sessions = []; + let activeSessionKey = null; + let messages = []; + let streamingText = ""; + let activeRunId = null; + let isStreaming = false; + let unsubChat = null; + let gwRef = null; + + function generateUUID() { + if (crypto.randomUUID) return crypto.randomUUID(); + return "10000000-1000-4000-8000-100000000000".replace(/[018]/g, (c) => + (+c ^ (crypto.getRandomValues(new Uint8Array(1))[0] & (15 >> +c / 4))).toString(16) + ); + } + + function extractText(content) { + if (!content) return ""; + if (typeof content === "string") return content; + if (Array.isArray(content)) { + const texts = content + .filter((b) => b && b.type === "text" && b.text) + .map((b) => b.text); + if (texts.length) return texts.join("\n"); + // Fallback: try to extract any string values from array items + return content + .map((b) => (typeof b === "string" ? b : b && b.text ? b.text : "")) + .filter(Boolean) + .join("\n"); + } + // Handle nested content (e.g., {content: "text"} or {content: [{type:"text", text:"..."}]}) + if (content.content !== undefined) return extractText(content.content); + if (content.text) return String(content.text); + if (content.message) return String(content.message); + // Last resort: try JSON but never return [object Object] + try { + const s = JSON.stringify(content); + return s !== "{}" ? s : ""; + } catch { + return ""; + } + } + + function renderSessionTabs() { + const bar = document.getElementById("chat-session-bar"); + if (!bar) return; + if (!sessions.length) { + bar.innerHTML = 'No sessions available'; + return; + } + const sorted = [...sessions].sort((a, b) => { + const aIsSub = (a.key || "").startsWith("subagent:"); + const bIsSub = (b.key || "").startsWith("subagent:"); + if (aIsSub !== bIsSub) return aIsSub ? 
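+      // subagent:* session keys (e.g. "subagent:main:worker-1", illustrative)
+      // sort after primary agent keys such as "agent:main:main"; ties fall
+      // back to most recently updated first in the next comparison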
1 : -1; + return (b.updatedAt || 0) - (a.updatedAt || 0); + }); + let html = ""; + for (const s of sorted) { + const key = s.key || ""; + const label = s.displayName || s.label || key.split(":").pop() || "Unknown"; + const isActive = key === activeSessionKey; + const isSub = key.startsWith("subagent:"); + const isRunning = s.updatedAt && Date.now() - s.updatedAt < 300000; + html += ``; + } + bar.innerHTML = html; + } + + function renderMessages() { + const el = document.getElementById("chat-messages"); + if (!el) return; + + if (!messages.length && !streamingText && !isStreaming) { + el.innerHTML = + '
💬Select a session to begin
'; + return; + } + + let html = ""; + for (const msg of messages) { + const text = extractText(msg.content); + if (!text) continue; + const role = msg.role || "system"; + const ts = msg.timestamp + ? new Date(typeof msg.timestamp === "number" ? msg.timestamp : msg.timestamp).toLocaleTimeString("en-GB", { + hour: "2-digit", + minute: "2-digit", + }) + : ""; + html += `
+      <div class="${role}">`;
+      html += `<div>${D.esc(text)}</div>`;
+      if (ts) html += `<div>${ts}</div>`;
+      html += `</div>`;
+    }
+
+    // (markup reconstructed: original tags were stripped; classes kept minimal)
+    if (isStreaming && streamingText) {
+      html += `<div class="assistant">`;
+      html += `<div>${D.esc(streamingText)}</div></div>`;
+    } else if (isStreaming) {
+      html += `<div class="assistant">`;
+      html += `<div>`;
+      html += `Thinking...`;
+      html += `</div>`;
+      html += `</div>
`; + } + + el.innerHTML = html; + + const scrollContainer = document.getElementById("chat-messages-scroll"); + if (scrollContainer) scrollContainer.scrollTop = scrollContainer.scrollHeight; + + updateInputBar(); + } + + function updateInputBar() { + const sendBtn = document.getElementById("chat-send-btn"); + const stopBtn = document.getElementById("chat-stop-btn"); + if (sendBtn) sendBtn.style.display = isStreaming ? "none" : ""; + if (stopBtn) stopBtn.style.display = isStreaming ? "" : "none"; + } + + async function loadHistory(gw) { + if (!gw || !gw.authenticated || !activeSessionKey) { + messages = []; + renderMessages(); + return; + } + try { + console.log("[Chat] Requesting chat.history for session:", activeSessionKey); + const result = await gw.rpc("chat.history", { sessionKey: activeSessionKey, limit: 200 }); + console.log("[Chat] chat.history result keys:", result ? Object.keys(result) : "null"); + const raw = result?.messages || []; + // Filter to user and assistant messages with actual text content + messages = raw.filter(m => { + const text = extractText(m.content); + return text && text.trim().length > 0 && (m.role === "user" || m.role === "assistant"); + }); + console.log("[Chat] Loaded", raw.length, "raw messages,", messages.length, "after filtering"); + if (raw.length > 0 && messages.length === 0) { + console.log("[Chat] All messages filtered out. Sample roles:", raw.slice(0, 5).map(m => m.role)); + console.log("[Chat] Sample message:", JSON.stringify(raw[0]).slice(0, 300)); + } + } catch (e) { + console.error("[Chat] chat.history failed:", e); + messages = []; + } + streamingText = ""; + isStreaming = false; + activeRunId = null; + renderMessages(); + } + + function subscribeToChatEvents(gw) { + if (unsubChat) { + unsubChat(); + unsubChat = null; + } + if (!gw) return; + unsubChat = gw.subscribe("chat", (payload) => { + if (payload.sessionKey !== activeSessionKey) return; + const state = payload.state; + + if (state === "delta") { + isStreaming = true; + activeRunId = payload.runId || activeRunId; + const text = extractText(payload.message); + // Deltas from gateway are CUMULATIVE (full text so far) — always replace + if (text) streamingText = text; + renderMessages(); + } else if (state === "final") { + isStreaming = false; + streamingText = ""; + activeRunId = null; + loadHistory(gw); + } else if (state === "aborted" || state === "error") { + isStreaming = false; + streamingText = ""; + activeRunId = null; + if (state === "error" && payload.errorMessage) { + messages.push({ role: "system", content: "Error: " + payload.errorMessage }); + } + loadHistory(gw); + } + }); + } + + async function selectSession(gw, sessionKey) { + activeSessionKey = sessionKey; + streamingText = ""; + isStreaming = false; + activeRunId = null; + renderSessionTabs(); + await loadHistory(gw); + subscribeToChatEvents(gw); + } + + async function sendMessage(gw) { + const input = document.getElementById("chat-input"); + if (!input) return; + const text = input.value.trim(); + if (!text || !activeSessionKey || !gw || !gw.authenticated) return; + + input.value = ""; + input.style.height = "auto"; + + // Optimistic local append + messages.push({ role: "user", content: text, timestamp: Date.now() }); + isStreaming = true; + streamingText = ""; + renderMessages(); + + try { + const result = await gw.rpc("chat.send", { + sessionKey: activeSessionKey, + message: text, + deliver: false, + idempotencyKey: generateUUID(), + }); + activeRunId = result?.runId || null; + } catch (e) { + console.error("chat.send 
failed:", e); + isStreaming = false; + messages.push({ role: "system", content: "Failed to send: " + e.message }); + renderMessages(); + } + } + + async function abortAgent(gw) { + if (!gw || !gw.authenticated || !activeSessionKey) return; + try { + await gw.rpc("chat.abort", { + sessionKey: activeSessionKey, + runId: activeRunId || undefined, + }); + } catch (e) { + console.error("chat.abort failed:", e); + } + isStreaming = false; + streamingText = ""; + activeRunId = null; + renderMessages(); + } + + async function refreshSessions(gw) { + if (!gw || !gw.authenticated) { + console.log("[Chat] refreshSessions skipped — gw:", !!gw, "authenticated:", gw?.authenticated); + return; + } + try { + console.log("[Chat] Calling sessions.list..."); + const result = await gw.rpc("sessions.list", {}); + console.log("[Chat] sessions.list result keys:", result ? Object.keys(result) : "null"); + const payload = result?.sessions || result?.items || (Array.isArray(result) ? result : []); + sessions = Array.isArray(payload) ? payload : []; + console.log("[Chat] Got", sessions.length, "sessions"); + } catch (e) { + console.error("[Chat] sessions.list failed:", e); + sessions = []; + } + // Always ensure main session is available for chat + if (!sessions.find(s => s.key === "agent:main:main")) { + sessions.unshift({ key: "agent:main:main", displayName: "Main", label: "main", updatedAt: Date.now() }); + } + renderSessionTabs(); + + // If selected session disappeared, clear + if (activeSessionKey && !sessions.find((s) => s.key === activeSessionKey)) { + activeSessionKey = null; + messages = []; + streamingText = ""; + isStreaming = false; + activeRunId = null; + renderMessages(); + const input = document.getElementById("chat-input"); + const sendBtn = document.getElementById("chat-send-btn"); + if (input) input.disabled = true; + if (sendBtn) sendBtn.disabled = true; + } + } + + function showConnStatus(msg, type) { + const el = document.getElementById("chat-conn-status"); + if (!el) return; + el.textContent = msg; + el.className = "chat-conn-status chat-conn-" + (type || "info"); + el.style.display = msg ? "block" : "none"; + } + + async function init(gw) { + gwRef = gw; + const el = document.getElementById("panel-chat"); + if (!el) return; + + el.innerHTML = ` + +
+
+
+
💬Select a session to begin
+
+
+
+ + + +
+ `; + + // Session tab click handler + const bar = document.getElementById("chat-session-bar"); + bar.addEventListener("click", (e) => { + const tab = e.target.closest(".chat-session-tab"); + if (!tab) return; + const key = tab.dataset.sessionKey; + if (key) { + selectSession(gw, key); + const input = document.getElementById("chat-input"); + const sendBtn = document.getElementById("chat-send-btn"); + if (input) input.disabled = false; + if (sendBtn) sendBtn.disabled = false; + } + }); + + // Send button + document.getElementById("chat-send-btn").addEventListener("click", () => sendMessage(gw)); + + // Stop button + document.getElementById("chat-stop-btn").addEventListener("click", () => abortAgent(gw)); + + // Textarea: Enter to send, Shift+Enter for newline, auto-resize + const input = document.getElementById("chat-input"); + input.addEventListener("keydown", (e) => { + if (e.key === "Enter" && !e.shiftKey) { + e.preventDefault(); + sendMessage(gw); + } + }); + input.addEventListener("input", () => { + input.style.height = "auto"; + input.style.height = Math.min(input.scrollHeight, 120) + "px"; + }); + + // Track connection status in chat panel + if (gw) { + const origOnStatus = gw.onStatusChange; + gw.onStatusChange = function(status) { + if (origOnStatus) origOnStatus(status); + if (status === "connected") { + showConnStatus("Connected", "ok"); + setTimeout(() => showConnStatus("", "ok"), 2000); + // Re-initialize chat on connect/reconnect + loadAndAutoSelect().catch(() => {}); + if (activeSessionKey) subscribeToChatEvents(gw); + } else if (status === "reconnecting") { + showConnStatus("Reconnecting... (attempt " + (gw._retryCount + 1) + "/" + gw._maxRetries + ")", "warn"); + } else if (status === "auth_failed") { + showConnStatus("WebSocket auth failed. Connection paused. " + (gw.lastError || ""), "error"); + } else if (status === "max_retries") { + showConnStatus("Connection failed after " + gw._maxRetries + " attempts. 
Refresh page to retry.", "error"); + } else if (status === "disconnected") { + showConnStatus("Disconnected", "warn"); + } + }; + } + + // Load sessions and auto-select main (with retry for auth timing) + async function loadAndAutoSelect() { + console.log("[Chat] loadAndAutoSelect — gw:", !!gw, "authenticated:", gw?.authenticated, "connected:", gw?.connected); + if (!gw || !gw.authenticated) return false; + showConnStatus("Connected", "ok"); + setTimeout(() => showConnStatus("", "ok"), 2000); + await refreshSessions(gw); + if (!activeSessionKey && sessions.length > 0) { + const main = sessions.find((s) => s.key === "agent:main:main") || sessions[0]; + if (main) { + await selectSession(gw, main.key); + input.disabled = false; + document.getElementById("chat-send-btn").disabled = false; + } + } + return true; + } + + showConnStatus("Connecting to gateway...", "info"); + + if (!(await loadAndAutoSelect())) { + // Auth not ready yet — retry up to 10 times + let retries = 0; + const retryInterval = setInterval(async () => { + retries++; + if (await loadAndAutoSelect() || retries >= 10) { + clearInterval(retryInterval); + if (retries >= 10 && (!gw || !gw.authenticated)) { + showConnStatus("Connection failed — retrying in background", "error"); + } + } + }, 500); + } + + // Subscribe to lifecycle events + if (gw) { + gw.subscribe("agent", () => refreshSessions(gw)); + } + } + + async function refresh(gw) { + gwRef = gw; + await refreshSessions(gw); + if (activeSessionKey) subscribeToChatEvents(gw); + } + + D.registerPanel("chat", { init, refresh }); +})(); diff --git a/bates-core/plugins/dashboard/static/js/panel-costs.js b/bates-core/plugins/dashboard/static/js/panel-costs.js new file mode 100644 index 0000000..08fd50d --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/panel-costs.js @@ -0,0 +1,153 @@ +/** + * Costs Panel — Real-time Token Usage & Operational Costs + */ +(function () { + const D = window.Dashboard; + + function fmt(n) { + if (n >= 1e9) return (n / 1e9).toFixed(1) + 'B'; + if (n >= 1e6) return (n / 1e6).toFixed(1) + 'M'; + if (n >= 1e3) return (n / 1e3).toFixed(1) + 'K'; + return String(n); + } + + function fmtDollar(n) { return '$' + n.toFixed(2); } + + function todayKey() { + const d = new Date(); + return d.getFullYear() + '-' + String(d.getMonth() + 1).padStart(2, '0') + '-' + String(d.getDate()).padStart(2, '0'); + } + + function render(data) { + const el = document.getElementById('panel-costs'); + if (!el) return; + + if (!data || data.error) { + el.innerHTML = '
⏳ Awaiting data...
'; + return; + } + + const today = todayKey(); + const todayData = data[today]; + + // 7-day aggregation + let tokens7 = 0, cost7 = 0, interactions7 = 0; + const now = new Date(); + for (let i = 0; i < 7; i++) { + const d = new Date(now); + d.setDate(d.getDate() - i); + const k = d.getFullYear() + '-' + String(d.getMonth() + 1).padStart(2, '0') + '-' + String(d.getDate()).padStart(2, '0'); + if (data[k]) { + tokens7 += data[k].totalTokens || 0; + cost7 += data[k].totalCost || 0; + interactions7 += data[k].interactions || 0; + } + } + + let h = ''; + + // Today's summary + if (todayData) { + h += `
+
Today's Usage
+
${fmt(todayData.totalTokens)} tokens
+
${(todayData.interactions || 0).toLocaleString()} interactions · Notional: ${fmtDollar(todayData.totalCost || 0)}
+
`; + } else { + h += `
+
Today's Usage
+
No data yet
+
`; + } + + // 7-day summary + h += `
+
7-Day Total
+
${fmt(tokens7)} tokens
+
${interactions7.toLocaleString()} interactions · Notional: ${fmtDollar(cost7)}
+
`; + + // Non-Anthropic cost note (only if there are non-Anthropic costs) + const nonAnthCost = todayData ? getNonAnthropicCost(todayData) : 0; + if (nonAnthCost > 0) { + h += `
+ 💰 Non-Anthropic API cost today: ${fmtDollar(nonAnthCost)} +
`; + } + + // Model breakdown for today + if (todayData && todayData.byModel) { + h += '
'; + h += '
ModelTokensNotional
'; + const models = Object.entries(todayData.byModel) + .filter(([, v]) => v.tokens > 0 || v.count > 0) + .sort((a, b) => b[1].tokens - a[1].tokens); + for (const [name, v] of models) { + const badge = `${fmtDollar(v.cost)}`; + h += `
+
+
${D.esc(name)}
+
${v.count} calls
+
+
${fmt(v.tokens)}
+
${badge}
+
`; + } + h += '
'; + } + + el.innerHTML = h; + } + + function getNonAnthropicCost(dayData) { + if (!dayData || !dayData.byModel) return 0; + let cost = 0; + for (const [name, v] of Object.entries(dayData.byModel)) { + if (!name.startsWith('claude-')) cost += v.cost || 0; + } + return cost; + } + + async function refresh() { + try { + const r = await fetch('/dashboard/api/costs'); + if (r.ok) { render(await r.json()); return; } + } catch {} + render(null); + } + + let _refreshInterval = null; + let _lastUpdated = null; + + function updateTimestamp() { + const el = document.getElementById('panel-costs'); + if (!el) return; + let ts = el.querySelector('.panel-last-updated'); + if (!ts) { ts = document.createElement('div'); ts.className = 'panel-last-updated'; el.appendChild(ts); } + if (_lastUpdated) { + const s = ((Date.now() - _lastUpdated) / 1000) | 0; + ts.textContent = `last updated: ${s}s ago`; + } + } + + const _origRefresh = refresh; + async function autoRefresh() { + await _origRefresh(); + _lastUpdated = Date.now(); + updateTimestamp(); + } + + function startAutoRefresh() { + stopAutoRefresh(); + _refreshInterval = setInterval(autoRefresh, 60000); + setInterval(updateTimestamp, 10000); + } + function stopAutoRefresh() { if (_refreshInterval) { clearInterval(_refreshInterval); _refreshInterval = null; } } + + async function initPanel() { + await autoRefresh(); + startAutoRefresh(); + } + + D.registerPanel('costs', { init: initPanel, refresh: autoRefresh, stopAutoRefresh }); +})(); diff --git a/bates-core/plugins/dashboard/static/js/panel-crons.js b/bates-core/plugins/dashboard/static/js/panel-crons.js new file mode 100644 index 0000000..b96ed48 --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/panel-crons.js @@ -0,0 +1,144 @@ +/** + * Cron Jobs Panel — Categorized card grid (v4) + * Excludes heartbeats from upcoming section on overview + */ +(function () { + const D = window.Dashboard; + + function cronH(e) { + if (!e) return ''; const p = e.split(' '); if (p.length < 5) return e; + const [m, h, , , d] = p; + if (e.startsWith('0 */')) return `Every ${p[1].replace('*/','')}h`; + if (e.startsWith('*/')) return `Every ${m.replace('*/','')}m`; + if (d === '1-5') return `Weekdays ${h}:${m.padStart(2,'0')}`; + if (d === '1') return `Mon ${h}:${m.padStart(2,'0')}`; + if (d === '5') return `Fri ${h}:${m.padStart(2,'0')}`; + if (d === '*') { if (h.includes('-')) return `Daily ${h} at :${m.padStart(2,'0')}`; return `Daily ${h}:${m.padStart(2,'0')}`; } + return e; + } + function evH(ms) { if (!ms) return ''; const s = Math.round(ms/1000); return s<60?`Every ${s}s`:s<3600?`Every ${Math.round(s/60)}m`:`Every ${(s/3600).toFixed(1).replace(/\.0$/,'')}h`; } + function fmtTs(ms) { + if (!ms) return '—'; const d = new Date(ms), pad = n => String(n).padStart(2,'0'); + const ds = `${pad(d.getDate())}/${pad(d.getMonth()+1)} ${pad(d.getHours())}:${pad(d.getMinutes())}`; + const diff = ms - Date.now(), a = Math.abs(diff); + if (a < 864e5) { const h = (a/36e5)|0, m = ((a%36e5)/6e4)|0; return `${ds} (${diff>0?'in ':''}${h}h${m}m${diff<=0?' 
ago':''})`; } + return ds; + } + function isHeartbeat(j) { + const n = (j.name||j.id||'').toLowerCase(); + return n.includes('heartbeat') || n.includes('hb-') || n.includes('checkin'); + } + function cat(j) { + if (isHeartbeat(j)) return 'Agent Heartbeats'; + const n = (j.name||j.id||'').toLowerCase(); + if (n.includes('report')||n.includes('standup')||n.includes('digest')) return 'Scheduled Reports'; + return 'System Tasks'; + } + + function renderCard(j) { + const name = j.name||j.id, s = j.schedule, st = j.state||{}; + const dis = !j.enabled, run = st.lastStatus === 'running'; + let sched = ''; + if (s?.kind === 'cron') sched = cronH(s.expr); + else if (s?.kind === 'every' || s?.everyMs) sched = evH(s.everyMs); + else if (s?.expr) sched = cronH(s.expr); + const runCount = st.runCount != null ? st.runCount : '—'; + return `
+
${D.esc(name)}
+
${D.esc(sched)}
+
+ Last: ${D.esc(st.lastRunAtMs ? fmtTs(st.lastRunAtMs) : 'never')}${st.lastStatus?' ('+D.esc(st.lastStatus)+')':''} + ${st.nextRunAtMs ? `Next: ${D.esc(fmtTs(st.nextRunAtMs))}` : ''} +
+
▸ click for details
+
+ ⏱ Last run: ${D.esc(st.lastRunAtMs ? fmtTs(st.lastRunAtMs) : 'never')} + ⏭ Next run: ${D.esc(st.nextRunAtMs ? fmtTs(st.nextRunAtMs) : '—')} + 📊 Status: ${D.esc(st.lastStatus || 'unknown')} + 🔢 Run count: ${D.esc(String(runCount))} + ${j.target ? `🎯 Target: ${D.esc(j.target)}` : ''} + ${j.channel ? `📡 Channel: ${D.esc(j.channel)}` : ''} +
+
+ + +
+
`; + } + + function render(jobs) { + const el = document.getElementById('panel-crons'); + if (!el) return; + if (!jobs?.length) { el.innerHTML = '
No cron jobs
'; return; } + + const groups = {}; + for (const j of jobs) { const c = cat(j); (groups[c] = groups[c] || []).push(j); } + + let h = '
'; + for (const [c, cj] of Object.entries(groups)) { + cj.sort((a,b) => (a.state?.lastStatus==='running'?-1:0)-(b.state?.lastStatus==='running'?-1:0) || (a.state?.nextRunAtMs||Infinity)-(b.state?.nextRunAtMs||Infinity)); + h += `
${c} ${cj.length}
`; + h += cj.map(renderCard).join(''); + } + h += '
'; + el.innerHTML = h; + renderUpcoming(jobs); + } + + function renderUpcoming(jobs) { + const el = document.getElementById('panel-crons-upcoming'); if (!el) return; + // Exclude heartbeats from upcoming on overview + const up = jobs + .filter(j => j.enabled && j.state?.nextRunAtMs && !isHeartbeat(j)) + .sort((a,b) => a.state.nextRunAtMs - b.state.nextRunAtMs) + .slice(0,5); + if (!up.length) { el.innerHTML = '
No upcoming crons
'; return; } + el.innerHTML = up.map(j => `
${D.esc(j.name||j.id)}
${D.esc(fmtTs(j.state.nextRunAtMs))}
`).join(''); + if (up[0]) { + const d = up[0].state.nextRunAtMs - Date.now(); + if (d > 0) { const m = (d/6e4)|0; window._updateOverviewMetrics?.({ nextCron: m >= 60 ? `${(m/60)|0}h ${m%60}m` : `${m}m` }); } + } + } + + async function refresh(gw) { + let jobs = null; + if (gw?.authenticated) try { const r = await gw.rpc('cron.list', {}); jobs = r?.jobs || r?.items || (Array.isArray(r) ? r : null); } catch {} + if (!jobs) { const d = await D.fetchApi('crons'); jobs = d?.jobs || []; } + render(jobs || []); + } + + let _refreshInterval = null; + let _lastUpdated = null; + + function updateTimestamp() { + const el = document.getElementById('panel-crons'); + if (!el) return; + let ts = el.querySelector('.panel-last-updated'); + if (!ts) { ts = document.createElement('div'); ts.className = 'panel-last-updated'; el.appendChild(ts); } + if (_lastUpdated) { + const s = ((Date.now() - _lastUpdated) / 1000) | 0; + ts.textContent = `last updated: ${s}s ago`; + } + } + + const _origRefresh = refresh; + async function autoRefresh(gw) { + await _origRefresh(gw); + _lastUpdated = Date.now(); + updateTimestamp(); + } + + function startAutoRefresh(gw) { + stopAutoRefresh(); + _refreshInterval = setInterval(() => autoRefresh(gw), 60000); + setInterval(updateTimestamp, 10000); + } + function stopAutoRefresh() { if (_refreshInterval) { clearInterval(_refreshInterval); _refreshInterval = null; } } + + async function initPanel(gw) { + await autoRefresh(gw); + startAutoRefresh(gw); + } + + D.registerPanel('crons', { init: initPanel, refresh: autoRefresh, stopAutoRefresh }); +})(); diff --git a/bates-core/plugins/dashboard/static/js/panel-delegations.js b/bates-core/plugins/dashboard/static/js/panel-delegations.js new file mode 100644 index 0000000..2020564 --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/panel-delegations.js @@ -0,0 +1,122 @@ +/** + * Claude Code Delegations Panel + * Shows running and recent Claude Code delegations with status tracking. + */ +(function () { + const D = window.Dashboard; + let delegations = []; + let fastRefreshInterval = null; + + function statusBadge(status) { + const cls = { + running: "agent-status-running", + completed: "agent-status-completed", + failed: "agent-status-failed", + }; + const labels = { + running: "\u25CF Running", + completed: "\u2713 Done", + failed: "\u2717 Failed", + }; + return '' + (labels[status] || status) + ""; + } + + function formatDuration(ms) { + if (!ms) return ""; + var s = Math.floor(ms / 1000); + if (s < 60) return s + "s"; + var m = Math.floor(s / 60); + if (m < 60) return m + "m " + (s % 60) + "s"; + var h = Math.floor(m / 60); + return h + "h " + (m % 60) + "m"; + } + + function renderCard(d) { + var elapsed = d.durationMs || (Date.now() - d.startedAt); + var duration = formatDuration(elapsed); + var started = D.timeAgo(new Date(d.startedAt).toISOString()); + var desc = (d.description || "").slice(0, 120); + if (d.description && d.description.length > 120) desc += "..."; + var promptName = (d.promptPath || "").split("/").pop() || ""; + var logName = (d.logPath || "").split("/").pop() || ""; + var isRunning = d.status === "running"; + + return '
' + + '
' + (isRunning ? "\u{1F4BB}" : d.status === "completed" ? "\u2705" : "\u274C") + "
" + + '
' + + '
' + D.esc(d.name) + "
" + + '
' + D.esc(started) + + (duration ? " \u00B7 " + duration : "") + + (d.exitCode !== undefined && d.exitCode !== null ? " \u00B7 exit " + d.exitCode : "") + + "
" + + (desc ? '
' + D.esc(desc) + "
" : "") + + '
' + + (promptName ? '\u{1F4C4} ' + D.esc(promptName) + "" : "") + + (logName ? '\u{1F4CB} ' + D.esc(logName) + "" : "") + + "
" + + "
" + + statusBadge(d.status) + + "
"; + } + + function render() { + var el = document.getElementById("panel-delegations"); + if (!el) return; + + if (delegations.length === 0) { + el.innerHTML = '
\u{1F4BB}No Claude Code delegations
'; + manageFastRefresh(false); + return; + } + + var running = delegations.filter(function (d) { return d.status === "running"; }); + var completed = delegations.filter(function (d) { return d.status === "completed"; }).slice(0, 10); + var failed = delegations.filter(function (d) { return d.status === "failed"; }).slice(0, 5); + + var html = '
'; + if (running.length > 0) { + html += '
Running
'; + html += running.map(renderCard).join(""); + } + if (completed.length > 0) { + html += (running.length > 0 ? '
Recent
' : ""); + html += completed.map(renderCard).join(""); + } + if (failed.length > 0) { + html += '
Failed
'; + html += failed.map(renderCard).join(""); + } + html += "
"; + el.innerHTML = html; + + manageFastRefresh(running.length > 0); + } + + function manageFastRefresh(hasRunning) { + if (hasRunning && !fastRefreshInterval) { + fastRefreshInterval = setInterval(refresh, 5000); + } else if (!hasRunning && fastRefreshInterval) { + clearInterval(fastRefreshInterval); + fastRefreshInterval = null; + } + } + + async function refresh() { + try { + var data = await D.fetchApi("delegations"); + if (data && Array.isArray(data.delegations)) { + delegations = data.delegations; + } + } catch (e) { + // Keep existing data + } + render(); + } + + async function init() { + render(); + await refresh(); + } + + D.registerPanel("delegations", { init: init, refresh: refresh }); +})(); diff --git a/bates-core/plugins/dashboard/static/js/panel-files.js b/bates-core/plugins/dashboard/static/js/panel-files.js new file mode 100644 index 0000000..055753c --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/panel-files.js @@ -0,0 +1,163 @@ +/** + * File Explorer Panel + * Shows recently modified files in the workspace + */ +(function () { + const D = window.Dashboard; + + function fileIcon(name) { + if (name.endsWith(".md")) return "📄"; + if (name.endsWith(".json")) return "{"; + if (name.endsWith(".sh")) return "⚙"; + if (name.endsWith(".ts") || name.endsWith(".js")) return "✎"; + if (name.endsWith(".py")) return "🐍"; + if (name.endsWith(".pptx")) return "📊"; + if (name.endsWith(".html") || name.endsWith(".css")) return "🌐"; + return "📄"; + } + + function render(files) { + const el = document.getElementById("panel-files"); + if (!el) return; + + if (!files || files.length === 0) { + el.innerHTML = '
📁No recent files
'; + return; + } + + let html = '
'; + for (const file of files) { + const dir = file.path.includes("/") ? file.path.substring(0, file.path.lastIndexOf("/")) : ""; + // Configure your OneDrive base URL here (tenant-my.sharepoint.com/personal/user_tenant_com/...) + const oneDriveBase = 'https://TENANT-my.sharepoint.com/personal/USER_TENANT_COM/_layouts/15/onedrive.aspx?id=/personal/USER_TENANT_COM/Documents/'; + const isDraft = file.path && file.path.startsWith('drafts/'); + const webUrl = file.webUrl || (isDraft ? oneDriveBase + encodeURIComponent(file.path) : ''); + const nameHtml = webUrl + ? `${D.esc(file.name)}` + : `${D.esc(file.name)}`; + html += ` +
+ ${fileIcon(file.name)} +
+
${nameHtml}
+ ${dir ? `
${D.esc(dir)}
` : ""} +
+
+
${D.timeAgo(file.modified)}
+
${D.formatSize(file.size)}
+
+
`; + } + html += "
"; + el.innerHTML = html; + } + + const SHOW_EXTS = new Set(['.docx','.xlsx','.pptx','.pdf','.md','.html','.png','.jpg','.jpeg','.txt','.gif','.webp','.csv']); + + function isUserFile(name) { + const dot = name.lastIndexOf('.'); + if (dot < 0) return false; + return SHOW_EXTS.has(name.substring(dot).toLowerCase()); + } + + async function refresh() { + const files = await D.fetchApi("files"); + const all = Array.isArray(files) ? files : []; + render(all.filter(f => isUserFile(f.name || ''))); + } + + let _refreshInterval = null; + let _lastUpdated = null; + + function updateTimestamp() { + const el = document.getElementById("panel-files"); + if (!el) return; + let ts = el.querySelector('.panel-last-updated'); + if (!ts) { ts = document.createElement('div'); ts.className = 'panel-last-updated'; el.appendChild(ts); } + if (_lastUpdated) { + const s = ((Date.now() - _lastUpdated) / 1000) | 0; + ts.textContent = `last updated: ${s}s ago`; + } + } + + const _origRefresh = refresh; + async function autoRefresh() { + await _origRefresh(); + _lastUpdated = Date.now(); + updateTimestamp(); + } + + function startAutoRefresh() { + stopAutoRefresh(); + _refreshInterval = setInterval(autoRefresh, 120000); + setInterval(updateTimestamp, 10000); + } + function stopAutoRefresh() { if (_refreshInterval) { clearInterval(_refreshInterval); _refreshInterval = null; } } + + async function initPanel() { + await autoRefresh(); + startAutoRefresh(); + } + + window._showFileContent = async function(path) { + const ov = document.getElementById('soul-modal-overlay'); + if (!ov) return; + const titleEl = document.getElementById('soul-modal-title'); + const bodyEl = document.getElementById('soul-modal-body'); + titleEl.textContent = '📄 ' + path; + const absPath = '~/.openclaw/workspace/' + path; + const ext = path.split('.').pop().toLowerCase(); + const typeMap = {md:'Markdown',json:'JSON',ts:'TypeScript',js:'JavaScript',py:'Python',sh:'Shell',html:'HTML',css:'CSS',txt:'Text',csv:'CSV',yaml:'YAML',yml:'YAML'}; + const fileType = typeMap[ext] || ext.toUpperCase(); + + // Try to fetch file content from the API + let contentHtml = ''; + try { + const resp = await D.fetchApi('file?path=' + encodeURIComponent(path)); + if (resp && !resp.error) { + const text = resp.content || ''; + if (text && text !== 'Not found') { + contentHtml = ` +
+
Contents
+
${Dashboard.esc(text)}
+
+
+ ⬇ Download + +
`; + } + } + } catch(e) {} + + if (!contentHtml) { + contentHtml = ` +
+
+ 📂 Full path: +
+ ${Dashboard.esc(absPath)} + +
`; + } + + bodyEl.innerHTML = ` +
+
+
File Details
+
+
Path: ${Dashboard.esc(path)}
+
Type: ${Dashboard.esc(fileType)}
+
+
+ ${contentHtml} +
`; + ov.classList.add('visible'); + }; + + D.registerPanel("files", { + init: initPanel, + refresh: autoRefresh, + stopAutoRefresh, + }); +})(); diff --git a/bates-core/plugins/dashboard/static/js/panel-integrations.js b/bates-core/plugins/dashboard/static/js/panel-integrations.js new file mode 100644 index 0000000..8bf6060 --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/panel-integrations.js @@ -0,0 +1,88 @@ +/** + * Integrations Panel — MCP Servers & External Services (Live Data Only) + */ +(function () { + const D = window.Dashboard; + + function render(healthData) { + const el = document.getElementById('panel-integrations'); + if (!el) return; + + if (!healthData || !healthData.servers || !healthData.servers.length) { + el.innerHTML = '
⏳ Checking MCP server health...
'; + return; + } + + let h = '
MCP Servers (Live Health)
'; + h += '
'; + + const servers = healthData.servers.sort((a, b) => { + if (a.healthy !== b.healthy) return a.healthy ? -1 : 1; + return a.name.localeCompare(b.name); + }); + + for (const s of servers) { + const statusColor = s.healthy ? 'var(--green)' : 'var(--red, #ef4444)'; + const statusText = s.healthy + ? `✓ ${s.tools} tool${s.tools !== 1 ? 's' : ''} · ${s.responseTime}s` + : '✗ Unhealthy'; + h += `
+      <div>
+        <span style="color:${statusColor}">●</span>
+        <div>
+          <div>${D.esc(s.name)}</div>
+          <div>${statusText}</div>
+        </div>
+      </div>
`; + } + h += '
'; + + const healthy = servers.filter(s => s.healthy).length; + h += `
${healthy}/${servers.length} servers healthy
`; + + el.innerHTML = h; + } + + async function refresh() { + let healthData = null; + try { + const res = await fetch('/dashboard/api/integrations/health'); + if (res.ok) healthData = await res.json(); + } catch {} + render(healthData); + } + + let _refreshInterval = null; + let _lastUpdated = null; + + function updateTimestamp() { + const el = document.getElementById('panel-integrations'); + if (!el) return; + let ts = el.querySelector('.panel-last-updated'); + if (!ts) { ts = document.createElement('div'); ts.className = 'panel-last-updated'; el.appendChild(ts); } + if (_lastUpdated) { + const s = ((Date.now() - _lastUpdated) / 1000) | 0; + ts.textContent = `last updated: ${s}s ago`; + } + } + + const _origRefresh = refresh; + async function autoRefresh() { + await _origRefresh(); + _lastUpdated = Date.now(); + updateTimestamp(); + } + + function startAutoRefresh() { + stopAutoRefresh(); + _refreshInterval = setInterval(autoRefresh, 120000); + setInterval(updateTimestamp, 10000); + } + function stopAutoRefresh() { if (_refreshInterval) { clearInterval(_refreshInterval); _refreshInterval = null; } } + + async function initPanel() { + await autoRefresh(); + startAutoRefresh(); + } + + D.registerPanel('integrations', { init: initPanel, refresh: autoRefresh, stopAutoRefresh }); +})(); diff --git a/bates-core/plugins/dashboard/static/js/panel-memory.js b/bates-core/plugins/dashboard/static/js/panel-memory.js new file mode 100644 index 0000000..2548f16 --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/panel-memory.js @@ -0,0 +1,177 @@ +/** + * Live Memory Feed Panel + * Shows observation data + real-time agent events + */ +(function () { + const D = window.Dashboard; + let entries = []; + let activeFilter = null; + const MAX_ENTRIES = 100; + + const CATEGORIES = ["goal", "fact", "preference", "deadline", "decision", "contact", "pattern", "agent"]; + + function parseObservations(data) { + const items = []; + if (!data) return items; + + for (const [filename, content] of Object.entries(data)) { + if (filename.endsWith(".json")) { + // Parse JSON observations (like last-checkin.json) + try { + const obj = JSON.parse(content); + if (obj.last_run) { + items.push({ + timestamp: obj.last_run, + tag: "agent", + content: `Check-in: ${obj.items_reported_today || 0} items reported, ${obj.skipped_runs || 0} skipped`, + }); + } + } catch {} + continue; + } + + // Parse markdown observations + const category = filename.replace(".md", "").replace("file-index", "fact"); + if (!CATEGORIES.includes(category) && category !== "file-index") continue; + + const lines = content.split("\n"); + let currentEntry = null; + + for (const line of lines) { + const trimmed = line.trim(); + if (!trimmed || trimmed.startsWith("#") || trimmed.startsWith("|") || trimmed.startsWith("---")) continue; + + // Date-prefixed entry: "- 2026-02-07: Something" + const dateMatch = trimmed.match(/^-\s*(\d{4}-\d{2}-\d{2}):\s*(.+)/); + if (dateMatch) { + items.push({ + timestamp: dateMatch[1] + "T12:00:00Z", + tag: category, + content: dateMatch[2], + }); + continue; + } + + // Bullet entry without date + const bulletMatch = trimmed.match(/^[-*]\s+(.+)/); + if (bulletMatch) { + items.push({ + timestamp: null, + tag: category, + content: bulletMatch[1], + }); + } + } + } + + return items; + } + + function addAgentEvent(data) { + const content = data.text || data.message || data.delta || JSON.stringify(data).slice(0, 200); + if (!content || content === "{}") return; + + entries.unshift({ + timestamp: new Date().toISOString(), + tag: 
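+      // live WebSocket events are tagged "agent" so the filter bar can
+      // separate streamed activity from entries parsed out of the
+      // observation files on disk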
"agent", + content: String(content).slice(0, 300), + }); + + if (entries.length > MAX_ENTRIES) { + entries = entries.slice(0, MAX_ENTRIES); + } + + render(); + } + + function render() { + const el = document.getElementById("panel-memory"); + if (!el) return; + + const filtered = activeFilter ? entries.filter((e) => e.tag === activeFilter) : entries; + + if (filtered.length === 0) { + el.innerHTML = '
No observations yet
'; + return; + } + + let html = '
'; + for (const entry of filtered) { + const ts = entry.timestamp + ? new Date(entry.timestamp).toLocaleDateString("en-GB", { month: "short", day: "numeric" }) + : ""; + html += ` +
+        <span>${D.esc(ts)}</span>
+        <span>${D.esc(entry.tag)}</span>
+        <span>${D.esc(entry.content)}</span>
`; + } + html += "
"; + el.innerHTML = html; + } + + function setupFilters() { + const bar = document.getElementById("memory-filters"); + if (!bar) return; + + let html = ``; + for (const cat of CATEGORIES) { + html += ``; + } + bar.innerHTML = html; + + bar.addEventListener("click", (e) => { + const btn = e.target.closest(".filter-btn"); + if (!btn) return; + const filter = btn.dataset.filter; + activeFilter = filter === "all" ? null : filter; + bar.querySelectorAll(".filter-btn").forEach((b) => b.classList.remove("active")); + btn.classList.add("active"); + render(); + }); + } + + async function refresh() { + const data = await D.fetchApi("observations"); + if (data && !data.error) { + const parsed = parseObservations(data); + // Merge new observations, keeping agent events from WebSocket + const agentEntries = entries.filter((e) => e.tag === "agent" && e.timestamp); + entries = [...agentEntries, ...parsed]; + // Sort: dated entries by date desc, undated at the end + entries.sort((a, b) => { + if (!a.timestamp && !b.timestamp) return 0; + if (!a.timestamp) return 1; + if (!b.timestamp) return -1; + return new Date(b.timestamp).getTime() - new Date(a.timestamp).getTime(); + }); + if (entries.length > MAX_ENTRIES) entries = entries.slice(0, MAX_ENTRIES); + } + render(); + } + + async function init(gw) { + setupFilters(); + await refresh(); + + // Subscribe to real-time agent events + if (gw) { + gw.subscribe("agent", (data) => { + if (data.event === "agent.assistant" || data.type === "assistant") { + addAgentEvent(data); + } + }); + gw.subscribe("*", (data) => { + if (data.event && data.event.includes("memory")) { + addAgentEvent(data); + } + }); + } + } + + D.registerPanel("memory", { + init, + refresh, + }); +})(); diff --git a/bates-core/plugins/dashboard/static/js/panel-rollout.js b/bates-core/plugins/dashboard/static/js/panel-rollout.js new file mode 100644 index 0000000..346c4b6 --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/panel-rollout.js @@ -0,0 +1,113 @@ +/** + * Bates Rollout Panel — Agent deployment status by layer + * Fetches from gateway API + */ +(function () { + const D = window.Dashboard; + + const LAYERS = [ + { name: 'Layer 1 — COO', agents: [{ name: 'Bates', role: 'Chief Operating Officer' }] }, + { name: 'Layer 2 — Deputies', agents: [ + { name: 'Conrad', role: 'Project A Deputy' }, + { name: 'Soren', role: 'Project B Deputy' }, + { name: 'Amara', role: 'Project C Deputy' }, + { name: 'Jules', role: 'Personal Deputy' }, + { name: 'Dash', role: 'DevOps Deputy' }, + ]}, + { name: 'Layer 3 — Specialists', agents: [ + { name: 'Mercer', role: 'Finance Specialist' }, + { name: 'Kira', role: 'Content Specialist' }, + { name: 'Nova', role: 'Research Specialist' }, + { name: 'Paige', role: 'Documentation Specialist' }, + { name: 'Quinn', role: 'QA Specialist' }, + { name: 'Archer', role: 'Architecture Specialist' }, + ]}, + ]; + + const ALL_AGENTS = LAYERS.flatMap(l => l.agents); + + function findAgent(apiAgents, name) { + if (!apiAgents) return null; + return apiAgents.find(a => a.name && a.name.toLowerCase() === name.toLowerCase()); + } + + function render(apiAgents) { + const el = document.getElementById('panel-rollout'); + if (!el) return; + + const deployed = ALL_AGENTS.filter(a => findAgent(apiAgents, a.name)).length; + const total = ALL_AGENTS.length; + const pct = Math.round((deployed / total) * 100); + + let html = ''; + + // Progress bar + html += `
+
+ + ${deployed}/${total} agents (${pct}%) +
+
+
`; + + // Layers + for (const layer of LAYERS) { + html += `
+ +
`; + + for (const agentDef of layer.agents) { + const data = findAgent(apiAgents, agentDef.name); + const exists = !!data; + const model = data && data.model ? data.model : '—'; + const workspace = data && data.workspace !== undefined ? (data.workspace ? '✓' : '✗') : (exists ? '✓' : '✗'); + const wsClass = workspace === '✓' ? 'ok' : 'error'; + const heartbeat = data && data.heartbeat ? data.heartbeat : null; + const hbActive = heartbeat && (heartbeat.active || heartbeat.enabled || heartbeat.cron); + const hbTime = data && (data.lastHeartbeat || data.last_heartbeat || (heartbeat && heartbeat.last)) ? D.timeAgo(data.lastHeartbeat || data.last_heartbeat || heartbeat.last) : '—'; + const statusIcon = exists ? '☑' : '☐'; + const statusClass = exists ? 'rollout-deployed' : 'rollout-pending'; + + // Model badge + let modelClass = 'other'; + const ml = model.toLowerCase(); + if (ml.includes('opus')) modelClass = 'opus'; + else if (ml.includes('sonnet')) modelClass = 'sonnet'; + else if (ml.includes('gemini')) modelClass = 'gemini'; + + html += `
+ ${statusIcon} +
+
${D.esc(agentDef.name)} ${D.esc(data && data.role ? data.role : agentDef.role)}
+
+ ${D.esc(model)} + + ${hbActive ? '⏱ Active' : '⏱ Inactive'} + Last: ${D.esc(hbTime)} +
+
+
`; + } + + html += '
'; + } + + el.innerHTML = html; + } + + async function refresh() { + try { + const res = await fetch('/dashboard/api/agents'); + if (res.ok) { + const data = await res.json(); + const agents = Array.isArray(data) ? data : (data && data.agents ? data.agents : []); + render(agents); + return; + } + } catch {} + const el = document.getElementById('panel-rollout'); + if (el) el.innerHTML = '
🚀No data available
Could not reach gateway API
'; + } + + D.registerPanel('rollout', { init: refresh, refresh }); +})(); diff --git a/bates-core/plugins/dashboard/static/js/panel-settings.js b/bates-core/plugins/dashboard/static/js/panel-settings.js new file mode 100644 index 0000000..a29b303 --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/panel-settings.js @@ -0,0 +1,43 @@ +/** + * Settings Panel — Read-only config summary + * Fetches from gateway API + */ +(function () { + const D = window.Dashboard; + + function card(title, items) { + let html = `
+    <div>${D.esc(title)}</div>
`; + for (const [k, v] of items) { + html += `
+      <div><span>${D.esc(k)}</span> <span>${D.esc(String(v))}</span></div>
`; + } + return html + '
'; + } + + function render(data) { + const el = document.getElementById("panel-settings"); + if (!el) return; + + if (!data || data.error) { + el.innerHTML = '
Settings unavailable
Could not reach gateway API
'; + return; + } + + el.innerHTML = '
' + + card('Model', [['Primary', data.default_model || '—'], ['Fallbacks', (data.model_fallbacks || []).join(', ') || '—']]) + + card('Heartbeat', [['Interval', data.heartbeat_interval || '—'], ['Active Hours', data.heartbeat_hours || '—']]) + + card('Compaction', [['Mode', data.compaction_mode || '—'], ['Reserve Tokens', data.compaction_reserve_tokens || '—'], ['Max History', data.compaction_max_history || '—']]) + + card('Fleet', [['Agents', data.num_agents || '—'], ['Cron Jobs', data.num_cron_jobs || '—'], ['Enabled', data.num_cron_enabled || '—']]) + + card('Session', [['Reset Mode', data.session_reset_mode || '—'], ['Idle Timeout', (data.session_idle_minutes || '?') + 'm'], ['Gateway Port', data.gateway_port || '—']]) + + '
'; + } + + async function refresh() { + try { + const res = await fetch("/dashboard/api/settings"); + if (res.ok) { render(await res.json()); return; } + } catch {} + render(null); + } + + D.registerPanel("settings", { init: refresh, refresh }); +})(); diff --git a/bates-core/plugins/dashboard/static/js/panel-standup.js b/bates-core/plugins/dashboard/static/js/panel-standup.js new file mode 100644 index 0000000..4fe5b2b --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/panel-standup.js @@ -0,0 +1,73 @@ +/** + * Standup Panel — Conversation-style standup view + */ +(function () { + const D = window.Dashboard; + + const AGENT_EMOJIS = { + bates: '🐧', conrad: '🏦', soren: '⚡', amara: '🏫', jules: '🏠', + dash: '🚀', mercer: '⚖️', kira: '✍️', nova: '🔬', paige: '💰', + quinn: '👥', archer: '📝', + }; + + function getEmoji(name) { + return AGENT_EMOJIS[(name || '').toLowerCase()] || '🤖'; + } + + function renderStandups(data) { + const el = document.getElementById('panel-standup'); + if (!el) return; + + const standups = Array.isArray(data) ? data : (data?.standups || data?.items || []); + + let h = `
+

📋 Daily Standup

+
+ + +
+
`; + + if (!standups.length) { + h += `
+
📋
+
No standups yet
+
Standups will appear here after the first daily collection.
+
`; + el.innerHTML = h; + return; + } + + h += '
'; + for (const msg of standups) { + const name = msg.agent || msg.name || 'Unknown'; + const role = msg.role || ''; + const text = msg.message || msg.content || msg.text || ''; + const time = msg.timestamp ? new Date(msg.timestamp).toLocaleTimeString('en-GB', { hour: '2-digit', minute: '2-digit' }) : ''; + + h += `
+
${getEmoji(name)}
+
+
+ ${D.esc(name)} + ${D.esc(role)} + ${time ? `${time}` : ''} +
+
${D.esc(text)}
+
+
`; + } + h += '
'; + el.innerHTML = h; + } + + async function refresh() { + try { + const res = await fetch(`/dashboard/api/standups`); + if (res.ok) { renderStandups(await res.json()); return; } + } catch {} + renderStandups([]); + } + + D.registerPanel('standup', { init: refresh, refresh }); +})(); diff --git a/bates-core/plugins/dashboard/static/js/panel-status.js b/bates-core/plugins/dashboard/static/js/panel-status.js new file mode 100644 index 0000000..2bc19e1 --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/panel-status.js @@ -0,0 +1,93 @@ +/** + * System Status Panel + * Shows gateway, telegram, MCP, disk usage from health.json + */ +(function () { + const D = window.Dashboard; + + function render(health) { + const el = document.getElementById("panel-status"); + if (!el) return; + + if (!health || health.error) { + el.innerHTML = '
Health data unavailable
'; + return; + } + + const services = health.services || {}; + const gwStatus = services.openclaw_gateway || "unknown"; + const tgStatus = services.telegram_bot || "unknown"; + const disk = health.disk_usage_percent ?? -1; + const uptime = health.uptime_hours ?? 0; + const ts = health.timestamp; + const checkin = health.checkin_summary || {}; + + const gwClass = gwStatus === "running" ? "ok" : "down"; + const tgClass = tgStatus === "connected" ? "ok" : "error"; + const diskClass = disk > 80 ? "danger" : disk > 60 ? "warning" : ""; + const diskBarClass = disk > 80 ? "danger" : disk > 60 ? "warning" : ""; + + // MCP servers + const mcpEntries = Object.entries(services).filter(([k]) => k.startsWith("mcp_")); + let mcpHtml = ""; + if (mcpEntries.length > 0 && !services.mcp_note) { + for (const [key, val] of mcpEntries) { + const name = key.replace("mcp_", "").replace(/_/g, "-"); + const cls = val === "ok" ? "ok" : "error"; + mcpHtml += ` +
+        <div class="status-item">
+          <span class="status-dot ${cls}"></span>
+          <div class="status-info">
+            <span class="status-label">${D.esc(name)}</span>
+            <span class="status-value">${D.esc(String(val))}</span>
+          </div>
+        </div>
`; + } + } + + el.innerHTML = ` +
+      <div class="status-grid">
+        <div class="status-item">
+          <span class="status-dot ${gwClass}"></span>
+          <div class="status-info">
+            <span class="status-label">Gateway</span>
+            <span class="status-value">${D.esc(gwStatus)}${uptime > 0 ? ` (${uptime}h)` : ""}</span>
+          </div>
+        </div>
+        <div class="status-item">
+          <span class="status-dot ${tgClass}"></span>
+          <div class="status-info">
+            <span class="status-label">Telegram</span>
+            <span class="status-value">${D.esc(tgStatus)}</span>
+          </div>
+        </div>
+        <div class="status-item">
+          <span class="status-dot ${diskClass}"></span>
+          <div class="status-info">
+            <span class="status-label">Disk</span>
+            <span class="status-value">${disk >= 0 ? disk + "%" : "N/A"}</span>
+            ${disk >= 0 ? `<div class="disk-bar"><div class="disk-bar-fill ${diskBarClass}" style="width: ${disk}%"></div></div>` : ""}
+          </div>
+        </div>
+        <div class="status-item">
+          <div class="status-info">
+            <span class="status-label">Check-ins Today</span>
+            <span class="status-value">${checkin.items_reported_today ?? "N/A"} reported · ${checkin.skipped_runs ?? 0} skipped</span>
+          </div>
+        </div>
+        ${mcpHtml}
+      </div>
+      ${ts ? `<div class="panel-last-updated">Last health check: ${D.timeAgo(ts)}</div>
` : ""} + `; + } + + async function refresh() { + const health = await D.fetchApi("health"); + render(health); + } + + D.registerPanel("status", { + init: refresh, + refresh: refresh, + }); +})(); diff --git a/bates-core/plugins/dashboard/static/js/panel-tasks.js b/bates-core/plugins/dashboard/static/js/panel-tasks.js new file mode 100644 index 0000000..3753236 --- /dev/null +++ b/bates-core/plugins/dashboard/static/js/panel-tasks.js @@ -0,0 +1,130 @@ +/** + * Tasks Panel — Aggregated Planner + To Do tasks + */ +(function () { + const D = window.Dashboard; + let lastData = null; + let sortMode = 'priority'; // priority | due | project + let filterProject = 'all'; + let showCompleted = false; + + const PRI_COLORS = { urgent: '#ff4757', important: '#ffa502', medium: '#00d4ff', low: '#747d8c' }; + const PRI_ORDER = { urgent: 0, important: 1, medium: 2, low: 3 }; + + function priDot(p) { + return ``; + } + + function renderControls(container) { + return `
+      <div class="filter-bar">
+        <select id="tasks-filter-project">
+          <option value="all" ${filterProject === 'all' ? 'selected' : ''}>All projects</option>
+        </select>
+        <select id="tasks-sort">
+          <option value="priority" ${sortMode === 'priority' ? 'selected' : ''}>Priority</option>
+          <option value="due" ${sortMode === 'due' ? 'selected' : ''}>Due date</option>
+          <option value="project" ${sortMode === 'project' ? 'selected' : ''}>Project</option>
+        </select>
+        <label><input type="checkbox" id="tasks-show-done" ${showCompleted ? 'checked' : ''}> Show completed</label>
+      </div>
`; + } + + function sortTasks(tasks) { + const sorted = [...tasks]; + switch (sortMode) { + case 'due': + sorted.sort((a, b) => { + if (a.completed !== b.completed) return a.completed ? 1 : -1; + if (a.dueDate && b.dueDate) return a.dueDate.localeCompare(b.dueDate); + if (a.dueDate) return -1; + return b.dueDate ? 1 : 0; + }); + break; + case 'project': + sorted.sort((a, b) => (a.project || '').localeCompare(b.project || '') || (a.priorityNum ?? 5) - (b.priorityNum ?? 5)); + break; + default: // priority + sorted.sort((a, b) => { + if (a.completed !== b.completed) return a.completed ? 1 : -1; + return (a.priorityNum ?? 5) - (b.priorityNum ?? 5) || (a.dueDate || 'z').localeCompare(b.dueDate || 'z'); + }); + } + return sorted; + } + + function renderTaskRow(t) { + return D.renderTaskRow(t); + } + + function render() { + const el = document.getElementById('panel-tasks-body'); + if (!el || !lastData) return; + + let tasks = lastData.tasks || []; + if (filterProject !== 'all') tasks = tasks.filter(t => t.project === filterProject); + if (!showCompleted) tasks = tasks.filter(t => !t.completed); + tasks = sortTasks(tasks); + + let html = renderControls(); + + if (!tasks.length) { + html += '
<div class="empty-state">No tasks to display</div>
'; + } else if (sortMode === 'project') { + // Group by project + const groups = {}; + for (const t of tasks) { + const k = t.project || 'other'; + if (!groups[k]) groups[k] = { name: t.planName || k, tasks: [] }; + groups[k].tasks.push(t); + } + for (const [k, g] of Object.entries(groups)) { + html += `
<div><div class="section-label">${D.esc(g.name)} (${g.tasks.length})</div>
`; + for (const t of g.tasks) html += renderTaskRow(t); + html += '
</div>';
+      }
+    } else {
+      for (const t of tasks) html += renderTaskRow(t);
+    }
+
+    html += `
<div class="panel-last-updated">Updated ${D.timeAgo(lastData.updated)} · ${lastData.tasks?.length || 0} total tasks</div>
`; + el.innerHTML = html; + + // Wire controls + document.getElementById('tasks-filter-project')?.addEventListener('change', e => { filterProject = e.target.value; render(); }); + document.getElementById('tasks-sort')?.addEventListener('change', e => { sortMode = e.target.value; render(); }); + document.getElementById('tasks-show-done')?.addEventListener('change', e => { showCompleted = e.target.checked; render(); }); + + // Wire click-to-open and complete buttons + D.wireTaskRows(el, () => { setTimeout(refresh, 1000); }); + } + + async function refresh() { + const el = document.getElementById('panel-tasks-body'); + if (el && !lastData) el.innerHTML = '
<div class="placeholder">Loading tasks from Planner & To Do…</div>
'; + try { + const data = await D.fetchApi('tasks'); + if (data && !data.error && !data['jwt-auth-error'] && data.tasks) { + lastData = data; + // Update overview metrics badge with pending task count + const pending = data.tasks.filter(t => !t.completed).length; + window._updateOverviewMetrics?.({ tasks: pending }); + render(); + } else { + if (el) el.innerHTML = `
<div class="empty-state">⚠ ${D.esc(data?.error || 'Failed to load tasks')}</div>
`; + } + } catch (e) { + if (el) el.innerHTML = `
<div class="empty-state">⚠ ${D.esc(e.message)}</div>
`; + } + } + + // Expose for project detail modals + window._getProjectTasks = function (projectKey) { + if (!lastData?.byProject?.[projectKey]) return null; + return lastData.byProject[projectKey]; + }; + + D.registerPanel('tasks', { init: refresh, refresh }); +})(); diff --git a/bates-core/plugins/dashboard/static/styles.css b/bates-core/plugins/dashboard/static/styles.css new file mode 100644 index 0000000..6e29df3 --- /dev/null +++ b/bates-core/plugins/dashboard/static/styles.css @@ -0,0 +1,1553 @@ +/* ═══════════════════════════════════════════════════════════ + OpenClaw Command Center — Glassmorphism Design System v5 + Inspired by: Crypto Wallet glassmorphism aesthetic + ═══════════════════════════════════════════════════════════ */ + +:root { + --bg: #060a14; + --glass-bg: rgba(12, 20, 45, 0.2); + --glass-bg-hover: rgba(20, 35, 70, 0.3); + --glass-border: rgba(90, 200, 232, 0.6); + --glass-border-hover: rgba(90, 200, 232, 0.85); + --glass-blur: blur(24px); + --nav-bg: rgba(8, 12, 25, 0.4); + + --blue: #1F4E8C; + --blue-lt: #3B7DD8; + --blue-glow: 0 0 20px rgba(31, 78, 140, 0.3); + --orange: #F08C2E; + --red: #D6452A; + --green: #22C55E; + --teal: #14B8A6; + --purple: #8B5CF6; + + --text: #E8EAED; + --text2: rgba(255, 255, 255, 0.5); + --text3: rgba(255, 255, 255, 0.3); + --text-muted: rgba(255, 255, 255, 0.25); + + --font: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif; + --font-mono: 'JetBrains Mono', 'Fira Code', monospace; + --mono: var(--font-mono); + --r: 12px; + --r-sm: 8px; + --topbar: 56px; + --chat-w: 380px; +} + +*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; } + +html, body { + height: 100%; + background-color: #060a14; + background-image: url('/dashboard/assets/bg.png'); + background-size: cover; + background-position: center; + background-repeat: no-repeat; + background-attachment: fixed; + color: var(--text); + font: 13px/1.5 var(--font); + -webkit-font-smoothing: antialiased; + overflow: hidden; +} + +/* Dark overlay on top of background image — disabled, bg already blurred/matte */ +#bg-overlay { + display: none; +} + +/* Scrollbar */ +::-webkit-scrollbar { width: 5px; } +::-webkit-scrollbar-track { background: transparent; } +::-webkit-scrollbar-thumb { background: rgba(255,255,255,0.1); border-radius: 4px; } +::-webkit-scrollbar-thumb:hover { background: rgba(255,255,255,0.2); } + +/* ─── Glass Card (core component) ─── */ +.glass-card { + background: var(--glass-bg); + backdrop-filter: var(--glass-blur); + -webkit-backdrop-filter: var(--glass-blur); + border: 1px solid var(--glass-border); + border-top: 1px solid rgba(90, 200, 232, 0.5); + border-radius: var(--r); + box-shadow: 0 0 8px rgba(90, 200, 232, 0.25), 0 0 20px rgba(90, 200, 232, 0.1), inset 0 1px 0 rgba(255, 255, 255, 0.06); + transition: border-color 0.3s, box-shadow 0.3s; +} +.glass-card:hover { + border-color: var(--glass-border-hover); + box-shadow: 0 0 12px rgba(90, 200, 232, 0.35), 0 0 30px rgba(90, 200, 232, 0.15), inset 0 1px 0 rgba(255, 255, 255, 0.08); +} + +.glass-panel { + background: rgba(10, 18, 40, 0.25); + backdrop-filter: var(--glass-blur); + -webkit-backdrop-filter: var(--glass-blur); + border: 1px solid var(--glass-border); + border-top: 1px solid rgba(255, 255, 255, 0.15); + border-radius: var(--r); + box-shadow: 0 0 8px rgba(90, 200, 232, 0.25), 0 0 20px rgba(90, 200, 232, 0.1); +} + +.glass-nav { + background: var(--nav-bg); + backdrop-filter: var(--glass-blur); + -webkit-backdrop-filter: var(--glass-blur); + border-bottom: 1px solid rgba(90, 200, 
232, 0.4); + box-shadow: 0 0 10px rgba(90, 200, 232, 0.15), 0 4px 20px rgba(0, 0, 0, 0.3); +} + +/* ═══════════════ TOP BAR ═══════════════ */ +.topbar { + position: fixed; + top: 0; + left: 0; + right: 0; + height: var(--topbar); + display: flex; + align-items: center; + padding: 0 16px; + z-index: 100; + background: var(--nav-bg); + backdrop-filter: var(--glass-blur); + -webkit-backdrop-filter: var(--glass-blur); + border-bottom: 1px solid var(--glass-border); + gap: 12px; +} + +.topbar-left { + display: flex; + align-items: center; + flex-shrink: 0; +} + +.topbar-logo { + height: 36px; + width: auto; + object-fit: contain; +} + +.topbar-logo-fallback { + font-weight: 700; + font-size: 16px; + letter-spacing: 1px; + color: var(--blue-lt); + display: flex; + align-items: center; + gap: 6px; +} + +.topbar-nav { + display: flex; + align-items: center; + gap: 4px; + margin: 0 auto; + flex-shrink: 0; +} + +.nav-tab { + display: flex; + align-items: center; + gap: 6px; + padding: 7px 14px; + border: none; + border-radius: 8px; + background: transparent; + color: var(--text2); + font: 12px/1 var(--font); + font-weight: 500; + cursor: pointer; + transition: all 0.2s; + white-space: nowrap; +} +.nav-tab:hover { + background: rgba(255, 255, 255, 0.06); + color: var(--text); +} +.nav-tab.active { + background: rgba(31, 78, 140, 0.3); + color: #fff; + box-shadow: var(--blue-glow), inset 0 0 0 1px rgba(31, 78, 140, 0.3); +} +.nav-icon { font-size: 14px; } +.nav-label { font-size: 12px; } + +.topbar-right { + display: flex; + align-items: center; + gap: 12px; + flex-shrink: 0; + margin-left: auto; +} + +.topbar-clock { + font: 500 13px/1 var(--font-mono); + color: var(--text2); + letter-spacing: 0.5px; +} + +.conn-badge { + display: flex; + align-items: center; + gap: 6px; +} +.conn-dot { + width: 8px; + height: 8px; + border-radius: 50%; + background: #555; + transition: background 0.3s; +} +.conn-dot.connected { background: var(--green); box-shadow: 0 0 8px rgba(34, 197, 94, 0.5); } +.conn-dot.disconnected { background: var(--red); } +.conn-dot.reconnecting { background: var(--orange); animation: pulse 1.5s infinite; } +.conn-label { + font-size: 10px; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 1px; + color: var(--text3); +} + +.chat-toggle-btn { + display: none; + align-items: center; + justify-content: center; + width: 36px; + height: 36px; + border: 1px solid var(--glass-border); + border-radius: 8px; + background: rgba(255, 255, 255, 0.04); + color: var(--text2); + font-size: 16px; + cursor: pointer; + transition: all 0.2s; +} +.topbar-avatar { + height: 36px; + width: auto; + object-fit: contain; + filter: drop-shadow(0 0 6px rgba(90, 200, 232, 0.3)); +} +.topbar-title { + font-size: 14px; + font-weight: 700; + letter-spacing: 2px; + color: #fff; + text-shadow: 0 0 15px rgba(90, 200, 232, 0.4); +} +.chat-toggle-btn:hover { background: rgba(255, 255, 255, 0.08); } +.chat-toggle-btn.active { background: rgba(31, 78, 140, 0.3); border-color: rgba(31, 78, 140, 0.4); } + +/* ═══════════════ APP SHELL ═══════════════ */ +.app-shell { + position: fixed; + top: var(--topbar); + left: 0; + right: 0; + bottom: 0; + display: flex; + z-index: 1; +} + +.content-area { + flex: 1; + overflow-y: auto; + overflow-x: hidden; + padding: 20px 24px; + padding-right: calc(var(--chat-w) + 24px); +} + +/* ─── Views ─── */ +.view { display: none; } +.view.active { display: block; } + +/* ─── Sections ─── */ +.section-label { + font-size: 11px; + font-weight: 600; + text-transform: uppercase; + 
letter-spacing: 1.5px; + color: var(--text3); + margin: 24px 0 12px; +} + +/* ─── Cards ─── */ +.card { + margin-bottom: 16px; +} +.card-head { + display: flex; + align-items: center; + justify-content: space-between; + padding: 14px 18px; + border-bottom: 1px solid var(--glass-border); +} +.card-head h3 { + font-size: 13px; + font-weight: 600; + color: var(--text); + margin: 0; + text-shadow: 0 0 20px rgba(59, 125, 216, 0.15); +} +.card-body { + padding: 14px 18px; +} +.card-body.scroll-y { + max-height: 360px; + overflow-y: auto; +} + +.refresh-btn { + background: transparent; + border: 1px solid var(--glass-border); + color: var(--text2); + border-radius: 6px; + padding: 3px 8px; + font-size: 13px; + cursor: pointer; + transition: all 0.2s; +} +.refresh-btn:hover { + background: rgba(255, 255, 255, 0.06); + border-color: var(--glass-border-hover); + color: var(--text); +} + +/* ─── Grid layouts ─── */ +.grid-2col { + display: grid; + grid-template-columns: 1fr 1fr; + gap: 16px; +} + +/* ═══════════════ OVERVIEW TAB ═══════════════ */ + +/* Metric strip */ +.metric-strip { + display: grid; + grid-template-columns: repeat(4, 1fr); + gap: 12px; + margin-bottom: 8px; +} +.metric { + padding: 18px 16px; + text-align: center; + position: relative; + overflow: hidden; +} +.metric::before { + content: ''; + position: absolute; + top: 0; + left: 50%; + transform: translateX(-50%); + width: 40%; + height: 2px; + background: linear-gradient(90deg, transparent, var(--blue-lt), transparent); + opacity: 0.6; +} +.metric-val { + display: block; + font-size: 24px; + font-weight: 700; + color: #fff; + margin-bottom: 4px; + font-variant-numeric: tabular-nums; +} +.metric-lbl { + display: block; + font-size: 10px; + font-weight: 500; + text-transform: uppercase; + letter-spacing: 1px; + color: var(--text3); +} + +/* Projects row */ +.projects-row { + display: grid; + grid-template-columns: repeat(5, 1fr); + gap: 12px; + margin-bottom: 20px; +} +.project-box { + padding: 16px; + cursor: pointer; + transition: all 0.25s; + position: relative; + overflow: hidden; +} +.project-box::before { + content: ''; + position: absolute; + top: 0; + left: 0; + right: 0; + height: 3px; + background: var(--accent, var(--blue)); + opacity: 0.9; + box-shadow: 0 0 12px var(--accent, var(--blue)), 0 0 4px var(--accent, var(--blue)); +} +.project-box:hover { + border-color: var(--glass-border-hover); + transform: translateY(-2px); + box-shadow: 0 12px 40px rgba(0, 0, 0, 0.4), 0 0 20px color-mix(in srgb, var(--accent, var(--blue)) 25%, transparent); +} +.project-header { + display: flex; + align-items: center; + gap: 8px; + margin-bottom: 8px; +} +.project-icon { font-size: 18px; } +.project-name { + font-size: 13px; + font-weight: 600; + color: var(--text); +} +.project-deputy { + font-size: 11px; + color: var(--text2); + margin-bottom: 8px; +} +.project-deputy strong { + color: var(--text); + font-weight: 500; +} +.project-body { + font-size: 11px; + color: var(--text3); + line-height: 1.6; + max-height: 80px; + overflow-y: auto; +} + +/* ─── Tasks ─── */ +.task-item { + display: flex; + align-items: flex-start; + gap: 10px; + padding: 10px 0; + border-bottom: 1px solid rgba(255, 255, 255, 0.04); +} +.task-item:last-child { border-bottom: none; } +.priority-dot { + width: 6px; + height: 6px; + border-radius: 50%; + margin-top: 6px; + flex-shrink: 0; +} +.priority-dot.high { background: var(--red); box-shadow: 0 0 6px rgba(214, 69, 42, 0.4); } +.priority-dot.medium { background: var(--orange); } +.priority-dot.low { 
background: var(--green); } +.priority-dot.none { background: var(--text3); } +.task-check { + width: 14px; + height: 14px; + border: 1.5px solid var(--text3); + border-radius: 4px; + margin-top: 2px; + flex-shrink: 0; +} +.task-check.done { + background: var(--green); + border-color: var(--green); +} +.task-info { flex: 1; min-width: 0; } +.task-title { + font-size: 12px; + font-weight: 500; + color: var(--text); + line-height: 1.4; +} +.task-title.done { text-decoration: line-through; color: var(--text3); } +.task-meta { + display: flex; + gap: 10px; + font-size: 10px; + color: var(--text3); + margin-top: 2px; +} + +/* ─── Upcoming crons ─── */ +.upcoming-card { + display: flex; + justify-content: space-between; + align-items: center; + padding: 8px 0; + border-bottom: 1px solid rgba(255, 255, 255, 0.04); + font-size: 12px; +} +.upcoming-card:last-child { border-bottom: none; } +.upcoming-name { color: var(--text); font-weight: 500; } +.upcoming-time { color: var(--text3); font-size: 11px; font-family: var(--font-mono); } + +/* ═══════════════ AGENTS TAB ═══════════════ */ +.org-chart { + max-width: 900px; + margin: 0 auto; + padding: 8px 0; +} + +.org-tier { + display: flex; + flex-wrap: wrap; + justify-content: center; + gap: 12px; + position: relative; +} +.org-tier-label { + width: 100%; + text-align: center; + font-size: 11px; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 1.5px; + color: var(--text3); + margin-bottom: 8px; +} + +.org-line { + width: 2px; + height: 28px; + background: linear-gradient(to bottom, rgba(31, 78, 140, 0.4), rgba(31, 78, 140, 0.1)); + margin: 12px auto; + position: relative; +} +.org-line::before { + content: ''; + position: absolute; + top: 0; + left: 50%; + transform: translateX(-50%); + width: 6px; + height: 6px; + border-radius: 50%; + background: var(--blue); + box-shadow: 0 0 8px rgba(31, 78, 140, 0.5); +} + +.acard { + background: var(--glass-bg); + backdrop-filter: var(--glass-blur); + -webkit-backdrop-filter: var(--glass-blur); + border: 1px solid var(--glass-border); + border-radius: var(--r); + padding: 16px; + text-align: center; + min-width: 130px; + max-width: 160px; + cursor: pointer; + transition: all 0.25s; +} +.acard:hover { + border-color: var(--glass-border-hover); + transform: translateY(-3px); + box-shadow: 0 12px 40px rgba(0, 0, 0, 0.4); +} +.acard.coo { + border-color: rgba(31, 78, 140, 0.4); + box-shadow: var(--blue-glow); + min-width: 170px; + max-width: 200px; + padding: 20px; +} +.acard.coo:hover { + border-color: rgba(31, 78, 140, 0.6); +} + +.aname { + font-size: 13px; + font-weight: 600; + color: var(--text); + margin-bottom: 2px; +} +.arole { + font-size: 10px; + color: var(--text2); + margin-bottom: 8px; +} +.ameta { + display: flex; + align-items: center; + justify-content: center; + gap: 6px; + font-size: 10px; + color: var(--text3); + margin-top: 6px; +} +.agent-counts { + display: flex; + justify-content: center; + gap: 10px; + font-size: 10px; + color: var(--text3); + margin-top: 4px; +} + +/* Model badges */ +.model-badge { + display: inline-block; + padding: 2px 8px; + border-radius: 4px; + font-size: 9px; + font-weight: 600; + font-family: var(--font-mono); + text-transform: uppercase; + letter-spacing: 0.5px; +} +.model-badge.opus { background: rgba(31, 78, 140, 0.2); color: var(--blue-lt); border: 1px solid rgba(31, 78, 140, 0.3); } +.model-badge.sonnet { background: rgba(240, 140, 46, 0.15); color: var(--orange); border: 1px solid rgba(240, 140, 46, 0.25); } +.model-badge.gemini { background: 
rgba(34, 197, 94, 0.15); color: var(--green); border: 1px solid rgba(34, 197, 94, 0.25); } +.model-badge.other { background: rgba(255, 255, 255, 0.05); color: var(--text3); border: 1px solid rgba(255, 255, 255, 0.08); } + +/* Status dots */ +.status-dot { + display: inline-block; + width: 8px; + height: 8px; + border-radius: 50%; + background: #555; + flex-shrink: 0; +} +.status-dot.active, .status-dot.running, .status-dot.ok, .status-dot.connected { background: var(--green); box-shadow: 0 0 6px rgba(34, 197, 94, 0.4); } +.status-dot.idle { background: var(--text3); } +.status-dot.error, .status-dot.down, .status-dot.failed { background: var(--red); } + +/* Sub-agent sections */ +.agent-section-header { + font-size: 11px; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 1.2px; + color: var(--text3); + margin-bottom: 8px; + padding-bottom: 6px; + border-bottom: 1px solid var(--glass-border); +} + +.agent-list { + display: flex; + flex-direction: column; + gap: 6px; +} + +.agent-card { + display: flex; + align-items: center; + gap: 12px; + padding: 10px 14px; + border-radius: var(--r-sm); + background: rgba(255, 255, 255, 0.02); + border: 1px solid transparent; + transition: all 0.2s; +} +.agent-card:hover { + background: rgba(255, 255, 255, 0.04); + border-color: var(--glass-border); +} +.agent-card.subagent-running { + border-color: rgba(34, 197, 94, 0.2); + background: rgba(34, 197, 94, 0.04); +} + +.agent-avatar { font-size: 20px; flex-shrink: 0; } +.agent-info { flex: 1; min-width: 0; } +.agent-name { font-size: 12px; font-weight: 600; color: var(--text); } +.agent-role { font-size: 11px; color: var(--text2); } +.agent-detail { font-size: 11px; color: var(--text3); } +.subagent-task { font-size: 11px; color: var(--text2); margin-top: 4px; line-height: 1.4; } + +.agent-status { + font-size: 10px; + font-weight: 600; + padding: 3px 8px; + border-radius: 4px; + white-space: nowrap; +} +.agent-status-running { background: rgba(34, 197, 94, 0.12); color: var(--green); } +.agent-status-completed { background: rgba(255, 255, 255, 0.06); color: var(--text3); } +.agent-status-failed { background: rgba(214, 69, 42, 0.12); color: var(--red); } + +/* ═══════════════ OPERATIONS TAB ═══════════════ */ + +/* Cron grid */ +.cron-grid { + display: grid; + grid-template-columns: repeat(3, 1fr); + gap: 12px; +} +.cron-cat-label { + grid-column: 1 / -1; + font-size: 11px; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 1.5px; + color: var(--text3); + padding: 12px 0 4px; + border-bottom: 1px solid var(--glass-border); + margin-bottom: 4px; +} +.cron-cat-label .cnt { + font-size: 10px; + font-weight: 400; + color: var(--text3); + background: rgba(255, 255, 255, 0.06); + padding: 1px 6px; + border-radius: 4px; + margin-left: 6px; +} + +.cron-card { + background: var(--glass-bg); + backdrop-filter: var(--glass-blur); + -webkit-backdrop-filter: var(--glass-blur); + border: 1px solid var(--glass-border); + border-radius: var(--r-sm); + padding: 14px; + transition: all 0.2s; +} +.cron-card:hover { + border-color: var(--glass-border-hover); + transform: translateY(-1px); +} +.cron-card.running { + border-color: rgba(34, 197, 94, 0.3); +} +.cron-card.disabled { + opacity: 0.4; +} + +.cron-name { + font-size: 12px; + font-weight: 600; + color: var(--text); + margin-bottom: 4px; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} +.cron-schedule { + font-size: 11px; + font-family: var(--font-mono); + color: var(--blue-lt); + margin-bottom: 8px; +} +.cron-meta 
{ + font-size: 10px; + color: var(--text3); + line-height: 1.6; +} +.cron-meta span { display: block; } +.cron-actions { + display: flex; + gap: 6px; + margin-top: 10px; +} +.cron-action-btn { + flex: 1; + padding: 5px 0; + border: 1px solid var(--glass-border); + border-radius: 6px; + background: transparent; + color: var(--text2); + font-size: 10px; + cursor: pointer; + transition: all 0.2s; +} +.cron-action-btn:hover { + background: rgba(255, 255, 255, 0.06); + color: var(--text); +} + +/* Delegations */ +.deleg-paths { + display: flex; + gap: 8px; + margin-top: 4px; + font-size: 10px; +} +.deleg-path { + color: var(--text3); + font-family: var(--font-mono); + font-size: 10px; +} + +/* ═══════════════ STANDUP TAB ═══════════════ */ +.standup-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 20px; +} +.standup-header h2 { + font-size: 18px; + font-weight: 600; + color: var(--text); +} +.standup-actions { + display: flex; + gap: 8px; +} +.standup-btn { + padding: 7px 16px; + border-radius: 8px; + font-size: 12px; + font-weight: 500; + cursor: pointer; + transition: all 0.2s; + border: none; +} +.standup-btn-primary { + background: rgba(31, 78, 140, 0.3); + color: var(--blue-lt); + border: 1px solid rgba(31, 78, 140, 0.3); +} +.standup-btn-primary:hover { background: rgba(31, 78, 140, 0.45); } +.standup-btn-secondary { + background: rgba(255, 255, 255, 0.05); + color: var(--text2); + border: 1px solid var(--glass-border); +} +.standup-btn-secondary:hover { background: rgba(255, 255, 255, 0.08); } + +.standup-empty { + text-align: center; + padding: 48px 0; +} +.standup-empty-icon { font-size: 36px; margin-bottom: 12px; } +.standup-empty-text { font-size: 14px; color: var(--text2); margin-bottom: 6px; } +.standup-empty-sub { font-size: 12px; color: var(--text3); } + +.standup-thread { + display: flex; + flex-direction: column; + gap: 12px; +} +.standup-msg { + display: flex; + gap: 12px; + padding: 14px 16px; + border-radius: var(--r); + background: var(--glass-bg); + backdrop-filter: var(--glass-blur); + -webkit-backdrop-filter: var(--glass-blur); + border: 1px solid var(--glass-border); +} +.standup-avatar { + font-size: 24px; + flex-shrink: 0; + width: 36px; + height: 36px; + display: flex; + align-items: center; + justify-content: center; +} +.standup-msg-body { flex: 1; min-width: 0; } +.standup-msg-header { + display: flex; + align-items: center; + gap: 8px; + margin-bottom: 6px; +} +.standup-msg-name { font-size: 13px; font-weight: 600; color: var(--text); } +.standup-msg-role { + font-size: 10px; + padding: 2px 8px; + border-radius: 4px; + background: rgba(255, 255, 255, 0.06); + color: var(--text3); +} +.standup-msg-time { font-size: 10px; color: var(--text3); margin-left: auto; font-family: var(--font-mono); } +.standup-msg-text { + font-size: 12px; + color: var(--text2); + line-height: 1.6; + white-space: pre-wrap; +} + +/* ═══════════════ MEMORY TAB ═══════════════ */ +.memory-feed { display: flex; flex-direction: column; gap: 4px; } +.memory-entry { + display: flex; + align-items: flex-start; + gap: 10px; + padding: 8px 4px; + border-bottom: 1px solid rgba(255, 255, 255, 0.03); + font-size: 12px; +} +.memory-timestamp { + font-size: 10px; + font-family: var(--font-mono); + color: var(--text3); + min-width: 50px; + flex-shrink: 0; +} +.memory-tag { + display: inline-block; + padding: 2px 8px; + border-radius: 4px; + font-size: 9px; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.5px; + flex-shrink: 0; +} 
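+/* Each tag variant below pairs a low-alpha background with a matching accent
+   color. A new (hypothetical) tag type would follow the same pattern, e.g.:
+   .memory-tag.risk { background: rgba(214, 69, 42, 0.1); color: var(--red); } */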
+.memory-tag.goal { background: rgba(31, 78, 140, 0.15); color: var(--blue-lt); } +.memory-tag.fact { background: rgba(34, 197, 94, 0.1); color: var(--green); } +.memory-tag.preference { background: rgba(139, 92, 246, 0.1); color: var(--purple); } +.memory-tag.deadline { background: rgba(240, 140, 46, 0.1); color: var(--orange); } +.memory-tag.decision { background: rgba(20, 184, 166, 0.1); color: var(--teal); } +.memory-tag.contact { background: rgba(255, 255, 255, 0.06); color: var(--text2); } +.memory-tag.pattern { background: rgba(59, 125, 216, 0.1); color: var(--blue-lt); } +.memory-tag.agent { background: rgba(240, 140, 46, 0.1); color: var(--orange); } +.memory-content { color: var(--text2); line-height: 1.5; } + +.filter-bar { + display: flex; + gap: 4px; + flex-wrap: wrap; +} +.filter-btn { + padding: 4px 10px; + border: 1px solid transparent; + border-radius: 6px; + background: rgba(255, 255, 255, 0.04); + color: var(--text3); + font-size: 10px; + font-weight: 500; + text-transform: capitalize; + cursor: pointer; + transition: all 0.2s; +} +.filter-btn:hover { background: rgba(255, 255, 255, 0.08); color: var(--text2); } +.filter-btn.active { + background: rgba(31, 78, 140, 0.25); + color: var(--blue-lt); + border-color: rgba(31, 78, 140, 0.3); +} + +/* ═══════════════ COSTS & SETTINGS ═══════════════ */ +/* Integrations panel */ +.integ-section-title { + font-size: 10px; font-weight: 600; text-transform: uppercase; + letter-spacing: 1px; color: var(--text3); margin-bottom: 8px; margin-top: 4px; +} +.integ-list { display: flex; flex-direction: column; gap: 4px; } +.integ-row { + display: flex; align-items: center; gap: 10px; + padding: 6px 8px; border-radius: 6px; + background: rgba(255,255,255,0.02); + transition: background .15s; +} +.integ-row:hover { background: rgba(255,255,255,0.05); } +.integ-status-dot { width: 8px; height: 8px; border-radius: 50%; flex-shrink: 0; } +.integ-info { min-width: 0; } +.integ-name { font-size: 12px; font-weight: 500; color: var(--text); display: flex; align-items: center; gap: 6px; } +.integ-detail { font-size: 10px; color: var(--text3); margin-top: 1px; white-space: nowrap; overflow: hidden; text-overflow: ellipsis; } +.integ-badge { + font-size: 8px; padding: 1px 5px; border-radius: 3px; font-weight: 600; letter-spacing: .5px; +} +.integ-badge-read { background: rgba(34,197,94,.12); color: var(--green); } +.integ-badge-write { background: rgba(240,140,46,.12); color: var(--orange); } + +/* Costs panel */ +.cost-summary { + text-align: center; padding: 12px 0 14px; + border-bottom: 1px solid rgba(255,255,255,0.05); margin-bottom: 12px; +} +.cost-summary-label { font-size: 10px; text-transform: uppercase; letter-spacing: 1px; color: var(--text3); } +.cost-summary-value { font-size: 22px; font-weight: 700; color: var(--text); margin: 4px 0 2px; } +.cost-summary-sub { font-size: 10px; color: var(--text3); } +.cost-table { display: flex; flex-direction: column; gap: 2px; } +.cost-table-head { + display: grid; grid-template-columns: 1fr auto auto; gap: 8px; + font-size: 9px; text-transform: uppercase; letter-spacing: .8px; + color: var(--text3); padding: 0 4px 6px; border-bottom: 1px solid rgba(255,255,255,0.05); +} +.cost-table-row { + display: grid; grid-template-columns: 1fr auto auto; gap: 8px; align-items: center; + padding: 6px 4px; border-bottom: 1px solid rgba(255,255,255,0.02); +} +.cost-svc-name { font-size: 12px; font-weight: 500; color: var(--text); } +.cost-svc-plan { font-size: 10px; color: var(--text3); } +.cost-amount { font-size: 
12px; font-weight: 500; color: var(--text); white-space: nowrap; } +.cost-type-badge { + font-size: 8px; padding: 2px 6px; border-radius: 3px; font-weight: 600; white-space: nowrap; +} +.cost-type-flat { background: rgba(31,78,140,.15); color: var(--blue-lt); } +.cost-type-usage { background: rgba(240,140,46,.12); color: var(--orange); } +.cost-note { + font-size: 11px; color: var(--text3); margin-top: 10px; + padding: 8px; border-radius: 6px; background: rgba(255,255,255,0.03); +} + +/* Settings */ +.settings-grid { + display: grid; + grid-template-columns: repeat(2, 1fr); + gap: 12px; +} +.settings-card { + padding: 12px; + border-radius: var(--r-sm); + background: rgba(255, 255, 255, 0.02); + border: 1px solid rgba(255, 255, 255, 0.04); +} +.settings-card-title { + font-size: 10px; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 1px; + color: var(--text3); + margin-bottom: 8px; +} +.settings-row { + display: flex; + justify-content: space-between; + padding: 4px 0; + font-size: 11px; +} +.settings-row-label { color: var(--text2); } +.settings-row-value { color: var(--text); font-family: var(--font-mono); font-size: 10px; } + +/* ═══════════════ INTEGRATIONS ═══════════════ */ +.data-table-simple { + width: 100%; + border-collapse: collapse; + font-size: 12px; +} +.data-table-simple th { + text-align: left; + padding: 8px 10px; + font-size: 10px; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 1px; + color: var(--text3); + border-bottom: 1px solid var(--glass-border); +} +.data-table-simple td { + padding: 8px 10px; + border-bottom: 1px solid rgba(255, 255, 255, 0.03); + color: var(--text); +} +.data-table-simple tr:last-child td { border-bottom: none; } + +/* ═══════════════ FILES ═══════════════ */ +.file-list { display: flex; flex-direction: column; } +.file-item { + display: flex; + align-items: center; + gap: 10px; + padding: 8px 4px; + border-bottom: 1px solid rgba(255, 255, 255, 0.03); +} +.file-item:last-child { border-bottom: none; } +.file-icon { font-size: 16px; flex-shrink: 0; width: 24px; text-align: center; } +.file-info { flex: 1; min-width: 0; } +.file-name { font-size: 12px; font-weight: 500; color: var(--text); white-space: nowrap; overflow: hidden; text-overflow: ellipsis; } +.file-link { color: rgba(90, 200, 232, 0.9); text-decoration: none; transition: color 0.2s; } +.file-link:hover { color: #fff; text-shadow: 0 0 8px rgba(90, 200, 232, 0.4); } +.file-item.clickable { cursor: pointer; } +.file-item.clickable:hover { background: rgba(90, 200, 232, 0.05); border-radius: 6px; } +.file-path { font-size: 10px; color: var(--text3); font-family: var(--font-mono); white-space: nowrap; overflow: hidden; text-overflow: ellipsis; } +.file-meta { text-align: right; font-size: 10px; color: var(--text3); flex-shrink: 0; } + +/* ═══════════════ ROLLOUT ═══════════════ */ +.rollout-progress-wrap { margin-bottom: 16px; } +.rollout-progress-header { display: flex; justify-content: space-between; align-items: center; margin-bottom: 6px; } +.rollout-progress-pct { font-size: 12px; font-weight: 600; color: var(--text); } +.rollout-progress-bar { height: 6px; border-radius: 3px; background: rgba(255, 255, 255, 0.06); overflow: hidden; } +.rollout-progress-fill { height: 100%; border-radius: 3px; background: linear-gradient(90deg, var(--blue), var(--blue-lt)); transition: width 0.5s; } +.cron-section-label { + font-size: 11px; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 1.2px; + color: var(--text3); +} +.cron-section-label 
.count { + font-weight: 400; + color: var(--text3); + font-size: 10px; +} + +.rollout-layer { margin-bottom: 16px; } +.rollout-agent-list { display: flex; flex-direction: column; gap: 4px; margin-top: 8px; } +.rollout-agent { + display: flex; + align-items: center; + gap: 10px; + padding: 8px 10px; + border-radius: var(--r-sm); + background: rgba(255, 255, 255, 0.02); +} +.rollout-agent.rollout-deployed { border-left: 2px solid var(--green); } +.rollout-agent.rollout-pending { border-left: 2px solid var(--text3); opacity: 0.6; } +.rollout-check { font-size: 14px; flex-shrink: 0; } +.rollout-agent-info { flex: 1; } +.rollout-agent-name { font-size: 12px; font-weight: 500; color: var(--text); } +.rollout-agent-role { font-size: 10px; color: var(--text3); font-weight: 400; margin-left: 6px; } +.rollout-agent-meta { + display: flex; + align-items: center; + gap: 8px; + margin-top: 4px; +} +.rollout-hb-badge { + font-size: 9px; + padding: 1px 6px; + border-radius: 3px; +} +.rollout-hb-badge.hb-active { background: rgba(34, 197, 94, 0.1); color: var(--green); } +.rollout-hb-badge.hb-inactive { background: rgba(255, 255, 255, 0.04); color: var(--text3); } +.rollout-hb-time { font-size: 10px; color: var(--text3); font-family: var(--font-mono); } + +/* ═══════════════ CHAT DRAWER ═══════════════ */ +.chat-drawer { + position: fixed; + top: var(--topbar); + right: 0; + bottom: 0; + width: var(--chat-w); + display: flex; + flex-direction: column; + border-left: 1px solid var(--glass-border); + transform: translateX(100%); + transition: transform 0.3s cubic-bezier(0.4, 0, 0.2, 1); + z-index: 90; +} +.chat-drawer.open { + transform: translateX(0); +} + +.chat-drawer-head { + display: flex; + align-items: center; + justify-content: space-between; + padding: 12px 16px; + border-bottom: 1px solid var(--glass-border); + flex-shrink: 0; +} +.chat-drawer-title { + font-size: 13px; + font-weight: 600; + color: var(--text); +} +.chat-drawer-close { + background: none; + border: none; + color: var(--text3); + font-size: 16px; + cursor: pointer; + padding: 4px 8px; + border-radius: 4px; + transition: all 0.2s; +} +.chat-drawer-close:hover { color: var(--text); background: rgba(255, 255, 255, 0.06); } + +.chat-drawer-body { + flex: 1; + display: flex; + flex-direction: column; + overflow: hidden; +} + +/* Chat sessions */ +.chat-conn-status { + padding: 6px 12px; + font-size: 11px; + font-weight: 500; + text-align: center; + border-radius: 4px; + margin: 4px 8px; +} +.chat-conn-info { background: rgba(0,150,255,0.15); color: #60a5fa; } +.chat-conn-ok { background: rgba(0,200,100,0.15); color: #4ade80; } +.chat-conn-warn { background: rgba(255,180,0,0.15); color: #fbbf24; } +.chat-conn-error { background: rgba(255,60,60,0.15); color: #f87171; } + +.chat-session-bar { + display: flex; + gap: 4px; + padding: 8px 12px; + overflow-x: auto; + flex-shrink: 0; + border-bottom: 1px solid var(--glass-border); +} +.chat-session-tab { + padding: 5px 12px; + border: 1px solid var(--glass-border); + border-radius: 6px; + background: rgba(255, 255, 255, 0.03); + color: var(--text2); + font-size: 11px; + font-weight: 500; + cursor: pointer; + white-space: nowrap; + transition: all 0.2s; + display: flex; + align-items: center; +} +.chat-session-tab:hover { background: rgba(255, 255, 255, 0.06); } +.chat-session-tab.active { + background: rgba(31, 78, 140, 0.25); + border-color: rgba(31, 78, 140, 0.4); + color: var(--blue-lt); +} +.chat-session-tab.subagent { font-size: 10px; opacity: 0.7; } +.chat-no-sessions { font-size: 11px; 
color: var(--text3); padding: 4px; } + +/* Messages */ +.chat-messages-scroll { + flex: 1; + overflow-y: auto; + padding: 12px; +} +.chat-msg { + margin-bottom: 10px; + max-width: 92%; +} +.chat-msg-user { + margin-left: auto; +} +.chat-msg-user .chat-msg-content { + background: rgba(31, 78, 140, 0.25); + border: 1px solid rgba(31, 78, 140, 0.3); + border-radius: 12px 12px 4px 12px; + padding: 8px 12px; + font-size: 12px; + color: var(--text); + line-height: 1.5; +} +.chat-msg-assistant .chat-msg-content { + background: rgba(255, 255, 255, 0.04); + border: 1px solid var(--glass-border); + border-radius: 12px 12px 12px 4px; + padding: 8px 12px; + font-size: 12px; + color: var(--text2); + line-height: 1.5; +} +.chat-msg-system .chat-msg-content { + background: rgba(240, 140, 46, 0.08); + border: 1px solid rgba(240, 140, 46, 0.15); + border-radius: 8px; + padding: 6px 10px; + font-size: 11px; + color: var(--orange); + text-align: center; +} +.chat-msg-time { + font-size: 9px; + color: var(--text3); + margin-top: 3px; + padding: 0 4px; +} +.chat-msg-streaming { + opacity: 0.8; +} + +.chat-cursor { + display: inline-block; + width: 2px; + height: 14px; + background: var(--blue-lt); + margin-left: 2px; + vertical-align: middle; + animation: blink 1s infinite; +} +.chat-typing { color: var(--text3); font-style: italic; } + +/* Input bar */ +.chat-input-bar { + display: flex; + align-items: flex-end; + gap: 6px; + padding: 10px 12px; + border-top: 1px solid var(--glass-border); + flex-shrink: 0; +} +.chat-input { + flex: 1; + background: rgba(255, 255, 255, 0.04); + border: 1px solid var(--glass-border); + border-radius: 8px; + padding: 8px 12px; + color: var(--text); + font: 12px/1.4 var(--font); + resize: none; + outline: none; + transition: border-color 0.2s; +} +.chat-input:focus { border-color: rgba(31, 78, 140, 0.5); } +.chat-input::placeholder { color: var(--text3); } +.chat-input:disabled { opacity: 0.4; } + +.chat-btn { + width: 36px; + height: 36px; + border: 1px solid var(--glass-border); + border-radius: 8px; + background: transparent; + color: var(--text2); + font-size: 14px; + cursor: pointer; + display: flex; + align-items: center; + justify-content: center; + transition: all 0.2s; + flex-shrink: 0; +} +.chat-btn:hover:not(:disabled) { background: rgba(255, 255, 255, 0.06); color: var(--text); } +.chat-btn:disabled { opacity: 0.3; cursor: default; } +.chat-btn-send { color: var(--blue-lt); border-color: rgba(31, 78, 140, 0.3); } +.chat-btn-stop { color: var(--red); border-color: rgba(214, 69, 42, 0.3); } + +/* ═══════════════ MODAL ═══════════════ */ +.modal-overlay { + position: fixed; + inset: 0; + background: rgba(0, 0, 0, 0.6); + backdrop-filter: blur(4px); + z-index: 200; + display: none; + align-items: center; + justify-content: center; +} +.modal-overlay.visible { display: flex; } +.modal { + width: 90%; + max-width: 700px; + max-height: 80vh; + display: flex; + flex-direction: column; +} +.modal-header { + display: flex; + justify-content: space-between; + align-items: center; + padding: 16px 20px; + border-bottom: 1px solid var(--glass-border); + font-size: 14px; + font-weight: 600; +} +.modal-close { + background: none; + border: none; + color: var(--text3); + font-size: 20px; + cursor: pointer; + padding: 4px 8px; + border-radius: 4px; +} +.modal-close:hover { color: var(--text); background: rgba(255, 255, 255, 0.06); } +.modal-body { + padding: 20px; + overflow-y: auto; + font-size: 12px; + font-family: var(--font-mono); + color: var(--text2); + line-height: 1.7; + 
white-space: pre-wrap; +} + +/* ═══════════════ SHARED STATES ═══════════════ */ +.placeholder, .empty-state { + text-align: center; + padding: 24px; + color: var(--text3); + font-size: 12px; +} +.empty-icon { + display: block; + font-size: 28px; + margin-bottom: 8px; + opacity: 0.5; +} + +/* Disk bar */ +.disk-bar { height: 4px; border-radius: 2px; background: rgba(255, 255, 255, 0.06); margin-top: 4px; overflow: hidden; } +.disk-bar-fill { height: 100%; border-radius: 2px; background: var(--green); transition: width 0.5s; } +.disk-bar-fill.warning { background: var(--orange); } +.disk-bar-fill.danger { background: var(--red); } + +/* Status grid */ +.status-grid { display: flex; flex-direction: column; gap: 8px; } +.status-item { display: flex; align-items: center; gap: 10px; padding: 6px 0; } +.status-info { flex: 1; } +.status-label { font-size: 12px; font-weight: 500; color: var(--text); } +.status-value { font-size: 11px; color: var(--text3); display: block; } + +/* ═══════════════ ANIMATIONS ═══════════════ */ +@keyframes pulse { 0%, 100% { opacity: 1; } 50% { opacity: 0.4; } } +@keyframes blink { 0%, 50% { opacity: 1; } 51%, 100% { opacity: 0; } } + +/* ═══════════════ RESPONSIVE ═══════════════ */ +@media (max-width: 1200px) { + .projects-row { grid-template-columns: repeat(3, 1fr); } + .cron-grid { grid-template-columns: repeat(2, 1fr); } +} + +@media (max-width: 960px) { + .content-area { + padding-right: 24px; + } + .chat-drawer { + width: 100%; + max-width: 380px; + } + .chat-toggle-btn { display: flex; } + .chat-drawer:not(.open) ~ .content-area { padding-right: 24px; } + + .projects-row { grid-template-columns: repeat(2, 1fr); } + .metric-strip { grid-template-columns: repeat(2, 1fr); } + .grid-2col { grid-template-columns: 1fr; } + .settings-grid { grid-template-columns: 1fr; } + .cron-grid { grid-template-columns: 1fr; } + + .topbar-nav { + gap: 2px; + } + .nav-tab { + padding: 6px 10px; + } + .nav-label { display: none; } +} + +@media (max-width: 640px) { + :root { --topbar: 50px; --chat-w: 100%; } + + .topbar { padding: 0 10px; gap: 6px; } + .topbar-logo { height: 28px; } + + .content-area { padding: 12px; padding-right: 12px; } + .projects-row { grid-template-columns: 1fr; } + .metric-strip { grid-template-columns: repeat(2, 1fr); } + + .acard { min-width: 100px; max-width: 130px; padding: 10px; } + .acard.coo { min-width: 140px; } + + .conn-label { display: none; } +} + +/* ═══════════════ SEARCH BOX ═══════════════ */ +.search-wrap { + position: relative; + margin: 16px 0 8px; +} +.search-icon { + position: absolute; + left: 14px; + top: 50%; + transform: translateY(-50%); + font-size: 14px; + pointer-events: none; +} +.global-search { + width: 100%; + padding: 12px 16px 12px 40px; + background: var(--glass-bg); + backdrop-filter: var(--glass-blur); + -webkit-backdrop-filter: var(--glass-blur); + border: 1px solid var(--glass-border); + border-radius: var(--r); + color: var(--text); + font: 13px/1.4 var(--font); + outline: none; + transition: border-color 0.2s, box-shadow 0.2s; +} +.global-search::placeholder { color: var(--text3); } +.global-search:focus { + border-color: var(--glass-border-hover); + box-shadow: 0 0 12px rgba(90, 200, 232, 0.25); +} + +/* ═══════════════ INDEXATION STATUS ═══════════════ */ +.idx-row { + display: flex; + align-items: center; + gap: 16px; + padding: 8px 0; + border-bottom: 1px solid rgba(255, 255, 255, 0.04); + font-size: 12px; +} +.idx-row:last-child { border-bottom: none; } +.idx-source { color: var(--text); font-weight: 500; 
min-width: 200px; } +.idx-link { color: rgba(90, 200, 232, 0.9); text-decoration: none; cursor: pointer; transition: color 0.2s; } +.idx-link:hover { color: #fff; text-decoration: underline; text-shadow: 0 0 8px rgba(90, 200, 232, 0.4); } +.idx-detail { color: var(--text3); } +.idx-detail strong { color: var(--text2); font-weight: 500; } + +/* ═══════════════ CRON EXPAND/COLLAPSE ═══════════════ */ +.cron-card { cursor: pointer; } +.cron-detail { + display: none; + margin-top: 10px; + padding-top: 10px; + border-top: 1px solid rgba(255, 255, 255, 0.06); + font-size: 11px; + color: var(--text3); + line-height: 1.7; +} +.cron-detail span { display: block; } +.cron-card.expanded .cron-detail { display: block; } +.cron-card .cron-expand-hint { + font-size: 10px; + color: var(--text3); + margin-top: 6px; + opacity: 0.5; +} +.cron-card.expanded .cron-expand-hint { display: none; } + +/* ═══════════════ AGENT DETAIL MODAL ═══════════════ */ +.agent-detail-card { display: flex; flex-direction: column; gap: 16px; } +.agent-detail-header { display: flex; align-items: flex-start; gap: 16px; } +.agent-detail-avatar { width: 64px; height: 64px; border-radius: 50%; object-fit: cover; border: 2px solid rgba(90, 200, 232, 0.4); box-shadow: 0 0 16px rgba(90, 200, 232, 0.2); flex-shrink: 0; } +.agent-detail-info { flex: 1; } +.agent-detail-name { font-size: 18px; font-weight: 700; color: var(--text); font-family: var(--font); } +.agent-detail-role { font-size: 12px; color: var(--text2); margin-top: 2px; font-family: var(--font); } +.agent-detail-section { border-top: 1px solid rgba(255, 255, 255, 0.06); padding-top: 12px; } +.agent-detail-section-title { font-size: 10px; font-weight: 600; text-transform: uppercase; letter-spacing: 1.2px; color: var(--text3); margin-bottom: 8px; } +.agent-detail-pre { background: rgba(0, 0, 0, 0.3); border: 1px solid rgba(255, 255, 255, 0.06); border-radius: 8px; padding: 12px; font-size: 11px; font-family: var(--font-mono); color: var(--text2); line-height: 1.6; white-space: pre-wrap; max-height: 240px; overflow-y: auto; margin: 0; } + +/* ═══════════════ PANEL TIMESTAMPS ═══════════════ */ +.panel-last-updated { text-align: right; font-size: 10px; color: var(--text3, rgba(255,255,255,0.3)); padding: 6px 4px 0; opacity: 0.7; } + +/* ═══════════════ PROJECT BOXES CLICKABLE ═══════════════ */ +.project-box { cursor: pointer; transition: transform 0.15s, box-shadow 0.15s; } +.project-box:hover { transform: translateY(-2px); box-shadow: 0 4px 20px rgba(0,0,0,0.3); } + +/* ═══════════════ PROJECT DETAIL MODAL ═══════════════ */ +.project-detail-desc { font-size: 13px; color: var(--text2); line-height: 1.5; margin-bottom: 12px; } +.project-detail-agent-link { display: inline-flex; align-items: center; gap: 6px; padding: 6px 12px; border-radius: 8px; background: rgba(90, 200, 232, 0.1); border: 1px solid rgba(90, 200, 232, 0.3); color: var(--blue-lt, #5ac8e8); font-size: 12px; cursor: pointer; transition: background 0.2s; text-decoration: none; } +.project-detail-agent-link:hover { background: rgba(90, 200, 232, 0.2); } +.project-detail-placeholder { background: rgba(0,0,0,0.2); border: 1px dashed rgba(255,255,255,0.1); border-radius: 8px; padding: 16px; text-align: center; font-size: 11px; color: var(--text3); margin-top: 8px; } + +/* ═══════════════ SELECT OPTIONS (dark theme) ═══════════════ */ +select option { background: #1a1f2e; color: #e0e0e0; } + +/* ═══════════════ TASK ROW SHARED COMPONENT ═══════════════ */ +.task-row-shared { + display: flex; align-items: flex-start; 
gap: 8px; padding: 8px 10px; + border-bottom: 1px solid rgba(255,255,255,0.04); transition: background 0.15s; cursor: pointer; +} +.task-row-shared:hover { background: rgba(255,255,255,0.03); } +.task-row-shared.done { opacity: 0.5; } +.task-row-shared .task-complete-btn { + width: 16px; height: 16px; border: 1.5px solid var(--text3); border-radius: 4px; + background: transparent; cursor: pointer; flex-shrink: 0; margin-top: 1px; transition: all 0.2s; + display: flex; align-items: center; justify-content: center; font-size: 10px; color: transparent; padding: 0; +} +.task-row-shared .task-complete-btn:hover { border-color: var(--green); color: var(--green); } +.task-row-shared.done .task-complete-btn { background: var(--green); border-color: var(--green); color: #fff; } + +/* ═══════════════ PRINT ═══════════════ */ +@media print { + body::before { display: none; } + .topbar, .chat-drawer, .chat-toggle-btn { display: none !important; } + .content-area { padding: 0 !important; } + .glass-card, .glass-panel, .glass-nav { background: #fff !important; backdrop-filter: none !important; color: #000 !important; border-color: #ddd !important; } +} diff --git a/bates-core/scripts-core/archive-sessions.sh b/bates-core/scripts-core/archive-sessions.sh new file mode 100755 index 0000000..e352655 --- /dev/null +++ b/bates-core/scripts-core/archive-sessions.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash +# Part of Bates installer — scripts-core +# archive-sessions.sh — Move stale .jsonl session files to archive/ +# Runs safely under concurrent execution (mv -n is atomic on same filesystem). + +set -euo pipefail + +AGENTS_DIR="$HOME/.openclaw/agents" +MAX_AGE_MIN=120 # 2 hours + +total_archived=0 + +for sessions_dir in "$AGENTS_DIR"/*/sessions/; do + [ -d "$sessions_dir" ] || continue + + agent_dir="$(dirname "$sessions_dir")" + agent="$(basename "$agent_dir")" + archive_dir="$agent_dir/archive" + + count=0 + + # Find .jsonl files in the sessions dir (maxdepth 1 to skip subdirs like archive/, state/) + # that haven't been modified in the last 120 minutes. + while IFS= read -r -d '' file; do + mkdir -p "$archive_dir" + basename_file="$(basename "$file")" + # mv -n: no-clobber, atomic on same filesystem. If two instances race, only one wins. + mv -n "$file" "$archive_dir/$basename_file" 2>/dev/null && count=$((count + 1)) || true + done < <(find "$sessions_dir" -maxdepth 1 -name '*.jsonl' -type f -mmin +"$MAX_AGE_MIN" -print0 2>/dev/null) + + if [ "$count" -gt 0 ]; then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] $agent: archived $count session file(s)" + total_archived=$((total_archived + count)) + fi +done + +if [ "$total_archived" -eq 0 ]; then + echo "[$(date '+%Y-%m-%d %H:%M:%S')] No session files older than ${MAX_AGE_MIN}m found." +else + echo "[$(date '+%Y-%m-%d %H:%M:%S')] Total archived: $total_archived file(s)" +fi diff --git a/bates-core/scripts-core/claude-sub.sh b/bates-core/scripts-core/claude-sub.sh new file mode 100755 index 0000000..d52973a --- /dev/null +++ b/bates-core/scripts-core/claude-sub.sh @@ -0,0 +1,5 @@ +#!/bin/bash +# Part of Bates installer — scripts-core +# Wrapper to call Claude Code using subscription auth only. +# Strips ANTHROPIC_API_KEY so Claude Code falls back to OAuth credentials. 
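+#
+# Example — assuming ANTHROPIC_API_KEY is exported in the current shell, the
+# wrapper strips it so the same invocation runs on subscription (OAuth) auth
+# instead of metered API billing:
+#
+#   export ANTHROPIC_API_KEY=sk-ant-...    # would normally take precedence
+#   claude-sub.sh -p "summarize the latest session log"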
+env -u ANTHROPIC_API_KEY claude "$@" diff --git a/bates-core/scripts-core/claude-tmux.sh b/bates-core/scripts-core/claude-tmux.sh new file mode 100755 index 0000000..b943589 --- /dev/null +++ b/bates-core/scripts-core/claude-tmux.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash +# Part of Bates installer — scripts-core +# claude-tmux.sh — Run Claude Code inside a persistent tmux session. +# +# Usage: +# claude-tmux # attach or create session, auto-resume last conversation +# claude-tmux new # attach or create session, start fresh conversation +# +# If the tmux session "claude" exists: +# - If Claude Code is still running inside it -> just attach +# - If the shell is idle (Claude exited) -> restart Claude with --resume +# If no session exists -> create one and start Claude +# +# To detach without killing Claude: press Ctrl+B then D +# To reattach later: just run `claude-tmux` again + +SESSION="claude" +# Set to the directory where you want Claude Code sessions to start +WORKDIR="${BATES_WORKDIR:-$HOME}" +MODE="${1:-resume}" + +# Check if session already exists +if tmux has-session -t "$SESSION" 2>/dev/null; then + # Session exists. Check if Claude Code is running inside it. + PANE_PID=$(tmux list-panes -t "$SESSION" -F '#{pane_pid}' 2>/dev/null) + CLAUDE_RUNNING=false + if [ -n "$PANE_PID" ]; then + # Check if any child process of the pane shell is claude + if pgrep -P "$PANE_PID" -f "claude" >/dev/null 2>&1; then + CLAUDE_RUNNING=true + fi + fi + + if $CLAUDE_RUNNING; then + echo "Claude Code is still running -- reattaching..." + tmux attach -t "$SESSION" + else + echo "Session exists but Claude exited -- restarting Claude Code..." + if [ "$MODE" = "new" ]; then + tmux send-keys -t "$SESSION" "cd $WORKDIR && claude" Enter + else + tmux send-keys -t "$SESSION" "cd $WORKDIR && claude --resume" Enter + fi + sleep 1 + tmux attach -t "$SESSION" + fi +else + # No session — create one + echo "Creating new tmux session '$SESSION'..." + if [ "$MODE" = "new" ]; then + tmux new-session -d -s "$SESSION" -c "$WORKDIR" "claude" + else + tmux new-session -d -s "$SESSION" -c "$WORKDIR" "claude --resume" + fi + sleep 1 + tmux attach -t "$SESSION" +fi diff --git a/bates-core/scripts-core/dashboard-register.sh b/bates-core/scripts-core/dashboard-register.sh new file mode 100755 index 0000000..8abbf82 --- /dev/null +++ b/bates-core/scripts-core/dashboard-register.sh @@ -0,0 +1,76 @@ +#!/bin/bash +# Part of Bates installer — scripts-core +# Lightweight dashboard registration helper for ad-hoc Claude Code runs. +# Use this to register exec-based or PTY-based runs that bypass run-delegation.sh. +# +# Usage: +# dashboard-register.sh start "task-name" "description" PID +# dashboard-register.sh complete "task-name" EXIT_CODE ["optional summary"] +# +# All dashboard calls are best-effort (won't fail if dashboard is down). 
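+#
+# Example — wrapping an ad-hoc background job (task name, description, and
+# paths here are illustrative):
+#
+#   dashboard-register.sh start "nightly-backup" "rsync workspace to NAS" $$
+#   rsync -a ~/.openclaw/workspace/ /mnt/nas/bates-backup/
+#   dashboard-register.sh complete "nightly-backup" $?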
+ +set -uo pipefail + +# Dashboard URL — adjust port if your gateway runs elsewhere +DASHBOARD_URL="http://localhost:18789" + +ACTION="${1:?Usage: dashboard-register.sh start|complete TASK_NAME ...}" +TASK_NAME="${2:?Missing task name}" + +case "$ACTION" in + start) + DESCRIPTION="${3:-}" + PID="${4:-$$}" + DELEGATION_ID="$(date +%s)-${PID}" + + # Persist the delegation ID so 'complete' can find it + ID_FILE="/tmp/.dashboard-reg-$(echo "$TASK_NAME" | tr ' /' '_-')" + echo "$DELEGATION_ID" > "$ID_FILE" + + curl -s -X POST "$DASHBOARD_URL/dashboard/api/delegation/start" \ + -H "Content-Type: application/json" \ + -d "$(jq -n \ + --arg id "$DELEGATION_ID" \ + --arg name "$TASK_NAME" \ + --arg promptPath "" \ + --arg logPath "" \ + --arg description "$DESCRIPTION" \ + --argjson pid "$PID" \ + '{id: $id, name: $name, promptPath: $promptPath, logPath: $logPath, description: $description, pid: $pid}' + )" > /dev/null 2>&1 || true + + echo "Registered: $TASK_NAME (id=$DELEGATION_ID)" + ;; + + complete) + EXIT_CODE="${3:-0}" + SUMMARY="${4:-}" + + # Recover the delegation ID + ID_FILE="/tmp/.dashboard-reg-$(echo "$TASK_NAME" | tr ' /' '_-')" + if [[ -f "$ID_FILE" ]]; then + DELEGATION_ID="$(cat "$ID_FILE")" + rm -f "$ID_FILE" + else + # Fallback: construct a plausible ID (won't match, but dashboard can still log it) + DELEGATION_ID="unknown-$(date +%s)" + fi + + curl -s -X POST "$DASHBOARD_URL/dashboard/api/delegation/complete" \ + -H "Content-Type: application/json" \ + -d "$(jq -n \ + --arg id "$DELEGATION_ID" \ + --argjson exitCode "$EXIT_CODE" \ + --arg logTail "$SUMMARY" \ + '{id: $id, exitCode: $exitCode, logTail: $logTail}' + )" > /dev/null 2>&1 || true + + echo "Completed: $TASK_NAME (id=$DELEGATION_ID, exit=$EXIT_CODE)" + ;; + + *) + echo "Unknown action: $ACTION" >&2 + echo "Usage: dashboard-register.sh start|complete TASK_NAME ..." >&2 + exit 1 + ;; +esac diff --git a/bates-core/scripts-core/health-check.sh b/bates-core/scripts-core/health-check.sh new file mode 100755 index 0000000..e6339cc --- /dev/null +++ b/bates-core/scripts-core/health-check.sh @@ -0,0 +1,150 @@ +#!/bin/bash +# Part of Bates installer — scripts-core +# Health check script for OpenClaw/Bates system +# Outputs structured JSON to stdout (and optionally saves to observations/health.json) + +set -euo pipefail + +WORKSPACE="$HOME/.openclaw/workspace" +CRON_FILE="$HOME/.openclaw/cron/jobs.json" +CHECKIN_FILE="$WORKSPACE/observations/last-checkin.json" +OUTPUT_FILE="$WORKSPACE/observations/health.json" + +# NOTE: Set TELEGRAM_BOT_TOKEN in your environment or .env if Telegram checks are needed +TELEGRAM_BOT_TOKEN="${TELEGRAM_BOT_TOKEN:-}" + +NOW=$(date -u +"%Y-%m-%dT%H:%M:%S+00:00") + +# 1. Check OpenClaw gateway +if pgrep -x "openclaw-gate" > /dev/null 2>&1 || pgrep -f "openclaw-gateway" > /dev/null 2>&1; then + GATEWAY_STATUS="running" + # Get uptime in hours + GW_PID=$(pgrep -f "openclaw-gateway" | head -1) + if [ -n "$GW_PID" ]; then + GW_START=$(ps -o lstart= -p "$GW_PID" 2>/dev/null | xargs -I{} date -d "{}" +%s 2>/dev/null || echo "0") + NOW_EPOCH=$(date +%s) + if [ "$GW_START" != "0" ]; then + UPTIME_HOURS=$(( (NOW_EPOCH - GW_START) / 3600 )) + else + UPTIME_HOURS=-1 + fi + else + UPTIME_HOURS=-1 + fi +else + GATEWAY_STATUS="down" + UPTIME_HOURS=0 +fi + +# 2. 
Check Telegram bot (skipped if no token configured) +if [ -n "$TELEGRAM_BOT_TOKEN" ]; then + TELEGRAM_RESULT=$(curl -s --max-time 5 "https://api.telegram.org/bot${TELEGRAM_BOT_TOKEN}/getMe" 2>/dev/null || echo '{"ok":false}') + if echo "$TELEGRAM_RESULT" | python3 -c "import sys,json; d=json.load(sys.stdin); sys.exit(0 if d.get('ok') else 1)" 2>/dev/null; then + TELEGRAM_STATUS="connected" + else + TELEGRAM_STATUS="error" + fi +else + TELEGRAM_STATUS="not_configured" +fi + +# 3. Check MCP servers (test if mcporter is available) +MCP_STATUS="{}" +if command -v mcporter &> /dev/null; then + # Check each known MCP server by trying a lightweight operation + # NOTE: Customize this list with your own MCP server names + for SERVER in ms365-reader ms365-assistant; do + RESULT=$(timeout 10 mcporter call "$SERVER" list-mail-folders '{}' 2>/dev/null && echo "ok" || echo "error") + MCP_STATUS=$(echo "$MCP_STATUS" | python3 -c " +import sys, json +d = json.load(sys.stdin) +d['mcp_${SERVER//-/_}'] = '${RESULT}' +json.dump(d, sys.stdout) +" 2>/dev/null || echo "$MCP_STATUS") + done +else + MCP_STATUS='{"note":"mcporter not in PATH"}' +fi + +# 4. Last cron execution times +CRON_RUNS="{}" +if [ -f "$CRON_FILE" ]; then + CRON_RUNS=$(python3 -c " +import json, sys +with open('$CRON_FILE') as f: + data = json.load(f) +runs = {} +for job in data.get('jobs', []): + name = job.get('name', 'unknown') + last_run = job.get('state', {}).get('lastRunAtMs') + if last_run: + from datetime import datetime, timezone + dt = datetime.fromtimestamp(last_run / 1000, tz=timezone.utc) + runs[name] = dt.strftime('%Y-%m-%dT%H:%M:%S+00:00') + elif name not in runs: + runs[name] = None +json.dump(runs, sys.stdout) +" 2>/dev/null || echo '{}') +fi + +# 5. Disk usage +DISK_PERCENT=$(df -h / | awk 'NR==2 {gsub(/%/,""); print $5}' 2>/dev/null || echo "-1") + +# 6. Last checkin summary +CHECKIN_SUMMARY="{}" +if [ -f "$CHECKIN_FILE" ]; then + CHECKIN_SUMMARY=$(python3 -c " +import json, sys +with open('$CHECKIN_FILE') as f: + data = json.load(f) +summary = { + 'last_run': data.get('last_run'), + 'items_reported_today': len(data.get('reported_items', [])), + 'skipped_runs': data.get('skipped_runs', 0) +} +json.dump(summary, sys.stdout) +" 2>/dev/null || echo '{}') +fi + +# 7. 
Build final JSON +python3 -c " +import json, sys + +services = { + 'openclaw_gateway': '$GATEWAY_STATUS', + 'telegram_bot': '$TELEGRAM_STATUS' +} + +# Merge MCP status +try: + mcp = json.loads('''$MCP_STATUS''') + services.update(mcp) +except: + services['mcp_note'] = 'check failed' + +try: + cron_runs = json.loads('''$CRON_RUNS''') +except: + cron_runs = {} + +try: + checkin = json.loads('''$CHECKIN_SUMMARY''') +except: + checkin = {} + +result = { + 'timestamp': '$NOW', + 'uptime_hours': $UPTIME_HOURS, + 'services': services, + 'last_cron_runs': cron_runs, + 'disk_usage_percent': int('$DISK_PERCENT') if '$DISK_PERCENT'.lstrip('-').isdigit() else -1, + 'checkin_summary': checkin +} + +output = json.dumps(result, indent=2) +print(output) + +# Also save to file +with open('$OUTPUT_FILE', 'w') as f: + f.write(output + '\n') +" 2>/dev/null || echo '{"error": "failed to build health report"}' diff --git a/bates-core/scripts-core/model-fallback.sh b/bates-core/scripts-core/model-fallback.sh new file mode 100755 index 0000000..f92c049 --- /dev/null +++ b/bates-core/scripts-core/model-fallback.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# Part of Bates installer — scripts-core +# model-fallback.sh — Switch the primary model between providers +set -euo pipefail + +# Available models — customize these for your setup +ANTHROPIC="anthropic/claude-opus-4-6" +CODEX="openai-codex/gpt-5.3-codex" +GEMINI="google-gemini-cli/gemini-3-pro-preview" +SONNET="anthropic/claude-sonnet-4-5-20250929" +CONFIG_PATH="agents.defaults.model.primary" + +usage() { + echo "Usage: $0 {anthropic|codex|gemini|sonnet|reset}" + echo "" + echo " anthropic Switch to Claude Opus 4.6 (subscription)" + echo " codex Switch to OpenAI Codex 5.3 (ChatGPT subscription)" + echo " gemini Switch to Gemini 3 Pro Preview (OAuth)" + echo " sonnet Switch to Claude Sonnet 4.5 (subscription)" + echo " reset Same as anthropic" + exit 1 +} + +[[ $# -eq 1 ]] || usage + +case "$1" in + anthropic|reset) TARGET="$ANTHROPIC" ;; + codex) TARGET="$CODEX" ;; + gemini) TARGET="$GEMINI" ;; + sonnet) TARGET="$SONNET" ;; + *) usage ;; +esac + +openclaw config set "$CONFIG_PATH" "$TARGET" +echo "Primary model set to: $TARGET" diff --git a/bates-core/scripts-core/run-delegation.sh b/bates-core/scripts-core/run-delegation.sh new file mode 100755 index 0000000..16a628a --- /dev/null +++ b/bates-core/scripts-core/run-delegation.sh @@ -0,0 +1,70 @@ +#!/bin/bash +# Part of Bates installer — scripts-core +# Claude Code delegation wrapper with dashboard tracking +# Usage: run-delegation.sh PROMPTFILE LOGFILE [TASK_NAME] +# run-delegation.sh --pty PROMPTFILE LOGFILE [TASK_NAME] +# +# Registers the delegation with the dashboard, runs Claude Code, +# then reports completion so Bates gets notified. +# +# --pty mode: Runs claude without piping through tee, making it +# compatible with PTY/background execution (exec pty:true). +# Output goes directly to LOG_FILE instead of tee'd to stdout. 
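+#
+# Example invocations (paths and task name are illustrative):
+#   run-delegation.sh /tmp/report-prompt.md /tmp/report.log "weekly-report"
+#   run-delegation.sh --pty /tmp/report-prompt.md /tmp/report.log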
+
+set -uo pipefail
+
+PTY_MODE=false
+if [[ "${1:-}" == "--pty" ]]; then
+    PTY_MODE=true
+    shift
+fi
+
+PROMPT_FILE="${1:?Usage: run-delegation.sh [--pty] PROMPTFILE LOGFILE [TASK_NAME]}"
+LOG_FILE="${2:?Usage: run-delegation.sh [--pty] PROMPTFILE LOGFILE [TASK_NAME]}"
+TASK_NAME="${3:-$(basename "$PROMPT_FILE" .md)}"
+# Dashboard URL — adjust port if your gateway runs elsewhere
+DASHBOARD_URL="http://localhost:18789"
+
+DELEGATION_ID="$(date +%s)-$$"
+
+# Extract description from the first 200 chars of the prompt, squeezing repeated spaces
+TASK_DESC="$(head -c 200 "$PROMPT_FILE" 2>/dev/null | tr '\n' ' ' | tr -s ' ' || echo "")"
+
+# Register delegation start (best-effort — delegation runs regardless)
+curl -s -X POST "$DASHBOARD_URL/dashboard/api/delegation/start" \
+    -H "Content-Type: application/json" \
+    -d "$(jq -n \
+        --arg id "$DELEGATION_ID" \
+        --arg name "$TASK_NAME" \
+        --arg promptPath "$PROMPT_FILE" \
+        --arg logPath "$LOG_FILE" \
+        --arg description "$TASK_DESC" \
+        --argjson pid "$$" \
+        '{id: $id, name: $name, promptPath: $promptPath, logPath: $logPath, description: $description, pid: $pid}'
+    )" > /dev/null 2>&1 || true
+
+# Run Claude Code
+EXIT_CODE=0
+if $PTY_MODE; then
+    # PTY mode: redirect output to log file directly (no tee/pipe)
+    env -u ANTHROPIC_API_KEY claude -p --dangerously-skip-permissions \
+        "$(cat "$PROMPT_FILE")" > "$LOG_FILE" 2>&1 || EXIT_CODE=$?
+else
+    # Standard mode: pipe through tee for interactive viewing
+    # (pipefail is set above, so a claude failure propagates through the pipe)
+    env -u ANTHROPIC_API_KEY claude -p --dangerously-skip-permissions \
+        "$(cat "$PROMPT_FILE")" < /dev/null 2>&1 | tee "$LOG_FILE" || EXIT_CODE=$?
+fi
+
+# Report completion (best-effort)
+LOG_TAIL="$(tail -n 50 "$LOG_FILE" 2>/dev/null | head -c 3000 || echo "(no log)")"
+
+curl -s -X POST "$DASHBOARD_URL/dashboard/api/delegation/complete" \
+    -H "Content-Type: application/json" \
+    -d "$(jq -n \
+        --arg id "$DELEGATION_ID" \
+        --argjson exitCode "$EXIT_CODE" \
+        --arg logTail "$LOG_TAIL" \
+        '{id: $id, exitCode: $exitCode, logTail: $logTail}'
+    )" > /dev/null 2>&1 || true
+
+exit $EXIT_CODE
diff --git a/bates-core/scripts-core/watchdog-bates.sh b/bates-core/scripts-core/watchdog-bates.sh
new file mode 100755
index 0000000..cd913be
--- /dev/null
+++ b/bates-core/scripts-core/watchdog-bates.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+# Part of Bates installer — scripts-core
+# Watchdog: detect stuck main session and auto-recover
+# Runs via crontab every 2 minutes
+# Logs to /tmp/openclaw-watchdog.log
+
+LOG=/tmp/openclaw-watchdog.log
+SESSIONS="$HOME/.openclaw/agents/main/sessions"
+ARCHIVE="$SESSIONS/archive"
+STUCK_THRESHOLD=600   # seconds before considering a session stuck
+STUCK_STATE_FILE=/tmp/openclaw-watchdog-stuck.state
+
+mkdir -p "$ARCHIVE"
+
+# Check if gateway is even running
+if ! 
systemctl --user is-active openclaw-gateway >/dev/null 2>&1; then + echo "$(date -Is) gateway not running, starting it" >> "$LOG" + systemctl --user start openclaw-gateway + rm -f "$STUCK_STATE_FILE" + exit 0 +fi + +# Look for "stuck session" diagnostics in recent logs (last 3 minutes) +STUCK_LINE=$(journalctl --user -u openclaw-gateway --since "3 minutes ago" --no-pager 2>/dev/null \ + | grep -oP 'stuck session:.*sessionKey=(\S+).*age=(\d+)s' | tail -1) + +if [ -z "$STUCK_LINE" ]; then + # No stuck sessions — clear state and exit + rm -f "$STUCK_STATE_FILE" + exit 0 +fi + +STUCK_AGE=$(echo "$STUCK_LINE" | grep -oP 'age=\K\d+') + +if [ "${STUCK_AGE:-0}" -lt "$STUCK_THRESHOLD" ]; then + rm -f "$STUCK_STATE_FILE" + exit 0 +fi + +# Stuck for over threshold — but only act if we saw it on a previous run too +# (prevents acting on transient spikes) +if [ ! -f "$STUCK_STATE_FILE" ]; then + echo "$(date -Is) first detection, age=${STUCK_AGE}s -- waiting for confirmation" >> "$LOG" + date +%s > "$STUCK_STATE_FILE" + exit 0 +fi + +FIRST_SEEN=$(cat "$STUCK_STATE_FILE" 2>/dev/null || echo 0) +NOW=$(date +%s) +WAIT=$((NOW - FIRST_SEEN)) + +if [ "$WAIT" -lt 120 ]; then + # Wait at least 2 minutes between first detection and action + exit 0 +fi + +echo "$(date -Is) RECOVERING: stuck session detected for ${STUCK_AGE}s (confirmed over ${WAIT}s)" >> "$LOG" + +# Archive the main session +MAIN_ID=$(python3 -c " +import json +with open('$SESSIONS/sessions.json') as f: + data = json.load(f) +for s in data.get('sessions', []): + if s.get('sessionKey') == 'agent:main:main': + print(s.get('sessionId', '')) + break +" 2>/dev/null) + +if [ -n "$MAIN_ID" ] && [ -f "$SESSIONS/$MAIN_ID.jsonl" ]; then + mv "$SESSIONS/$MAIN_ID.jsonl" "$ARCHIVE/" + rm -f "$SESSIONS/$MAIN_ID.jsonl.lock" + echo "$(date -Is) archived main session: $MAIN_ID" >> "$LOG" +fi + +systemctl --user restart openclaw-gateway +rm -f "$STUCK_STATE_FILE" +echo "$(date -Is) gateway restarted" >> "$LOG" diff --git a/bates-core/systemd/clock-sync.service b/bates-core/systemd/clock-sync.service new file mode 100644 index 0000000..3b73c7b --- /dev/null +++ b/bates-core/systemd/clock-sync.service @@ -0,0 +1,6 @@ +[Unit] +Description=Sync WSL2 clock + +[Service] +Type=oneshot +ExecStart=/usr/sbin/ntpdate time.windows.com diff --git a/bates-core/systemd/clock-sync.timer b/bates-core/systemd/clock-sync.timer new file mode 100644 index 0000000..1a782c9 --- /dev/null +++ b/bates-core/systemd/clock-sync.timer @@ -0,0 +1,9 @@ +[Unit] +Description=Periodic WSL2 clock sync + +[Timer] +OnBootSec=30 +OnUnitActiveSec=15min + +[Install] +WantedBy=timers.target diff --git a/bates-core/systemd/openclaw-gateway.service.template b/bates-core/systemd/openclaw-gateway.service.template new file mode 100644 index 0000000..9cacd54 --- /dev/null +++ b/bates-core/systemd/openclaw-gateway.service.template @@ -0,0 +1,19 @@ +[Unit] +Description=OpenClaw Gateway +After=network-online.target +Wants=network-online.target + +[Service] +ExecStart=/usr/bin/node %h/.npm-global/lib/node_modules/openclaw/dist/index.js gateway --port 18789 +Restart=always +RestartSec=5 +KillMode=process +Environment=HOME=%h +Environment="PATH=%h/.local/bin:%h/.npm-global/bin:%h/bin:/usr/local/bin:/usr/bin:/bin" +Environment=OPENCLAW_GATEWAY_PORT=18789 +Environment="OPENCLAW_SYSTEMD_UNIT=openclaw-gateway.service" +Environment=OPENCLAW_SERVICE_MARKER=openclaw +Environment=OPENCLAW_SERVICE_KIND=gateway + +[Install] +WantedBy=default.target diff --git a/bates-core/templates/auth-profiles.json.template 
b/bates-core/templates/auth-profiles.json.template new file mode 100644 index 0000000..1561c5b --- /dev/null +++ b/bates-core/templates/auth-profiles.json.template @@ -0,0 +1,17 @@ +{ + "profiles": { + "{{PROVIDER}}:manual": { + "type": "token", + "provider": "{{PROVIDER}}", + "description": "Subscription token (primary auth)" + }, + "{{PROVIDER}}:default": { + "type": "api_key", + "provider": "{{PROVIDER}}", + "description": "API key fallback" + } + }, + "lastGood": { + "{{PROVIDER}}": "{{PROVIDER}}:manual" + } +} diff --git a/bates-core/templates/openclaw.json.template b/bates-core/templates/openclaw.json.template new file mode 100644 index 0000000..42cd9b3 --- /dev/null +++ b/bates-core/templates/openclaw.json.template @@ -0,0 +1,81 @@ +{ + "env": { + "vars": {} + }, + "update": { + "channel": "stable", + "checkOnStart": true + }, + "models": { + "providers": {} + }, + "agents": { + "defaults": { + "model": { + "primary": "{{PRIMARY_MODEL}}" + }, + "workspace": "~/.openclaw/workspace", + "maxConcurrent": 4, + "compaction": { + "mode": "default", + "reserveTokensFloor": 30000, + "maxHistoryShare": 0.25 + }, + "contextPruning": { + "mode": "cache-ttl", + "ttl": "15m", + "keepLastAssistants": 2 + }, + "heartbeat": { + "every": "60m", + "model": "{{PRIMARY_MODEL}}" + }, + "subagents": { + "maxConcurrent": 4, + "maxSpawnDepth": 2, + "maxChildrenPerAgent": 3, + "archiveAfterMinutes": 60, + "model": "{{PRIMARY_MODEL}}" + }, + "sandbox": { + "mode": "off" + } + }, + "list": [ + { + "id": "main", + "name": "{{ASSISTANT_NAME}} ({{PRIMARY_MODEL_SHORT}})", + "model": { + "primary": "{{PRIMARY_MODEL}}" + } + } + ] + }, + "session": { + "reset": { + "mode": "idle", + "idleMinutes": 30 + } + }, + "channels": { + "telegram": { + "enabled": true, + "botToken": "{{TELEGRAM_BOT_TOKEN}}", + "dmPolicy": "pairing", + "streamMode": "partial" + } + }, + "gateway": { + "port": 18789, + "mode": "local", + "bind": "loopback" + }, + "plugins": { + "load": { + "paths": [ + "~/.openclaw/extensions/cost-tracker", + "~/.openclaw/extensions/dashboard" + ] + } + } +} diff --git a/bates-core/templates/wslconfig.template b/bates-core/templates/wslconfig.template new file mode 100644 index 0000000..de63e78 --- /dev/null +++ b/bates-core/templates/wslconfig.template @@ -0,0 +1,6 @@ +[wsl2] +memory=12GB +vmIdleTimeout=-1 + +[boot] +systemd=true diff --git a/bates-core/workspace-core/AGENTS.md b/bates-core/workspace-core/AGENTS.md new file mode 100644 index 0000000..ac4e3de --- /dev/null +++ b/bates-core/workspace-core/AGENTS.md @@ -0,0 +1,14 @@ +# AGENTS.md - Session Startup + +On session start: +1. Read SOUL.md (loaded automatically) +2. Read TOOLS.md (loaded automatically) +3. Check observations/ for recent patterns +4. Begin working on any pending items + +## Sub-agent Guidelines + +- Delegate multi-step tasks to sub-agents +- Keep the main session responsive +- Report sub-agent results immediately when they complete +- Never let a sub-agent result go unreported diff --git a/bates-core/workspace-core/DATA-HANDLING.md b/bates-core/workspace-core/DATA-HANDLING.md new file mode 100644 index 0000000..c71f1ad --- /dev/null +++ b/bates-core/workspace-core/DATA-HANDLING.md @@ -0,0 +1,33 @@ +# DATA-HANDLING.md - Data Source Rules + +This file governs how you process, store, and act on data from each source. +SOUL.md defines your identity. TOOLS.md defines how to call each tool. +This file defines what to do with the data those tools return. + +--- + +## 1. 
General Principles + +- Structured API data is always preferred over unstructured sources. +- When in doubt about whether to flag something, flag it. The user prefers false positives over missed items. +- When summarizing anything, lead with what requires action, then informational items. +- If a data source is unavailable, note the gap explicitly rather than silently skipping it. +- Never include full data dumps in chat messages. Summarize. +- Never forward content to external services. + +## 2. Data Retention + +- Observation findings (observations/findings.md): keep indefinitely. Append, do not overwrite. +- Pattern observations (observations/patterns.md): keep indefinitely. + +## 3. Privacy + +- Keep all user data confidential. +- Never include sensitive data in external messages. +- Summarize at a high level unless the user asks for detail. + +## 4. Cost Control + +- Prefer smaller models for routine tasks (summarization, classification). +- Never batch-process large datasets without checking cost implications first. +- Use subscription models where available (zero per-token cost). diff --git a/bates-core/workspace-core/HEARTBEAT.md b/bates-core/workspace-core/HEARTBEAT.md new file mode 100644 index 0000000..da0170c --- /dev/null +++ b/bates-core/workspace-core/HEARTBEAT.md @@ -0,0 +1,26 @@ +# Heartbeat Checklist + +Run through this list. If nothing needs attention, reply HEARTBEAT_OK. + +Only message the user if something is genuinely urgent or time-sensitive. + +## System Health + +- Any critical service down? Alert immediately. +- Disk space below 2GB? Alert immediately. +- Gateway service running? If not, attempt restart and alert. + +## Active Sub-agents & Delegations + +Check for any running sub-agents. For each: +- If completed since last check: report results immediately +- If stalled (no activity >10 min): investigate and report + +## Pattern Capture + +If you notice something recurring or worth logging during these checks, +append a one-liner to observations/patterns.md. + +## Default + +If none of the above triggers: HEARTBEAT_OK diff --git a/bates-core/workspace-core/MEMORY.md b/bates-core/workspace-core/MEMORY.md new file mode 100644 index 0000000..d1591ed --- /dev/null +++ b/bates-core/workspace-core/MEMORY.md @@ -0,0 +1,16 @@ +# Memory + +This file is populated by the assistant as it learns about the user's preferences, +recurring patterns, and important context. It starts empty. + +## User Preferences + +(learned over time) + +## Recurring Patterns + +(observed and logged here) + +## Important Context + +(accumulated knowledge) diff --git a/bates-core/workspace-core/PROACTIVE-PLAYBOOK.md b/bates-core/workspace-core/PROACTIVE-PLAYBOOK.md new file mode 100644 index 0000000..2acf3b7 --- /dev/null +++ b/bates-core/workspace-core/PROACTIVE-PLAYBOOK.md @@ -0,0 +1,26 @@ +# Proactive Playbook + +## Philosophy + +Your role is not to wait for instructions. When you have idle time, use it productively: + +1. **Review observations/** for patterns that could be automated +2. **Check system health** proactively +3. **Organize knowledge** - ensure findings and patterns are up to date +4. 
**Suggest improvements** - if you notice recurring inefficiencies, propose solutions + +## Autonomous Work Guidelines + +- Always log what you do in observations/patterns.md +- Never take consequential actions (sending messages, modifying config) without approval +- Focus on analysis, organization, and preparation +- Keep the user informed of significant findings via the configured messaging channel + +## Scoring Framework + +Before sending any proactive message, score it: +- **Urgency (0-5):** How time-sensitive is this? +- **Impact (0-5):** How much does the user benefit from knowing this now? +- **Noise (0-5):** How annoying would this be if it's not useful? + +Only send if Urgency + Impact - Noise >= 5. diff --git a/bates-core/workspace-core/SOUL.md.template b/bates-core/workspace-core/SOUL.md.template new file mode 100644 index 0000000..1f1d391 --- /dev/null +++ b/bates-core/workspace-core/SOUL.md.template @@ -0,0 +1,91 @@ +# Identity + +You are {{ASSISTANT_NAME}}, a professional AI executive assistant for {{USER_NAME}}. +You run on OpenClaw, deployed on a dedicated machine. + +Direct, concise. No em dashes ever, in any output. No hyperbole. Expert advisor who pushes back on bad ideas. + +# Context + +{{USER_NAME}}. Timezone: {{USER_TZ}}. + +# Boundaries + +- No bash outside sandbox without approval. +- All data confidential. +- Never modify openclaw.json without explicit approval from {{USER_NAME}}. + +# Model Routing + +Default: {{PRIMARY_MODEL}} for everything. + +Sub-agents also run on the default model (configured in gateway). + +Full routing details: rules/model-routing.md + +# Delegation (MANDATORY) + +**CORE PRINCIPLE: {{ASSISTANT_NAME}} must stay available.** Delegate ALL multi-step tasks. Main session is for: acknowledging, spawning, reporting, decisions. + +- **Default: delegate everything** requiring tool calls. Spawn in the SAME turn as your acknowledgment. +- **Simple lookups → sub-agents** (rules/subagent-policy.md) +- **Code writing → Claude Code** via delegation wrapper script +- **After sub-agent returns:** ALWAYS deliver the result immediately in your own voice. Never reply NO_REPLY for sub-agent results. + +Full routing table & examples: rules/subagent-policy.md + +# Reference System + +Detailed rules and tool references live in separate files. Read them before starting the relevant task. Do not memorize them into this file. 
+ +## Rules (read on demand) +| File | Read when | +|------|-----------| +| rules/model-routing.md | Choosing which model to use | +| rules/self-service.md | Hitting a capability gap | +| rules/context-safety.md | Running commands with large output | +| rules/context-hygiene.md | After completing any task | +| rules/knowledge-persistence.md | Deciding what/where to save | +| rules/subagent-policy.md | Spawning subagents or delegating | +| rules/memory-classification.md | During all interactions (classify learnings) | +| DATA-HANDLING.md | Any data processing or privacy concern | + +## Tool References (read on demand) +| File | Read when | +|------|-----------| +| (populated as integrations are added via bates-enhance.sh) | | + +## Living Documents +| File | Purpose | +|------|---------| +| observations/findings.md | Observations and learnings | +| observations/patterns.md | Recurring processes to systematize | +| skills/ | Reusable playbooks (read SKILL.md before responding to triggers) | + +# Configuration Protection + +**You MUST NOT modify `~/.openclaw/openclaw.json` without {{USER_NAME}}'s explicit approval.** This includes: +- Changing model assignments, agent definitions, or fallback lists +- Enabling/disabling plugins or channels +- Modifying heartbeat, compaction, or session settings + +If a task requires a config change, propose the exact change and wait for approval before writing. + +# Architecture Rules + +1. SOUL.md and TOOLS.md are loaded every turn. Keep them lean. Never add detailed rules here. +2. To add a new rule: create or update the appropriate file in rules/ or refs/ +3. To log a finding: update observations/findings.md or observations/patterns.md +4. If a rules/ or refs/ file does not exist yet, create it following the pattern of existing ones +5. Before any task, check the reference tables above and read the relevant files + +## Communication + +- Default: direct and professional +- No emoji unless asked +- Keep messages under 500 characters unless detail is needed +- Lead with action items + +## Default + +If no checks trigger and nothing needs attention: HEARTBEAT_OK diff --git a/bates-core/workspace-core/TOOLS.md.template b/bates-core/workspace-core/TOOLS.md.template new file mode 100644 index 0000000..8c93179 --- /dev/null +++ b/bates-core/workspace-core/TOOLS.md.template @@ -0,0 +1,27 @@ +# TOOLS.md - Tool Index + +Detailed tool references are in workspace/refs/. Read the relevant file before using any tool. + +For data handling rules, see DATA-HANDLING.md. + +| Tool | Reference | Quick Summary | +|------|-----------|---------------| +| Dashboard | (built-in) | Web UI for chatting with {{ASSISTANT_NAME}} | +| Cost Tracker | (built-in) | API cost monitoring at /cost-tracker/api/summary | + +## Key Safety Rules (always in memory) + +- Never load binary files into context. Pipe to temp files. +- Always verify dates with `date -d "YYYY-MM-DD" +"%A"`. + +## Adding More Tools + +Install integrations to add more tools: +``` +bates-enhance.sh # Show available integrations +bates-enhance.sh m365 # Add Microsoft 365 (email, calendar, OneDrive) +bates-enhance.sh teams # Add MS Teams messaging +bates-enhance.sh twilio # Add voice calling +``` + +Each integration adds its own tool reference file to refs/. 
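+
+For example, to apply the date-verification rule above before scheduling anything (illustrative date):
+
+```
+date -d "2025-11-01" +"%A"   # prints: Saturday
+```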
diff --git a/bates-core/workspace-core/observations/findings.md b/bates-core/workspace-core/observations/findings.md
new file mode 100644
index 0000000..c86f9fe
--- /dev/null
+++ b/bates-core/workspace-core/observations/findings.md
@@ -0,0 +1,5 @@
+# Findings
+
+Observations and learnings are logged here. Append new entries, do not overwrite.
+
+---
diff --git a/bates-core/workspace-core/observations/patterns.md b/bates-core/workspace-core/observations/patterns.md
new file mode 100644
index 0000000..634a7e6
--- /dev/null
+++ b/bates-core/workspace-core/observations/patterns.md
@@ -0,0 +1,5 @@
+# Patterns
+
+Recurring processes and patterns are logged here for potential automation.
+
+---
diff --git a/bates-core/workspace-core/rules/context-hygiene.md b/bates-core/workspace-core/rules/context-hygiene.md
new file mode 100644
index 0000000..030e4ab
--- /dev/null
+++ b/bates-core/workspace-core/rules/context-hygiene.md
@@ -0,0 +1,27 @@
+# Context Hygiene
+
+## After Every Completed Task
+1. Save any new file paths to observations/file-index.md
+2. Save any new knowledge or corrections to the relevant file
+3. If context is above 50%, run /compact to free space for the next task
+4. Do not carry stale reference files into unrelated tasks
+
+## Reference File Usage Tracking
+Maintain a usage log at observations/ref-usage.md. Every time you read a rules/ or refs/ file, increment its count for the current week:
+
+```
+| File | Jan W1 | Jan W2 | Feb W1 | Feb W2 | Total |
+|------|--------|--------|--------|--------|-------|
+| refs/mcp-servers.md | 4 | 3 | 5 | 4 | 16 |
+| refs/email-ops.md | 3 | 2 | 4 | 3 | 12 |
+| ...
+```
+
+## Monthly Review
+At the start of each month (or when the user asks):
+1. Review ref-usage.md
+2. Files read on >70% of tasks: propose promoting key content into TOOLS.md or SOUL.md
+3. Files read on <10% of tasks: confirm they should stay as refs (not clutter)
+4. Report findings to the user with a recommendation
+
+The goal: the always-loaded files should contain exactly what's needed on most tasks, nothing more.
diff --git a/bates-core/workspace-core/rules/context-safety.md b/bates-core/workspace-core/rules/context-safety.md
new file mode 100644
index 0000000..3e9fb4a
--- /dev/null
+++ b/bates-core/workspace-core/rules/context-safety.md
@@ -0,0 +1,36 @@
+# Context Safety
+
+## Pre-Command Checks
+Before running commands that may return large output (attachments, file contents, logs):
+1. Check current context: if above 70%, compact first
+2. Always pipe large outputs to files, never display in chat (see the example at the end of this file)
+3. If context exceeds 150k tokens, stop and run /compact before continuing
+
+## Dangerous Commands (always redirect to file)
+- get-mail-attachment (base64 content)
+- download-onedrive-file-content
+- Any command with contentBytes in response
+- Any file read >1000 lines
+
+## Email Search Safety
+**NEVER search emails without `select`.** Without it, full HTML bodies are returned (100KB+ per email), which:
+- Causes timeouts on tool-planning models
+- Poisons the session history -- every subsequent turn must reload the bloated result
+- Can permanently lock the session in "processing" state
+
+Always search with: `select='["subject","from","receivedDateTime","bodyPreview"]'`
+Then fetch individual emails with `get-mail-message` only when needed.
+
+## Recovery
+If you hit a "prompt too long" error, you are stuck. The user must restart your session. 
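+
+## Example: Redirect-to-File Pattern
+
+A minimal sketch of the pattern above (server and tool names are illustrative; substitute your own MCP server):
+
+```
+# Save the raw result to a file; never print it into the session
+mcporter call ms365-reader get-mail-attachment '{"messageId": "...", "attachmentId": "..."}' > /tmp/attachment.json 2>&1
+# Check the size before deciding whether any of it should enter context
+wc -c /tmp/attachment.json
+```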
diff --git a/bates-core/workspace-core/rules/knowledge-persistence.md b/bates-core/workspace-core/rules/knowledge-persistence.md new file mode 100644 index 0000000..53bd23f --- /dev/null +++ b/bates-core/workspace-core/rules/knowledge-persistence.md @@ -0,0 +1,30 @@ +# Knowledge Persistence + +You forget everything between sessions. Workspace files ARE your memory. + +## Rules +- Learn something new? Write it immediately to the relevant file. +- Task completed? Update status in observations/file-index.md. +- Unexpected finding? Log to observations/ with date stamp. +- The user corrects you? Update the relevant file so you don't repeat the mistake. +- Before answering a question about file locations, check observations/file-index.md first. +- End of substantive interaction: ask yourself "Did I learn anything to write down?" If yes, write it. + +If it is not on disk, it does not exist. + +## Retrieval Strategy +1. observations/file-index.md for file locations +2. observations/findings.md for goals, facts, preferences, deadlines, decisions, contacts +3. observations/patterns.md for recurring patterns +4. Relevant venture directory +5. MEMORY.md for historical context + +## File Index Discipline +When you access any file on any drive: +- Log to observations/file-index.md: date, full path, what it contains, what you did with it +- When you create or draft anything, log output location and status (draft/final/needs-review) +- When the user tells you where files are, add to file-index.md immediately +- At session start, read file-index.md before asking the user for file locations + +## Workspace Structure +Keep files under 500 lines; split logically if exceeded. Structure documented in WORKSPACE-STRUCTURE.md. diff --git a/bates-core/workspace-core/rules/memory-classification.md b/bates-core/workspace-core/rules/memory-classification.md new file mode 100644 index 0000000..76691d2 --- /dev/null +++ b/bates-core/workspace-core/rules/memory-classification.md @@ -0,0 +1,50 @@ +# Memory Classification + +When you learn something new during any interaction (chat conversation, email reading, cron task, overnight work), classify it before storing. + +## Typed Tags + +Use these tags to prefix each entry in observation files: + +| Tag | Definition | Example | +|-----|-----------|---------| +| `[goal]` | Something the user wants to achieve | "Reduce monthly API cost to under $50" | +| `[fact]` | Reference information that won't change often | "Primary tenant ID: {{TENANT_ID}}" | +| `[preference]` | How the user wants something done | "No em-dashes in any writing" | +| `[deadline]` | A hard date/time commitment | "Investor deck due Feb 15" | +| `[decision]` | A choice the user made that affects future work | "Using {{PRIMARY_MODEL}} as default model" | +| `[contact]` | Information about a person the user works with | "Jane Doe - advisor, met at conference" | +| `[pattern]` | A recurring process or behavior observed | "User reviews email first thing, then switches to coding" | + +## Where to Store + +All tagged entries go into the existing observation files: + +| File | What goes here | +|------|---------------| +| `observations/findings.md` | Goals, facts, preferences, deadlines, decisions, contacts | +| `observations/patterns.md` | Recurring patterns and behavioral observations | +| `observations/file-index.md` | File locations, drafts created, paths discovered | + +Do NOT create separate files per type. Use the tag prefix to classify entries within these files. 
+ +## Format + +Append dated entries with tags: + +```markdown +## YYYY-MM-DD +- [tag] observation (source: where you learned it) +``` + +Keep the most recent entries at the top of each file. When a section has more than 50 entries, archive the oldest to an `Archive/observations/` subfolder. + +## Integration + +When the user says something during a conversation, silently classify it. Do not announce "I'm storing this as a goal." Just do it. At the end of any substantive interaction, check: "Did I learn anything that should be classified?" If yes, append to the relevant file with the appropriate tag. + +When reading emails, calendar events, or transcripts during cron jobs, apply the same classification silently. + +## Deduplication + +Before appending, scan the target file for the same information. If it already exists and nothing has changed, skip it. If the status changed (e.g., a deadline moved), update the existing entry rather than adding a duplicate. diff --git a/bates-core/workspace-core/rules/model-routing.md b/bates-core/workspace-core/rules/model-routing.md new file mode 100644 index 0000000..621b928 --- /dev/null +++ b/bates-core/workspace-core/rules/model-routing.md @@ -0,0 +1,19 @@ +# Model Routing Rules + +## Platform: OpenClaw Multi-Provider Subscription OAuth +OpenClaw supports subscription-based OAuth for Anthropic, OpenAI, and Google. Current deployment uses **{{PRIMARY_MODEL}}**. Other providers available if user switches subscription. + +## Default: {{PRIMARY_MODEL}} (Subscription) +All tasks run on {{PRIMARY_MODEL}} by default. No per-token cost — covered by flat subscription fee. + +Standard tasks: email, calendar, Planner, file access, MCP operations, email drafting, task management, multi-step work, summaries, briefings, transcript analysis, code generation, content writing, legal documents, contracts, financial analysis, presentations, investor materials. + +## Sub-Agents +Sub-agents also run on {{PRIMARY_MODEL}} by default (configured in gateway). No need to specify model overrides. + +## Specialist Models +- **Web research:** Brave Search for simple lookups. Perplexity Sonar (/model pplx) for multi-source research. +- **Images:** Gemini 2.5 Flash (vision/analysis). Image generation via `~/.openclaw/scripts/generate-image.py` (default: OpenAI gpt-image-1; alt: Google Imagen). See skill `image-generation`. + +## Fallback Chain +If {{PRIMARY_MODEL}} is unavailable: {{FALLBACK_MODEL_1}} -> {{FALLBACK_MODEL_2}} -> {{FALLBACK_MODEL_3}}. These fallbacks use API keys and do incur per-token costs. diff --git a/bates-core/workspace-core/rules/self-service.md b/bates-core/workspace-core/rules/self-service.md new file mode 100644 index 0000000..76ce9d9 --- /dev/null +++ b/bates-core/workspace-core/rules/self-service.md @@ -0,0 +1,32 @@ +# Self-Service Problem Solving + +When you hit a capability gap (can't read a file type, can't process something, missing a tool), solve it yourself first. Don't report failure, report the solution. + +## Process +1. **Diagnose:** Identify what's missing (tool, package, script, permission) +2. **Research:** Check existing patterns in ~/.openclaw/scripts/ and tools +3. **Build:** Write the script/tool or install the package +4. **Test:** Validate with real data before considering it done +5. **Document:** Update the relevant refs/ file with instructions and safety warnings +6. **Report:** Tell the user what was built, not what failed + +## Safety Constraints +- Never load binary files into context. 
Always pipe to temp files and extract text/JSON +- Check file sizes before reading. If > 10MB, warn the user and ask permission +- Test before documenting. Don't write instructions for untested code +- Leave no temp files. Clean up /tmp/* after execution +- Token expiry: Always use the refresh pattern from graph-api.sh (never hardcode tokens) + +## Self-Solve Examples +- Can't read a PDF? Write a pdftotext script +- Missing package (poppler-utils, jq, etc.)? Install it +- MCP tool doesn't return what you need? Wrap Graph API directly +- Attachment too large for mcporter? Build a Graph API wrapper +- Need recurring file processing? Write a shell script + +## When to Escalate to the User +- Permission issues (need elevated access) +- Destructive operations without approval (delete, overwrite, push to repo) +- Unsure about the approach (ask first if cost/complexity is high) +- External API keys needed that aren't available +- Security decisions (what data to expose, how to encrypt, etc.) diff --git a/bates-core/workspace-core/rules/subagent-policy.md b/bates-core/workspace-core/rules/subagent-policy.md new file mode 100644 index 0000000..24c52fa --- /dev/null +++ b/bates-core/workspace-core/rules/subagent-policy.md @@ -0,0 +1,212 @@ +# Subagent Policy + +## ABSOLUTE: Sub-agents Must NEVER Restart the Gateway + +**No sub-agent or spawned session may run `openclaw gateway restart`, `gateway restart`, `config.patch`, `config.apply`, or any command that restarts the OpenClaw process.** A gateway restart kills ALL running sessions, including the sub-agent itself. This creates a suicide loop where work is lost. + +If a sub-agent's changes require a gateway restart (e.g., config changes, extension updates), it must: +1. Complete all file edits +2. Report back: "Done. Gateway restart needed to apply changes." +3. Let the main session do the restart after all sub-agents finish + +This rule is non-negotiable. Include it in every sub-agent task prompt. + +## CRITICAL: Immediate Delegation Rule + +**When the user asks for ANY multi-step task, spawn a sub-agent IMMEDIATELY in the same turn. Do NOT investigate, read files, run commands, or edit documentation yourself first.** + +The ONLY action in the main session should be `sessions_spawn`. + +### What Triggers Immediate Delegation + +This rule applies to **sub-agent-eligible tasks** requiring >1 tool call, specifically: + +- **Simple lookups:** Calendar checks, task status, single API calls +- **Quick data gathering:** <5 steps, no complex logic +- **Debugging/error investigation:** Finding and analyzing error logs + +**EXCEPTIONS: Delegate to Claude Code Instead** + +The following tasks do **NOT** trigger immediate sub-agent delegation. They follow the Claude Code workflow in `rules/delegation.md` instead: + +- **All email operations:** Reading, searching, replying, forwarding, attachment handling, multi-account searches +- **All Office file operations:** Word (.docx), Excel (.xlsx), PowerPoint (.pptx) creation, editing, or processing +- **Legal documents:** Contracts, agreements, compliance documents +- **Complex multi-step work:** Anything requiring conditional logic, error handling, or bash workflows +- **Code writing:** Any Python, shell, TypeScript, or programming language + +**Why the separation:** +- Email + attachment + upload involves complex multi-step bash workflows. Claude Code handles these more reliably. +- Office file generation requires precise formatting, brand compliance, and complex templating. 
+- Legal and complex analysis benefits from Claude Code's iterative approach. + +### Claude Code Skills Cross-Reference + +When delegating Claude Code work, **always READ the relevant skill file first** -- it contains task-specific instructions, templates, and best practices. + +| Task Type | Skill File | Examples | +|-----------|-----------|----------| +| **Word Documents** | `skills/word-documents/SKILL.md` | Reports, proposals, memos, letters, RFP responses | +| **Spreadsheets & Dashboards** | `skills/spreadsheets/SKILL.md` | Financial models, expense trackers, data dashboards, forecasts | +| **Presentations** | `skills/presentations/SKILL.md` | Investor pitches, board updates, partner decks, webinar slides | +| **Legal Documents** | `skills/legal-docs/SKILL.md` | Contracts, NDAs, term sheets, regulatory filings, policy documents | + +**Before delegating:** +1. Read the corresponding skill file +2. Check for project-specific templates or requirements +3. Ensure you have the latest brand specs from the project mirror +4. Follow the anti-hallucination rules in `rules/delegation.md` + +**Task routing summary:** + +| Task Type | Delegate To | Workflow | +|-----------|------------|----------| +| Calendar check, status lookup | Sub-agent | `sessions_spawn` (immediate) | +| Email search + attachments + upload | Claude Code | `rules/delegation.md` workflow | +| Excel/Word/PowerPoint generation | Claude Code | `rules/delegation.md` workflow | +| Legal document review | Claude Code | `rules/delegation.md` workflow | +| Code writing (any language) | Claude Code | `rules/delegation.md` workflow | +| Quick data gathering (<5 steps) | Sub-agent | `sessions_spawn` (immediate) | +| Debugging error logs | Sub-agent | `sessions_spawn` (immediate) | + +### Why Immediate Delegation Matters + +Delegation keeps the main session context clean and focused on coordination. Multi-step investigative work bloats the main context and degrades response quality over time. + +**WRONG:** Investigate in main session, then delegate +**CORRECT:** Delegate immediately, let the sub-agent do the work + +### The Rule in Practice + +When you receive a multi-step request: + +1. **DON'T** read files or emails to understand the task better +2. **DON'T** run commands to investigate +3. **DON'T** edit documentation or files yourself +4. **DO** spawn a sub-agent with a complete, clear task prompt +5. **DO** let the sub-agent do the work + +Clarifications and decisions stay with the main session. Work delegated immediately. + +--- + +## How to Spawn a Sub-Agent + +Use the **`sessions_spawn` tool** for simple, multi-step tasks. Do NOT use `openclaw agent` CLI or any shell command. + +**IMPORTANT:** If the task involves email, Office files, legal documents, or complex work, do NOT use `sessions_spawn`. Instead, follow the Claude Code workflow in `rules/delegation.md`. + +### Parameters + +| Parameter | Required | Description | +|-----------|----------|-------------| +| `task` | Yes | The full prompt/instructions for the sub-agent | +| `label` | No | Short identifier (e.g., "email-search-acme") | +| `model` | No | Override model. Default from config. Rarely needed. | +| `thinking` | No | "off", "low", "medium", "high". Default: off | +| `runTimeoutSeconds` | No | Total run timeout | +| `cleanup` | No | "delete" (default) or "keep" | + +**IMPORTANT:** Do NOT pass `agentId`. Omit it entirely. The sub-agent runs under your own agent with the default model from gateway config. Passing `agentId` causes a "forbidden" error. 
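+
+A minimal spawn for a quick lookup might look like this (the task text is illustrative; a fuller email-search example follows below):
+
+```
+Tool: sessions_spawn
+{
+  "task": "Check today's and tomorrow's calendar for {{USER_EMAIL}}. Return a bullet list of events with start time, title, and location. Return only the list when done.",
+  "label": "calendar-check"
+}
+```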
+ +### Example Call + +To search emails for a specific topic across all folders: + +``` +Tool: sessions_spawn +{ + "task": "Search {{USER_EMAIL}} for all emails matching 'Acme' across ALL folders (inbox, sent, subfolders). Use {{MAIL_MCP_SERVER}}.\n\nStep 1 -- Search: Call list-mail-messages with search='\"Acme\"' top=20 select='[\"subject\",\"from\",\"receivedDateTime\",\"hasAttachments\",\"bodyPreview\"]'. This searches ALL folders like Outlook does. Do NOT use list-mail-folder-messages (that only searches one folder).\n\nStep 2 -- Attachments: For emails where hasAttachments=true, call list-mail-attachments with the messageId and pipe through jq to get only metadata: jq '[.value[] | select(.isInline == false) | {id, name, size}]'. Then download each using: ~/.openclaw/scripts/save-attachment.sh {{MAIL_MCP_SERVER}} /tmp/\n\nStep 3 -- Upload to OneDrive: Upload each attachment and a summary file to OneDrive:\n~/.openclaw/scripts/graph-api.sh PUT \"/me/drive/root:/drafts/Sales/Acme/:/content\" @/tmp/\n\nStep 4 -- Summary: Write a Markdown summary to /tmp/acme-summary.md listing all emails (subject, date, sender, snippet) and attachment file paths. Upload this summary to OneDrive at drafts/Sales/Acme/SUMMARY.md. Clean up /tmp files.\n\nOUTPUT FORMAT: Return your response in two sections:\n## Results\n[Summary of emails found, attachments downloaded, files uploaded]\n## Learnings\nClassify anything new using these tags:\n- [contact] Name - role, email, relationship (source: email/calendar/etc)\n- [fact] Key information (source: where found)\n- [deadline] Date/time commitment (source: where found)\n- [decision] Choice or commitment made (source: where found)\n- [pattern] Recurring process observed (source: where found)\nIf nothing new, write: No new learnings.", + "label": "email-search-acme" +} +``` + +The sub-agent runs on **{{PRIMARY_MODEL}} by default** (configured in gateway). + +**CRITICAL for email search tasks:** Always instruct the sub-agent to use `list-mail-messages` (cross-folder), NOT `list-mail-folder-messages` (single folder). Users expect results matching Outlook's search bar, which searches all folders. + +## Mandatory Result Delivery + +When a sub-agent completes and the system injects a [System Message] with results: + +1. ALWAYS deliver a text summary to the user. No exceptions. +2. Do NOT reply NO_REPLY. The Adaptive Card is a preview, not a replacement for your text response. +3. If multiple sub-agents are running, deliver each result as it arrives. Don't wait for others. +4. Include key findings in your summary, not just "task completed". +5. If the result needs the user's decision, explicitly ask for it. +6. Persist learnings (next section) AFTER delivering the result, not instead of it. + +## Post-Completion: Persist Learnings (MANDATORY) + +Every sub-agent task prompt must end with the OUTPUT FORMAT block (see SOUL.md). This forces the sub-agent to return classified learnings alongside its results. + +When the sub-agent returns, **before reporting to the user**: +1. Read the `## Learnings` section from the sub-agent's response +2. Append tagged entries to `observations/findings.md` (contacts, facts, deadlines, decisions) +3. Append patterns to `observations/patterns.md` +4. Append file paths to `observations/file-index.md` + +This is a single-step operation (3 file appends) that runs in the main session. Do NOT skip it. Sub-agent sessions auto-archive after 60 minutes -- unclassified knowledge is permanently lost. 
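+
+A minimal sketch of the three appends (entry text is illustrative; the formats follow rules/memory-classification.md):
+
+```
+echo "- [contact] Jane Doe - advisor, met at conference (source: sub-agent email search)" >> ~/.openclaw/workspace/observations/findings.md
+echo "- [pattern] User asks for Acme email digests weekly (source: sub-agent email search)" >> ~/.openclaw/workspace/observations/patterns.md
+echo "- YYYY-MM-DD /tmp/acme-summary.md - email search summary, uploaded to OneDrive (final)" >> ~/.openclaw/workspace/observations/file-index.md
+```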
+ +## Reporting + +When you spawn a sub-agent, tell the user what you delegated and the label. The user can check the dashboard at /dashboard (Active Agents panel shows sub-agents with live progress). + +## Constraints + +- **Max 3 subagent spawns per session** +- **Each subagent gets a single turn** (no multi-turn conversations) +- **Log every spawn** to `workspace/reports/subagent-log.md`: timestamp, prompt summary, model used + +## When to Spawn Sub-Agents + +**Spawn immediately when:** + +1. Debugging or investigating errors/logs (<5 steps) +2. Simple multi-step data gathering (calendar + task status, <5 steps) +3. Quick lookups requiring >1 API call but straightforward logic +4. Any task where binary/base64 data would enter context + +**Do NOT spawn a sub-agent when:** + +- Task involves email (use Claude Code via rules/delegation.md) +- Task involves Office files: .docx, .xlsx, .pptx (use Claude Code via rules/delegation.md) +- Task requires code writing (use Claude Code via rules/delegation.md) +- Task is complex, legal, or requires conditional logic (use Claude Code via rules/delegation.md) + +**Main session reserved for:** quick clarifications, decisions needing user input, sub-agent/Claude Code coordination, single-step operations. + +**For email, file, code, or complex work:** Read `rules/delegation.md` and use the Claude Code workflow instead. + +## Date Formats + +**The user uses European format (dd/mm/yy).** Convert before passing to sub-agents: 01/11/25 = November 1, 2025 (NOT January 11). + +## Sub-Agent Prompt Best Practices + +Write the task prompt as a complete, self-contained specification: + +- State the goal clearly in the first sentence +- List all MCP servers and tool names the sub-agent should use +- Include file paths, folder IDs, and account names explicitly +- Tell it what format to return results in +- Tell it to save files directly to disk, never load binary data into context +- End with "Return only [X] when done" to keep the response small + +**Bad:** "Search emails for Acme" +**Bad:** "Search {{USER_EMAIL}} inbox for emails matching 'Acme'" (misses Sent, subfolders) +**Good:** "Search {{USER_EMAIL}} for all emails matching 'Acme' across ALL folders. Use {{MAIL_MCP_SERVER}}. Call list-mail-messages with search='\"Acme\"' (cross-folder search). For emails with hasAttachments=true, download attachments using ~/.openclaw/scripts/save-attachment.sh and upload to OneDrive at drafts/Sales/Acme/. Return a Markdown summary." + +## Code vs Analysis + +**All programming** (writing code, editing files, creating scripts) **MUST go through `~/.openclaw/scripts/claude-sub.sh`** or the delegation wrapper. Claude Code handles iterative code/test cycles more reliably. + +**All non-coding work** (email, calendar, reports, file ops) uses sub-agents when multi-step, or main session when single-step. 
+ +## Boundaries + +Sub-agents inherit all boundaries from SOUL.md: +- Read-only on the user's accounts +- Write only from the designated sender address +- No messages to anyone but the user +- All data confidential diff --git a/bates-core/workspace-core/skills/coding-agent/SKILL.md b/bates-core/workspace-core/skills/coding-agent/SKILL.md new file mode 100644 index 0000000..f121033 --- /dev/null +++ b/bates-core/workspace-core/skills/coding-agent/SKILL.md @@ -0,0 +1,55 @@ +# Coding Agent Skill - Claude Code Delegation + +## When to Use +Any task that requires writing or modifying code: Python scripts, shell scripts, TypeScript, config generators, data processing, anything with a shebang line. The only exceptions are trivial one-liners (a single sed/awk/jq command) or JSON/YAML config edits. + +## How to Delegate + +### Option A: File-based tasks (presentations, documents, complex scripts) +Use the delegation wrapper which handles dashboard registration automatically: + +```bash +# Write the prompt to a temp file first +echo "YOUR FULL PROMPT HERE" > /tmp/task-prompt.md + +# Run with dashboard tracking +~/.openclaw/scripts/run-delegation.sh /tmp/task-prompt.md /tmp/task-output.log "task-name" +``` + +### Option B: Quick exec tasks (small fixes, one-off scripts) +Use `dashboard-register.sh` around your exec calls: + +```bash +# Step 1: Register start +~/.openclaw/scripts/dashboard-register.sh start "task-name" "Brief description" $$ + +# Step 2: Run Claude Code +env -u ANTHROPIC_API_KEY claude -p --dangerously-skip-permissions "YOUR PROMPT" > /tmp/task.log 2>&1 +EXIT_CODE=$? + +# Step 3: Register completion +~/.openclaw/scripts/dashboard-register.sh complete "task-name" $EXIT_CODE "$(tail -20 /tmp/task.log)" +``` + +### MANDATORY: Always register with dashboard +**Every Claude Code invocation MUST be tracked.** The user monitors delegations via the dashboard. If you skip registration, the task is invisible. + +- Use Option A for anything that takes > 30 seconds +- Use Option B for quick fixes +- NEVER call `claude -p` without dashboard registration + +### Important flags +- Always use `env -u ANTHROPIC_API_KEY` (forces Claude Code to use its own auth) +- Always use `--dangerously-skip-permissions` (non-interactive) +- Always use `-p` (print mode, no interactive terminal) +- Redirect stderr: `2>&1` + +## Resource Limits +- Do NOT run two Claude Code processes simultaneously (OOM risk) +- Run sequentially if multiple delegations are needed +- Each process uses 300-400 MB RAM + +## After Completion +1. Check the log file for errors +2. If exit code != 0, read the log and either retry with fixes or report failure +3. Summarize results (don't dump raw logs) diff --git a/bates-core/workspace-core/skills/quick-capture/SKILL.md b/bates-core/workspace-core/skills/quick-capture/SKILL.md new file mode 100644 index 0000000..56ac7d9 --- /dev/null +++ b/bates-core/workspace-core/skills/quick-capture/SKILL.md @@ -0,0 +1,46 @@ +--- +name: quick-capture +description: "Captures thoughts, ideas, and tasks to local workspace files for later review." +triggers: + - "capture this" + - "quick task" + - "add task" + - "note this" + - "remember this" +--- +# Quick Capture Skill + +Accepts any thought, idea, or task and saves it to the workspace. 
+
+## Routing Logic
+
+| Type | Destination | Method |
+|------|-------------|--------|
+| Task | observations/tasks.md | Append with timestamp |
+| Idea or note | observations/captures.md | Append with timestamp |
+
+## Execution
+
+### Capture a Task
+```bash
+TIMESTAMP=$(date +"%Y-%m-%d %H:%M")
+echo "- [ ] [$TIMESTAMP] Task description" >> ~/.openclaw/workspace/observations/tasks.md
+```
+
+### Capture an Idea
+```bash
+TIMESTAMP=$(date +"%Y-%m-%d %H:%M")
+echo "## $TIMESTAMP" >> ~/.openclaw/workspace/observations/captures.md
+echo "" >> ~/.openclaw/workspace/observations/captures.md
+echo "Content of the idea" >> ~/.openclaw/workspace/observations/captures.md
+echo "" >> ~/.openclaw/workspace/observations/captures.md
+```
+
+## After Capture
+
+Confirm to the user what was captured and where it was saved.
+
+## Enhanced Version
+
+With M365 integration (bates-enhance.sh m365), tasks can be routed to
+Microsoft Planner and To Do for proper project management.
diff --git a/bates-enhance/bates-enhance.sh b/bates-enhance/bates-enhance.sh
new file mode 100755
index 0000000..80d1264
--- /dev/null
+++ b/bates-enhance/bates-enhance.sh
@@ -0,0 +1,124 @@
+#!/usr/bin/env bash
+# bates-enhance.sh -- Bates Enhancement Wizard
+# Add integrations to your Bates AI Assistant one at a time.
+#
+# Usage:
+#   bates-enhance.sh                           # Interactive menu
+#   bates-enhance.sh <integration>             # Install specific integration
+#   bates-enhance.sh status                    # Show current state
+#   bates-enhance.sh list                      # Show integration details
+#   bates-enhance.sh rollback <integration>   # Rollback an integration
set -euo pipefail
+
+# Determine script location
+ENHANCE_DIR="${ENHANCE_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)}"
+
+# Source libraries
+source "$ENHANCE_DIR/lib/common.sh"
+source "$ENHANCE_DIR/lib/integration-state.sh"
+source "$ENHANCE_DIR/lib/backup-restore.sh"
+source "$ENHANCE_DIR/lib/cron-unlock.sh"
+source "$ENHANCE_DIR/lib/template-engine.sh"
+
+export PATH="$HOME/.npm-global/bin:$PATH"
+export ENHANCE_DIR
+
+INTEGRATION="${1:-}"
+SUBCOMMAND="${2:-}"
+
+# ============================================================
+# No argument: show interactive menu
+# ============================================================
+if [[ -z "$INTEGRATION" ]]; then
+    echo ""
+    echo "==========================================="
+    echo "  Bates Enhancement Wizard"
+    echo "==========================================="
+
+    show_integration_status
+    echo ""
+    read -rp "Which integration would you like to add? (or 'list' for details): " INTEGRATION
+
+    if [[ -z "$INTEGRATION" ]]; then
+        echo "No integration selected. Exiting."
+        exit 0
+    fi
+fi
+
+# ============================================================
+# Handle commands
+# ============================================================
+case "$INTEGRATION" in
+    status)
+        show_integration_status
+        ;;
+
+    list)
+        show_integration_details
+        ;;
+
+    rollback)
+        if [[ -z "$SUBCOMMAND" ]]; then
+            echo "Usage: bates-enhance.sh rollback <integration>"
+            echo ""
+            echo "Available:"
+            list_backups
+            exit 1
+        fi
+        rollback_integration "$SUBCOMMAND"
+        ;;
+
+    m365|teams|twilio|elevenlabs|search|image|social|tailscale|agents|google|github|deepseek|websearch)
+        # Check if already installed
+        if is_installed "$INTEGRATION" 2>/dev/null; then
+            warn "$INTEGRATION is already installed."
+            echo ""
+            if ! confirm "Reinstall/reconfigure $INTEGRATION?"; then
+                exit 0
+            fi
+        fi
+
+        # Backup current config
+        backup_config "$INTEGRATION"
+
+        # Run integration setup
+        SETUP_SCRIPT="$ENHANCE_DIR/integrations/$INTEGRATION/setup.sh"
+        if [[ ! 
-f "$SETUP_SCRIPT" ]]; then
+            fatal "Setup script not found: $SETUP_SCRIPT"
+        fi
+        source "$SETUP_SCRIPT"
+
+        # Mark integration as installed
+        mark_installed "$INTEGRATION"
+
+        # Deploy workspace additions if they exist
+        deploy_workspace_additions "$INTEGRATION"
+
+        # Unlock cron jobs for this integration
+        unlock_cron_jobs "$INTEGRATION"
+
+        # Restart gateway to pick up changes
+        echo ""
+        info "Restarting gateway to apply changes..."
+        systemctl --user restart openclaw-gateway 2>/dev/null || warn "Could not restart gateway"
+        sleep 3
+
+        echo ""
+        success "$INTEGRATION integration complete!"
+        suggest_next_integration "$INTEGRATION"
+        ;;
+
+    *)
+        error "Unknown integration: $INTEGRATION"
+        echo ""
+        echo "Available integrations:"
+        echo "  m365 teams twilio elevenlabs search image"
+        echo "  social tailscale agents google github deepseek websearch"
+        echo ""
+        echo "Commands:"
+        echo "  status                   Show current integration state"
+        echo "  list                     Show integration details"
+        echo "  rollback <integration>   Rollback an integration"
+        exit 1
+        ;;
+esac
diff --git a/bates-enhance/docs/manual-steps-elevenlabs.md b/bates-enhance/docs/manual-steps-elevenlabs.md
new file mode 100644
index 0000000..8a3f93a
--- /dev/null
+++ b/bates-enhance/docs/manual-steps-elevenlabs.md
@@ -0,0 +1,244 @@
+# ElevenLabs Voice Clone Setup
+
+This guide covers creating an ElevenLabs account, recording voice samples, creating a voice clone, and integrating it with the OpenClaw gateway for text-to-speech in voice calls.
+
+---
+
+## Prerequisites
+
+- An ElevenLabs account (free tier works for testing; paid plan recommended for production)
+- A microphone for recording voice samples
+- The OpenClaw gateway installed and running
+
+---
+
+## Step 1: Create an ElevenLabs Account
+
+1. Go to [elevenlabs.io](https://elevenlabs.io) and sign up.
+2. Verify your email address.
+3. Choose a plan:
+
+   | Plan | Characters/month | Voice Clones | Best For |
+   |---|---|---|---|
+   | Free | 10,000 | 3 instant | Testing |
+   | Starter | 30,000 | 10 instant | Light usage |
+   | Creator | 100,000 | 30 instant, 1 professional | Regular usage |
+   | Pro | 500,000+ | Unlimited instant, 3 professional | Production |
+
+> **Tip:** Instant voice clones require only a short sample. Professional voice clones require 30+ minutes of audio but produce significantly better results. Start with an instant clone for testing.
+
+---
+
+## Step 2: Record Voice Samples
+
+The quality of your voice clone depends heavily on the quality of your recordings.
+
+### Recording Guidelines
+
+- **Environment**: Record in a quiet room with minimal echo. Avoid rooms with hard surfaces (use a carpeted room or add soft furnishings).
+- **Microphone**: Use a decent USB microphone or headset. Built-in laptop microphones produce poor results.
+- **Distance**: Keep the microphone 6-12 inches from your mouth.
+- **Duration**:
+  - Instant clone: Minimum 30 seconds, recommended 1-3 minutes
+  - Professional clone: Minimum 30 minutes of clean audio
+- **Content**: Read naturally. Mix short and long sentences. Include questions, statements, and varied emotional tones. Reading a book chapter or news article works well.
+- **Consistency**: Maintain a consistent volume, pace, and distance from the microphone throughout.
+
+### Recording Tips
+
+1. Do a test recording first and listen back for background noise or distortion.
+2. Speak at your natural pace. Do not read too fast or too slow.
+3. Avoid filler words ("um", "uh") as much as possible.
+4. Take natural pauses between sentences.
+5. 
If you make a mistake, pause for 2 seconds, then re-read the sentence from the beginning.
+
+### Recommended Recording Tools
+
+- **Audacity** (free, cross-platform): Good for recording and cleaning up audio
+- **Voice Memos** (macOS/iOS): Quick and easy for short samples
+- **OBS Studio** (free): If you already have it set up
+
+### File Format
+
+- WAV or MP3, minimum 22050 Hz sample rate
+- Mono channel preferred
+- No background music or effects
+
+---
+
+## Step 3: Create a Voice Clone
+
+### Instant Voice Clone
+
+1. Log in to [elevenlabs.io](https://elevenlabs.io).
+2. Go to **Voices** (or **Voice Library** > **My Voices**).
+3. Click **Add Voice** > **Instant Voice Cloning**.
+4. Give the voice a name (e.g., "Bates Voice").
+5. Upload your audio samples (you can upload multiple files).
+6. Add a description of the voice characteristics (e.g., "Male, mid-30s, professional, calm and articulate").
+7. Review and accept the terms.
+8. Click **Add Voice**.
+
+The clone is created almost instantly and appears in your voice library.
+
+### Professional Voice Clone (Optional)
+
+For higher quality, especially important for phone calls where audio fidelity matters:
+
+1. Go to **Voices** > **Add Voice** > **Professional Voice Cloning**.
+2. Follow the guided process to upload 30+ minutes of clean audio.
+3. The professional clone takes several hours to train.
+4. You will receive an email when it is ready.
+
+---
+
+## Step 4: Get the Voice ID
+
+1. Go to **Voices** in the ElevenLabs dashboard.
+2. Find your cloned voice and click on it.
+3. The **Voice ID** is displayed in the voice settings panel. It looks like:
+
+   ```
+   AbCdEfGhIjKlMnOpQrSt
+   ```
+
+   You can also click the copy icon next to the ID.
+
+### Alternative: Get Voice ID via API
+
+```bash
+curl -s "https://api.elevenlabs.io/v1/voices" \
+  -H "xi-api-key: <YOUR_API_KEY>" | python3 -m json.tool
+```
+
+Look for your voice in the response and note the `voice_id` field.
+
+---
+
+## Step 5: Get Your API Key
+
+1. Click your profile icon in the top-right corner of the ElevenLabs dashboard.
+2. Go to **Profile + API key**.
+3. Your API key is shown (click to reveal). It looks like:
+
+   ```
+   sk_a1b2c3d4e5f6g7h8i9j0k1l2m3n4o5p6
+   ```
+
+> **Warning:** Treat this API key as a secret. Do not share it or commit it to version control.
+
+---
+
+## Step 6: Test TTS Output
+
+Before integrating with the gateway, test the voice clone directly via the API:
+
+```bash
+curl -X POST "https://api.elevenlabs.io/v1/text-to-speech/<VOICE_ID>" \
+  -H "xi-api-key: <YOUR_API_KEY>" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "text": "Hello, this is Bates. How can I help you today?",
+    "model_id": "eleven_multilingual_v2",
+    "voice_settings": {
+      "stability": 0.5,
+      "similarity_boost": 0.75,
+      "style": 0.0,
+      "use_speaker_boost": true
+    }
+  }' \
+  --output test_output.mp3
+```
+
+Play the resulting `test_output.mp3` to verify the voice sounds correct.
+
+### Voice Settings Explained
+
+| Setting | Range | Description |
+|---|---|---|
+| `stability` | 0.0 - 1.0 | Higher = more consistent but potentially monotone. Lower = more expressive but less predictable. Start at 0.5. |
+| `similarity_boost` | 0.0 - 1.0 | Higher = closer to the original voice. Lower = more creative variation. Start at 0.75. |
+| `style` | 0.0 - 1.0 | Exaggerates the style of the voice. 0.0 recommended for natural speech. |
+| `use_speaker_boost` | boolean | Boosts clarity of the cloned voice. Recommended `true`. 
| + +--- + +## Step 7: Configure OpenClaw + +Add the ElevenLabs configuration to your OpenClaw voice call settings: + +```json +{ + "voiceCall": { + "tts": { + "provider": "elevenlabs", + "elevenlabs": { + "apiKey": "<your-api-key>", + "voiceId": "<voice-id>", + "model": "eleven_multilingual_v2" + } + } + } +} +``` + +### Model Options + +| Model | Quality | Latency | Languages | +|---|---|---|---| +| `eleven_multilingual_v2` | Highest | Higher | 29 languages | +| `eleven_turbo_v2_5` | Good | Lower | 32 languages | +| `eleven_turbo_v2` | Good | Lowest | English only | + +For voice calls, `eleven_multilingual_v2` is recommended for the best quality. If latency is a concern, try `eleven_turbo_v2_5`. + +Restart the gateway: + +```bash +systemctl --user restart openclaw-gateway +``` + +--- + +## Step 8: Verify End-to-End + +1. Make a test voice call (see the [Twilio setup guide](manual-steps-twilio.md)). +2. Bates should respond using the cloned voice. +3. Verify the voice sounds natural and the audio quality is acceptable. + +--- + +## Maintaining Your Voice Clone + +- **Update samples**: If the voice quality degrades or you want to change the voice characteristics, you can delete and recreate the clone with new samples. +- **Monitor usage**: Check your ElevenLabs dashboard for character usage. Voice calls can consume characters quickly (roughly 150 characters per sentence). +- **Plan ahead**: If you approach your monthly character limit, consider upgrading your plan or reducing the verbosity of Bates's spoken responses. + +--- + +## Troubleshooting + +### Voice Sounds Robotic or Unnatural + +- Upload longer, higher-quality audio samples. +- Increase `similarity_boost` to 0.8 or higher. +- Ensure the recording has no background noise. +- Try a professional voice clone for better results. + +### Audio Clipping or Distortion + +- Check that the original recording is not clipping (audio peaks hitting maximum). +- Reduce the input volume in your recording software. +- Re-record problematic samples. + +### High Latency in Voice Calls + +- Switch to `eleven_turbo_v2_5` or `eleven_turbo_v2` for lower latency. +- Ensure your internet connection is stable. +- Consider the geographic distance between your server and ElevenLabs API endpoints. + +### API Rate Limits + +- Free and lower-tier plans have concurrent request limits. +- If you receive 429 errors, wait a moment and retry. +- Upgrade your plan for higher rate limits. diff --git a/bates-enhance/docs/manual-steps-google.md b/bates-enhance/docs/manual-steps-google.md new file mode 100644 index 0000000..ee523b9 --- /dev/null +++ b/bates-enhance/docs/manual-steps-google.md @@ -0,0 +1,270 @@ +# Google Calendar and Gmail Integration Setup + +This guide covers creating a Google Cloud project, configuring OAuth, and connecting Google services to the OpenClaw gateway. + +--- + +## Prerequisites + +- A Google account (personal or Google Workspace) +- Access to the [Google Cloud Console](https://console.cloud.google.com) +- The OpenClaw gateway installed and running + +--- + +## Step 1: Create a Google Cloud Project + +1. Go to the [Google Cloud Console](https://console.cloud.google.com). +2. Click the project selector in the top bar. +3. Click **New Project**. +4. Enter a project name (e.g., `bates-integration`). +5. Select your organization (if applicable) or leave as "No organization." +6. Click **Create**. +7. Wait for the project to be created, then select it from the project selector. + +--- + +## Step 2: Enable Required APIs + +1. Go to **APIs & Services** > **Library**.
+2. Search for and enable each of the following APIs: + + | API | Purpose | + |---|---| + | Google Calendar API | Read and manage calendar events | + | Gmail API | Read and manage email | + | Google People API | Read contacts (optional) | + +3. Click on each API and press **Enable**. + +--- + +## Step 3: Configure the OAuth Consent Screen + +Before creating credentials, you must configure the OAuth consent screen. + +1. Go to **APIs & Services** > **OAuth consent screen**. +2. Select **User type**: + - **Internal**: Only available for Google Workspace accounts. Users within your organization can use the app without review. + - **External**: Available to any Google account. Requires verification for production use (but unverified apps can be used by up to 100 test users). +3. Click **Create**. + +### Fill in App Information + +- **App name:** `Bates Integration` +- **User support email:** Your email address +- **App logo:** Optional +- **Developer contact information:** Your email address + +### Configure Scopes + +Click **Add or remove scopes** and add: + +| Scope | Description | +|---|---| +| `https://www.googleapis.com/auth/calendar.readonly` | View calendar events | +| `https://www.googleapis.com/auth/calendar.events` | Create/edit/delete calendar events (if needed) | +| `https://www.googleapis.com/auth/gmail.readonly` | View email messages and settings | +| `https://www.googleapis.com/auth/gmail.send` | Send email (if needed) | +| `https://www.googleapis.com/auth/gmail.modify` | View and modify emails (mark read, label, etc.) | +| `https://www.googleapis.com/auth/contacts.readonly` | View contacts (optional) | + +> **Tip:** Start with read-only scopes. Add write scopes later if needed. The fewer scopes you request, the simpler the consent flow and the easier any future verification process. + +Click **Update**, then **Save and continue**. + +### Add Test Users (External only) + +If you selected External, add your Google account email as a test user. This allows you to complete the OAuth flow before the app is verified. + +Click **Save and continue**, then **Back to dashboard**. + +--- + +## Step 4: Create OAuth 2.0 Credentials + +1. Go to **APIs & Services** > **Credentials**. +2. Click **Create credentials** > **OAuth client ID**. +3. Application type: **Desktop app** (or **Web application** if the gateway handles the redirect). +4. Name: `bates-oauth-client` + +### For Desktop App + +No additional configuration needed. Click **Create**. + +### For Web Application + +Add the following authorized redirect URI: + +``` +http://localhost:18789/auth/google/callback +``` + +Click **Create**. + +### Download the Credentials + +After creation, a dialog shows the **Client ID** and **Client secret**. Click **Download JSON** to save the credentials file (usually named `client_secret_XXXXX.json`). + +> **Warning:** Store this file securely. Do not commit it to version control. + +--- + +## Step 5: Configure OpenClaw + +Add the Google OAuth credentials to your OpenClaw configuration: + +```json +{ + "google": { + "clientId": "<client-id>.apps.googleusercontent.com", + "clientSecret": "<client-secret>", + "redirectUri": "http://localhost:18789/auth/google/callback", + "scopes": [ + "https://www.googleapis.com/auth/calendar.readonly", + "https://www.googleapis.com/auth/gmail.readonly" + ] + } +} +``` + +--- + +## Step 6: Run the Auth Flow + +The first time you connect, you need to complete the OAuth consent flow to obtain a refresh token. + +### Using the Gateway's Built-in Auth Flow + +1.
Restart the gateway: + + ```bash + systemctl --user restart openclaw-gateway + ``` + +2. Open your browser and navigate to: + + ``` + http://localhost:18789/auth/google + ``` + +3. You will be redirected to Google's consent screen. + +4. Sign in with your Google account and grant the requested permissions. + +5. If you see a "This app isn't verified" warning (External apps only): + - Click **Advanced** + - Click **Go to Bates Integration (unsafe)** + - This is normal for unverified test apps + +6. After granting consent, you will be redirected back to the gateway with an authorization code. + +7. The gateway exchanges the code for access and refresh tokens automatically. + +### Manual Auth Flow (Alternative) + +If the gateway does not have a built-in auth endpoint, you can use a standalone script: + +```bash +# Generate the auth URL +python3 << 'PYEOF' +from urllib.parse import urlencode + +params = { + "client_id": "<client-id>.apps.googleusercontent.com", + "redirect_uri": "http://localhost:18789/auth/google/callback", + "response_type": "code", + "scope": "https://www.googleapis.com/auth/calendar.readonly https://www.googleapis.com/auth/gmail.readonly", + "access_type": "offline", + "prompt": "consent" +} +print(f"https://accounts.google.com/o/oauth2/v2/auth?{urlencode(params)}") +PYEOF +``` + +1. Open the printed URL in your browser. +2. Complete the consent flow. +3. Copy the `code` parameter from the redirect URL. +4. Exchange it for tokens: + +```bash +curl -X POST "https://oauth2.googleapis.com/token" \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d "code=<authorization-code>" \ + -d "client_id=<client-id>.apps.googleusercontent.com" \ + -d "client_secret=<client-secret>" \ + -d "redirect_uri=http://localhost:18789/auth/google/callback" \ + -d "grant_type=authorization_code" +``` + +The response contains `access_token`, `refresh_token`, and `expires_in`. Store the refresh token in your configuration. + +> **Important:** Include `access_type=offline` and `prompt=consent` in the auth URL. Without `access_type=offline`, Google will not issue a refresh token. Without `prompt=consent`, Google may skip the consent screen on subsequent authorizations and not return a new refresh token. + +--- + +## Step 7: Token Refresh Setup + +Google access tokens expire after 1 hour. The refresh token is used to obtain new access tokens automatically. + +### Automatic Refresh + +The OpenClaw gateway handles token refresh automatically. When an API call fails with a 401 status, the gateway: + +1. Uses the stored refresh token to request a new access token. +2. Retries the failed API call with the new token. +3. Stores the new access token for subsequent requests. + +### Manual Refresh (Testing) + +```bash +curl -X POST "https://oauth2.googleapis.com/token" \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d "refresh_token=<refresh-token>" \ + -d "client_id=<client-id>.apps.googleusercontent.com" \ + -d "client_secret=<client-secret>" \ + -d "grant_type=refresh_token" +``` + +### Token Expiry and Revocation + +- **Access tokens** expire after 1 hour. +- **Refresh tokens** do not expire unless: + - The user revokes access in their [Google Account permissions](https://myaccount.google.com/permissions) + - The token has gone unused for six months + - The project's OAuth consent screen is set to "Testing", in which case refresh tokens expire after 7 days + - The user changes their password and the token carries Gmail scopes + +> **Tip:** If token refresh fails unexpectedly, re-run the auth flow from Step 6 to obtain a new refresh token.
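The refresh-and-retry loop the gateway performs can also be reproduced by hand, which helps when diagnosing auth problems. A minimal bash sketch, assuming the client ID, client secret, and refresh token are exported as `GOOGLE_CLIENT_ID`, `GOOGLE_CLIENT_SECRET`, and `GOOGLE_REFRESH_TOKEN` (these variable names are illustrative, not part of the gateway):

```bash
#!/usr/bin/env bash
# Mint a fresh access token from the stored refresh token,
# then probe the Calendar API once with it.

refresh_access_token() {
  curl -s -X POST "https://oauth2.googleapis.com/token" \
    -d "refresh_token=${GOOGLE_REFRESH_TOKEN}" \
    -d "client_id=${GOOGLE_CLIENT_ID}" \
    -d "client_secret=${GOOGLE_CLIENT_SECRET}" \
    -d "grant_type=refresh_token" |
    python3 -c 'import json,sys; print(json.load(sys.stdin)["access_token"])'
}

TOKEN="$(refresh_access_token)"
STATUS="$(curl -s -o /dev/null -w '%{http_code}' \
  -H "Authorization: Bearer ${TOKEN}" \
  "https://www.googleapis.com/calendar/v3/calendars/primary/events?maxResults=1")"

if [ "$STATUS" = "401" ]; then
  # 401 on a freshly minted token means the refresh token itself is bad.
  echo "Refresh token appears invalid or revoked; re-run the auth flow in Step 6." >&2
else
  echo "Calendar API reachable (HTTP $STATUS)."
fi
```

If the refresh succeeds here but the gateway still logs 401s, the usual culprit is a mismatch between the stored refresh token and the client ID/secret pair it was issued for.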
+ +--- + +## Verifying the Integration + +Test that the Google integration is working: + +### Calendar Test + +```bash +curl -s "https://www.googleapis.com/calendar/v3/calendars/primary/events?maxResults=5" \ + -H "Authorization: Bearer <access-token>" | python3 -m json.tool +``` + +### Gmail Test + +```bash +curl -s "https://www.googleapis.com/gmail/v1/users/me/messages?maxResults=5" \ + -H "Authorization: Bearer <access-token>" | python3 -m json.tool +``` + +If both return data, the integration is working correctly. + +--- + +## Security Considerations + +- **Minimal scopes**: Only request the scopes you actually need. +- **Secure storage**: Store client secrets and refresh tokens encrypted or in a secrets manager. +- **Regular review**: Periodically review connected apps in your [Google Account permissions](https://myaccount.google.com/permissions). +- **Verified apps**: For production use with multiple users, submit the app for Google's verification process. +- **Rotate secrets**: If a client secret is compromised, delete the credential in the Cloud Console and create a new one. All users will need to re-authenticate. diff --git a/bates-enhance/docs/manual-steps-m365.md b/bates-enhance/docs/manual-steps-m365.md new file mode 100644 index 0000000..616558a --- /dev/null +++ b/bates-enhance/docs/manual-steps-m365.md @@ -0,0 +1,245 @@ +# Microsoft 365 Integration Setup + +This guide walks through setting up the three Entra (Azure AD) app registrations required for Bates to interact with Microsoft 365 services: reading mail/calendars, accessing company-wide resources, and sending mail on behalf of the assistant. + +--- + +## Prerequisites + +- A Microsoft 365 tenant with admin access (or access to request admin consent) +- Access to the [Microsoft Entra admin center](https://entra.microsoft.com) +- The OpenClaw gateway installed and running + +--- + +## Overview of App Registrations + +| App Name | Purpose | Permission Type | +|---|---|---| +| `bates-reader` | Read user mail, calendars, contacts | Delegated | +| `bates-company-reader` | Read organization-wide resources (shared mailboxes, directory) | Application | +| `bates-assistant` | Send mail, manage calendar events, write to mailboxes | Application | +| +--- + +## Step 1: Create the Reader App Registration + +The reader app uses **delegated permissions** so it acts on behalf of the signed-in user. + +1. Go to **Entra admin center** > **App registrations** > **New registration**. +2. Name: `bates-reader` +3. Supported account types: **Accounts in this organizational directory only** (single tenant). +4. Redirect URI: Select **Web** and enter your gateway callback URL (e.g., `https://localhost:18789/auth/callback`). +5. Click **Register**. + +### Configure API Permissions (Reader) + +Navigate to **API permissions** > **Add a permission** > **Microsoft Graph** > **Delegated permissions**. + +Add the following: + +| Permission | Description | +|---|---| +| `Mail.Read` | Read user mail | +| `Mail.ReadBasic` | Read basic mail properties | +| `Calendars.Read` | Read user calendars | +| `Calendars.Read.Shared` | Read shared calendars | +| `Contacts.Read` | Read user contacts | +| `User.Read` | Sign in and read user profile | +| `offline_access` | Maintain access to data (refresh tokens) | + +### Create a Client Secret + +1. Go to **Certificates & secrets** > **New client secret**. +2. Description: `bates-reader-secret` +3. Expiry: Choose 12 or 24 months. +4. Copy the **Value** immediately (it will not be shown again).
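The next subsection lists the three values to record for this app. One low-tech way to keep them out of shell history and out of version control -- a sketch, not something the gateway requires; the file path and variable names here are arbitrary -- is a mode-600 env file:

```bash
# Store the reader app's values in a file only your user can read.
install -d -m 700 ~/.config/bates
cat > ~/.config/bates/m365-reader.env << 'EOF'
M365_READER_CLIENT_ID=<application-client-id>
M365_READER_TENANT_ID=<directory-tenant-id>
M365_READER_CLIENT_SECRET=<client-secret-value>
EOF
chmod 600 ~/.config/bates/m365-reader.env
```

The same pattern works for the two registrations created in the steps below.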
+ +### Record These Values + +- **Application (client) ID** +- **Directory (tenant) ID** +- **Client secret value** + +--- + +## Step 2: Create the Company Reader App Registration + +The company reader uses **application permissions** to access organization-wide data without a signed-in user. + +1. Go to **App registrations** > **New registration**. +2. Name: `bates-company-reader` +3. Supported account types: **Single tenant**. +4. No redirect URI needed. +5. Click **Register**. + +### Configure API Permissions (Company Reader) + +Navigate to **API permissions** > **Add a permission** > **Microsoft Graph** > **Application permissions**. + +Add the following: + +| Permission | Description | +|---|---| +| `Mail.Read` | Read mail in all mailboxes | +| `Calendars.Read` | Read calendars in all mailboxes | +| `User.Read.All` | Read all users' full profiles | +| `Group.Read.All` | Read all groups | +| `Directory.Read.All` | Read directory data | +| `MailboxSettings.Read` | Read all users' mailbox settings | + +### Grant Admin Consent + +Click the **Grant admin consent for [Your Tenant]** button. This is required because application permissions need admin approval. + +> **Warning:** Application permissions grant broad access. Only grant what is strictly needed and review periodically. + +### Create a Client Secret + +Follow the same steps as the reader app. Record the client ID, tenant ID, and secret. + +--- + +## Step 3: Create the Assistant App Registration + +The assistant app uses **application permissions** and is the most privileged registration. It can send mail, create events, and modify mailbox contents. + +1. Go to **App registrations** > **New registration**. +2. Name: `bates-assistant` +3. Supported account types: **Single tenant**. +4. No redirect URI needed. +5. Click **Register**. + +### Configure API Permissions (Assistant) + +Navigate to **API permissions** > **Add a permission** > **Microsoft Graph** > **Application permissions**. + +Add the following: + +| Permission | Description | +|---|---| +| `Mail.ReadWrite` | Read and write mail in all mailboxes | +| `Mail.Send` | Send mail as any user | +| `Calendars.ReadWrite` | Read and write calendars in all mailboxes | +| `Contacts.ReadWrite` | Read and write contacts in all mailboxes | +| `User.Read.All` | Read all users' full profiles | +| `Files.ReadWrite.All` | Read and write all files (OneDrive/SharePoint) | + +### Grant Admin Consent + +Click **Grant admin consent for [Your Tenant]**. + +### Create a Client Secret + +Follow the same steps as above. Record all three values. + +--- + +## Step 4: Restrict the Assistant Email via Exchange Transport Rule + +To prevent the assistant from sending mail as arbitrary users, create an Exchange transport rule that restricts which sender address it can use. + +1. Open the [Exchange admin center](https://admin.exchange.microsoft.com). +2. Go to **Mail flow** > **Rules** > **Add a rule** > **Create a new rule**. +3. Configure the rule: + + - **Name:** `Restrict Bates Assistant Sender` + - **Apply this rule if:** The sender is `bates-assistant@yourdomain.com` + - **Except if:** The sender address includes `bates-assistant@yourdomain.com` + - **Do the following:** Reject the message with the explanation "Only the designated assistant address may send via this app." 
+ +Alternatively, use **application access policies** in Exchange Online to scope the app registration to specific mailboxes: + +```powershell +# Connect to Exchange Online PowerShell +Connect-ExchangeOnline + +# Create an application access policy +New-ApplicationAccessPolicy ` + -AppId "<assistant-app-client-id>" ` + -PolicyScopeGroupId "bates-allowed-mailboxes@yourdomain.com" ` + -AccessRight RestrictAccess ` + -Description "Restrict Bates assistant to specific mailboxes" +``` + +> **Tip:** Create a mail-enabled security group (e.g., `bates-allowed-mailboxes`) containing only the mailboxes the assistant should access. This is more secure than transport rules. + +--- + +## Step 5: Configure OpenClaw + +Add the app registration details to your OpenClaw configuration. The exact field names depend on your config schema, but typically: + +```json +{ + "m365": { + "reader": { + "clientId": "<reader-client-id>", + "tenantId": "<tenant-id>", + "clientSecret": "<reader-client-secret>" + }, + "companyReader": { + "clientId": "<company-reader-client-id>", + "tenantId": "<tenant-id>", + "clientSecret": "<company-reader-client-secret>" + }, + "assistant": { + "clientId": "<assistant-client-id>", + "tenantId": "<tenant-id>", + "clientSecret": "<assistant-client-secret>" + } + } +} +``` + +--- + +## Step 6: Test with mcporter + +[mcporter](https://github.com/nicolgit/mcporter) is a handy tool for testing Microsoft Graph API calls directly. + +1. Install mcporter: + + ```bash + npm install -g mcporter + ``` + +2. Test the reader app (delegated flow): + + ```bash + mcporter login --client-id <reader-client-id> --tenant-id <tenant-id> --scope "Mail.Read Calendars.Read" + mcporter get "https://graph.microsoft.com/v1.0/me/messages?\$top=5" + ``` + +3. Test the company reader app (client credentials flow): + + ```bash + mcporter login --client-id <company-reader-client-id> --tenant-id <tenant-id> --client-secret <company-reader-client-secret> --grant-type client_credentials + mcporter get "https://graph.microsoft.com/v1.0/users?\$top=5" + ``` + +4. Test the assistant app (send a test email): + + ```bash + mcporter login --client-id <assistant-client-id> --tenant-id <tenant-id> --client-secret <assistant-client-secret> --grant-type client_credentials + mcporter post "https://graph.microsoft.com/v1.0/users/bates-assistant@yourdomain.com/sendMail" --body '{ + "message": { + "subject": "Test from Bates Assistant", + "body": { "contentType": "Text", "content": "This is a test." }, + "toRecipients": [{ "emailAddress": { "address": "you@yourdomain.com" } }] + } + }' + ``` + +5. Verify the email arrives and check the sender address is correct. + +--- + +## Security Checklist + +- [ ] Reader app uses **delegated** permissions only +- [ ] Company reader and assistant use **application** permissions with admin consent +- [ ] Application access policies restrict assistant to designated mailboxes +- [ ] Client secrets are stored securely (not in version control) +- [ ] Secret expiry dates are tracked and rotated before expiry +- [ ] Permissions are reviewed quarterly for least-privilege compliance diff --git a/bates-enhance/docs/manual-steps-tailscale.md b/bates-enhance/docs/manual-steps-tailscale.md new file mode 100644 index 0000000..1c1c38b --- /dev/null +++ b/bates-enhance/docs/manual-steps-tailscale.md @@ -0,0 +1,287 @@ +# Tailscale Remote Access Setup + +This guide covers installing Tailscale, joining a tailnet, configuring HTTPS access via Tailscale Serve, and exposing the OpenClaw gateway and voice webhook to the internet.
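At a glance, the whole setup condenses to a handful of commands, each unpacked with its caveats in the steps below (run only after the prerequisites are in place):

```bash
# Condensed version of Steps 1-5; see the full steps for details.
curl -fsSL https://tailscale.com/install.sh | sh          # install
sudo tailscale up                                          # join the tailnet
sudo tailscale serve https / http://localhost:18789        # dashboard on 443
sudo tailscale serve https:8443 / http://localhost:18789   # voice webhook on 8443
sudo tailscale funnel 443 on                               # public exposure (optional)
sudo tailscale funnel 8443 on
```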
+ +--- + +## Prerequisites + +- A Tailscale account ([login.tailscale.com](https://login.tailscale.com)) +- The OpenClaw gateway installed and running on a Linux machine (WSL2 or native) +- Root/sudo access for installing Tailscale + +--- + +## Step 1: Install Tailscale + +### On Ubuntu / Debian (including WSL2) + +```bash +curl -fsSL https://tailscale.com/install.sh | sh +``` + +### On Other Distributions + +Follow the [official installation guide](https://tailscale.com/download/linux) for your distribution. + +### Verify Installation + +```bash +tailscale version +``` + +--- + +## Step 2: Authenticate and Join the Tailnet + +1. Start the Tailscale daemon (if not already running): + + ```bash + sudo tailscaled & + ``` + + On systemd-based systems: + + ```bash + sudo systemctl enable --now tailscaled + ``` + +2. Authenticate: + + ```bash + sudo tailscale up + ``` + +3. A URL will be printed. Open it in your browser and log in to your Tailscale account. + +4. Approve the device when prompted. + +5. Verify the connection: + + ```bash + tailscale status + ``` + + You should see your machine listed with an IP address (usually in the `100.x.x.x` range). + +### Machine Name + +By default, Tailscale uses your hostname. You can rename the machine in the [Tailscale admin console](https://login.tailscale.com/admin/machines) for a cleaner URL. + +--- + +## Step 3: Enable HTTPS Certificates + +Tailscale can automatically provision TLS certificates for machines in your tailnet via [Tailscale HTTPS](https://tailscale.com/kb/1153/enabling-https/). + +1. Go to the [Tailscale admin console](https://login.tailscale.com/admin/dns). +2. Under **DNS**, ensure you have a **tailnet name** configured (e.g., `your-tailnet.ts.net`). +3. Enable **HTTPS Certificates** if not already enabled. + +Your machine will be accessible at: + +``` +https://<machine-name>.<tailnet-name>.ts.net +``` + +--- + +## Step 4: Configure Tailscale Serve + +Tailscale Serve allows you to expose local HTTP services over HTTPS to your tailnet (and optionally to the public internet via Tailscale Funnel). + +### Expose the Dashboard on Port 443 + +The main gateway dashboard and API can be served over the default HTTPS port: + +```bash +sudo tailscale serve https / http://localhost:18789 +``` + +This makes the gateway accessible at: + +``` +https://<machine-name>.<tailnet-name>.ts.net/ +``` + +### Expose Voice Webhooks on Port 8443 + +Twilio voice webhooks need a separate port to avoid conflicting with the main dashboard: + +```bash +sudo tailscale serve https:8443 / http://localhost:18789 +``` + +This makes the voice webhook accessible at: + +``` +https://<machine-name>.<tailnet-name>.ts.net:8443/webhook +``` + +### Verify Serve Configuration + +```bash +sudo tailscale serve status +``` + +Expected output: + +``` +https://<machine-name>.<tailnet-name>.ts.net (Tailscale Serve) +|-- / proxy http://127.0.0.1:18789 + +https://<machine-name>.<tailnet-name>.ts.net:8443 (Tailscale Serve) +|-- / proxy http://127.0.0.1:18789 +``` + +--- + +## Step 5: Enable Tailscale Funnel (Public Access) + +By default, Tailscale Serve only exposes services **within your tailnet**. For external services like Twilio webhooks and Teams bot messages, you need **Tailscale Funnel** to make them publicly accessible. + +1. Enable Funnel in the [Tailscale admin console](https://login.tailscale.com/admin/acls): + + Add to your ACL policy: + + ```json + { + "nodeAttrs": [ + { + "target": ["autogroup:member"], + "attr": ["funnel"] + } + ] + } + ``` + +2. Turn on Funnel for your serve configurations: + + ```bash + sudo tailscale funnel 443 on + sudo tailscale funnel 8443 on + ``` + +3.
Verify: + + ```bash + sudo tailscale funnel status + ``` + +> **Warning:** Funnel exposes your services to the entire internet. Ensure your gateway has proper authentication and rate limiting before enabling Funnel. + +--- + +## Step 6: Verify External Access + +### From Another Device on Your Tailnet + +```bash +curl -s https://<machine-name>.<tailnet-name>.ts.net/ +``` + +### From the Public Internet (Funnel) + +Open in any browser: + +``` +https://<machine-name>.<tailnet-name>.ts.net/ +``` + +### Test Voice Webhook + +```bash +curl -s -X POST https://<machine-name>.<tailnet-name>.ts.net:8443/webhook \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d "test=true" +``` + +--- + +## Step 7: Make Serve Configuration Persistent + +By default, `tailscale serve` configurations persist across restarts. Verify with: + +```bash +sudo tailscale serve status +``` + +If the configuration is lost after a reboot, you can add the serve commands to a startup script: + +```bash +# /etc/systemd/system/tailscale-serve.service +[Unit] +Description=Tailscale Serve Configuration +After=tailscaled.service + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/usr/bin/tailscale serve https / http://localhost:18789 +ExecStartPost=/usr/bin/tailscale serve https:8443 / http://localhost:18789 + +[Install] +WantedBy=multi-user.target +``` + +Enable: + +```bash +sudo systemctl enable tailscale-serve +``` + +--- + +## Port Summary + +| Port | Protocol | Purpose | Access | +|---|---|---|---| +| 443 | HTTPS | Gateway dashboard, APIs, Teams webhook, Telegram webhook | Tailnet or Funnel | +| 8443 | HTTPS | Twilio voice webhooks | Funnel (public) | +| 18789 | HTTP | Local gateway (not exposed directly) | localhost only | + +--- + +## Security Considerations + +### Tailnet ACLs + +Control which devices can access your machine via [Access Control Lists](https://tailscale.com/kb/1018/acls/): + +```json +{ + "acls": [ + { + "action": "accept", + "src": ["autogroup:member"], + "dst": ["<machine-name>:443", "<machine-name>:8443"] + } + ] +} +``` + +### Funnel Exposure + +- Funnel makes services publicly accessible. Only enable it for endpoints that need external access (webhooks). +- The gateway should validate incoming webhook requests (e.g., Twilio signature validation, Bot Framework authentication). +- Monitor access logs for unexpected traffic. + +### WSL2 Considerations + +- Tailscale in WSL2 runs in userspace networking mode by default. +- The WSL2 VM's IP changes on restart. Tailscale handles this automatically. +- If Tailscale fails to start in WSL2, ensure `tailscaled` is running: + + ```bash + sudo tailscaled --state=/var/lib/tailscale/tailscaled.state --tun=userspace-networking & + ``` + +### Key Rotation + +- Tailscale node keys expire by default (180 days). Either: + - Disable key expiry for the machine in the admin console (for servers) + - Set up a cron job to run `tailscale up --auth-key=<your-auth-key>` before expiry + +### MagicDNS + +Tailscale enables [MagicDNS](https://tailscale.com/kb/1081/magicdns/) by default. This means you can access machines by name (e.g., `my-machine`) within the tailnet without the full `.ts.net` domain. However, for webhooks and external services, always use the full FQDN.
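To spot-check the whole stack after a reboot (daemon up, serve rules present, endpoint answering), a small script along these lines can help. This is a sketch: replace the FQDN placeholder with your machine's actual name, and expect the curls to fail until Funnel or tailnet access is in place:

```bash
#!/usr/bin/env bash
# Quick post-reboot sanity check for the Tailscale remote-access stack.
set -u
FQDN="<machine-name>.<tailnet-name>.ts.net"   # your real machine name here

echo "== Tailscale connectivity =="
tailscale status | head -5

echo "== Serve configuration =="
sudo tailscale serve status

echo "== Gateway answering over HTTPS =="
curl -s -o /dev/null -w "dashboard: HTTP %{http_code}\n" "https://${FQDN}/"
curl -s -o /dev/null -w "webhook:   HTTP %{http_code}\n" "https://${FQDN}:8443/webhook"
```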
diff --git a/bates-enhance/docs/manual-steps-teams.md b/bates-enhance/docs/manual-steps-teams.md new file mode 100644 index 0000000..f973fcb --- /dev/null +++ b/bates-enhance/docs/manual-steps-teams.md @@ -0,0 +1,269 @@ +# Microsoft Teams Integration Setup + +This guide covers creating a Bot Framework registration in Azure, building a Teams app manifest, uploading it to the Teams Admin Center, and connecting it to the OpenClaw gateway. + +--- + +## Prerequisites + +- A Microsoft 365 tenant with Teams enabled +- Admin access to the [Azure portal](https://portal.azure.com) and [Teams Admin Center](https://admin.teams.microsoft.com) +- The OpenClaw gateway installed and running +- A publicly accessible HTTPS endpoint for the bot (e.g., via Tailscale Serve) + +--- + +## Step 1: Create a Bot Registration in Azure / Entra + +1. Go to the [Azure portal](https://portal.azure.com). +2. Search for **Azure Bot** in the top search bar and select **Azure Bot** under Services. +3. Click **Create**. +4. Fill in the details: + + - **Bot handle:** `bates-teams-bot` + - **Subscription:** Select your Azure subscription + - **Resource group:** Create new or select existing + - **Pricing tier:** F0 (Free) for development, S1 for production + - **Microsoft App ID:** Select **Create new Microsoft App ID** + +5. Click **Review + create**, then **Create**. + +### Get the App ID and Password + +1. Once deployed, go to the bot resource. +2. Navigate to **Configuration**. +3. Note the **Microsoft App ID**. +4. Click **Manage Password** to go to the app registration. +5. Under **Certificates & secrets**, create a **New client secret**. +6. Copy the secret **Value** immediately. + +--- + +## Step 2: Configure the Bot Framework Channel + +1. In the Azure Bot resource, go to **Channels**. +2. Click **Microsoft Teams** to enable the Teams channel. +3. Accept the terms of service. +4. Under **Messaging**, ensure the messaging endpoint is set to your gateway URL: + + ``` + https://your-machine.your-tailnet.ts.net:443/teams/messages + ``` + + Replace with your actual public HTTPS endpoint. + +5. Click **Apply**. + +--- + +## Step 3: Configure OpenClaw + +Add the Teams bot credentials to your OpenClaw configuration: + +```json +{ + "msteams": { + "appId": "<microsoft-app-id>", + "appPassword": "<client-secret>", + "tenantId": "<tenant-id>" + } +} +``` + +Restart the gateway: + +```bash +systemctl --user restart openclaw-gateway +``` + +--- + +## Step 4: Create the Teams App Manifest + +The Teams app manifest is a ZIP file containing a `manifest.json` and two icon files. + +### Directory Structure + +``` +teams-app/ + manifest.json + color.png (192x192 full-color icon) + outline.png (32x32 transparent outline icon) +``` + +### manifest.json + +```json +{ + "$schema": "https://developer.microsoft.com/en-us/json-schemas/teams/v1.16/MicrosoftTeams.schema.json", + "manifestVersion": "1.16", + "version": "1.0.0", + "id": "<microsoft-app-id>", + "developer": { + "name": "Your Organization", + "websiteUrl": "https://example.com", + "privacyUrl": "https://example.com/privacy", + "termsOfUseUrl": "https://example.com/terms" + }, + "name": { + "short": "Bates", + "full": "Bates AI Assistant" + }, + "description": { + "short": "AI-powered executive assistant", + "full": "Bates is an AI-powered executive assistant that helps manage email, calendar, tasks, and more through natural conversation in Microsoft Teams."
+ }, + "icons": { + "color": "color.png", + "outline": "outline.png" + }, + "accentColor": "#4F6BED", + "bots": [ + { + "botId": "", + "scopes": ["personal", "team", "groupChat"], + "supportsFiles": false, + "isNotificationOnly": false, + "commandLists": [ + { + "scopes": ["personal"], + "commands": [ + { + "title": "help", + "description": "Show available commands" + }, + { + "title": "status", + "description": "Check system status" + } + ] + } + ] + } + ], + "permissions": ["identity", "messageTeamMembers"], + "validDomains": [] +} +``` + +> **Tip:** Replace `` with the actual App ID from Step 1. The `id` field and `botId` must match. + +### Create the ZIP Package + +```bash +cd teams-app +zip -r ../bates-teams-app.zip manifest.json color.png outline.png +``` + +### Icon Requirements + +- **color.png**: 192x192 pixels, full-color, PNG format. Used in the Teams app store and app bar. +- **outline.png**: 32x32 pixels, transparent background with a single color (white recommended), PNG format. Used in the Teams activity bar. + +--- + +## Step 5: Upload to Teams Admin Center + +1. Go to the [Teams Admin Center](https://admin.teams.microsoft.com). +2. Navigate to **Teams apps** > **Manage apps**. +3. Click **Upload new app** (or **Upload** in the toolbar). +4. Select the ZIP file you created. +5. The app should appear in the list as "Bates." + +### Assign the App to Users + +By default, uploaded custom apps may not be available to users. To enable: + +1. Go to **Teams apps** > **Setup policies**. +2. Edit the **Global (Org-wide default)** policy or create a new one. +3. Under **Installed apps**, click **Add apps** and search for "Bates." +4. Add it to the installed apps list. +5. Optionally, pin the app for easy access. + +Alternatively, individual users can find the app under **Apps** > **Built for your org** in the Teams client. + +--- + +## Step 6: Test in Teams + +1. Open Microsoft Teams. +2. Go to **Apps** and search for "Bates" under **Built for your org**. +3. Click on the app and select **Add** (or it may already be installed via policy). +4. Open a chat with the bot. +5. Send a test message: + + ``` + Hello, Bates! + ``` + +6. The bot should respond. If not, check: + + ```bash + journalctl --user -u openclaw-gateway -n 50 --no-pager + ``` + +--- + +## NODE_PATH Systemd Drop-in + +If the Teams plugin is installed as an npm global package, Node.js may not resolve its dependencies correctly when running under systemd. This is because systemd services do not inherit the same environment as your interactive shell. + +### The Problem + +The `msteams` plugin requires `botbuilder` and related packages. When the gateway starts via systemd, Node.js cannot find these packages because `NODE_PATH` is not set. + +### The Solution + +Create a systemd drop-in to set `NODE_PATH`: + +1. Create the drop-in directory: + + ```bash + mkdir -p ~/.config/systemd/user/openclaw-gateway.service.d/ + ``` + +2. Create the drop-in file: + + ```bash + cat > ~/.config/systemd/user/openclaw-gateway.service.d/msteams-deps.conf << 'EOF' + [Service] + Environment=NODE_PATH=/home/YOUR_USER/.npm-global/lib/node_modules + EOF + ``` + + Replace `YOUR_USER` with your actual username. + +3. Reload systemd and restart: + + ```bash + systemctl --user daemon-reload + systemctl --user restart openclaw-gateway + ``` + +### Why Not Use extensions/? + +You might wonder why the Teams plugin is not simply placed in `~/.openclaw/extensions/msteams/`. 
The extension auto-discovery glob (`~/.openclaw/extensions/*/index.ts`) can cause duplicate plugin loading if the plugin is also installed globally. The drop-in approach avoids this conflict by keeping the plugin in the npm global location and simply ensuring its dependencies are resolvable. + +> **Warning:** If you ever see duplicate message handling or "plugin already registered" errors, check that the plugin does not exist in both `~/.openclaw/extensions/` and the npm global path simultaneously. + +--- + +## Troubleshooting + +### Bot Not Responding in Teams + +1. Verify the messaging endpoint in Azure Bot > Configuration matches your gateway URL. +2. Check that the gateway is receiving requests (look for Teams-related log entries). +3. Ensure the app ID and password in your OpenClaw config match the Azure Bot registration. + +### Manifest Upload Fails + +- Ensure the `id` field in `manifest.json` is a valid GUID matching the Microsoft App ID. +- Validate icons meet size requirements (192x192 color, 32x32 outline). +- Use the [Teams App Validator](https://dev.teams.microsoft.com/appvalidation.html) to check for issues. + +### Messages Delayed or Lost + +- Check your HTTPS endpoint is consistently reachable. +- The Bot Framework retries failed deliveries, but long outages may cause message loss. +- Ensure the gateway is not being rate-limited (check HTTP 429 responses in logs). diff --git a/bates-enhance/docs/manual-steps-telegram.md b/bates-enhance/docs/manual-steps-telegram.md new file mode 100644 index 0000000..b947214 --- /dev/null +++ b/bates-enhance/docs/manual-steps-telegram.md @@ -0,0 +1,211 @@ +# Telegram Bot Setup + +This guide walks through creating a Telegram bot for Bates, obtaining the necessary credentials, and verifying the integration. + +--- + +## Prerequisites + +- A Telegram account +- The Telegram app (desktop or mobile) +- The OpenClaw gateway installed and running + +--- + +## Step 1: Create a Bot via BotFather + +1. Open Telegram and search for **@BotFather** (the official bot for managing bots). +2. Start a conversation and send the command: + + ``` + /newbot + ``` + +3. BotFather will ask for a **display name** for your bot. This is what users see in chats. Example: + + ``` + Bates Assistant + ``` + +4. Next, BotFather asks for a **username**. This must end in `bot` and be globally unique. Example: + + ``` + my_bates_assistant_bot + ``` + +5. On success, BotFather responds with a message containing your **bot token**. It looks like: + + ``` + 7123456789:AAH1bCdEfGhIjKlMnOpQrStUvWxYz12345 + ``` + +> **Warning:** Never share your bot token publicly. Anyone with this token can control your bot. If compromised, use `/revoke` with BotFather to generate a new one. + +--- + +## Step 2: Get Your Numeric User ID + +Bates needs your Telegram numeric user ID to restrict who can interact with the bot. + +1. Search for **@userinfobot** in Telegram and start a conversation. +2. Send any message (e.g., `/start`). +3. The bot replies with your account details, including your numeric **ID**: + + ``` + Id: 123456789 + First: Robert + Lang: en + ``` + +4. Record this number. You will configure it as the allowed user. + +> **Tip:** You can also forward a message from yourself to @userinfobot to see the sender's ID. 
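If you would rather not involve a third-party bot at all, you can read your numeric ID straight from the Bot API once your bot exists. Send your bot any direct message, then query `getUpdates`. A sketch, assuming the token from Step 1 is exported as `TELEGRAM_BOT_TOKEN`; note this only works while no webhook is configured:

```bash
# Print the sender ID and first name for each pending update.
curl -s "https://api.telegram.org/bot${TELEGRAM_BOT_TOKEN}/getUpdates" |
python3 -c '
import json, sys
for update in json.load(sys.stdin).get("result", []):
    sender = (update.get("message") or {}).get("from") or {}
    print(sender.get("id"), sender.get("first_name"))'
```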
+ +--- + +## Step 3: Configure OpenClaw + +Add the Telegram bot credentials to your OpenClaw configuration: + +```json +{ + "telegram": { + "botToken": "<your-bot-token>", + "allowedUsers": [123456789] + } +} +``` + +The `allowedUsers` array restricts which Telegram user IDs can interact with the bot. Messages from other users will be ignored. + +--- + +## Step 4: Test the Bot + +1. Restart the OpenClaw gateway to pick up the new configuration: + + ```bash + systemctl --user restart openclaw-gateway + ``` + +2. Open Telegram and find your bot by its username (e.g., `@my_bates_assistant_bot`). + +3. Send `/start` to initiate a conversation. + +4. Send a test message like: + + ``` + Hello, Bates! + ``` + +5. Verify the bot responds. Check gateway logs if there is no response: + + ```bash + journalctl --user -u openclaw-gateway -n 50 --no-pager + ``` + +--- + +## Step 5: Customize the Bot Profile + +Go back to **@BotFather** to customize your bot's appearance. + +### Set the Bot Description + +The description appears when users first open a chat with the bot (before sending any messages): + +``` +/setdescription +``` + +Then select your bot and enter a description, for example: + +``` +Bates is your AI-powered executive assistant. Send a message to get started. +``` + +### Set the About Text + +This appears in the bot's profile page: + +``` +/setabouttext +``` + +Example: + +``` +AI executive assistant powered by OpenClaw. Manages email, calendar, tasks, and more. +``` + +### Set the Bot Profile Picture + +``` +/setuserpic +``` + +Select your bot, then upload a square image (minimum 150x150 pixels). A professional-looking avatar or logo works well. + +### Set Bot Commands (Menu) + +Define slash commands that appear in the Telegram command menu: + +``` +/setcommands +``` + +Select your bot and enter commands in the format `command - description`: + +``` +start - Start a conversation +status - Check system status +help - Show available commands +``` + +--- + +## Optional: Webhook vs Polling + +By default, OpenClaw uses **long polling** to receive Telegram updates. For production use, you may want to switch to **webhooks** for lower latency. + +### Set Up Webhook + +The gateway must be accessible over HTTPS. If you are using Tailscale Serve, your webhook URL would be: + +``` +https://your-machine.your-tailnet.ts.net:443/telegram/webhook +``` + +Configure in OpenClaw: + +```json +{ + "telegram": { + "botToken": "<your-bot-token>", + "allowedUsers": [123456789], + "webhook": { + "enabled": true, + "url": "https://your-machine.your-tailnet.ts.net:443/telegram/webhook" + } + } +} +``` + +### Verify Webhook + +After restarting the gateway, verify the webhook is set: + +```bash +curl "https://api.telegram.org/bot<your-bot-token>/getWebhookInfo" | python3 -m json.tool +``` + +You should see your URL in the response along with `"has_custom_certificate": false` and no pending errors. + +--- + +## Security Considerations + +- **Restrict allowed users**: Always set `allowedUsers` to prevent unauthorized access. +- **Rotate the bot token** periodically using `/revoke` with BotFather. +- **Do not embed the token** in version-controlled files. Use environment variables or a secrets manager. +- **Monitor activity**: Check gateway logs periodically for unexpected user IDs attempting to interact with the bot.
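After rotating the token with `/revoke`, a quick `getMe` call confirms the new token is live before you update the gateway configuration (a sketch, assuming the fresh token is exported as `TELEGRAM_BOT_TOKEN`):

```bash
# Expect {"ok": true, ...} with your bot's username; a 401 means the token is wrong.
curl -s "https://api.telegram.org/bot${TELEGRAM_BOT_TOKEN}/getMe" | python3 -m json.tool
```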
diff --git a/bates-enhance/docs/manual-steps-twilio.md b/bates-enhance/docs/manual-steps-twilio.md new file mode 100644 index 0000000..8be680d --- /dev/null +++ b/bates-enhance/docs/manual-steps-twilio.md @@ -0,0 +1,215 @@ +# Twilio Voice Calling Setup + +This guide covers creating a Twilio account, acquiring a phone number, configuring webhooks, and setting up voice calling with the OpenClaw gateway. + +--- + +## Prerequisites + +- The OpenClaw gateway installed and running +- A publicly accessible HTTPS endpoint (e.g., via Tailscale Serve on port 8443) +- A payment method for Twilio (trial accounts work for testing) + +--- + +## Step 1: Create a Twilio Account + +1. Go to [twilio.com](https://www.twilio.com) and sign up for an account. +2. Complete the verification process (phone number and email). +3. Once in the console, note your: + + - **Account SID** (starts with `AC`) + - **Auth Token** + + These are on the main [Console Dashboard](https://console.twilio.com). + +> **Tip:** Trial accounts provide a small credit for testing. You can make calls to verified numbers only. Upgrade to a paid account for full functionality. + +--- + +## Step 2: Get a Phone Number + +1. In the Twilio Console, go to **Phone Numbers** > **Manage** > **Buy a number**. +2. Search for a number in your preferred country/area code. +3. Ensure the number has **Voice** capability enabled. +4. Click **Buy** and confirm. +5. Note the phone number (e.g., `+15551234567`). + +### Number Configuration + +After purchasing, go to **Phone Numbers** > **Manage** > **Active Numbers** and click on your number. + +Under **Voice Configuration**: + +- **Configure with:** Webhooks, TwiML Bins, Functions, etc. +- **A call comes in:** Set to **Webhook** (configured in Step 3) +- **HTTP method:** POST + +--- + +## Step 3: Set Up the Webhook URL + +The gateway needs to receive incoming call notifications from Twilio. This requires a publicly accessible HTTPS URL. + +### Using Tailscale Serve + +If you use Tailscale for remote access, configure Tailscale Serve to expose the voice webhook port: + +```bash +# Expose port 8443 for voice webhooks +tailscale serve https:8443 / http://localhost:18789 +``` + +Your webhook URL will be: + +``` +https://your-machine.your-tailnet.ts.net:8443/webhook +``` + +### Configure the Webhook in Twilio + +1. Go to your active phone number in the Twilio Console. +2. Under **Voice Configuration** > **A call comes in**: + + - **Webhook URL:** `https://your-machine.your-tailnet.ts.net:8443/webhook` + - **HTTP Method:** POST + +3. Under **Call status changes** (optional but recommended): + + - **Status callback URL:** `https://your-machine.your-tailnet.ts.net:8443/webhook/status` + - **HTTP Method:** POST + +4. Click **Save configuration**. 
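Before pointing Twilio at the URL, it is worth confirming the endpoint answers from outside your network. A hedged sketch that mimics the shape of a Twilio voice webhook (form-encoded POST with a few standard parameters and a dummy Call SID); if the gateway validates Twilio signatures it will reject this request, but receiving any HTTP status at all proves reachability:

```bash
# Simulate the shape of an incoming Twilio voice webhook.
curl -s -X POST "https://your-machine.your-tailnet.ts.net:8443/webhook" \
  -H "Content-Type: application/x-www-form-urlencoded" \
  --data-urlencode "CallSid=CA00000000000000000000000000000000" \
  --data-urlencode "From=+15550000000" \
  --data-urlencode "To=+15551234567" \
  --data-urlencode "CallStatus=ringing" \
  -o /dev/null -w "HTTP %{http_code}\n"
```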
+ +--- + +## Step 4: Configure OpenClaw + +Add the Twilio credentials and voice settings to your OpenClaw configuration: + +```json +{ + "voiceCall": { + "provider": "twilio", + "twilio": { + "accountSid": "<your-account-sid>", + "authToken": "<your-auth-token>", + "phoneNumber": "+15551234567" + }, + "streaming": { + "enabled": true + }, + "outbound": { + "defaultMode": "conversation" + }, + "webhook": { + "publicUrl": "https://your-machine.your-tailnet.ts.net:8443/webhook" + } + } +} +``` + +### Configuration Options + +| Key | Description | +|---|---| +| `streaming.enabled` | Enable real-time audio streaming (recommended for natural conversation) | +| `outbound.defaultMode` | `"conversation"` for interactive calls, `"announcement"` for one-way messages | +| `webhook.publicUrl` | The publicly accessible URL that Twilio will call | + +Restart the gateway after updating the configuration: + +```bash +systemctl --user restart openclaw-gateway +``` + +--- + +## Step 5: Configure TTS (Text-to-Speech) + +Voice calls require a TTS engine to convert Bates's text responses to speech. ElevenLabs is the recommended provider (see the [ElevenLabs setup guide](manual-steps-elevenlabs.md) for creating a voice clone). + +```json +{ + "voiceCall": { + "tts": { + "provider": "elevenlabs", + "elevenlabs": { + "apiKey": "<your-elevenlabs-api-key>", + "voiceId": "<your-voice-id>", + "model": "eleven_multilingual_v2" + } + } + } +} +``` + +--- + +## Step 6: Test Inbound Calls + +1. Call your Twilio phone number from any phone. +2. You should hear the gateway answer and Bates respond via TTS. +3. Check the gateway logs for activity: + + ```bash + journalctl --user -u openclaw-gateway -n 50 --no-pager + ``` + +4. If the call does not connect, check: + - The webhook URL is reachable from the internet + - Twilio can reach your endpoint (check [Twilio Debugger](https://console.twilio.com/us1/monitor/logs/debugger)) + - The gateway is running and listening + +### Twilio Debugger + +The [Twilio Debugger](https://console.twilio.com/us1/monitor/logs/debugger) is invaluable for troubleshooting. Common errors: + +| Error Code | Meaning | +|---|---| +| 11200 | HTTP retrieval failure (webhook unreachable) | +| 11205 | HTTP connection failure | +| 12100 | Document parse failure (invalid TwiML response) | +| 12200 | Schema validation warning | + +--- + +## Step 7: Test Outbound Calls + +Outbound calls are initiated by Bates (e.g., to notify you of something urgent). + +1. Trigger an outbound call via the gateway API or by asking Bates to call you. +2. Your phone should ring with the Twilio number as the caller ID. + +> **Warning:** On trial accounts, outbound calls can only reach **verified** phone numbers. Add your phone number under **Phone Numbers** > **Verified Caller IDs** in the Twilio Console. + +### Testing from the Command Line + +You can test outbound calling with a curl request to the gateway: + +```bash +curl -X POST http://localhost:18789/api/voice/call \ + -H "Content-Type: application/json" \ + -d '{ + "to": "+15559876543", + "message": "Hello, this is a test call from Bates." + }' +``` + +--- + +## Audio Quality Tips + +- **Use streaming mode** (`streaming.enabled: true`) for more natural conversation flow. +- **Minimize latency**: Ensure your Tailscale connection is stable. Voice calls are sensitive to latency above 300ms. +- **TTS quality**: The `eleven_multilingual_v2` model provides the best quality. Avoid cheaper models for production voice calls. +- **Background noise**: If Bates picks up background noise during calls, this is a Twilio-side issue.
Consider enabling noise suppression in your Twilio settings. + +--- + +## Security Considerations + +- **Validate Twilio signatures**: The gateway should verify that incoming webhook requests are genuinely from Twilio using the `X-Twilio-Signature` header. This prevents spoofed requests. +- **Restrict outbound calling**: Configure an allow-list of phone numbers that Bates can call to prevent misuse. +- **Rate limiting**: Set reasonable limits on outbound calls per hour/day. +- **Store credentials securely**: Never commit Twilio Account SID or Auth Token to version control. diff --git a/bates-enhance/docs/troubleshooting.md b/bates-enhance/docs/troubleshooting.md new file mode 100644 index 0000000..f52e4a0 --- /dev/null +++ b/bates-enhance/docs/troubleshooting.md @@ -0,0 +1,1040 @@ +# Troubleshooting Guide + +This document covers common issues and their solutions, organized by category. For each issue, the **Symptom**, **Cause**, and **Fix** are provided. + +--- + +## Table of Contents + +- [Installation](#installation) +- [Gateway](#gateway) +- [Authentication](#authentication) +- [Telegram](#telegram) +- [Microsoft Teams](#microsoft-teams) +- [Voice Calling](#voice-calling) +- [Dashboard](#dashboard) +- [Search and Embeddings](#search-and-embeddings) +- [Agents](#agents) +- [Cron Jobs](#cron-jobs) +- [Patches](#patches) + +--- + +## Installation + +### WSL2 Not Enabled + +**Symptom:** You cannot run Linux commands or `wsl` returns "WSL2 is not installed." + +**Cause:** Windows Subsystem for Linux is not enabled or not set to version 2. + +**Fix:** + +1. Open PowerShell as Administrator. +2. Run: + + ```powershell + wsl --install + ``` + +3. Restart your computer. +4. Verify: + + ```powershell + wsl --list --verbose + ``` + + Ensure your distribution shows VERSION 2. + +--- + +### Node.js Version Incorrect + +**Symptom:** The gateway fails to start with syntax errors or "Unexpected token" messages. Or `npm install` fails with compatibility warnings. + +**Cause:** OpenClaw requires Node.js 20 or later. An older version is installed. + +**Fix:** + +1. Check your current version: + + ```bash + node --version + ``` + +2. Install the correct version using nvm: + + ```bash + curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash + source ~/.bashrc + nvm install 20 + nvm use 20 + nvm alias default 20 + ``` + +3. Verify: + + ```bash + node --version # Should show v20.x.x or later + ``` + +--- + +### npm Global Path Issues + +**Symptom:** Globally installed packages (e.g., `openclaw`) are not found, or you get EACCES permission errors when running `npm install -g`. + +**Cause:** The default npm global directory requires root permissions, or `~/.npm-global` is not in your PATH. + +**Fix:** + +1. Configure npm to use a user-writable directory: + + ```bash + mkdir -p ~/.npm-global + npm config set prefix '~/.npm-global' + ``` + +2. Add to your `~/.bashrc` or `~/.profile`: + + ```bash + export PATH="$HOME/.npm-global/bin:$PATH" + ``` + +3. Reload: + + ```bash + source ~/.bashrc + ``` + +4. Test: + + ```bash + npm install -g openclaw + openclaw --version + ``` + +--- + +## Gateway + +### Gateway Will Not Start + +**Symptom:** `systemctl --user status openclaw-gateway` shows "failed" or "inactive." + +**Cause:** Configuration error, port conflict, or missing dependencies. + +**Fix:** + +1. Check logs for the specific error: + + ```bash + journalctl --user -u openclaw-gateway -n 100 --no-pager + ``` + +2. 
Validate your configuration file: + + ```bash + cat ~/.openclaw/openclaw.json | python3 -m json.tool + ``` + + Fix any JSON syntax errors. + +3. Ensure no other process is using port 18789: + + ```bash + ss -tlnp | grep 18789 + ``` + +4. Try starting manually to see errors in real time: + + ```bash + openclaw gateway + ``` + +--- + +### Port Already in Use + +**Symptom:** Gateway logs show "EADDRINUSE: address already in use :::18789." + +**Cause:** Another process is already bound to port 18789, possibly a previous gateway instance that did not shut down cleanly. + +**Fix:** + +1. Find the process: + + ```bash + ss -tlnp | grep 18789 + ``` + +2. Kill it: + + ```bash + kill <PID> + ``` + +3. Restart the gateway: + + ```bash + systemctl --user restart openclaw-gateway + ``` + +--- + +### Systemd Service Errors + +**Symptom:** `systemctl --user start openclaw-gateway` silently fails or shows "degraded." + +**Cause:** The systemd user instance may not be running, or the service file has errors. + +**Fix:** + +1. Ensure lingering is enabled (allows user services to run without an active login session): + + ```bash + sudo loginctl enable-linger $(whoami) + ``` + +2. Check the service file for errors: + + ```bash + systemctl --user cat openclaw-gateway + ``` + +3. Reload after any changes: + + ```bash + systemctl --user daemon-reload + systemctl --user restart openclaw-gateway + ``` + +--- + +## Authentication + +### Token Expired + +**Symptom:** API calls fail with 401 Unauthorized. Logs show "token expired" or "invalid session." + +**Cause:** The Claude authentication token has expired and needs to be refreshed. + +**Fix:** + +1. Re-run the token setup: + + ```bash + claude setup-token + ``` + +2. Update OpenClaw: + + ```bash + openclaw models auth setup-token --provider anthropic + ``` + +3. Restart the gateway: + + ```bash + systemctl --user restart openclaw-gateway + ``` + +--- + +### API Key Invalid + +**Symptom:** Requests fail with "Invalid API key" or 403 Forbidden. + +**Cause:** The API key is incorrect, expired, or associated with a deactivated account. + +**Fix:** + +1. Verify the key: + + ```bash + curl -s https://api.anthropic.com/v1/messages \ + -H "x-api-key: $ANTHROPIC_API_KEY" \ + -H "anthropic-version: 2023-06-01" \ + -H "content-type: application/json" \ + -d '{"model":"claude-sonnet-4-20250514","max_tokens":10,"messages":[{"role":"user","content":"hi"}]}' + ``` + +2. If invalid, generate a new key from the [Anthropic Console](https://console.anthropic.com/settings/keys). + +3. Update the key in your auth profiles: + + ```bash + cat ~/.openclaw/agents/main/agent/auth-profiles.json + ``` + + Update the relevant profile and restart. + +--- + +### 1M Context Window Causes Auth Failure + +**Symptom:** ALL Anthropic API requests fail with 401 "OAuth authentication is currently not supported." This happens immediately after enabling `context1m: true`. + +**Cause:** The `context1m: true` setting adds `anthropic-beta: context-1m-2025-08-07` via `options.headers`. The pi-ai library's `mergeHeaders` uses `Object.assign`, which **overwrites** the base `anthropic-beta` header containing the required OAuth betas (`claude-code-20250219,oauth-2025-04-20`). Without the OAuth beta header, Anthropic rejects the OAuth token. + +**Fix:** + +Do NOT enable `context1m: true` in `agents.defaults.models.*.params`. Remove it if present: + +1. Open `~/.openclaw/openclaw.json` +2. Search for `context1m` +3. Remove any `"context1m": true` entries +4.
Restart the gateway + +> **Note:** This affects Claude Max subscription (token-type) auth profiles. API key profiles are not affected but will still lose the 1M context beta header. + +--- + +### Subscription Issues + +**Symptom:** The token profile works initially but fails intermittently. Logs mention "subscription" or "rate limit." + +**Cause:** The subscription may have lapsed, or you have exceeded the subscription's usage limits. + +**Fix:** + +1. Check subscription status at [claude.ai/settings](https://claude.ai/settings). +2. If the subscription is active, wait for rate limits to reset. +3. If the subscription has lapsed, either renew it or switch to the API key fallback profile by updating `auth-profiles.json`. + +--- + +## Telegram + +### Bot Not Responding + +**Symptom:** You send messages to the Telegram bot but receive no response. + +**Cause:** Multiple possible causes: gateway not running, bot token incorrect, user ID not in allowedUsers, or webhook/polling not working. + +**Fix:** + +1. Verify the gateway is running: + + ```bash + systemctl --user status openclaw-gateway + ``` + +2. Check logs for Telegram-related errors: + + ```bash + journalctl --user -u openclaw-gateway -n 50 --no-pager | grep -i telegram + ``` + +3. Verify your user ID is in the `allowedUsers` array in the configuration. + +4. Test the bot token directly: + + ```bash + curl "https://api.telegram.org/bot<your-bot-token>/getMe" + ``` + + If this fails, the token is invalid. Get a new one from @BotFather. + +--- + +### Webhook Errors + +**Symptom:** Telegram webhook is set but the bot does not receive messages. `getWebhookInfo` shows errors. + +**Cause:** The webhook URL is unreachable, SSL certificate issues, or Telegram cannot connect. + +**Fix:** + +1. Check webhook status: + + ```bash + curl "https://api.telegram.org/bot<your-bot-token>/getWebhookInfo" | python3 -m json.tool + ``` + +2. Look for `last_error_message` in the response. + +3. If SSL errors: ensure your HTTPS certificate is valid (Tailscale Serve handles this automatically). + +4. If connection errors: verify Tailscale Funnel is enabled and your endpoint is publicly accessible. + +5. To reset to polling mode: + + ```bash + curl "https://api.telegram.org/bot<your-bot-token>/deleteWebhook" + ``` + +--- + +### User ID Mismatch + +**Symptom:** Bot receives messages (visible in logs) but does not respond to you. May respond to other users. + +**Cause:** Your numeric Telegram user ID does not match what is configured in `allowedUsers`. + +**Fix:** + +1. Send a message to @userinfobot to confirm your numeric ID. +2. Update the `allowedUsers` array in your configuration. +3. Restart the gateway. + +--- + +## Microsoft Teams + +### Manifest Upload Fails + +**Symptom:** The Teams Admin Center rejects the app ZIP file with validation errors. + +**Cause:** Invalid manifest structure, wrong icon sizes, or missing required fields. + +**Fix:** + +1. Validate the manifest using the [Teams App Validator](https://dev.teams.microsoft.com/appvalidation.html). +2. Ensure icons are exactly 192x192 (color) and 32x32 (outline) in PNG format. +3. Verify the `id` field is a valid GUID matching your Azure Bot App ID. +4. Check that `manifestVersion` matches the schema version. + +--- + +### Bot Not Responding in Teams + +**Symptom:** Messages sent to the bot in Teams show as delivered but no response comes back. + +**Cause:** Messaging endpoint misconfigured, credentials wrong, or gateway not processing Teams messages. + +**Fix:** + +1.
+
+---
+
+### Bot Not Responding in Teams
+
+**Symptom:** Messages sent to the bot in Teams show as delivered but no response comes back.
+
+**Cause:** Messaging endpoint misconfigured, credentials wrong, or gateway not processing Teams messages.
+
+**Fix:**
+
+1. Verify the messaging endpoint in Azure Bot > Configuration matches your gateway URL.
+2. Check that `appId` and `appPassword` in the OpenClaw config match the Azure registration.
+3. Look for Teams-related errors in gateway logs:
+
+   ```bash
+   journalctl --user -u openclaw-gateway -n 50 --no-pager | grep -i teams
+   ```
+
+4. Test that the endpoint is reachable:
+
+   ```bash
+   curl -s https://your-endpoint/teams/messages
+   ```
+
+---
+
+### "Cannot perform 'set' on a proxy that has been revoked"
+
+**Symptom:** All Teams DM deliveries fail. Gateway logs show `Cannot perform 'set' on a proxy that has been revoked`. Messages may loop with "The agent encountered an error or bug."
+
+**Cause:** The Bot Framework SDK wraps `TurnContext` in `Proxy.revocable()`, which is revoked when the inbound HTTP request completes. The default DM `replyStyle: "thread"` uses this captured proxy directly via `ctx.sendActivity()`, so any message sent after the HTTP request completes hits a revoked proxy.
+
+**Fix:**
+
+1. Open the policy file:
+
+   ```bash
+   nano ~/.npm-global/lib/node_modules/openclaw/extensions/msteams/src/policy.ts
+   ```
+
+2. Find the `resolveMSTeamsReplyPolicy` function (around lines 216-224).
+
+3. Change the DM branch from:
+
+   ```typescript
+   return { requireMention: false, replyStyle: "thread" };
+   ```
+
+   to:
+
+   ```typescript
+   return { requireMention: false, replyStyle: "top-level" };
+   ```
+
+4. Restart the gateway. The `"top-level"` style uses `adapter.continueConversation()`, which creates a fresh `TurnContext` and avoids the revoked proxy.
+
+> **Note:** This only affects DMs. Group/channel replies use their own configured replyStyle and are unaffected. The `reapply-patches.sh` script handles this automatically.
+
+---
+
+### Teams Channel Auto-Restart Loop
+
+**Symptom:** Gateway logs show the msteams channel starting, then immediately restarting, in an infinite loop. Memory usage climbs.
+
+**Cause:** In v2026.2.17+, `monitorMSTeamsProvider()` resolves its promise immediately after setup. The gateway interprets a resolved `startAccount()` promise as "channel stopped" and triggers a restart.
+
+**Fix:**
+
+The `startAccount()` function must block until the abort signal fires. This is handled by the channel-bridge patch (see `patches/channel-bridge.patch.ts`). The key code:
+
+```typescript
+if (ctx.abortSignal && !ctx.abortSignal.aborted) {
+  await new Promise<void>((resolve) => {
+    ctx.abortSignal!.addEventListener("abort", () => resolve(), { once: true });
+  });
+}
+```
+
+Run `reapply-patches.sh` or apply the channel-bridge patch manually.
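+
+To confirm you are hitting this loop (and, after patching, that it is gone), watch the channel lifecycle messages live:
+
+```bash
+# Repeated start/restart lines for the msteams channel indicate the loop
+journalctl --user -u openclaw-gateway -f | grep -i msteams
+```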
+
+---
+
+### NODE_PATH Issues
+
+**Symptom:** Gateway starts but Teams integration fails with "Cannot find module 'botbuilder'" or similar import errors.
+
+**Cause:** When running under systemd, the `NODE_PATH` environment variable is not set, so Node.js cannot find globally installed packages.
+
+**Fix:**
+
+1. Create the systemd drop-in directory:
+
+   ```bash
+   mkdir -p ~/.config/systemd/user/openclaw-gateway.service.d/
+   ```
+
+2. Create the environment file:
+
+   ```bash
+   cat > ~/.config/systemd/user/openclaw-gateway.service.d/msteams-deps.conf << 'EOF'
+   [Service]
+   Environment=NODE_PATH=/home/YOUR_USER/.npm-global/lib/node_modules
+   EOF
+   ```
+
+   Replace `YOUR_USER` with your username.
+
+3. Reload and restart:
+
+   ```bash
+   systemctl --user daemon-reload
+   systemctl --user restart openclaw-gateway
+   ```
+
+---
+
+## Voice Calling
+
+### Twilio Webhook Not Reachable
+
+**Symptom:** Incoming calls ring but immediately disconnect or play Twilio's default error message. The Twilio Debugger shows error 11200.
+
+**Cause:** Twilio cannot reach your webhook URL.
+
+**Fix:**
+
+1. Verify the webhook URL is correct in the Twilio Console.
+2. Test the URL from outside your network:
+
+   ```bash
+   curl -s -o /dev/null -w "%{http_code}" https://your-endpoint:8443/webhook
+   ```
+
+3. Ensure Tailscale Funnel is enabled for port 8443:
+
+   ```bash
+   sudo tailscale funnel status
+   ```
+
+4. Check that the gateway is running and listening:
+
+   ```bash
+   ss -tlnp | grep 18789
+   ```
+
+---
+
+### TTS Not Working
+
+**Symptom:** Voice calls connect but Bates does not speak, or you hear silence/static instead of speech.
+
+**Cause:** ElevenLabs API key invalid, voice ID wrong, or TTS endpoint unreachable.
+
+**Fix:**
+
+1. Test TTS directly:
+
+   ```bash
+   curl -s -X POST "https://api.elevenlabs.io/v1/text-to-speech/<VOICE_ID>" \
+     -H "xi-api-key: <API_KEY>" \
+     -H "Content-Type: application/json" \
+     -d '{"text":"Test","model_id":"eleven_multilingual_v2"}' \
+     -o test.mp3
+   ```
+
+2. If this fails, verify the API key and voice ID in the ElevenLabs dashboard.
+3. Check that your ElevenLabs character quota has not been exceeded.
+
+---
+
+### Audio Quality Issues
+
+**Symptom:** Voice is choppy, delayed, or has echo.
+
+**Cause:** High latency, network issues, or incorrect streaming configuration.
+
+**Fix:**
+
+1. Enable streaming if it is not already enabled:
+
+   ```json
+   { "streaming": { "enabled": true } }
+   ```
+
+2. Check network latency to ElevenLabs:
+
+   ```bash
+   curl -o /dev/null -s -w "Time: %{time_total}s\n" https://api.elevenlabs.io/v1/voices
+   ```
+
+3. If latency is consistently above 300ms, consider a faster TTS model (`eleven_turbo_v2_5`).
+4. Ensure your internet connection is stable (run a speed test).
+
+---
+
+## Dashboard
+
+### Dashboard Not Loading
+
+**Symptom:** Navigating to `http://localhost:18789/dashboard` returns a blank page, 404, or connection refused.
+
+**Cause:** Dashboard plugin not loaded, gateway not running, or port mismatch.
+
+**Fix:**
+
+1. Verify the gateway is running:
+
+   ```bash
+   systemctl --user status openclaw-gateway
+   ```
+
+2. Check that the dashboard plugin is loaded:
+
+   ```bash
+   journalctl --user -u openclaw-gateway -n 100 --no-pager | grep -i dashboard
+   ```
+
+3. Ensure the dashboard extension exists:
+
+   ```bash
+   ls ~/.openclaw/extensions/dashboard/
+   ```
+
+4. Try accessing it directly:
+
+   ```bash
+   curl -s http://localhost:18789/dashboard | head -20
+   ```
+
+---
+
+### WebSocket Connection Failed
+
+**Symptom:** Dashboard loads but shows "Disconnected" or panels do not update in real time.
+
+**Cause:** WebSocket upgrade failed, usually due to a proxy or reverse proxy stripping the Upgrade header.
+
+**Fix:**
+
+1. If accessing through Tailscale Serve, WebSocket should work natively. Verify:
+
+   ```bash
+   curl -s -i -N \
+     -H "Connection: Upgrade" \
+     -H "Upgrade: websocket" \
+     -H "Sec-WebSocket-Version: 13" \
+     -H "Sec-WebSocket-Key: dGVzdA==" \
+     http://localhost:18789/ws
+   ```
+
+   You should see a 101 Switching Protocols response.
+
+2.
If using nginx or another reverse proxy, ensure WebSocket headers are forwarded: + + ```nginx + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + ``` + +--- + +### Blank Panels + +**Symptom:** Dashboard loads and WebSocket connects, but individual panels show no data. + +**Cause:** The data source for the panel is not available (e.g., cost tracker not running, no session data yet). + +**Fix:** + +1. Check which panels are blank and what data they require. +2. For cost panels: verify the cost tracker plugin is loaded and `data/daily-costs.json` exists. +3. For session panels: ensure at least one agent session has run. +4. For delegation panels: check `delegations.json` exists and the wrapper script `run-delegation.sh` is being used. +5. Wait for a data refresh cycle (typically 5-60 seconds depending on the panel). + +--- + +## Search and Embeddings + +### Ollama Not Running + +**Symptom:** Search index scripts fail with "Connection refused" when trying to generate embeddings. + +**Cause:** The Ollama service is not running or not listening on the expected port. + +**Fix:** + +1. Start Ollama: + + ```bash + ollama serve & + ``` + + Or via systemd: + + ```bash + sudo systemctl start ollama + ``` + +2. Verify it is running: + + ```bash + curl http://localhost:11434/api/tags + ``` + +3. Ensure the embedding model is downloaded: + + ```bash + ollama pull nomic-embed-text + ``` + +--- + +### Embedding Failures + +**Symptom:** Search index sync runs but logs show embedding errors or documents are indexed without embeddings. + +**Cause:** Ollama out of memory, model not loaded, or text too long. + +**Fix:** + +1. Check Ollama logs for OOM errors. +2. Verify the embedding model is available: + + ```bash + ollama list + ``` + +3. If memory is an issue, close other applications or restart Ollama. +4. Check the document being indexed. Very long documents may need chunking. + +--- + +### Sync Stuck + +**Symptom:** The search index sync starts but makes no progress, or gets stuck processing the same batch. + +**Cause:** Database lock, network timeout, or a single problematic document blocking the pipeline. + +**Fix:** + +1. Check for database locks: + + ```bash + python3 -c " + import sqlite3 + conn = sqlite3.connect('$HOME/.openclaw/search-index/db/search.db') + print(conn.execute('PRAGMA journal_mode').fetchone()) + conn.close() + " + ``` + +2. If stuck on a specific document, check the scan offsets: + + ```bash + python3 -c " + import json + with open('$HOME/.openclaw/search-index/data/scan-offsets.json') as f: + print(json.dumps(json.load(f), indent=2)) + " + ``` + +3. Try running with `--resume` to skip past the problematic batch: + + ```bash + ~/.openclaw/search-index/scripts/sync.sh --resume + ``` + +4. As a last resort, delete offsets to force a rescan (this re-processes everything): + + ```bash + rm ~/.openclaw/search-index/data/scan-offsets.json + ``` + +--- + +## Agents + +### Deputy Not Starting + +**Symptom:** A deputy agent is requested but does not start. Logs show "agent not found" or "failed to spawn." + +**Cause:** The agent configuration is missing, or the on-demand system is not configured correctly. + +**Fix:** + +1. Verify the agent exists: + + ```bash + ls ~/.openclaw/agents/ + ``` + +2. Check the agent's configuration files are valid. +3. Use the agent control script to check status: + + ```bash + ~/.openclaw/scripts/agent-ctl.sh status + ``` + +4. 
Try starting manually:
+
+   ```bash
+   ~/.openclaw/scripts/agent-ctl.sh start <agent-name>
+   ```
+
+---
+
+### Spawn Depth Errors
+
+**Symptom:** A sub-agent task fails with "maximum spawn depth exceeded" or similar.
+
+**Cause:** The `maxSpawnDepth` setting (default: 2) limits how deeply agents can delegate to other agents. A chain like main -> deputy -> sub-deputy exceeds the limit.
+
+**Fix:**
+
+1. Restructure the task to reduce delegation depth.
+2. If deeper delegation is genuinely needed, increase `maxSpawnDepth` in the gateway configuration (not recommended for most setups).
+3. Check that deputies are not unnecessarily spawning sub-agents for tasks they could handle directly.
+
+---
+
+### Idle Watcher Issues
+
+**Symptom:** Agents are stopped prematurely, or idle agents are not being stopped (consuming memory).
+
+**Cause:** The idle watcher cron job is misconfigured or not running.
+
+**Fix:**
+
+1. Verify the cron job is active:
+
+   ```bash
+   crontab -l | grep idle-watcher
+   ```
+
+2. Test it manually:
+
+   ```bash
+   ~/.openclaw/scripts/agent-idle-watcher.sh
+   ```
+
+3. Check the idle timeout setting (default: 10 minutes). Adjust it if agents need more time.
+
+---
+
+## Cron Jobs
+
+### Jobs Not Firing
+
+**Symptom:** Scheduled cron jobs are not executing at the expected times.
+
+**Cause:** The cron configuration is incorrect, the gateway is not running, or the job is in an error state.
+
+**Fix:**
+
+1. Check job status:
+
+   ```bash
+   python3 -c "
+   import json
+   with open('$HOME/.openclaw/cron/jobs.json') as f:
+       jobs = json.load(f)
+   for job in jobs:
+       print(f\"{job.get('id', 'unknown')}: {job.get('status', 'unknown')} - {job.get('schedule', 'no schedule')}\")
+   "
+   ```
+
+2. Verify the gateway cron engine is running by checking logs:
+
+   ```bash
+   journalctl --user -u openclaw-gateway -n 100 --no-pager | grep -i cron
+   ```
+
+3. Check that the schedule expression is valid (uses standard cron syntax).
+
+---
+
+### Delivery Target Missing
+
+**Symptom:** Cron jobs run but fail with "cron delivery target is missing" in logs.
+
+**Cause:** Jobs with `delivery.channel: "msteams"` (or other channels) do not have an explicit `delivery.to` field. The gateway tries to resolve the target from the last interaction, which may not be available after a restart.
+
+**Fix:**
+
+Add an explicit `delivery.to` field to every cron job that has a delivery section:
+
+```json
+{
+  "delivery": {
+    "channel": "msteams",
+    "to": "user:<TEAMS_USER_ID>"
+  }
+}
+```
+
+> **Tip:** Find your Teams user ID by running a Graph API query or checking the Teams admin center. Do not use display names or email addresses.
+
+---
+
+### Timezone Issues
+
+**Symptom:** Cron jobs fire at unexpected times (e.g., 5 hours early or late).
+
+**Cause:** The cron schedule is interpreted in UTC but you expected local time, or the system timezone is incorrect.
+
+**Fix:**
+
+1. Check the system timezone:
+
+   ```bash
+   timedatectl
+   ```
+
+2. Set the correct timezone:
+
+   ```bash
+   sudo timedatectl set-timezone America/New_York  # or your timezone
+   ```
+
+3. Adjust cron schedules if needed. If the gateway uses UTC internally, convert your local time to UTC for the schedule expression.
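+
+GNU date can do the local-to-UTC conversion for you (a sketch; substitute your own timezone and time):
+
+```bash
+# What is 09:00 America/New_York in UTC? Useful when writing UTC schedule expressions.
+TZ=UTC date -d 'TZ="America/New_York" 09:00'
+```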
+
+---
+
+## Patches
+
+### Patch Failed After Update
+
+**Symptom:** After running `openclaw update`, customizations (cost footer, Adaptive Cards, etc.) stop working.
+
+**Cause:** Updates overwrite the dist files that contain your patches. File names change with each release (hash suffixes), so patches must be reapplied at the new locations.
+
+**Fix:**
+
+1. Check the post-update checklist for the full repair procedure.
+2. Identify the new dist file names:
+
+   ```bash
+   ls ~/.npm-global/lib/node_modules/openclaw/dist/reply-*.js
+   ls ~/.npm-global/lib/node_modules/openclaw/dist/deliver-*.js
+   ```
+
+3. Compare with your patch backups to understand what needs to be reapplied:
+
+   ```bash
+   ls ~/.openclaw/patch-backup/
+   ```
+
+4. Reapply each patch to the new files, adjusting for any code changes in the update.
+
+> **Warning:** Always back up the new dist files before patching, in case you need to start over.
+
+---
+
+### How to Restore from Backups
+
+**Symptom:** A patch goes wrong and you need to revert to the unpatched version.
+
+**Cause:** Manual edits to dist files introduced syntax errors or logic bugs.
+
+**Fix:**
+
+1. Check available backups:
+
+   ```bash
+   ls ~/.openclaw/patch-backup/
+   ```
+
+2. Backups are organized by version. Copy the original files back:
+
+   ```bash
+   cp ~/.openclaw/patch-backup/<version>/<file>.js \
+      ~/.npm-global/lib/node_modules/openclaw/dist/<file>.js
+   ```
+
+3. Restart the gateway:
+
+   ```bash
+   systemctl --user restart openclaw-gateway
+   ```
+
+4. If no backup exists for the current version, reinstall:
+
+   ```bash
+   npm install -g openclaw@<version>
+   ```
+
+   Then carefully reapply patches one at a time, testing after each.
+
+---
+
+## General Debugging Tips
+
+### Enable Verbose Logging
+
+Set the `DEBUG` environment variable for more detailed output. An environment prefix on `systemctl` does not reach the service, so set the variable on the systemd user manager instead:
+
+```bash
+systemctl --user set-environment DEBUG='openclaw:*'
+systemctl --user restart openclaw-gateway
+```
+
+Or for specific components:
+
+```bash
+systemctl --user set-environment DEBUG='openclaw:cron,openclaw:msteams'
+systemctl --user restart openclaw-gateway
+```
+
+When you are done, turn it off again with `systemctl --user unset-environment DEBUG` and a restart.
+
+### Check Resource Usage
+
+```bash
+# Memory usage
+ps aux --sort=-%mem | head -20
+
+# Disk usage
+df -h
+
+# Open file descriptors (can cause "too many open files" errors);
+# -o picks the oldest matching process if several are running
+ls /proc/$(pgrep -of openclaw)/fd | wc -l
+```
+
+### Restart Everything Cleanly
+
+When in doubt, a clean restart often resolves transient issues:
+
+```bash
+# Stop the gateway
+systemctl --user stop openclaw-gateway
+
+# Kill any zombie processes
+pkill -f openclaw-cron 2>/dev/null
+pkill -f "openclaw agent" 2>/dev/null
+
+# Wait a moment
+sleep 2
+
+# Start fresh
+systemctl --user start openclaw-gateway
+
+# Verify
+systemctl --user status openclaw-gateway
+journalctl --user -u openclaw-gateway -n 20 --no-pager
+```
diff --git a/bates-enhance/integrations/agents/config-fragment.json b/bates-enhance/integrations/agents/config-fragment.json
new file mode 100644
index 0000000..f815e36
--- /dev/null
+++ b/bates-enhance/integrations/agents/config-fragment.json
@@ -0,0 +1,13 @@
+{
+  "agents": {
+    "defaults": {
+      "subagents": {
+        "maxSpawnDepth": 2,
+        "maxConcurrent": 8,
+        "maxChildrenPerAgent": 5,
+        "archiveAfterMinutes": 60,
+        "model": "{{DEFAULT_MODEL}}"
+      }
+    }
+  }
+}
diff --git a/bates-enhance/integrations/agents/cron-jobs-agents.json b/bates-enhance/integrations/agents/cron-jobs-agents.json
new file mode 100644
index 0000000..ee0be38
--- /dev/null
+++ b/bates-enhance/integrations/agents/cron-jobs-agents.json
@@ -0,0 +1,197 @@
+[
+  {
+    "name": "mira-heartbeat",
+    "schedule": "0 */4 * * *",
+    "tz": "{{USER_TZ}}",
+    "message": "You are Mira, a specialized deputy agent for Technology & Infrastructure. Read {{HOME}}/.openclaw/agents/mira/HEARTBEAT.md and follow it strictly. DO NOT delegate or spawn sub-agents. If you find something that needs attention, report it clearly and concisely.
If nothing needs attention, reply NO_REPLY.", + "agentId": "main", + "sessionTarget": "isolated", + "delivery": { + "channel": "{{PRIMARY_CHANNEL}}", + "to": "{{DELIVERY_TARGET}}", + "bestEffort": true + } + }, + { + "name": "conrad-heartbeat", + "schedule": "5 */4 * * *", + "tz": "{{USER_TZ}}", + "message": "You are Conrad, a specialized deputy agent for Operations A. Read {{HOME}}/.openclaw/agents/conrad/HEARTBEAT.md and follow it strictly. DO NOT delegate or spawn sub-agents. If you find something that needs attention, report it clearly and concisely. If nothing needs attention, reply NO_REPLY.", + "agentId": "main", + "sessionTarget": "isolated", + "delivery": { + "channel": "{{PRIMARY_CHANNEL}}", + "to": "{{DELIVERY_TARGET}}", + "bestEffort": true + } + }, + { + "name": "soren-heartbeat", + "schedule": "10 */4 * * *", + "tz": "{{USER_TZ}}", + "message": "You are Soren, a specialized deputy agent for Operations B. Read {{HOME}}/.openclaw/agents/soren/HEARTBEAT.md and follow it strictly. DO NOT delegate or spawn sub-agents. If you find something that needs attention, report it clearly and concisely. If nothing needs attention, reply NO_REPLY.", + "agentId": "main", + "sessionTarget": "isolated", + "delivery": { + "channel": "{{PRIMARY_CHANNEL}}", + "to": "{{DELIVERY_TARGET}}", + "bestEffort": true + } + }, + { + "name": "amara-heartbeat", + "schedule": "15 */4 * * *", + "tz": "{{USER_TZ}}", + "message": "You are Amara, a specialized deputy agent for Operations C. Read {{HOME}}/.openclaw/agents/amara/HEARTBEAT.md and follow it strictly. DO NOT delegate or spawn sub-agents. If you find something that needs attention, report it clearly and concisely. If nothing needs attention, reply NO_REPLY.", + "agentId": "main", + "sessionTarget": "isolated", + "delivery": { + "channel": "{{PRIMARY_CHANNEL}}", + "to": "{{DELIVERY_TARGET}}", + "bestEffort": true + } + }, + { + "name": "jules-heartbeat", + "schedule": "20 */4 * * *", + "tz": "{{USER_TZ}}", + "message": "You are Jules, a specialized deputy agent for Personal Affairs. Read {{HOME}}/.openclaw/agents/jules/HEARTBEAT.md and follow it strictly. DO NOT delegate or spawn sub-agents. If you find something that needs attention, report it clearly and concisely. If nothing needs attention, reply NO_REPLY.", + "agentId": "main", + "sessionTarget": "isolated", + "delivery": { + "channel": "{{PRIMARY_CHANNEL}}", + "to": "{{DELIVERY_TARGET}}", + "bestEffort": true + } + }, + { + "name": "dash-heartbeat", + "schedule": "25 */4 * * *", + "tz": "{{USER_TZ}}", + "message": "You are Dash, a specialized deputy agent for DevOps & Rollout. Read {{HOME}}/.openclaw/agents/dash/HEARTBEAT.md and follow it strictly. DO NOT delegate or spawn sub-agents. If you find something that needs attention, report it clearly and concisely. If nothing needs attention, reply NO_REPLY.", + "agentId": "main", + "sessionTarget": "isolated", + "delivery": { + "channel": "{{PRIMARY_CHANNEL}}", + "to": "{{DELIVERY_TARGET}}", + "bestEffort": true + } + }, + { + "name": "mercer-heartbeat", + "schedule": "30 */4 * * *", + "tz": "{{USER_TZ}}", + "message": "You are Mercer, a specialized deputy agent for Legal & Compliance. Read {{HOME}}/.openclaw/agents/mercer/HEARTBEAT.md and follow it strictly. DO NOT delegate or spawn sub-agents. If you find something that needs attention, report it clearly and concisely. 
If nothing needs attention, reply NO_REPLY.", + "agentId": "main", + "sessionTarget": "isolated", + "delivery": { + "channel": "{{PRIMARY_CHANNEL}}", + "to": "{{DELIVERY_TARGET}}", + "bestEffort": true + } + }, + { + "name": "kira-heartbeat", + "schedule": "35 */4 * * *", + "tz": "{{USER_TZ}}", + "message": "You are Kira, a specialized deputy agent for Content & Social Media. Read {{HOME}}/.openclaw/agents/kira/HEARTBEAT.md and follow it strictly. DO NOT delegate or spawn sub-agents. If you find something that needs attention, report it clearly and concisely. If nothing needs attention, reply NO_REPLY.", + "agentId": "main", + "sessionTarget": "isolated", + "delivery": { + "channel": "{{PRIMARY_CHANNEL}}", + "to": "{{DELIVERY_TARGET}}", + "bestEffort": true + } + }, + { + "name": "nova-heartbeat", + "schedule": "40 */4 * * *", + "tz": "{{USER_TZ}}", + "message": "You are Nova, a specialized deputy agent for Research & Discovery. Read {{HOME}}/.openclaw/agents/nova/HEARTBEAT.md and follow it strictly. DO NOT delegate or spawn sub-agents. If you find something that needs attention, report it clearly and concisely. If nothing needs attention, reply NO_REPLY.", + "agentId": "main", + "sessionTarget": "isolated", + "delivery": { + "channel": "{{PRIMARY_CHANNEL}}", + "to": "{{DELIVERY_TARGET}}", + "bestEffort": true + } + }, + { + "name": "paige-heartbeat", + "schedule": "45 */4 * * *", + "tz": "{{USER_TZ}}", + "message": "You are Paige, a specialized deputy agent for Finance. Read {{HOME}}/.openclaw/agents/paige/HEARTBEAT.md and follow it strictly. DO NOT delegate or spawn sub-agents. If you find something that needs attention, report it clearly and concisely. If nothing needs attention, reply NO_REPLY.", + "agentId": "main", + "sessionTarget": "isolated", + "delivery": { + "channel": "{{PRIMARY_CHANNEL}}", + "to": "{{DELIVERY_TARGET}}", + "bestEffort": true + } + }, + { + "name": "quinn-heartbeat", + "schedule": "50 */4 * * *", + "tz": "{{USER_TZ}}", + "message": "You are Quinn, a specialized deputy agent for HR & People. Read {{HOME}}/.openclaw/agents/quinn/HEARTBEAT.md and follow it strictly. DO NOT delegate or spawn sub-agents. If you find something that needs attention, report it clearly and concisely. If nothing needs attention, reply NO_REPLY.", + "agentId": "main", + "sessionTarget": "isolated", + "delivery": { + "channel": "{{PRIMARY_CHANNEL}}", + "to": "{{DELIVERY_TARGET}}", + "bestEffort": true + } + }, + { + "name": "archer-heartbeat", + "schedule": "55 */4 * * *", + "tz": "{{USER_TZ}}", + "message": "You are Archer, a specialized deputy agent for Documentation. Read {{HOME}}/.openclaw/agents/archer/HEARTBEAT.md and follow it strictly. DO NOT delegate or spawn sub-agents. If you find something that needs attention, report it clearly and concisely. If nothing needs attention, reply NO_REPLY.", + "agentId": "main", + "sessionTarget": "isolated", + "delivery": { + "channel": "{{PRIMARY_CHANNEL}}", + "to": "{{DELIVERY_TARGET}}", + "bestEffort": true + } + }, + { + "name": "daily-standup-compile", + "schedule": "0 9 * * 1-5", + "tz": "{{USER_TZ}}", + "message": "DO NOT delegate or spawn sub-agents. Compile the daily standup from all deputies. Collect recent observations from ~/.openclaw/agents/*/workspace/observations/, summarize status, flag blockers, and deliver a unified briefing. 
Keep under 1000 characters.",
+    "agentId": "main",
+    "sessionTarget": "isolated",
+    "delivery": {
+      "channel": "{{PRIMARY_CHANNEL}}",
+      "to": "{{DELIVERY_TARGET}}",
+      "bestEffort": true
+    }
+  },
+  {
+    "name": "message-router",
+    "schedule": "*/15 * * * *",
+    "tz": "{{USER_TZ}}",
+    "message": "DO NOT delegate or spawn sub-agents. Run: bash ~/.openclaw/scripts/route-messages.sh 2>&1. If any escalations were routed, summarize them briefly. If no escalations, reply NO_REPLY.",
+    "agentId": "main",
+    "sessionTarget": "isolated",
+    "delivery": {
+      "channel": "{{PRIMARY_CHANNEL}}",
+      "to": "{{DELIVERY_TARGET}}",
+      "bestEffort": true
+    }
+  },
+  {
+    "name": "weekly-strategy-review",
+    "schedule": "0 10 * * 1",
+    "tz": "{{USER_TZ}}",
+    "message": "DO NOT delegate or spawn sub-agents. Weekly strategy alignment check. Review all deputy observations from ~/.openclaw/agents/*/workspace/observations/ from the past week. Identify cross-cutting themes, flag misalignments, and produce a strategic summary. Keep under 1500 characters.",
+    "agentId": "main",
+    "sessionTarget": "isolated",
+    "delivery": {
+      "channel": "{{PRIMARY_CHANNEL}}",
+      "to": "{{DELIVERY_TARGET}}",
+      "bestEffort": true
+    }
+  }
+]
diff --git a/bates-enhance/integrations/agents/scripts/agent-ctl.sh b/bates-enhance/integrations/agents/scripts/agent-ctl.sh
new file mode 100644
index 0000000..9c4aaba
--- /dev/null
+++ b/bates-enhance/integrations/agents/scripts/agent-ctl.sh
@@ -0,0 +1,160 @@
+#!/usr/bin/env bash
+# agent-ctl.sh -- Control deputy agents via systemd
+#
+# Usage:
+#   agent-ctl.sh start <agent-name|all>     Enable and start an agent
+#   agent-ctl.sh stop <agent-name|all>      Stop an agent
+#   agent-ctl.sh restart <agent-name|all>   Restart an agent
+#   agent-ctl.sh status [agent-name|all]    Show agent status
+#   agent-ctl.sh wake <agent-name>          Send a wake-up message to an idle agent
+#
+# Agents run as systemd user units: openclaw-agent@<name>.service
+
+set -euo pipefail
+
+# All known deputy names
+DEPUTIES=(mira conrad soren amara jules dash mercer kira nova paige quinn archer)
+
+UNIT_PREFIX="openclaw-agent@"
+
+usage() {
+  echo "Usage: $(basename "$0") <command> [agent-name|all]"
+  echo ""
+  echo "Commands:"
+  echo "  start <name|all>     Enable and start agent(s)"
+  echo "  stop <name|all>      Stop agent(s)"
+  echo "  restart <name|all>   Restart agent(s)"
+  echo "  status [name|all]    Show status (defaults to all)"
+  echo "  wake <name>          Send a wake-up message to an idle agent"
+  echo ""
+  echo "Deputies: ${DEPUTIES[*]}"
+  exit 1
+}
+
+# Resolve target list: single agent name, or all deputies
+resolve_targets() {
+  local target="${1:-all}"
+  if [[ "$target" == "all" ]]; then
+    echo "${DEPUTIES[@]}"
+  else
+    # Validate agent name
+    local found=0
+    for d in "${DEPUTIES[@]}"; do
+      if [[ "$d" == "$target" ]]; then
+        found=1
+        break
+      fi
+    done
+    if [[ $found -eq 0 ]]; then
+      echo "ERROR: Unknown agent '$target'. Known deputies: ${DEPUTIES[*]}" >&2
+      exit 1
+    fi
+    echo "$target"
+  fi
+}
+
+cmd_start() {
+  local targets
+  read -ra targets <<< "$(resolve_targets "${1:-all}")"
+  for name in "${targets[@]}"; do
+    echo "Starting ${name}..."
+    systemctl --user enable "${UNIT_PREFIX}${name}.service" 2>/dev/null || true
+    systemctl --user start "${UNIT_PREFIX}${name}.service"
+    echo "  [OK] ${name} started."
+  done
+}
+
+cmd_stop() {
+  local targets
+  read -ra targets <<< "$(resolve_targets "${1:-all}")"
+  for name in "${targets[@]}"; do
+    echo "Stopping ${name}..."
+    systemctl --user stop "${UNIT_PREFIX}${name}.service" 2>/dev/null || true
+    echo "  [OK] ${name} stopped."
+  done
+}
+
+cmd_restart() {
+  local targets
+  read -ra targets <<< "$(resolve_targets "${1:-all}")"
+  for name in "${targets[@]}"; do
+    echo "Restarting ${name}..."
+    systemctl --user restart "${UNIT_PREFIX}${name}.service" 2>/dev/null || true
+    echo "  [OK] ${name} restarted."
+  done
+}
+
+cmd_status() {
+  local targets
+  read -ra targets <<< "$(resolve_targets "${1:-all}")"
+  printf "%-12s %-12s %s\n" "AGENT" "STATUS" "MEMORY"
+  printf "%-12s %-12s %s\n" "-----" "------" "------"
+  for name in "${targets[@]}"; do
+    local state
+    state=$(systemctl --user is-active "${UNIT_PREFIX}${name}.service" 2>/dev/null || echo "inactive")
+    local mem="-"
+    if [[ "$state" == "active" ]]; then
+      # Try to get memory usage from systemd
+      mem=$(systemctl --user show "${UNIT_PREFIX}${name}.service" \
+        --property=MemoryCurrent 2>/dev/null | cut -d= -f2 || echo "-")
+      if [[ "$mem" != "-" && "$mem" != "[not set]" && "$mem" =~ ^[0-9]+$ ]]; then
+        mem="$(( mem / 1048576 ))MB"
+      else
+        mem="-"
+      fi
+    fi
+    printf "%-12s %-12s %s\n" "$name" "$state" "$mem"
+  done
+}
+
+cmd_wake() {
+  local name="${1:-}"
+  if [[ -z "$name" ]]; then
+    echo "ERROR: wake requires an agent name." >&2
+    usage
+  fi
+  resolve_targets "$name" >/dev/null  # validate
+
+  local msg_dir="$HOME/.openclaw/agents/${name}/inbox"
+  mkdir -p "$msg_dir"
+
+  local ts
+  ts=$(date +%s)
+  local msg_file="${msg_dir}/${ts}-wake.json"
+
+  # Drop a wake message into the agent's inbox
+  cat > "$msg_file" <<EOF
+{
+  "id": "${ts}-wake",
+  "from": "main",
+  "to": "${name}",
+  "timestamp": ${ts},
+  "priority": "normal",
+  "message": "Wake up: check your inbox and follow your HEARTBEAT instructions."
+}
+EOF
+
+  echo "Wake message written to ${msg_file}"
+
+  # Start the agent if it is not already running
+  local state
+  state=$(systemctl --user is-active "${UNIT_PREFIX}${name}.service" 2>/dev/null || echo "inactive")
+  if [[ "$state" != "active" ]]; then
+    echo "Agent ${name} is ${state}. Starting..."
+    cmd_start "$name"
+  fi
+}
+
+# -- Main --
+if [[ $# -lt 1 ]]; then
+  usage
+fi
+
+COMMAND="$1"
+TARGET="${2:-}"
+
+case "$COMMAND" in
+  start)   cmd_start "$TARGET" ;;
+  stop)    cmd_stop "$TARGET" ;;
+  restart) cmd_restart "$TARGET" ;;
+  status)  cmd_status "$TARGET" ;;
+  wake)    cmd_wake "$TARGET" ;;
+  *)       usage ;;
esac
diff --git a/bates-enhance/integrations/agents/scripts/agent-idle-watcher.sh b/bates-enhance/integrations/agents/scripts/agent-idle-watcher.sh
new file mode 100644
index 0000000..fa4ee00
--- /dev/null
+++ b/bates-enhance/integrations/agents/scripts/agent-idle-watcher.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+# agent-idle-watcher.sh -- Stop idle deputy agents to save memory
+#
+# Designed to run from cron every 5 minutes:
+#   */5 * * * * ~/.openclaw/scripts/agent-idle-watcher.sh
+#
+# Checks all running deputy agents. If a deputy's session has been idle
+# for longer than IDLE_THRESHOLD seconds (default: 600 = 10 minutes),
+# the agent is stopped via systemd.
+
+set -euo pipefail
+
+IDLE_THRESHOLD="${IDLE_THRESHOLD:-600}"  # seconds (10 minutes)
+DEPUTIES=(mira conrad soren amara jules dash mercer kira nova paige quinn archer)
+UNIT_PREFIX="openclaw-agent@"
+LOG_FILE="$HOME/.openclaw/logs/agent-idle-watcher.log"
+
+mkdir -p "$(dirname "$LOG_FILE")"
+
+log() {
+  echo "$(date '+%Y-%m-%d %H:%M:%S') $1" >> "$LOG_FILE"
+}
+
+now=$(date +%s)
+
+for name in "${DEPUTIES[@]}"; do
+  # Only check running agents
+  state=$(systemctl --user is-active "${UNIT_PREFIX}${name}.service" 2>/dev/null || echo "inactive")
+  if [[ "$state" != "active" ]]; then
+    continue
+  fi
+
+  # Determine last activity time from the session transcript
+  session_dir="$HOME/.openclaw/agents/${name}/sessions"
+  if [[ ! -d "$session_dir" ]]; then
-d "$session_dir" ]]; then + continue + fi + + # Find the most recently modified .jsonl file + latest_file="" + latest_mtime=0 + for f in "$session_dir"/*.jsonl; do + [[ -f "$f" ]] || continue + mtime=$(stat -c %Y "$f" 2>/dev/null || echo 0) + if [[ "$mtime" -gt "$latest_mtime" ]]; then + latest_mtime=$mtime + latest_file=$f + fi + done + + if [[ -z "$latest_file" || "$latest_mtime" -eq 0 ]]; then + # No session files -- agent might be freshly started, skip + continue + fi + + idle_seconds=$(( now - latest_mtime )) + + if [[ "$idle_seconds" -gt "$IDLE_THRESHOLD" ]]; then + log "Stopping idle agent: ${name} (idle ${idle_seconds}s, threshold ${IDLE_THRESHOLD}s)" + systemctl --user stop "${UNIT_PREFIX}${name}.service" 2>/dev/null || { + log "WARNING: Failed to stop ${name}" + } + fi +done diff --git a/bates-enhance/integrations/agents/scripts/agent-message.sh b/bates-enhance/integrations/agents/scripts/agent-message.sh new file mode 100644 index 0000000..b1b04ae --- /dev/null +++ b/bates-enhance/integrations/agents/scripts/agent-message.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# agent-message.sh -- Send a message to a specific deputy agent +# +# Usage: +# agent-message.sh "Your message here" +# agent-message.sh nova "Research the latest trends in AI governance" +# +# Messages are written as JSON files to the agent's inbox directory. +# The agent picks them up on its next heartbeat or wake cycle. + +set -euo pipefail + +DEPUTIES=(mira conrad soren amara jules dash mercer kira nova paige quinn archer) + +usage() { + echo "Usage: $(basename "$0") \"message\"" + echo "" + echo "Send a message to a deputy agent's inbox." + echo "" + echo "Deputies: ${DEPUTIES[*]}" + exit 1 +} + +if [[ $# -lt 2 ]]; then + usage +fi + +AGENT_ID="$1" +MESSAGE="$2" +SENDER="${3:-main}" +PRIORITY="${4:-normal}" + +# Validate agent name +valid=0 +for d in "${DEPUTIES[@]}"; do + if [[ "$d" == "$AGENT_ID" ]]; then + valid=1 + break + fi +done + +if [[ $valid -eq 0 ]]; then + echo "ERROR: Unknown agent '${AGENT_ID}'." >&2 + echo "Known deputies: ${DEPUTIES[*]}" >&2 + exit 1 +fi + +# Create inbox directory if needed +INBOX_DIR="$HOME/.openclaw/agents/${AGENT_ID}/inbox" +mkdir -p "$INBOX_DIR" + +# Generate message file +TIMESTAMP=$(date +%s) +MSG_ID="${TIMESTAMP}-$(head -c 4 /dev/urandom | od -An -tx1 | tr -d ' \n')" +MSG_FILE="${INBOX_DIR}/${MSG_ID}.json" + +cat > "$MSG_FILE" </dev/null || echo "\"${MESSAGE}\"") +} +EOF + +echo "Message sent to ${AGENT_ID}: ${MSG_FILE}" +echo " From: ${SENDER}" +echo " Priority: ${PRIORITY}" +echo " ID: ${MSG_ID}" diff --git a/bates-enhance/integrations/agents/scripts/agent-supervisor.sh b/bates-enhance/integrations/agents/scripts/agent-supervisor.sh new file mode 100644 index 0000000..30dcbb5 --- /dev/null +++ b/bates-enhance/integrations/agents/scripts/agent-supervisor.sh @@ -0,0 +1,119 @@ +#!/usr/bin/env bash +# agent-supervisor.sh -- Monitor agent health and restart failed agents +# +# Usage: +# agent-supervisor.sh Run a single health check pass +# agent-supervisor.sh --watch Continuous monitoring (run in tmux/screen) +# +# Checks: +# - Are enabled agents running? If not, restart them. +# - Are agents consuming excessive memory? Log a warning. +# - Has an agent been restarting in a loop? Disable it and alert. +# +# Designed to be called from cron or run as a long-lived watcher. 
+
+set -euo pipefail
+
+DEPUTIES=(mira conrad soren amara jules dash mercer kira nova paige quinn archer)
+UNIT_PREFIX="openclaw-agent@"
+LOG_FILE="$HOME/.openclaw/logs/agent-supervisor.log"
+MAX_RESTARTS=5        # Max restarts within the window before disabling
+RESTART_WINDOW=3600   # 1 hour window for restart counting
+MEM_WARN_MB=512       # Warn if an agent exceeds this memory usage
+WATCH_INTERVAL=60     # Seconds between checks in --watch mode
+
+mkdir -p "$(dirname "$LOG_FILE")"
+
+log() {
+  echo "$(date '+%Y-%m-%d %H:%M:%S') [SUPERVISOR] $1" >> "$LOG_FILE"
+}
+
+log_warn() {
+  echo "$(date '+%Y-%m-%d %H:%M:%S') [SUPERVISOR] WARNING: $1" >> "$LOG_FILE"
+  echo "WARNING: $1" >&2
+}
+
+# Check a single agent
+check_agent() {
+  local name="$1"
+  local unit="${UNIT_PREFIX}${name}.service"
+
+  # Only supervise agents that are enabled (user intentionally started them)
+  local enabled
+  enabled=$(systemctl --user is-enabled "$unit" 2>/dev/null || echo "disabled")
+  if [[ "$enabled" != "enabled" ]]; then
+    return 0
+  fi
+
+  local active
+  active=$(systemctl --user is-active "$unit" 2>/dev/null || echo "inactive")
+
+  if [[ "$active" == "failed" ]]; then
+    # Check restart count to avoid restart loops
+    local restart_count
+    restart_count=$(systemctl --user show "$unit" --property=NRestarts 2>/dev/null | cut -d= -f2 || echo "0")
+
+    if [[ "$restart_count" -ge "$MAX_RESTARTS" ]]; then
+      log_warn "${name} has restarted ${restart_count} times. Disabling to prevent loop."
+      systemctl --user disable "$unit" 2>/dev/null || true
+      systemctl --user stop "$unit" 2>/dev/null || true
+      # Write alert to main inbox
+      local alert_dir="$HOME/.openclaw/agents/main/inbox"
+      mkdir -p "$alert_dir"
+      local ts
+      ts=$(date +%s)
+      cat > "${alert_dir}/${ts}-supervisor-alert.json" <<EOF
+{
+  "id": "${ts}-supervisor-alert",
+  "from": "supervisor",
+  "to": "main",
+  "timestamp": ${ts},
+  "priority": "high",
+  "message": "Agent ${name} was disabled after ${restart_count} restarts. Investigate before re-enabling."
+}
+EOF
+      return 0
+    fi
+
+    log "Agent ${name} has failed. Restarting (restart count: ${restart_count})..."
+    systemctl --user restart "$unit" 2>/dev/null || {
+      log_warn "Failed to restart ${name}"
+    }
+    return 0
+  fi
+
+  if [[ "$active" != "active" ]]; then
+    log "Agent ${name} is ${active} but enabled. Starting..."
+    systemctl --user start "$unit" 2>/dev/null || {
+      log_warn "Failed to start ${name}"
+    }
+    return 0
+  fi
+
+  # Check memory usage
+  local mem_bytes
+  mem_bytes=$(systemctl --user show "$unit" --property=MemoryCurrent 2>/dev/null | cut -d= -f2 || echo "0")
+  if [[ "$mem_bytes" =~ ^[0-9]+$ && "$mem_bytes" -gt 0 ]]; then
+    local mem_mb=$(( mem_bytes / 1048576 ))
+    if [[ "$mem_mb" -gt "$MEM_WARN_MB" ]]; then
+      log_warn "${name} using ${mem_mb}MB (threshold: ${MEM_WARN_MB}MB)"
+    fi
+  fi
+}
+
+# Run one pass over all agents
+run_check() {
+  for name in "${DEPUTIES[@]}"; do
+    check_agent "$name"
+  done
+}
+
+# -- Main --
+if [[ "${1:-}" == "--watch" ]]; then
+  log "Supervisor started in watch mode (interval: ${WATCH_INTERVAL}s)"
+  while true; do
+    run_check
+    sleep "$WATCH_INTERVAL"
+  done
+else
+  run_check
+fi
diff --git a/bates-enhance/integrations/agents/scripts/collect-standups.sh b/bates-enhance/integrations/agents/scripts/collect-standups.sh
new file mode 100644
index 0000000..ee0d8d5
--- /dev/null
+++ b/bates-enhance/integrations/agents/scripts/collect-standups.sh
@@ -0,0 +1,113 @@
+#!/usr/bin/env bash
+# collect-standups.sh -- Collect standup updates from all active deputies
+#
+# Usage:
+#   collect-standups.sh                 Print standups to stdout
+#   collect-standups.sh --output FILE   Write standups to a file
+#
+# Reads each deputy's recent observations (last 24 hours) and produces
+# a per-agent summary. Used by compile-briefing.sh and the daily-standup
+# cron job.
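+#
+# Example (the observation window is configurable via the HOURS env var):
+#   HOURS=48 collect-standups.sh --output /tmp/standups.md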
+ +set -euo pipefail + +DEPUTIES=(mira conrad soren amara jules dash mercer kira nova paige quinn archer) +ROLES=( + "Technology & Infrastructure" + "Operations A" + "Operations B" + "Operations C" + "Personal Affairs" + "DevOps & Rollout" + "Legal & Compliance" + "Content & Social Media" + "Research & Discovery" + "Finance" + "HR & People" + "Documentation" +) + +OUTPUT_FILE="" +HOURS="${HOURS:-24}" + +# Parse arguments +while [[ $# -gt 0 ]]; do + case "$1" in + --output) + OUTPUT_FILE="$2" + shift 2 + ;; + --hours) + HOURS="$2" + shift 2 + ;; + *) + echo "Unknown argument: $1" >&2 + exit 1 + ;; + esac +done + +cutoff=$(date -d "${HOURS} hours ago" +%s 2>/dev/null || date -v-${HOURS}H +%s 2>/dev/null || echo 0) +now_fmt=$(date '+%Y-%m-%d %H:%M') + +# Collect output +output="" +output+="# Deputy Standup Collection\n" +output+="Generated: ${now_fmt}\n" +output+="Window: last ${HOURS} hours\n" +output+="\n" + +active_count=0 +idle_count=0 + +for i in "${!DEPUTIES[@]}"; do + name="${DEPUTIES[$i]}" + role="${ROLES[$i]}" + obs_dir="$HOME/.openclaw/agents/${name}/workspace/observations" + + output+="## ${name} (${role})\n" + + if [[ ! -d "$obs_dir" ]]; then + output+=" No observations directory.\n\n" + (( idle_count++ )) || true + continue + fi + + # Find recent observation files + recent_found=0 + for f in "$obs_dir"/*.md "$obs_dir"/*.txt "$obs_dir"/*.json; do + [[ -f "$f" ]] || continue + mtime=$(stat -c %Y "$f" 2>/dev/null || echo 0) + if [[ "$mtime" -ge "$cutoff" ]]; then + fname=$(basename "$f") + output+=" - ${fname}:\n" + # Include first 10 lines of each observation + head_content=$(head -10 "$f" 2>/dev/null || true) + while IFS= read -r line; do + output+=" ${line}\n" + done <<< "$head_content" + output+="\n" + (( recent_found++ )) || true + fi + done + + if [[ "$recent_found" -eq 0 ]]; then + output+=" No recent observations.\n\n" + (( idle_count++ )) || true + else + (( active_count++ )) || true + fi +done + +output+="---\n" +output+="Summary: ${active_count} active, ${idle_count} idle out of ${#DEPUTIES[@]} deputies.\n" + +# Output +if [[ -n "$OUTPUT_FILE" ]]; then + mkdir -p "$(dirname "$OUTPUT_FILE")" + printf "%b" "$output" > "$OUTPUT_FILE" + echo "Standup collection written to: ${OUTPUT_FILE}" +else + printf "%b" "$output" +fi diff --git a/bates-enhance/integrations/agents/scripts/compile-briefing.sh b/bates-enhance/integrations/agents/scripts/compile-briefing.sh new file mode 100644 index 0000000..c7ba91b --- /dev/null +++ b/bates-enhance/integrations/agents/scripts/compile-briefing.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash +# compile-briefing.sh -- Compile a unified briefing from deputy standups +# +# Usage: +# compile-briefing.sh Print briefing to stdout +# compile-briefing.sh --output FILE Write briefing to a file +# +# Collects standups from all deputies, then produces a concise summary +# organized by status, highlights, blockers, and next steps. +# Intended for delivery to the main session. 
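+#
+# Example:
+#   compile-briefing.sh --output ~/.openclaw/workspace/briefings/latest.md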
+
+set -euo pipefail
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+BRIEFING_DIR="$HOME/.openclaw/workspace/briefings"
+OUTPUT_FILE=""
+
+# Parse arguments
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --output)
+      OUTPUT_FILE="$2"
+      shift 2
+      ;;
+    *)
+      echo "Unknown argument: $1" >&2
+      exit 1
+      ;;
+  esac
+done
+
+mkdir -p "$BRIEFING_DIR"
+
+# Step 1: Collect raw standups into a temp file
+RAW_STANDUPS=$(mktemp)
+trap 'rm -f "$RAW_STANDUPS"' EXIT
+
+"$SCRIPT_DIR/collect-standups.sh" --output "$RAW_STANDUPS"
+
+# Step 2: Build the briefing
+now_fmt=$(date '+%Y-%m-%d %H:%M')
+date_label=$(date '+%A, %B %d, %Y')
+
+briefing=""
+briefing+="# Daily Briefing -- ${date_label}\n"
+briefing+="Compiled: ${now_fmt}\n\n"
+
+# Extract the summary line from the standups
+summary_line=$(tail -1 "$RAW_STANDUPS" 2>/dev/null || echo "No summary available.")
+briefing+="## Overview\n"
+briefing+="${summary_line}\n\n"
+
+# Include the full standup data
+briefing+="## Deputy Reports\n\n"
+briefing+="$(cat "$RAW_STANDUPS")\n\n"
+
+# Step 3: Add action items section (placeholder for AI processing)
+briefing+="## Action Items\n"
+briefing+="Review the deputy reports above and extract:\n"
+briefing+="- Blockers requiring main-session intervention\n"
+briefing+="- Cross-deputy coordination needs\n"
+briefing+="- Decisions pending approval\n"
+briefing+="- Items that can be acknowledged and closed\n\n"
+
+briefing+="---\n"
+briefing+="End of briefing.\n"
+
+# Output
+if [[ -n "$OUTPUT_FILE" ]]; then
+  mkdir -p "$(dirname "$OUTPUT_FILE")"
+  printf "%b" "$briefing" > "$OUTPUT_FILE"
+  echo "Briefing written to: ${OUTPUT_FILE}"
+else
+  printf "%b" "$briefing"
+fi
+
+# Also archive the briefing
+archive_file="${BRIEFING_DIR}/briefing-$(date '+%Y%m%d-%H%M').md"
+printf "%b" "$briefing" > "$archive_file"
diff --git a/bates-enhance/integrations/agents/scripts/generate-agent-configs.sh b/bates-enhance/integrations/agents/scripts/generate-agent-configs.sh
new file mode 100644
index 0000000..9a7bc7e
--- /dev/null
+++ b/bates-enhance/integrations/agents/scripts/generate-agent-configs.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+# generate-agent-configs.sh -- Generate per-agent openclaw.json and SOUL.md
+#
+# Usage:
+#   generate-agent-configs.sh [--templates-dir DIR]
+#
+# Reads the deputy roster and generates configuration files for each agent
+# from the provided templates. Called by setup.sh during initial installation.
+#
+# Templates used:
+#   agent-openclaw.json.template -> ~/.openclaw/agents/<agent-id>/agent/openclaw.json
+#   agent-soul.md.template       -> ~/.openclaw/agents/<agent-id>/agent/SOUL.md
+
+set -euo pipefail
+
+# Default templates directory (sibling to scripts/)
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+TEMPLATES_DIR="${TEMPLATES_DIR:-$(dirname "$SCRIPT_DIR")/templates}"
+
+# Parse arguments
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    --templates-dir)
+      TEMPLATES_DIR="$2"
+      shift 2
+      ;;
+    *)
+      echo "Unknown argument: $1" >&2
+      exit 1
+      ;;
+  esac
+done
+
+# Validate templates
+CONFIG_TEMPLATE="$TEMPLATES_DIR/agent-openclaw.json.template"
+SOUL_TEMPLATE="$TEMPLATES_DIR/agent-soul.md.template"
+
+if [[ ! -f "$CONFIG_TEMPLATE" ]]; then
+  echo "ERROR: Config template not found: $CONFIG_TEMPLATE" >&2
+  exit 1
+fi
+
+if [[ !
-f "$SOUL_TEMPLATE" ]]; then + echo "ERROR: Soul template not found: $SOUL_TEMPLATE" >&2 + exit 1 +fi + +# Deputy roster: id|name|role|layer +ROSTER=( + "mira|Mira|Technology & Infrastructure|2" + "conrad|Conrad|Operations A|2" + "soren|Soren|Operations B|2" + "amara|Amara|Operations C|2" + "jules|Jules|Personal Affairs|2" + "dash|Dash|DevOps & Rollout|2" + "mercer|Mercer|Legal & Compliance|2" + "kira|Kira|Content & Social Media|2" + "nova|Nova|Research & Discovery|2" + "paige|Paige|Finance|2" + "quinn|Quinn|HR & People|2" + "archer|Archer|Documentation|2" +) + +# Configurable defaults +AGENT_MODEL="${AGENT_MODEL:-{{DEFAULT_MODEL}}}" +ASSISTANT_NAME="${ASSISTANT_NAME:-{{ASSISTANT_NAME}}}" + +echo "Generating agent configurations..." +echo " Templates: $TEMPLATES_DIR" +echo " Model: $AGENT_MODEL" +echo "" + +for entry in "${ROSTER[@]}"; do + IFS='|' read -r agent_id agent_name agent_role agent_layer <<< "$entry" + + agent_dir="$HOME/.openclaw/agents/${agent_id}/agent" + workspace_dir="$HOME/.openclaw/agents/${agent_id}/workspace/observations" + inbox_dir="$HOME/.openclaw/agents/${agent_id}/inbox" + + mkdir -p "$agent_dir" "$workspace_dir" "$inbox_dir" + + # Export template variables + export AGENT_ID="$agent_id" + export AGENT_NAME="$agent_name" + export AGENT_ROLE="$agent_role" + export AGENT_LAYER="$agent_layer" + export AGENT_MODEL + export ASSISTANT_NAME + export HOME + + # Generate openclaw.json + config_output="$agent_dir/openclaw.json" + cp "$CONFIG_TEMPLATE" "$config_output" + + # Replace placeholders using sed (compatible with template_render pattern) + sed -i "s|{{AGENT_ID}}|${agent_id}|g" "$config_output" + sed -i "s|{{AGENT_NAME}}|${agent_name}|g" "$config_output" + sed -i "s|{{AGENT_ROLE}}|${agent_role}|g" "$config_output" + sed -i "s|{{AGENT_LAYER}}|${agent_layer}|g" "$config_output" + sed -i "s|{{AGENT_MODEL}}|${AGENT_MODEL}|g" "$config_output" + sed -i "s|{{HOME}}|${HOME}|g" "$config_output" + + # Generate SOUL.md + soul_output="$agent_dir/SOUL.md" + cp "$SOUL_TEMPLATE" "$soul_output" + + sed -i "s|{{AGENT_NAME}}|${agent_name}|g" "$soul_output" + sed -i "s|{{AGENT_ROLE}}|${agent_role}|g" "$soul_output" + sed -i "s|{{AGENT_LAYER}}|${agent_layer}|g" "$soul_output" + sed -i "s|{{ASSISTANT_NAME}}|${ASSISTANT_NAME}|g" "$soul_output" + + echo " [OK] ${agent_name} (${agent_id}) -- ${agent_role}" +done + +echo "" +echo "All ${#ROSTER[@]} agent configurations generated." diff --git a/bates-enhance/integrations/agents/scripts/route-messages.sh b/bates-enhance/integrations/agents/scripts/route-messages.sh new file mode 100644 index 0000000..6b1919d --- /dev/null +++ b/bates-enhance/integrations/agents/scripts/route-messages.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash +# route-messages.sh -- Route inter-agent messages +# +# Usage: +# route-messages.sh Process the message queue once +# route-messages.sh --watch Continuous routing (poll every 30s) +# +# Reads messages from the central message queue directory and delivers +# them to the target agent's inbox. Processed messages are moved to +# an archive directory. 
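+#
+# A queue entry is a JSON file in ~/.openclaw/message-queue with at least
+# a "to" field, e.g. (in the shape written by agent-message.sh):
+#   {"id": "...", "from": "mira", "to": "nova", "message": "..."}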
+ +set -euo pipefail + +QUEUE_DIR="$HOME/.openclaw/message-queue" +ARCHIVE_DIR="$HOME/.openclaw/message-queue/archive" +LOG_FILE="$HOME/.openclaw/logs/message-router.log" +WATCH_INTERVAL=30 + +mkdir -p "$QUEUE_DIR" "$ARCHIVE_DIR" "$(dirname "$LOG_FILE")" + +log() { + echo "$(date '+%Y-%m-%d %H:%M:%S') [ROUTER] $1" >> "$LOG_FILE" +} + +# Process a single message file +process_message() { + local msg_file="$1" + local filename + filename=$(basename "$msg_file") + + # Parse the target agent from the message JSON + local target + target=$(python3 -c " +import json, sys +try: + msg = json.load(open('${msg_file}')) + print(msg.get('to', '')) +except Exception: + print('') +" 2>/dev/null) + + if [[ -z "$target" ]]; then + log "WARNING: No 'to' field in ${filename}. Moving to archive." + mv "$msg_file" "$ARCHIVE_DIR/${filename}.undeliverable" + return 0 + fi + + # Deliver to target agent's inbox + local inbox_dir="$HOME/.openclaw/agents/${target}/inbox" + if [[ ! -d "$HOME/.openclaw/agents/${target}" ]]; then + log "WARNING: Unknown agent '${target}' in ${filename}. Moving to archive." + mv "$msg_file" "$ARCHIVE_DIR/${filename}.unknown-target" + return 0 + fi + + mkdir -p "$inbox_dir" + cp "$msg_file" "$inbox_dir/${filename}" + mv "$msg_file" "$ARCHIVE_DIR/${filename}" + log "Delivered ${filename} to ${target}" +} + +# Process all pending messages +process_queue() { + local count=0 + for msg_file in "$QUEUE_DIR"/*.json; do + [[ -f "$msg_file" ]] || continue + process_message "$msg_file" + (( count++ )) || true + done + + if [[ "$count" -gt 0 ]]; then + log "Processed ${count} message(s)." + fi +} + +# -- Main -- +if [[ "${1:-}" == "--watch" ]]; then + log "Message router started in watch mode (interval: ${WATCH_INTERVAL}s)" + while true; do + process_queue + sleep "$WATCH_INTERVAL" + done +else + process_queue +fi diff --git a/bates-enhance/integrations/agents/setup.sh b/bates-enhance/integrations/agents/setup.sh new file mode 100644 index 0000000..6588f6e --- /dev/null +++ b/bates-enhance/integrations/agents/setup.sh @@ -0,0 +1,179 @@ +# setup.sh -- Deputy Agent System integration for Bates +# Sourced by bates-enhance.sh -- do NOT run directly. +# +# Installs the 12-deputy agent system with on-demand startup, +# idle watching, and inter-agent message routing. +# +# Prerequisites: +# - OpenClaw gateway running +# - systemd --user available (loginctl enable-linger) + +AGENTS_DIR="$ENHANCE_DIR/integrations/agents" + +# ------------------------------------------------------------------- +# Step 1 -- Generate agent configurations +# ------------------------------------------------------------------- +step "Generate deputy agent configurations" + +info "Setting up 12 deputy agents with on-demand activation." +echo "" + +# Deputy roster: id|name|role|layer +DEPUTY_ROSTER=( + "mira|Mira|Technology & Infrastructure|2" + "conrad|Conrad|Operations A|2" + "soren|Soren|Operations B|2" + "amara|Amara|Operations C|2" + "jules|Jules|Personal Affairs|2" + "dash|Dash|DevOps & Rollout|2" + "mercer|Mercer|Legal & Compliance|2" + "kira|Kira|Content & Social Media|2" + "nova|Nova|Research & Discovery|2" + "paige|Paige|Finance|2" + "quinn|Quinn|HR & People|2" + "archer|Archer|Documentation|2" +) + +CONFIG_TEMPLATE="$AGENTS_DIR/templates/agent-openclaw.json.template" +SOUL_TEMPLATE="$AGENTS_DIR/templates/agent-soul.md.template" + +if [[ ! -f "$CONFIG_TEMPLATE" ]]; then + fatal "Config template not found: $CONFIG_TEMPLATE" +fi +if [[ ! 
-f "$SOUL_TEMPLATE" ]]; then + fatal "Soul template not found: $SOUL_TEMPLATE" +fi + +# Use AGENT_MODEL from environment or fall back to placeholder +AGENT_MODEL="${AGENT_MODEL:-{{DEFAULT_MODEL}}}" +ASSISTANT_NAME="${ASSISTANT_NAME:-{{ASSISTANT_NAME}}}" + +for entry in "${DEPUTY_ROSTER[@]}"; do + IFS='|' read -r agent_id agent_name agent_role agent_layer <<< "$entry" + + agent_dir="$HOME/.openclaw/agents/${agent_id}/agent" + workspace_dir="$HOME/.openclaw/agents/${agent_id}/workspace/observations" + inbox_dir="$HOME/.openclaw/agents/${agent_id}/inbox" + sessions_dir="$HOME/.openclaw/agents/${agent_id}/sessions" + + mkdir -p "$agent_dir" "$workspace_dir" "$inbox_dir" "$sessions_dir" + + # Generate openclaw.json from template + config_output="$agent_dir/openclaw.json" + export AGENT_ID="$agent_id" AGENT_NAME="$agent_name" AGENT_ROLE="$agent_role" + export AGENT_LAYER="$agent_layer" AGENT_MODEL ASSISTANT_NAME HOME + template_render "$CONFIG_TEMPLATE" "$config_output" + + # Generate SOUL.md from template + soul_output="$agent_dir/SOUL.md" + template_render "$SOUL_TEMPLATE" "$soul_output" + + info " ${agent_name} (${agent_id}) -- ${agent_role}" +done + +# Create shared directories +mkdir -p "$HOME/.openclaw/message-queue/archive" +mkdir -p "$HOME/.openclaw/workspace/briefings" +mkdir -p "$HOME/.openclaw/logs" + +success "All ${#DEPUTY_ROSTER[@]} deputy configurations generated." + +# ------------------------------------------------------------------- +# Step 2 -- Install systemd template unit +# ------------------------------------------------------------------- +step "Install systemd template unit for deputy agents" + +SYSTEMD_USER_DIR="$HOME/.config/systemd/user" +mkdir -p "$SYSTEMD_USER_DIR" + +SERVICE_TEMPLATE="$AGENTS_DIR/templates/openclaw-agent@.service.template" +SERVICE_TARGET="$SYSTEMD_USER_DIR/openclaw-agent@.service" + +if [[ ! -f "$SERVICE_TEMPLATE" ]]; then + fatal "Service template not found: $SERVICE_TEMPLATE" +fi + +cp "$SERVICE_TEMPLATE" "$SERVICE_TARGET" +chmod 644 "$SERVICE_TARGET" +info "Installed template unit: $SERVICE_TARGET" + +systemctl --user daemon-reload 2>/dev/null || warn "Could not reload systemd daemon." + +success "Systemd template unit installed." + +# ------------------------------------------------------------------- +# Step 3 -- Install agent management scripts +# ------------------------------------------------------------------- +step "Install agent management scripts" + +SCRIPTS_SRC="$AGENTS_DIR/scripts" +SCRIPTS_DST="$HOME/.openclaw/scripts" +mkdir -p "$SCRIPTS_DST" + +SCRIPT_FILES=( + agent-ctl.sh + agent-idle-watcher.sh + agent-message.sh + agent-supervisor.sh + collect-standups.sh + compile-briefing.sh + route-messages.sh + generate-agent-configs.sh +) + +for script in "${SCRIPT_FILES[@]}"; do + if [[ -f "$SCRIPTS_SRC/$script" ]]; then + cp "$SCRIPTS_SRC/$script" "$SCRIPTS_DST/$script" + chmod +x "$SCRIPTS_DST/$script" + info " Installed: $script" + else + warn " Script not found: $SCRIPTS_SRC/$script" + fi +done + +success "All ${#SCRIPT_FILES[@]} scripts installed to $SCRIPTS_DST." 
+
+# -------------------------------------------------------------------
+# Step 4 -- Set up idle watcher cron and agent limits
+# -------------------------------------------------------------------
+step "Configure idle watcher and agent spawn limits"
+
+# Add crontab entry for idle watcher (if not already present)
+CRON_LINE="*/5 * * * * $HOME/.openclaw/scripts/agent-idle-watcher.sh"
+
+if (crontab -l 2>/dev/null || true) | grep -qF "agent-idle-watcher.sh"; then
+  info "Idle watcher cron already installed."
+else
+  (crontab -l 2>/dev/null || true; echo "$CRON_LINE") | crontab -
+  success "Idle watcher cron installed (runs every 5 minutes)."
+fi
+
+# Set agent spawn limits via config merge
+info "Setting agent spawn limits: maxSpawnDepth=2, maxConcurrent=8, maxChildrenPerAgent=5"
+
+FRAGMENT_DIR="$AGENTS_DIR"
+RENDERED_FRAGMENT=$(mktemp)
+export DEFAULT_MODEL="${AGENT_MODEL:-anthropic/claude-sonnet-4-5-20250929}"
+template_render "$FRAGMENT_DIR/config-fragment.json" "$RENDERED_FRAGMENT"
+config_merge "$RENDERED_FRAGMENT"
+rm -f "$RENDERED_FRAGMENT"
+
+success "Agent spawn limits configured."
+
+# -------------------------------------------------------------------
+# Summary
+# -------------------------------------------------------------------
+echo ""
+success "Deputy Agent System setup complete."
+info "12 deputies configured as on-demand agents (none auto-started)."
+info "Use agent-ctl.sh to start/stop/wake individual deputies."
+info "Idle agents are automatically stopped after 10 minutes."
+echo ""
+info "Management scripts installed to: $SCRIPTS_DST"
+info "Agent configs stored in: $HOME/.openclaw/agents/<agent-id>/agent/"
+echo ""
+info "Key commands:"
+info "  agent-ctl.sh status            Show all agent statuses"
+info "  agent-ctl.sh start <agent>     Start a specific deputy"
+info "  agent-ctl.sh wake <agent>      Wake an idle deputy"
+info "  agent-message.sh <agent> msg   Send a message to a deputy"
diff --git a/bates-enhance/integrations/agents/templates/agent-openclaw.json.template b/bates-enhance/integrations/agents/templates/agent-openclaw.json.template
new file mode 100644
index 0000000..534a448
--- /dev/null
+++ b/bates-enhance/integrations/agents/templates/agent-openclaw.json.template
@@ -0,0 +1,13 @@
+{
+  "agent": {
+    "name": "{{AGENT_NAME}}",
+    "role": "{{AGENT_ROLE}}",
+    "layer": {{AGENT_LAYER}}
+  },
+  "models": {
+    "default": "{{AGENT_MODEL}}"
+  },
+  "workspace": {
+    "root": "{{HOME}}/.openclaw/agents/{{AGENT_ID}}/workspace"
+  }
+}
diff --git a/bates-enhance/integrations/agents/templates/agent-soul.md.template b/bates-enhance/integrations/agents/templates/agent-soul.md.template
new file mode 100644
index 0000000..9784555
--- /dev/null
+++ b/bates-enhance/integrations/agents/templates/agent-soul.md.template
@@ -0,0 +1,18 @@
+# {{AGENT_NAME}} -- {{AGENT_ROLE}}
+
+You are {{AGENT_NAME}}, a deputy in the {{ASSISTANT_NAME}} organization.
+Your role is: {{AGENT_ROLE}}.
+ +## Reporting +- Report to: {{ASSISTANT_NAME}} (main session) +- Layer: {{AGENT_LAYER}} + +## Communication +- Use sessions_spawn on main to delegate sub-tasks +- Post updates to your observations file +- Respond to messages in your inbox + +## Rules +- Follow all workspace rules +- Stay within your role scope +- Escalate decisions outside your authority diff --git a/bates-enhance/integrations/agents/templates/openclaw-agent@.service.template b/bates-enhance/integrations/agents/templates/openclaw-agent@.service.template new file mode 100644 index 0000000..edde38f --- /dev/null +++ b/bates-enhance/integrations/agents/templates/openclaw-agent@.service.template @@ -0,0 +1,15 @@ +[Unit] +Description=OpenClaw Agent - %i +After=openclaw-gateway.service +Requires=openclaw-gateway.service + +[Service] +Type=simple +ExecStart=%h/.npm-global/bin/openclaw agent start --id %i +Restart=on-failure +RestartSec=10 +Environment="NODE_PATH=%h/.npm-global/lib/node_modules" +WorkingDirectory=%h/.openclaw/agents/%i + +[Install] +WantedBy=default.target diff --git a/bates-enhance/integrations/agents/workspace-additions/rules/delegation.md b/bates-enhance/integrations/agents/workspace-additions/rules/delegation.md new file mode 100644 index 0000000..11c8de8 --- /dev/null +++ b/bates-enhance/integrations/agents/workspace-additions/rules/delegation.md @@ -0,0 +1,86 @@ +# Delegation Rules + +Guidelines for delegating work to deputy agents and managing the deputy system. + +## When to Delegate + +Delegate to a deputy when: +- The task falls clearly within a deputy's defined role scope +- The task requires sustained focus and would interrupt the main session's flow +- Parallel execution across multiple deputies would speed up delivery +- The task involves domain-specific knowledge that a deputy specializes in + +Handle directly (do NOT delegate) when: +- The task is simple and can be completed in under 2 minutes +- The user explicitly asks the main session to handle it +- The task requires real-time conversational back-and-forth with the user +- Sensitive decisions that require main-session authority (e.g., config changes, payments) + +## Choosing the Right Deputy + +Match tasks to deputies by role: + +| Deputy | Role | Delegate when... | +|---------|-----------------------------|-----------------------------------------------------| +| mira | Technology & Infrastructure | Infrastructure issues, system architecture, tech research | +| conrad | Operations A | Operational tasks, process management, workflow A | +| soren | Operations B | Operational tasks, process management, workflow B | +| amara | Operations C | Operational tasks, process management, workflow C | +| jules | Personal Affairs | Personal scheduling, errands, lifestyle tasks | +| dash | DevOps & Rollout | Deployments, CI/CD, release management, monitoring | +| mercer | Legal & Compliance | Contract review, regulatory questions, compliance | +| kira | Content & Social Media | Content creation, social posts, marketing copy | +| nova | Research & Discovery | Research tasks, market analysis, competitive intel | +| paige | Finance | Financial analysis, budgets, invoicing, expense tracking | +| quinn | HR & People | HR processes, recruitment support, people questions | +| archer | Documentation | Documentation writing, knowledge base updates, SOPs | + +If a task spans multiple roles, assign it to the most relevant deputy and instruct +them to coordinate with others via the message queue. 
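+
+For example, a cross-role task can go to one deputy with an explicit coordination note (a sketch using the repo's `agent-message.sh` helper; the task text is illustrative):
+
+```bash
+# Positional arguments: <agent> "message" [sender] [priority]
+~/.openclaw/scripts/agent-message.sh nova \
+  "Research competitor launch timelines and share findings with kira for the content calendar." \
+  main high
+```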
+ +## Sub-Agent Spawn Limits + +- **maxSpawnDepth**: 2 -- deputies can spawn one level of sub-agents on main +- **maxConcurrent**: 8 -- no more than 8 agent sessions running at once +- **maxChildrenPerAgent**: 5 -- a single deputy can spawn at most 5 sub-tasks + +These limits prevent resource exhaustion. If a task requires more parallelism, +break it into sequential batches. + +## Result Collection and Quality Checks + +After delegating: +1. **Monitor**: Check the deputy's observations file for progress updates +2. **Timeout**: If no update within 30 minutes, send a follow-up message +3. **Review**: When the deputy reports completion, review the output for: + - Completeness -- does it address the full scope of the request? + - Accuracy -- are facts and figures correct? + - Format -- is the output in the expected format? +4. **Consolidate**: Merge deputy results into a unified response for the user +5. **Attribute**: Note which deputy handled the work in the response + +## Escalation Paths + +Deputies should escalate to the main session when: +- A decision is outside their role scope or authority level +- They encounter an error they cannot resolve after two attempts +- The task requires access to tools or credentials they do not have +- Conflicting instructions from multiple sources need resolution +- The user explicitly asks to speak with the main session + +Escalation method: +1. Post an observation tagged `[ESCALATION]` with context and the specific question +2. Send a message to the main session inbox via the message queue +3. Pause work on the escalated item until the main session responds + +## Anti-Patterns (Avoid These) + +- **Delegation ping-pong**: Do not delegate a task to a deputy who then delegates + it back. If the first deputy cannot handle it, escalate to main. +- **Over-delegation**: Do not delegate trivial tasks (e.g., "what time is it?"). +- **Blind delegation**: Always include clear instructions, context, and expected + output format when delegating. +- **Ignoring results**: Always review and acknowledge deputy outputs. Unread + observations accumulate and create confusion. +- **Parallel overload**: Do not spawn all 12 deputies simultaneously. Stagger + work to stay within resource limits. diff --git a/bates-enhance/integrations/deepseek/config-fragment.json b/bates-enhance/integrations/deepseek/config-fragment.json new file mode 100644 index 0000000..76d4f54 --- /dev/null +++ b/bates-enhance/integrations/deepseek/config-fragment.json @@ -0,0 +1,32 @@ +{ + "models": { + "providers": { + "deepseek": { + "baseUrl": "https://api.deepseek.com/v1", + "apiKey": "{{DEEPSEEK_API_KEY}}", + "api": "openai-completions", + "models": [ + { + "id": "deepseek-chat", + "name": "DeepSeek V3", + "reasoning": false, + "input": ["text"], + "cost": { + "input": 0.27, + "output": 1.1, + "cacheRead": 0, + "cacheWrite": 0 + }, + "contextWindow": 64000, + "maxTokens": 8192 + } + ] + } + } + }, + "env": { + "vars": { + "DEEPSEEK_API_KEY": "{{DEEPSEEK_API_KEY}}" + } + } +} diff --git a/bates-enhance/integrations/deepseek/cron-jobs-deepseek.json b/bates-enhance/integrations/deepseek/cron-jobs-deepseek.json new file mode 100644 index 0000000..18d00ae --- /dev/null +++ b/bates-enhance/integrations/deepseek/cron-jobs-deepseek.json @@ -0,0 +1,9 @@ +[ + { + "name": "overnight-code-review", + "schedule": "0 3 * * *", + "tz": "{{USER_TZ}}", + "message": "Run overnight code review on recent commits. Use DeepSeek for initial analysis, then verify findings with primary model. 
Report significant issues only.",
+    "sessionTarget": "isolated"
+  }
+]
diff --git a/bates-enhance/integrations/deepseek/setup.sh b/bates-enhance/integrations/deepseek/setup.sh
new file mode 100644
index 0000000..820824e
--- /dev/null
+++ b/bates-enhance/integrations/deepseek/setup.sh
@@ -0,0 +1,90 @@
+# setup.sh -- DeepSeek integration for Bates
+# Sourced by bates-enhance.sh; has access to common.sh and config-merge.sh functions.
+#
+# Configures DeepSeek as an additional model provider for cost-effective
+# tasks like overnight code review.
+
+step "DeepSeek Model Provider Configuration"
+
+info "DeepSeek provides cost-effective AI models for tasks like code review."
+info "Get your API key from: https://platform.deepseek.com/api_keys"
+echo ""
+
+# --- API Key ---
+local api_key=""
+while [[ -z "$api_key" ]]; do
+    read -rp "DeepSeek API key: " api_key
+    if [[ -z "$api_key" ]]; then
+        warn "API key cannot be empty."
+        continue
+    fi
+done
+info "API key accepted."
+
+# --- Store in systemd drop-in ---
+step "Storing DeepSeek API key"
+
+local dropin_dir="$HOME/.config/systemd/user/openclaw-gateway.service.d"
+mkdir -p "$dropin_dir"
+
+local dropin_file="$dropin_dir/deepseek.conf"
+cat > "$dropin_file" <<EOF
+[Service]
+Environment="DEEPSEEK_API_KEY=$api_key"
+EOF
+chmod 600 "$dropin_file"
+
+# --- Optional CLI registration ---
+if command -v openclaw &>/dev/null; then
+    info "Attempting to register DeepSeek provider with openclaw..."
+    openclaw models auth --provider deepseek --key "$api_key" 2>/dev/null || {
+        warn "Could not register via CLI. Key is stored in environment; provider will be configured via config."
+    }
+fi
+
+# --- Timezone for cron job ---
+local user_tz=""
+local detected_tz=""
+detected_tz=$(timedatectl show -p Timezone --value 2>/dev/null || echo "")
+
+if [[ -n "$detected_tz" ]]; then
+    info "Detected timezone: $detected_tz"
+    if confirm "Use $detected_tz for cron schedules?"; then
+        user_tz="$detected_tz"
+    fi
+fi
+
+if [[ -z "$user_tz" ]]; then
+    read -rp "Enter your timezone (e.g., Europe/Lisbon, America/New_York): " user_tz
+    if [[ -z "$user_tz" ]]; then
+        user_tz="UTC"
+        warn "No timezone provided. Defaulting to UTC."
+    fi
+fi
+
+export USER_TZ="$user_tz"
+
+# --- Merge config ---
+step "Applying DeepSeek configuration"
+
+local fragment_dir
+fragment_dir="$(dirname "${BASH_SOURCE[0]}")"
+export DEEPSEEK_API_KEY="$api_key"
+local rendered_fragment
+rendered_fragment=$(mktemp)
+template_render "$fragment_dir/config-fragment.json" "$rendered_fragment"
+config_merge "$rendered_fragment"
+rm -f "$rendered_fragment"
+success "DeepSeek model provider configured in openclaw.json"
+
+# --- Cron job info ---
+echo ""
+info "DeepSeek includes an overnight code review cron job (3:00 AM $user_tz)."
+info "The job uses DeepSeek for initial analysis, then verifies with the primary model."
+info "Cron jobs will be registered automatically after setup completes."
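+
+# Verification sketch (assumes the default config location used by this repo's
+# other scripts; adjust paths if your install differs):
+#   jq '.models.providers.deepseek.models[0].id' ~/.openclaw/openclaw.json   # expect "deepseek-chat"
+#   systemctl --user show openclaw-gateway.service -p Environment | grep DEEPSEEK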
diff --git a/bates-enhance/integrations/elevenlabs/config-fragment.json b/bates-enhance/integrations/elevenlabs/config-fragment.json
new file mode 100644
index 0000000..8620926
--- /dev/null
+++ b/bates-enhance/integrations/elevenlabs/config-fragment.json
@@ -0,0 +1,21 @@
+{
+  "plugins": {
+    "entries": {
+      "voice-call": {
+        "config": {
+          "tts": {
+            "provider": "elevenlabs",
+            "elevenlabs": {
+              "voiceId": "{{ELEVENLABS_VOICE_ID}}",
+              "modelId": "eleven_flash_v2_5"
+            }
+          }
+        }
+      }
+    }
+  },
+  "talk": {
+    "voiceId": "{{ELEVENLABS_VOICE_ID}}",
+    "modelId": "eleven_flash_v2_5"
+  }
+}
diff --git a/bates-enhance/integrations/elevenlabs/setup.sh b/bates-enhance/integrations/elevenlabs/setup.sh
new file mode 100644
index 0000000..3e6e7d5
--- /dev/null
+++ b/bates-enhance/integrations/elevenlabs/setup.sh
@@ -0,0 +1,114 @@
+# setup.sh -- ElevenLabs Voice Clone integration for Bates
+# Sourced by bates-enhance.sh; has access to common.sh and config-merge.sh functions.
+#
+# Configures ElevenLabs TTS with a cloned or selected voice for Bates voice output.
+
+step "ElevenLabs Voice Clone Configuration"
+
+info "You'll need an ElevenLabs API key and a voice ID."
+info "Get your API key from: https://elevenlabs.io/app/settings/api-keys"
+info "Find voice IDs in the ElevenLabs Voice Library or from your cloned voices."
+echo ""
+
+# --- API Key ---
+local api_key=""
+while [[ -z "$api_key" ]]; do
+    read -rp "ElevenLabs API key: " api_key
+    if [[ -z "$api_key" ]]; then
+        warn "API key cannot be empty."
+        continue
+    fi
+    if ! validate_elevenlabs_key "$api_key"; then
+        warn "Key format looks unexpected (usually starts with sk_ and is 32+ chars)."
+        if ! confirm "Use this key anyway?"; then
+            api_key=""
+            continue
+        fi
+    fi
+done
+
+# --- Voice ID ---
+local voice_id=""
+while [[ -z "$voice_id" ]]; do
+    read -rp "ElevenLabs voice ID (from voice clone or library): " voice_id
+    if [[ -z "$voice_id" ]]; then
+        warn "Voice ID cannot be empty."
+        continue
+    fi
+done
+
+# --- Voice model selection ---
+echo ""
+info "Available TTS models:"
+echo "  1) eleven_flash_v2_5      (fast, low latency -- recommended for calls)"
+echo "  2) eleven_multilingual_v2 (higher quality, multilingual)"
+echo "  3) eleven_turbo_v2_5      (balanced speed and quality)"
+local model_choice=""
+read -rp "Select model [1]: " model_choice
+local tts_model="eleven_flash_v2_5"
+case "$model_choice" in
+    2) tts_model="eleven_multilingual_v2" ;;
+    3) tts_model="eleven_turbo_v2_5" ;;
+    *) tts_model="eleven_flash_v2_5" ;;
+esac
+info "Using TTS model: $tts_model"
+
+# --- Store API key in systemd drop-in ---
+step "Storing ElevenLabs API key"
+
+local dropin_dir="$HOME/.config/systemd/user/openclaw-gateway.service.d"
+mkdir -p "$dropin_dir"
+
+local dropin_file="$dropin_dir/elevenlabs.conf"
+cat > "$dropin_file" <<EOF
+[Service]
+Environment="ELEVENLABS_API_KEY=$api_key"
+EOF
+chmod 600 "$dropin_file"
+
+local dropin_file="$dropin_dir/github.conf"
+cat > "$dropin_file" <<EOF
+[Service]
+Environment="GITHUB_TOKEN=$github_token"
+EOF
+chmod 600 "$dropin_file"
+
+if command -v mcporter &>/dev/null; then
+    info "mcporter found. Registering GitHub server..."
+    mcporter add github --token "$github_token" 2>/dev/null || true
+    success "GitHub MCP server registered via mcporter"
+else
+    info "mcporter not found. Configuring MCP server directly in openclaw.json."
+fi
+
+# --- Merge config ---
+local fragment_dir
+fragment_dir="$(dirname "${BASH_SOURCE[0]}")"
+
+# Store token in env.vars (MCP server is registered via mcporter above)
+config_merge_inline "{
+    \"env\": {
+        \"vars\": {
+            \"GITHUB_TOKEN\": \"$github_token\"
+        }
+    }
+}"
+success "GitHub integration configured in openclaw.json"
+
+echo ""
+info "GitHub integration is ready. Bates can now access repositories, issues, and PRs."
+info "Try asking Bates to list your repositories or check recent issues." diff --git a/bates-enhance/integrations/github/workspace-additions/refs/github.md b/bates-enhance/integrations/github/workspace-additions/refs/github.md new file mode 100644 index 0000000..9e2fec9 --- /dev/null +++ b/bates-enhance/integrations/github/workspace-additions/refs/github.md @@ -0,0 +1,50 @@ +# GitHub Integration Reference + +## Overview +Bates has access to GitHub repositories via an MCP (Model Context Protocol) server. +This enables direct interaction with repositories, issues, pull requests, and code. + +## Authentication +- Uses a Personal Access Token (PAT) stored in the gateway environment +- Token is available as `GITHUB_TOKEN` + +## Available Operations + +### Repositories +- List repositories for authenticated user or organization +- Get repository details (description, language, stars, etc.) +- Browse repository contents and file trees +- Read file contents at any branch or commit + +### Issues +- List open/closed issues for a repository +- Create new issues with labels and assignees +- Comment on existing issues +- Search issues across repositories + +### Pull Requests +- List open/closed PRs +- Get PR details, diffs, and review status +- Create pull requests +- Add review comments + +### Code Search +- Search code across repositories +- Find files by name or content +- Search commits by message + +### Branches & Commits +- List branches and tags +- Get commit history and details +- Compare branches + +## Usage Examples +- "List my open PRs across all repos" +- "Show recent issues in the main project" +- "Find all files that reference the config schema" +- "Create an issue for the bug we discussed" + +## Limitations +- Rate limits apply (5000 requests/hour for authenticated users) +- File content reads are limited to files under 1MB via the API +- Large diffs may be truncated diff --git a/bates-enhance/integrations/google/config-fragment.json b/bates-enhance/integrations/google/config-fragment.json new file mode 100644 index 0000000..9d85d79 --- /dev/null +++ b/bates-enhance/integrations/google/config-fragment.json @@ -0,0 +1,9 @@ +{ + "env": { + "vars": { + "GOOGLE_CLIENT_ID": "{{GOOGLE_CLIENT_ID}}", + "GOOGLE_CLIENT_SECRET": "{{GOOGLE_CLIENT_SECRET}}", + "GOOGLE_REFRESH_TOKEN": "{{GOOGLE_REFRESH_TOKEN}}" + } + } +} diff --git a/bates-enhance/integrations/google/scripts/google-auth.sh b/bates-enhance/integrations/google/scripts/google-auth.sh new file mode 100644 index 0000000..bb66234 --- /dev/null +++ b/bates-enhance/integrations/google/scripts/google-auth.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash +# google-auth.sh -- Google OAuth 2.0 authorization flow +# +# Opens a browser for the user to grant consent, captures the authorization +# code, and exchanges it for access + refresh tokens. +# +# Can be sourced (exports GOOGLE_REFRESH_TOKEN) or run directly. +# +# Required environment variables: +# GOOGLE_CLIENT_ID +# GOOGLE_CLIENT_SECRET + +set -euo pipefail + +# ── Validate inputs ─────────────────────────────────────────────── +if [[ -z "${GOOGLE_CLIENT_ID:-}" ]]; then + echo "ERROR: GOOGLE_CLIENT_ID is not set." >&2 + return 1 2>/dev/null || exit 1 +fi +if [[ -z "${GOOGLE_CLIENT_SECRET:-}" ]]; then + echo "ERROR: GOOGLE_CLIENT_SECRET is not set." 
>&2 + return 1 2>/dev/null || exit 1 +fi + +# ── Build the authorization URL ─────────────────────────────────── +REDIRECT_URI="urn:ietf:wg:oauth:2.0:oob" +SCOPES="https://www.googleapis.com/auth/calendar https://www.googleapis.com/auth/gmail.modify" +SCOPES_ENCODED=$(python3 -c "import urllib.parse; print(urllib.parse.quote('$SCOPES'))") + +AUTH_URL="https://accounts.google.com/o/oauth2/v2/auth?client_id=${GOOGLE_CLIENT_ID}&redirect_uri=${REDIRECT_URI}&response_type=code&scope=${SCOPES_ENCODED}&access_type=offline&prompt=consent" + +echo "" +echo "Opening browser for Google OAuth consent..." +echo "" +echo "If the browser does not open, visit this URL manually:" +echo " $AUTH_URL" +echo "" + +# Try to open the browser (works on Linux with xdg-open, macOS with open) +if command -v xdg-open &>/dev/null; then + xdg-open "$AUTH_URL" 2>/dev/null || true +elif command -v open &>/dev/null; then + open "$AUTH_URL" 2>/dev/null || true +fi + +# ── Capture the authorization code ──────────────────────────────── +read -rp "Paste the authorization code here: " AUTH_CODE + +if [[ -z "${AUTH_CODE:-}" ]]; then + echo "ERROR: No authorization code provided." >&2 + return 1 2>/dev/null || exit 1 +fi + +# ── Exchange code for tokens ────────────────────────────────────── +echo "Exchanging authorization code for tokens..." + +TOKEN_RESPONSE=$(curl -s -X POST "https://oauth2.googleapis.com/token" \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d "code=${AUTH_CODE}" \ + -d "client_id=${GOOGLE_CLIENT_ID}" \ + -d "client_secret=${GOOGLE_CLIENT_SECRET}" \ + -d "redirect_uri=${REDIRECT_URI}" \ + -d "grant_type=authorization_code") + +# Parse tokens +ACCESS_TOKEN=$(echo "$TOKEN_RESPONSE" | python3 -c "import json,sys; print(json.load(sys.stdin).get('access_token',''))" 2>/dev/null || echo "") +GOOGLE_REFRESH_TOKEN=$(echo "$TOKEN_RESPONSE" | python3 -c "import json,sys; print(json.load(sys.stdin).get('refresh_token',''))" 2>/dev/null || echo "") +TOKEN_ERROR=$(echo "$TOKEN_RESPONSE" | python3 -c "import json,sys; print(json.load(sys.stdin).get('error_description',''))" 2>/dev/null || echo "") + +if [[ -n "$TOKEN_ERROR" ]]; then + echo "ERROR: Token exchange failed: $TOKEN_ERROR" >&2 + return 1 2>/dev/null || exit 1 +fi + +if [[ -z "$GOOGLE_REFRESH_TOKEN" ]]; then + echo "WARNING: No refresh token in response. You may need to revoke and re-authorize." >&2 + echo " Visit: https://myaccount.google.com/permissions" >&2 + return 1 2>/dev/null || exit 1 +fi + +if [[ -n "$ACCESS_TOKEN" ]]; then + echo "Access token obtained (valid for ~1 hour)." +fi + +echo "Refresh token obtained." + +# Export for the caller (if sourced) +export GOOGLE_REFRESH_TOKEN diff --git a/bates-enhance/integrations/google/scripts/google-refresh.sh b/bates-enhance/integrations/google/scripts/google-refresh.sh new file mode 100644 index 0000000..b7a125b --- /dev/null +++ b/bates-enhance/integrations/google/scripts/google-refresh.sh @@ -0,0 +1,106 @@ +#!/usr/bin/env bash +# google-refresh.sh -- Refresh the Google OAuth token +# +# Reads the current refresh token, exchanges it for a new access token, +# and updates the stored refresh token if Google rotates it. +# +# Intended to be run as a daily cron job. +# Logs to stdout (redirect to a log file in crontab). 
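+#
+# Example crontab entry (this is the line installed by the google setup script,
+# assuming ENHANCE_DIR resolves to ~/.openclaw/enhance as in the refs docs):
+#   0 4 * * * bash ~/.openclaw/enhance/integrations/google/scripts/google-refresh.sh >> ~/.openclaw/logs/google-refresh.log 2>&1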
+
+set -euo pipefail
+
+CREDS_FILE="$HOME/.openclaw/google-credentials.json"
+TOKEN_FILE="$HOME/.openclaw/google-refresh-token"
+DROPIN_FILE="$HOME/.config/systemd/user/openclaw-gateway.service.d/google-oauth.conf"
+
+LOG_PREFIX="[google-refresh $(date -Iseconds)]"
+
+# ── Load credentials ──────────────────────────────────────────────
+if [[ ! -f "$CREDS_FILE" ]]; then
+  echo "$LOG_PREFIX ERROR: Credentials file not found: $CREDS_FILE"
+  exit 1
+fi
+
+GOOGLE_CLIENT_ID=$(python3 -c "import json; print(json.load(open('$CREDS_FILE'))['client_id'])")
+GOOGLE_CLIENT_SECRET=$(python3 -c "import json; print(json.load(open('$CREDS_FILE'))['client_secret'])")
+
+if [[ ! -f "$TOKEN_FILE" ]]; then
+  echo "$LOG_PREFIX ERROR: Refresh token file not found: $TOKEN_FILE"
+  exit 1
+fi
+
+CURRENT_REFRESH_TOKEN=$(cat "$TOKEN_FILE")
+
+if [[ -z "$CURRENT_REFRESH_TOKEN" ]]; then
+  echo "$LOG_PREFIX ERROR: Refresh token is empty."
+  exit 1
+fi
+
+# ── Exchange refresh token ────────────────────────────────────────
+echo "$LOG_PREFIX Refreshing Google OAuth token..."
+
+TOKEN_RESPONSE=$(curl -s -X POST "https://oauth2.googleapis.com/token" \
+  -H "Content-Type: application/x-www-form-urlencoded" \
+  -d "client_id=${GOOGLE_CLIENT_ID}" \
+  -d "client_secret=${GOOGLE_CLIENT_SECRET}" \
+  -d "refresh_token=${CURRENT_REFRESH_TOKEN}" \
+  -d "grant_type=refresh_token")
+
+ACCESS_TOKEN=$(echo "$TOKEN_RESPONSE" | python3 -c "import json,sys; print(json.load(sys.stdin).get('access_token',''))" 2>/dev/null || echo "")
+NEW_REFRESH_TOKEN=$(echo "$TOKEN_RESPONSE" | python3 -c "import json,sys; print(json.load(sys.stdin).get('refresh_token',''))" 2>/dev/null || echo "")
+TOKEN_ERROR=$(echo "$TOKEN_RESPONSE" | python3 -c "import json,sys; print(json.load(sys.stdin).get('error_description',''))" 2>/dev/null || echo "")
+
+if [[ -n "$TOKEN_ERROR" ]]; then
+  echo "$LOG_PREFIX ERROR: Token refresh failed: $TOKEN_ERROR"
+  exit 1
+fi
+
+if [[ -z "$ACCESS_TOKEN" ]]; then
+  echo "$LOG_PREFIX ERROR: No access token in response."
+  exit 1
+fi
+
+echo "$LOG_PREFIX Access token refreshed successfully."
+
+# ── Update stored refresh token if rotated ────────────────────────
+if [[ -n "$NEW_REFRESH_TOKEN" && "$NEW_REFRESH_TOKEN" != "$CURRENT_REFRESH_TOKEN" ]]; then
+  echo "$LOG_PREFIX Refresh token was rotated. Updating stored token."
+  echo "$NEW_REFRESH_TOKEN" > "$TOKEN_FILE"
+  chmod 600 "$TOKEN_FILE"
+
+  # Update the systemd drop-in
+  if [[ -f "$DROPIN_FILE" ]]; then
+    cat > "$DROPIN_FILE" <<EOF
+[Service]
+Environment="GOOGLE_CLIENT_ID=$GOOGLE_CLIENT_ID"
+Environment="GOOGLE_CLIENT_SECRET=$GOOGLE_CLIENT_SECRET"
+Environment="GOOGLE_REFRESH_TOKEN=$NEW_REFRESH_TOKEN"
+EOF
+    chmod 600 "$DROPIN_FILE"
+    systemctl --user daemon-reload 2>/dev/null || true
+    echo "$LOG_PREFIX Systemd drop-in updated."
+  fi
+
+  # Update openclaw.json MCP env
+  python3 -c "
+import json
+config_path = '$HOME/.openclaw/openclaw.json'
+try:
+    with open(config_path) as f:
+        config = json.load(f)
+    servers = config.get('mcp', {}).get('servers', {})
+    if 'google-calendar' in servers:
+        servers['google-calendar']['env']['GOOGLE_REFRESH_TOKEN'] = '$NEW_REFRESH_TOKEN'
+    with open(config_path, 'w') as f:
+        json.dump(config, f, indent=2)
+    print('$LOG_PREFIX openclaw.json updated with new refresh token.')
+except Exception as e:
+    print(f'$LOG_PREFIX WARNING: Could not update openclaw.json: {e}')
+" 2>&1
+
+  echo "$LOG_PREFIX Token rotation complete."
+else
+  echo "$LOG_PREFIX Refresh token unchanged (no rotation)."
+fi
+
+echo "$LOG_PREFIX Done."
diff --git a/bates-enhance/integrations/google/setup.sh b/bates-enhance/integrations/google/setup.sh
new file mode 100644
index 0000000..fbe5bd6
--- /dev/null
+++ b/bates-enhance/integrations/google/setup.sh
@@ -0,0 +1,178 @@
+# setup.sh -- Google Calendar / Gmail integration for Bates
+# Sourced by bates-enhance.sh -- do NOT run directly.
+#
+# Configures Google OAuth credentials and registers an MCP server so Bates
+# can access Google Calendar and Gmail via the mcporter bridge.
+#
+# Prerequisites:
+#   - A Google Cloud project with Calendar and Gmail APIs enabled
+#   - An OAuth 2.0 Client ID (Desktop or Web type)
+#   - mcporter installed (npm i -g mcporter)
+
+# -------------------------------------------------------------------
+# Step 1 -- Check mcporter
+# -------------------------------------------------------------------
+step "Check mcporter installation"
+
+if command -v mcporter &>/dev/null; then
+    success "mcporter is installed."
+else
+    warn "mcporter is not installed."
+    info "Install it with: npm install -g mcporter"
+    if ! confirm "Continue anyway (you can install mcporter later)?"; then
+        fatal "Aborted. Install mcporter first."
+    fi
+fi
+
+# -------------------------------------------------------------------
+# Step 2 -- Collect Google Cloud project details
+# -------------------------------------------------------------------
+step "Collect Google Cloud credentials"
+
+info "You need a Google Cloud project with the Calendar and Gmail APIs enabled."
+info "Create OAuth credentials at: https://console.cloud.google.com/apis/credentials"
+echo ""
+
+prompt_default "Google Cloud Project ID" "" GOOGLE_PROJECT_ID
+if [[ -z "${GOOGLE_PROJECT_ID:-}" ]]; then
+    fatal "Google Cloud Project ID is required."
+fi
+
+prompt_default "OAuth Client ID" "" GOOGLE_CLIENT_ID
+if [[ -z "${GOOGLE_CLIENT_ID:-}" ]]; then
+    fatal "OAuth Client ID is required."
+fi
+
+prompt_default "OAuth Client Secret" "" GOOGLE_CLIENT_SECRET
+if [[ -z "${GOOGLE_CLIENT_SECRET:-}" ]]; then
+    fatal "OAuth Client Secret is required."
+fi
+
+success "Credentials collected."
+
+# -------------------------------------------------------------------
+# Step 3 -- Save credentials file (chmod 600)
+# -------------------------------------------------------------------
+step "Save Google credentials"
+
+CREDS_FILE="$HOME/.openclaw/google-credentials.json"
+
+cat > "$CREDS_FILE" <<EOF
+{
+  "project_id": "$GOOGLE_PROJECT_ID",
+  "client_id": "$GOOGLE_CLIENT_ID",
+  "client_secret": "$GOOGLE_CLIENT_SECRET"
+}
+EOF
+chmod 600 "$CREDS_FILE"
+success "Credentials saved to $CREDS_FILE (mode 600)."
+
+# -------------------------------------------------------------------
+# Step 4 -- Run the OAuth authorization flow
+# -------------------------------------------------------------------
+step "Authorize Google account access"
+
+TOKEN_FILE="$HOME/.openclaw/google-refresh-token"
+GOOGLE_REFRESH_TOKEN="${GOOGLE_REFRESH_TOKEN:-}"
+
+if confirm "Run the Google OAuth flow now (opens a browser)?"; then
+    # google-auth.sh exports GOOGLE_REFRESH_TOKEN when sourced
+    source "$ENHANCE_DIR/integrations/google/scripts/google-auth.sh" || warn "OAuth flow did not complete."
+fi
+
+if [[ -n "${GOOGLE_REFRESH_TOKEN:-}" ]]; then
+    echo "$GOOGLE_REFRESH_TOKEN" > "$TOKEN_FILE"
+    chmod 600 "$TOKEN_FILE"
+    success "Refresh token saved to $TOKEN_FILE (mode 600)."
+fi
+
+# -------------------------------------------------------------------
+# Step 5 -- Store secrets in systemd drop-in (chmod 600)
+# -------------------------------------------------------------------
+step "Store Google secrets in systemd environment"
+
+DROPIN_DIR="$HOME/.config/systemd/user/openclaw-gateway.service.d"
+mkdir -p "$DROPIN_DIR"
+
+SECRETS_DROPIN="$DROPIN_DIR/google-oauth.conf"
+cat > "$SECRETS_DROPIN" <<EOF
+[Service]
+Environment="GOOGLE_CLIENT_ID=$GOOGLE_CLIENT_ID"
+Environment="GOOGLE_CLIENT_SECRET=$GOOGLE_CLIENT_SECRET"
+Environment="GOOGLE_REFRESH_TOKEN=${GOOGLE_REFRESH_TOKEN:-}"
+EOF
+chmod 600 "$SECRETS_DROPIN"
+systemctl --user daemon-reload 2>/dev/null || warn "Could not reload systemd daemon."
+
+# -------------------------------------------------------------------
+# Step 6 -- Merge MCP server config
+# -------------------------------------------------------------------
+step "Store Google credentials in openclaw.json"
+
+FRAGMENT_DIR="$ENHANCE_DIR/integrations/google"
+RENDERED_FRAGMENT=$(mktemp)
+
+export GOOGLE_CLIENT_ID GOOGLE_CLIENT_SECRET GOOGLE_REFRESH_TOKEN
+template_render "$FRAGMENT_DIR/config-fragment.json" "$RENDERED_FRAGMENT"
+config_merge "$RENDERED_FRAGMENT"
+rm -f "$RENDERED_FRAGMENT"
+
+success "Google credentials merged into config."
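+
+# Verification sketch (key names from config-fragment.json; config path as
+# used by google-refresh.sh):
+#   jq '.env.vars | has("GOOGLE_CLIENT_ID")' ~/.openclaw/openclaw.json   # expect: true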
+ +# ------------------------------------------------------------------- +# Step 7 -- Set up token refresh cron +# ------------------------------------------------------------------- +step "Set up token refresh schedule" + +REFRESH_SCRIPT="$ENHANCE_DIR/integrations/google/scripts/google-refresh.sh" + +if [[ -f "$REFRESH_SCRIPT" ]]; then + info "Installing daily cron job to refresh the Google OAuth token." + + CRON_LINE="0 4 * * * bash $REFRESH_SCRIPT >> $HOME/.openclaw/logs/google-refresh.log 2>&1" + + # Add to crontab if not already present + (crontab -l 2>/dev/null || true) | grep -qF "google-refresh.sh" || { + (crontab -l 2>/dev/null || true; echo "$CRON_LINE") | crontab - + success "Token refresh cron installed (runs daily at 04:00)." + } +else + warn "Refresh script not found. Token refresh cron not installed." +fi + +# ------------------------------------------------------------------- +# Summary +# ------------------------------------------------------------------- +echo "" +success "Google Calendar / Gmail integration setup complete." +info "Bates can now access Google Calendar and Gmail through the MCP server." +if [[ -z "${GOOGLE_REFRESH_TOKEN:-}" ]]; then + warn "No refresh token was set. Run the OAuth flow to enable access:" + info " bash $ENHANCE_DIR/integrations/google/scripts/google-auth.sh" +fi diff --git a/bates-enhance/integrations/google/workspace-additions/refs/google-api.md b/bates-enhance/integrations/google/workspace-additions/refs/google-api.md new file mode 100644 index 0000000..5e630a7 --- /dev/null +++ b/bates-enhance/integrations/google/workspace-additions/refs/google-api.md @@ -0,0 +1,117 @@ +# Google Calendar and Gmail Integration + +Reference documentation for Bates's Google Calendar and Gmail capabilities, +provided through the `google-calendar` MCP server via mcporter. + +## Overview + +The Google integration gives Bates read/write access to: + +- **Google Calendar** -- View, create, update, and delete calendar events. +- **Gmail** -- Read, search, compose, and send email messages. + +Access is granted through OAuth 2.0 with offline (refresh token) access. +Tokens are refreshed automatically by a daily cron job. + +## Google Calendar Operations + +### List Events + +Retrieve upcoming events from a calendar. + +| Parameter | Description | +|---------------|-----------------------------------------------| +| `calendarId` | Calendar ID (default: `primary`) | +| `timeMin` | Start of time range (RFC 3339) | +| `timeMax` | End of time range (RFC 3339) | +| `maxResults` | Maximum number of events to return | +| `q` | Free-text search term | + +### Create Event + +Create a new calendar event. + +| Parameter | Description | +|---------------|-----------------------------------------------| +| `calendarId` | Calendar ID (default: `primary`) | +| `summary` | Event title | +| `description` | Event description / notes | +| `start` | Start date/time (with timezone) | +| `end` | End date/time (with timezone) | +| `attendees` | List of attendee email addresses | +| `location` | Event location (physical or virtual) | +| `reminders` | Custom reminder overrides | + +### Update Event + +Modify an existing event by its event ID. + +### Delete Event + +Remove an event from the calendar. + +## Gmail Operations + +### Search Messages + +Search the mailbox using Gmail's query syntax. 
+ +| Query Example | Description | +|---------------------------|------------------------------------| +| `from:someone` | Messages from a sender | +| `subject:meeting` | Messages with subject containing | +| `has:attachment` | Messages with attachments | +| `newer_than:7d` | Messages from the last 7 days | +| `is:unread` | Unread messages only | +| `label:inbox` | Messages in the inbox | + +### Read Message + +Retrieve the full content of a message by its ID, including headers, +body (plain text and HTML), and attachment metadata. + +### Send Message + +Compose and send an email. + +| Parameter | Description | +|---------------|-----------------------------------------------| +| `to` | Recipient email address(es) | +| `cc` | CC recipients | +| `bcc` | BCC recipients | +| `subject` | Email subject line | +| `body` | Email body (plain text or HTML) | +| `attachments` | List of file attachments | + +### Reply to Message + +Reply to an existing message thread, preserving threading headers. + +### Draft Management + +Create, list, update, and send draft messages. + +## Authentication + +- **OAuth 2.0** with `offline` access type (refresh token). +- Scopes: `calendar`, `gmail.modify`. +- Credentials stored at `~/.openclaw/google-credentials.json` (mode 600). +- Refresh token stored at `~/.openclaw/google-refresh-token` (mode 600). +- Daily cron job refreshes the token at 04:00 to prevent expiry. +- If the token is rotated by Google, the cron job updates all stored + copies (token file, systemd drop-in, openclaw.json). + +## Troubleshooting + +- **"Token has been expired or revoked"**: The refresh token is invalid. + Re-run the OAuth flow: + ``` + bash ~/.openclaw/enhance/integrations/google/scripts/google-auth.sh + ``` +- **"Access Not Configured"**: Enable the Calendar API and Gmail API in + the Google Cloud Console for your project. +- **"Insufficient Permission"**: The OAuth consent may not have included + the required scopes. Revoke access at + https://myaccount.google.com/permissions and re-authorize. +- **MCP server not starting**: Verify `mcporter` is installed globally: + `npm list -g mcporter`. diff --git a/bates-enhance/integrations/image/config-fragment.json b/bates-enhance/integrations/image/config-fragment.json new file mode 100644 index 0000000..170e9d5 --- /dev/null +++ b/bates-enhance/integrations/image/config-fragment.json @@ -0,0 +1,7 @@ +{ + "env": { + "vars": { + "OPENAI_API_KEY": "{{OPENAI_API_KEY}}" + } + } +} diff --git a/bates-enhance/integrations/image/scripts/generate-image.py b/bates-enhance/integrations/image/scripts/generate-image.py new file mode 100644 index 0000000..e85d3e6 --- /dev/null +++ b/bates-enhance/integrations/image/scripts/generate-image.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python3 +"""generate-image.py -- Unified image generation for OpenAI and Google Imagen. + +Generates images via the OpenAI (gpt-image-1) or Google (Imagen 4.0) API +and writes the result to a file. Outputs JSON metadata to stdout. 
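+
+Example stdout (shape mirrors the metadata dict returned by the generator
+functions below):
+    {"file": "/tmp/sunset.png", "prompt": "a sunset over mountains",
+     "provider": "openai", "model": "gpt-image-1"}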
+ +Environment variables: + OPENAI_API_KEY - Required for --provider openai + GOOGLE_GENERATIVE_AI_API_KEY - Required for --provider google + +Usage: + python3 generate-image.py --provider openai --prompt "a sunset over mountains" --output /tmp/sunset.png + python3 generate-image.py --provider google --prompt "a cat in a hat" --output /tmp/cat.png +""" + +import argparse +import base64 +import json +import os +import sys +from pathlib import Path + + +DEFAULT_MODELS = { + "openai": "gpt-image-1", + "google": "imagen-4.0-generate-001", +} + + +def generate_openai(prompt: str, output: str, model: str) -> dict: + """Generate an image using the OpenAI API.""" + try: + from openai import OpenAI + except ImportError: + print("ERROR: openai package not installed. Run: pip install openai", file=sys.stderr) + sys.exit(1) + + api_key = os.environ.get("OPENAI_API_KEY") + if not api_key: + print("ERROR: OPENAI_API_KEY environment variable is not set.", file=sys.stderr) + sys.exit(1) + + client = OpenAI(api_key=api_key) + + response = client.images.generate( + model=model, + prompt=prompt, + n=1, + size="1024x1024", + ) + + image_data = response.data[0] + + # gpt-image-1 returns b64_json by default + if hasattr(image_data, "b64_json") and image_data.b64_json: + img_bytes = base64.b64decode(image_data.b64_json) + Path(output).write_bytes(img_bytes) + elif hasattr(image_data, "url") and image_data.url: + import urllib.request + urllib.request.urlretrieve(image_data.url, output) + else: + print("ERROR: No image data in OpenAI response.", file=sys.stderr) + sys.exit(1) + + return { + "file": str(Path(output).resolve()), + "prompt": prompt, + "provider": "openai", + "model": model, + } + + +def generate_google(prompt: str, output: str, model: str) -> dict: + """Generate an image using the Google Generative AI API.""" + try: + from google import genai + from google.genai import types + except ImportError: + print( + "ERROR: google-genai package not installed. 
Run: pip install google-genai", + file=sys.stderr, + ) + sys.exit(1) + + api_key = os.environ.get("GOOGLE_GENERATIVE_AI_API_KEY") + if not api_key: + print( + "ERROR: GOOGLE_GENERATIVE_AI_API_KEY environment variable is not set.", + file=sys.stderr, + ) + sys.exit(1) + + client = genai.Client(api_key=api_key) + + response = client.models.generate_images( + model=model, + prompt=prompt, + config=types.GenerateImagesConfig(number_of_images=1), + ) + + if not response.generated_images: + print("ERROR: No images returned by Google API.", file=sys.stderr) + sys.exit(1) + + image = response.generated_images[0] + Path(output).write_bytes(image.image.image_bytes) + + return { + "file": str(Path(output).resolve()), + "prompt": prompt, + "provider": "google", + "model": model, + } + + +def main(): + parser = argparse.ArgumentParser(description="Generate images via OpenAI or Google Imagen") + parser.add_argument( + "--provider", + choices=["openai", "google"], + default="openai", + help="Image generation provider (default: openai)", + ) + parser.add_argument( + "--prompt", + required=True, + help="Text prompt for image generation", + ) + parser.add_argument( + "--output", + required=True, + help="Output file path for the generated image", + ) + parser.add_argument( + "--model", + default=None, + help="Model name (default: provider-specific)", + ) + + args = parser.parse_args() + + model = args.model or DEFAULT_MODELS[args.provider] + + # Ensure output directory exists + Path(args.output).parent.mkdir(parents=True, exist_ok=True) + + if args.provider == "openai": + result = generate_openai(args.prompt, args.output, model) + else: + result = generate_google(args.prompt, args.output, model) + + # Output JSON to stdout + print(json.dumps(result)) + + +if __name__ == "__main__": + main() diff --git a/bates-enhance/integrations/image/setup.sh b/bates-enhance/integrations/image/setup.sh new file mode 100644 index 0000000..f16a78a --- /dev/null +++ b/bates-enhance/integrations/image/setup.sh @@ -0,0 +1,160 @@ +# setup.sh -- Image Generation integration for Bates +# Sourced by bates-enhance.sh; has access to common.sh and config-merge.sh functions. +# +# Configures AI image generation with OpenAI (DALL-E / gpt-image-1) and/or +# Google (Imagen) providers. + +step "Image Generation Configuration" + +info "Bates can generate images using OpenAI and/or Google Imagen." +info "You can enable one or both providers." +echo "" + +local use_openai=false +local use_google=false +local providers_json="[]" +local dropin_dir="$HOME/.config/systemd/user/openclaw-gateway.service.d" +mkdir -p "$dropin_dir" + +# --- Provider selection --- +echo "Which image generation providers would you like to enable?" +echo " 1) OpenAI only (gpt-image-1 / DALL-E)" +echo " 2) Google only (Imagen 4.0)" +echo " 3) Both OpenAI and Google" +echo "" +local provider_choice="" +read -rp "Select [3]: " provider_choice + +case "$provider_choice" in + 1) + use_openai=true + providers_json='["openai"]' + ;; + 2) + use_google=true + providers_json='["google"]' + ;; + *) + use_openai=true + use_google=true + providers_json='["openai", "google"]' + ;; +esac + +# --- OpenAI setup --- +if $use_openai; then + step "OpenAI API Key for Image Generation" + + info "Get your API key from: https://platform.openai.com/api-keys" + echo "" + + local openai_key="" + while [[ -z "$openai_key" ]]; do + read -rp "OpenAI API key: " openai_key + if [[ -z "$openai_key" ]]; then + warn "API key cannot be empty." + continue + fi + if ! 
validate_openai_key "$openai_key"; then
+            warn "Key format looks unexpected (usually starts with sk-)."
+            if ! confirm "Use this key anyway?"; then
+                openai_key=""
+                continue
+            fi
+        fi
+    done
+
+    # Store in systemd drop-in
+    local dropin_file="$dropin_dir/openai.conf"
+    cat > "$dropin_file" <<EOF
+[Service]
+Environment="OPENAI_API_KEY=$openai_key"
+EOF
+    chmod 600 "$dropin_file"
+fi
+
+# --- Google setup ---
+if $use_google; then
+    step "Google API Key for Image Generation"
+
+    local google_key=""
+    while [[ -z "$google_key" ]]; do
+        read -rp "Google Generative AI API key: " google_key
+        if [[ -z "$google_key" ]]; then
+            warn "API key cannot be empty."
+            continue
+        fi
+    done
+
+    # Store in systemd drop-in
+    local dropin_file="$dropin_dir/google-genai.conf"
+    cat > "$dropin_file" <<EOF
+[Service]
+Environment="GOOGLE_GENERATIVE_AI_API_KEY=$google_key"
+EOF
+    chmod 600 "$dropin_file"
+fi
+
+```bash
+python3 ~/.openclaw/scripts/generate-image.py \
+  --provider <openai|google> \
+  --prompt "<detailed image description>" \
+  --output "<output file path>"
+```
+
+### Parameters
+- `--provider`: `openai` (default) or `google`
+- `--prompt`: Detailed text description of the desired image
+- `--output`: Full path where the image should be saved
+- `--model`: Optional override (default: `gpt-image-1` for OpenAI, `imagen-4.0-generate-001` for Google)
+
+## Prompt Best Practices
+1. Be specific and detailed about the desired output
+2. Include style direction (photorealistic, watercolor, minimalist, etc.)
+3. Specify composition elements (foreground, background, lighting)
+4. Mention color palette if relevant
+5. For professional use, add "high quality, professional" to the prompt
+
+## Output
+- The script outputs JSON to stdout with file path, prompt, provider, and model
+- Parse the JSON to get the file path for further operations (upload, share, etc.)
+
+## File Naming
+- Use descriptive, kebab-case filenames: `sunset-mountain-landscape.png`
+- General images go to: `drafts/images/`
+- Venture-specific images go to: `drafts/Sales/{Company}/images/`
+
+## Error Handling
+- If a provider fails, try the other provider
+- Check that the required API key environment variable is set
+- Ensure the output directory exists (script creates it automatically)
diff --git a/bates-enhance/integrations/m365/config-fragment.json b/bates-enhance/integrations/m365/config-fragment.json
new file mode 100644
index 0000000..03f4426
--- /dev/null
+++ b/bates-enhance/integrations/m365/config-fragment.json
@@ -0,0 +1,7 @@
+{
+  "env": {
+    "vars": {
+      "ASSISTANT_EMAIL": "{{ASSISTANT_EMAIL}}"
+    }
+  }
+}
diff --git a/bates-enhance/integrations/m365/cron-jobs-m365.json b/bates-enhance/integrations/m365/cron-jobs-m365.json
new file mode 100644
index 0000000..587cada
--- /dev/null
+++ b/bates-enhance/integrations/m365/cron-jobs-m365.json
@@ -0,0 +1,115 @@
+[
+  {
+    "name": "morning-briefing",
+    "schedule": "30 8 * * 1-5",
+    "tz": "{{USER_TZ}}",
+    "message": "Morning briefing: Check email inbox for urgent items, review today's calendar, summarize top 5 priorities. Deliver via chat.",
+    "delivery": {
+      "channel": "{{PRIMARY_CHANNEL}}",
+      "to": "{{DELIVERY_TARGET}}"
+    }
+  },
+  {
+    "name": "email-draft-prep",
+    "schedule": "0 9 * * 1-5",
+    "tz": "{{USER_TZ}}",
+    "message": "Review flagged/starred emails. Draft responses for any that need replies. Save drafts.",
+    "sessionTarget": "isolated"
+  },
+  {
+    "name": "daily-health-check",
+    "schedule": "0 10 * * *",
+    "tz": "{{USER_TZ}}",
+    "message": "Run full system health check: gateway, MCP servers, disk, memory, cron status. Report issues only.",
+    "sessionTarget": "isolated"
+  },
+  {
+    "name": "daily-review",
+    "schedule": "0 18 * * 1-5",
+    "tz": "{{USER_TZ}}",
+    "message": "End of day review: summarize what was accomplished today, flag items for tomorrow, update observations.",
+    "delivery": {
+      "channel": "{{PRIMARY_CHANNEL}}",
+      "to": "{{DELIVERY_TARGET}}"
+    }
+  },
+  {
+    "name": "stale-email-chaser",
+    "schedule": "0 11 * * 1-5",
+    "tz": "{{USER_TZ}}",
+    "message": "Check for emails older than 3 days without response. List them with suggested actions.",
+    "sessionTarget": "isolated"
+  },
+  {
+    "name": "daily-task-inbox-zero",
+    "schedule": "0 9,14 * * 1-5",
+    "tz": "{{USER_TZ}}",
+    "message": "Review task lists (Planner/To-Do). Identify overdue items. Suggest priorities.",
+    "sessionTarget": "isolated"
+  },
+  {
+    "name": "daily-cost-review",
+    "schedule": "0 22 * * *",
+    "tz": "{{USER_TZ}}",
+    "message": "Check today's API costs via /cost command. Compare with 7-day average. Alert if above threshold.",
+    "sessionTarget": "isolated"
+  },
+  {
+    "name": "weekly-project-reports",
+    "schedule": "0 9 * * 1",
+    "tz": "{{USER_TZ}}",
+    "message": "Generate weekly project status reports for all active projects. Include metrics, blockers, next steps.",
+    "delivery": {
+      "channel": "{{PRIMARY_CHANNEL}}",
+      "to": "{{DELIVERY_TARGET}}"
+    }
+  },
+  {
+    "name": "weekly-managers-report",
+    "schedule": "0 16 * * 5",
+    "tz": "{{USER_TZ}}",
+    "message": "Compile weekly manager's report: key achievements, metrics, issues, plan for next week.",
+    "delivery": {
+      "channel": "{{PRIMARY_CHANNEL}}",
+      "to": "{{DELIVERY_TARGET}}"
+    }
+  },
+  {
+    "name": "project-staleness-check",
+    "schedule": "0 14 * * 3",
+    "tz": "{{USER_TZ}}",
+    "message": "Check all projects for staleness: no updates in 7+ days, no recent commits, stale PRs. Report findings.",
+    "sessionTarget": "isolated"
+  },
+  {
+    "name": "overnight-code-review",
+    "schedule": "0 2 * * *",
+    "tz": "{{USER_TZ}}",
+    "message": "Review recent git commits across all repos. Check for issues, security concerns, style violations. Report significant findings only.",
+    "sessionTarget": "isolated"
+  },
+  {
+    "name": "receipt-processor",
+    "schedule": "0 20 * * *",
+    "tz": "{{USER_TZ}}",
+    "message": "Search recent emails for receipts and invoices. Extract amounts, vendors, dates. Update expense tracking.",
+    "sessionTarget": "isolated"
+  },
+  {
+    "name": "monday-weekly-briefing",
+    "schedule": "0 8 * * 1",
+    "tz": "{{USER_TZ}}",
+    "message": "Monday morning briefing: week ahead calendar, pending tasks, project deadlines, key meetings.",
+    "delivery": {
+      "channel": "{{PRIMARY_CHANNEL}}",
+      "to": "{{DELIVERY_TARGET}}"
+    }
+  },
+  {
+    "name": "search-index-monitor",
+    "schedule": "0 */2 * * *",
+    "tz": "{{USER_TZ}}",
+    "message": "Check search index health: document count, last sync time, any errors. Report issues only.",
+    "sessionTarget": "isolated"
+  }
+]
diff --git a/bates-enhance/integrations/m365/scripts/graph-api.sh b/bates-enhance/integrations/m365/scripts/graph-api.sh
new file mode 100755
index 0000000..f9362ca
--- /dev/null
+++ b/bates-enhance/integrations/m365/scripts/graph-api.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# graph-api.sh — Helper for direct Microsoft Graph API calls via mcporter
+#
+# Usage:
+#   graph-api.sh <METHOD> <ENDPOINT> [BODY_JSON]
+#
+# Examples:
+#   graph-api.sh GET "/me/messages?\$top=10"
+#   graph-api.sh POST "/me/sendMail" '{"message":{"subject":"Test","body":{"content":"Hello"},"toRecipients":[{"emailAddress":{"address":"{{USER_EMAIL}}"}}]}}'
+#   graph-api.sh PATCH "/me/events/AAMk..." '{"subject":"Updated title"}'
+#
+# The script uses mcporter to handle authentication automatically.
+# The MCP server name defaults to ms365-assistant (write access).
+# Set MCP_SERVER=ms365-reader for read-only calls.
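+#
+# Read-only example using the MCP_SERVER override described above:
+#   MCP_SERVER=ms365-reader graph-api.sh GET "/me/mailFolders"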
+
+# ---------------------------------------------------------------------------
+# Configuration
+# ---------------------------------------------------------------------------
+MCP_SERVER="${MCP_SERVER:-ms365-assistant}"
+GRAPH_BASE="https://graph.microsoft.com/v1.0"
+
+# ---------------------------------------------------------------------------
+# Argument parsing
+# ---------------------------------------------------------------------------
+if [[ $# -lt 2 ]]; then
+  echo "Usage: graph-api.sh <METHOD> <ENDPOINT> [BODY_JSON]" >&2
+  echo "" >&2
+  echo "  METHOD    — HTTP method: GET, POST, PATCH, PUT, DELETE" >&2
+  echo "  ENDPOINT  — Graph API path, e.g. /me/messages" >&2
+  echo "  BODY_JSON — Optional JSON body for POST/PATCH/PUT" >&2
+  echo "" >&2
+  echo "Environment:" >&2
+  echo "  MCP_SERVER — MCP server to use (default: ms365-assistant)" >&2
+  exit 1
+fi
+
+METHOD="${1^^}"   # uppercase
+ENDPOINT="$2"
+BODY="${3:-}"
+
+# Strip leading slash for consistency, then re-add
+ENDPOINT="${ENDPOINT#/}"
+
+# ---------------------------------------------------------------------------
+# Build the mcporter call
+# ---------------------------------------------------------------------------
+# Construct the arguments JSON for the graph-api-call tool
+# (body falls back to JSON null when no argument was given)
+CALL_ARGS=$(cat <<EOF
+{
+  "method": "$METHOD",
+  "url": "$GRAPH_BASE/$ENDPOINT",
+  "body": ${BODY:-null}
+}
+EOF
+)
+
+echo ">>> $METHOD $GRAPH_BASE/$ENDPOINT" >&2
+
+RESULT=$(mcporter call "$MCP_SERVER" graph-api-call "$CALL_ARGS")
+
+# ---------------------------------------------------------------------------
+# Output
+# ---------------------------------------------------------------------------
+# Pretty-print if jq is available, otherwise raw output
+if command -v jq &>/dev/null; then
+  echo "$RESULT" | jq .
+else
+  echo "$RESULT"
+fi
diff --git a/bates-enhance/integrations/m365/scripts/read-pdf-attachment.sh b/bates-enhance/integrations/m365/scripts/read-pdf-attachment.sh
new file mode 100755
index 0000000..1702766
--- /dev/null
+++ b/bates-enhance/integrations/m365/scripts/read-pdf-attachment.sh
@@ -0,0 +1,110 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# read-pdf-attachment.sh — Download and extract text from a PDF email attachment
+#
+# Usage:
+#   read-pdf-attachment.sh <message-id> <attachment-id>
+#
+# Examples:
+#   read-pdf-attachment.sh "AAMkAD..." "AAMkAT..."
+#
+# Downloads the PDF attachment to a temporary file, extracts text using
+# pdftotext (from poppler-utils), and outputs the text to stdout.
+# The temporary file is cleaned up automatically.
+#
+# Requirements:
+#   - poppler-utils (apt install poppler-utils) for pdftotext
+#   - mcporter with ms365-reader configured
+
+# ---------------------------------------------------------------------------
+# Configuration
+# ---------------------------------------------------------------------------
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+MCP_SERVER="${MCP_SERVER:-ms365-reader}"
+
+# ---------------------------------------------------------------------------
+# Argument parsing
+# ---------------------------------------------------------------------------
+if [[ $# -lt 2 ]]; then
+  echo "Usage: read-pdf-attachment.sh <message-id> <attachment-id>" >&2
+  echo "" >&2
+  echo "Downloads a PDF attachment from an email and extracts its text." >&2
+  echo "Text is written to stdout; status messages go to stderr." >&2
+  exit 1
+fi
+
+MESSAGE_ID="$1"
+ATTACHMENT_ID="$2"
+
+# ---------------------------------------------------------------------------
+# Check dependencies
+# ---------------------------------------------------------------------------
+if ! command -v pdftotext &>/dev/null; then
+  echo "Error: pdftotext not found. Install poppler-utils:" >&2
+  echo "  sudo apt install poppler-utils" >&2
+  exit 1
+fi
+
+# ---------------------------------------------------------------------------
+# Create temp file with automatic cleanup
+# ---------------------------------------------------------------------------
+TEMP_PDF=$(mktemp /tmp/pdf-attachment-XXXXXX.pdf)
+trap 'rm -f "$TEMP_PDF"' EXIT
+
+# ---------------------------------------------------------------------------
+# Download the attachment
+# ---------------------------------------------------------------------------
+echo "Downloading PDF attachment..." >&2
+
+# Use the save-attachment helper if available, otherwise do it inline
+if [[ -x "$SCRIPT_DIR/save-attachment.sh" ]]; then
+  "$SCRIPT_DIR/save-attachment.sh" "$MESSAGE_ID" "$ATTACHMENT_ID" "$TEMP_PDF"
+else
+  # Inline download
+  RESULT=$(mcporter call "$MCP_SERVER" get-mail-attachment "$(cat <<EOF
+{"messageId": "$MESSAGE_ID", "attachmentId": "$ATTACHMENT_ID", "savePath": "$TEMP_PDF"}
+EOF
+)")
+fi
+
+if [[ ! -s "$TEMP_PDF" ]]; then
+  echo "Error: Attachment download failed or produced an empty file." >&2
+  exit 1
+fi
+
+FILE_TYPE=$(file -b --mime-type "$TEMP_PDF" 2>/dev/null || echo "unknown")
+if [[ "$FILE_TYPE" != "application/pdf" ]]; then
+  echo "Warning: File type is '$FILE_TYPE', not application/pdf. Attempting extraction anyway..." >&2
+fi
+
+# ---------------------------------------------------------------------------
+# Extract text
+# ---------------------------------------------------------------------------
+echo "Extracting text from PDF..." >&2
+
+# pdftotext with "-" outputs to stdout
+# -layout preserves the visual layout of the PDF
+pdftotext -layout "$TEMP_PDF" -
+
+echo "" >&2
+echo "Text extraction complete." >&2
diff --git a/bates-enhance/integrations/m365/scripts/save-attachment.sh b/bates-enhance/integrations/m365/scripts/save-attachment.sh
new file mode 100755
index 0000000..5bc6d49
--- /dev/null
+++ b/bates-enhance/integrations/m365/scripts/save-attachment.sh
@@ -0,0 +1,98 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# save-attachment.sh — Save an email attachment to a local file
+#
+# Usage:
+#   save-attachment.sh <message-id> <attachment-id> <output-path>
+#
+# Examples:
+#   save-attachment.sh "AAMkAD..." "AAMkAT..." /tmp/invoice.pdf
+#   save-attachment.sh "AAMkAD..." "AAMkAT..." ~/Downloads/report.xlsx
+#
+# Downloads the specified attachment from an email message and saves it
+# to the given local path. Uses mcporter with ms365-reader for auth.
+
+# ---------------------------------------------------------------------------
+# Configuration
+# ---------------------------------------------------------------------------
+MCP_SERVER="${MCP_SERVER:-ms365-reader}"
+
+# ---------------------------------------------------------------------------
+# Argument parsing
+# ---------------------------------------------------------------------------
+if [[ $# -lt 3 ]]; then
+  echo "Usage: save-attachment.sh <message-id> <attachment-id> <output-path>" >&2
+  echo "" >&2
+  echo "  message-id    — The email message ID (from list-mail-messages)" >&2
+  echo "  attachment-id — The attachment ID (from list-mail-attachments)" >&2
+  echo "  output-path   — Local path to save the attachment to" >&2
+  exit 1
+fi
+
+MESSAGE_ID="$1"
+ATTACHMENT_ID="$2"
+OUTPUT_PATH="$3"
+
+# ---------------------------------------------------------------------------
+# Validation
+# ---------------------------------------------------------------------------
+OUTPUT_DIR=$(dirname "$OUTPUT_PATH")
+if [[ ! -d "$OUTPUT_DIR" ]]; then
-d "$OUTPUT_DIR" ]]; then + echo "Creating output directory: $OUTPUT_DIR" >&2 + mkdir -p "$OUTPUT_DIR" +fi + +# --------------------------------------------------------------------------- +# Download the attachment +# --------------------------------------------------------------------------- +echo "Downloading attachment..." >&2 +echo " Message: $MESSAGE_ID" >&2 +echo " Attachment: $ATTACHMENT_ID" >&2 +echo " Output: $OUTPUT_PATH" >&2 + +RESULT=$(mcporter call "$MCP_SERVER" get-mail-attachment "$(cat </dev/null || stat -f%z "$OUTPUT_PATH" 2>/dev/null) + echo "Success: $OUTPUT_PATH ($FILE_SIZE bytes)" >&2 +else + echo "Error: File was not created." >&2 + exit 1 +fi diff --git a/bates-enhance/integrations/m365/scripts/upload-to-onedrive.sh b/bates-enhance/integrations/m365/scripts/upload-to-onedrive.sh new file mode 100755 index 0000000..5e94403 --- /dev/null +++ b/bates-enhance/integrations/m365/scripts/upload-to-onedrive.sh @@ -0,0 +1,142 @@ +#!/usr/bin/env bash +set -euo pipefail + +# upload-to-onedrive.sh — Upload a local file to OneDrive +# +# Usage: +# upload-to-onedrive.sh +# +# Examples: +# upload-to-onedrive.sh /tmp/report.pdf "drafts/documents/report.pdf" +# upload-to-onedrive.sh ./image.png "drafts/images/screenshot.png" +# +# For files > 4 MB, this script uses an upload session (chunked upload). +# For files <= 4 MB, it uses a simple PUT request. +# +# Uses mcporter with ms365-assistant for authentication. + +# --------------------------------------------------------------------------- +# Configuration +# --------------------------------------------------------------------------- +MCP_SERVER="${MCP_SERVER:-ms365-assistant}" +CHUNK_SIZE=$((3 * 1024 * 1024 + 768 * 1024)) # 3.75 MB in bytes +SIMPLE_LIMIT=$((4 * 1024 * 1024)) # 4 MB threshold + +# --------------------------------------------------------------------------- +# Argument parsing +# --------------------------------------------------------------------------- +if [[ $# -lt 2 ]]; then + echo "Usage: upload-to-onedrive.sh " >&2 + echo "" >&2 + echo " local-path — Path to the local file to upload" >&2 + echo " onedrive-path — Destination path in OneDrive (e.g. drafts/docs/file.pdf)" >&2 + exit 1 +fi + +LOCAL_PATH="$1" +ONEDRIVE_PATH="$2" + +# --------------------------------------------------------------------------- +# Validation +# --------------------------------------------------------------------------- +if [[ ! -f "$LOCAL_PATH" ]]; then + echo "Error: File not found: $LOCAL_PATH" >&2 + exit 1 +fi + +FILE_SIZE=$(stat -c%s "$LOCAL_PATH" 2>/dev/null || stat -f%z "$LOCAL_PATH" 2>/dev/null) +FILE_NAME=$(basename "$LOCAL_PATH") + +# Strip leading slash from OneDrive path +ONEDRIVE_PATH="${ONEDRIVE_PATH#/}" + +echo "Uploading: $LOCAL_PATH ($FILE_SIZE bytes)" >&2 +echo " To: OneDrive:/$ONEDRIVE_PATH" >&2 + +# --------------------------------------------------------------------------- +# Simple upload (< 4 MB) +# --------------------------------------------------------------------------- +if [[ "$FILE_SIZE" -le "$SIMPLE_LIMIT" ]]; then + echo "Using simple upload..." 
+
+  # Base64-encode the file content for the MCP call
+  CONTENT_B64=$(base64 -w0 "$LOCAL_PATH" 2>/dev/null || base64 "$LOCAL_PATH" 2>/dev/null)
+
+  RESULT=$(mcporter call "$MCP_SERVER" upload-drive-item "$(cat <<EOF
+{"remotePath": "$ONEDRIVE_PATH", "content": "$CONTENT_B64"}
+EOF
+)")
+
+  echo "Upload complete: OneDrive:/$ONEDRIVE_PATH" >&2
+  exit 0
+fi
+
+# ---------------------------------------------------------------------------
+# Chunked upload (> 4 MB) via upload session
+# ---------------------------------------------------------------------------
+echo "File exceeds 4 MB — using chunked upload session..." >&2
+
+# Step 1: Create upload session
+SESSION_RESULT=$(mcporter call "$MCP_SERVER" create-upload-session "$(cat <<EOF
+{"remotePath": "$ONEDRIVE_PATH"}
+EOF
+)")
+
+UPLOAD_URL=$(echo "$SESSION_RESULT" | python3 -c "import json,sys; print(json.load(sys.stdin).get('uploadUrl',''))" 2>/dev/null)
+
+if [[ -z "$UPLOAD_URL" ]]; then
+  echo "Error: Failed to create upload session." >&2
+  echo "Response: $SESSION_RESULT" >&2
+  exit 1
+fi
+
+echo "Upload session created." >&2
+
+# Step 2: Upload in chunks
+OFFSET=0
+REMAINING="$FILE_SIZE"
+
+while [[ "$REMAINING" -gt 0 ]]; do
+  # Determine chunk size
+  if [[ "$REMAINING" -lt "$CHUNK_SIZE" ]]; then
+    THIS_CHUNK="$REMAINING"
+  else
+    THIS_CHUNK="$CHUNK_SIZE"
+  fi
+
+  END_BYTE=$((OFFSET + THIS_CHUNK - 1))
+
+  echo "  Uploading bytes $OFFSET-$END_BYTE of $FILE_SIZE..." >&2
+
+  # Extract chunk and base64-encode it
+  CHUNK_B64=$(dd if="$LOCAL_PATH" bs=1 skip="$OFFSET" count="$THIS_CHUNK" 2>/dev/null | base64 -w0 2>/dev/null || \
+              dd if="$LOCAL_PATH" bs=1 skip="$OFFSET" count="$THIS_CHUNK" 2>/dev/null | base64 2>/dev/null)
+
+  mcporter call "$MCP_SERVER" upload-chunk "$(cat <<EOF
+{"uploadUrl": "$UPLOAD_URL", "rangeStart": $OFFSET, "rangeEnd": $END_BYTE, "totalSize": $FILE_SIZE, "content": "$CHUNK_B64"}
+EOF
+)" >/dev/null
+
+  OFFSET=$((OFFSET + THIS_CHUNK))
+  REMAINING=$((REMAINING - THIS_CHUNK))
+done
+
+echo "Upload complete: OneDrive:/$ONEDRIVE_PATH" >&2
diff --git a/bates-enhance/integrations/m365/setup.sh b/bates-enhance/integrations/m365/setup.sh
new file mode 100644
index 0000000..1221835
--- /dev/null
+++ b/bates-enhance/integrations/m365/setup.sh
@@ -0,0 +1,232 @@
+# m365 integration setup — sourced by bates-enhance installer, no shebang
+# Requires: mcporter, config_merge (from parent installer), interactive terminal
+
+M365_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+# ---------------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------------
+_prompt() {         # _prompt VAR "prompt text" [default]
+  local __var=$1 __prompt=$2 __default=${3:-}
+  local __val
+  if [[ -n "$__default" ]]; then
+    read -rp "$__prompt [$__default]: " __val
+    __val="${__val:-$__default}"
+  else
+    read -rp "$__prompt: " __val
+  fi
+  printf -v "$__var" '%s' "$__val"
+}
+
+_prompt_secret() {  # _prompt_secret VAR "prompt text"
+  local __var=$1 __prompt=$2 __val
+  read -rsp "$__prompt: " __val
+  echo
+  printf -v "$__var" '%s' "$__val"
+}
+
+_confirm() {        # _confirm "question" → 0=yes 1=no
+  local ans
+  read -rp "$1 [y/N]: " ans
+  [[ "$ans" =~ ^[Yy] ]]
+}
+
+_step() {           # _step N "Title"
+  echo ""
+  echo "======================================================================"
+  echo "  Step $1: $2"
+  echo "======================================================================"
+  echo ""
+}
+
+# =========================================================================
+# Step 1 — Collect account info
+# =========================================================================
+_step 1 "Collect account information"
+
+_prompt ASSISTANT_EMAIL "Assistant email address (the email Bates sends from)"
+_prompt USER_EMAIL      "Your personal email address"
+_prompt COMPANY_EMAIL   "Company/shared mailbox email (leave blank to skip)" ""
+_prompt TENANT_ID       "Entra (Azure AD) tenant ID"
+
+export ASSISTANT_EMAIL
USER_EMAIL COMPANY_EMAIL TENANT_ID + +echo "" +echo " Assistant email : $ASSISTANT_EMAIL" +echo " Personal email : $USER_EMAIL" +echo " Company email : ${COMPANY_EMAIL:-}" +echo " Tenant ID : $TENANT_ID" +echo "" + +if ! _confirm "Continue with these values?"; then + echo "Aborted." >&2 + return 1 +fi + +# ========================================================================= +# Step 2 — Register personal reader MCP server +# ========================================================================= +_step 2 "Register personal reader MCP server (read-only)" + +echo "You need an Entra app registration with DELEGATED permissions for:" +echo " Mail.Read, Calendars.Read, Contacts.Read, Files.Read, Tasks.Read" +echo "" +echo "See: ${M365_DIR}/manual-steps-m365.md for detailed instructions." +echo "" + +_prompt READER_APP_ID "Personal reader App (Client) ID" +_prompt_secret READER_SECRET "Personal reader Client Secret" + +echo "Registering ms365-reader with mcporter..." +mcporter add ms365-reader \ + --app-id "$READER_APP_ID" \ + --client-secret "$READER_SECRET" \ + --tenant "$TENANT_ID" + +echo "" +echo "Testing connection..." +if mcporter call ms365-reader list-mail-folders '{}' >/dev/null 2>&1; then + echo " ms365-reader OK" +else + echo " WARNING: ms365-reader test failed. You may need to complete consent." + echo " You can re-test later with: mcporter call ms365-reader list-mail-folders '{}'" +fi + +# ========================================================================= +# Step 3 — Register company reader (optional) +# ========================================================================= +if [[ -n "$COMPANY_EMAIL" ]]; then + _step 3 "Register company reader MCP server (read-only)" + + echo "This reader accesses the company/shared mailbox: $COMPANY_EMAIL" + echo "It needs a separate Entra app registration (or the same app with" + echo "additional permissions for the shared mailbox)." + echo "" + + _prompt COMPANY_APP_ID "Company reader App (Client) ID" + _prompt_secret COMPANY_SECRET "Company reader Client Secret" + + echo "Registering ms365-company-reader with mcporter..." + mcporter add ms365-company-reader \ + --app-id "$COMPANY_APP_ID" \ + --client-secret "$COMPANY_SECRET" \ + --tenant "$TENANT_ID" + + echo "" + echo "Testing connection..." + if mcporter call ms365-company-reader list-mail-folders '{}' >/dev/null 2>&1; then + echo " ms365-company-reader OK" + else + echo " WARNING: ms365-company-reader test failed." + echo " Re-test later: mcporter call ms365-company-reader list-mail-folders '{}'" + fi +else + echo "" + echo "--- Step 3: Skipped (no company email provided) ---" +fi + +# ========================================================================= +# Step 4 — Register assistant MCP server (write access) +# ========================================================================= +_step 4 "Register assistant MCP server (write access)" + +echo "The assistant needs a SEPARATE Entra app registration with APPLICATION" +echo "permissions for sending mail, managing calendars, and creating tasks:" +echo " Mail.Send, Mail.ReadWrite, Calendars.ReadWrite, Tasks.ReadWrite," +echo " Files.ReadWrite.All, User.Read.All" +echo "" +echo "See: ${M365_DIR}/manual-steps-m365.md for detailed instructions." +echo "" + +_prompt ASST_APP_ID "Assistant App (Client) ID" +_prompt_secret ASST_SECRET "Assistant Client Secret" + +echo "Registering ms365-assistant with mcporter..." 
+mcporter add ms365-assistant \
+  --app-id "$ASST_APP_ID" \
+  --client-secret "$ASST_SECRET" \
+  --tenant "$TENANT_ID" \
+  --org-mode
+
+echo ""
+echo "Testing connection..."
+if mcporter call ms365-assistant list-mail-folders '{}' >/dev/null 2>&1; then
+  echo "  ms365-assistant OK"
+else
+  echo "  WARNING: ms365-assistant test failed. You may need to grant admin consent."
+  echo "  Re-test later: mcporter call ms365-assistant list-mail-folders '{}'"
+fi
+
+# =========================================================================
+# Step 5 — Exchange transport rule reminder
+# =========================================================================
+_step 5 "Exchange transport rule (safety guardrail)"
+
+echo "IMPORTANT: Before the assistant can send mail, you MUST set up an"
+echo "Exchange Online transport rule to restrict the assistant email"
+echo "($ASSISTANT_EMAIL) to approved recipients only."
+echo ""
+echo "This prevents the assistant from sending mail to arbitrary addresses."
+echo ""
+echo "Options:"
+echo "  1. Exchange Admin Center -> Mail flow -> Rules"
+echo "  2. PowerShell: New-TransportRule (see manual-steps-m365.md)"
+echo ""
+echo "See: ${M365_DIR}/manual-steps-m365.md (section: Transport Rule)"
+echo ""
+
+if _confirm "Have you set up the transport rule (or will do so before enabling send)?"; then
+  echo "  Acknowledged."
+else
+  echo "  WARNING: Proceeding without transport rule. The assistant will NOT"
+  echo "  attempt to send mail until you confirm the rule is in place."
+fi
+
+# =========================================================================
+# Step 6 — Deploy config + workspace additions
+# =========================================================================
+_step 6 "Deploy configuration and workspace additions"
+
+echo "Merging M365 config fragment into openclaw.json..."
+
+# Substitute placeholders in config fragment
+_m365_fragment=$(sed \
+  -e "s|{{ASSISTANT_EMAIL}}|${ASSISTANT_EMAIL}|g" \
+  "$M365_DIR/config-fragment.json")
+
+config_merge_inline "$_m365_fragment"
+
+echo "  Config merged."
+
+# Copy workspace additions
+if [[ -d "$M365_DIR/workspace-additions" ]]; then
+  echo "Copying workspace additions..."
+  cp -r "$M365_DIR/workspace-additions/refs/"* "${WORKSPACE_DIR:-$HOME/.openclaw/workspace}/refs/" 2>/dev/null || true
+  cp -r "$M365_DIR/workspace-additions/rules/"* "${WORKSPACE_DIR:-$HOME/.openclaw/workspace}/rules/" 2>/dev/null || true
+  cp -r "$M365_DIR/workspace-additions/skills/"* "${WORKSPACE_DIR:-$HOME/.openclaw/workspace}/skills/" 2>/dev/null || true
+  echo "  Workspace files deployed."
+fi
+
+# Copy scripts
+if [[ -d "$M365_DIR/scripts" ]]; then
+  echo "Installing helper scripts..."
+  cp "$M365_DIR/scripts/"*.sh "${SCRIPTS_DIR:-$HOME/.openclaw/scripts}/" 2>/dev/null || true
+  chmod +x "${SCRIPTS_DIR:-$HOME/.openclaw/scripts}/"graph-api.sh \
+           "${SCRIPTS_DIR:-$HOME/.openclaw/scripts}/"upload-to-onedrive.sh \
+           "${SCRIPTS_DIR:-$HOME/.openclaw/scripts}/"save-attachment.sh \
+           "${SCRIPTS_DIR:-$HOME/.openclaw/scripts}/"read-pdf-attachment.sh 2>/dev/null || true
+  echo "  Scripts installed."
+fi
+
+echo ""
+echo "======================================================================"
+echo "  M365 integration setup complete!"
+echo "======================================================================"
+echo ""
+echo "Next steps:"
+echo "  1. Restart the gateway: systemctl --user restart openclaw-gateway"
+echo "  2. Merge cron jobs from: ${M365_DIR}/cron-jobs-m365.json"
+echo "     (review and customize schedules / delivery targets first)"
+echo "  3. Test: mcporter call ms365-reader list-mail-messages '{\"top\": 5}'"
+echo "  4. Test: mcporter call ms365-assistant list-mail-folders '{}'"
+echo ""
diff --git a/bates-enhance/integrations/m365/workspace-additions/refs/attachments.md b/bates-enhance/integrations/m365/workspace-additions/refs/attachments.md
new file mode 100644
index 0000000..19fe468
--- /dev/null
+++ b/bates-enhance/integrations/m365/workspace-additions/refs/attachments.md
@@ -0,0 +1,130 @@
+# Attachment Handling Reference
+
+## Downloading Attachments from Emails
+
+### Step 1: List attachments on a message
+
+```
+list-mail-attachments messageId="AAMk..."
+```
+
+Returns an array of attachment objects with:
+- `id` — attachment ID
+- `name` — filename
+- `contentType` — MIME type
+- `size` — size in bytes
+- `isInline` — whether it is an inline image
+
+### Step 2: Download a specific attachment
+
+```
+get-mail-attachment messageId="AAMk..." attachmentId="AAMk..."
+```
+
+Returns the attachment content (base64-encoded for binary files).
+
+### Helper script
+
+```bash
+# Save attachment to a local file
+~/.openclaw/scripts/save-attachment.sh <message-id> <attachment-id> /tmp/output.pdf
+```
+
+---
+
+## Saving Attachments to OneDrive
+
+After downloading an attachment locally, upload it to OneDrive:
+
+```bash
+# Upload to OneDrive
+~/.openclaw/scripts/upload-to-onedrive.sh /tmp/output.pdf "drafts/documents/output.pdf"
+```
+
+Or use the MCP server directly:
+
+```
+upload-drive-item localPath="/tmp/output.pdf" remotePath="drafts/documents/output.pdf"
+```
+
+### Recommended OneDrive paths
+
+| Content type       | Path                             |
+|--------------------|----------------------------------|
+| General documents  | `drafts/documents/`              |
+| Images             | `drafts/images/`                 |
+| Venture/sales docs | `drafts/Sales/{{COMPANY_NAME}}/` |
+| Receipts/invoices  | `drafts/finance/receipts/`       |
+| Reports            | `drafts/reports/`                |
+
+---
+
+## PDF Attachments
+
+### Read text from a PDF attachment
+
+```bash
+# Download and extract text in one step
+~/.openclaw/scripts/read-pdf-attachment.sh <message-id> <attachment-id>
+```
+
+This downloads the PDF to a temp file, extracts text using `pdftotext`
+(from poppler-utils), and outputs the text to stdout.
+
+### Requirements
+
+- `poppler-utils` must be installed (`apt install poppler-utils`)
+- For scanned PDFs (image-only), OCR via `tesseract` may be needed
+
+### Reading local PDFs
+
+If you already have a PDF file locally:
+
+```bash
+pdftotext /path/to/file.pdf -                # outputs text to stdout
+pdftotext /path/to/file.pdf /path/to/output.txt
+```
+
+---
+
+## Image Attachments
+
+### Inline images
+
+Inline images (embedded in HTML body) have `isInline: true` and a
+`contentId` that corresponds to `cid:` references in the HTML body.
+
+### Regular image attachments
+
+Download as any other attachment. Common types:
+- `image/png`
+- `image/jpeg`
+- `image/gif`
+
+### Processing images
+
+Use the image generation or analysis tools if you need to process
+downloaded images.
+
+---
+
+## Size Limits
+
+| Limit                         | Value        |
+|-------------------------------|--------------|
+| Single attachment (Graph API) | 3 MB inline  |
+| Upload session (large files)  | up to 150 MB |
+| Total message size            | 25 MB        |
+| OneDrive upload (simple)      | 4 MB         |
+| OneDrive upload (session)     | up to 250 GB |
+
+### Large attachments (> 3 MB)
+
+For attachments larger than 3 MB, use an upload session:
+
+1. Create an upload session via Graph API
+2. Upload the file in chunks (typically 3.75 MB each)
+3. The Graph API returns the final attachment object
+
+The `upload-to-onedrive.sh` helper handles chunked uploads automatically
+for files over 4 MB.
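+
+The chunk arithmetic is simple; a minimal sketch (assuming the Graph
+convention that chunks are multiples of 320 KiB, so 3.75 MB is 12 × 320 KiB):
+
+```bash
+FILE_SIZE=9000000                     # example: a 9 MB attachment
+CHUNK=$((12 * 320 * 1024))            # 3.75 MB per chunk
+OFFSET=0
+while (( OFFSET < FILE_SIZE )); do
+  END=$((OFFSET + CHUNK - 1))
+  (( END >= FILE_SIZE )) && END=$((FILE_SIZE - 1))
+  echo "Content-Range: bytes $OFFSET-$END/$FILE_SIZE"  # header for this chunk's PUT
+  OFFSET=$((END + 1))
+done
+```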
diff --git a/bates-enhance/integrations/m365/workspace-additions/refs/email-ops.md b/bates-enhance/integrations/m365/workspace-additions/refs/email-ops.md
new file mode 100644
index 0000000..dd7d151
--- /dev/null
+++ b/bates-enhance/integrations/m365/workspace-additions/refs/email-ops.md
@@ -0,0 +1,180 @@
+# Email Operations Reference
+
+## Searching Emails
+
+### Search all folders
+
+Use `list-mail-messages` with the `search` parameter. This searches across
+ALL mail folders (Inbox, Sent Items, Archive, etc.).
+
+```
+list-mail-messages search='"quarterly report"'
+```
+
+The search term must be wrapped in double quotes inside single quotes.
+This uses Microsoft Graph's `$search` parameter which supports KQL syntax.
+
+### Search a specific folder
+
+Use `list-mail-folder-messages` to search within ONE folder only:
+
+```
+list-mail-folder-messages folderId="inbox" filter="contains(subject, 'budget')"
+```
+
+### Common search patterns
+
+| Goal                 | Approach                                              |
+|----------------------|-------------------------------------------------------|
+| Find by keyword      | `list-mail-messages search='"keyword"'`               |
+| Find by sender       | `list-mail-messages search='"from:jane@example.com"'` |
+| Find by subject      | `list-mail-messages search='"subject:invoice"'`       |
+| Find by date range   | Use `filter` with `receivedDateTime`                  |
+| Find with attachment | `filter="hasAttachments eq true"`                     |
+| Find unread          | `filter="isRead eq false"`                            |
+
+### Pagination
+
+Results are paginated. Use `top` (page size) and `skip` (offset):
+
+```
+list-mail-messages search='"report"' top=25 skip=0
+list-mail-messages search='"report"' top=25 skip=25
+```
+
+---
+
+## Reading a Specific Email
+
+```
+get-mail-message messageId="AAMk..."
+```
+
+Returns the full message including body (HTML or text), headers, and metadata.
+
+---
+
+## Drafting and Sending Emails
+
+### Step 1: Create a draft (preferred)
+
+```
+create-draft subject="Re: Project update" body="..." toRecipients='["{{USER_EMAIL}}"]'
+```
+
+### Step 2: Review the draft
+
+The draft will appear in the Drafts folder. Review it before sending.
+
+### Step 3: Send
+
+```
+send-mail messageId="AAMk..."
+```
+
+Or send directly (only when explicitly instructed):
+
+```
+send-mail subject="Meeting notes" body="Here are the notes..." toRecipients='["{{USER_EMAIL}}"]'
+```
+
+### Important rules
+
+- Always draft first, never send without review (unless user explicitly says so)
+- Only send to approved recipients (transport rule enforces this)
+- Match the tone of previous correspondence
+- Include proper greeting and sign-off
+
+---
+
+## Email Threading and Reply Chains
+
+### Reply to a message
+
+```
+reply-mail messageId="AAMk..." comment="Thanks for the update. I'll review by EOD."
+```
+
+### Reply all
+
+```
+reply-mail messageId="AAMk..." comment="Noted, thanks." replyAll=true
+```
+
+### Forward a message
+
+```
+forward-mail messageId="AAMk..." toRecipients='["{{USER_EMAIL}}"]' comment="FYI — see below."
+```
+
+### Thread identification
+
+- Messages in the same thread share `conversationId`.
+- Use `conversationId` to find all messages in a thread.
+- Sort by `receivedDateTime` to reconstruct the conversation order.
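+
+A sketch of reconstructing a thread from a known `conversationId` (the ID
+value is illustrative, and the `orderby` parameter is an assumption: drop it
+and sort client-side if the tool does not accept it):
+
+```bash
+# All messages in one conversation, oldest first
+mcporter call ms365-reader list-mail-messages \
+  "{\"filter\": \"conversationId eq 'AAQkAD...'\", \"orderby\": \"receivedDateTime asc\"}"
+```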
+
+---
+
+## Attachment Handling
+
+### List attachments
+
+```
+list-mail-attachments messageId="AAMk..."
+```
+
+### Download an attachment
+
+```
+get-mail-attachment messageId="AAMk..." attachmentId="AAMk..."
+```
+
+### Send with attachments
+
+Attachments must be base64-encoded:
+
+```json
+{
+  "subject": "Report attached",
+  "body": "Please find the report attached.",
+  "toRecipients": ["{{USER_EMAIL}}"],
+  "attachments": [
+    {
+      "name": "report.pdf",
+      "contentType": "application/pdf",
+      "contentBytes": "<base64-encoded-content>"
+    }
+  ]
+}
+```
+
+For large attachments (>3 MB), use the upload session endpoint.
+
+See also: `refs/attachments.md` for more detail on attachment workflows.
+
+---
+
+## Folder Navigation
+
+### List all folders
+
+```
+list-mail-folders
+```
+
+### Common folder names
+
+| Display Name   | Well-known ID  |
+|----------------|----------------|
+| Inbox          | `inbox`        |
+| Sent Items     | `sentitems`    |
+| Drafts         | `drafts`       |
+| Deleted Items  | `deleteditems` |
+| Archive        | `archive`      |
+| Junk Email     | `junkemail`    |
+
+### Move a message
+
+```
+move-mail-message messageId="AAMk..." destinationId="archive"
+```
diff --git a/bates-enhance/integrations/m365/workspace-additions/refs/graph-api.md b/bates-enhance/integrations/m365/workspace-additions/refs/graph-api.md
new file mode 100644
index 0000000..8c9af07
--- /dev/null
+++ b/bates-enhance/integrations/m365/workspace-additions/refs/graph-api.md
@@ -0,0 +1,180 @@
+# Microsoft Graph API Reference
+
+## Overview
+
+Microsoft Graph is the unified API for Microsoft 365 services. All M365
+integration MCP servers use Graph API endpoints internally. This reference
+covers patterns useful when making direct API calls via the `graph-api.sh`
+helper script.
+
+---
+
+## Common Endpoints
+
+### Mail
+
+| Method | Endpoint                               | Description          |
+|--------|----------------------------------------|----------------------|
+| GET    | `/me/messages`                         | List messages        |
+| GET    | `/me/messages/{id}`                    | Get a message        |
+| GET    | `/me/mailFolders`                      | List mail folders    |
+| GET    | `/me/mailFolders/{id}/messages`        | Messages in a folder |
+| POST   | `/me/sendMail`                         | Send a message       |
+| POST   | `/me/messages`                         | Create a draft       |
+| POST   | `/me/messages/{id}/reply`              | Reply to a message   |
+| POST   | `/me/messages/{id}/forward`            | Forward a message    |
+| POST   | `/me/messages/{id}/move`               | Move a message       |
+| GET    | `/me/messages/{id}/attachments`        | List attachments     |
+| GET    | `/me/messages/{id}/attachments/{aid}`  | Get an attachment    |
+
+### Calendar
+
+| Method | Endpoint                                              | Description          |
+|--------|-------------------------------------------------------|----------------------|
+| GET    | `/me/events`                                          | List events          |
+| GET    | `/me/events/{id}`                                     | Get an event         |
+| POST   | `/me/events`                                          | Create an event      |
+| PATCH  | `/me/events/{id}`                                     | Update an event      |
+| DELETE | `/me/events/{id}`                                     | Delete an event      |
+| GET    | `/me/calendarView?startDateTime=...&endDateTime=...`  | Events in date range |
+
+### OneDrive
+
+| Method | Endpoint                                       | Description          |
+|--------|------------------------------------------------|----------------------|
+| GET    | `/me/drive/root/children`                      | List root items      |
+| GET    | `/me/drive/root:/{path}:/children`             | List folder contents |
+| GET    | `/me/drive/root:/{path}:/content`              | Download file        |
+| PUT    | `/me/drive/root:/{path}:/content`              | Upload file (< 4 MB) |
+| POST   | `/me/drive/root:/{path}:/createUploadSession`  | Large file upload    |
+| POST   | `/me/drive/root/children`                      | Create folder        |
+
+### Planner
+
+| Method | Endpoint                       | Description            |
+|--------|--------------------------------|------------------------|
+| GET    | `/me/planner/plans`            | List plans             |
+| GET    | `/planner/plans/{id}/tasks`    | List tasks in a plan   |
+| GET    | `/planner/plans/{id}/buckets`  | List buckets in a plan |
+| POST   | `/planner/tasks`               | Create a task          |
+| PATCH  | `/planner/tasks/{id}`          | Update a task          |
+
+### To-Do
+
+| Method | Endpoint                            | Description          |
+|--------|-------------------------------------|----------------------|
+| GET    | `/me/todo/lists`                    | List task lists      |
+| GET    | `/me/todo/lists/{id}/tasks`         | List tasks in a list |
+| POST   | `/me/todo/lists/{id}/tasks`         | Create a task        |
+| PATCH  | `/me/todo/lists/{id}/tasks/{tid}`   | Update a task        |
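+
+Any row above maps directly onto a `graph-api.sh` call (invocation form as
+shown in the Pagination section below):
+
+```bash
+graph-api.sh GET "/me/mailFolders"                   # list mail folders
+graph-api.sh GET "/me/drive/root:/drafts:/children"  # list a OneDrive folder
+```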
+
+---
+
+## Authentication Flow
+
+The MCP servers handle authentication automatically. When making direct
+Graph API calls via `graph-api.sh`, authentication is handled by mcporter.
+
+### Token flow (for reference)
+
+1. Application credentials (client ID + secret) are stored by mcporter
+2. mcporter requests a token from `https://login.microsoftonline.com/{{TENANT_ID}}/oauth2/v2.0/token`
+3. Token is cached and refreshed automatically
+4. All API calls include an `Authorization: Bearer <token>` header
+
+### Scopes
+
+- **Delegated** (ms365-reader): acts as the user, limited to user's data
+- **Application** (ms365-assistant): acts as the app, can access org-wide data
+
+---
+
+## Pagination
+
+Graph API uses OData-style pagination:
+
+```json
+{
+  "value": [...],
+  "@odata.nextLink": "https://graph.microsoft.com/v1.0/me/messages?$skip=10"
+}
+```
+
+- Always check for `@odata.nextLink` in responses
+- Follow the link to get the next page
+- Use `$top` to control page size (default varies by endpoint, max 999)
+- Use `$skip` to offset results
+
+### Example: Fetching all messages
+
+```bash
+# Page 1
+graph-api.sh GET "/me/messages?\$top=50"
+# Page 2 (use the nextLink from page 1)
+graph-api.sh GET "/me/messages?\$top=50&\$skip=50"
+```
+
+---
+
+## Search Syntax (KQL)
+
+The `$search` parameter uses Keyword Query Language:
+
+```
+$search="quarterly report"        # contains both words
+$search="from:jane@example.com"   # from specific sender
+$search="subject:budget"          # in subject line
+$search="hasAttachment:true"      # has attachments
+$search="received>=2026-01-01"    # received after date
+```
+
+### Combining search terms
+
+```
+$search="from:jane@example.com AND subject:report"
+$search="quarterly OR annual report"
+```
+
+Note: `$search` and `$filter` cannot always be combined. When both are
+needed, prefer `$search` for text matching and post-filter results in code.
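+
+A sketch of that pattern with the helper plus `jq` (assuming `jq` is
+available; the attachment filter is illustrative):
+
+```bash
+# Broad text match via $search, then narrow client-side
+graph-api.sh GET "/me/messages?\$search=\"quarterly report\"&\$top=50" \
+  | jq '[.value[] | select(.hasAttachments == true)]'
+```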
+
+---
+
+## Error Handling
+
+### Common error codes
+
+| Code                         | HTTP | Meaning                         |
+|------------------------------|------|---------------------------------|
+| `invalidAuthenticationToken` | 401  | Token expired or invalid        |
+| `accessDenied`               | 403  | Insufficient permissions        |
+| `itemNotFound`               | 404  | Resource does not exist         |
+| `activityLimitReached`       | 429  | Rate limit exceeded             |
+| `generalException`           | 500  | Server error                    |
+| `serviceNotAvailable`        | 503  | Service temporarily unavailable |
+
+### Retry strategy
+
+- **429 (rate limit)**: Honor `Retry-After` header, back off exponentially
+- **503 (service unavailable)**: Retry after 30 seconds
+- **500 (server error)**: Retry once, then report failure
+- **401 (auth)**: Refresh token and retry once
+
+---
+
+## Rate Limits
+
+Microsoft Graph enforces per-app and per-tenant rate limits:
+
+| Resource         | Limit                    |
+|------------------|--------------------------|
+| General          | 10,000 requests / 10 min |
+| Mail send        | 10,000 messages / day    |
+| OneDrive upload  | Varies by file size      |
+| Search           | 10 concurrent requests   |
+
+Best practices:
+- Batch requests when possible (`/$batch` endpoint)
+- Cache responses that do not change frequently
+- Use `$select` to request only needed fields
+- Use `$top` to limit result set size
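+
+A minimal backoff sketch combining the retry strategy above with these
+limits (assuming failures surface as non-zero exit codes; adjust to how
+`graph-api.sh` actually reports errors):
+
+```bash
+for attempt in 1 2 3 4 5; do
+  if graph-api.sh GET "/me/messages?\$top=50"; then
+    break                      # success
+  fi
+  sleep $((2 ** attempt))      # 2s, 4s, 8s, ... exponential backoff
+done
+```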
diff --git a/bates-enhance/integrations/m365/workspace-additions/refs/mcp-servers.md b/bates-enhance/integrations/m365/workspace-additions/refs/mcp-servers.md
new file mode 100644
index 0000000..c6bc745
--- /dev/null
+++ b/bates-enhance/integrations/m365/workspace-additions/refs/mcp-servers.md
@@ -0,0 +1,102 @@
+# MCP Servers — Microsoft 365
+
+## ms365-reader (Personal Mail — Read-Only)
+
+Reads the user's personal mailbox, calendar, contacts, and OneDrive files.
+Registered with delegated permissions scoped to the user's account.
+
+### Key operations
+
+| Tool                        | Description                                |
+|-----------------------------|--------------------------------------------|
+| `list-mail-folders`         | List all mail folders and their IDs        |
+| `list-mail-messages`        | List/search messages (supports `search`, `filter`, `top`, `skip`) |
+| `list-mail-folder-messages` | List messages in a specific folder only    |
+| `get-mail-message`          | Get a single message by ID (full body)     |
+| `list-mail-attachments`     | List attachments on a message              |
+| `get-mail-attachment`       | Download a specific attachment             |
+| `list-calendar-events`      | List calendar events (supports date range) |
+| `get-calendar-event`        | Get a single calendar event                |
+| `list-contacts`             | List contacts                              |
+| `list-drive-items`          | List OneDrive files and folders            |
+| `get-drive-item-content`    | Download a file from OneDrive              |
+
+### Usage examples
+
+```bash
+# Search all folders for a keyword
+mcporter call ms365-reader list-mail-messages '{"search": "\"quarterly report\""}'
+
+# List recent inbox messages
+mcporter call ms365-reader list-mail-folder-messages '{"folderId": "inbox", "top": 10}'
+
+# Get today's calendar events
+mcporter call ms365-reader list-calendar-events '{"startDateTime": "2026-01-01T00:00:00Z", "endDateTime": "2026-01-02T00:00:00Z"}'
+```
+
+### Notes
+
+- `list-mail-messages` with `search` searches ALL folders (Inbox, Sent, etc.).
+- `list-mail-folder-messages` searches only ONE specific folder.
+- Always prefer `list-mail-messages search='"keyword"'` for broad searches.
+
+---
+
+## ms365-company-reader (Company Mailbox — Read-Only, Optional)
+
+Reads a company or shared mailbox. Same operations as ms365-reader but scoped
+to the company mailbox. Only available if a company email was configured during
+setup.
+
+### Key operations
+
+Same tool set as ms365-reader above. All calls are scoped to the company
+mailbox automatically.
+
+---
+
+## ms365-assistant (Write Access)
+
+The assistant's own identity for sending mail, managing calendars, creating
+tasks, and writing files. Uses application permissions with org-mode.
+
+### Key operations
+
+| Tool                      | Description                                |
+|---------------------------|--------------------------------------------|
+| `send-mail`               | Send an email (subject, body, recipients)  |
+| `create-draft`            | Create a draft email                       |
+| `reply-mail`              | Reply to an existing message               |
+| `forward-mail`            | Forward a message                          |
+| `move-mail-message`       | Move a message to a different folder       |
+| `create-calendar-event`   | Create a calendar event                    |
+| `update-calendar-event`   | Update an existing event                   |
+| `delete-calendar-event`   | Delete a calendar event                    |
+| `create-task`             | Create a Planner/To-Do task                |
+| `update-task`             | Update a task (status, due date, etc.)     |
+| `upload-drive-item`       | Upload a file to OneDrive                  |
+| `create-drive-folder`     | Create a folder in OneDrive                |
+| `list-mail-messages`      | Read messages (also has read access)       |
+| `list-calendar-events`    | Read calendar (also has read access)       |
+
+### Usage examples
+
+```bash
+# Send an email
+mcporter call ms365-assistant send-mail '{"subject": "Report ready", "body": "The weekly report is attached.", "toRecipients": ["{{USER_EMAIL}}"]}'
+
+# Create a calendar event
+mcporter call ms365-assistant create-calendar-event '{"subject": "Team standup", "start": "2026-01-15T09:00:00", "end": "2026-01-15T09:30:00"}'
+
+# Create a task
+mcporter call ms365-assistant create-task '{"title": "Review Q4 numbers", "dueDateTime": "2026-01-20"}'
+```
+
+### Safety
+
+- A transport rule MUST be in place restricting the assistant email to approved
+  recipients only. Never bypass this.
+- Always draft emails first, then send after review — unless explicitly
+  instructed otherwise.
+- The assistant uses `--org-mode` which grants application-level permissions.
+  This is necessary for sending mail on behalf of the assistant identity.
diff --git a/bates-enhance/integrations/m365/workspace-additions/refs/onedrive.md b/bates-enhance/integrations/m365/workspace-additions/refs/onedrive.md
new file mode 100644
index 0000000..d259b6b
--- /dev/null
+++ b/bates-enhance/integrations/m365/workspace-additions/refs/onedrive.md
@@ -0,0 +1,156 @@
+# OneDrive Operations Reference
+
+## Upload Files
+
+### Simple upload (< 4 MB)
+
+Use the `/me/drive/root:/{path}:/content` endpoint pattern:
+
+```
+upload-drive-item localPath="/tmp/report.pdf" remotePath="drafts/documents/report.pdf"
+```
+
+Or via the helper script:
+
+```bash
+~/.openclaw/scripts/upload-to-onedrive.sh /tmp/report.pdf "drafts/documents/report.pdf"
+```
+
+### Important: Correct endpoint format
+
+Always use the path-based endpoint:
+```
+/me/drive/root:/{path}:/content
+```
+
+Do NOT use the drive-ID-based endpoint (`/drives/b!.../root:/`). The
+path-based endpoint resolves correctly for the authenticated user's
+OneDrive.
+
+### Large file upload (> 4 MB)
+
+For files larger than 4 MB, use an upload session:
+
+1. Create upload session: `POST /me/drive/root:/{path}:/createUploadSession`
+2. Upload in chunks (3.75 MB recommended)
+3. Final chunk returns the completed DriveItem
+
+The `upload-to-onedrive.sh` script handles this automatically.
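+
+A sketch of the size check that decides between the two paths (4 MB
+threshold; the second `stat` form covers macOS/BSD):
+
+```bash
+SIZE=$(stat -c%s "$FILE" 2>/dev/null || stat -f%z "$FILE")
+if [[ "$SIZE" -le $((4 * 1024 * 1024)) ]]; then
+  echo "simple upload"     # single PUT to .../content
+else
+  echo "upload session"    # createUploadSession + chunked PUTs
+fi
+```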
+
+---
+
+## Download Files
+
+```
+get-drive-item-content path="drafts/documents/report.pdf"
+```
+
+Or by item ID:
+
+```
+get-drive-item-content itemId="01ABCDEF..."
+```
+
+---
+
+## List Folder Contents
+
+```
+list-drive-items path="drafts/documents/"
+```
+
+Returns an array of DriveItem objects with:
+- `id` — item ID
+- `name` — filename or folder name
+- `size` — size in bytes
+- `lastModifiedDateTime` — last modified timestamp
+- `folder` — present if item is a folder (contains `childCount`)
+- `file` — present if item is a file (contains `mimeType`)
+
+### Pagination
+
+Large folders return paginated results. Follow `@odata.nextLink` for
+additional pages.
+
+---
+
+## Create Folders
+
+```
+create-drive-folder parentPath="drafts/" name="new-project"
+```
+
+Creates `drafts/new-project/` in OneDrive.
+
+### Nested folder creation
+
+Create parent folders first, then children:
+
+```
+create-drive-folder parentPath="drafts/" name="Sales"
+create-drive-folder parentPath="drafts/Sales/" name="Acme Corp"
+```
+
+---
+
+## Share Links
+
+### Create a sharing link
+
+```
+create-sharing-link itemId="01ABCDEF..." type="view" scope="anonymous"
+```
+
+Link types:
+- `view` — read-only
+- `edit` — read-write
+
+Scopes:
+- `anonymous` — anyone with the link
+- `organization` — anyone in the organization
+
+### Get existing sharing links
+
+```
+list-sharing-links itemId="01ABCDEF..."
+```
+
+---
+
+## Path Conventions
+
+Standard OneDrive folder structure:
+
+```
+drafts/
+  documents/        — general documents, reports
+  images/           — generated images, screenshots
+  Sales/
+    {{COMPANY_NAME}}/
+      images/       — venture-specific images
+      documents/    — venture-specific docs
+  finance/
+    receipts/       — receipts and invoices
+  reports/          — generated reports
+    weekly/
+    monthly/
+```
+
+### Rules
+
+- Always use forward slashes in paths
+- Paths are case-insensitive on OneDrive but preserve case
+- Do not include leading slash (use `drafts/file.pdf` not `/drafts/file.pdf`)
+- The `drafts/` prefix keeps AI-generated content separate from user files
+
+---
+
+## Error Handling
+
+| Error                  | Cause                       | Fix                          |
+|------------------------|-----------------------------|------------------------------|
+| `itemNotFound`         | Path does not exist         | Create parent folders first  |
+| `nameAlreadyExists`    | File/folder already exists  | Use `@microsoft.graph.conflictBehavior: "replace"` |
+| `accessDenied`         | Insufficient permissions    | Check app registration perms |
+| `activityLimitReached` | Too many requests           | Back off and retry           |
+| `invalidRange`         | Bad upload chunk range      | Restart upload session       |
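+
+For the `itemNotFound` case, a sketch of creating the missing parents one
+level at a time with the documented `create-drive-folder` tool (folder
+names are illustrative):
+
+```bash
+PARENT="drafts/"
+for NAME in Sales "Acme Corp" images; do
+  mcporter call ms365-assistant create-drive-folder \
+    "{\"parentPath\": \"$PARENT\", \"name\": \"$NAME\"}"
+  PARENT="${PARENT}${NAME}/"   # next level nests inside the one just created
+done
+```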
diff --git a/bates-enhance/integrations/m365/workspace-additions/refs/planner-todo.md b/bates-enhance/integrations/m365/workspace-additions/refs/planner-todo.md
new file mode 100644
index 0000000..92d7d6b
--- /dev/null
+++ b/bates-enhance/integrations/m365/workspace-additions/refs/planner-todo.md
@@ -0,0 +1,152 @@
+# Microsoft Planner / To-Do Reference
+
+## Overview
+
+Microsoft Planner and To-Do are task management services accessible via
+the Graph API through the ms365-assistant MCP server.
+
+- **Planner** — team-level task boards associated with Microsoft 365 Groups
+- **To-Do** — personal task lists
+
+---
+
+## Planner
+
+### List plans
+
+```
+list-planner-plans
+```
+
+Returns all Planner plans the assistant has access to. Each plan has:
+- `id` — plan ID
+- `title` — plan name
+- `owner` — the group ID that owns the plan
+
+### List tasks in a plan
+
+```
+list-planner-tasks planId="PLAN_ID"
+```
+
+Returns tasks with:
+- `id` — task ID
+- `title` — task title
+- `bucketId` — which bucket/column the task is in
+- `percentComplete` — 0, 50, or 100
+- `priority` — 0 (most urgent) to 10 (least urgent), default 5
+- `dueDateTime` — ISO 8601 due date
+- `assignments` — object mapping user IDs to assignment info
+- `createdDateTime` — when the task was created
+
+### List buckets
+
+```
+list-planner-buckets planId="PLAN_ID"
+```
+
+Buckets are the columns on the Planner board (e.g., "To Do", "In Progress", "Done").
+
+### Create a task
+
+```
+create-task planId="PLAN_ID" title="Review quarterly report" bucketId="BUCKET_ID" dueDateTime="2026-02-20" priority=3
+```
+
+### Update a task
+
+```
+update-task taskId="TASK_ID" percentComplete=100
+update-task taskId="TASK_ID" title="Updated title" priority=1
+update-task taskId="TASK_ID" dueDateTime="2026-02-25"
+```
+
+### Priority levels
+
+Graph stores priority as 0-10 (lower is more urgent) and interprets the
+ranges as follows:
+
+| Value | Label     |
+|-------|-----------|
+| 0-1   | Urgent    |
+| 2-4   | Important |
+| 5-7   | Medium    |
+| 8-10  | Low       |
+
+### Task completion
+
+| percentComplete | Meaning     |
+|-----------------|-------------|
+| 0               | Not started |
+| 50              | In progress |
+| 100             | Completed   |
+
+### Assign a task
+
+```
+update-task taskId="TASK_ID" assignments='{"{{USER_ID}}": {"orderHint": " !"}}'
+```
+
+---
+
+## To-Do
+
+### List task lists
+
+```
+list-todo-lists
+```
+
+Returns personal task lists:
+- `id` — list ID
+- `displayName` — list name
+- `isOwner` — whether the assistant owns this list
+
+### List tasks in a list
+
+```
+list-todo-tasks listId="LIST_ID"
+```
+
+Returns tasks with:
+- `id` — task ID
+- `title` — task title
+- `status` — `notStarted`, `inProgress`, `completed`, `deferred`
+- `importance` — `low`, `normal`, `high`
+- `dueDateTime` — due date object with `dateTime` and `timeZone`
+- `body` — task body/notes
+- `reminderDateTime` — reminder date
+
+### Create a To-Do task
+
+```
+create-todo-task listId="LIST_ID" title="Follow up with client" importance="high" dueDateTime='{"dateTime": "2026-02-20T09:00:00", "timeZone": "{{USER_TZ}}"}'
+```
+
+### Update a To-Do task
+
+```
+update-todo-task listId="LIST_ID" taskId="TASK_ID" status="completed"
+update-todo-task listId="LIST_ID" taskId="TASK_ID" importance="high"
+```
+
+---
+
+## Common Patterns
+
+### Morning task review
+
+1. `list-planner-plans` to get all plans
+2. For each plan, `list-planner-tasks` with filter for overdue or due today
+3. `list-todo-lists` then `list-todo-tasks` for personal tasks
+4. Compile into prioritized summary
+
+(See the sketch below for steps 1-2 in script form.)
+
+### Mark task complete
+
+1. Find the task (by title search or list browsing)
+2. `update-task taskId="..." percentComplete=100` (Planner)
+3. Or `update-todo-task ... status="completed"` (To-Do)
+
+### Create follow-up task from email
+
+1. Read the email to understand the action item
+2. Create a task with appropriate title, due date, and priority
+3. Include the email subject or ID in the task body for reference
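+
+A sketch of the morning-review loop in script form (assumes the tools emit
+JSON arrays and that `jq` is available; adjust the field access to the
+actual output shape):
+
+```bash
+for PLAN in $(mcporter call ms365-assistant list-planner-plans '{}' | jq -r '.[].id'); do
+  # Open tasks with a due date, i.e. candidates for "overdue or due today"
+  mcporter call ms365-assistant list-planner-tasks "{\"planId\": \"$PLAN\"}" \
+    | jq '[.[] | select(.percentComplete < 100 and .dueDateTime != null)]'
+done
+```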
diff --git a/bates-enhance/integrations/m365/workspace-additions/rules/daily-routine.md b/bates-enhance/integrations/m365/workspace-additions/rules/daily-routine.md
new file mode 100644
index 0000000..b407b63
--- /dev/null
+++ b/bates-enhance/integrations/m365/workspace-additions/rules/daily-routine.md
@@ -0,0 +1,80 @@
+# Daily Routine Rules
+
+## Morning Routine (8:00 - 9:30)
+
+### 1. Email triage (8:30)
+- Check inbox for urgent or time-sensitive messages
+- Flag emails requiring action today
+- Identify emails that can be batched for later
+- Note any emails from VIP senders (executives, key clients, partners)
+
+### 2. Calendar review (8:30)
+- Review today's meetings and events
+- Check for scheduling conflicts
+- Identify meetings that need preparation materials
+- Note any meetings with external participants
+
+### 3. Morning briefing (8:30)
+- Compile the top 5 priorities for the day
+- Include: urgent emails, upcoming meetings, overdue tasks, deadlines
+- Deliver briefing to user via primary channel
+
+### 4. Draft preparation (9:00)
+- Review flagged/starred emails from previous day
+- Draft responses for emails that need replies
+- Save drafts for user review
+
+---
+
+## Midday Routine (11:00 - 14:00)
+
+### 5. Stale email check (11:00)
+- Identify emails older than 3 days without a response
+- List them with suggested actions (reply, delegate, archive)
+- Escalate anything truly urgent
+
+### 6. Task review (9:00, 14:00)
+- Review Planner and To-Do task lists
+- Identify overdue items
+- Suggest priority adjustments based on new information
+- Update task status for completed items
+
+---
+
+## Evening Routine (18:00 - 22:00)
+
+### 7. End-of-day review (18:00)
+- Summarize what was accomplished today
+- Flag items that need attention tomorrow
+- Update observations with new learnings
+- Note any patterns or recurring issues
+
+### 8. Cost review (22:00)
+- Check today's API costs
+- Compare with 7-day rolling average
+- Alert if costs are above threshold
+
+---
+
+## Proactive Behavior Guidelines
+
+### Do proactively:
+- Flag urgent emails immediately (do not wait for cron)
+- Warn about upcoming deadlines (24-48 hours ahead)
+- Note scheduling conflicts as soon as they appear
+- Surface relevant context before meetings (prep materials, related emails)
+- Track email threads that have gone quiet
+
+### Do NOT proactively:
+- Send emails without explicit approval
+- Modify calendar events without asking
+- Delete or archive emails
+- Change task priorities without consulting the user
+- Interrupt the user for low-priority items during focus time
+
+### Prioritization framework
+When multiple items compete for attention:
+1. Time-critical items (meetings starting soon, deadlines today)
+2. People-critical items (VIP senders, escalations)
+3. Impact-critical items (revenue, legal, compliance)
+4. Routine items (status updates, newsletters, FYIs)
diff --git a/bates-enhance/integrations/m365/workspace-additions/rules/email-drafting.md b/bates-enhance/integrations/m365/workspace-additions/rules/email-drafting.md
new file mode 100644
index 0000000..b168652
--- /dev/null
+++ b/bates-enhance/integrations/m365/workspace-additions/rules/email-drafting.md
@@ -0,0 +1,150 @@
+# Email Drafting Rules
+
+## Core Principle
+
+**Always draft first, never send without review** — unless the user
+explicitly says "send it" or "go ahead and send."
+
+---
+
+## Drafting Workflow
+
+### Step 1: Create the draft
+- Use `create-draft` via ms365-assistant
+- Include subject, body, and recipients
+- Save as a draft in the Drafts folder
+
+### Step 2: Present for review
+- Show the user: recipients, subject, and a summary of the body
+- Ask: "Ready to send, or would you like changes?"
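+
+For example, Step 1 as an actual call (tool and fields as documented in
+refs/mcp-servers.md; the draft's message ID from the response is what Step 3
+sends):
+
+```bash
+mcporter call ms365-assistant create-draft \
+  '{"subject": "Re: Project update", "body": "Draft text for review...", "toRecipients": ["{{USER_EMAIL}}"]}'
+```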
+
+### Step 3: Send after approval
+- Only send after explicit user confirmation
+- Use `send-mail` with the draft message ID
+
+### Exception: Pre-approved templates
+If the user has pre-approved a specific response pattern (e.g., "always
+confirm receipt of invoices"), drafting can be skipped. Document these
+exceptions in observations.
+
+---
+
+## Tone Matching
+
+### External communications
+- **Formal**: proper greeting, full sentences, professional sign-off
+- Match the formality level of the incoming email
+- When in doubt, err on the side of more formal
+- Include the assistant's signature block
+
+### Internal communications
+- **Casual but professional**: can use first names, shorter sentences
+- Match the team's communication style
+- Skip formal greetings for ongoing threads
+
+### Tone signals to detect
+| Signal           | Tone         |
+|------------------|--------------|
+| "Dear Mr./Ms."   | Very formal  |
+| "Hi [Name],"     | Professional |
+| "Hey,"           | Casual       |
+| No greeting      | Brief/busy   |
+| Emojis in body   | Informal     |
+
+### Reply tone rule
+Always match or be slightly more formal than the incoming message.
+Never be significantly more casual than the sender.
+
+---
+
+## Reply vs. Reply-All Guidelines
+
+### Use Reply (to sender only) when:
+- The response is only relevant to the sender
+- You are asking a clarifying question
+- The response contains sensitive information
+- The thread has many CC recipients who do not need the update
+
+### Use Reply-All when:
+- The response contains information everyone needs
+- You are answering a question asked to the group
+- The user explicitly says "reply all"
+- Status updates or deliverables the whole team should see
+
+### When uncertain
+Default to Reply (not Reply-All). It is better to under-share than to
+spam a thread. The user can always forward if needed.
+
+---
+
+## Subject Line Conventions
+
+### New emails
+- Keep under 60 characters
+- Lead with the action or topic: "Q4 Report — Review by Friday"
+- Use prefixes sparingly: "FYI:", "Action:", "Urgent:" (only when truly urgent)
+
+### Replies
+- Keep the original subject line (RE: ...)
+- Do NOT modify the subject unless the topic has changed
+- If the topic changes significantly, start a new thread
+
+### Forwarding
+- Keep the original subject (FW: ...)
+- Add context at the top of the body, not in the subject
+
+---
+
+## Signature Handling
+
+- The assistant has its own email signature configured in Exchange
+- Do NOT manually add a signature to drafts (Exchange appends it)
+- If the user asks to customize the signature, direct them to Exchange settings
+
+---
+
+## Transport Rule Compliance
+
+### Approved recipients
+The assistant email is restricted by an Exchange transport rule to send
+only to approved recipients. This is a safety guardrail.
+
+### Before sending, verify:
+1. All recipients are on the approved list
+2. No BCC recipients that might bypass the transport rule
+3. No distribution lists that expand to unapproved addresses
+
+### If a recipient is not approved:
+- Inform the user: "I cannot send to [address] — it is not on the approved list."
+- Suggest: the user sends from their own account, or adds the recipient to the approved list
+
+---
+
+## Attachment Rules
+
+### Including attachments
+- Reference attachments in the body: "Please find attached..."
+- Verify the attachment exists and is the correct file before sending
+- For large files (> 10 MB), prefer sharing a OneDrive link instead
+
+### OneDrive links vs. attachments
+
+| Size     | Method            |
+|----------|-------------------|
+| < 5 MB   | Direct attachment |
+| 5-25 MB  | Either works      |
+| > 25 MB  | OneDrive link     |
+
+---
+
+## Quality Checklist
+
+Before presenting a draft to the user:
+
+- [ ] Recipients are correct (To, CC, BCC)
+- [ ] Subject line is clear and concise
+- [ ] Tone matches the context
+- [ ] No spelling or grammar errors
+- [ ] Attachments are referenced and included
+- [ ] Reply threading is correct (reply vs. new message)
+- [ ] All recipients are on the approved list
+- [ ] No confidential information sent to wrong audience
diff --git a/bates-enhance/integrations/m365/workspace-additions/rules/proactive-checkins.md b/bates-enhance/integrations/m365/workspace-additions/rules/proactive-checkins.md
new file mode 100644
index 0000000..2212f56
--- /dev/null
+++ b/bates-enhance/integrations/m365/workspace-additions/rules/proactive-checkins.md
@@ -0,0 +1,115 @@
+# Proactive Check-in Rules
+
+## Scoring System
+
+Before sending a proactive check-in, calculate a notification score:
+
+```
+Score = Urgency + Impact + Noise
+```
+
+Noise is zero or negative, so it only ever lowers the score.
+
+**Only notify if Score >= 5.**
+
+### Urgency (0-5)
+
+| Value | Criteria                                          |
+|-------|---------------------------------------------------|
+| 5     | Immediate action required (< 1 hour)              |
+| 4     | Action required today                             |
+| 3     | Action required this week                         |
+| 2     | Upcoming deadline (next week)                     |
+| 1     | General awareness / FYI                           |
+| 0     | No time pressure                                  |
+
+### Impact (0-5)
+
+| Value | Criteria                                          |
+|-------|---------------------------------------------------|
+| 5     | Revenue, legal, or compliance implications        |
+| 4     | Affects external stakeholders (clients, partners) |
+| 3     | Affects team or project delivery                  |
+| 2     | Affects personal productivity                     |
+| 1     | Minor convenience                                 |
+| 0     | No meaningful impact                              |
+
+### Noise (-3 to 0)
+
+| Value | Criteria                                          |
+|-------|---------------------------------------------------|
+| 0     | First notification on this topic today            |
+| -1    | Second notification on same topic today           |
+| -2    | User has already acknowledged this topic          |
+| -3    | User explicitly said "stop" or "not now" on topic |
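+
+A worked example: an email from a key client containing "by EOD" scores
+4 (urgency: action today) + 4 (impact: external stakeholder) + 0 (noise:
+first notification) = 8, so it is sent immediately. The same email after
+the user has already acknowledged the topic scores 4 + 4 + (-2) = 6: still
+at or above the notify threshold of 5, but no longer in the send-immediately
+tier (see Delivery Rules below).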
+
+---
+
+## Email Triggers
+
+### Immediate notification (Urgency 5)
+- Subject contains: "urgent", "ASAP", "emergency", "critical", "action required"
+- From a VIP sender AND flagged as high importance
+- Reply to a thread the user started with no response in 24h+ from external party
+
+### Same-day notification (Urgency 4)
+- From a VIP sender (executives, key clients — see observations)
+- Contains deadline language: "by EOD", "by end of day", "today"
+- Calendar invite requiring response (accept/decline)
+- Email thread with 3+ unread replies (active discussion happening without user)
+
+### Awareness notification (Urgency 2-3)
+- Emails mentioning the user by name (but not addressed to them)
+- Automated alerts from monitored systems
+- Invoices or receipts above a threshold amount
+
+### Do NOT notify
+- Marketing emails, newsletters
+- Automated notifications already handled by other systems
+- CC-only emails with no action required
+- Duplicate notifications (same thread, already notified)
+
+---
+
+## Calendar Triggers
+
+### Immediate notification (Urgency 5)
+- Meeting starts in < 15 minutes AND user hasn't responded to invite
+- Double-booking detected with external participants
+
+### Same-day notification (Urgency 4)
+- Meeting in < 2 hours that needs preparation materials
+- Meeting organizer sent pre-read materials
+- Attendee cancellation on a meeting the user organized
+
+### Awareness notification (Urgency 2-3)
+- New meeting invite for this week
+- Meeting rescheduled
+- Recurring meeting cancelled
+
+---
+
+## Delivery Rules
+
+### Channel selection
+- Use the primary delivery channel configured in cron jobs
+- For Score >= 8: consider multi-channel (chat + email summary)
+- Never wake the user between 22:00 and 07:00 (respect quiet hours)
+
+### Message format
+- Lead with the action needed, not the source
+- Include enough context to decide without opening the email
+- Example: "Jane from Acme needs your sign-off on the SOW by EOD. Reply or I can draft an acknowledgment."
+
+### Batching
+- If multiple items arrive within a 10-minute window, batch them
+- Present as a numbered list sorted by score (highest first)
+- Exception: Score >= 8 items always send immediately
+
+---
+
+## Learning and Adaptation
+
+- Track which notifications the user acts on vs. ignores
+- After 2 weeks, review notification patterns in observations
+- Adjust VIP sender list based on interaction frequency
+- Raise noise penalty for topics the user consistently ignores
+- Lower notification threshold for topics the user consistently acts on quickly
diff --git a/bates-enhance/integrations/m365/workspace-additions/skills/calendar-unified/SKILL.md b/bates-enhance/integrations/m365/workspace-additions/skills/calendar-unified/SKILL.md
new file mode 100644
index 0000000..8471f7b
--- /dev/null
+++ b/bates-enhance/integrations/m365/workspace-additions/skills/calendar-unified/SKILL.md
@@ -0,0 +1,102 @@
+# Skill: Unified Calendar View
+
+## Trigger
+
+User asks about their schedule, calendar, or availability. Examples:
+- "What's on my calendar today?"
+- "Am I free Thursday afternoon?"
+- "Show me next week's schedule"
+- "When is my next meeting with Jane?"
+- "Find a time slot for a 1-hour meeting this week"
+
+## Inputs
+
+- **date_range**: The time period to display. Default: today
+- **filter**: Optional filter (person, keyword, type)
+- **action**: view / find-slot / check-availability
+
+## Steps
+
+### 1. Determine the date range
+
+Parse the user's request into a concrete date range:
+- "today" -> today 00:00 to 23:59
+- "tomorrow" -> tomorrow 00:00 to 23:59
+- "this week" -> Monday to Friday of current week
+- "next week" -> Monday to Friday of next week
+- Specific date -> that day 00:00 to 23:59
+
+### 2. Pull events from personal calendar
+
+```
+list-calendar-events startDateTime="<start>" endDateTime="<end>"
+```
+
+### 3. Pull events from company calendar (if configured)
+
+If ms365-company-reader is available:
+
+```
+# Via company reader
+list-calendar-events startDateTime="<start>" endDateTime="<end>"
+```
+
+### 4. Merge and deduplicate
+
+Combine events from all sources:
+- Deduplicate by subject + time (same event appearing in multiple calendars)
+- Mark the source calendar for each event
+- Sort by start time
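+
+A sketch of the merge/dedupe in `jq` (assuming each source has been saved
+as a JSON array of events with `subject` and `start` fields; the file names
+are illustrative):
+
+```bash
+jq -s 'add | unique_by((.subject // "") + (.start.dateTime // "")) | sort_by(.start.dateTime)' \
+  personal-events.json company-events.json
+```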
+
+### 5. Present the unified view
+
+#### For "view" action:
+
+```markdown
+## Schedule for {{DATE}}
+
+| Time          | Event            | Location   | Calendar |
+|---------------|------------------|------------|----------|
+| 09:00 - 09:30 | Team standup     | Teams call | Personal |
+| 10:00 - 11:00 | Client review    | Room 3A    | Company  |
+| 13:00 - 14:00 | Lunch with Jane  | The Bistro | Personal |
+| 15:00 - 15:30 | 1:1 with Manager | Teams call | Personal |
+```
+
+Include:
+- Free time blocks (gaps > 30 minutes)
+- All-day events at the top
+- Travel time if location changes between meetings
+
+#### For "find-slot" action:
+
+Find available time slots matching the requested duration:
+- List all free blocks of sufficient length
+- Prefer morning slots if no preference stated
+- Avoid fragmenting existing focus time blocks
+- Consider buffer time between meetings (15 min default)
+
+#### For "check-availability" action:
+
+Simply report whether the requested time is free or occupied:
+- "You're free Thursday 14:00-15:00"
+- "Thursday 14:00 is blocked by 'Client review' (14:00-15:30)"
+
+### 6. Offer follow-up actions
+
+Depending on context:
+- "Want me to schedule something in that slot?"
+- "Should I send a meeting invite?"
+- "Want me to decline this meeting?"
+- "Need me to find an alternative time?"
+
+## Output
+
+A formatted calendar view, available time slots, or availability check
+response, with follow-up action options.
+
+## Error handling
+
+- If one calendar source fails, show available data with a note
+- If no events found, confirm the date range and report "no events scheduled"
+- For ambiguous dates, ask the user to clarify
diff --git a/bates-enhance/integrations/m365/workspace-additions/skills/efficiency-audit/SKILL.md b/bates-enhance/integrations/m365/workspace-additions/skills/efficiency-audit/SKILL.md
new file mode 100644
index 0000000..82ec16e
--- /dev/null
+++ b/bates-enhance/integrations/m365/workspace-additions/skills/efficiency-audit/SKILL.md
@@ -0,0 +1,142 @@
+# Skill: Efficiency Audit
+
+## Trigger
+
+On-demand when user asks for an efficiency or productivity audit. Examples:
+- "How efficient am I with email?"
+- "Run an efficiency audit"
+- "Analyze my response times"
+- "Where am I losing time?"
+
+## Inputs
+
+- **period**: Analysis period. Default: last 30 days
+- **focus**: What to analyze (email / tasks / calendar / all). Default: all
+
+## Steps
+
+### 1. Email efficiency analysis
+
+#### Response time analysis
+
+Search sent messages for the analysis period:
+
+```
+list-mail-messages filter="sentDateTime ge <period-start>" top=200
+```
+
+For each sent reply, find the original message and calculate response time.
+Categorize:
+- < 1 hour: fast
+- 1-4 hours: normal
+- 4-24 hours: slow
+- > 24 hours: delayed
+- No response: dropped
+
+#### Email volume analysis
+
+```
+list-mail-messages filter="receivedDateTime ge <period-start>" top=500
+```
+
+Calculate:
+- Emails received per day (average)
+- Emails sent per day (average)
+- Ratio of sent to received
+- Peak email hours
+- Top senders (by volume)
+
+#### Thread analysis
+
+Identify:
+- Longest email threads (most back-and-forth)
+- Threads with most participants
+- Threads that could have been resolved faster
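+
+A sketch of the response-time bucketing (pure arithmetic; the two
+timestamps are assumed to already be extracted as epoch seconds):
+
+```bash
+classify_response() {   # classify_response RECEIVED_TS REPLIED_TS
+  local hours=$(( ($2 - $1) / 3600 ))
+  if   (( hours < 1 ));  then echo "fast"
+  elif (( hours < 4 ));  then echo "normal"
+  elif (( hours < 24 )); then echo "slow"
+  else                        echo "delayed"
+  fi
+}
+```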
+
+### 2. Task efficiency analysis
+
+For each Planner plan:
+
+```
+list-planner-tasks planId="<plan-id>"
+```
+
+Calculate:
+- Average time from creation to completion
+- Tasks completed on time vs. overdue
+- Tasks that sat in "In Progress" for > 5 days
+- Tasks that were created and completed same day
+- Tasks that have been open for > 14 days
+
+### 3. Calendar efficiency analysis
+
+```
+list-calendar-events startDateTime="<period-start>" endDateTime="<period-end>"
+```
+
+Calculate:
+- Total hours in meetings per week
+- Average meeting duration
+- Meetings with > 5 attendees (potential "could be an email")
+- Back-to-back meeting streaks
+- Focus time blocks (2+ hours without meetings)
+- Recurring meetings percentage
+
+### 4. Identify bottlenecks
+
+Based on the analysis, identify:
+- **Email bottlenecks**: senders or threads consuming disproportionate time
+- **Task bottlenecks**: tasks stuck in progress, dependencies blocking others
+- **Calendar bottlenecks**: overloaded days, lack of focus time
+- **Response gaps**: areas where response times are consistently slow
+
+### 5. Generate recommendations
+
+Produce actionable recommendations:
+- Emails to unsubscribe from or filter
+- Meetings to decline or shorten
+- Tasks to delegate or defer
+- Time blocks to protect for focus work
+- Communication patterns to change
+
+### 6. Compile the audit report
+
+```markdown
+# Efficiency Audit — {{PERIOD}}
+
+## Email
+- Avg response time: X hours
+- Volume: X received / X sent per day
+- Response rate: X%
+- Top time sinks: [list]
+
+## Tasks
+- Avg completion time: X days
+- On-time rate: X%
+- Currently overdue: X tasks
+- Longest open: [task name] (X days)
+
+## Calendar
+- Meeting hours/week: X
+- Focus time/week: X hours
+- Meetings that could be emails: X
+
+## Top Recommendations
+1. [Most impactful suggestion]
2. [Second suggestion]
+3. [Third suggestion]
+
+## Detailed Findings
+[Full analysis with charts/tables]
+```
+
+## Output
+
+A structured efficiency audit report with metrics, bottleneck analysis,
+and actionable recommendations.
+
+## Error handling
+
+- If data is insufficient for meaningful analysis, note the minimum period needed
+- If specific data sources are unavailable, analyze what is available
+- Always provide at least qualitative observations even with limited data
diff --git a/bates-enhance/integrations/m365/workspace-additions/skills/email-file-search/SKILL.md b/bates-enhance/integrations/m365/workspace-additions/skills/email-file-search/SKILL.md
new file mode 100644
index 0000000..acb77a7
--- /dev/null
+++ b/bates-enhance/integrations/m365/workspace-additions/skills/email-file-search/SKILL.md
@@ -0,0 +1,94 @@
+# Skill: Email & File Search
+
+## Trigger
+
+User asks to find an email, attachment, document, or file. Examples:
+- "Find the email from Jane about the budget"
+- "Search for the quarterly report PDF"
+- "Did anyone send me an invoice this week?"
+- "Find the SOW in OneDrive"
+
+## Inputs
+
+- **query**: The search term or description of what to find
+- **scope**: Where to search (email, files, or both). Default: both
+- **date_range**: Optional date constraint (e.g., "this week", "last month")
+- **sender**: Optional sender filter
+
+## Steps
+
+### 1. Determine search scope
+
+Based on the user's request, decide whether to search:
+- Emails only (mentions "email", "message", "sent", "received")
+- Files only (mentions "file", "document", "OneDrive", "drive")
+- Both (ambiguous or mentions both)
+
+### 2. Search emails (if in scope)
+
+Use ms365-reader to search emails:
+
+```
+list-mail-messages search='"<query>"' top=20
+```
+
+If a sender is specified:
+```
+list-mail-messages search='"from:<sender>"'
+```
+
+If a date range is specified, add a filter:
+```
+list-mail-messages search='"<query>"' filter="receivedDateTime ge 2026-01-01"
+```
+
+### 3. Search OneDrive files (if in scope)
+
+Use ms365-reader to search files:
+
+```
+list-drive-items search="<query>"
+```
+
+Or browse known locations:
+```
+list-drive-items path="drafts/documents/"
+list-drive-items path="drafts/reports/"
+```
+
+### 4. Search local search index (if available)
+
+If the search index is available, query it for broader results:
+
+```bash
+python3 -c "
+import sqlite3, json
+conn = sqlite3.connect('{{SEARCH_INDEX_DB}}')
+cur = conn.execute(\"SELECT id, subject, sender, date FROM emails_fts WHERE emails_fts MATCH ? LIMIT 20\", ['<query>'])
+print(json.dumps([dict(zip(['id','subject','sender','date'], r)) for r in cur.fetchall()]))
+"
+```
+
+### 5. Present results
+
+Format results as a clear list:
+- For emails: sender, subject, date, snippet
+- For files: name, path, last modified, size
+- Sort by relevance (most recent first for ties)
+- Indicate which source each result came from
+
+### 6. Offer follow-up actions
+
+- "Want me to open any of these?"
+- "Should I download the attachment?"
+- "Want me to draft a reply?"
+
+## Output
+
+A formatted list of search results with source indicators and follow-up options.
+
+## Error handling
+
+- If ms365-reader is unavailable, fall back to search index only
+- If no results found, suggest broadening the search terms
+- If too many results, suggest narrowing with date range or sender filter
diff --git a/bates-enhance/integrations/m365/workspace-additions/skills/managers-report/SKILL.md b/bates-enhance/integrations/m365/workspace-additions/skills/managers-report/SKILL.md
new file mode 100644
index 0000000..3ef20c3
--- /dev/null
+++ b/bates-enhance/integrations/m365/workspace-additions/skills/managers-report/SKILL.md
@@ -0,0 +1,110 @@
+# Skill: Manager's Report
+
+## Trigger
+
+Weekly cron job (Friday 16:00) or on-demand when user asks for a
+manager's report. Examples:
+- "Compile the weekly report"
+- "Manager's update for this week"
+- "Weekly summary for leadership"
+
+## Inputs
+
+- **period**: The reporting period. Default: current week (Monday-Friday)
+- **format**: Output format (chat / email-draft / document). Default: chat
+- **include_metrics**: Whether to include quantitative metrics. Default: true
+
+## Steps
+
+### 1. Gather metrics from all projects
+
+For each tracked project:
+
+```
+list-planner-tasks planId="<plan-id>"
+```
+
+Calculate:
+- Tasks completed this week
+- Tasks added this week
+- Net task change (completed minus added)
+- Overdue task count
+- Completion rate (completed / total open at week start)
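+
+A worked example of those metrics: a plan that starts the week with 20 open
+tasks, completes 8, and adds 5 has a net task change of 8 - 5 = +3 (the
+backlog shrank by 3) and a completion rate of 8 / 20 = 40%.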
+
+### 2. Review email activity
+
+Search for key email threads from this week:
+
+```
+list-mail-messages filter="receivedDateTime ge <week-start>" top=100
+```
+
+Identify:
+- Key decisions made via email
+- Important external communications
+- Escalations or issues raised
+
+### 3. Review calendar (meetings and events)
+
+```
+list-calendar-events startDateTime="<week-start>" endDateTime="<week-end>"
+```
+
+Note:
+- Key meetings held and their outcomes (if noted in observations)
+- Meetings scheduled for next week that need prep
+
+### 4. Check observations for achievements
+
+Review recent observation entries for:
+- Completed milestones
+- Resolved blockers
+- Process improvements
+- Learnings
+
+### 5. Compile the report
+
+Structure:
+
+```markdown
+# Weekly Report — Week of {{DATE}}
+
+## Key Achievements
+- [Bullet list of top 3-5 accomplishments]
+
+## Metrics
+| Project | Tasks Done | Tasks Open | Overdue | Status |
+|---------|------------|------------|---------|--------|
+| ...     | ...        | ...        | ...     | ...    |
+
+## Issues & Blockers
+- [Any unresolved issues, with proposed resolution]
+
+## Key Decisions
+- [Decisions made this week and their context]
+
+## Plan for Next Week
+- [Top priorities for next week]
+- [Key meetings or deadlines]
+
+## Risks
+- [Any emerging risks or concerns]
+```
+
+### 6. Deliver the report
+
+Based on `format`:
+- **chat**: Send via primary delivery channel
+- **email-draft**: Create a draft email with the report content
+- **document**: Upload to OneDrive at `drafts/reports/weekly/`
+
+## Output
+
+A structured weekly manager's report with metrics, achievements, issues,
+and plan.
+
+## Error handling
+
+- If Planner data is unavailable, note it and compile from email/calendar only
+- If no observations exist for the period, note "no recorded observations"
+- Always produce a report even if some data sources are incomplete
diff --git a/bates-enhance/integrations/m365/workspace-additions/skills/project-brief/SKILL.md b/bates-enhance/integrations/m365/workspace-additions/skills/project-brief/SKILL.md
new file mode 100644
index 0000000..d7a04d0
--- /dev/null
+++ b/bates-enhance/integrations/m365/workspace-additions/skills/project-brief/SKILL.md
@@ -0,0 +1,93 @@
+# Skill: Project Brief
+
+## Trigger
+
+User asks for a project status, update, or brief. Examples:
+- "What's the status of Project Alpha?"
+- "Give me a brief on the website redesign"
+- "Project update for the Q1 launch"
+- "How is the migration project going?"
+
+## Inputs
+
+- **project_name**: The name or identifier of the project
+- **depth**: How detailed the brief should be (quick / standard / deep). Default: standard
+
+## Steps
+
+### 1. Gather project context from observations
+
+Check observations for any stored project context:
+- Project team members
+- Key milestones and deadlines
+- Previous status notes
+- Related Planner plan IDs
+
+### 2. Search recent emails
+
+Search for project-related emails from the last 7 days:
+
+```
+list-mail-messages search='"<project-name>"' top=30
+```
+
+Filter for the most relevant:
+- Emails to/from project team members
+- Status update emails
+- Emails with action items or decisions
+
+### 3. Check Planner tasks
+
+If a Planner plan is associated with the project:
+
+```
+list-planner-tasks planId="<plan-id>"
+```
+
+Summarize:
+- Total tasks, completed vs. open
+- Overdue tasks
+- Tasks due this week
+- Recently completed tasks
+
+### 4. Check calendar events
+
+Look for recent and upcoming project meetings:
+
+```
+list-calendar-events filter="contains(subject, '<project-name>')" startDateTime="..." endDateTime="..."
+```
+
+### 5. Compile the brief
+
+**Quick brief** (1-2 sentences):
+- Overall status (on track / at risk / blocked)
+- Next key milestone
+
+**Standard brief** (structured):
+- **Status**: on track / at risk / blocked
+- **Recent activity**: key emails, completed tasks, meetings
+- **Open items**: count of open tasks, overdue items
+- **Blockers**: anything preventing progress
+- **Next steps**: upcoming milestones, due dates
+
+**Deep brief** (comprehensive):
+- Everything in standard, plus:
+- Full email thread summaries
+- Task-by-task breakdown
+- Risk analysis
+- Recommendations
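+
+An illustrative quick brief (hypothetical project and numbers): "Project
+Alpha is on track; next milestone is the staging cut-over on Friday. 12 of
+14 tasks done, none overdue."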
### 6. Update observations

Store the current project status in observations for future reference.

## Output

A structured project brief matching the requested depth level.

## Error handling

- If no Planner plan is found, skip the task section and note it
- If no emails are found, note "no recent email activity"
- If the project name is ambiguous, ask for clarification

diff --git a/bates-enhance/integrations/m365/workspace-additions/skills/project-sync/SKILL.md b/bates-enhance/integrations/m365/workspace-additions/skills/project-sync/SKILL.md
new file mode 100644
index 0000000..e0bc102
--- /dev/null
+++ b/bates-enhance/integrations/m365/workspace-additions/skills/project-sync/SKILL.md
@@ -0,0 +1,101 @@
# Skill: Project Sync

## Trigger

Scheduled (via cron) or on-demand when user asks to sync project data.
Examples:
- "Sync all project boards"
- "Update project tracking"
- Triggered by `project-staleness-check` cron job

## Inputs

- **scope**: Which projects to sync (all / specific project name). Default: all
- **update_observations**: Whether to update observation files. Default: true

## Steps

### 1. Load known projects

Read the project list from observations:
- Project names
- Associated Planner plan IDs
- Team members
- Key repositories (if any)

### 2. Sync Planner boards

For each project with a Planner plan:

```
list-planner-plans
```

For each plan:

```
list-planner-tasks planId="<plan-id>"
list-planner-buckets planId="<plan-id>"
```

Collect:
- Task counts by bucket (To Do, In Progress, Done)
- Overdue tasks
- Tasks completed since last sync
- New tasks since last sync

### 3. Sync email activity

For each project, search recent emails:

```
list-mail-messages search='"<project name>"' filter="receivedDateTime ge <last-sync>" top=50
```

Summarize:
- Number of new emails
- Key threads and topics
- Any urgent items

### 4. Sync calendar events

Check for upcoming project meetings:

```
list-calendar-events filter="contains(subject, '<project name>')"
```

Note:
- Next scheduled meeting
- Any cancelled or rescheduled meetings

### 5. Check for staleness

A project is considered stale if:
- No Planner task updates in 7+ days
- No related emails in 7+ days
- No commits in associated repos in 7+ days (if repo is tracked)

Flag stale projects for attention (a sketch of this check follows).
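A minimal sketch of the staleness rule above, assuming last-activity timestamps are tracked as ISO-8601 strings in observations (`None` meaning the source isn't tracked for that project):

```python
from datetime import datetime, timedelta, timezone

STALE_AFTER = timedelta(days=7)

def is_stale(last_task_update, last_email, last_commit=None):
    """True when every tracked activity source is 7+ days old (or absent)."""
    now = datetime.now(timezone.utc)
    seen = [datetime.fromisoformat(v.replace("Z", "+00:00"))
            for v in (last_task_update, last_email, last_commit) if v]
    # A project with no recorded activity at all is also stale
    return not seen or all(now - ts >= STALE_AFTER for ts in seen)
```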
### 6. Update observations

If `update_observations` is true, update the project observation files with:
- Current task counts and status
- Last sync timestamp
- Staleness flags
- Key changes since last sync

## Output

A sync summary for each project:
- Task status snapshot
- Recent activity count
- Staleness indicator
- Any items requiring attention

## Error handling

- If a Planner plan is inaccessible, log a warning and continue with other projects
- If email search fails, note the gap and continue
- Never fail the entire sync due to one project's error

diff --git a/bates-enhance/integrations/search/config-fragment.json b/bates-enhance/integrations/search/config-fragment.json
new file mode 100644
index 0000000..2c63c08
--- /dev/null
+++ b/bates-enhance/integrations/search/config-fragment.json
@@ -0,0 +1,2 @@
{
}

diff --git a/bates-enhance/integrations/search/cron-jobs-search.json b/bates-enhance/integrations/search/cron-jobs-search.json
new file mode 100644
index 0000000..4dda3ad
--- /dev/null
+++ b/bates-enhance/integrations/search/cron-jobs-search.json
@@ -0,0 +1,9 @@
[
  {
    "name": "search-index-monitor",
    "schedule": "0 */2 * * *",
    "tz": "{{USER_TZ}}",
    "message": "Check search index health: document count, last sync time, errors. Report issues only.",
    "sessionTarget": "isolated"
  }
]

diff --git a/bates-enhance/integrations/search/requirements.txt b/bates-enhance/integrations/search/requirements.txt
new file mode 100644
index 0000000..ee49b80
--- /dev/null
+++ b/bates-enhance/integrations/search/requirements.txt
@@ -0,0 +1,7 @@
httpx>=0.24.0
numpy>=1.24.0
sqlite-utils>=3.30
pyyaml>=6.0
tqdm>=4.65.0
python-dateutil>=2.8.2
tiktoken>=0.5.0
diff --git a/bates-enhance/integrations/search/scripts/ingest-email.py b/bates-enhance/integrations/search/scripts/ingest-email.py
new file mode 100755
index 0000000..0f3fa3f
--- /dev/null
+++ b/bates-enhance/integrations/search/scripts/ingest-email.py
@@ -0,0 +1,446 @@
#!/usr/bin/env python3
"""
ingest-email.py -- Email ingestion for Bates local search index

Usage:
    ingest-email.py --incremental            # Sync new emails since last run
    ingest-email.py --bulk                   # Full re-sync
    ingest-email.py --stagger-phase N        # Sync specific phase (1-5)
    ingest-email.py --resume                 # Resume interrupted sync
    ingest-email.py --backfill-attachments   # Download attachment text

Reads config from ~/.openclaw/search-index/config.yaml
Stores data in a SQLite FTS5 database with WAL mode.
"""

import argparse
import json
import logging
import os
import sqlite3
import subprocess
import sys
import time
from datetime import datetime, timedelta, timezone
from pathlib import Path

import httpx
import yaml
from tqdm import tqdm

# ---------------------------------------------------------------------------
# Paths and defaults
# ---------------------------------------------------------------------------
CONFIG_PATH = Path.home() / ".openclaw" / "search-index" / "config.yaml"
DEFAULT_DB_PATH = Path.home() / ".openclaw" / "search-index" / "db" / "search.db"
OLLAMA_ENDPOINT = "http://localhost:11434"
EMBEDDING_MODEL = "nomic-embed-text"
BATCH_SIZE = 50

# ---------------------------------------------------------------------------
# Logging
# ---------------------------------------------------------------------------
log_dir = Path.home() / ".openclaw" / "search-index" / "logs"
log_dir.mkdir(parents=True, exist_ok=True)

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[
        logging.StreamHandler(sys.stderr),
        logging.FileHandler(log_dir / "ingestion.log"),
    ],
)
logger = logging.getLogger("ingest-email")


def load_config(path=CONFIG_PATH):
    """Load the YAML config (default or --config override) and return the parsed dict."""
    path = Path(path)
    if not path.exists():
        logger.error("Config not found: %s", path)
        sys.exit(1)
    with open(path) as f:
        return yaml.safe_load(f)


def get_db_path(config):
    """Resolve database path from config, expanding ~."""
    raw = config.get("database", {}).get("path", str(DEFAULT_DB_PATH))
    return Path(os.path.expanduser(raw))


def init_db(db_path):
    """Create the database and tables if they don't exist."""
    db_path.parent.mkdir(parents=True, exist_ok=True)
    conn = sqlite3.connect(str(db_path))
    conn.execute("PRAGMA journal_mode=WAL")
    conn.execute("PRAGMA synchronous=NORMAL")

    # Main FTS5 virtual table for full-text search. Note: this must NOT be a
    # contentless table (content=''), because store_document() and the query
    # scripts read column values back out of it; a contentless FTS5 table
    # returns NULL for every column except rowid.
    conn.execute("""
        CREATE VIRTUAL TABLE IF NOT EXISTS documents USING fts5(
            doc_id,
            doc_type,
            subject,
            body,
            sender,
            recipients,
            folder,
            received_at,
            source_server,
            tokenize='porter unicode61'
        )
    """)

    # Sync state tracking per folder per server
    conn.execute("""
        CREATE TABLE IF NOT EXISTS sync_state (
            server TEXT NOT NULL,
            folder_id TEXT NOT NULL,
            folder_name TEXT,
            last_sync TEXT,
            last_id TEXT,
            doc_count INTEGER DEFAULT 0,
            phase INTEGER DEFAULT 1,
            PRIMARY KEY (server, folder_id)
        )
    """)

    # Embeddings stored as BLOBs (numpy float32 arrays serialized)
    conn.execute("""
        CREATE TABLE IF NOT EXISTS embeddings (
            doc_id TEXT PRIMARY KEY,
            embedding BLOB NOT NULL,
            model TEXT NOT NULL,
            created_at TEXT NOT NULL
        )
    """)

    # Metadata for tracking ingestion runs
    conn.execute("""
        CREATE TABLE IF NOT EXISTS ingestion_runs (
            run_id INTEGER PRIMARY KEY AUTOINCREMENT,
            started_at TEXT NOT NULL,
            finished_at TEXT,
            mode TEXT NOT NULL,
            phase INTEGER,
            docs_added INTEGER DEFAULT 0,
            docs_failed INTEGER DEFAULT 0,
            status TEXT DEFAULT 'running'
        )
    """)

    conn.commit()
    return conn
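# Example of what the schema above supports (illustrative only; nothing in
# this module calls it): a ranked full-text query against the FTS5 table.
#
#     conn = init_db(DEFAULT_DB_PATH)
#     rows = conn.execute(
#         "SELECT doc_id, subject FROM documents "
#         "WHERE documents MATCH ? ORDER BY rank LIMIT 5",
#         ('"quarterly report"',),
#     ).fetchall()
#
# FTS5 parses the MATCH string itself, so callers should quote user input
# the way search-query.py does before passing it in.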
def call_mcp(server, tool, params=None):
    """Call an MCP server tool via mcporter and return parsed JSON."""
    cmd = ["mcporter", "call", server, tool]
    if params:
        cmd.append(json.dumps(params))
    try:
        result = subprocess.run(
            cmd, capture_output=True, text=True, timeout=120
        )
        if result.returncode != 0:
            logger.error(
                "mcporter call %s %s failed: %s", server, tool, result.stderr.strip()
            )
            return None
        return json.loads(result.stdout) if result.stdout.strip() else None
    except subprocess.TimeoutExpired:
        logger.error("mcporter call %s %s timed out", server, tool)
        return None
    except json.JSONDecodeError:
        logger.error("mcporter call %s %s returned invalid JSON", server, tool)
        return None


def get_folders(server):
    """Retrieve mail folders from an MCP server."""
    result = call_mcp(server, "list-mail-folders", {})
    if not result:
        return []
    # Normalize: the response may be a list or have a 'value' key
    if isinstance(result, list):
        return result
    return result.get("value", result.get("folders", []))


def get_messages(server, folder_id=None, top=50, skip=0, since=None):
    """Retrieve mail messages from an MCP server with pagination."""
    params = {"top": top, "skip": skip}
    if folder_id:
        params["folderId"] = folder_id
    if since:
        params["filter"] = f"receivedDateTime ge {since}"
    # Folder-specific tool when a folder is given, general listing otherwise
    tool = "list-mail-folder-messages" if folder_id else "list-mail-messages"
    result = call_mcp(server, tool, params)
    if not result:
        return []
    if isinstance(result, list):
        return result
    return result.get("value", result.get("messages", []))


def generate_embedding(text, config):
    """Generate an embedding vector via the Ollama API."""
    endpoint = config.get("embedding", {}).get("endpoint", OLLAMA_ENDPOINT)
    model = config.get("embedding", {}).get("model", EMBEDDING_MODEL)
    # Truncate to avoid excessive token usage
    text = text[:8000]
    try:
        resp = httpx.post(
            f"{endpoint}/api/embeddings",
            json={"model": model, "prompt": text},
            timeout=30.0,
        )
        resp.raise_for_status()
        data = resp.json()
        return data.get("embedding")
    except Exception as e:
        logger.warning("Embedding generation failed: %s", e)
        return None


def store_document(conn, doc, server, folder_name, config):
    """Store a single email document in FTS5 and generate its embedding."""
    import numpy as np

    doc_id = doc.get("id", doc.get("internetMessageId", ""))
    if not doc_id:
        return False

    # Check for duplicates
    existing = conn.execute(
        "SELECT doc_id FROM documents WHERE doc_id = ?", (doc_id,)
    ).fetchone()
    if existing:
        return False

    subject = doc.get("subject", "")
    body_preview = doc.get("bodyPreview", doc.get("body", {}).get("content", ""))
    sender_obj = doc.get("from", {}).get("emailAddress", {})
    sender = f"{sender_obj.get('name', '')} <{sender_obj.get('address', '')}>"
    recipients_list = doc.get("toRecipients", [])
    recipients = "; ".join(
        r.get("emailAddress", {}).get("address", "") for r in recipients_list
    )
    received = doc.get("receivedDateTime", "")

    # Insert into FTS5
    conn.execute(
        "INSERT INTO documents(doc_id, doc_type, subject, body, sender, recipients, folder, received_at, source_server) "
        "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
        (doc_id, "email", subject, body_preview, sender, recipients, folder_name, received, server),
    )

    # Generate and store embedding
    embed_text = f"{subject}\n{sender}\n{body_preview}"
    embedding = generate_embedding(embed_text, config)
    if embedding:
        embedding_blob = np.array(embedding, dtype=np.float32).tobytes()
        conn.execute(
            "INSERT OR REPLACE INTO embeddings(doc_id, embedding, model, created_at) "
            "VALUES (?, ?, ?, ?)",
            (doc_id, embedding_blob, config.get("embedding", {}).get("model", EMBEDDING_MODEL),
             datetime.now(timezone.utc).isoformat()),
        )
+ + return True + + +def get_phase_folders(folders, phase, server): + """Filter folders based on stagger phase.""" + if phase >= 5: + return folders + inbox_names = {"inbox", "Inbox", "INBOX"} + if phase == 1: + return [f for f in folders if f.get("displayName", "") in inbox_names] + if phase == 2: + # All personal folders + return folders + # Phase 3+ includes company mailboxes (handled at server level) + return folders + + +def get_sync_timestamp(conn, server, folder_id): + """Get the last sync timestamp for a server/folder pair.""" + row = conn.execute( + "SELECT last_sync FROM sync_state WHERE server = ? AND folder_id = ?", + (server, folder_id), + ).fetchone() + return row[0] if row else None + + +def update_sync_state(conn, server, folder_id, folder_name, doc_count, phase): + """Update sync state after processing a folder.""" + now = datetime.now(timezone.utc).isoformat() + conn.execute( + "INSERT OR REPLACE INTO sync_state(server, folder_id, folder_name, last_sync, doc_count, phase) " + "VALUES (?, ?, ?, ?, ?, ?)", + (server, folder_id, folder_name, now, doc_count, phase), + ) + conn.commit() + + +def ingest_folder(conn, server, folder, config, phase, incremental=True): + """Ingest all messages from a single folder.""" + folder_id = folder.get("id", "") + folder_name = folder.get("displayName", "Unknown") + batch_size = config.get("sources", {}).get("email", {}).get("batch_size", BATCH_SIZE) + max_age = config.get("sources", {}).get("email", {}).get("max_age_days", 365) + + logger.info(" Syncing folder: %s (server: %s)", folder_name, server) + + since = None + if incremental: + since = get_sync_timestamp(conn, server, folder_id) + if not since and max_age: + cutoff = datetime.now(timezone.utc) - timedelta(days=max_age) + since = cutoff.isoformat() + + total_added = 0 + total_skipped = 0 + skip = 0 + + while True: + messages = get_messages(server, folder_id, top=batch_size, skip=skip, since=since) + if not messages: + break + + for msg in messages: + try: + added = store_document(conn, msg, server, folder_name, config) + if added: + total_added += 1 + else: + total_skipped += 1 + except Exception as e: + logger.warning("Failed to store message %s: %s", msg.get("id", "?"), e) + + conn.commit() + + if len(messages) < batch_size: + break + skip += batch_size + # Brief pause to avoid hammering the API + time.sleep(0.5) + + update_sync_state(conn, server, folder_id, folder_name, total_added, phase) + logger.info(" %s: +%d new, %d skipped (duplicates)", folder_name, total_added, total_skipped) + return total_added + + +def run_ingestion(config, mode, phase, resume=False): + """Main ingestion loop.""" + db_path = get_db_path(config) + conn = init_db(db_path) + + incremental = mode == "incremental" + effective_phase = phase or config.get("ingestion", {}).get("current_phase", 1) + + # Record this run + now = datetime.now(timezone.utc).isoformat() + cursor = conn.execute( + "INSERT INTO ingestion_runs(started_at, mode, phase, status) VALUES (?, ?, ?, 'running')", + (now, mode, effective_phase), + ) + run_id = cursor.lastrowid + conn.commit() + + logger.info("=== Ingestion run #%d: mode=%s, phase=%d ===", run_id, mode, effective_phase) + + email_cfg = config.get("sources", {}).get("email", {}) + servers = email_cfg.get("mcp_servers", []) + if not servers: + logger.error("No MCP servers configured in sources.email.mcp_servers") + conn.execute( + "UPDATE ingestion_runs SET finished_at = ?, status = 'error' WHERE run_id = ?", + (datetime.now(timezone.utc).isoformat(), run_id), + ) + conn.commit() 
        conn.close()
        return

    total_docs = 0
    total_errors = 0
    max_retries = config.get("ingestion", {}).get("retry", {}).get("max_attempts", 3)
    backoff = config.get("ingestion", {}).get("retry", {}).get("backoff_seconds", 60)

    for server in servers:
        logger.info("Processing server: %s", server)

        # Retry the folder listing with linear backoff. On total failure,
        # skip this server rather than falling through with `folders` unbound.
        folders = []
        for attempt in range(1, max_retries + 1):
            try:
                folders = get_folders(server)
                if folders:
                    break
                logger.warning("No folders returned from %s (attempt %d/%d)", server, attempt, max_retries)
            except Exception as e:
                folders = []
                logger.error("Error fetching folders from %s: %s (attempt %d/%d)", server, e, attempt, max_retries)
            if attempt < max_retries:
                time.sleep(backoff * attempt)

        if not folders:
            total_errors += 1
            continue

        phase_folders = get_phase_folders(folders, effective_phase, server)
        logger.info("  Phase %d: %d folders to sync (of %d total)", effective_phase, len(phase_folders), len(folders))

        for folder in tqdm(phase_folders, desc=f"  {server}", unit="folder", file=sys.stderr):
            try:
                added = ingest_folder(conn, server, folder, config, effective_phase, incremental)
                total_docs += added
            except Exception as e:
                logger.error("Error ingesting folder %s: %s", folder.get("displayName", "?"), e)
                total_errors += 1

    # Finalize run record
    conn.execute(
        "UPDATE ingestion_runs SET finished_at = ?, docs_added = ?, docs_failed = ?, status = ? WHERE run_id = ?",
        (datetime.now(timezone.utc).isoformat(), total_docs, total_errors,
         "complete" if total_errors == 0 else "partial", run_id),
    )
    conn.commit()
    conn.close()

    logger.info("=== Ingestion complete: %d docs added, %d errors ===", total_docs, total_errors)


def main():
    parser = argparse.ArgumentParser(description="Email ingestion for Bates local search index")
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--incremental", action="store_true", help="Sync new emails since last run")
    group.add_argument("--bulk", action="store_true", help="Full re-sync of all messages")
    parser.add_argument("--stagger-phase", type=int, choices=[1, 2, 3, 4, 5],
                        help="Sync specific phase (1=inbox, 2=all personal, 3=company, 4=shared, 5=all)")
    parser.add_argument("--resume", action="store_true", help="Resume interrupted sync")
    parser.add_argument("--backfill-attachments", action="store_true", help="Download attachment text for existing docs")
    parser.add_argument("--config", type=str, default=str(CONFIG_PATH), help="Path to config.yaml")

    args = parser.parse_args()

    config = load_config(args.config)

    if args.backfill_attachments:
        logger.info("Attachment backfill not yet implemented")
        sys.exit(0)

    # --resume is accepted for forward compatibility; incremental mode already
    # restarts from sync_state, which covers interrupted runs.
    mode = "bulk" if args.bulk else "incremental"
    run_ingestion(config, mode, args.stagger_phase, resume=args.resume)


if __name__ == "__main__":
    main()
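The rebuild script below exists for recovery; before reaching for it, FTS5's built-in `integrity-check` command can confirm whether the index is actually damaged. A minimal sketch, not part of the shipped scripts (the database path assumption matches the defaults above):

```python
#!/usr/bin/env python3
"""Quick corruption probe for the search index (sketch only)."""
import sqlite3
from pathlib import Path

db = Path.home() / ".openclaw" / "search-index" / "db" / "search.db"
conn = sqlite3.connect(str(db))
try:
    # FTS5 special command: raises if the index structure is inconsistent
    conn.execute("INSERT INTO documents(documents) VALUES('integrity-check')")
    print("FTS5 index OK")
except sqlite3.DatabaseError as e:
    print(f"FTS5 index damaged ({e}); run rebuild-index.py")
finally:
    conn.close()
```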
diff --git a/bates-enhance/integrations/search/scripts/rebuild-index.py b/bates-enhance/integrations/search/scripts/rebuild-index.py
new file mode 100755
index 0000000..b31f254
--- /dev/null
+++ b/bates-enhance/integrations/search/scripts/rebuild-index.py
@@ -0,0 +1,222 @@
#!/usr/bin/env python3
"""
rebuild-index.py -- Drop and rebuild the FTS5 index from raw data

This script is for recovery situations where the FTS5 index has become
corrupted or inconsistent. It:
  1. Reads all existing rows out of the FTS5 table
  2. Drops the FTS5 virtual table
  3. Recreates the FTS5 table with the same schema
  4. Re-inserts all documents

Usage:
    rebuild-index.py                 # Rebuild FTS5 index
    rebuild-index.py --embeddings    # Also regenerate all embeddings
    rebuild-index.py --dry-run       # Show what would be done without changes
"""

import argparse
import os
import sqlite3
import sys
import time
from datetime import datetime, timezone
from pathlib import Path

import httpx
import yaml
from tqdm import tqdm

CONFIG_PATH = Path.home() / ".openclaw" / "search-index" / "config.yaml"
DEFAULT_DB_PATH = Path.home() / ".openclaw" / "search-index" / "db" / "search.db"
OLLAMA_ENDPOINT = "http://localhost:11434"
EMBEDDING_MODEL = "nomic-embed-text"


def load_config():
    """Load config.yaml."""
    if not CONFIG_PATH.exists():
        return {}
    with open(CONFIG_PATH) as f:
        return yaml.safe_load(f) or {}


def get_db_path(config):
    """Resolve database path from config."""
    raw = config.get("database", {}).get("path", str(DEFAULT_DB_PATH))
    return Path(os.path.expanduser(raw))


def generate_embedding(text, config):
    """Generate an embedding vector via the Ollama API."""
    endpoint = config.get("embedding", {}).get("endpoint", OLLAMA_ENDPOINT)
    model = config.get("embedding", {}).get("model", EMBEDDING_MODEL)
    text = text[:8000]
    try:
        resp = httpx.post(
            f"{endpoint}/api/embeddings",
            json={"model": model, "prompt": text},
            timeout=30.0,
        )
        resp.raise_for_status()
        return resp.json().get("embedding")
    except Exception as e:
        print(f"  Warning: Embedding failed: {e}", file=sys.stderr)
        return None


def main():
    parser = argparse.ArgumentParser(description="Rebuild the FTS5 search index")
    parser.add_argument("--embeddings", action="store_true",
                        help="Also regenerate all embeddings")
    parser.add_argument("--dry-run", action="store_true",
                        help="Show what would be done without making changes")

    args = parser.parse_args()
    config = load_config()
    db_path = get_db_path(config)

    if not db_path.exists():
        print(f"Database not found: {db_path}")
        sys.exit(1)

    conn = sqlite3.connect(str(db_path))
    conn.execute("PRAGMA journal_mode=WAL")

    # Count existing documents
    try:
        doc_count = conn.execute("SELECT COUNT(*) FROM documents").fetchone()[0]
    except sqlite3.OperationalError:
        doc_count = 0

    print()
    print("=" * 60)
    print("  FTS5 Index Rebuild")
    print("=" * 60)
    print()
    print(f"  Database   : {db_path}")
    print(f"  Documents  : {doc_count:,}")
    print(f"  Embeddings : {'will regenerate' if args.embeddings else 'preserved'}")
    print(f"  Mode       : {'DRY RUN' if args.dry_run else 'LIVE'}")
    print()

    if args.dry_run:
        print("  Dry run complete. No changes made.")
        conn.close()
        return
    if doc_count == 0:
        print("  No documents to rebuild. Exiting.")
        conn.close()
        return

    # Step 1: Read all documents into memory
    print("  Step 1: Reading existing documents...")
    try:
        rows = conn.execute(
            "SELECT doc_id, doc_type, subject, body, sender, recipients, folder, received_at, source_server "
            "FROM documents"
        ).fetchall()
    except sqlite3.OperationalError as e:
        print(f"  Error reading documents: {e}")
        print("  Cannot rebuild -- the documents table may already be corrupted.")
        conn.close()
        sys.exit(1)

    print(f"  Read {len(rows):,} documents into memory.")

    # Step 2: Back up metadata tables. sync_state and ingestion_runs are
    # regular tables, not FTS, so the drop below does not touch them.
    print("  Step 2: Backing up metadata tables...")

    # Steps 3-4: Drop and recreate the FTS5 table
    print("  Step 3: Dropping FTS5 index...")
    conn.execute("DROP TABLE IF EXISTS documents")

    print("  Step 4: Recreating FTS5 index...")
    # Must match init_db() in ingest-email.py: a regular (contentful) FTS5
    # table, so that column values can be read back out of it.
    conn.execute("""
        CREATE VIRTUAL TABLE documents USING fts5(
            doc_id,
            doc_type,
            subject,
            body,
            sender,
            recipients,
            folder,
            received_at,
            source_server,
            tokenize='porter unicode61'
        )
    """)

    # Step 5: Re-insert all documents
    print("  Step 5: Re-inserting documents...")
    inserted = 0
    failed = 0

    for row in tqdm(rows, desc="  Rebuilding", unit="doc", file=sys.stderr):
        try:
            conn.execute(
                "INSERT INTO documents(doc_id, doc_type, subject, body, sender, recipients, folder, received_at, source_server) "
                "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
                row,
            )
            inserted += 1
        except Exception as e:
            print(f"  Warning: Failed to insert doc {row[0]}: {e}", file=sys.stderr)
            failed += 1

    conn.commit()
    print(f"  Inserted: {inserted:,}, Failed: {failed:,}")

    # Step 6: Optionally regenerate embeddings
    if args.embeddings:
        import numpy as np

        print("  Step 6: Regenerating embeddings...")
        conn.execute("DELETE FROM embeddings")
        conn.commit()

        model = config.get("embedding", {}).get("model", EMBEDDING_MODEL)
        embedded = 0

        for row in tqdm(rows, desc="  Embeddings", unit="doc", file=sys.stderr):
            doc_id, subject, body = row[0], row[2], row[3]
            embed_text = f"{subject}\n{body}" if body else subject or ""
            if not embed_text.strip():
                continue

            embedding = generate_embedding(embed_text, config)
            if embedding:
                blob = np.array(embedding, dtype=np.float32).tobytes()
                conn.execute(
                    "INSERT OR REPLACE INTO embeddings(doc_id, embedding, model, created_at) "
                    "VALUES (?, ?, ?, ?)",
                    (doc_id, blob, model, datetime.now(timezone.utc).isoformat()),
                )
                embedded += 1

                # Commit in batches and pause to avoid overloading Ollama
                if embedded % 100 == 0:
                    conn.commit()
                    time.sleep(0.1)

        conn.commit()
        print(f"  Embeddings regenerated: {embedded:,}")

    conn.close()

    print()
    print("  Rebuild complete!")
    print()


if __name__ == "__main__":
    main()

diff --git a/bates-enhance/integrations/search/scripts/search-query.py b/bates-enhance/integrations/search/scripts/search-query.py
new file mode 100755
index 0000000..93babb9
--- /dev/null
+++ b/bates-enhance/integrations/search/scripts/search-query.py
@@ -0,0 +1,260 @@
#!/usr/bin/env python3
"""
search-query.py -- Search the Bates local search index

Usage:
    search-query.py "query text"
    search-query.py "query text" --limit 20
    search-query.py "query text" --type email
    search-query.py "query text" --semantic   # Include semantic (embedding) search
    search-query.py "query text" --json       # Output as JSON (default)
    search-query.py "query text" --pretty     # Human-readable output

+Returns results as JSON to stdout. +""" + +import argparse +import json +import os +import sqlite3 +import sys +from pathlib import Path + +import httpx +import yaml + +CONFIG_PATH = Path.home() / ".openclaw" / "search-index" / "config.yaml" +DEFAULT_DB_PATH = Path.home() / ".openclaw" / "search-index" / "db" / "search.db" +OLLAMA_ENDPOINT = "http://localhost:11434" +EMBEDDING_MODEL = "nomic-embed-text" + + +def load_config(): + """Load config.yaml.""" + if not CONFIG_PATH.exists(): + return {} + with open(CONFIG_PATH) as f: + return yaml.safe_load(f) or {} + + +def get_db_path(config): + """Resolve database path from config.""" + raw = config.get("database", {}).get("path", str(DEFAULT_DB_PATH)) + return Path(os.path.expanduser(raw)) + + +def fts_search(conn, query, doc_type=None, limit=10): + """Perform FTS5 full-text search.""" + # Escape special FTS5 characters + safe_query = query.replace('"', '""') + + sql = "SELECT doc_id, doc_type, subject, body, sender, recipients, folder, received_at, source_server" + sql += " FROM documents WHERE documents MATCH ?" + + params = [f'"{safe_query}"'] + + if doc_type: + sql += " AND doc_type = ?" + params.append(doc_type) + + sql += " ORDER BY rank LIMIT ?" + params.append(limit) + + try: + rows = conn.execute(sql, params).fetchall() + except sqlite3.OperationalError: + # Fall back to simpler query if FTS match syntax fails + sql = ( + "SELECT doc_id, doc_type, subject, body, sender, recipients, folder, received_at, source_server" + " FROM documents WHERE documents MATCH ? LIMIT ?" + ) + rows = conn.execute(sql, [safe_query, limit]).fetchall() + + results = [] + for row in rows: + results.append({ + "doc_id": row[0], + "doc_type": row[1], + "subject": row[2], + "body": row[3][:500] if row[3] else "", + "sender": row[4], + "recipients": row[5], + "folder": row[6], + "received_at": row[7], + "source_server": row[8], + }) + return results + + +def semantic_search(conn, query, config, limit=10): + """Perform semantic search using embeddings.""" + import numpy as np + + endpoint = config.get("embedding", {}).get("endpoint", OLLAMA_ENDPOINT) + model = config.get("embedding", {}).get("model", EMBEDDING_MODEL) + + # Generate query embedding + try: + resp = httpx.post( + f"{endpoint}/api/embeddings", + json={"model": model, "prompt": query[:8000]}, + timeout=30.0, + ) + resp.raise_for_status() + query_embedding = np.array(resp.json().get("embedding", []), dtype=np.float32) + except Exception as e: + print(f"Warning: Could not generate query embedding: {e}", file=sys.stderr) + return [] + + if query_embedding.size == 0: + return [] + + # Load all embeddings and compute cosine similarity + rows = conn.execute("SELECT doc_id, embedding FROM embeddings").fetchall() + if not rows: + return [] + + scores = [] + for doc_id, emb_blob in rows: + emb = np.frombuffer(emb_blob, dtype=np.float32) + if emb.size != query_embedding.size: + continue + # Cosine similarity + dot = np.dot(query_embedding, emb) + norm = np.linalg.norm(query_embedding) * np.linalg.norm(emb) + similarity = float(dot / norm) if norm > 0 else 0.0 + scores.append((doc_id, similarity)) + + # Sort by similarity descending + scores.sort(key=lambda x: x[1], reverse=True) + top_ids = scores[:limit] + + # Fetch full documents for top results + results = [] + for doc_id, score in top_ids: + row = conn.execute( + "SELECT doc_id, doc_type, subject, body, sender, recipients, folder, received_at, source_server" + " FROM documents WHERE doc_id = ?", + (doc_id,), + ).fetchone() + if row: + results.append({ + 
"doc_id": row[0], + "doc_type": row[1], + "subject": row[2], + "body": row[3][:500] if row[3] else "", + "sender": row[4], + "recipients": row[5], + "folder": row[6], + "received_at": row[7], + "source_server": row[8], + "similarity": round(score, 4), + }) + return results + + +def merge_results(fts_results, semantic_results): + """Merge FTS and semantic results, deduplicating by doc_id.""" + seen = set() + merged = [] + + # FTS results first (they are more precise for keyword matches) + for r in fts_results: + if r["doc_id"] not in seen: + r["match_type"] = "fts" + merged.append(r) + seen.add(r["doc_id"]) + + # Then semantic results + for r in semantic_results: + if r["doc_id"] not in seen: + r["match_type"] = "semantic" + merged.append(r) + seen.add(r["doc_id"]) + else: + # Update existing entry with similarity score + for m in merged: + if m["doc_id"] == r["doc_id"]: + m["similarity"] = r.get("similarity") + m["match_type"] = "both" + break + + return merged + + +def format_pretty(results): + """Format results for human-readable output.""" + if not results: + print("No results found.") + return + + print(f"\n{'='*70}") + print(f" {len(results)} result(s) found") + print(f"{'='*70}\n") + + for i, r in enumerate(results, 1): + match_info = r.get("match_type", "fts") + sim = f" (similarity: {r['similarity']})" if r.get("similarity") else "" + print(f" [{i}] {r['subject']}") + print(f" From: {r['sender']}") + print(f" Date: {r['received_at']}") + print(f" Folder: {r['folder']} | Match: {match_info}{sim}") + if r["body"]: + preview = r["body"][:200].replace("\n", " ") + print(f" Preview: {preview}...") + print() + + +def main(): + parser = argparse.ArgumentParser(description="Search the Bates local search index") + parser.add_argument("query", help="Search query text") + parser.add_argument("--limit", "-n", type=int, default=10, help="Max results (default: 10)") + parser.add_argument("--type", "-t", choices=["email", "calendar", "file"], + help="Filter by document type") + parser.add_argument("--semantic", "-s", action="store_true", + help="Include semantic (embedding) search") + parser.add_argument("--pretty", "-p", action="store_true", + help="Human-readable output instead of JSON") + parser.add_argument("--json", action="store_true", default=True, + help="Output as JSON (default)") + + args = parser.parse_args() + config = load_config() + db_path = get_db_path(config) + + if not db_path.exists(): + print(json.dumps({"error": "Database not found. 
Run ingest-email.py first.", "results": []})) + sys.exit(1) + + conn = sqlite3.connect(str(db_path)) + conn.execute("PRAGMA journal_mode=WAL") + + # FTS search + fts_results = fts_search(conn, args.query, doc_type=args.type, limit=args.limit) + + # Semantic search (optional) + semantic_results = [] + if args.semantic: + semantic_results = semantic_search(conn, args.query, config, limit=args.limit) + + # Merge results + if semantic_results: + results = merge_results(fts_results, semantic_results)[:args.limit] + else: + results = fts_results + + conn.close() + + if args.pretty: + format_pretty(results) + else: + output = { + "query": args.query, + "total": len(results), + "results": results, + } + print(json.dumps(output, indent=2)) + + +if __name__ == "__main__": + main() diff --git a/bates-enhance/integrations/search/scripts/search-stats.py b/bates-enhance/integrations/search/scripts/search-stats.py new file mode 100755 index 0000000..ff7979c --- /dev/null +++ b/bates-enhance/integrations/search/scripts/search-stats.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python3 +""" +search-stats.py -- Show statistics for the Bates local search index + +Displays: + - Total document count (by type) + - Database file size + - Last sync times per server/folder + - Stagger phase status + - Embedding coverage + - Recent ingestion runs +""" + +import json +import os +import sqlite3 +import sys +from datetime import datetime, timezone +from pathlib import Path + +import yaml + +CONFIG_PATH = Path.home() / ".openclaw" / "search-index" / "config.yaml" +DEFAULT_DB_PATH = Path.home() / ".openclaw" / "search-index" / "db" / "search.db" + + +def load_config(): + """Load config.yaml.""" + if not CONFIG_PATH.exists(): + return {} + with open(CONFIG_PATH) as f: + return yaml.safe_load(f) or {} + + +def get_db_path(config): + """Resolve database path from config.""" + raw = config.get("database", {}).get("path", str(DEFAULT_DB_PATH)) + return Path(os.path.expanduser(raw)) + + +def format_size(size_bytes): + """Format file size in human-readable form.""" + for unit in ["B", "KB", "MB", "GB"]: + if abs(size_bytes) < 1024.0: + return f"{size_bytes:.1f} {unit}" + size_bytes /= 1024.0 + return f"{size_bytes:.1f} TB" + + +def format_age(iso_timestamp): + """Format a timestamp as a human-readable age string.""" + if not iso_timestamp: + return "never" + try: + ts = datetime.fromisoformat(iso_timestamp.replace("Z", "+00:00")) + now = datetime.now(timezone.utc) + delta = now - ts + if delta.days > 0: + return f"{delta.days}d ago" + hours = delta.seconds // 3600 + if hours > 0: + return f"{hours}h ago" + minutes = delta.seconds // 60 + return f"{minutes}m ago" + except (ValueError, TypeError): + return iso_timestamp + + +def main(): + config = load_config() + db_path = get_db_path(config) + + if not db_path.exists(): + print("Database not found. 
Run ingest-email.py first.") + print(f"Expected at: {db_path}") + sys.exit(1) + + # Database size + db_size = db_path.stat().st_size + wal_path = db_path.with_suffix(".db-wal") + wal_size = wal_path.stat().st_size if wal_path.exists() else 0 + + conn = sqlite3.connect(str(db_path)) + conn.execute("PRAGMA journal_mode=WAL") + + # Document counts by type + doc_counts = conn.execute( + "SELECT doc_type, COUNT(*) FROM documents GROUP BY doc_type" + ).fetchall() + total_docs = sum(c for _, c in doc_counts) + + # Embedding count + embedding_count = conn.execute("SELECT COUNT(*) FROM embeddings").fetchone()[0] + + # Sync state + sync_states = conn.execute( + "SELECT server, folder_name, last_sync, doc_count, phase FROM sync_state ORDER BY server, folder_name" + ).fetchall() + + # Recent ingestion runs + recent_runs = conn.execute( + "SELECT run_id, started_at, finished_at, mode, phase, docs_added, docs_failed, status " + "FROM ingestion_runs ORDER BY run_id DESC LIMIT 5" + ).fetchall() + + conn.close() + + # Current phase from config + current_phase = config.get("ingestion", {}).get("current_phase", 1) + phase_labels = config.get("ingestion", {}).get("stagger_phases", {}) + + # Output + print() + print("=" * 60) + print(" Bates Search Index Statistics") + print("=" * 60) + print() + + print(f" Database path : {db_path}") + print(f" Database size : {format_size(db_size)} (WAL: {format_size(wal_size)})") + print(f" Total docs : {total_docs:,}") + print() + + if doc_counts: + print(" Documents by type:") + for doc_type, count in doc_counts: + print(f" {doc_type:12s} : {count:,}") + print() + + print(f" Embeddings : {embedding_count:,} ({embedding_count*100//max(total_docs,1)}% coverage)") + print() + + print(f" Current phase : {current_phase} - {phase_labels.get(current_phase, phase_labels.get(str(current_phase), 'Unknown'))}") + print() + + if sync_states: + print(" Sync state per folder:") + print(f" {'Server':<20s} {'Folder':<25s} {'Last Sync':<15s} {'Docs':>8s}") + print(f" {'-'*20} {'-'*25} {'-'*15} {'-'*8}") + for server, folder_name, last_sync, doc_count, phase in sync_states: + age = format_age(last_sync) + print(f" {server:<20s} {(folder_name or 'unknown'):<25s} {age:<15s} {doc_count:>8,}") + print() + + if recent_runs: + print(" Recent ingestion runs:") + print(f" {'ID':>4s} {'Mode':<12s} {'Phase':>5s} {'Added':>6s} {'Errors':>6s} {'Status':<10s} {'When':<15s}") + print(f" {'-'*4} {'-'*12} {'-'*5} {'-'*6} {'-'*6} {'-'*10} {'-'*15}") + for run_id, started, finished, mode, phase, added, failed, status in recent_runs: + age = format_age(started) + print(f" {run_id:>4d} {mode:<12s} {phase or '-':>5} {added:>6,} {failed:>6,} {status:<10s} {age:<15s}") + print() + + print("=" * 60) + + +if __name__ == "__main__": + main() diff --git a/bates-enhance/integrations/search/scripts/setup-venv.sh b/bates-enhance/integrations/search/scripts/setup-venv.sh new file mode 100755 index 0000000..45a200a --- /dev/null +++ b/bates-enhance/integrations/search/scripts/setup-venv.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash +# setup-venv.sh -- Create or update the Python virtual environment for search index +# +# Usage: +# setup-venv.sh # Create/update venv and install requirements +# setup-venv.sh --force # Delete existing venv and recreate from scratch + +set -euo pipefail + +SEARCH_INDEX_DIR="$HOME/.openclaw/search-index" +VENV_DIR="$SEARCH_INDEX_DIR/venv" +REQUIREMENTS="$SEARCH_INDEX_DIR/requirements.txt" +FORCE=false + +if [[ "${1:-}" == "--force" ]]; then + FORCE=true +fi + +echo "Search Index Python Environment 
Setup" +echo "======================================" +echo "" + +# Ensure directory structure exists +mkdir -p "$SEARCH_INDEX_DIR/scripts" +mkdir -p "$SEARCH_INDEX_DIR/db" +mkdir -p "$SEARCH_INDEX_DIR/cache" +mkdir -p "$SEARCH_INDEX_DIR/logs" + +# Check requirements file +if [[ ! -f "$REQUIREMENTS" ]]; then + echo "ERROR: Requirements file not found: $REQUIREMENTS" + echo "Copy requirements.txt to $SEARCH_INDEX_DIR/ first." + exit 1 +fi + +# Remove existing venv if --force +if $FORCE && [[ -d "$VENV_DIR" ]]; then + echo "Removing existing virtual environment..." + rm -rf "$VENV_DIR" +fi + +# Create venv if it doesn't exist +if [[ ! -d "$VENV_DIR" ]]; then + echo "Creating Python virtual environment..." + python3 -m venv "$VENV_DIR" + echo " Created: $VENV_DIR" +else + echo "Virtual environment already exists: $VENV_DIR" +fi + +# Upgrade pip +echo "Upgrading pip..." +"$VENV_DIR/bin/pip" install --quiet --upgrade pip + +# Install requirements +echo "Installing dependencies..." +"$VENV_DIR/bin/pip" install --quiet -r "$REQUIREMENTS" + +# Verify key packages +echo "" +echo "Installed packages:" +"$VENV_DIR/bin/pip" list --format=columns 2>/dev/null | grep -E "httpx|numpy|sqlite-utils|pyyaml|tqdm|python-dateutil|tiktoken" || true + +echo "" +echo "Python: $("$VENV_DIR/bin/python3" --version)" +echo "Venv: $VENV_DIR" +echo "" +echo "Setup complete." diff --git a/bates-enhance/integrations/search/scripts/sync-monitor.py b/bates-enhance/integrations/search/scripts/sync-monitor.py new file mode 100755 index 0000000..adece68 --- /dev/null +++ b/bates-enhance/integrations/search/scripts/sync-monitor.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python3 +""" +sync-monitor.py -- Monitor search index health for cron job delivery + +Checks: + - Database exists and is accessible + - Last sync was recent (within 2 hours) + - No persistent errors in recent runs + - Document count is growing (not stalled) + - Ollama embedding service is reachable + - Disk space is sufficient + +Output goes to stdout for the cron job to deliver to the user. +Exits 0 if healthy, 1 if issues found. 
+""" + +import json +import os +import sqlite3 +import sys +from datetime import datetime, timedelta, timezone +from pathlib import Path + +import httpx +import yaml + +CONFIG_PATH = Path.home() / ".openclaw" / "search-index" / "config.yaml" +DEFAULT_DB_PATH = Path.home() / ".openclaw" / "search-index" / "db" / "search.db" +OLLAMA_ENDPOINT = "http://localhost:11434" + +# Thresholds +MAX_SYNC_AGE_HOURS = 2 +MIN_DOCS_EXPECTED = 10 +MAX_CONSECUTIVE_ERRORS = 3 +MIN_DISK_FREE_MB = 500 + + +def load_config(): + """Load config.yaml.""" + if not CONFIG_PATH.exists(): + return {} + with open(CONFIG_PATH) as f: + return yaml.safe_load(f) or {} + + +def get_db_path(config): + """Resolve database path from config.""" + raw = config.get("database", {}).get("path", str(DEFAULT_DB_PATH)) + return Path(os.path.expanduser(raw)) + + +def check_database(db_path): + """Check database accessibility and basic health.""" + issues = [] + + if not db_path.exists(): + return ["Database file not found: {}".format(db_path)], {} + + try: + conn = sqlite3.connect(str(db_path), timeout=5) + conn.execute("PRAGMA journal_mode=WAL") + except sqlite3.Error as e: + return ["Cannot open database: {}".format(e)], {} + + stats = {} + + # Document count + try: + total = conn.execute("SELECT COUNT(*) FROM documents").fetchone()[0] + stats["total_docs"] = total + if total < MIN_DOCS_EXPECTED: + issues.append("Low document count: {} (expected at least {})".format(total, MIN_DOCS_EXPECTED)) + except sqlite3.OperationalError as e: + issues.append("Cannot query documents table: {}".format(e)) + + # Embedding count + try: + emb_count = conn.execute("SELECT COUNT(*) FROM embeddings").fetchone()[0] + stats["embeddings"] = emb_count + except sqlite3.OperationalError: + stats["embeddings"] = 0 + + # Last sync time + try: + row = conn.execute( + "SELECT MAX(last_sync) FROM sync_state" + ).fetchone() + if row and row[0]: + last_sync = datetime.fromisoformat(row[0].replace("Z", "+00:00")) + age = datetime.now(timezone.utc) - last_sync + stats["last_sync_age_hours"] = round(age.total_seconds() / 3600, 1) + if age > timedelta(hours=MAX_SYNC_AGE_HOURS): + issues.append("Last sync was {:.1f} hours ago (threshold: {}h)".format( + age.total_seconds() / 3600, MAX_SYNC_AGE_HOURS)) + else: + issues.append("No sync has ever completed") + except sqlite3.OperationalError: + issues.append("Cannot read sync_state table") + + # Recent ingestion errors + try: + error_runs = conn.execute( + "SELECT COUNT(*) FROM ingestion_runs WHERE status IN ('error', 'partial') " + "AND started_at > datetime('now', '-24 hours')" + ).fetchone()[0] + stats["errors_24h"] = error_runs + if error_runs >= MAX_CONSECUTIVE_ERRORS: + issues.append("{} error/partial runs in last 24 hours".format(error_runs)) + except sqlite3.OperationalError: + pass + + # Check for stalled ingestion (running for > 1 hour) + try: + stalled = conn.execute( + "SELECT run_id, started_at FROM ingestion_runs WHERE status = 'running' " + "AND started_at < datetime('now', '-1 hour')" + ).fetchall() + if stalled: + for run_id, started in stalled: + issues.append("Stalled ingestion run #{} (started {})".format(run_id, started)) + except sqlite3.OperationalError: + pass + + # DB file size + db_size = db_path.stat().st_size + stats["db_size_mb"] = round(db_size / (1024 * 1024), 1) + + conn.close() + return issues, stats + + +def check_ollama(config): + """Check if Ollama embedding service is reachable.""" + endpoint = config.get("embedding", {}).get("endpoint", OLLAMA_ENDPOINT) + issues = [] + + try: + resp = 
httpx.get(f"{endpoint}/api/tags", timeout=5.0) + if resp.status_code != 200: + issues.append("Ollama returned HTTP {}".format(resp.status_code)) + return issues + + models = resp.json().get("models", []) + model_names = [m.get("name", "").split(":")[0] for m in models] + expected_model = config.get("embedding", {}).get("model", "nomic-embed-text") + if expected_model not in model_names: + issues.append("Embedding model '{}' not found in Ollama (available: {})".format( + expected_model, ", ".join(model_names) or "none")) + except httpx.ConnectError: + issues.append("Cannot connect to Ollama at {}".format(endpoint)) + except httpx.TimeoutException: + issues.append("Ollama connection timed out at {}".format(endpoint)) + except Exception as e: + issues.append("Ollama check failed: {}".format(e)) + + return issues + + +def check_disk(db_path): + """Check available disk space.""" + issues = [] + try: + stat = os.statvfs(str(db_path.parent)) + free_mb = (stat.f_bavail * stat.f_frsize) / (1024 * 1024) + if free_mb < MIN_DISK_FREE_MB: + issues.append("Low disk space: {:.0f} MB free (minimum: {} MB)".format(free_mb, MIN_DISK_FREE_MB)) + except OSError: + pass + return issues + + +def main(): + config = load_config() + db_path = get_db_path(config) + + all_issues = [] + stats = {} + + # Check database + db_issues, db_stats = check_database(db_path) + all_issues.extend(db_issues) + stats.update(db_stats) + + # Check Ollama + ollama_issues = check_ollama(config) + all_issues.extend(ollama_issues) + + # Check disk space + disk_issues = check_disk(db_path) + all_issues.extend(disk_issues) + + # Report + if all_issues: + print("Search Index Health Check: {} issue(s) found".format(len(all_issues))) + print() + for i, issue in enumerate(all_issues, 1): + print(" {}. {}".format(i, issue)) + print() + if stats: + print("Stats: {} docs, {} embeddings, DB size {:.1f} MB".format( + stats.get("total_docs", "?"), + stats.get("embeddings", "?"), + stats.get("db_size_mb", 0))) + sys.exit(1) + else: + # Healthy -- output nothing (cron job only reports issues) + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/bates-enhance/integrations/search/setup.sh b/bates-enhance/integrations/search/setup.sh new file mode 100644 index 0000000..83627c1 --- /dev/null +++ b/bates-enhance/integrations/search/setup.sh @@ -0,0 +1,204 @@ +# search integration setup -- sourced by bates-enhance installer, no shebang +# Requires: m365 integration installed, common.sh functions available +# Sets up local search index with SQLite FTS5 + Ollama embeddings + +SEARCH_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SEARCH_INDEX_DIR="$HOME/.openclaw/search-index" + +# ========================================================================= +# Step 1 -- Check m365 dependency +# ========================================================================= +step "Check Microsoft 365 dependency" + +if ! is_installed "m365" 2>/dev/null; then + fatal "Microsoft 365 integration must be installed first. Run: bates-enhance.sh m365" +fi + +success "Microsoft 365 integration is installed" + +# ========================================================================= +# Step 2 -- Install Ollama +# ========================================================================= +step "Install Ollama (local embedding engine)" + +if command -v ollama &>/dev/null; then + info "Ollama is already installed: $(ollama --version 2>/dev/null || echo 'unknown version')" +else + info "Installing Ollama..." 
+ curl -fsSL https://ollama.com/install.sh | sh + if ! command -v ollama &>/dev/null; then + fatal "Ollama installation failed. Install manually from https://ollama.com" + fi + success "Ollama installed" +fi + +# Ensure Ollama service is running +if ! ollama list &>/dev/null 2>&1; then + info "Starting Ollama service..." + ollama serve &>/dev/null & + sleep 3 +fi + +# Pull embedding model +info "Pulling nomic-embed-text embedding model (this may take a few minutes)..." +if ollama list 2>/dev/null | grep -q "nomic-embed-text"; then + info "nomic-embed-text model already available" +else + ollama pull nomic-embed-text +fi + +# Verify the model is available +if ollama list 2>/dev/null | grep -q "nomic-embed-text"; then + success "nomic-embed-text model is ready" +else + warn "Could not verify nomic-embed-text model. You may need to pull it manually:" + warn " ollama pull nomic-embed-text" +fi + +# ========================================================================= +# Step 3 -- Set up Python environment +# ========================================================================= +step "Set up Python environment and scripts" + +# Create directory structure +info "Creating search index directory structure..." +mkdir -p "$SEARCH_INDEX_DIR/scripts" +mkdir -p "$SEARCH_INDEX_DIR/db" +mkdir -p "$SEARCH_INDEX_DIR/cache" +mkdir -p "$SEARCH_INDEX_DIR/logs" + +# Copy scripts from integration package +info "Installing search index scripts..." +cp "$SEARCH_DIR/scripts/"*.py "$SEARCH_INDEX_DIR/scripts/" +cp "$SEARCH_DIR/scripts/"*.sh "$SEARCH_INDEX_DIR/scripts/" +chmod +x "$SEARCH_INDEX_DIR/scripts/"*.py +chmod +x "$SEARCH_INDEX_DIR/scripts/"*.sh + +# Copy requirements +cp "$SEARCH_DIR/requirements.txt" "$SEARCH_INDEX_DIR/requirements.txt" + +# Create Python venv +info "Creating Python virtual environment..." +if [[ -d "$SEARCH_INDEX_DIR/venv" ]]; then + info "Virtual environment already exists, updating..." +else + python3 -m venv "$SEARCH_INDEX_DIR/venv" +fi + +# Install requirements +info "Installing Python dependencies..." +"$SEARCH_INDEX_DIR/venv/bin/pip" install --quiet --upgrade pip +"$SEARCH_INDEX_DIR/venv/bin/pip" install --quiet -r "$SEARCH_INDEX_DIR/requirements.txt" + +success "Python environment ready at $SEARCH_INDEX_DIR/venv/" + +# ========================================================================= +# Step 4 -- Configure search index +# ========================================================================= +step "Configure search index" + +info "The search index needs to know which MCP server to use for reading email." +info "This should match the reader server registered during m365 setup." +echo "" + +local mcp_reader_server="" +prompt_default "MCP reader server name" "ms365-reader" mcp_reader_server + +export MCP_READER_SERVER="$mcp_reader_server" + +# Render config template +info "Generating config.yaml..." +template_render "$SEARCH_DIR/templates/config.yaml.template" "$SEARCH_INDEX_DIR/config.yaml" + +success "Config written to $SEARCH_INDEX_DIR/config.yaml" +info "Initial stagger phase set to 1 (personal inbox only)" +info "Increase with: edit ~/.openclaw/search-index/config.yaml (current_phase)" + +# ========================================================================= +# Step 5 -- Initial sync option +# ========================================================================= +step "Initial email sync" + +echo "" +info "The search index needs to perform an initial email ingestion." +info "Phase 1 syncs your personal inbox only (safest starting point)." 
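# Stagger phases (mirroring ingestion.stagger_phases in config.yaml.template):
#   1 = personal inbox, 2 = all personal folders, 3 = company mailbox,
#   4 = shared mailboxes, 5 = all sources.
# Later phases are opt-in via current_phase in config.yaml.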
+echo "" +echo " 1) Start initial sync now (runs in background)" +echo " 2) Schedule for tonight at 02:00" +echo "" + +local sync_choice="" +read -rp "Select [1]: " sync_choice +sync_choice="${sync_choice:-1}" + +local venv_python="$SEARCH_INDEX_DIR/venv/bin/python3" +local ingest_script="$SEARCH_INDEX_DIR/scripts/ingest-email.py" + +case "$sync_choice" in + 2) + info "Scheduling initial sync for 02:00 tonight..." + # Add one-shot crontab entry (self-removing) + local cron_cmd="$venv_python $ingest_script --stagger-phase 1 --incremental >> $SEARCH_INDEX_DIR/logs/initial-sync.log 2>&1; crontab -l 2>/dev/null | grep -v 'initial-search-sync' | crontab -" + (crontab -l 2>/dev/null; echo "0 2 * * * $cron_cmd # initial-search-sync") | crontab - + success "Initial sync scheduled for 02:00" + ;; + *) + info "Starting initial sync in background..." + nohup "$venv_python" "$ingest_script" --stagger-phase 1 --incremental \ + >> "$SEARCH_INDEX_DIR/logs/initial-sync.log" 2>&1 & + local sync_pid=$! + success "Initial sync started (PID: $sync_pid)" + info "Monitor progress: tail -f $SEARCH_INDEX_DIR/logs/initial-sync.log" + ;; +esac + +# Add recurring crontab: ingestion every 30 min during 06:00-22:00 +info "Adding recurring sync crontab (every 30 min, 06:00-22:00)..." +local recurring_cmd="$venv_python $ingest_script --incremental >> $SEARCH_INDEX_DIR/logs/ingestion.log 2>&1" +# Remove old entry if it exists, then add new one +(crontab -l 2>/dev/null | grep -v 'search-index-sync'; echo "*/30 6-22 * * * $recurring_cmd # search-index-sync") | crontab - +success "Recurring sync crontab installed" + +# --- Merge config fragment --- +info "Merging search index config into openclaw.json..." +config_merge "$SEARCH_DIR/config-fragment.json" +success "Search index configured in openclaw.json" + +# --- Deploy workspace additions --- +if [[ -d "$SEARCH_DIR/workspace-additions" ]]; then + info "Deploying workspace files..." + local ws_dir="${WORKSPACE_DIR:-$HOME/.openclaw/workspace}" + if [[ -d "$SEARCH_DIR/workspace-additions/refs" ]]; then + cp "$SEARCH_DIR/workspace-additions/refs/"* "$ws_dir/refs/" 2>/dev/null || true + fi + if [[ -d "$SEARCH_DIR/workspace-additions/rules" ]]; then + cp "$SEARCH_DIR/workspace-additions/rules/"* "$ws_dir/rules/" 2>/dev/null || true + fi + if [[ -d "$SEARCH_DIR/workspace-additions/skills" ]]; then + cp -r "$SEARCH_DIR/workspace-additions/skills/"* "$ws_dir/skills/" 2>/dev/null || true + fi + success "Workspace files deployed" +fi + +echo "" +echo "======================================================================" +echo " Search Index integration setup complete!" +echo "======================================================================" +echo "" +echo "Key paths:" +echo " Config : $SEARCH_INDEX_DIR/config.yaml" +echo " Database : $SEARCH_INDEX_DIR/db/search.db" +echo " Scripts : $SEARCH_INDEX_DIR/scripts/" +echo " Logs : $SEARCH_INDEX_DIR/logs/" +echo "" +echo "Useful commands:" +echo " Check stats : $venv_python $SEARCH_INDEX_DIR/scripts/search-stats.py" +echo " Search : $venv_python $SEARCH_INDEX_DIR/scripts/search-query.py \"your query\"" +echo " Manual sync : $venv_python $ingest_script --incremental" +echo " Monitor : $venv_python $SEARCH_INDEX_DIR/scripts/sync-monitor.py" +echo "" +echo "Next steps:" +echo " 1. Wait for initial sync to complete" +echo " 2. Verify: $venv_python $SEARCH_INDEX_DIR/scripts/search-stats.py" +echo " 3. 
Increase stagger phase in config.yaml as you're comfortable" +echo "" diff --git a/bates-enhance/integrations/search/templates/config.yaml.template b/bates-enhance/integrations/search/templates/config.yaml.template new file mode 100644 index 0000000..ba30526 --- /dev/null +++ b/bates-enhance/integrations/search/templates/config.yaml.template @@ -0,0 +1,45 @@ +# Search Index Configuration +# Generated by Bates Enhancement Wizard + +database: + path: ~/.openclaw/search-index/db/search.db + wal_mode: true + +embedding: + provider: ollama + model: nomic-embed-text + endpoint: http://localhost:11434 + +sources: + email: + enabled: true + mcp_servers: + - {{MCP_READER_SERVER}} + sync_interval: 1800 # 30 minutes + batch_size: 50 + max_age_days: 365 + + calendar: + enabled: true + mcp_servers: + - {{MCP_READER_SERVER}} + +ingestion: + stagger_phases: + 1: "Personal inbox" + 2: "All personal folders" + 3: "Company mailbox" + 4: "Shared mailboxes" + 5: "All sources" + + current_phase: 1 + + retry: + max_attempts: 3 + backoff_seconds: 60 + +logging: + level: INFO + file: ~/.openclaw/search-index/logs/ingestion.log + max_size_mb: 50 + rotate_count: 5 diff --git a/bates-enhance/integrations/social/config-fragment.json b/bates-enhance/integrations/social/config-fragment.json new file mode 100644 index 0000000..e18dc9f --- /dev/null +++ b/bates-enhance/integrations/social/config-fragment.json @@ -0,0 +1,7 @@ +{ + "env": { + "vars": { + "MIXPOST_URL": "{{MIXPOST_URL}}" + } + } +} diff --git a/bates-enhance/integrations/social/docker/docker-compose.yml b/bates-enhance/integrations/social/docker/docker-compose.yml new file mode 100644 index 0000000..b7e8324 --- /dev/null +++ b/bates-enhance/integrations/social/docker/docker-compose.yml @@ -0,0 +1,83 @@ +version: "3.8" + +# MixPost -- Self-hosted social media management +# Deployed by bates-enhance.sh social integration +# +# Services: +# mixpost -- MixPost application (Laravel) +# mysql -- MySQL 8 database +# redis -- Redis for queue and cache + +services: + mixpost: + image: inovector/mixpost:latest + container_name: mixpost-app + restart: unless-stopped + ports: + - "${MIXPOST_PORT:-9000}:80" + depends_on: + mysql: + condition: service_healthy + redis: + condition: service_started + environment: + APP_NAME: "MixPost" + APP_KEY: "${APP_KEY}" + APP_URL: "${MIXPOST_URL:-http://localhost:9000}" + DB_CONNECTION: mysql + DB_HOST: mysql + DB_PORT: 3306 + DB_DATABASE: mixpost + DB_USERNAME: mixpost + DB_PASSWORD: "${DB_PASSWORD}" + REDIS_HOST: redis + REDIS_PORT: 6379 + REDIS_PASSWORD: "${REDIS_PASSWORD}" + CACHE_DRIVER: redis + QUEUE_CONNECTION: redis + SESSION_DRIVER: redis + volumes: + - mixpost-storage:/var/www/html/storage/app + - mixpost-logs:/var/www/html/storage/logs + networks: + - mixpost-net + + mysql: + image: mysql:8.0 + container_name: mixpost-mysql + restart: unless-stopped + environment: + MYSQL_ROOT_PASSWORD: "${DB_ROOT_PASSWORD}" + MYSQL_DATABASE: mixpost + MYSQL_USER: mixpost + MYSQL_PASSWORD: "${DB_PASSWORD}" + volumes: + - mixpost-mysql-data:/var/lib/mysql + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "root", "-p${DB_ROOT_PASSWORD}"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 30s + networks: + - mixpost-net + + redis: + image: redis:7-alpine + container_name: mixpost-redis + restart: unless-stopped + command: redis-server --requirepass "${REDIS_PASSWORD}" + volumes: + - mixpost-redis-data:/data + networks: + - mixpost-net + +volumes: + mixpost-storage: + mixpost-logs: + mixpost-mysql-data: + 
mixpost-redis-data:
+
+networks:
+  mixpost-net:
+    driver: bridge
diff --git a/bates-enhance/integrations/social/docker/env.template b/bates-enhance/integrations/social/docker/env.template
new file mode 100644
index 0000000..90ccb30
--- /dev/null
+++ b/bates-enhance/integrations/social/docker/env.template
@@ -0,0 +1,15 @@
+# MixPost Docker environment
+# Generated by bates-enhance.sh social integration
+# This file contains secrets -- do not commit to version control.
+
+# Application
+APP_KEY={{APP_KEY}}
+MIXPOST_URL={{MIXPOST_URL}}
+MIXPOST_PORT={{MIXPOST_PORT}}
+
+# MySQL
+DB_PASSWORD={{DB_PASSWORD}}
+DB_ROOT_PASSWORD={{DB_ROOT_PASSWORD}}
+
+# Redis
+REDIS_PASSWORD={{REDIS_PASSWORD}}
diff --git a/bates-enhance/integrations/social/setup.sh b/bates-enhance/integrations/social/setup.sh
new file mode 100644
index 0000000..b7e6b5d
--- /dev/null
+++ b/bates-enhance/integrations/social/setup.sh
@@ -0,0 +1,132 @@
+# setup.sh -- Social Media (MixPost) integration for Bates
+# Sourced by bates-enhance.sh -- do NOT run directly.
+#
+# Deploys MixPost (self-hosted social media management) via Docker Compose,
+# then configures the gateway to use it for scheduling and publishing posts.
+#
+# Prerequisites:
+# - Docker and Docker Compose installed
+# - Port 9000 (MixPost UI) must be free on the host
+
+# -------------------------------------------------------------------
+# Step 1 -- Check Docker
+# -------------------------------------------------------------------
+step "Check Docker installation"
+
+if ! command -v docker &>/dev/null; then
+    fatal "Docker is not installed. Install Docker first: https://docs.docker.com/engine/install/"
+fi
+
+if ! docker info &>/dev/null; then
+    warn "Docker daemon is not running or current user lacks permissions."
+    info "Try: sudo systemctl start docker && sudo usermod -aG docker \$USER"
+    if ! confirm "Continue anyway (you can start Docker later)?"; then
+        fatal "Aborted."
+    fi
+fi
+
+success "Docker is available."
+
+# -------------------------------------------------------------------
+# Step 2 -- Collect MixPost settings
+# -------------------------------------------------------------------
+step "Configure MixPost"
+
+prompt_default "Domain or hostname for MixPost (e.g. localhost or mixpost.example.com)" "localhost" MIXPOST_DOMAIN
+
+MIXPOST_PORT="9000"
+prompt_default "MixPost HTTP port" "$MIXPOST_PORT" MIXPOST_PORT
+
+if [[ "$MIXPOST_DOMAIN" == "localhost" || "$MIXPOST_DOMAIN" == "127.0.0.1" ]]; then
+    MIXPOST_URL="http://${MIXPOST_DOMAIN}:${MIXPOST_PORT}"
+else
+    MIXPOST_URL="https://${MIXPOST_DOMAIN}"
+    info "For production, set up a reverse proxy with TLS in front of port $MIXPOST_PORT."
+fi
+
+info "MixPost will be accessible at: $MIXPOST_URL"
+
+# -------------------------------------------------------------------
+# Step 3 -- Generate passwords
+# -------------------------------------------------------------------
+step "Generate database credentials"
+
+DB_PASSWORD=$(python3 -c "import secrets; print(secrets.token_urlsafe(24))")
+DB_ROOT_PASSWORD=$(python3 -c "import secrets; print(secrets.token_urlsafe(24))")
+APP_KEY=$(python3 -c "import secrets; print('base64:' + __import__('base64').b64encode(secrets.token_bytes(32)).decode())")
+REDIS_PASSWORD=$(python3 -c "import secrets; print(secrets.token_urlsafe(16))")
+
+success "Credentials generated (stored in .env, not displayed)."
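+
+# Note: MixPost (Laravel) expects APP_KEY in the form "base64:<32 random
+# bytes, base64-encoded>", which is what the python3 one-liner above emits.
+# If python3 were unavailable, an equivalent key could be generated with
+# openssl (a sketch only -- not run by this script):
+#   APP_KEY="base64:$(openssl rand -base64 32)"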
+
+# -------------------------------------------------------------------
+# Step 4 -- Deploy Docker Compose files
+# -------------------------------------------------------------------
+step "Deploy Docker Compose stack"
+
+DOCKER_DIR="$HOME/.openclaw/docker/mixpost"
+mkdir -p "$DOCKER_DIR"
+
+# Copy docker-compose.yml
+COMPOSE_SRC="$ENHANCE_DIR/integrations/social/docker/docker-compose.yml"
+if [[ -f "$COMPOSE_SRC" ]]; then
+    cp "$COMPOSE_SRC" "$DOCKER_DIR/docker-compose.yml"
+else
+    fatal "docker-compose.yml template not found at $COMPOSE_SRC"
+fi
+
+# Render .env from template
+ENV_TEMPLATE="$ENHANCE_DIR/integrations/social/docker/env.template"
+if [[ -f "$ENV_TEMPLATE" ]]; then
+    export DB_PASSWORD DB_ROOT_PASSWORD APP_KEY REDIS_PASSWORD MIXPOST_DOMAIN MIXPOST_PORT MIXPOST_URL
+    template_render "$ENV_TEMPLATE" "$DOCKER_DIR/.env"
+    chmod 600 "$DOCKER_DIR/.env"
+else
+    fatal "env.template not found at $ENV_TEMPLATE"
+fi
+
+success "Docker Compose files deployed to $DOCKER_DIR"
+
+# -------------------------------------------------------------------
+# Step 5 -- Offer to start containers
+# -------------------------------------------------------------------
+step "Start MixPost containers"
+
+echo ""
+info "Ready to start MixPost containers."
+info "This will pull images and start MySQL, Redis, and MixPost."
+echo ""
+
+if confirm "Start containers now?"; then
+    info "Pulling images and starting containers (this may take a few minutes)..."
+    if docker compose -f "$DOCKER_DIR/docker-compose.yml" --env-file "$DOCKER_DIR/.env" up -d 2>&1; then
+        success "MixPost containers started."
+        echo ""
+        info "MixPost UI: $MIXPOST_URL"
+        info "Create the admin account on your first visit."
+    else
+        warn "Docker Compose failed. You can start manually later:"
+        info "  cd $DOCKER_DIR && docker compose up -d"
+    fi
+else
+    info "Skipped. Start containers later with:"
+    info "  cd $DOCKER_DIR && docker compose up -d"
+fi
+
+# -------------------------------------------------------------------
+# Step 6 -- Merge config fragment
+# -------------------------------------------------------------------
+step "Merge social media config into openclaw.json"
+
+FRAGMENT_DIR="$ENHANCE_DIR/integrations/social"
+RENDERED_FRAGMENT=$(mktemp)
+
+export MIXPOST_URL
+template_render "$FRAGMENT_DIR/config-fragment.json" "$RENDERED_FRAGMENT"
+config_merge "$RENDERED_FRAGMENT"
+rm -f "$RENDERED_FRAGMENT"
+
+success "Config merged."
+
+echo ""
+success "Social media (MixPost) integration setup complete."
+info "Connect your social accounts in the MixPost UI at $MIXPOST_URL"
diff --git a/bates-enhance/integrations/tailscale/setup.sh b/bates-enhance/integrations/tailscale/setup.sh
new file mode 100644
index 0000000..308434d
--- /dev/null
+++ b/bates-enhance/integrations/tailscale/setup.sh
@@ -0,0 +1,142 @@
+# setup.sh -- Tailscale Remote Access integration for Bates
+# Sourced by bates-enhance.sh -- do NOT run directly.
+#
+# Installs Tailscale (if needed), authenticates the node, sets up Tailscale
+# Serve to expose the gateway dashboard on the tailnet, and (optionally)
+# Tailscale Funnel to expose the voice webhook publicly over HTTPS.
+#
+# Prerequisites:
+# - sudo access (for Tailscale install and configuration)
+# - A Tailscale account (free tier is fine)

+# -------------------------------------------------------------------
+# Step 1 -- Check / install Tailscale
+# -------------------------------------------------------------------
+step "Check Tailscale installation"
+
+if command -v tailscale &>/dev/null; then
+    TS_VERSION=$(tailscale version 2>/dev/null | head -1 || echo "unknown")
+    success "Tailscale is already installed (version: $TS_VERSION)."
+else
+    info "Tailscale is not installed. Installing now..."
+    echo ""
+
+    if ! confirm "Install Tailscale via the official install script?"; then
+        fatal "Tailscale installation is required for this integration."
+    fi
+
+    if curl -fsSL https://tailscale.com/install.sh | sh; then
+        success "Tailscale installed."
+    else
+        fatal "Tailscale installation failed. Check network connectivity and try again."
+    fi
+fi
+
+# -------------------------------------------------------------------
+# Step 2 -- Authenticate / bring up Tailscale
+# -------------------------------------------------------------------
+step "Authenticate with Tailscale"
+
+TS_STATUS=$(tailscale status --json 2>/dev/null | python3 -c "import json,sys; print(json.load(sys.stdin).get('BackendState',''))" 2>/dev/null || echo "")
+
+if [[ "$TS_STATUS" == "Running" ]]; then
+    success "Tailscale is already connected."
+else
+    info "Starting Tailscale. You will need to authenticate in your browser."
+    echo ""
+
+    if sudo tailscale up; then
+        success "Tailscale authenticated and connected."
+    else
+        fatal "Tailscale authentication failed. Run 'sudo tailscale up' manually."
+    fi
+fi
+
+# Wait a moment for DNS to propagate
+sleep 2
+
+# Get the hostname
+TS_HOSTNAME=$(tailscale status --json 2>/dev/null | python3 -c "import json,sys; print(json.load(sys.stdin).get('Self',{}).get('DNSName','').rstrip('.'))" 2>/dev/null || echo "")
+TS_IP=$(tailscale ip -4 2>/dev/null || echo "")
+
+if [[ -n "$TS_HOSTNAME" ]]; then
+    info "Tailscale hostname: $TS_HOSTNAME"
+    info "Tailscale IPv4: $TS_IP"
+else
+    warn "Could not determine Tailscale hostname. Check 'tailscale status'."
+fi
+
+# -------------------------------------------------------------------
+# Step 3 -- Set up Tailscale Serve for dashboard
+# -------------------------------------------------------------------
+step "Configure Tailscale Serve (dashboard)"
+
+info "Exposing gateway dashboard (port 18789) on Tailscale HTTPS port 443."
+echo ""
+
+if sudo tailscale serve --bg --https=443 18789; then
+    success "Dashboard available at: https://${TS_HOSTNAME}"
+else
+    warn "Failed to configure Tailscale Serve for dashboard."
+    info "You can try manually: sudo tailscale serve --bg --https=443 18789"
+fi
+
+# -------------------------------------------------------------------
+# Step 4 -- Set up Tailscale Funnel for voice webhook (if twilio installed)
+# -------------------------------------------------------------------
+step "Configure Tailscale Funnel (voice webhook)"
+
+if is_installed "twilio" 2>/dev/null; then
+    info "Twilio integration detected. Exposing voice webhook on port 8443."
+    info "Twilio must reach the webhook from the public internet, so this uses"
+    info "Tailscale Funnel (not Serve). Funnel must be allowed by your tailnet policy."
+    echo ""
+
+    if sudo tailscale funnel --bg --https=8443 18789; then
+        success "Voice webhook available at: https://${TS_HOSTNAME}:8443/webhook"
+        echo ""
+        info "Update your Twilio phone number webhook to:"
+        info "  https://${TS_HOSTNAME}:8443/webhook"
+    else
+        warn "Failed to configure Tailscale Funnel for voice webhook."
+        info "You can try manually: sudo tailscale funnel --bg --https=8443 18789"
+    fi
+else
+    info "Twilio integration not installed -- skipping voice webhook."
+    info "If you add Twilio later, re-run this integration to set up the webhook."
+fi
+
+# -------------------------------------------------------------------
+# Step 5 -- Enable Tailscale auth in gateway
+# -------------------------------------------------------------------
+step "Enable Tailscale authentication in gateway"
+
+if command -v openclaw &>/dev/null; then
+    if openclaw config set gateway.auth.allowTailscale true 2>/dev/null; then
+        success "Tailscale auth enabled in gateway config."
+    else
+        warn "Could not set gateway.auth.allowTailscale via CLI."
+        info "You may need to add it manually to ~/.openclaw/openclaw.json"
+    fi
+else
+    warn "openclaw CLI not found. Set gateway.auth.allowTailscale manually."
+fi
+
+# -------------------------------------------------------------------
+# Summary
+# -------------------------------------------------------------------
+step "Summary"
+
+echo ""
+info "Tailscale setup complete. Your endpoints:"
+echo ""
+if [[ -n "$TS_HOSTNAME" ]]; then
+    info "  Dashboard:     https://${TS_HOSTNAME}"
+    if is_installed "twilio" 2>/dev/null; then
+        info "  Voice webhook: https://${TS_HOSTNAME}:8443/webhook"
+    fi
+    info "  Tailscale IP:  ${TS_IP}"
+else
+    info "  Run 'tailscale status' to see your hostname."
+fi
+echo ""
+
+success "Tailscale remote access integration setup complete."
diff --git a/bates-enhance/integrations/tailscale/verify.sh b/bates-enhance/integrations/tailscale/verify.sh
new file mode 100755
index 0000000..380bdb6
--- /dev/null
+++ b/bates-enhance/integrations/tailscale/verify.sh
@@ -0,0 +1,129 @@
+#!/usr/bin/env bash
+# verify.sh -- Quick verification for Tailscale integration
+# Run this script to check that Tailscale is working correctly
+# with the Bates gateway.
+
+set -euo pipefail
+
+# Colors
+if [[ -t 1 ]]; then
+    RED='\033[0;31m'
+    GREEN='\033[0;32m'
+    YELLOW='\033[1;33m'
+    CYAN='\033[0;36m'
+    NC='\033[0m'
+else
+    RED='' GREEN='' YELLOW='' CYAN='' NC=''
+fi
+
+PASS=0
+FAIL=0
+WARN_COUNT=0
+
+# The "|| true" guards keep set -e from aborting when the counter is 0
+# (a post-increment of 0 returns a nonzero status).
+check_pass() { echo -e "  ${GREEN}[PASS]${NC} $1"; ((PASS++)) || true; }
+check_fail() { echo -e "  ${RED}[FAIL]${NC} $1"; ((FAIL++)) || true; }
+check_warn() { echo -e "  ${YELLOW}[WARN]${NC} $1"; ((WARN_COUNT++)) || true; }
+
+echo ""
+echo "Tailscale Integration Verification"
+echo "==================================="
+echo ""
+
+# 1. Tailscale installed
+echo -e "${CYAN}Checking Tailscale installation...${NC}"
+if command -v tailscale &>/dev/null; then
+    TS_VERSION=$(tailscale version 2>/dev/null | head -1 || echo "unknown")
+    check_pass "Tailscale installed (version: $TS_VERSION)"
+else
+    check_fail "Tailscale is not installed"
+fi
+
+# 2. Tailscale running
+echo -e "${CYAN}Checking Tailscale status...${NC}"
+TS_STATE=$(tailscale status --json 2>/dev/null | python3 -c "import json,sys; print(json.load(sys.stdin).get('BackendState',''))" 2>/dev/null || echo "")
+if [[ "$TS_STATE" == "Running" ]]; then
+    check_pass "Tailscale daemon is running"
+else
+    check_fail "Tailscale daemon is not running (state: ${TS_STATE:-unknown})"
+fi
+
+# 3. 
Get hostname and IP
+TS_HOSTNAME=$(tailscale status --json 2>/dev/null | python3 -c "import json,sys; print(json.load(sys.stdin).get('Self',{}).get('DNSName','').rstrip('.'))" 2>/dev/null || echo "")
+TS_IP=$(tailscale ip -4 2>/dev/null || echo "")
+
+if [[ -n "$TS_HOSTNAME" ]]; then
+    check_pass "Tailscale hostname: $TS_HOSTNAME"
+else
+    check_fail "Could not determine Tailscale hostname"
+fi
+
+if [[ -n "$TS_IP" ]]; then
+    check_pass "Tailscale IPv4: $TS_IP"
+else
+    check_fail "Could not determine Tailscale IP"
+fi
+
+# 4. Check Tailscale Serve
+echo -e "${CYAN}Checking Tailscale Serve...${NC}"
+SERVE_OUTPUT=$(sudo tailscale serve status 2>/dev/null || echo "")
+if [[ -n "$SERVE_OUTPUT" && "$SERVE_OUTPUT" != *"No serve config"* ]]; then
+    check_pass "Tailscale Serve is configured"
+
+    if echo "$SERVE_OUTPUT" | grep -q "443"; then
+        check_pass "HTTPS :443 -> gateway (dashboard)"
+    else
+        check_warn "HTTPS :443 not configured for dashboard"
+    fi
+
+    if echo "$SERVE_OUTPUT" | grep -q "8443"; then
+        check_pass "HTTPS :8443 -> gateway (voice webhook)"
+    else
+        check_warn "HTTPS :8443 not configured (voice webhook -- only needed if Twilio is installed)"
+    fi
+else
+    check_fail "Tailscale Serve is not configured"
+fi
+
+# 5. Check gateway is running
+echo -e "${CYAN}Checking gateway connectivity...${NC}"
+if systemctl --user is-active openclaw-gateway &>/dev/null; then
+    check_pass "Gateway service is active"
+else
+    check_fail "Gateway service is not running"
+fi
+
+# 6. Try to reach the gateway via Tailscale IP
+if [[ -n "$TS_IP" ]]; then
+    if curl -s -o /dev/null -w "%{http_code}" --connect-timeout 5 "http://${TS_IP}:18789/" 2>/dev/null | grep -qE "^(200|302|401|403)"; then
+        check_pass "Gateway reachable via Tailscale IP (http://${TS_IP}:18789)"
+    else
+        check_warn "Gateway not reachable via Tailscale IP (may need auth)"
+    fi
+fi
+
+# 7. Check gateway Tailscale auth setting
+echo -e "${CYAN}Checking gateway auth config...${NC}"
+if command -v openclaw &>/dev/null; then
+    TS_AUTH=$(openclaw config get gateway.auth.allowTailscale 2>/dev/null || echo "")
+    if [[ "$TS_AUTH" == "true" ]]; then
+        check_pass "Gateway Tailscale auth is enabled"
+    else
+        check_warn "Gateway Tailscale auth may not be enabled (got: ${TS_AUTH:-empty})"
+    fi
+else
+    check_warn "openclaw CLI not available -- cannot verify auth config"
+fi
+
+# Summary
+echo ""
+echo "==================================="
+echo -e "Results: ${GREEN}${PASS} passed${NC}, ${RED}${FAIL} failed${NC}, ${YELLOW}${WARN_COUNT} warnings${NC}"
+echo "==================================="
+
+if [[ $FAIL -gt 0 ]]; then
+    echo ""
+    echo "Some checks failed. Review the output above and fix issues."
+    exit 1
+fi
+
+exit 0
diff --git a/bates-enhance/integrations/teams/config-fragment.json b/bates-enhance/integrations/teams/config-fragment.json
new file mode 100644
index 0000000..d9f522e
--- /dev/null
+++ b/bates-enhance/integrations/teams/config-fragment.json
@@ -0,0 +1,11 @@
+{
+  "channels": {
+    "msteams": {
+      "enabled": true,
+      "appId": "{{TEAMS_APP_ID}}",
+      "appPassword": "{{TEAMS_SECRET}}",
+      "dmPolicy": "open",
+      "groupPolicy": "allowlist"
+    }
+  }
+}
diff --git a/bates-enhance/integrations/teams/setup.sh b/bates-enhance/integrations/teams/setup.sh
new file mode 100644
index 0000000..ed0cd68
--- /dev/null
+++ b/bates-enhance/integrations/teams/setup.sh
@@ -0,0 +1,106 @@
+# setup.sh -- Microsoft Teams integration for Bates
+# Sourced by bates-enhance.sh -- do NOT run directly.
+#
+# Configures the MS Teams channel (Bot Framework) so Bates can send and
+# receive messages in Teams DMs and group chats.
+#
+# Prerequisites:
+# - An Azure / Entra Bot registration (App ID + Secret)
+# - The openclaw msteams extension installed via npm
+# - The openclaw gateway running

+# -------------------------------------------------------------------
+# Step 1 -- Collect credentials
+# -------------------------------------------------------------------
+step "Collect Teams Bot credentials"
+
+info "You need an Azure / Entra ID Bot registration."
+info "Go to https://portal.azure.com -> App registrations -> your bot app."
+echo ""
+
+prompt_default "Teams App ID (Entra Bot App ID)" "" TEAMS_APP_ID
+if [[ -z "${TEAMS_APP_ID:-}" ]]; then
+    fatal "Teams App ID is required."
+fi
+
+prompt_default "Teams App Secret (client secret value)" "" TEAMS_SECRET
+if [[ -z "${TEAMS_SECRET:-}" ]]; then
+    fatal "Teams App Secret is required."
+fi
+
+success "Credentials collected."
+
+# -------------------------------------------------------------------
+# Step 2 -- Install NODE_PATH systemd drop-in
+# -------------------------------------------------------------------
+step "Install NODE_PATH systemd drop-in"
+
+DROPIN_DIR="$HOME/.config/systemd/user/openclaw-gateway.service.d"
+DROPIN_FILE="$DROPIN_DIR/msteams-deps.conf"
+
+mkdir -p "$DROPIN_DIR"
+
+if [[ -f "$DROPIN_FILE" ]]; then
+    info "Drop-in already exists at $DROPIN_FILE -- overwriting."
+fi
+
+cat > "$DROPIN_FILE" <<EOF
+[Service]
+# Let the gateway resolve the npm-installed msteams extension.
+# Adjust the path if your extension lives elsewhere.
+Environment=NODE_PATH=%h/.openclaw/node_modules
+EOF
+
+systemctl --user daemon-reload 2>/dev/null || warn "Could not reload systemd daemon."
+
+# -------------------------------------------------------------------
+# Step 3 -- Store secret in a systemd credential drop-in (chmod 600)
+# -------------------------------------------------------------------
+step "Store Teams secret securely"
+
+SECRETS_DROPIN="$DROPIN_DIR/msteams-secret.conf"
+
+cat > "$SECRETS_DROPIN" <<EOF
+[Service]
+Environment=TEAMS_APP_ID=$TEAMS_APP_ID
+Environment=TEAMS_APP_PASSWORD=$TEAMS_SECRET
+EOF
+
+chmod 600 "$SECRETS_DROPIN"
+systemctl --user daemon-reload 2>/dev/null || warn "Could not reload systemd daemon."
+success "Teams secret stored with restricted permissions."
+
+# -------------------------------------------------------------------
+# Step 4 -- Merge config fragment
+# -------------------------------------------------------------------
+step "Merge Teams config into openclaw.json"
+
+FRAGMENT_DIR="$ENHANCE_DIR/integrations/teams"
+RENDERED_FRAGMENT=$(mktemp)
+
+export TEAMS_APP_ID TEAMS_SECRET
+template_render "$FRAGMENT_DIR/config-fragment.json" "$RENDERED_FRAGMENT"
+config_merge "$RENDERED_FRAGMENT"
+rm -f "$RENDERED_FRAGMENT"
+
+success "Config merged."
+
+# -------------------------------------------------------------------
+# Step 5 -- Upload the Teams app manifest
+# -------------------------------------------------------------------
+step "Upload the Teams app manifest"
+
+echo ""
+info "1. Package your Teams app manifest (manifest.json plus icons) as a ZIP."
+info "2. Go to https://admin.teams.microsoft.com -> Teams apps -> Manage apps."
+info "3. Click 'Upload new app' and select the manifest ZIP."
+info "4. Approve the app for your organisation."
+echo ""
+info "For a step-by-step guide, see:"
+info "  https://learn.microsoft.com/en-us/microsoftteams/platform/concepts/deploy-and-publish/apps-publish-overview"
+echo ""
+
+success "Teams integration setup complete."
diff --git a/bates-enhance/integrations/teams/workspace-additions/refs/file-sharing.md b/bates-enhance/integrations/teams/workspace-additions/refs/file-sharing.md
new file mode 100644
index 0000000..a44b124
--- /dev/null
+++ b/bates-enhance/integrations/teams/workspace-additions/refs/file-sharing.md
@@ -0,0 +1,64 @@
+# File Sharing via Teams
+
+Reference documentation for sharing files through Microsoft Teams, OneDrive
+links, and handling attachments in conversations with Bates.
+
+## Sending Files to Users
+
+When Bates needs to share a file with a user through Teams:
+
+1. **OneDrive links (preferred)** -- Upload the file to the user's OneDrive
+   (or a shared location) and send a link in the Teams message. This avoids
+   size limits and keeps the file accessible later.
+
+   - Use the Graph API endpoint
+     `PUT /me/drive/root:/{path}:/content` to upload (see the sketch after
+     this list).
+   - Then share the link in the Teams reply.
+
+2. **Inline attachments** -- Small files (< 4 MB) can be sent as inline
+   attachments via the Bot Framework. The file is embedded directly in the
+   message payload. This is suitable for quick exports, small CSVs, or
+   generated images.
+
+3. **Adaptive Card file cards** -- For a richer experience, wrap the download
+   link in an Adaptive Card with a preview thumbnail and an "Open" action
+   button.
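+
+A minimal sketch of the OneDrive upload step, assuming a valid Microsoft
+Graph bearer token in `$GRAPH_TOKEN` (the token, file name, and target path
+are illustrative):
+
+```bash
+# Simple upload -- fine for small files; use an upload session for large
+# ones. The JSON response includes a webUrl to paste into the Teams reply.
+curl -s -X PUT \
+  -H "Authorization: Bearer $GRAPH_TOKEN" \
+  -H "Content-Type: text/csv" \
+  --data-binary @export.csv \
+  "https://graph.microsoft.com/v1.0/me/drive/root:/drafts/export.csv:/content"
+```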
+ +## Receiving Files from Users + +When a user sends a file to Bates in a Teams chat: + +- The Bot Framework delivers a `fileConsent` or `attachment` activity. +- The attachment includes a `contentUrl` pointing to the Teams/SharePoint + blob storage. +- Bates should download the file from the `contentUrl` using the bot's + bearer token before processing. + +### Supported Attachment Types + +| Type | Notes | +|----------|--------------------------------------------| +| Images | JPEG, PNG, GIF, WEBP -- displayed inline | +| PDFs | Rendered as a preview card in Teams | +| Office | Word, Excel, PowerPoint -- preview via link | +| Archives | ZIP, TAR -- download only, no preview | +| Other | Any file type can be sent as an attachment | + +## OneDrive Folder Conventions + +| Purpose | Path | +|----------------------|---------------------------------------------| +| General drafts | `drafts/` | +| Generated images | `drafts/images/` | +| Venture files | `drafts/Sales/{Company}/` | +| Venture images | `drafts/Sales/{Company}/images/` | +| Shared documents | `shared/` | + +## Tips + +- Always prefer OneDrive links over inline attachments for files larger than + 1 MB. Teams has a 4 MB per-message payload limit for bot messages. +- When sharing sensitive documents, ensure the OneDrive sharing permission is + scoped to the intended recipient only. +- Use the `@microsoft.graph.downloadUrl` property from the Graph response for + time-limited direct download links (useful in Adaptive Cards). diff --git a/bates-enhance/integrations/telegram/config-fragment.json b/bates-enhance/integrations/telegram/config-fragment.json new file mode 100644 index 0000000..4a131ad --- /dev/null +++ b/bates-enhance/integrations/telegram/config-fragment.json @@ -0,0 +1,10 @@ +{ + "channels": { + "telegram": { + "enabled": true, + "botToken": "{{TELEGRAM_BOT_TOKEN}}", + "dmPolicy": "pairing", + "streamMode": "partial" + } + } +} diff --git a/bates-enhance/integrations/telegram/setup.sh b/bates-enhance/integrations/telegram/setup.sh new file mode 100644 index 0000000..0817c22 --- /dev/null +++ b/bates-enhance/integrations/telegram/setup.sh @@ -0,0 +1,71 @@ +# setup.sh -- Telegram integration for Bates +# Sourced by bates-enhance.sh; has access to common.sh and config-merge.sh functions. +# +# Configures Telegram as a messaging channel for Bates via BotFather bot token. + +step "Telegram Bot Configuration" + +info "You'll need a Telegram bot token from @BotFather and your Telegram user ID." +info "To create a bot: open Telegram, search for @BotFather, send /newbot." +info "To find your user ID: search for @userinfobot and send /start." +echo "" + +# --- Bot Token --- +local bot_token="" +while [[ -z "$bot_token" ]]; do + read -rp "Telegram bot token (from @BotFather): " bot_token + if [[ -z "$bot_token" ]]; then + warn "Bot token cannot be empty." + continue + fi + if ! validate_telegram_token "$bot_token"; then + warn "Token format looks incorrect. Expected format: 123456789:ABCdefGHI-jklMNOpqr..." + if ! confirm "Use this token anyway?"; then + bot_token="" + continue + fi + fi +done + +# --- User ID --- +local user_id="" +while [[ -z "$user_id" ]]; do + read -rp "Your Telegram user ID (numeric): " user_id + if [[ -z "$user_id" ]]; then + warn "User ID cannot be empty." + continue + fi + if ! [[ "$user_id" =~ ^[0-9]+$ ]]; then + warn "User ID should be numeric." + if ! 
confirm "Use this value anyway?"; then + user_id="" + continue + fi + fi +done + +# --- Optional: Test bot token --- +echo "" +if confirm "Test the bot token now?"; then + if test_telegram_bot "$bot_token"; then + success "Bot token verified!" + else + warn "Could not verify bot token. You can continue and fix later." + if ! confirm "Continue anyway?"; then + fatal "Aborted by user." + fi + fi +fi + +# --- Export for template engine --- +export TELEGRAM_BOT_TOKEN="$bot_token" +export TELEGRAM_USER_ID="$user_id" + +# --- Merge config --- +step "Applying Telegram configuration" + +config_merge_telegram "$bot_token" "$user_id" +success "Telegram channel configured in openclaw.json" + +echo "" +info "After the gateway restarts, send a message to your bot to verify." diff --git a/bates-enhance/integrations/twilio/config-fragment.json b/bates-enhance/integrations/twilio/config-fragment.json new file mode 100644 index 0000000..33c0c60 --- /dev/null +++ b/bates-enhance/integrations/twilio/config-fragment.json @@ -0,0 +1,26 @@ +{ + "plugins": { + "entries": { + "voice-call": { + "enabled": true, + "config": { + "provider": "twilio", + "fromNumber": "{{TWILIO_NUMBER}}", + "twilio": { + "accountSid": "{{TWILIO_SID}}", + "authToken": "{{TWILIO_TOKEN}}" + }, + "streaming": { + "enabled": true + }, + "publicUrl": "{{VOICE_WEBHOOK_URL}}", + "maxDurationSeconds": 600, + "maxConcurrentCalls": 1, + "outbound": { + "defaultMode": "conversation" + } + } + } + } + } +} diff --git a/bates-enhance/integrations/twilio/setup.sh b/bates-enhance/integrations/twilio/setup.sh new file mode 100644 index 0000000..86118e1 --- /dev/null +++ b/bates-enhance/integrations/twilio/setup.sh @@ -0,0 +1,134 @@ +# setup.sh -- Twilio Voice Calling integration for Bates +# Sourced by bates-enhance.sh -- do NOT run directly. +# +# Configures inbound and outbound voice calls through Twilio so Bates can +# speak with users over the phone. Streaming (real-time speech) is enabled +# by default. +# +# Prerequisites: +# - A Twilio account with a phone number +# - A publicly reachable webhook URL (Tailscale Serve or ngrok) +# - (Optional) ElevenLabs integration for high-quality TTS + +# ------------------------------------------------------------------- +# Step 1 -- Collect Twilio credentials +# ------------------------------------------------------------------- +step "Collect Twilio credentials" + +info "You need your Twilio Account SID, Auth Token, and a phone number." +info "Find them at https://console.twilio.com -> Account Info." +echo "" + +prompt_default "Twilio Account SID (starts with AC)" "" TWILIO_SID +if [[ -z "${TWILIO_SID:-}" ]]; then + fatal "Twilio Account SID is required." +fi +if [[ ! "$TWILIO_SID" =~ ^AC ]]; then + warn "Account SID usually starts with 'AC'. Double-check your value." + if ! confirm "Continue anyway?"; then + fatal "Aborted." + fi +fi + +prompt_default "Twilio Auth Token" "" TWILIO_TOKEN +if [[ -z "${TWILIO_TOKEN:-}" ]]; then + fatal "Twilio Auth Token is required." +fi + +prompt_default "Twilio phone number (E.164 format, e.g. +15551234567)" "" TWILIO_NUMBER +if [[ -z "${TWILIO_NUMBER:-}" ]]; then + fatal "Twilio phone number is required." +fi +if [[ ! "$TWILIO_NUMBER" =~ ^\+[0-9]{7,15}$ ]]; then + warn "Phone number does not look like valid E.164 format." + if ! confirm "Continue anyway?"; then + fatal "Aborted." + fi +fi + +success "Twilio credentials collected." 
+
+# -------------------------------------------------------------------
+# Step 2 -- Determine webhook URL
+# -------------------------------------------------------------------
+step "Configure webhook URL"
+
+DEFAULT_WEBHOOK=""
+if command -v tailscale &>/dev/null; then
+    TS_HOSTNAME=$(tailscale status --json 2>/dev/null | python3 -c "import json,sys; print(json.load(sys.stdin).get('Self',{}).get('DNSName','').rstrip('.'))" 2>/dev/null || true)
+    if [[ -n "$TS_HOSTNAME" ]]; then
+        DEFAULT_WEBHOOK="https://${TS_HOSTNAME}:8443/webhook"
+        info "Tailscale detected. Suggested webhook URL: $DEFAULT_WEBHOOK"
+    fi
+fi
+
+prompt_default "Webhook URL (publicly reachable)" "${DEFAULT_WEBHOOK}" VOICE_WEBHOOK_URL
+if [[ -z "${VOICE_WEBHOOK_URL:-}" ]]; then
+    warn "No webhook URL provided. Inbound calls will not work until you set one."
+    VOICE_WEBHOOK_URL=""
+fi
+
+# -------------------------------------------------------------------
+# Step 3 -- Check for ElevenLabs TTS
+# -------------------------------------------------------------------
+step "Check TTS provider"
+
+if is_installed "elevenlabs" 2>/dev/null; then
+    success "ElevenLabs integration detected -- high-quality TTS will be used."
+else
+    warn "ElevenLabs is not installed."
+    info "Voice calls will use the default (basic) TTS engine."
+    info "For a natural-sounding voice, run: bates-enhance.sh elevenlabs"
+fi
+
+# -------------------------------------------------------------------
+# Step 4 -- Store secret in systemd drop-in (chmod 600)
+# -------------------------------------------------------------------
+step "Store Twilio credentials securely"
+
+DROPIN_DIR="$HOME/.config/systemd/user/openclaw-gateway.service.d"
+mkdir -p "$DROPIN_DIR"
+
+SECRETS_DROPIN="$DROPIN_DIR/twilio-secret.conf"
+cat > "$SECRETS_DROPIN" <<EOF
+[Service]
+Environment=TWILIO_ACCOUNT_SID=$TWILIO_SID
+Environment=TWILIO_AUTH_TOKEN=$TWILIO_TOKEN
+EOF
+
+chmod 600 "$SECRETS_DROPIN"
+systemctl --user daemon-reload 2>/dev/null || warn "Could not reload systemd daemon."
+
+# -------------------------------------------------------------------
+# Step 5 -- Merge config fragment
+# -------------------------------------------------------------------
+step "Merge Twilio config into openclaw.json"
+
+FRAGMENT_DIR="$ENHANCE_DIR/integrations/twilio"
+RENDERED_FRAGMENT=$(mktemp)
+
+export TWILIO_SID TWILIO_TOKEN TWILIO_NUMBER VOICE_WEBHOOK_URL
+template_render "$FRAGMENT_DIR/config-fragment.json" "$RENDERED_FRAGMENT"
+config_merge "$RENDERED_FRAGMENT"
+rm -f "$RENDERED_FRAGMENT"
+
+success "Config merged."
+
+# -------------------------------------------------------------------
+# Step 6 -- Reminder
+# -------------------------------------------------------------------
+step "Configure Twilio webhook"
+
+echo ""
+info "Point your Twilio phone number's Voice webhook to:"
+info "  ${VOICE_WEBHOOK_URL:-<your public URL>/webhook}"
+echo ""
+info "In the Twilio Console:"
+info "  1. Go to Phone Numbers -> Manage -> Active Numbers."
+info "  2. Select your number (${TWILIO_NUMBER})."
+info "  3. Under Voice Configuration, set 'A CALL COMES IN' webhook to the URL above."
+info "  4. Set HTTP method to POST."
+echo ""
+
+success "Twilio voice integration setup complete."
diff --git a/bates-enhance/integrations/twilio/workspace-additions/refs/voice-calling.md b/bates-enhance/integrations/twilio/workspace-additions/refs/voice-calling.md
new file mode 100644
index 0000000..6a50463
--- /dev/null
+++ b/bates-enhance/integrations/twilio/workspace-additions/refs/voice-calling.md
@@ -0,0 +1,83 @@
+# Voice Calling
+
+Reference documentation for the Twilio voice integration with Bates.
+Covers inbound calls, outbound calls, webhook configuration, and the
+real-time streaming call flow.
+
+## Overview
+
+Bates can make and receive phone calls through Twilio. Calls use
+WebSocket-based streaming so that speech is transcribed and responses are
+synthesised in real time, providing a natural conversational experience.
+
+## Call Flow
+
+### Inbound Calls
+
+1. A caller dials the Twilio phone number.
+2. Twilio sends an HTTP POST to the configured webhook URL.
+3. The gateway responds with TwiML that opens a WebSocket stream.
+4. Audio flows bidirectionally over the WebSocket:
+   - Caller speech is transcribed (STT).
+   - Bates generates a reply.
+   - The reply is synthesised (TTS) and streamed back to the caller.
+5. The call ends when either party hangs up or `maxDurationSeconds` is
+   reached.
+
+### Outbound Calls
+
+1. Bates initiates a call via the `make_voice_call` tool with a destination
+   number and an opening message.
+2. The gateway creates a Twilio outbound call using the REST API.
+3. When the callee answers, the same WebSocket streaming flow begins.
+
+## Configuration
+
+| Setting                  | Description                               |
+|--------------------------|-------------------------------------------|
+| `twilio.accountSid`      | Twilio Account SID (starts with AC)       |
+| `twilio.authToken`       | Twilio Auth Token                         |
+| `fromNumber`             | The Twilio phone number (E.164)           |
+| `streaming.enabled`      | Enable real-time WebSocket streaming      |
+| `publicUrl`              | Publicly reachable URL for webhooks       |
+| `maxDurationSeconds`     | Maximum call duration (default: 600)      |
+| `maxConcurrentCalls`     | Maximum simultaneous calls (default: 1)   |
+| `outbound.defaultMode`   | Default outbound mode ("conversation")    |
+
+## Webhook Setup
+
+The webhook URL must be reachable from the public internet. Two common
+approaches:
+
+1. **Tailscale Funnel** -- Expose the gateway port (18789) via Tailscale
+   Funnel on port 8443. The URL will look like
+   `https://<ts-hostname>:8443/webhook`.
+
+2. **Reverse proxy / ngrok** -- Use a reverse proxy or ngrok tunnel
+   pointed at `localhost:18789/webhook`.
+
+In the Twilio Console, configure:
+- **Voice webhook URL**: `https://<ts-hostname>:8443/webhook`
+- **HTTP method**: POST
+
+## TTS Providers
+
+| Provider      | Quality  | Notes                              |
+|---------------|----------|------------------------------------|
+| ElevenLabs    | High     | Natural voice, requires API key    |
+| Default/basic | Low      | Built-in fallback, robotic tone    |
+
+If the ElevenLabs integration is installed, Bates will use it
+automatically for voice synthesis. Otherwise, the built-in basic TTS
+engine is used.
+
+## Troubleshooting
+
+- **No audio / one-way audio**: Check that the `publicUrl` is reachable
+  from the internet and that the WebSocket upgrade succeeds.
+- **Call drops immediately**: Verify the webhook returns valid TwiML (a
+  quick check is sketched after this list). Check gateway logs with
+  `journalctl --user -u openclaw-gateway`.
+- **TTS sounds robotic**: Install the ElevenLabs integration for better
+  quality: `bates-enhance.sh elevenlabs`.
+- **Concurrent call limit**: Increase `maxConcurrentCalls` if needed, but be
+  aware of API rate limits and resource usage.
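+
+A quick manual check of the webhook (a sketch -- the exact TwiML the gateway
+returns may differ, and the hostname is illustrative):
+
+```bash
+# POST a minimal Twilio-style form to the voice webhook and print the
+# beginning of the TwiML response.
+curl -s -X POST "https://<ts-hostname>:8443/webhook" \
+  --data-urlencode "CallSid=CAtest" | head -n 20
+```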
diff --git a/bates-enhance/integrations/websearch/config-fragment.json b/bates-enhance/integrations/websearch/config-fragment.json new file mode 100644 index 0000000..13eac8f --- /dev/null +++ b/bates-enhance/integrations/websearch/config-fragment.json @@ -0,0 +1,14 @@ +{ + "tools": { + "web": { + "search": { + "apiKey": "{{BRAVE_API_KEY}}" + } + } + }, + "env": { + "vars": { + "BRAVE_API_KEY": "{{BRAVE_API_KEY}}" + } + } +} diff --git a/bates-enhance/integrations/websearch/setup.sh b/bates-enhance/integrations/websearch/setup.sh new file mode 100644 index 0000000..7902d83 --- /dev/null +++ b/bates-enhance/integrations/websearch/setup.sh @@ -0,0 +1,181 @@ +# setup.sh -- Web Search integration for Bates +# Sourced by bates-enhance.sh; has access to common.sh and config-merge.sh functions. +# +# Configures web search capabilities via Brave Search and/or Perplexity MCP servers. + +step "Web Search Configuration" + +info "Bates can search the web using Brave Search and/or Perplexity." +info "You can enable one or both providers." +echo "" + +local use_brave=false +local use_perplexity=false +local dropin_dir="$HOME/.config/systemd/user/openclaw-gateway.service.d" +mkdir -p "$dropin_dir" + +# --- Provider selection --- +echo "Which web search providers would you like to enable?" +echo " 1) Brave Search only" +echo " 2) Perplexity only" +echo " 3) Both Brave Search and Perplexity" +echo "" +local provider_choice="" +read -rp "Select [1]: " provider_choice + +case "$provider_choice" in + 2) + use_perplexity=true + ;; + 3) + use_brave=true + use_perplexity=true + ;; + *) + use_brave=true + ;; +esac + +# --- Brave Search setup --- +if $use_brave; then + step "Brave Search API Key" + + info "Get your API key from: https://brave.com/search/api/" + echo "" + + local brave_key="" + while [[ -z "$brave_key" ]]; do + read -rp "Brave Search API key: " brave_key + if [[ -z "$brave_key" ]]; then + warn "API key cannot be empty." + continue + fi + if ! validate_brave_key "$brave_key"; then + warn "Key format looks unexpected (usually starts with BSA)." + if ! confirm "Use this key anyway?"; then + brave_key="" + continue + fi + fi + done + + # Store in systemd drop-in + local dropin_file="$dropin_dir/brave-search.conf" + cat > "$dropin_file" < "$dropin_file" <&2 + echo " Expected: sk-ant-oat01-... (subscription) or sk-ant-api03-... (API key)" >&2 + return 1 + fi +} + +validate_openai_key() { + local key="$1" + if [[ "$key" =~ ^sk-proj- || "$key" =~ ^sk- ]]; then + echo " Format: OpenAI API key" + return 0 + else + echo " ERROR: Unrecognized OpenAI key format (expected sk-proj-... 
or sk-...)" >&2 + return 1 + fi +} + +validate_google_key() { + local key="$1" + if [[ "$key" =~ ^AIzaSy ]]; then + echo " Format: Google API key" + return 0 + else + echo " ERROR: Unrecognized Google key format (expected AIzaSy...)" >&2 + return 1 + fi +} + +validate_elevenlabs_key() { + local key="$1" + if [[ "$key" =~ ^sk_ && ${#key} -ge 32 ]]; then + echo " Format: ElevenLabs API key" + return 0 + else + echo " ERROR: Unrecognized ElevenLabs key format" >&2 + return 1 + fi +} + +validate_twilio_sid() { + local sid="$1" + if [[ "$sid" =~ ^AC[0-9a-f]{32}$ ]]; then + echo " Format: Twilio Account SID" + return 0 + else + echo " ERROR: Invalid Twilio SID format (expected AC + 32 hex chars)" >&2 + return 1 + fi +} + +validate_brave_key() { + local key="$1" + if [[ "$key" =~ ^BSA ]]; then + echo " Format: Brave Search API key" + return 0 + else + echo " ERROR: Unrecognized Brave key format (expected BSA...)" >&2 + return 1 + fi +} + +validate_telegram_token() { + local token="$1" + if [[ "$token" =~ ^[0-9]+:[A-Za-z0-9_-]+$ ]]; then + echo " Format: Telegram bot token" + return 0 + else + echo " ERROR: Invalid Telegram bot token format (expected 123456:ABC-def...)" >&2 + return 1 + fi +} + +# Test API key connectivity (optional, non-blocking) +test_anthropic_connection() { + local token="$1" + local result + result=$(curl -sf --max-time 10 \ + -H "x-api-key: $token" \ + -H "anthropic-version: 2023-06-01" \ + https://api.anthropic.com/v1/models 2>&1) + + if [[ $? -eq 0 ]]; then + echo " Connection: OK" + return 0 + else + echo " Connection: FAILED (check key and network)" >&2 + return 1 + fi +} + +test_telegram_bot() { + local token="$1" + local result + result=$(curl -sf --max-time 10 "https://api.telegram.org/bot${token}/getMe" 2>&1) + + if echo "$result" | python3 -c "import json,sys; d=json.load(sys.stdin); sys.exit(0 if d.get('ok') else 1)" 2>/dev/null; then + local botname + botname=$(echo "$result" | python3 -c "import json,sys; print(json.load(sys.stdin)['result']['username'])" 2>/dev/null) + echo " Telegram bot verified: @$botname" + return 0 + else + echo " ERROR: Telegram bot token invalid or bot not found" >&2 + return 1 + fi +} diff --git a/bates-enhance/lib/backup-restore.sh b/bates-enhance/lib/backup-restore.sh new file mode 100755 index 0000000..99a0b01 --- /dev/null +++ b/bates-enhance/lib/backup-restore.sh @@ -0,0 +1,99 @@ +#!/usr/bin/env bash +# backup-restore.sh -- Config backup + rollback per integration +# +# Before each integration install, the current config state is backed up. +# rollback_integration restores the most recent backup for a given integration. 
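+#
+# Typical flow (illustrative):
+#   backup_config m365          # snapshot taken before installing m365
+#   rollback_integration m365   # restore the most recent m365 backup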
+
+BACKUP_DIR="$HOME/.openclaw/enhance/backups"
+
+backup_config() {
+    local integration="$1"
+    local timestamp
+    timestamp=$(date +%Y%m%d-%H%M%S)
+    local backup_path="$BACKUP_DIR/$integration/$timestamp"
+    mkdir -p "$backup_path"
+
+    # Backup core config files
+    cp ~/.openclaw/openclaw.json "$backup_path/" 2>/dev/null || true
+    cp ~/.openclaw/agents/main/agent/auth-profiles.json "$backup_path/" 2>/dev/null || true
+    crontab -l > "$backup_path/crontab.bak" 2>/dev/null || true
+
+    # Backup openclaw cron jobs
+    if command -v openclaw &>/dev/null; then
+        openclaw cron list --json > "$backup_path/cron-jobs.json" 2>/dev/null || true
+    fi
+
+    echo "  Config backed up to $backup_path"
+}
+
+rollback_integration() {
+    local integration="$1"
+
+    if [[ -z "$integration" ]]; then
+        echo "Usage: bates-enhance.sh rollback <integration>"
+        echo "Example: bates-enhance.sh rollback m365"
+        exit 1
+    fi
+
+    local latest
+    latest=$(ls -1t "$BACKUP_DIR/$integration/" 2>/dev/null | head -1)
+
+    if [[ -z "$latest" ]]; then
+        echo "No backup found for $integration."
+        echo "Available backups:"
+        ls -1 "$BACKUP_DIR/" 2>/dev/null || echo "  (none)"
+        exit 1
+    fi
+
+    local backup_path="$BACKUP_DIR/$integration/$latest"
+
+    echo "Rolling back $integration to backup from $latest..."
+    echo ""
+
+    # Restore openclaw.json
+    if [[ -f "$backup_path/openclaw.json" ]]; then
+        cp "$backup_path/openclaw.json" ~/.openclaw/openclaw.json
+        echo "  Restored openclaw.json"
+    fi
+
+    # Restore auth profiles
+    if [[ -f "$backup_path/auth-profiles.json" ]]; then
+        cp "$backup_path/auth-profiles.json" ~/.openclaw/agents/main/agent/auth-profiles.json
+        echo "  Restored auth-profiles.json"
+    fi
+
+    # Restore system crontab
+    if [[ -f "$backup_path/crontab.bak" ]]; then
+        crontab "$backup_path/crontab.bak"
+        echo "  Restored system crontab"
+    fi
+
+    # Mark integration as uninstalled
+    source "$(dirname "${BASH_SOURCE[0]}")/integration-state.sh"
+    mark_uninstalled "$integration"
+
+    # Restart gateway
+    echo ""
+    echo "Restarting gateway..."
+    systemctl --user restart openclaw-gateway
+
+    echo ""
+    echo "Rollback complete. $integration has been removed."
+    echo "Run 'bates-enhance.sh' to verify current state."
+} + +# List all available backups +list_backups() { + echo "Available backups:" + echo "" + for dir in "$BACKUP_DIR"/*/; do + [[ -d "$dir" ]] || continue + local integration + integration=$(basename "$dir") + local count + count=$(ls -1 "$dir" 2>/dev/null | wc -l) + local latest + latest=$(ls -1t "$dir" 2>/dev/null | head -1) + echo " $integration: $count backup(s), latest: $latest" + done +} diff --git a/bates-enhance/lib/common.sh b/bates-enhance/lib/common.sh new file mode 100755 index 0000000..41d73ca --- /dev/null +++ b/bates-enhance/lib/common.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash +# common.sh -- Shared functions for Bates installer scripts +# Provides logging, colors, prompts, and step tracking + +set -euo pipefail + +# Colors (only if terminal supports them) +if [[ -t 1 ]]; then + RED='\033[0;31m' + GREEN='\033[0;32m' + YELLOW='\033[1;33m' + CYAN='\033[0;36m' + BOLD='\033[1m' + NC='\033[0m' +else + RED='' GREEN='' YELLOW='' CYAN='' BOLD='' NC='' +fi + +# Step counter +_STEP_NUM=0 + +step() { + ((_STEP_NUM++)) || true + echo -e "\n${CYAN}==> Step ${_STEP_NUM}: $1${NC}" +} + +info() { + echo -e "${CYAN}[INFO]${NC} $1" +} + +success() { + echo -e "${GREEN}[OK]${NC} $1" +} + +warn() { + echo -e "${YELLOW}[WARN]${NC} $1" +} + +error() { + echo -e "${RED}[ERROR]${NC} $1" >&2 +} + +fatal() { + error "$1" + exit 1 +} + +# Prompt with default value +prompt_default() { + local prompt="$1" + local default="$2" + local varname="$3" + local input + + if [[ -n "$default" ]]; then + read -rp "$prompt [$default]: " input + eval "$varname=\"${input:-$default}\"" + else + read -rp "$prompt: " input + eval "$varname=\"$input\"" + fi +} + +# Yes/No prompt (returns 0 for yes, 1 for no) +confirm() { + local prompt="${1:-Continue?}" + local reply + read -rp "$prompt (y/n): " reply + [[ "$reply" =~ ^[Yy] ]] +} + +# Check if a command exists +require_cmd() { + local cmd="$1" + local msg="${2:-$cmd is required but not installed}" + if ! command -v "$cmd" &>/dev/null; then + fatal "$msg" + fi +} + +# Spinner for long-running commands +spinner() { + local pid=$1 + local msg="${2:-Working...}" + local spin='⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏' + local i=0 + while kill -0 "$pid" 2>/dev/null; do + printf "\r${CYAN}%s${NC} %s" "${spin:i++%${#spin}:1}" "$msg" + sleep 0.1 + done + printf "\r" +} + +# Run a command with spinner +run_with_spinner() { + local msg="$1" + shift + "$@" &>/dev/null & + local pid=$! + spinner "$pid" "$msg" + wait "$pid" + local rc=$? + if [[ $rc -eq 0 ]]; then + success "$msg" + else + error "$msg (exit code $rc)" + return $rc + fi +} + +# Get the install directory (where bates-core/ scripts live) +get_install_dir() { + local script_dir + script_dir="$(cd "$(dirname "${BASH_SOURCE[1]:-${BASH_SOURCE[0]}}")" && pwd)" + # If called from lib/, go up one level + if [[ "$(basename "$script_dir")" == "lib" ]]; then + echo "$(dirname "$script_dir")" + else + echo "$script_dir" + fi +} + +INSTALL_DIR="$(get_install_dir)" diff --git a/bates-enhance/lib/config-merge.sh b/bates-enhance/lib/config-merge.sh new file mode 100755 index 0000000..eaf46a6 --- /dev/null +++ b/bates-enhance/lib/config-merge.sh @@ -0,0 +1,147 @@ +#!/usr/bin/env bash +# config-merge.sh -- Merge integration config fragments into openclaw.json +# +# Uses Python's json module for reliable deep merge. +# Each integration provides a JSON fragment that gets merged into the +# existing openclaw.json without overwriting unrelated settings. + +config_merge() { + local fragment="$1" + local config="${2:-$HOME/.openclaw/openclaw.json}" + + if [[ ! 
-f "$fragment" ]]; then + echo "ERROR: Config fragment not found: $fragment" >&2 + return 1 + fi + + if [[ ! -f "$config" ]]; then + echo "ERROR: Config file not found: $config" >&2 + return 1 + fi + + python3 -c " +import json, sys + +def deep_merge(base, overlay): + for key, value in overlay.items(): + if key in base and isinstance(base[key], dict) and isinstance(value, dict): + deep_merge(base[key], value) + else: + base[key] = value + +try: + with open('$config') as f: + config = json.load(f) + with open('$fragment') as f: + fragment = json.load(f) + deep_merge(config, fragment) + with open('$config', 'w') as f: + json.dump(config, f, indent=2) + print(' Config merged successfully.') +except Exception as e: + print(f'ERROR: Config merge failed: {e}', file=sys.stderr) + sys.exit(1) +" +} + +# Convenience: merge a JSON string directly (no file needed) +config_merge_inline() { + local json_str="$1" + local config="${2:-$HOME/.openclaw/openclaw.json}" + local tmpfile + tmpfile=$(mktemp) + + echo "$json_str" > "$tmpfile" + config_merge "$tmpfile" "$config" + rm -f "$tmpfile" +} + +# M365-specific merge helper +config_merge_m365() { + local assistant_email="$1" + local user_email="$2" + local tenant_id="$3" + + config_merge_inline "{ + \"env\": { + \"vars\": { + \"ASSISTANT_EMAIL\": \"$assistant_email\", + \"USER_EMAIL\": \"$user_email\", + \"TENANT_ID\": \"$tenant_id\" + } + } + }" +} + +# Telegram-specific merge helper +config_merge_telegram() { + local bot_token="$1" + local user_id="$2" + + config_merge_inline "{ + \"channels\": { + \"telegram\": { + \"enabled\": true, + \"botToken\": \"$bot_token\", + \"dmPolicy\": \"pairing\", + \"streamMode\": \"partial\" + } + } + }" +} + +# Teams-specific merge helper +config_merge_teams() { + local app_id="$1" + local app_secret="$2" + + config_merge_inline "{ + \"channels\": { + \"msteams\": { + \"enabled\": true, + \"appId\": \"$app_id\", + \"appPassword\": \"$app_secret\", + \"dmPolicy\": \"open\", + \"groupPolicy\": \"allowlist\" + } + } + }" +} + +# Twilio/voice-specific merge helper +config_merge_voice() { + local twilio_sid="$1" + local twilio_token="$2" + local twilio_number="$3" + local voice_id="${4:-}" + local webhook_url="${5:-}" + + local tts_section="" + if [[ -n "$voice_id" ]]; then + tts_section="\"tts\": {\"provider\": \"elevenlabs\", \"elevenlabs\": {\"voiceId\": \"$voice_id\", \"modelId\": \"eleven_flash_v2_5\"}}," + fi + + config_merge_inline "{ + \"plugins\": { + \"entries\": { + \"voice-call\": { + \"enabled\": true, + \"config\": { + \"provider\": \"twilio\", + \"fromNumber\": \"$twilio_number\", + \"twilio\": { + \"accountSid\": \"$twilio_sid\", + \"authToken\": \"$twilio_token\" + }, + ${tts_section} + \"streaming\": {\"enabled\": true}, + \"publicUrl\": \"$webhook_url\", + \"maxDurationSeconds\": 600, + \"maxConcurrentCalls\": 1, + \"outbound\": {\"defaultMode\": \"conversation\"} + } + } + } + } + }" +} diff --git a/bates-enhance/lib/cron-unlock.sh b/bates-enhance/lib/cron-unlock.sh new file mode 100755 index 0000000..5a06621 --- /dev/null +++ b/bates-enhance/lib/cron-unlock.sh @@ -0,0 +1,106 @@ +#!/usr/bin/env bash +# cron-unlock.sh -- Enable cron jobs per integration +# +# Each integration can include a cron-jobs-.json file listing +# cron jobs to register when the integration is installed. 
+# +# Template placeholders in the JSON are resolved before registration: +# {{USER_TZ}} -- from $USER_TZ env or defaults to UTC +# {{PRIMARY_CHANNEL}} -- from $PRIMARY_CHANNEL env or openclaw.json +# {{DELIVERY_TARGET}} -- from $DELIVERY_TARGET env or openclaw.json +# {{HOME}} -- user's home directory + +ENHANCE_DIR="${ENHANCE_DIR:-$HOME/.openclaw/enhance}" + +unlock_cron_jobs() { + local integration="$1" + local jobs_file + + # Find the cron jobs file for this integration + jobs_file=$(ls "$ENHANCE_DIR/integrations/$integration"/cron-jobs-*.json 2>/dev/null | head -1) + + if [[ -z "$jobs_file" || ! -f "$jobs_file" ]]; then + return 0 + fi + + echo "Unlocking cron jobs for $integration..." + + python3 -c " +import json, subprocess, sys, os + +jobs = json.load(open('$jobs_file')) + +# Resolve placeholder values +user_tz = os.environ.get('USER_TZ', 'UTC') +user_home = os.path.expanduser('~') +primary_channel = os.environ.get('PRIMARY_CHANNEL', '') +delivery_target = os.environ.get('DELIVERY_TARGET', '') + +# Try to read delivery config from openclaw.json if not in env +if not primary_channel or not delivery_target: + try: + cfg_path = os.path.join(user_home, '.openclaw', 'openclaw.json') + with open(cfg_path) as f: + cfg = json.load(f) + # Check for channels config + channels = cfg.get('channels', {}) + if not primary_channel: + # Use the first configured channel (msteams, telegram, etc.) + for ch in ['msteams', 'telegram', 'whatsapp']: + if ch in channels and channels[ch].get('enabled', True): + primary_channel = ch + break + if not delivery_target: + # Check for default delivery target in config + delivery_target = cfg.get('delivery', {}).get('defaultTarget', '') + except Exception: + pass + +def substitute(text): + \"\"\"Replace template placeholders in a string.\"\"\" + if not isinstance(text, str): + return text + text = text.replace('{{USER_TZ}}', user_tz) + text = text.replace('{{HOME}}', user_home) + text = text.replace('{{PRIMARY_CHANNEL}}', primary_channel) + text = text.replace('{{DELIVERY_TARGET}}', delivery_target) + return text + +for job in jobs: + cmd = ['openclaw', 'cron', 'add', + '--name', job['name'], + '--schedule', substitute(job['schedule']), + '--tz', substitute(job.get('tz', user_tz)), + '--message', substitute(job['message'])] + + if 'delivery' in job: + d = job['delivery'] + channel = substitute(d.get('channel', '')) + target = substitute(d.get('to', '')) + + # Only add delivery if both channel and target are resolved + if channel and target and '{{' not in channel and '{{' not in target: + cmd.extend(['--delivery-channel', channel]) + cmd.extend(['--delivery-to', target]) + if d.get('bestEffort'): + cmd.extend(['--delivery-best-effort']) + elif channel or target: + print(f' [~] {job[\"name\"]}: delivery skipped (channel={channel!r}, target={target!r} -- configure PRIMARY_CHANNEL and DELIVERY_TARGET)', file=sys.stderr) + + if 'sessionTarget' in job: + cmd.extend(['--session-target', job['sessionTarget']]) + + if 'agentId' in job: + cmd.extend(['--agent-id', job['agentId']]) + + try: + subprocess.run(cmd, check=True, capture_output=True, text=True) + print(f' [x] {job[\"name\"]}') + except subprocess.CalledProcessError as e: + # Job may already exist + if 'already exists' in (e.stderr or ''): + print(f' [=] {job[\"name\"]} (already exists)') + else: + print(f' [!] 
{job[\"name\"]} FAILED: {e.stderr}', file=sys.stderr) +" +} diff --git a/bates-enhance/lib/integration-state.sh b/bates-enhance/lib/integration-state.sh new file mode 100755 index 0000000..bc61f05 --- /dev/null +++ b/bates-enhance/lib/integration-state.sh @@ -0,0 +1,133 @@ +#!/usr/bin/env bash +# integration-state.sh -- Track installed integrations in state.json + +STATE_FILE="$HOME/.openclaw/enhance/state.json" + +init_state() { + if [[ ! -f "$STATE_FILE" ]]; then + mkdir -p "$(dirname "$STATE_FILE")" + echo '{"installed":{},"version":"2.0.0"}' > "$STATE_FILE" + fi +} + +is_installed() { + local name="$1" + init_state + python3 -c " +import json, sys +s = json.load(open('$STATE_FILE')) +sys.exit(0 if '$name' in s['installed'] else 1) +" +} + +mark_installed() { + local name="$1" + init_state + python3 -c " +import json, datetime +s = json.load(open('$STATE_FILE')) +s['installed']['$name'] = { + 'date': datetime.datetime.now().isoformat(), + 'version': '2.0.0' +} +json.dump(s, open('$STATE_FILE', 'w'), indent=2) +" + echo " Marked $name as installed." +} + +mark_uninstalled() { + local name="$1" + init_state + python3 -c " +import json +s = json.load(open('$STATE_FILE')) +s['installed'].pop('$name', None) +json.dump(s, open('$STATE_FILE', 'w'), indent=2) +" + echo " Marked $name as uninstalled." +} + +show_integration_status() { + init_state + echo "" + echo "Bates Enhancement Status" + echo "========================" + echo "" + echo " [x] Core (AI subscription, dashboard, cost-tracker, Telegram)" + + local integrations=(m365 teams twilio elevenlabs search image social tailscale agents google github deepseek websearch) + local labels=("Microsoft 365" "MS Teams" "Voice Calling (Twilio)" "Voice Clone (ElevenLabs)" "Search Index" "Image Generation" "Social Media (MixPost)" "Tailscale Remote Access" "Deputy Agents" "Google Calendar" "GitHub" "DeepSeek" "Web Search") + + for i in "${!integrations[@]}"; do + local name="${integrations[$i]}" + local label="${labels[$i]}" + if is_installed "$name" 2>/dev/null; then + echo " [x] $label" + else + echo " [ ] $label" + fi + done +} + +show_integration_details() { + show_integration_status + echo "" + echo "Integration details:" + echo " m365 Microsoft 365 (email, calendar, OneDrive, Planner)" + echo " teams MS Teams messaging (DM and group chat)" + echo " twilio Voice calling (inbound/outbound via Twilio)" + echo " elevenlabs Voice clone TTS (requires ElevenLabs account)" + echo " search Local email/file search index (SQLite FTS5 + Ollama)" + echo " image AI image generation (OpenAI/Google)" + echo " social Social media management (MixPost + Docker)" + echo " tailscale Remote access via Tailscale VPN" + echo " agents Deputy agent system (12 specialists)" + echo " google Google Calendar + Gmail integration" + echo " github GitHub repository access via MCP" + echo " deepseek DeepSeek model for overnight code review" + echo " websearch Brave + Perplexity web search" +} + +suggest_next_integration() { + local current="$1" + echo "" + case "$current" in + m365) + echo "Recommended next: bates-enhance.sh teams (Teams messaging)" + ;; + teams) + echo "Recommended next: bates-enhance.sh twilio (voice calling)" + ;; + twilio) + echo "Recommended next: bates-enhance.sh elevenlabs (voice clone TTS)" + ;; + elevenlabs) + echo "Recommended next: bates-enhance.sh tailscale (remote access)" + ;; + *) + echo "Run 'bates-enhance.sh' to see available integrations." 
+ ;; + esac +} + +deploy_workspace_additions() { + local integration="$1" + local additions_dir="$ENHANCE_DIR/integrations/$integration/workspace-additions" + + if [[ ! -d "$additions_dir" ]]; then + return 0 + fi + + echo "Deploying workspace files for $integration..." + + # Copy all workspace additions preserving directory structure + if [[ -d "$additions_dir/rules" ]]; then + cp "$additions_dir"/rules/* ~/.openclaw/workspace/rules/ 2>/dev/null || true + fi + if [[ -d "$additions_dir/refs" ]]; then + cp "$additions_dir"/refs/* ~/.openclaw/workspace/refs/ 2>/dev/null || true + fi + if [[ -d "$additions_dir/skills" ]]; then + cp -r "$additions_dir"/skills/* ~/.openclaw/workspace/skills/ 2>/dev/null || true + fi +} diff --git a/bates-enhance/lib/template-engine.sh b/bates-enhance/lib/template-engine.sh new file mode 100755 index 0000000..9f74275 --- /dev/null +++ b/bates-enhance/lib/template-engine.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +# template-engine.sh -- Replace {{PLACEHOLDER}} variables in template files +# +# Usage: +# source lib/template-engine.sh +# export ASSISTANT_NAME="Bates" USER_NAME="Robert" +# template_render "input.template" "output.conf" +# +# Placeholders use the format {{VAR_NAME}} where VAR_NAME matches +# an exported environment variable. Unset variables are left as-is. + +template_render() { + local template="$1" + local output="$2" + + if [[ ! -f "$template" ]]; then + echo "ERROR: Template not found: $template" >&2 + return 1 + fi + + cp "$template" "$output" + + # Find all {{VAR}} placeholders in the output file + local vars + vars=$(grep -oP '\{\{[A-Z_][A-Z0-9_]*\}\}' "$output" 2>/dev/null | sort -u) || true + + for var_with_braces in $vars; do + # Strip {{ and }} + local var_name="${var_with_braces#\{\{}" + var_name="${var_name%\}\}}" + + # Get the value from the environment + local var_value="${!var_name:-}" + + if [[ -n "$var_value" ]]; then + # Escape special sed characters in the value + local escaped_value + escaped_value=$(printf '%s' "$var_value" | sed 's/[&/\]/\\&/g') + sed -i "s|{{${var_name}}}|${escaped_value}|g" "$output" + fi + done +} + +# Render a template string (stdin) to stdout +template_render_string() { + local content + content=$(cat) + + local vars + vars=$(echo "$content" | grep -oP '\{\{[A-Z_][A-Z0-9_]*\}\}' 2>/dev/null | sort -u) || true + + for var_with_braces in $vars; do + local var_name="${var_with_braces#\{\{}" + var_name="${var_name%\}\}}" + local var_value="${!var_name:-}" + if [[ -n "$var_value" ]]; then + local escaped_value + escaped_value=$(printf '%s' "$var_value" | sed 's/[&/\]/\\&/g') + content=$(echo "$content" | sed "s|{{${var_name}}}|${escaped_value}|g") + fi + done + + echo "$content" +} diff --git a/bates-enhance/patches/README.md b/bates-enhance/patches/README.md new file mode 100644 index 0000000..48f2cd2 --- /dev/null +++ b/bates-enhance/patches/README.md @@ -0,0 +1,42 @@ +# Bates Gateway Patches + +These patches extend OpenClaw gateway functionality. They must be reapplied +after every `openclaw update` since dist filenames change with each release. + +## Patches + +### cost-footer.patch.js +Injects a cost footer into all outgoing messages via `globalThis.__openclawMessageTransform`. +Targets 5-7 dist files (reply dispatcher + proactive delivery files). + +### adaptive-cards.patch.js +Enables styled Adaptive Cards for sub-agent results in Teams via +`globalThis.__openclawSendTeamsCard`. Targets the reply dispatcher and +the Teams channel bridge. 
+
+### channel-bridge.patch.ts
+Adds the `__openclawSendTeamsCard` bridge function into the Teams channel
+`startAccount()` function.
+
+## Usage
+
+```bash
+# Apply all patches
+./reapply-patches.sh
+
+# Apply after an OpenClaw update
+openclaw update
+./reapply-patches.sh
+```
+
+## How It Works
+
+1. Backs up original files to `~/.openclaw/patch-backup/<version>/`
+2. Discovers patch targets via grep (filenames include hash suffixes that change per release)
+3. Applies patches using Node.js AST manipulation
+4. Restarts the gateway to pick up changes
+
+## Warning
+
+These patches modify OpenClaw internal dist files. They are fragile and may
+break with major OpenClaw refactors. Always keep backups.
diff --git a/bates-enhance/patches/adaptive-cards.patch.js b/bates-enhance/patches/adaptive-cards.patch.js
new file mode 100644
index 0000000..63d163b
--- /dev/null
+++ b/bates-enhance/patches/adaptive-cards.patch.js
@@ -0,0 +1,111 @@
+#!/usr/bin/env node
+/**
+ * adaptive-cards.patch.js -- Inject Adaptive Card rendering for sub-agent results
+ *
+ * Usage: node adaptive-cards.patch.js <target-file>
+ *
+ * This patch modifies the sub-agent announce flow to send styled Adaptive Cards
+ * in Teams channels when a sub-agent completes its work. Non-Teams channels
+ * are unaffected.
+ *
+ * The patch injects a call to globalThis.__openclawSendTeamsCard (set up by
+ * channel-bridge.patch.ts in the Teams extension).
+ */
+'use strict';
+
+const fs = require('fs');
+const path = require('path');
+
+const targetFile = process.argv[2];
+if (!targetFile) {
+  console.error('Usage: node adaptive-cards.patch.js <target-file>');
+  process.exit(1);
+}
+
+if (!fs.existsSync(targetFile)) {
+  console.error(`File not found: ${targetFile}`);
+  process.exit(1);
+}
+
+let code = fs.readFileSync(targetFile, 'utf8');
+
+// Check if already patched
+if (code.includes('__openclawSendTeamsCard')) {
+  console.log(`Already patched: ${path.basename(targetFile)}`);
+  process.exit(0);
+}
+
+// Find the sub-agent announce flow
+// Look for: runSubagentAnnounceFlow or similar pattern where sub-agent results are dispatched
+const announcePattern = /((?:runSubagentAnnounce|announceSubagent|subagent.*?announce)\w*\s*=\s*async\s+(?:function\s*)?\([^)]*\)\s*(?:=>)?\s*\{)/;
+
+let patched = false;
+
+if (announcePattern.test(code)) {
+  // Find where the result text is available and delivery happens
+  // Insert the Adaptive Card send before the normal delivery paths
+  const deliveryPattern = /((?:const|let|var)\s+(?:targetRequesterOrigin|requesterOrigin)\s*=\s*[^;]+;)/;
+
+  if (deliveryPattern.test(code)) {
+    code = code.replace(deliveryPattern, (match, originAssign) => {
+      patched = true;
+      return `${originAssign}
+      // [Bates patch] Send Adaptive Card for Teams sub-agent results
+      if (typeof globalThis.__openclawSendTeamsCard === 'function') {
+        try {
+          const _origin = targetRequesterOrigin || requesterOrigin;
+          if (_origin?.channel === 'msteams' && _origin?.conversationId) {
+            const _resultText = (typeof result === 'string' ?
result : result?.text || result?.message || '').slice(0, 3000); + if (_resultText.length > 0) { + const _card = { + type: 'AdaptiveCard', + version: '1.4', + body: [ + { + type: 'Container', + style: 'emphasis', + items: [{ + type: 'ColumnSet', + columns: [ + { type: 'Column', width: 'auto', items: [{ type: 'TextBlock', text: '\\ud83e\\udd16', size: 'Medium' }] }, + { type: 'Column', width: 'stretch', items: [{ type: 'TextBlock', text: '**Sub-Agent Result**', wrap: true }] } + ] + }] + }, + { + type: 'Container', + style: 'accent', + items: [{ type: 'TextBlock', text: _resultText, wrap: true, size: 'Small' }] + } + ] + }; + await globalThis.__openclawSendTeamsCard(_origin.conversationId, _card).catch(() => {}); + } + } + } catch {} + }`; + }); + } +} + +if (!patched) { + // Fallback: search for any function containing "subagent" and "announce" or "result" + const fallbackPattern = /(async\s+function[^{]*subagent[^{]*\{|subagent[^=]*=\s*async[^{]*\{)/i; + if (fallbackPattern.test(code)) { + code = code.replace(fallbackPattern, (match) => { + patched = true; + return `${match} + // [Bates patch] Adaptive Card for sub-agent results (fallback insertion) + // Note: This is a fallback patch point. Verify the card is sent correctly.`; + }); + } +} + +if (patched) { + fs.writeFileSync(targetFile, code, 'utf8'); + console.log(`Patched: ${path.basename(targetFile)}`); +} else { + console.warn(`Could not find patch insertion point in: ${path.basename(targetFile)}`); + console.warn('The sub-agent announce flow may have changed. Manual patching may be required.'); + process.exit(1); +} diff --git a/bates-enhance/patches/avatar-map.js b/bates-enhance/patches/avatar-map.js new file mode 100644 index 0000000..1cbc5a8 --- /dev/null +++ b/bates-enhance/patches/avatar-map.js @@ -0,0 +1,76 @@ +/** + * avatar-map.js -- Agent avatar mapping for Teams Adaptive Cards + * + * This module provides avatar URLs and helper functions used by the + * Adaptive Card patches in reply-*.js. These functions must be injected + * into the dist file alongside the card patches. + * + * The avatars are hosted on GitHub (public repo) so Teams servers can + * fetch them directly. Adaptive Cards require publicly accessible image URLs. + * + * MANUAL PATCH: Insert these functions into reply-*.js just before the + * `updateThinkingCardToFailed` helper function. Then update all card + * templates to use `buildBatesAvatarColumn(agentId)` instead of emoji + * TextBlock columns, and `resolveBatesDisplayName(agentId)` for the label. + * + * Also add `agentId: targetAgentId` to the `registerSubagentRun()` call + * in the sessions_spawn handler so progress/failure cards can resolve + * the agent identity. 
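+ *
+ * Illustrative shapes of the helpers below:
+ *   buildBatesAvatarColumn("mira")
+ *     // -> { type: "Column", width: "auto",
+ *     //      items: [{ type: "Image", url: BATES_AVATAR_MAP.mira, size: "Small", style: "Person" }] }
+ *   resolveBatesDisplayName("mira")  // -> "Mira"
+ *   resolveBatesDisplayName("main")  // -> "Bates"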
+ */ + +// --- BEGIN PATCH --- +// [Bates patch] Agent avatar map for Teams Adaptive Cards +const BATES_AVATAR_BASE = "https://raw.githubusercontent.com/getBates/Bates/feature/installer-v1/bates-core/plugins/dashboard/static/assets"; +const BATES_AVATAR_MAP = { + main: `${BATES_AVATAR_BASE}/agent-avatar.png`, + mira: `${BATES_AVATAR_BASE}/agent-baby_Sage.png`, + conrad: `${BATES_AVATAR_BASE}/agent-baby_bolt.png`, + soren: `${BATES_AVATAR_BASE}/agent-baby_core.png`, + amara: `${BATES_AVATAR_BASE}/agent-baby_aqua.png`, + jules: `${BATES_AVATAR_BASE}/agent-baby_frost.png`, + dash: `${BATES_AVATAR_BASE}/agent-baby_Ember.png`, + mercer: `${BATES_AVATAR_BASE}/agent-baby_Dark.png`, + kira: `${BATES_AVATAR_BASE}/agent-baby_pixel.png`, + nova: `${BATES_AVATAR_BASE}/agent-baby_nova.png`, + paige: `${BATES_AVATAR_BASE}/agent-baby_Sage.png`, + quinn: `${BATES_AVATAR_BASE}/agent-baby_sky.png`, + archer: `${BATES_AVATAR_BASE}/agent-baby_sky.png`, +}; +function buildBatesAvatarColumn(agentId) { + const url = BATES_AVATAR_MAP[agentId] || BATES_AVATAR_MAP.main; + return { type: "Column", width: "auto", items: [{ type: "Image", url, size: "Small", style: "Person" }] }; +} +function resolveBatesDisplayName(agentId) { + if (!agentId || agentId === "main") return "Bates"; + return agentId.charAt(0).toUpperCase() + agentId.slice(1); +} +// --- END PATCH --- + +/** + * Card template updates needed (4 locations in reply-*.js): + * + * 1. RESULT CARD (runSubagentAnnounceFlow): + * - Replace: { type: "TextBlock", text: "\u{1F916}", size: "Large" } + * - With: buildBatesAvatarColumn(subagentName || "main") + * - Replace: text: "**Baby Bates:**" + * - With: text: `**${resolveBatesDisplayName(subagentName || "main")}:**` + * + * 2. THINKING CARD (sessions_spawn handler): + * - Add `agentId: targetAgentId` to registerSubagentRun() call + * - Replace: { type: "TextBlock", text: "\u{1F9E0}", size: "Large" } + * - With: buildBatesAvatarColumn(targetAgentId || "main") + * - Replace: text: `**Bates is working on:** ${thinkingLabel}` + * - With: text: `**${resolveBatesDisplayName(targetAgentId || "main")} is working on:** ${thinkingLabel}` + * + * 3. PROGRESS CARD (sweepSubagentRuns): + * - Replace: { type: "TextBlock", text: "\u{1F9E0}", size: "Large" } + * - With: buildBatesAvatarColumn(entry.agentId || "main") + * - Replace: text: `**Bates is working on:** ${entry.label || "a task"}` + * - With: text: `**${resolveBatesDisplayName(entry.agentId || "main")} is working on:** ${entry.label || "a task"}` + * + * 4. FAILURE CARD (updateThinkingCardToFailed): + * - Replace: { type: "TextBlock", text: "\u{26A0}\u{FE0F}", size: "Large" } + * - With: buildBatesAvatarColumn(entry.agentId || "main") + * - Replace: text: `**Task:** ${entry.label || "sub-agent"}` + * - With: text: `**${resolveBatesDisplayName(entry.agentId || "main")}:** ${entry.label || "task"}` + */ diff --git a/bates-enhance/patches/channel-bridge.patch.ts b/bates-enhance/patches/channel-bridge.patch.ts new file mode 100644 index 0000000..e5e8db7 --- /dev/null +++ b/bates-enhance/patches/channel-bridge.patch.ts @@ -0,0 +1,57 @@ +/** + * channel-bridge.patch.ts -- Teams channel bridge for Adaptive Cards + * + * This code should be inserted into the Teams extension's channel.ts file, + * inside the startAccount() function, after the bot adapter is initialized. 
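+ *
+ * reapply-patches.sh splices this block in automatically by matching the
+ * first adapter assignment inside channel.ts, e.g. (shape illustrative,
+ * the real right-hand side varies by release):
+ *
+ *   const adapter = buildAdapter(cfg);  // <-- patch block goes right after this
+ *
+ * If your release declares the adapter differently, insert the block by hand.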
+ *
+ * It exposes two globalThis bridges:
+ * - __openclawSendTeamsCard: Send a new Adaptive Card to a conversation
+ * - __openclawUpdateTeamsCard: Update an existing Adaptive Card in-place (for thinking bubbles)
+ *
+ * It also adds an abort-signal block so the gateway does not auto-restart
+ * the channel (monitorMSTeamsProvider resolves immediately in v2026.2.17+).
+ *
+ * MANUAL PATCH: Insert this block into:
+ *   ~/.npm-global/lib/node_modules/openclaw/extensions/msteams/src/channel.ts
+ * Inside startAccount(), replacing the default `await monitorMSTeamsProvider(...)` block.
+ *
+ * Prerequisites:
+ * - channel.ts must import `updateAdaptiveCardMSTeams` from "./send.js"
+ *   (add to the existing import: `import { sendAdaptiveCardMSTeams, sendMessageMSTeams, updateAdaptiveCardMSTeams } from "./send.js";`)
+ * - send.ts must have the updateAdaptiveCardMSTeams function (see send-update.patch.ts)
+ */
+
+// --- BEGIN PATCH ---
+// [Bates patch] Expose Teams card sender for sub-agent Adaptive Cards
+if (!globalThis.__openclawSendTeamsCard) {
+  const capturedCfg = ctx.cfg;
+  globalThis.__openclawSendTeamsCard = async (to: string, card: Record<string, unknown>) => {
+    return sendAdaptiveCardMSTeams({ cfg: capturedCfg, to, card });
+  };
+}
+// [Bates patch] Expose card updater for thinking bubble (update existing card in-place)
+if (!globalThis.__openclawUpdateTeamsCard) {
+  const capturedCfg = ctx.cfg;
+  globalThis.__openclawUpdateTeamsCard = async (
+    to: string,
+    activityId: string,
+    card: Record<string, unknown>,
+  ) => {
+    return updateAdaptiveCardMSTeams({ cfg: capturedCfg, to, activityId, card });
+  };
+}
+const result = await monitorMSTeamsProvider({
+  cfg: ctx.cfg,
+  runtime: ctx.runtime,
+  abortSignal: ctx.abortSignal,
+});
+// [Bates patch] Block until abort signal fires — gateway treats resolved promise as "channel stopped"
+// Without this, monitorMSTeamsProvider resolves immediately (v2026.2.17+) and the gateway auto-restarts the channel in a loop.
+if (ctx.abortSignal && !ctx.abortSignal.aborted) {
+  await new Promise<void>((resolve) => {
+    ctx.abortSignal!.addEventListener("abort", () => resolve(), { once: true });
+  });
+  await result.shutdown();
+}
+return result;
+// --- END PATCH ---
diff --git a/bates-enhance/patches/cost-footer.patch.js b/bates-enhance/patches/cost-footer.patch.js
new file mode 100644
index 0000000..6285561
--- /dev/null
+++ b/bates-enhance/patches/cost-footer.patch.js
@@ -0,0 +1,104 @@
+#!/usr/bin/env node
+/**
+ * cost-footer.patch.js -- Inject cost footer transform into OpenClaw dist files
+ *
+ * Usage: node cost-footer.patch.js <target-file>
+ *
+ * This patch adds a call to globalThis.__openclawMessageTransform (if defined)
+ * before messages are dispatched to channels. The cost-tracker plugin sets up
+ * this transform to append daily cost summaries to outgoing messages.
+ *
+ * Target files are discovered by grepping for specific function signatures
+ * in the OpenClaw dist directory.
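+ *
+ * Example invocation (filename illustrative; real dist names carry
+ * per-release hash suffixes):
+ *
+ *   node cost-footer.patch.js ~/.npm-global/lib/node_modules/openclaw/dist/reply-abc123.js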
+ */
+'use strict';
+
+const fs = require('fs');
+const path = require('path');
+
+const targetFile = process.argv[2];
+if (!targetFile) {
+  console.error('Usage: node cost-footer.patch.js <target-file>');
+  process.exit(1);
+}
+
+if (!fs.existsSync(targetFile)) {
+  console.error(`File not found: ${targetFile}`);
+  process.exit(1);
+}
+
+let code = fs.readFileSync(targetFile, 'utf8');
+
+// Check if already patched
+if (code.includes('__openclawMessageTransform')) {
+  console.log(`Already patched: ${path.basename(targetFile)}`);
+  process.exit(0);
+}
+
+// Pattern 1: Reply dispatcher — find where message text is finalized before send
+// Look for the pattern where `text` or `body` is assigned before channel dispatch
+const replyPattern = /(\btext\s*=\s*[^;]+;)(\s*(?:await\s+)?(?:deliver|dispatch|send))/g;
+let patched = false;
+
+if (replyPattern.test(code)) {
+  code = code.replace(replyPattern, (match, textAssign, deliverCall) => {
+    patched = true;
+    return `${textAssign}
+    if (typeof globalThis.__openclawMessageTransform === 'function') {
+      try { text = await globalThis.__openclawMessageTransform(text, { channel: channel?.name || 'unknown' }); } catch {}
+    }
+    ${deliverCall}`;
+  });
+}
+
+// Pattern 2: Proactive delivery — similar pattern in deliver files
+const proactivePattern = /((?:message|msg|body)\s*(?:\.text)?\s*=\s*[^;]+;)(\s*(?:await\s+)?(?:this\.)?(?:deliver|send|dispatch|forward))/g;
+
+if (!patched && proactivePattern.test(code)) {
+  code = code.replace(proactivePattern, (match, msgAssign, sendCall) => {
+    patched = true;
+    return `${msgAssign}
+    if (typeof globalThis.__openclawMessageTransform === 'function') {
+      try {
+        const _txt = typeof message === 'string' ? message : (message?.text || msg?.text || body);
+        const _transformed = await globalThis.__openclawMessageTransform(_txt, { channel: 'proactive' });
+        if (typeof message === 'string') message = _transformed;
+        else if (message?.text) message.text = _transformed;
+        else if (msg?.text) msg.text = _transformed;
+      } catch {}
+    }
+    ${sendCall}`;
+  });
+}
+
+if (!patched) {
+  // Fallback: insert transform hook before any function that looks like a message sender
+  const fallbackPattern = /(async\s+function\s+(?:deliver|dispatch|send)\w*\s*\([^)]*\)\s*\{)/;
+  if (fallbackPattern.test(code)) {
+    code = code.replace(fallbackPattern, (match, funcDecl) => {
+      patched = true;
+      return `${funcDecl}
+      // [Bates patch] Cost footer transform
+      if (typeof globalThis.__openclawMessageTransform === 'function') {
+        try {
+          const _args = Array.from(arguments);
+          for (let i = 0; i < _args.length; i++) {
+            if (typeof _args[i] === 'string' && _args[i].length > 10) {
+              arguments[i] = await globalThis.__openclawMessageTransform(_args[i], {});
+              break;
+            }
+          }
+        } catch {}
+      }`;
+    });
+  }
+}
+
+if (patched) {
+  fs.writeFileSync(targetFile, code, 'utf8');
+  console.log(`Patched: ${path.basename(targetFile)}`);
+} else {
+  console.warn(`Could not find patch insertion point in: ${path.basename(targetFile)}`);
+  console.warn('The file structure may have changed. Manual patching may be required.');
+  process.exit(1);
+}
diff --git a/bates-enhance/patches/policy-dm.patch.ts b/bates-enhance/patches/policy-dm.patch.ts
new file mode 100644
index 0000000..1faaf31
--- /dev/null
+++ b/bates-enhance/patches/policy-dm.patch.ts
@@ -0,0 +1,32 @@
+/**
+ * policy-dm.patch.ts -- Fix DM reply style to prevent proxy revocation errors
+ *
+ * Bot Framework SDK wraps TurnContext in Proxy.revocable(), which gets revoked
+ * when the inbound HTTP request completes.
The default DM replyStyle "thread" + * uses this proxy directly (via ctx.sendActivity), causing: + * "Cannot perform 'set' on a proxy that has been revoked" + * + * Changing DM replyStyle to "top-level" forces the code path through + * adapter.continueConversation(), which creates a fresh TurnContext. + * + * MANUAL PATCH: In policy.ts, find the resolveMSTeamsReplyPolicy function: + * ~/.npm-global/lib/node_modules/openclaw/extensions/msteams/src/policy.ts + * + * Change this line (typically around line 223): + * return { requireMention: false, replyStyle: "thread" }; + * To: + * return { requireMention: false, replyStyle: "top-level" }; + * + * This only affects DMs (isDirectMessage === true). Group/channel replies + * are unaffected. + */ + +// Before: +// if (params.isDirectMessage) { +// return { requireMention: false, replyStyle: "thread" }; +// } + +// After: +// if (params.isDirectMessage) { +// return { requireMention: false, replyStyle: "top-level" }; +// } diff --git a/bates-enhance/patches/reapply-patches.sh b/bates-enhance/patches/reapply-patches.sh new file mode 100644 index 0000000..4101748 --- /dev/null +++ b/bates-enhance/patches/reapply-patches.sh @@ -0,0 +1,256 @@ +#!/usr/bin/env bash +# reapply-patches.sh -- Re-apply all Bates patches after an OpenClaw update +# +# Usage: +# ./reapply-patches.sh # Apply all patches +# ./reapply-patches.sh --dry # Show what would be patched without applying +# +# Run this after every `openclaw update`. +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +DIST_DIR="$HOME/.npm-global/lib/node_modules/openclaw/dist" +TEAMS_DIR="$HOME/.npm-global/lib/node_modules/openclaw/extensions/msteams/src" +BACKUP_DIR="$HOME/.openclaw/patch-backup" +DRY_RUN="${1:-}" + +# Colors +RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; CYAN='\033[0;36m'; NC='\033[0m' + +info() { echo -e "${CYAN}[INFO]${NC} $*"; } +success() { echo -e "${GREEN}[OK]${NC} $*"; } +warn() { echo -e "${YELLOW}[WARN]${NC} $*"; } +error() { echo -e "${RED}[ERROR]${NC} $*"; } + +# ── Get current OpenClaw version ── +OC_VERSION="unknown" +if command -v openclaw &>/dev/null; then + OC_VERSION=$(openclaw --version 2>/dev/null | head -1 | grep -oP '[\d.]+' | head -1 || echo "unknown") +fi +info "OpenClaw version: $OC_VERSION" + +# ── Verify dist directory ── +if [[ ! -d "$DIST_DIR" ]]; then + error "OpenClaw dist directory not found: $DIST_DIR" + exit 1 +fi + +# ── Create backup ── +BACKUP_PATH="$BACKUP_DIR/$OC_VERSION" +if [[ "$DRY_RUN" != "--dry" ]]; then + mkdir -p "$BACKUP_PATH" + info "Backup directory: $BACKUP_PATH" +fi + +# ============================================================ +# 1. Cost Footer Patches +# ============================================================ +echo "" +info "=== Cost Footer Patches ===" + +# Find target files: reply dispatcher + proactive delivery files +COST_TARGETS=() +while IFS= read -r f; do + COST_TARGETS+=("$f") +done < <(grep -rl 'triggerMessage\|deliverProactive\|dispatchReply\|deliver.*Message' "$DIST_DIR"/*.js 2>/dev/null | sort -u) + +if [[ ${#COST_TARGETS[@]} -eq 0 ]]; then + warn "No cost footer targets found. Gateway structure may have changed." +else + info "Found ${#COST_TARGETS[@]} potential target(s)" + for target in "${COST_TARGETS[@]}"; do + basename=$(basename "$target") + if [[ "$DRY_RUN" == "--dry" ]]; then + echo " Would patch: $basename" + else + # Backup original (only if not already backed up) + if [[ ! 
-f "$BACKUP_PATH/$basename" ]]; then + cp "$target" "$BACKUP_PATH/$basename" + fi + if node "$SCRIPT_DIR/cost-footer.patch.js" "$target"; then + success " $basename" + else + warn " Failed: $basename (may need manual patching)" + fi + fi + done +fi + +# ============================================================ +# 2. Adaptive Cards Patches +# ============================================================ +echo "" +info "=== Adaptive Cards Patches ===" + +# Find the sub-agent announce file +ANNOUNCE_TARGETS=() +while IFS= read -r f; do + ANNOUNCE_TARGETS+=("$f") +done < <(grep -rl 'runSubagentAnnounce\|subagent.*announce\|announceSubagent' "$DIST_DIR"/*.js 2>/dev/null | sort -u) + +if [[ ${#ANNOUNCE_TARGETS[@]} -eq 0 ]]; then + warn "No adaptive card targets found in dist." +else + info "Found ${#ANNOUNCE_TARGETS[@]} target(s)" + for target in "${ANNOUNCE_TARGETS[@]}"; do + basename=$(basename "$target") + if [[ "$DRY_RUN" == "--dry" ]]; then + echo " Would patch: $basename" + else + if [[ ! -f "$BACKUP_PATH/$basename" ]]; then + cp "$target" "$BACKUP_PATH/$basename" + fi + if node "$SCRIPT_DIR/adaptive-cards.patch.js" "$target"; then + success " $basename" + else + warn " Failed: $basename" + fi + fi + done +fi + +# ============================================================ +# 3. Teams Channel Bridge +# ============================================================ +echo "" +info "=== Teams Channel Bridge ===" + +CHANNEL_TS="$TEAMS_DIR/channel.ts" +if [[ ! -f "$CHANNEL_TS" ]]; then + warn "Teams channel.ts not found at: $CHANNEL_TS" + warn "Teams extension may not be installed. Skipping." +else + if grep -q '__openclawSendTeamsCard' "$CHANNEL_TS"; then + success "channel.ts already patched" + elif [[ "$DRY_RUN" == "--dry" ]]; then + echo " Would patch: channel.ts" + else + if [[ ! -f "$BACKUP_PATH/channel.ts" ]]; then + cp "$CHANNEL_TS" "$BACKUP_PATH/channel.ts" + fi + + # Find insertion point: after adapter initialization in startAccount() + # Look for a line like: const adapter = or this.adapter = + PATCH_CONTENT=$(sed -n '/^\/\/ --- BEGIN PATCH ---$/,/^\/\/ --- END PATCH ---$/p' "$SCRIPT_DIR/channel-bridge.patch.ts") + + if [[ -z "$PATCH_CONTENT" ]]; then + warn "Could not extract patch content from channel-bridge.patch.ts" + else + # Insert after the adapter assignment + if grep -q 'adapter\s*=' "$CHANNEL_TS"; then + # Use Python for reliable insertion + python3 -c " +import re, sys +with open('$CHANNEL_TS', 'r') as f: + content = f.read() +if '__openclawSendTeamsCard' in content: + print('Already patched') + sys.exit(0) +# Find adapter assignment and insert after the statement +pattern = r'((?:const|let|var)\s+adapter\s*=\s*[^;]+;)' +match = re.search(pattern, content) +if match: + insert_pos = match.end() + patch = ''' +$PATCH_CONTENT +''' + content = content[:insert_pos] + patch + content[insert_pos:] + with open('$CHANNEL_TS', 'w') as f: + f.write(content) + print('Patched successfully') +else: + print('Could not find adapter assignment', file=sys.stderr) + sys.exit(1) +" + if [[ $? -eq 0 ]]; then + success "channel.ts" + else + warn "Could not auto-patch channel.ts. Apply channel-bridge.patch.ts manually." + fi + else + warn "Could not find adapter assignment in channel.ts. Manual patching required." + fi + fi + fi +fi + +# ============================================================ +# 4. 
Teams send.ts -- updateAdaptiveCardMSTeams +# ============================================================ +echo "" +info "=== Teams send.ts (Card Update Function) ===" + +SEND_TS="$TEAMS_DIR/send.ts" +if [[ ! -f "$SEND_TS" ]]; then + warn "Teams send.ts not found at: $SEND_TS" + warn "Teams extension may not be installed. Skipping." +else + if grep -q 'updateAdaptiveCardMSTeams' "$SEND_TS"; then + success "send.ts already has updateAdaptiveCardMSTeams" + elif [[ "$DRY_RUN" == "--dry" ]]; then + echo " Would patch: send.ts" + else + if [[ ! -f "$BACKUP_PATH/send.ts" ]]; then + cp "$SEND_TS" "$BACKUP_PATH/send.ts" + fi + + PATCH_CONTENT=$(sed -n '/^\/\/ --- BEGIN PATCH ---$/,/^\/\/ --- END PATCH ---$/p' "$SCRIPT_DIR/send-update.patch.ts" | sed '1d;$d') + if [[ -z "$PATCH_CONTENT" ]]; then + warn "Could not extract patch content from send-update.patch.ts" + else + echo "" >> "$SEND_TS" + echo "$PATCH_CONTENT" >> "$SEND_TS" + success "send.ts" + fi + fi +fi + +# ============================================================ +# 5. Teams policy.ts -- DM replyStyle fix +# ============================================================ +echo "" +info "=== Teams policy.ts (DM Proxy Revocation Fix) ===" + +POLICY_TS="$TEAMS_DIR/policy.ts" +if [[ ! -f "$POLICY_TS" ]]; then + warn "Teams policy.ts not found at: $POLICY_TS" +else + if grep -q 'replyStyle: "top-level"' "$POLICY_TS" && grep -q 'isDirectMessage' "$POLICY_TS"; then + success "policy.ts already patched (DM replyStyle = top-level)" + elif [[ "$DRY_RUN" == "--dry" ]]; then + echo " Would patch: policy.ts (change DM replyStyle from thread to top-level)" + else + if [[ ! -f "$BACKUP_PATH/policy.ts" ]]; then + cp "$POLICY_TS" "$BACKUP_PATH/policy.ts" + fi + + # Replace "thread" with "top-level" in the DM branch + if sed -i 's/replyStyle: "thread"/replyStyle: "top-level"/' "$POLICY_TS"; then + if grep -q 'replyStyle: "top-level"' "$POLICY_TS"; then + success "policy.ts" + else + warn "policy.ts: sed ran but pattern not found. Manual patching may be required." + fi + else + warn "Could not patch policy.ts. Apply policy-dm.patch.ts manually." + fi + fi +fi + +# ============================================================ +# Summary +# ============================================================ +echo "" +if [[ "$DRY_RUN" == "--dry" ]]; then + info "Dry run complete. No files were modified." +else + success "Patch application complete!" + info "Backups saved to: $BACKUP_PATH" + echo "" + info "NOTE: The adaptive-cards.patch.js applies the basic card rendering only." + info "For the full thinking bubble + progress updates + failure cards, manual" + info "patching of reply-*.js is required. See post-update-checklist.md." + echo "" + info "Restart the gateway to apply changes:" + echo " systemctl --user restart openclaw-gateway" +fi diff --git a/bates-enhance/patches/send-update.patch.ts b/bates-enhance/patches/send-update.patch.ts new file mode 100644 index 0000000..641f0d3 --- /dev/null +++ b/bates-enhance/patches/send-update.patch.ts @@ -0,0 +1,92 @@ +/** + * send-update.patch.ts -- Add updateAdaptiveCardMSTeams to send.ts + * + * This function allows updating an existing Adaptive Card message in-place, + * which is used by the thinking bubble feature: when a sub-agent spawns, + * a "working on..." card is sent, then updated with the result when done. 
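+ *
+ * Intended flow (illustrative; assumes sendAdaptiveCardMSTeams returns the
+ * activity ID of the card it posts):
+ *
+ *   const sent = await sendAdaptiveCardMSTeams({ cfg, to, card: thinkingCard });
+ *   // ... sub-agent runs ...
+ *   await updateAdaptiveCardMSTeams({ cfg, to, activityId: sent.activityId, card: resultCard });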
+ *
+ * MANUAL PATCH: Append this code to the end of:
+ *   ~/.npm-global/lib/node_modules/openclaw/extensions/msteams/src/send.ts
+ * (before any trailing exports if present)
+ *
+ * Also add to the existing imports at the top of send.ts if not already present:
+ *   import { buildConversationReference } from "./messenger.js";
+ *   import { classifyMSTeamsSendError, formatMSTeamsSendErrorHint, formatUnknownError } from "./errors.js";
+ *
+ * And export this function from the module (add to channel.ts import line).
+ */
+
+// --- BEGIN PATCH ---
+// [Bates patch] Types for card update
+export type UpdateMSTeamsCardParams = {
+  /** Full config (for credentials) */
+  cfg: OpenClawConfig;
+  /** Conversation ID or user ID to send to */
+  to: string;
+  /** Activity ID of the message to update */
+  activityId: string;
+  /** New Adaptive Card JSON object */
+  card: Record<string, unknown>;
+};
+
+export type UpdateMSTeamsCardResult = {
+  activityId: string;
+  conversationId: string;
+};
+
+/**
+ * Update an existing Adaptive Card message in a Teams conversation.
+ * The bot must have originally sent the message being updated.
+ */
+export async function updateAdaptiveCardMSTeams(
+  params: UpdateMSTeamsCardParams,
+): Promise<UpdateMSTeamsCardResult> {
+  const { cfg, to, activityId, card } = params;
+  const { adapter, appId, conversationId, ref, log } = await resolveMSTeamsSendContext({
+    cfg,
+    to,
+  });
+
+  log.debug?.("updating adaptive card", {
+    conversationId,
+    activityId,
+    cardType: card.type,
+  });
+
+  const baseRef = buildConversationReference(ref);
+  const proactiveRef = {
+    ...baseRef,
+    activityId: undefined,
+  };
+
+  try {
+    await adapter.continueConversation(appId, proactiveRef, async (ctx: any) => {
+      await ctx.updateActivity({
+        id: activityId,
+        type: "message",
+        attachments: [
+          {
+            contentType: "application/vnd.microsoft.card.adaptive",
+            content: card,
+          },
+        ],
+      });
+    });
+  } catch (err) {
+    const classification = classifyMSTeamsSendError(err);
+    const hint = formatMSTeamsSendErrorHint(classification);
+    const status = classification.statusCode ? ` (HTTP ${classification.statusCode})` : "";
+    throw new Error(
+      `msteams card update failed${status}: ${formatUnknownError(err)}${hint ? ` (${hint})` : ""}`,
+      { cause: err },
+    );
+  }
+
+  log.info("updated adaptive card", { conversationId, activityId });
+
+  return {
+    activityId,
+    conversationId,
+  };
+}
+// --- END PATCH ---
diff --git a/build/build-package.sh b/build/build-package.sh
new file mode 100755
index 0000000..8b0e104
--- /dev/null
+++ b/build/build-package.sh
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+# build-package.sh -- Build the Bates installer packages
+# Creates distributable archives for Step 1 (Core) and Step 2 (Enhance)
+set -euo pipefail
+
+REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+BUILD_DIR="$REPO_DIR/build/output"
+VERSION="2.0.0"
+
+echo "Bates Installer Build"
+echo "====================="
+echo "Version: $VERSION"
+echo ""
+
+# --- Sanitization Check ---
+echo "Step 1: Running sanitization check..."
+bash "$REPO_DIR/build/sanitize-check.sh" "$REPO_DIR"
+echo ""
+
+# --- Create Build Output Directory ---
+mkdir -p "$BUILD_DIR"
+
+# --- Build Core Package ---
+echo "Step 2: Building Core package..."
+CORE_PKG="$BUILD_DIR/bates-core-$VERSION" +rm -rf "$CORE_PKG" +mkdir -p "$CORE_PKG" + +# Copy core files +cp -r "$REPO_DIR/bates-core/"* "$CORE_PKG/" + +# Remove any data directories or temp files +find "$CORE_PKG" -name "node_modules" -type d -exec rm -rf {} + 2>/dev/null || true +find "$CORE_PKG" -name "data" -type d -exec rm -rf {} + 2>/dev/null || true +find "$CORE_PKG" -name "*.tmp" -delete 2>/dev/null || true +find "$CORE_PKG" -name "*.bak" -delete 2>/dev/null || true + +# Make scripts executable +find "$CORE_PKG" -name "*.sh" -exec chmod +x {} + + +# Create tar.gz +(cd "$BUILD_DIR" && tar czf "bates-core-$VERSION.tar.gz" "bates-core-$VERSION/") +echo " Created: bates-core-$VERSION.tar.gz" + +# --- Build Enhance Package --- +echo "Step 3: Building Enhance package..." +ENHANCE_PKG="$BUILD_DIR/bates-enhance-$VERSION" +rm -rf "$ENHANCE_PKG" +mkdir -p "$ENHANCE_PKG" + +# Copy enhance files +cp -r "$REPO_DIR/bates-enhance/"* "$ENHANCE_PKG/" + +# Resolve symlinks (replace with actual files for distribution) +find "$ENHANCE_PKG" -type l | while read -r link; do + target="$(readlink -f "$link")" + if [[ -f "$target" ]]; then + rm "$link" + cp "$target" "$link" + fi +done + +# Clean up +find "$ENHANCE_PKG" -name "node_modules" -type d -exec rm -rf {} + 2>/dev/null || true +find "$ENHANCE_PKG" -name "*.tmp" -delete 2>/dev/null || true + +# Make scripts executable +find "$ENHANCE_PKG" -name "*.sh" -exec chmod +x {} + + +# Create tar.gz +(cd "$BUILD_DIR" && tar czf "bates-enhance-$VERSION.tar.gz" "bates-enhance-$VERSION/") +echo " Created: bates-enhance-$VERSION.tar.gz" + +# --- Summary --- +echo "" +echo "Build Complete" +echo "==============" +ls -lh "$BUILD_DIR"/*.tar.gz +echo "" +echo "To compile the Windows installer (.exe):" +echo " 1. Install Inno Setup 6 on Windows" +echo " 2. Open bates-core/BatesCore.iss in Inno Setup" +echo " 3. Compile (Ctrl+F9)" +echo " 4. Output: build/output/BatesCore-$VERSION.exe" diff --git a/build/sanitize-check.sh b/build/sanitize-check.sh new file mode 100755 index 0000000..5d0b71d --- /dev/null +++ b/build/sanitize-check.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +# sanitize-check.sh -- Scan package for forbidden patterns (secrets, personal data) +# Run this before committing or releasing any package. +set -euo pipefail + +SCAN_DIR="${1:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)}" + +echo "Sanitization Check" +echo "==================" +echo "Scanning: $SCAN_DIR" +echo "" + +FAILURES=0 + +# Patterns that must NOT appear in the package +declare -A FORBIDDEN=( + ["sk-ant-api03-"]="Anthropic API key" + ["sk-ant-oat01-"]="Anthropic subscription token" + ["sk-proj-"]="OpenAI API key" + ["pplx-"]="Perplexity API key" + ["AIzaSy"]="Google API key" + ["@vernot.com"]="Personal domain (vernot.com)" + ["@fdesk.tech"]="Company domain (fdesk.tech)" + ["22e1a92c"]="Microsoft Graph user ID" + ["8518923276"]="Telegram user ID" + ["a523f509"]="Entra tenant ID" + ["100.80.245"]="Tailscale IP range" + ["openclawgateway-1"]="Tailscale hostname" + ["tail0e82c9"]="Tailscale network" + ["7a4a278f76f15475"]="Gateway token" + ["fmXCTJRKqHL4c7ycPvMH"]="ElevenLabs voice ID" +) + +for pattern in "${!FORBIDDEN[@]}"; do + label="${FORBIDDEN[$pattern]}" + matches=$(grep -r --include="*.sh" --include="*.ps1" --include="*.iss" \ + --include="*.json" --include="*.md" --include="*.ts" \ + --include="*.js" --include="*.html" --include="*.css" \ + --include="*.template" --include="*.yaml" --include="*.yml" \ + -l "$pattern" "$SCAN_DIR" 2>/dev/null | grep -v ".git/" | grep -v "sanitize-check.sh" | grep -v "api-key-validator.sh" || true) + + if [[ -n "$matches" ]]; then + echo "[FAIL] $label ($pattern) found in:" + echo "$matches" | sed 's/^/ /' + ((FAILURES++)) + else + echo "[OK] $label" + fi +done + +echo "" +echo "======================================" +if [[ $FAILURES -eq 0 ]]; then + echo "PASS: No forbidden patterns found." + echo "Package is clean for distribution." +else + echo "FAIL: $FAILURES forbidden pattern(s) found!" + echo "Fix these before committing or releasing." + exit 1 +fi