From 31ca8535103757e747af6cc1f47259b0e63b5ebd Mon Sep 17 00:00:00 2001
From: Claude
Date: Thu, 5 Feb 2026 03:07:30 +0000
Subject: [PATCH 01/18] feat: Add ActivityWatch-compatible data model

- Add SQL migration for buckets and events tables (AW-compatible schema)
- Create Rust structs for Bucket, Event, Heartbeat in aw_models.rs
- Implement AwDatabase with CRUD operations and heartbeat merging
- Add Tauri commands for AW API (aw_commands.rs)
- Update tracking loop to send heartbeats alongside legacy sessions
- Add TypeScript types for AW API (AWBucket, AWEvent, etc.)
- Create useActivityWatch React hook for frontend integration

This enables compatibility with the ActivityWatch ecosystem, including
Awatcher for Linux, browser extensions, and editor plugins.

https://claude.ai/code/session_01QYNePFqXFj4AaRtTDJVe6J
---
 desktop-app/src-tauri/Cargo.toml              |   1 +
 .../migrations/0003_activitywatch_compat.sql  | 101 +++++
 desktop-app/src-tauri/src/aw_commands.rs      | 281 ++++++++++++
 desktop-app/src-tauri/src/aw_database.rs      | 427 ++++++++++++++++++
 desktop-app/src-tauri/src/aw_models.rs        | 194 ++++++++
 desktop-app/src-tauri/src/lib.rs              |  80 +++-
 desktop-app/src-tauri/src/main.rs             |  37 +-
 desktop-app/src/hooks/useActivityWatch.ts     | 339 ++++++++++++++
 desktop-app/src/types/index.ts                | 133 ++++++
 9 files changed, 1584 insertions(+), 9 deletions(-)
 create mode 100644 desktop-app/src-tauri/migrations/0003_activitywatch_compat.sql
 create mode 100644 desktop-app/src-tauri/src/aw_commands.rs
 create mode 100644 desktop-app/src-tauri/src/aw_database.rs
 create mode 100644 desktop-app/src-tauri/src/aw_models.rs
 create mode 100644 desktop-app/src/hooks/useActivityWatch.ts

diff --git a/desktop-app/src-tauri/Cargo.toml b/desktop-app/src-tauri/Cargo.toml
index 4061be5..10fd301 100644
--- a/desktop-app/src-tauri/Cargo.toml
+++ b/desktop-app/src-tauri/Cargo.toml
@@ -33,6 +33,7 @@ sqlx = { version = "0.8.6", features = ["runtime-tokio-rustls", "sqlite", "chron
 tokio = { version = "1.45.1", features = ["time", "full"] }
 reqwest = { version = "0.12", features = ["json"] }
 tauri-plugin-store = "2.3.0"
+gethostname = "0.5"
 
 # OS-specific dependencies for app detection
 [target.'cfg(windows)'.dependencies]
diff --git a/desktop-app/src-tauri/migrations/0003_activitywatch_compat.sql b/desktop-app/src-tauri/migrations/0003_activitywatch_compat.sql
new file mode 100644
index 0000000..3fc8be7
--- /dev/null
+++ b/desktop-app/src-tauri/migrations/0003_activitywatch_compat.sql
@@ -0,0 +1,101 @@
+-- ActivityWatch-compatible data model
+-- Buckets are containers for events from a specific watcher on a specific host
+-- Events store the actual activity data with a flexible JSON data field
+
+-- Buckets table: one bucket per watcher per host
+CREATE TABLE IF NOT EXISTS buckets (
+    id TEXT PRIMARY KEY,        -- e.g., "aw-watcher-window_hostname"
+    name TEXT,                  -- human-readable name (optional)
+    type TEXT NOT NULL,         -- event type: "currentwindow", "afkstatus", "web.tab.current", etc.
+    client TEXT NOT NULL,       -- client/watcher name: "aw-watcher-window", "loopd", etc.
+ hostname TEXT NOT NULL, -- device hostname + created TEXT NOT NULL, -- ISO8601 timestamp + data TEXT, -- optional JSON metadata + last_updated TEXT -- ISO8601 timestamp of last event +); + +-- Indexes for buckets +CREATE INDEX IF NOT EXISTS idx_buckets_hostname ON buckets(hostname); +CREATE INDEX IF NOT EXISTS idx_buckets_type ON buckets(type); +CREATE INDEX IF NOT EXISTS idx_buckets_client ON buckets(client); + +-- Events table: ActivityWatch-compatible events +CREATE TABLE IF NOT EXISTS events ( + id INTEGER PRIMARY KEY AUTOINCREMENT, -- auto-incrementing ID + bucket_id TEXT NOT NULL, -- reference to buckets.id + timestamp TEXT NOT NULL, -- ISO8601 timestamp (UTC) + duration REAL NOT NULL DEFAULT 0, -- duration in seconds (float) + data TEXT NOT NULL, -- JSON object with event data + FOREIGN KEY (bucket_id) REFERENCES buckets(id) ON DELETE CASCADE +); + +-- Indexes for events - critical for query performance +CREATE INDEX IF NOT EXISTS idx_events_bucket_id ON events(bucket_id); +CREATE INDEX IF NOT EXISTS idx_events_timestamp ON events(timestamp); +CREATE INDEX IF NOT EXISTS idx_events_bucket_timestamp ON events(bucket_id, timestamp); + +-- Key-value store for settings and metadata +CREATE TABLE IF NOT EXISTS key_value ( + key TEXT PRIMARY KEY, + value TEXT, + updated_at TEXT NOT NULL -- ISO8601 timestamp +); + +-- Migrate existing sessions to events format +-- First, create a default bucket for existing data +INSERT OR IGNORE INTO buckets (id, name, type, client, hostname, created, data, last_updated) +SELECT + 'aw-watcher-window_' || COALESCE( + (SELECT name FROM devices LIMIT 1), + 'localhost' + ), + 'Window Activity', + 'currentwindow', + 'loopd', + COALESCE( + (SELECT name FROM devices LIMIT 1), + 'localhost' + ), + datetime('now'), + '{}', + datetime('now') +WHERE EXISTS (SELECT 1 FROM sessions LIMIT 1); + +-- Migrate sessions to events +INSERT OR IGNORE INTO events (bucket_id, timestamp, duration, data) +SELECT + 'aw-watcher-window_' || COALESCE( + (SELECT name FROM devices LIMIT 1), + 'localhost' + ), + datetime(start_time, 'unixepoch'), + COALESCE(duration_sec, 0), + json_object('app', app_name, 'title', COALESCE(window_title, '')) +FROM sessions +WHERE duration_sec IS NOT NULL; + +-- Create view for easy querying of window events +CREATE VIEW IF NOT EXISTS window_events AS +SELECT + e.id, + e.bucket_id, + e.timestamp, + e.duration, + json_extract(e.data, '$.app') as app, + json_extract(e.data, '$.title') as title, + e.data +FROM events e +JOIN buckets b ON e.bucket_id = b.id +WHERE b.type = 'currentwindow'; + +-- Create view for daily usage summary (ActivityWatch compatible) +CREATE VIEW IF NOT EXISTS aw_usage_summary AS +SELECT + date(timestamp) AS day, + json_extract(data, '$.app') AS app_name, + SUM(duration) AS total_seconds +FROM events e +JOIN buckets b ON e.bucket_id = b.id +WHERE b.type = 'currentwindow' +GROUP BY day, app_name +ORDER BY day DESC, total_seconds DESC; diff --git a/desktop-app/src-tauri/src/aw_commands.rs b/desktop-app/src-tauri/src/aw_commands.rs new file mode 100644 index 0000000..9cf84e7 --- /dev/null +++ b/desktop-app/src-tauri/src/aw_commands.rs @@ -0,0 +1,281 @@ +use chrono::{DateTime, Utc}; +use serde_json::Value as JsonValue; +use std::collections::HashMap; +use std::sync::Arc; +use tauri::State; + +use crate::aw_database::AwDatabase; +use crate::aw_models::{Bucket, Event, GetEventsParams, Heartbeat, ServerInfo}; + +/// Shared AW database handle +pub type AwDb = Arc; + +// ========== Info Commands ========== + +/// Get server info 
+#[tauri::command] +pub async fn aw_get_info(app_handle: tauri::AppHandle) -> Result { + let device_id = crate::get_or_create_device_id(&app_handle); + let hostname = gethostname::gethostname() + .to_string_lossy() + .to_string(); + + Ok(ServerInfo { + hostname, + version: env!("CARGO_PKG_VERSION").to_string(), + testing: cfg!(debug_assertions), + device_id, + }) +} + +// ========== Bucket Commands ========== + +/// Get all buckets +#[tauri::command] +pub async fn aw_get_buckets( + aw_db: State<'_, AwDb>, +) -> Result, String> { + aw_db.get_buckets() + .await + .map_err(|e| format!("Failed to get buckets: {}", e)) +} + +/// Get a single bucket by ID +#[tauri::command] +pub async fn aw_get_bucket( + aw_db: State<'_, AwDb>, + bucket_id: String, +) -> Result { + aw_db.get_bucket(&bucket_id) + .await + .map_err(|e| format!("Failed to get bucket: {}", e))? + .ok_or_else(|| format!("Bucket not found: {}", bucket_id)) +} + +/// Create a new bucket +#[tauri::command] +pub async fn aw_create_bucket( + aw_db: State<'_, AwDb>, + bucket_id: String, + bucket_type: String, + client: String, + hostname: String, +) -> Result { + let bucket = Bucket { + id: bucket_id, + name: None, + bucket_type, + client, + hostname, + created: Utc::now(), + data: None, + last_updated: None, + }; + + aw_db.get_or_create_bucket(&bucket) + .await + .map_err(|e| format!("Failed to create bucket: {}", e)) +} + +/// Delete a bucket +#[tauri::command] +pub async fn aw_delete_bucket( + aw_db: State<'_, AwDb>, + bucket_id: String, +) -> Result<(), String> { + aw_db.delete_bucket(&bucket_id) + .await + .map_err(|e| format!("Failed to delete bucket: {}", e)) +} + +// ========== Event Commands ========== + +/// Get events from a bucket +#[tauri::command] +pub async fn aw_get_events( + aw_db: State<'_, AwDb>, + bucket_id: String, + start: Option, + end: Option, + limit: Option, +) -> Result, String> { + let params = GetEventsParams { + start: start.and_then(|s| DateTime::parse_from_rfc3339(&s).ok().map(|dt| dt.with_timezone(&Utc))), + end: end.and_then(|s| DateTime::parse_from_rfc3339(&s).ok().map(|dt| dt.with_timezone(&Utc))), + limit, + }; + + aw_db.get_events(&bucket_id, ¶ms) + .await + .map_err(|e| format!("Failed to get events: {}", e)) +} + +/// Get a single event +#[tauri::command] +pub async fn aw_get_event( + aw_db: State<'_, AwDb>, + bucket_id: String, + event_id: i64, +) -> Result { + aw_db.get_event(&bucket_id, event_id) + .await + .map_err(|e| format!("Failed to get event: {}", e))? 
+ .ok_or_else(|| format!("Event not found: {}", event_id)) +} + +/// Insert events into a bucket +#[tauri::command] +pub async fn aw_insert_events( + aw_db: State<'_, AwDb>, + bucket_id: String, + events: Vec, +) -> Result, String> { + aw_db.insert_events(&bucket_id, &events) + .await + .map_err(|e| format!("Failed to insert events: {}", e)) +} + +/// Delete an event +#[tauri::command] +pub async fn aw_delete_event( + aw_db: State<'_, AwDb>, + bucket_id: String, + event_id: i64, +) -> Result { + aw_db.delete_event(&bucket_id, event_id) + .await + .map_err(|e| format!("Failed to delete event: {}", e)) +} + +/// Get event count for a bucket +#[tauri::command] +pub async fn aw_get_event_count( + aw_db: State<'_, AwDb>, + bucket_id: String, +) -> Result { + aw_db.get_event_count(&bucket_id) + .await + .map_err(|e| format!("Failed to get event count: {}", e)) +} + +// ========== Heartbeat Command ========== + +/// Submit a heartbeat event (merges with last event if data matches) +#[tauri::command] +pub async fn aw_heartbeat( + aw_db: State<'_, AwDb>, + bucket_id: String, + timestamp: String, + duration: f64, + data: JsonValue, + pulsetime: f64, +) -> Result { + let timestamp = DateTime::parse_from_rfc3339(×tamp) + .map_err(|e| format!("Invalid timestamp: {}", e))? + .with_timezone(&Utc); + + let heartbeat = Heartbeat { + timestamp, + duration, + data, + }; + + aw_db.heartbeat(&bucket_id, &heartbeat, pulsetime) + .await + .map_err(|e| format!("Failed to process heartbeat: {}", e)) +} + +// ========== Query Commands ========== + +/// Get usage summary for a bucket +#[tauri::command] +pub async fn aw_get_usage_summary( + aw_db: State<'_, AwDb>, + bucket_id: String, + start: Option, + end: Option, +) -> Result, String> { + let start = start.and_then(|s| DateTime::parse_from_rfc3339(&s).ok().map(|dt| dt.with_timezone(&Utc))); + let end = end.and_then(|s| DateTime::parse_from_rfc3339(&s).ok().map(|dt| dt.with_timezone(&Utc))); + + aw_db.get_usage_summary(&bucket_id, start, end) + .await + .map_err(|e| format!("Failed to get usage summary: {}", e)) +} + +/// Get current/latest event from a bucket +#[tauri::command] +pub async fn aw_get_current_event( + aw_db: State<'_, AwDb>, + bucket_id: String, +) -> Result, String> { + aw_db.get_current_event(&bucket_id) + .await + .map_err(|e| format!("Failed to get current event: {}", e)) +} + +// ========== Settings Commands ========== + +/// Get a setting value +#[tauri::command] +pub async fn aw_get_setting( + aw_db: State<'_, AwDb>, + key: String, +) -> Result, String> { + aw_db.get_setting(&key) + .await + .map_err(|e| format!("Failed to get setting: {}", e)) +} + +/// Set a setting value +#[tauri::command] +pub async fn aw_set_setting( + aw_db: State<'_, AwDb>, + key: String, + value: String, +) -> Result<(), String> { + aw_db.set_setting(&key, &value) + .await + .map_err(|e| format!("Failed to set setting: {}", e)) +} + +// ========== Export Commands ========== + +/// Export a bucket with all its events +#[tauri::command] +pub async fn aw_export_bucket( + aw_db: State<'_, AwDb>, + bucket_id: String, +) -> Result { + let bucket = aw_db.get_bucket(&bucket_id) + .await + .map_err(|e| format!("Failed to get bucket: {}", e))? 
+ .ok_or_else(|| format!("Bucket not found: {}", bucket_id))?; + + let events = aw_db.get_events(&bucket_id, &GetEventsParams::default()) + .await + .map_err(|e| format!("Failed to get events: {}", e))?; + + Ok(crate::aw_models::BucketExport { bucket, events }) +} + +/// Export all buckets with events +#[tauri::command] +pub async fn aw_export_all( + aw_db: State<'_, AwDb>, +) -> Result, String> { + let buckets = aw_db.get_buckets() + .await + .map_err(|e| format!("Failed to get buckets: {}", e))?; + + let mut exports = HashMap::new(); + for (id, bucket) in buckets { + let events = aw_db.get_events(&id, &GetEventsParams::default()) + .await + .map_err(|e| format!("Failed to get events: {}", e))?; + + exports.insert(id.clone(), crate::aw_models::BucketExport { bucket, events }); + } + + Ok(exports) +} diff --git a/desktop-app/src-tauri/src/aw_database.rs b/desktop-app/src-tauri/src/aw_database.rs new file mode 100644 index 0000000..ed7aa55 --- /dev/null +++ b/desktop-app/src-tauri/src/aw_database.rs @@ -0,0 +1,427 @@ +use chrono::{DateTime, Datelike, Utc}; +use serde_json::Value as JsonValue; +use sqlx::{Pool, Sqlite, Row}; +use std::collections::HashMap; + +use crate::aw_models::{Bucket, Event, GetEventsParams, Heartbeat}; + +/// ActivityWatch-compatible database operations +pub struct AwDatabase { + pool: Pool, +} + +impl AwDatabase { + pub fn new(pool: Pool) -> Self { + Self { pool } + } + + // ========== Bucket Operations ========== + + /// Get all buckets + pub async fn get_buckets(&self) -> Result, sqlx::Error> { + let rows = sqlx::query( + "SELECT id, name, type, client, hostname, created, data, last_updated FROM buckets" + ) + .fetch_all(&self.pool) + .await?; + + let mut buckets = HashMap::new(); + for row in rows { + let id: String = row.get("id"); + let bucket = Bucket { + id: id.clone(), + name: row.get("name"), + bucket_type: row.get("type"), + client: row.get("client"), + hostname: row.get("hostname"), + created: parse_datetime(row.get("created")), + data: row.get::, _>("data") + .and_then(|s| serde_json::from_str(&s).ok()), + last_updated: row.get::, _>("last_updated") + .map(|s| parse_datetime(&s)), + }; + buckets.insert(id, bucket); + } + + Ok(buckets) + } + + /// Get a single bucket by ID + pub async fn get_bucket(&self, bucket_id: &str) -> Result, sqlx::Error> { + let row = sqlx::query( + "SELECT id, name, type, client, hostname, created, data, last_updated FROM buckets WHERE id = ?" 
+ ) + .bind(bucket_id) + .fetch_optional(&self.pool) + .await?; + + Ok(row.map(|r| Bucket { + id: r.get("id"), + name: r.get("name"), + bucket_type: r.get("type"), + client: r.get("client"), + hostname: r.get("hostname"), + created: parse_datetime(r.get("created")), + data: r.get::, _>("data") + .and_then(|s| serde_json::from_str(&s).ok()), + last_updated: r.get::, _>("last_updated") + .map(|s| parse_datetime(&s)), + })) + } + + /// Create a new bucket + pub async fn create_bucket(&self, bucket: &Bucket) -> Result<(), sqlx::Error> { + let data_json = bucket.data.as_ref() + .map(|d| serde_json::to_string(d).unwrap_or_else(|_| "{}".to_string())); + + sqlx::query( + "INSERT INTO buckets (id, name, type, client, hostname, created, data, last_updated) + VALUES (?, ?, ?, ?, ?, ?, ?, ?)" + ) + .bind(&bucket.id) + .bind(&bucket.name) + .bind(&bucket.bucket_type) + .bind(&bucket.client) + .bind(&bucket.hostname) + .bind(bucket.created.to_rfc3339()) + .bind(&data_json) + .bind(bucket.last_updated.map(|dt| dt.to_rfc3339())) + .execute(&self.pool) + .await?; + + Ok(()) + } + + /// Create bucket if it doesn't exist, return existing or new bucket + pub async fn get_or_create_bucket(&self, bucket: &Bucket) -> Result { + if let Some(existing) = self.get_bucket(&bucket.id).await? { + return Ok(existing); + } + self.create_bucket(bucket).await?; + Ok(bucket.clone()) + } + + /// Delete a bucket and all its events + pub async fn delete_bucket(&self, bucket_id: &str) -> Result<(), sqlx::Error> { + // Events are deleted via CASCADE + sqlx::query("DELETE FROM buckets WHERE id = ?") + .bind(bucket_id) + .execute(&self.pool) + .await?; + Ok(()) + } + + /// Update bucket's last_updated timestamp + pub async fn update_bucket_last_updated(&self, bucket_id: &str) -> Result<(), sqlx::Error> { + sqlx::query("UPDATE buckets SET last_updated = ? WHERE id = ?") + .bind(Utc::now().to_rfc3339()) + .bind(bucket_id) + .execute(&self.pool) + .await?; + Ok(()) + } + + // ========== Event Operations ========== + + /// Get events from a bucket with optional filtering + pub async fn get_events( + &self, + bucket_id: &str, + params: &GetEventsParams, + ) -> Result, sqlx::Error> { + let mut query = String::from( + "SELECT id, bucket_id, timestamp, duration, data FROM events WHERE bucket_id = ?" + ); + + if params.start.is_some() { + query.push_str(" AND timestamp >= ?"); + } + if params.end.is_some() { + query.push_str(" AND timestamp <= ?"); + } + + query.push_str(" ORDER BY timestamp DESC"); + + if params.limit.is_some() { + query.push_str(" LIMIT ?"); + } + + let mut q = sqlx::query(&query).bind(bucket_id); + + if let Some(start) = ¶ms.start { + q = q.bind(start.to_rfc3339()); + } + if let Some(end) = ¶ms.end { + q = q.bind(end.to_rfc3339()); + } + if let Some(limit) = params.limit { + q = q.bind(limit); + } + + let rows = q.fetch_all(&self.pool).await?; + + let events = rows.iter().map(|r| Event { + id: Some(r.get("id")), + bucket_id: Some(r.get("bucket_id")), + timestamp: parse_datetime(r.get("timestamp")), + duration: r.get("duration"), + data: serde_json::from_str(r.get::<&str, _>("data")).unwrap_or(JsonValue::Null), + }).collect(); + + Ok(events) + } + + /// Get a single event by ID + pub async fn get_event(&self, bucket_id: &str, event_id: i64) -> Result, sqlx::Error> { + let row = sqlx::query( + "SELECT id, bucket_id, timestamp, duration, data FROM events WHERE bucket_id = ? AND id = ?" 
+ ) + .bind(bucket_id) + .bind(event_id) + .fetch_optional(&self.pool) + .await?; + + Ok(row.map(|r| Event { + id: Some(r.get("id")), + bucket_id: Some(r.get("bucket_id")), + timestamp: parse_datetime(r.get("timestamp")), + duration: r.get("duration"), + data: serde_json::from_str(r.get::<&str, _>("data")).unwrap_or(JsonValue::Null), + })) + } + + /// Insert multiple events into a bucket + pub async fn insert_events(&self, bucket_id: &str, events: &[Event]) -> Result, sqlx::Error> { + let mut inserted = Vec::new(); + + for event in events { + let data_json = serde_json::to_string(&event.data).unwrap_or_else(|_| "{}".to_string()); + + let result = sqlx::query( + "INSERT INTO events (bucket_id, timestamp, duration, data) VALUES (?, ?, ?, ?)" + ) + .bind(bucket_id) + .bind(event.timestamp.to_rfc3339()) + .bind(event.duration) + .bind(&data_json) + .execute(&self.pool) + .await?; + + let mut new_event = event.clone(); + new_event.id = Some(result.last_insert_rowid()); + new_event.bucket_id = Some(bucket_id.to_string()); + inserted.push(new_event); + } + + // Update bucket's last_updated + self.update_bucket_last_updated(bucket_id).await?; + + Ok(inserted) + } + + /// Insert a single event + pub async fn insert_event(&self, bucket_id: &str, event: &Event) -> Result { + let events = self.insert_events(bucket_id, &[event.clone()]).await?; + Ok(events.into_iter().next().unwrap()) + } + + /// Delete an event + pub async fn delete_event(&self, bucket_id: &str, event_id: i64) -> Result { + let result = sqlx::query("DELETE FROM events WHERE bucket_id = ? AND id = ?") + .bind(bucket_id) + .bind(event_id) + .execute(&self.pool) + .await?; + Ok(result.rows_affected() > 0) + } + + /// Get event count for a bucket + pub async fn get_event_count(&self, bucket_id: &str) -> Result { + let row = sqlx::query("SELECT COUNT(*) as count FROM events WHERE bucket_id = ?") + .bind(bucket_id) + .fetch_one(&self.pool) + .await?; + Ok(row.get("count")) + } + + /// Heartbeat - merge with last event if data matches and within pulsetime + pub async fn heartbeat( + &self, + bucket_id: &str, + heartbeat: &Heartbeat, + pulsetime: f64, + ) -> Result { + // Get the last event in this bucket + let last_event = sqlx::query( + "SELECT id, bucket_id, timestamp, duration, data FROM events + WHERE bucket_id = ? ORDER BY timestamp DESC LIMIT 1" + ) + .bind(bucket_id) + .fetch_optional(&self.pool) + .await?; + + if let Some(row) = last_event { + let last_id: i64 = row.get("id"); + let last_timestamp: String = row.get("timestamp"); + let last_timestamp = parse_datetime(&last_timestamp); + let last_duration: f64 = row.get("duration"); + let last_data: String = row.get("data"); + let last_data: JsonValue = serde_json::from_str(&last_data).unwrap_or(JsonValue::Null); + + // Calculate end time of last event + let last_end = last_timestamp + chrono::Duration::milliseconds((last_duration * 1000.0) as i64); + + // Check if heartbeat is within pulsetime of last event's end + let time_diff = (heartbeat.timestamp - last_end).num_milliseconds() as f64 / 1000.0; + + // If data matches and within pulsetime, merge + if time_diff <= pulsetime && last_data == heartbeat.data { + // Extend the last event + let new_duration = (heartbeat.timestamp - last_timestamp).num_milliseconds() as f64 / 1000.0 + heartbeat.duration; + + sqlx::query("UPDATE events SET duration = ? 
+    // ========== Query Operations ==========
+
+    /// Get usage summary for a time period
+    pub async fn get_usage_summary(
+        &self,
+        bucket_id: &str,
+        start: Option<DateTime<Utc>>,
+        end: Option<DateTime<Utc>>,
+    ) -> Result<Vec<(String, f64)>, sqlx::Error> {
+        let mut query = String::from(
+            "SELECT json_extract(data, '$.app') as app, SUM(duration) as total
+             FROM events WHERE bucket_id = ?"
+        );
+
+        if start.is_some() {
+            query.push_str(" AND timestamp >= ?");
+        }
+        if end.is_some() {
+            query.push_str(" AND timestamp <= ?");
+        }
+
+        query.push_str(" GROUP BY app ORDER BY total DESC");
+
+        let mut q = sqlx::query(&query).bind(bucket_id);
+
+        if let Some(start) = start {
+            q = q.bind(start.to_rfc3339());
+        }
+        if let Some(end) = end {
+            q = q.bind(end.to_rfc3339());
+        }
+
+        let rows = q.fetch_all(&self.pool).await?;
+
+        let summary = rows.iter().map(|r| {
+            let app: Option<String> = r.get("app");
+            let total: f64 = r.get("total");
+            (app.unwrap_or_else(|| "Unknown".to_string()), total)
+        }).collect();
+
+        Ok(summary)
+    }
+
+    /// Get the current/latest event from a bucket
+    pub async fn get_current_event(&self, bucket_id: &str) -> Result<Option<Event>, sqlx::Error> {
+        let row = sqlx::query(
+            "SELECT id, bucket_id, timestamp, duration, data FROM events
+             WHERE bucket_id = ? ORDER BY timestamp DESC LIMIT 1"
+        )
+        .bind(bucket_id)
+        .fetch_optional(&self.pool)
+        .await?;
+
+        Ok(row.map(|r| Event {
+            id: Some(r.get("id")),
+            bucket_id: Some(r.get("bucket_id")),
+            timestamp: parse_datetime(r.get("timestamp")),
+            duration: r.get("duration"),
+            data: serde_json::from_str(r.get::<&str, _>("data")).unwrap_or(JsonValue::Null),
+        }))
+    }
+
+    // ========== Key-Value Operations ==========
+
+    /// Get a value from the key-value store
+    pub async fn get_setting(&self, key: &str) -> Result<Option<String>, sqlx::Error> {
+        let row = sqlx::query("SELECT value FROM key_value WHERE key = ?")
+            .bind(key)
+            .fetch_optional(&self.pool)
+            .await?;
+        Ok(row.map(|r| r.get("value")))
+    }
+
+    /// Set a value in the key-value store
+    pub async fn set_setting(&self, key: &str, value: &str) -> Result<(), sqlx::Error> {
+        sqlx::query(
+            "INSERT OR REPLACE INTO key_value (key, value, updated_at) VALUES (?, ?, ?)"
+        )
+        .bind(key)
+        .bind(value)
+        .bind(Utc::now().to_rfc3339())
+        .execute(&self.pool)
+        .await?;
+        Ok(())
+    }
+
+    /// Delete a setting
+    pub async fn delete_setting(&self, key: &str) -> Result<(), sqlx::Error> {
+        sqlx::query("DELETE FROM key_value WHERE key = ?")
+            .bind(key)
+            .execute(&self.pool)
+            .await?;
+        Ok(())
+    }
+}
+
+/// Helper to parse datetime from SQLite string
+fn parse_datetime(s: &str) -> DateTime<Utc> {
+    DateTime::parse_from_rfc3339(s)
+        .map(|dt| dt.with_timezone(&Utc))
+        .unwrap_or_else(|_| {
+            // Try parsing as SQLite datetime format
+            chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S")
+                .map(|ndt| DateTime::from_naive_utc_and_offset(ndt, Utc))
+                .unwrap_or_else(|_| Utc::now())
+        })
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_parse_datetime() {
+        let rfc3339 = "2024-01-15T10:30:00Z";
+        let dt = parse_datetime(rfc3339);
+        assert_eq!(dt.year(), 2024);
+
+        let sqlite_format = "2024-01-15 10:30:00";
+        let dt = parse_datetime(sqlite_format);
+        assert_eq!(dt.year(), 2024);
+    }
+}
diff --git a/desktop-app/src-tauri/src/aw_models.rs b/desktop-app/src-tauri/src/aw_models.rs
new file mode 100644
index 0000000..b32eb36
--- /dev/null
+++ b/desktop-app/src-tauri/src/aw_models.rs
@@ -0,0 +1,194 @@
+use chrono::{DateTime, Utc};
+use serde::{Deserialize, Serialize};
+use serde_json::Value as JsonValue;
+
+/// ActivityWatch-compatible Bucket
+/// A bucket is a container for events from a specific watcher on a specific host
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Bucket {
+    pub id: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    #[serde(rename = "type")]
+    pub bucket_type: String,
+    pub client: String,
+    pub hostname: String,
+    pub created: DateTime<Utc>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub data: Option<JsonValue>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_updated: Option<DateTime<Utc>>,
+}
+
+impl Bucket {
+    /// Create a new bucket with the standard naming convention
+    pub fn new(bucket_type: &str, client: &str, hostname: &str) -> Self {
+        let id = format!("{}_{}", client, hostname);
+        Self {
+            id,
+            name: None,
+            bucket_type: bucket_type.to_string(),
+            client: client.to_string(),
+            hostname: hostname.to_string(),
+            created: Utc::now(),
+            data: None,
+            last_updated: None,
+        }
+    }
+
+    /// Create a window watcher bucket
+    pub fn window_bucket(hostname: &str) -> Self {
+        Self::new("currentwindow", "aw-watcher-window", hostname)
+    }
+
+    /// Create an AFK watcher bucket
+    pub fn afk_bucket(hostname: &str) -> Self {
+        Self::new("afkstatus", "aw-watcher-afk", hostname)
+    }
+
+    /// Create a loopd window watcher bucket
+    pub fn loopd_window_bucket(hostname: &str) -> Self {
+        Self::new("currentwindow", "loopd", hostname)
+    }
+}
+
+/// ActivityWatch-compatible Event
+/// Events store the actual activity data with a flexible JSON data field
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Event {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub id: Option<i64>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub bucket_id: Option<String>,
+    pub timestamp: DateTime<Utc>,
+    pub duration: f64,
+    pub data: JsonValue,
+}
+
+impl Event {
+    /// Create a new event
+    pub fn new(timestamp: DateTime<Utc>, duration: f64, data: JsonValue) -> Self {
+        Self {
+            id: None,
+            bucket_id: None,
+            timestamp,
+            duration,
+            data,
+        }
+    }
+
+    /// Create a window event
+    pub fn window_event(timestamp: DateTime<Utc>, duration: f64, app: &str, title: &str) -> Self {
+        Self::new(
+            timestamp,
+            duration,
+            serde_json::json!({
+                "app": app,
+                "title": title
+            }),
+        )
+    }
+
+    /// Create an AFK event
+    pub fn afk_event(timestamp: DateTime<Utc>, duration: f64, is_afk: bool) -> Self {
+        Self::new(
+            timestamp,
+            duration,
+            serde_json::json!({
+                "status": if is_afk { "afk" } else { "not-afk" }
+            }),
+        )
+    }
+}
+
+/// Window event data structure
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct WindowEventData {
+    pub app: String,
+    pub title: String,
+}
+
+/// AFK event data structure
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AfkEventData {
+    pub status: String, // "afk" or "not-afk"
+}
+
+/// Web tab event data structure
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct WebTabEventData {
+    pub url: String,
+    pub title: String,
+    #[serde(default)]
+    pub audible: bool,
+    #[serde(default)]
+    pub incognito: bool,
+}
+
+/// Editor activity event data structure
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct EditorEventData {
+    pub file: String,
+    pub project: String,
+    pub language: String,
+}
+
+/// Bucket export format (bucket with all events)
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BucketExport {
+    #[serde(flatten)]
+    pub bucket: Bucket,
+    pub events: Vec<Event>,
+}
+
+/// Query parameters for getting events
+#[derive(Debug, Clone, Serialize, Deserialize, Default)]
+pub struct GetEventsParams {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub start: Option<DateTime<Utc>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub end: Option<DateTime<Utc>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub limit: Option<i64>,
+}
+
+/// Heartbeat request - used for efficient event submission
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Heartbeat {
+    pub timestamp: DateTime<Utc>,
+    pub duration: f64,
+    pub data: JsonValue,
+}
+
+impl Heartbeat {
+    /// Convert heartbeat to event
+    pub fn to_event(self) -> Event {
+        Event::new(self.timestamp, self.duration, self.data)
+    }
+}
+
+/// Info response for the API
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ServerInfo {
+    pub hostname: String,
+    pub version: String,
+    pub testing: bool,
+    pub device_id: String,
+}
+
+/// Standard bucket types as constants
+pub mod bucket_types {
+    pub const CURRENT_WINDOW: &str = "currentwindow";
+    pub const AFK_STATUS: &str = "afkstatus";
+    pub const WEB_TAB: &str = "web.tab.current";
+    pub const EDITOR_ACTIVITY: &str = "app.editor.activity";
+}
+
+/// Standard client names
+pub mod clients {
+    pub const LOOPD: &str = "loopd";
+    pub const AW_WATCHER_WINDOW: &str = "aw-watcher-window";
+    pub const AW_WATCHER_AFK: &str = "aw-watcher-afk";
+    pub const AW_WATCHER_WEB: &str = "aw-watcher-web";
+    pub const AWATCHER: &str = "awatcher";
+}
diff --git a/desktop-app/src-tauri/src/lib.rs b/desktop-app/src-tauri/src/lib.rs
index c11e97e..9335fa9 100644
--- a/desktop-app/src-tauri/src/lib.rs
+++ b/desktop-app/src-tauri/src/lib.rs
@@ -4,6 +4,9 @@ pub mod database;
 pub mod supabase;
 pub mod blocking;
 pub mod updater;
+pub mod aw_models;
+pub mod aw_database;
+pub mod aw_commands;
 
 use blocking::BLOCKING_SYSTEM;
 
@@ -22,6 +25,9 @@ use tauri_plugin_store::Builder as StorePluginBuilder;
 /// and avoid the mismatching-type runtime panic described in the Tauri docs.
 pub type Db = Arc<Database>;
 
+/// Shared ActivityWatch database handle
+pub type AwDb = Arc<AwDatabase>;
+
 // Get or create persistent device ID stored in app data directory
 pub fn get_or_create_device_id(app_handle: &tauri::AppHandle) -> String {
     let app_dir = app_handle.path().app_data_dir().expect("Failed to get app data dir");
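The aliases matter because Tauri resolves managed state by exact type: the type passed to `manage` must be the same one commands extract, otherwise extraction panics at runtime. A hypothetical command illustrating the pairing (editorial sketch, not from the commit):

```rust
use std::sync::Arc;
use tauri::State;

// Stand-in for the real type; in the app this is aw_database::AwDatabase.
struct AwDatabase;
type AwDb = Arc<AwDatabase>;

// Setup side: app.manage::<AwDb>(Arc::new(AwDatabase));
// Command side: the State parameter must name the managed type exactly.
#[tauri::command]
async fn aw_ping(_aw_db: State<'_, AwDb>) -> Result<String, String> {
    // State<'_, AwDatabase> here would panic when first invoked, because only
    // Arc<AwDatabase> (the alias target) was registered with `manage`.
    Ok("ok".to_string())
}
```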
@@ -73,15 +79,19 @@ pub async fn reset_tracking(db: &Db) -> Result<(), String> {
-pub fn start_tracking(db: Db, app_handle: tauri::AppHandle) {
+pub fn start_tracking(db: Db, aw_db: AwDb, app_handle: tauri::AppHandle) {
     let mut current_app: Option<String> = None;
     let mut last_switch_time: chrono::DateTime<Local> = Local::now();
 
-    // Get the persistent device ID
+    // Get the persistent device ID and hostname
     let device_id = get_or_create_device_id(&app_handle);
+    let hostname = gethostname::gethostname()
+        .to_string_lossy()
+        .to_string();
     println!("[TRACKER] Using persistent device ID: {}", device_id);
+    println!("[TRACKER] Hostname: {}", hostname);
 
     // Initialize device in database with the persistent ID
     match db.initialize_device_with_id(device_id.clone(), "Desktop App".to_string(), "macOS".to_string()).await {
@@ -92,6 +102,23 @@
         }
     }
 
+    // Initialize ActivityWatch bucket for window tracking
+    let bucket_id = format!("aw-watcher-window_{}", hostname);
+    let bucket = aw_models::Bucket {
+        id: bucket_id.clone(),
+        name: Some("Window Activity".to_string()),
+        bucket_type: aw_models::bucket_types::CURRENT_WINDOW.to_string(),
+        client: aw_models::clients::LOOPD.to_string(),
+        hostname: hostname.clone(),
+        created: chrono::Utc::now(),
+        data: None,
+        last_updated: None,
+    };
+    match aw_db.get_or_create_bucket(&bucket).await {
+        Ok(_) => println!("[TRACKER] AW bucket initialized: {}", bucket_id),
+        Err(e) => println!("[TRACKER] Failed to initialize AW bucket: {}", e),
+    }
+
     // Create and store the usage tracker
     {
         let mut tracker_guard = usage::USAGE_TRACKER.lock().unwrap();
@@ -261,22 +288,38 @@
                         // Handle database operations in a separate task
                         let db_clone = db.clone();
+                        let aw_db_clone = aw_db.clone();
                         let device_id = device_id.clone();
+                        let bucket_id_clone = bucket_id.clone();
                         tauri::async_runtime::spawn(async move {
-                            // End previous session
+                            // End previous session (legacy)
                             if let Err(e) = db_clone.end_current_session(&device_id).await {
                                 log::warn!("Failed to end session: {}", e);
                             }
 
-                            // Start new session
+                            // Start new session (legacy)
                             let title_str = window_title.clone().unwrap_or_default();
                             if let Err(e) = db_clone.start_new_session(
                                 &device_id,
                                 app_name.clone(),
-                                title_str,
+                                title_str.clone(),
                             ).await {
                                 log::warn!("Failed to start session: {}", e);
                             }
+
+                            // Send heartbeat to ActivityWatch bucket
+                            let heartbeat = aw_models::Heartbeat {
+                                timestamp: chrono::Utc::now(),
+                                duration: 0.0, // Initial duration, will be extended by subsequent heartbeats
+                                data: serde_json::json!({
+                                    "app": app_name,
+                                    "title": title_str
+                                }),
+                            };
+                            // Use 5 second pulsetime for merging events
+                            if let Err(e) = aw_db_clone.heartbeat(&bucket_id_clone, &heartbeat, 5.0).await {
+                                log::warn!("Failed to send AW heartbeat: {}", e);
+                            }
                         });
                     }
                 }
@@ -291,6 +334,33 @@
                     });
                 }
                 usage::UpdateAction::None => {
+                    // Send heartbeat to keep current event alive
+                    if let Some(current_app_name) = &current_app {
+                        let aw_db_clone = aw_db.clone();
+                        let bucket_id_clone = bucket_id.clone();
+                        let app_name = current_app_name.clone();
+                        tauri::async_runtime::spawn(async move {
+                            // Get the current window title
+                            let title = match crate::usage::get_active_app_with_title().await {
+                                Ok(active) => active.title,
+                                Err(_) => String::new(),
+                            };
+
let heartbeat = aw_models::Heartbeat { + timestamp: chrono::Utc::now(), + duration: 1.0, // 1 second heartbeat + data: serde_json::json!({ + "app": app_name, + "title": title + }), + }; + // Use 5 second pulsetime for merging events + if let Err(e) = aw_db_clone.heartbeat(&bucket_id_clone, &heartbeat, 5.0).await { + log::warn!("Failed to send AW heartbeat: {}", e); + } + }); + } + // Evaluate blocking for current app even when no change if let Some(current_app_name) = ¤t_app { let app_name = current_app_name.clone(); diff --git a/desktop-app/src-tauri/src/main.rs b/desktop-app/src-tauri/src/main.rs index 5176a1a..6b0fa02 100644 --- a/desktop-app/src-tauri/src/main.rs +++ b/desktop-app/src-tauri/src/main.rs @@ -4,7 +4,7 @@ use tauri::Manager; use std::sync::Arc; use sqlx::SqlitePool; -use app_lib::{database::Database, start_tracking}; +use app_lib::{database::Database, aw_database::AwDatabase, start_tracking}; use std::path::PathBuf; use tauri_plugin_sql::{Builder, Migration, MigrationKind}; use tauri::{ @@ -31,6 +31,12 @@ fn main() { sql: include_str!("../migrations/0002_block_rules.sql"), kind: MigrationKind::Up, }, + Migration { + version: 3, + description: "activitywatch_compat", + sql: include_str!("../migrations/0003_activitywatch_compat.sql"), + kind: MigrationKind::Up, + }, ]; let tauri_builder = tauri::Builder::default() @@ -111,7 +117,10 @@ fn main() { // No need for additional sqlx::migrate! call // Wrap the Database in an Arc so it can be shared safely - let db = Arc::new(Database::new(pool)); + let db = Arc::new(Database::new(pool.clone())); + + // Create ActivityWatch-compatible database + let aw_db = Arc::new(AwDatabase::new(pool)); // --- Use the local db variable for any setup work before manage --- tauri::async_runtime::block_on(async { @@ -138,11 +147,12 @@ fn main() { } }); - // Make the Database available as managed state for commands + // Make the databases available as managed state for commands app.manage(db.clone()); + app.manage(aw_db.clone()); // Start background tracking, passing the same Arc - start_tracking(db.clone(), app.handle().clone()); + start_tracking(db.clone(), aw_db.clone(), app.handle().clone()); // Setup updater events app_lib::updater::setup_updater_events(app.handle().clone()); @@ -178,6 +188,7 @@ fn main() { tauri_builder .invoke_handler(tauri::generate_handler![ + // Legacy commands app_lib::usage::get_active_app, app_lib::usage::get_active_app_with_title, app_lib::usage::check_accessibility_permissions_command, @@ -208,6 +219,24 @@ fn main() { app_lib::updater::check_for_updates, app_lib::updater::install_update, app_lib::updater::get_current_version, + // ActivityWatch-compatible commands + app_lib::aw_commands::aw_get_info, + app_lib::aw_commands::aw_get_buckets, + app_lib::aw_commands::aw_get_bucket, + app_lib::aw_commands::aw_create_bucket, + app_lib::aw_commands::aw_delete_bucket, + app_lib::aw_commands::aw_get_events, + app_lib::aw_commands::aw_get_event, + app_lib::aw_commands::aw_insert_events, + app_lib::aw_commands::aw_delete_event, + app_lib::aw_commands::aw_get_event_count, + app_lib::aw_commands::aw_heartbeat, + app_lib::aw_commands::aw_get_usage_summary, + app_lib::aw_commands::aw_get_current_event, + app_lib::aw_commands::aw_get_setting, + app_lib::aw_commands::aw_set_setting, + app_lib::aw_commands::aw_export_bucket, + app_lib::aw_commands::aw_export_all, minimize_to_tray, test_command ]) diff --git a/desktop-app/src/hooks/useActivityWatch.ts b/desktop-app/src/hooks/useActivityWatch.ts new file mode 100644 index 0000000..d0ce6fc 
--- /dev/null +++ b/desktop-app/src/hooks/useActivityWatch.ts @@ -0,0 +1,339 @@ +import { useState, useCallback } from 'react'; +import { invoke } from '@tauri-apps/api/core'; +import type { + AWBucket, + AWEvent, + AWServerInfo, + AWBucketExport, + AWUsageSummary, +} from '@/types'; + +/** + * Hook for interacting with ActivityWatch-compatible API + */ +export function useActivityWatch() { + const [loading, setLoading] = useState(false); + const [error, setError] = useState(null); + + // ========== Info ========== + + const getInfo = useCallback(async (): Promise => { + try { + setLoading(true); + setError(null); + return await invoke('aw_get_info'); + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return null; + } finally { + setLoading(false); + } + }, []); + + // ========== Buckets ========== + + const getBuckets = useCallback(async (): Promise> => { + try { + setLoading(true); + setError(null); + return await invoke>('aw_get_buckets'); + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return {}; + } finally { + setLoading(false); + } + }, []); + + const getBucket = useCallback(async (bucketId: string): Promise => { + try { + setLoading(true); + setError(null); + return await invoke('aw_get_bucket', { bucketId }); + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return null; + } finally { + setLoading(false); + } + }, []); + + const createBucket = useCallback(async ( + bucketId: string, + bucketType: string, + client: string, + hostname: string + ): Promise => { + try { + setLoading(true); + setError(null); + return await invoke('aw_create_bucket', { + bucketId, + bucketType, + client, + hostname, + }); + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return null; + } finally { + setLoading(false); + } + }, []); + + const deleteBucket = useCallback(async (bucketId: string): Promise => { + try { + setLoading(true); + setError(null); + await invoke('aw_delete_bucket', { bucketId }); + return true; + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return false; + } finally { + setLoading(false); + } + }, []); + + // ========== Events ========== + + const getEvents = useCallback(async ( + bucketId: string, + options?: { + start?: string; + end?: string; + limit?: number; + } + ): Promise => { + try { + setLoading(true); + setError(null); + return await invoke('aw_get_events', { + bucketId, + start: options?.start, + end: options?.end, + limit: options?.limit, + }); + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return []; + } finally { + setLoading(false); + } + }, []); + + const getEvent = useCallback(async (bucketId: string, eventId: number): Promise => { + try { + setLoading(true); + setError(null); + return await invoke('aw_get_event', { bucketId, eventId }); + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return null; + } finally { + setLoading(false); + } + }, []); + + const insertEvents = useCallback(async ( + bucketId: string, + events: AWEvent[] + ): Promise => { + try { + setLoading(true); + setError(null); + return await invoke('aw_insert_events', { bucketId, events }); + } catch (e) { + setError(e instanceof Error ? 
e.message : String(e)); + return []; + } finally { + setLoading(false); + } + }, []); + + const deleteEvent = useCallback(async (bucketId: string, eventId: number): Promise => { + try { + setLoading(true); + setError(null); + return await invoke('aw_delete_event', { bucketId, eventId }); + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return false; + } finally { + setLoading(false); + } + }, []); + + const getEventCount = useCallback(async (bucketId: string): Promise => { + try { + setLoading(true); + setError(null); + return await invoke('aw_get_event_count', { bucketId }); + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return 0; + } finally { + setLoading(false); + } + }, []); + + // ========== Heartbeat ========== + + const heartbeat = useCallback(async ( + bucketId: string, + timestamp: string, + duration: number, + data: Record, + pulsetime: number = 5.0 + ): Promise => { + try { + setLoading(true); + setError(null); + return await invoke('aw_heartbeat', { + bucketId, + timestamp, + duration, + data, + pulsetime, + }); + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return null; + } finally { + setLoading(false); + } + }, []); + + // ========== Query ========== + + const getUsageSummary = useCallback(async ( + bucketId: string, + options?: { + start?: string; + end?: string; + } + ): Promise => { + try { + setLoading(true); + setError(null); + const result = await invoke<[string, number][]>('aw_get_usage_summary', { + bucketId, + start: options?.start, + end: options?.end, + }); + return result.map(([app, total_seconds]) => ({ app, total_seconds })); + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return []; + } finally { + setLoading(false); + } + }, []); + + const getCurrentEvent = useCallback(async (bucketId: string): Promise => { + try { + setLoading(true); + setError(null); + return await invoke('aw_get_current_event', { bucketId }); + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return null; + } finally { + setLoading(false); + } + }, []); + + // ========== Settings ========== + + const getSetting = useCallback(async (key: string): Promise => { + try { + setLoading(true); + setError(null); + return await invoke('aw_get_setting', { key }); + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return null; + } finally { + setLoading(false); + } + }, []); + + const setSetting = useCallback(async (key: string, value: string): Promise => { + try { + setLoading(true); + setError(null); + await invoke('aw_set_setting', { key, value }); + return true; + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return false; + } finally { + setLoading(false); + } + }, []); + + // ========== Export ========== + + const exportBucket = useCallback(async (bucketId: string): Promise => { + try { + setLoading(true); + setError(null); + return await invoke('aw_export_bucket', { bucketId }); + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return null; + } finally { + setLoading(false); + } + }, []); + + const exportAll = useCallback(async (): Promise> => { + try { + setLoading(true); + setError(null); + return await invoke>('aw_export_all'); + } catch (e) { + setError(e instanceof Error ? 
e.message : String(e)); + return {}; + } finally { + setLoading(false); + } + }, []); + + return { + // State + loading, + error, + + // Info + getInfo, + + // Buckets + getBuckets, + getBucket, + createBucket, + deleteBucket, + + // Events + getEvents, + getEvent, + insertEvents, + deleteEvent, + getEventCount, + + // Heartbeat + heartbeat, + + // Query + getUsageSummary, + getCurrentEvent, + + // Settings + getSetting, + setSetting, + + // Export + exportBucket, + exportAll, + }; +} diff --git a/desktop-app/src/types/index.ts b/desktop-app/src/types/index.ts index ad7b202..31bf4a4 100644 --- a/desktop-app/src/types/index.ts +++ b/desktop-app/src/types/index.ts @@ -1,4 +1,137 @@ +// ============================================================ +// ActivityWatch-compatible types +// ============================================================ +/** + * ActivityWatch Bucket - container for events from a specific watcher + */ +export interface AWBucket { + id: string; + name?: string; + type: string; // e.g., "currentwindow", "afkstatus", "web.tab.current" + client: string; // e.g., "aw-watcher-window", "loopd", "awatcher" + hostname: string; + created: string; // ISO8601 timestamp + data?: Record; + last_updated?: string; // ISO8601 timestamp +} + +/** + * ActivityWatch Event - activity data with timestamp and duration + */ +export interface AWEvent { + id?: number; + bucket_id?: string; + timestamp: string; // ISO8601 timestamp + duration: number; // seconds + data: Record; +} + +/** + * Window event data + */ +export interface AWWindowEventData { + app: string; + title: string; +} + +/** + * AFK event data + */ +export interface AWAfkEventData { + status: 'afk' | 'not-afk'; +} + +/** + * Web tab event data + */ +export interface AWWebTabEventData { + url: string; + title: string; + audible?: boolean; + incognito?: boolean; +} + +/** + * Editor activity event data + */ +export interface AWEditorEventData { + file: string; + project: string; + language: string; +} + +/** + * Heartbeat request for efficient event submission + */ +export interface AWHeartbeat { + timestamp: string; // ISO8601 timestamp + duration: number; + data: Record; +} + +/** + * Server info response + */ +export interface AWServerInfo { + hostname: string; + version: string; + testing: boolean; + device_id: string; +} + +/** + * Bucket export format (bucket with all events) + */ +export interface AWBucketExport { + id: string; + name?: string; + type: string; + client: string; + hostname: string; + created: string; + data?: Record; + last_updated?: string; + events: AWEvent[]; +} + +/** + * Query parameters for getting events + */ +export interface AWGetEventsParams { + start?: string; // ISO8601 timestamp + end?: string; // ISO8601 timestamp + limit?: number; +} + +/** + * Usage summary entry + */ +export interface AWUsageSummary { + app: string; + total_seconds: number; +} + +// Standard bucket types +export const AW_BUCKET_TYPES = { + CURRENT_WINDOW: 'currentwindow', + AFK_STATUS: 'afkstatus', + WEB_TAB: 'web.tab.current', + EDITOR_ACTIVITY: 'app.editor.activity', +} as const; + +// Standard client names +export const AW_CLIENTS = { + LOOPD: 'loopd', + AW_WATCHER_WINDOW: 'aw-watcher-window', + AW_WATCHER_AFK: 'aw-watcher-afk', + AW_WATCHER_WEB: 'aw-watcher-web', + AWATCHER: 'awatcher', +} as const; + +// ============================================================ +// Legacy types (for backwards compatibility) +// ============================================================ // Session data types export interface 
Session { From 0280d19286ce5dcdd3d970825e2238c5daa32ff1 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 5 Feb 2026 03:15:09 +0000 Subject: [PATCH 02/18] feat: Add ActivityWatch-compatible REST API server - Add axum, tower, tower-http dependencies for HTTP server - Create aw_server.rs with full AW REST API implementation - Server runs on localhost:5600 (standard AW port) - Implement all bucket endpoints (CRUD) - Implement all event endpoints (CRUD, count) - Implement heartbeat endpoint with pulsetime merging - Implement export endpoints (single bucket and all) - Add CORS support for browser extensions - Server starts automatically on app launch This enables external watchers like Awatcher for Linux, browser extensions, and editor plugins to connect directly. https://claude.ai/code/session_01QYNePFqXFj4AaRtTDJVe6J --- desktop-app/src-tauri/Cargo.toml | 5 + desktop-app/src-tauri/src/aw_server.rs | 324 +++++++++++++++++++++++++ desktop-app/src-tauri/src/lib.rs | 1 + desktop-app/src-tauri/src/main.rs | 12 +- 4 files changed, 341 insertions(+), 1 deletion(-) create mode 100644 desktop-app/src-tauri/src/aw_server.rs diff --git a/desktop-app/src-tauri/Cargo.toml b/desktop-app/src-tauri/Cargo.toml index 10fd301..102f391 100644 --- a/desktop-app/src-tauri/Cargo.toml +++ b/desktop-app/src-tauri/Cargo.toml @@ -35,6 +35,11 @@ reqwest = { version = "0.12", features = ["json"] } tauri-plugin-store = "2.3.0" gethostname = "0.5" +# ActivityWatch REST API server +axum = "0.7" +tower = "0.5" +tower-http = { version = "0.6", features = ["cors"] } + # OS-specific dependencies for app detection [target.'cfg(windows)'.dependencies] windows = { version = "0.52", features = [ diff --git a/desktop-app/src-tauri/src/aw_server.rs b/desktop-app/src-tauri/src/aw_server.rs new file mode 100644 index 0000000..0fd6eb6 --- /dev/null +++ b/desktop-app/src-tauri/src/aw_server.rs @@ -0,0 +1,324 @@ +use axum::{ + extract::{Path, Query, State}, + http::StatusCode, + response::IntoResponse, + routing::{delete, get, post}, + Json, Router, +}; +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Value as JsonValue}; +use std::collections::HashMap; +use std::net::SocketAddr; +use std::sync::Arc; +use tower_http::cors::{Any, CorsLayer}; + +use crate::aw_database::AwDatabase; +use crate::aw_models::{Bucket, Event, GetEventsParams, Heartbeat, ServerInfo}; + +/// Shared state for the API server +#[derive(Clone)] +pub struct AppState { + pub aw_db: Arc, + pub hostname: String, + pub device_id: String, +} + +/// Start the ActivityWatch-compatible REST API server +pub async fn start_server(aw_db: Arc, hostname: String, device_id: String, port: u16) { + let state = AppState { + aw_db, + hostname, + device_id, + }; + + // Build CORS layer - allow all origins for local development + let cors = CorsLayer::new() + .allow_origin(Any) + .allow_methods(Any) + .allow_headers(Any); + + // Build the router with all ActivityWatch API routes + let app = Router::new() + // Info endpoint + .route("/api/0/info", get(get_info)) + // Bucket endpoints + .route("/api/0/buckets", get(get_buckets)) + .route("/api/0/buckets/", get(get_buckets)) + .route("/api/0/buckets/:bucket_id", get(get_bucket)) + .route("/api/0/buckets/:bucket_id", post(create_bucket)) + .route("/api/0/buckets/:bucket_id", delete(delete_bucket)) + // Event endpoints + .route("/api/0/buckets/:bucket_id/events", get(get_events)) + .route("/api/0/buckets/:bucket_id/events", post(insert_events)) + .route("/api/0/buckets/:bucket_id/events/:event_id", 
get(get_event)) + .route("/api/0/buckets/:bucket_id/events/:event_id", delete(delete_event)) + .route("/api/0/buckets/:bucket_id/events/count", get(get_event_count)) + // Heartbeat endpoint + .route("/api/0/buckets/:bucket_id/heartbeat", post(heartbeat)) + // Export endpoints + .route("/api/0/buckets/:bucket_id/export", get(export_bucket)) + .route("/api/0/export", get(export_all)) + // Add CORS and state + .layer(cors) + .with_state(state); + + let addr = SocketAddr::from(([127, 0, 0, 1], port)); + println!("[AW-SERVER] Starting ActivityWatch-compatible server on http://{}", addr); + + // Start the server + let listener = tokio::net::TcpListener::bind(addr).await; + match listener { + Ok(listener) => { + if let Err(e) = axum::serve(listener, app).await { + eprintln!("[AW-SERVER] Server error: {}", e); + } + } + Err(e) => { + eprintln!("[AW-SERVER] Failed to bind to {}: {}", addr, e); + eprintln!("[AW-SERVER] Port {} may already be in use (perhaps by aw-server?)", port); + } + } +} + +// ========== Info Endpoint ========== + +async fn get_info(State(state): State) -> Json { + Json(ServerInfo { + hostname: state.hostname, + version: env!("CARGO_PKG_VERSION").to_string(), + testing: cfg!(debug_assertions), + device_id: state.device_id, + }) +} + +// ========== Bucket Endpoints ========== + +async fn get_buckets( + State(state): State, +) -> Result>, (StatusCode, String)> { + state.aw_db + .get_buckets() + .await + .map(Json) + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string())) +} + +async fn get_bucket( + State(state): State, + Path(bucket_id): Path, +) -> Result, (StatusCode, String)> { + match state.aw_db.get_bucket(&bucket_id).await { + Ok(Some(bucket)) => Ok(Json(bucket)), + Ok(None) => Err((StatusCode::NOT_FOUND, format!("Bucket not found: {}", bucket_id))), + Err(e) => Err((StatusCode::INTERNAL_SERVER_ERROR, e.to_string())), + } +} + +#[derive(Debug, Deserialize)] +struct CreateBucketRequest { + client: String, + #[serde(rename = "type")] + bucket_type: String, + hostname: String, + #[serde(default)] + name: Option, + #[serde(default)] + data: Option, +} + +async fn create_bucket( + State(state): State, + Path(bucket_id): Path, + Json(body): Json, +) -> Result, (StatusCode, String)> { + let bucket = Bucket { + id: bucket_id, + name: body.name, + bucket_type: body.bucket_type, + client: body.client, + hostname: body.hostname, + created: Utc::now(), + data: body.data, + last_updated: None, + }; + + state.aw_db + .get_or_create_bucket(&bucket) + .await + .map(Json) + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string())) +} + +async fn delete_bucket( + State(state): State, + Path(bucket_id): Path, +) -> Result { + state.aw_db + .delete_bucket(&bucket_id) + .await + .map(|_| StatusCode::OK) + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string())) +} + +// ========== Event Endpoints ========== + +#[derive(Debug, Deserialize, Default)] +struct GetEventsQuery { + start: Option, + end: Option, + limit: Option, +} + +async fn get_events( + State(state): State, + Path(bucket_id): Path, + Query(query): Query, +) -> Result>, (StatusCode, String)> { + let params = GetEventsParams { + start: query.start.and_then(|s| DateTime::parse_from_rfc3339(&s).ok().map(|dt| dt.with_timezone(&Utc))), + end: query.end.and_then(|s| DateTime::parse_from_rfc3339(&s).ok().map(|dt| dt.with_timezone(&Utc))), + limit: query.limit, + }; + + state.aw_db + .get_events(&bucket_id, ¶ms) + .await + .map(Json) + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string())) +} + +async fn 
get_event( + State(state): State, + Path((bucket_id, event_id)): Path<(String, i64)>, +) -> Result, (StatusCode, String)> { + match state.aw_db.get_event(&bucket_id, event_id).await { + Ok(Some(event)) => Ok(Json(event)), + Ok(None) => Err((StatusCode::NOT_FOUND, format!("Event not found: {}", event_id))), + Err(e) => Err((StatusCode::INTERNAL_SERVER_ERROR, e.to_string())), + } +} + +async fn insert_events( + State(state): State, + Path(bucket_id): Path, + Json(events): Json>, +) -> Result>, (StatusCode, String)> { + state.aw_db + .insert_events(&bucket_id, &events) + .await + .map(Json) + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string())) +} + +async fn delete_event( + State(state): State, + Path((bucket_id, event_id)): Path<(String, i64)>, +) -> Result { + match state.aw_db.delete_event(&bucket_id, event_id).await { + Ok(true) => Ok(StatusCode::OK), + Ok(false) => Err((StatusCode::NOT_FOUND, format!("Event not found: {}", event_id))), + Err(e) => Err((StatusCode::INTERNAL_SERVER_ERROR, e.to_string())), + } +} + +async fn get_event_count( + State(state): State, + Path(bucket_id): Path, +) -> Result, (StatusCode, String)> { + state.aw_db + .get_event_count(&bucket_id) + .await + .map(Json) + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string())) +} + +// ========== Heartbeat Endpoint ========== + +#[derive(Debug, Deserialize)] +struct HeartbeatQuery { + #[serde(default = "default_pulsetime")] + pulsetime: f64, +} + +fn default_pulsetime() -> f64 { + 5.0 +} + +#[derive(Debug, Deserialize)] +struct HeartbeatRequest { + timestamp: String, + duration: f64, + data: JsonValue, +} + +async fn heartbeat( + State(state): State, + Path(bucket_id): Path, + Query(query): Query, + Json(body): Json, +) -> Result, (StatusCode, String)> { + let timestamp = DateTime::parse_from_rfc3339(&body.timestamp) + .map_err(|e| (StatusCode::BAD_REQUEST, format!("Invalid timestamp: {}", e)))? 
+ .with_timezone(&Utc); + + let heartbeat = Heartbeat { + timestamp, + duration: body.duration, + data: body.data, + }; + + state.aw_db + .heartbeat(&bucket_id, &heartbeat, query.pulsetime) + .await + .map(Json) + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string())) +} + +// ========== Export Endpoints ========== + +#[derive(Debug, Serialize)] +struct BucketExport { + #[serde(flatten)] + bucket: Bucket, + events: Vec, +} + +async fn export_bucket( + State(state): State, + Path(bucket_id): Path, +) -> Result, (StatusCode, String)> { + let bucket = match state.aw_db.get_bucket(&bucket_id).await { + Ok(Some(b)) => b, + Ok(None) => return Err((StatusCode::NOT_FOUND, format!("Bucket not found: {}", bucket_id))), + Err(e) => return Err((StatusCode::INTERNAL_SERVER_ERROR, e.to_string())), + }; + + let events = state.aw_db + .get_events(&bucket_id, &GetEventsParams::default()) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + Ok(Json(BucketExport { bucket, events })) +} + +async fn export_all( + State(state): State, +) -> Result>, (StatusCode, String)> { + let buckets = state.aw_db + .get_buckets() + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + let mut exports = HashMap::new(); + for (id, bucket) in buckets { + let events = state.aw_db + .get_events(&id, &GetEventsParams::default()) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?; + + exports.insert(id, BucketExport { bucket, events }); + } + + Ok(Json(exports)) +} diff --git a/desktop-app/src-tauri/src/lib.rs b/desktop-app/src-tauri/src/lib.rs index 9335fa9..920e892 100644 --- a/desktop-app/src-tauri/src/lib.rs +++ b/desktop-app/src-tauri/src/lib.rs @@ -7,6 +7,7 @@ pub mod updater; pub mod aw_models; pub mod aw_database; pub mod aw_commands; +pub mod aw_server; use blocking::BLOCKING_SYSTEM; diff --git a/desktop-app/src-tauri/src/main.rs b/desktop-app/src-tauri/src/main.rs index 6b0fa02..bd0a419 100644 --- a/desktop-app/src-tauri/src/main.rs +++ b/desktop-app/src-tauri/src/main.rs @@ -4,7 +4,7 @@ use tauri::Manager; use std::sync::Arc; use sqlx::SqlitePool; -use app_lib::{database::Database, aw_database::AwDatabase, start_tracking}; +use app_lib::{database::Database, aw_database::AwDatabase, start_tracking, aw_server, get_or_create_device_id}; use std::path::PathBuf; use tauri_plugin_sql::{Builder, Migration, MigrationKind}; use tauri::{ @@ -154,6 +154,16 @@ fn main() { // Start background tracking, passing the same Arc start_tracking(db.clone(), aw_db.clone(), app.handle().clone()); + // Start ActivityWatch-compatible REST API server on port 5600 + let aw_db_for_server = aw_db.clone(); + let hostname = gethostname::gethostname() + .to_string_lossy() + .to_string(); + let device_id = get_or_create_device_id(&app.handle()); + tauri::async_runtime::spawn(async move { + aw_server::start_server(aw_db_for_server, hostname, device_id, 5600).await; + }); + // Setup updater events app_lib::updater::setup_updater_events(app.handle().clone()); From 1b7fc37c1a6fa46eb5e90a8ec7336c6b569013db Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 5 Feb 2026 03:24:54 +0000 Subject: [PATCH 03/18] feat: Add Neon cloud sync and Query API - Added Neon PostgreSQL client for cloud sync (replaces Supabase) - Implemented Query API with aw-query compatible syntax - Added query functions: filter, merge, categorize, summarize - Updated useSync hook to work with Neon - Added useNeonSync and useQuery frontend hooks - Added regex dependency for category matching - Deprecated 
Supabase module (to be removed) https://claude.ai/code/session_01QYNePFqXFj4AaRtTDJVe6J --- desktop-app/src-tauri/Cargo.toml | 1 + desktop-app/src-tauri/src/aw_query.rs | 566 +++++++++++++++++++++++++ desktop-app/src-tauri/src/aw_server.rs | 29 ++ desktop-app/src-tauri/src/lib.rs | 2 + desktop-app/src-tauri/src/main.rs | 11 + desktop-app/src-tauri/src/neon.rs | 372 ++++++++++++++++ desktop-app/src-tauri/src/supabase.rs | 4 + desktop-app/src/hooks/useNeonSync.ts | 220 ++++++++++ desktop-app/src/hooks/useQuery.ts | 249 +++++++++++ desktop-app/src/hooks/useSync.ts | 337 +++++++++++---- desktop-app/src/types/index.ts | 41 ++ 11 files changed, 1746 insertions(+), 86 deletions(-) create mode 100644 desktop-app/src-tauri/src/aw_query.rs create mode 100644 desktop-app/src-tauri/src/neon.rs create mode 100644 desktop-app/src/hooks/useNeonSync.ts create mode 100644 desktop-app/src/hooks/useQuery.ts diff --git a/desktop-app/src-tauri/Cargo.toml b/desktop-app/src-tauri/Cargo.toml index 102f391..eb2f270 100644 --- a/desktop-app/src-tauri/Cargo.toml +++ b/desktop-app/src-tauri/Cargo.toml @@ -34,6 +34,7 @@ tokio = { version = "1.45.1", features = ["time", "full"] } reqwest = { version = "0.12", features = ["json"] } tauri-plugin-store = "2.3.0" gethostname = "0.5" +regex = "1.10" # ActivityWatch REST API server axum = "0.7" diff --git a/desktop-app/src-tauri/src/aw_query.rs b/desktop-app/src-tauri/src/aw_query.rs new file mode 100644 index 0000000..20d4275 --- /dev/null +++ b/desktop-app/src-tauri/src/aw_query.rs @@ -0,0 +1,566 @@ +use std::collections::HashMap; +use std::sync::Arc; +use chrono::{DateTime, Utc, Duration}; +use serde::{Deserialize, Serialize}; +use serde_json::{Value, json}; +use crate::aw_database::AwDatabase; +use crate::aw_models::Event; + +/// Query result type +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum QueryResult { + Events(Vec), + Summary(Vec<(String, f64)>), + Categories(HashMap), + Number(f64), + String(String), + Value(Value), +} + +/// Category rule for classifying events +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CategoryRule { + pub name: String, + #[serde(default)] + pub categories: Vec, // Hierarchical categories like ["Work", "Development"] + pub rule: CategoryRuleType, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "type")] +pub enum CategoryRuleType { + #[serde(rename = "regex")] + Regex { + #[serde(default)] + ignore_case: bool, + pattern: String + }, + #[serde(rename = "glob")] + Glob { pattern: String }, + #[serde(rename = "exact")] + Exact { app: Option, title: Option }, +} + +/// Query request structure +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct QueryRequest { + pub query: Vec, // Query statements + pub timeperiods: Vec, // Time periods in format "start/end" +} + +/// Query context for executing queries +pub struct QueryContext { + db: Arc, + variables: HashMap, + start: DateTime, + end: DateTime, +} + +impl QueryContext { + pub fn new(db: Arc, start: DateTime, end: DateTime) -> Self { + Self { + db, + variables: HashMap::new(), + start, + end, + } + } + + /// Execute a simple query and return results + pub async fn execute(&mut self, query: &str) -> Result { + let query = query.trim(); + + // Handle variable assignment + if let Some(eq_pos) = query.find('=') { + let var_name = query[..eq_pos].trim(); + let expr = query[eq_pos + 1..].trim(); + let result = self.evaluate(expr).await?; + self.variables.insert(var_name.to_string(), result.clone()); + return Ok(result); + } + + 
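        // The statement language is a small subset of aw-query: statements
        // run top to bottom, `name = expr` binds a value (handled above), and
        // `RETURN expr` yields the result (handled below). A minimal program,
        // with an illustrative bucket id:
        //
        //   events = query_bucket("aw-watcher-window_myhost")
        //   merged = merge_events_by_keys(events, ["app"])
        //   RETURN summarize_by_app(merged)
        //
        // Caveat: assignment detection is a bare `find('=')`, so a statement
        // whose string literals contain '=' would be misparsed.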
        // Handle return statement
        if query.starts_with("RETURN") || query.starts_with("return") {
            let expr = query[6..].trim();
            return self.evaluate(expr).await;
        }

        self.evaluate(query).await
    }

    /// Evaluate an expression
    async fn evaluate(&mut self, expr: &str) -> Result<QueryResult, String> {
        let expr = expr.trim();

        // Check for function calls
        if let Some(paren_start) = expr.find('(') {
            let func_name = &expr[..paren_start];
            let args_end = expr.rfind(')').ok_or("Missing closing parenthesis")?;
            let args_str = &expr[paren_start + 1..args_end];

            return self.call_function(func_name, args_str).await;
        }

        // Check for variable reference
        if let Some(result) = self.variables.get(expr) {
            return Ok(result.clone());
        }

        // Try parsing as JSON value
        if let Ok(value) = serde_json::from_str::<Value>(expr) {
            return Ok(QueryResult::Value(value));
        }

        Err(format!("Unknown expression: {}", expr))
    }

    /// Call a built-in function
    async fn call_function(&mut self, name: &str, args_str: &str) -> Result<QueryResult, String> {
        match name.to_lowercase().as_str() {
            "query_bucket" => {
                // query_bucket(bucket_id) - Get events from a bucket
                let bucket_id = args_str.trim().trim_matches('"');
                let events = self.db.get_events(
                    bucket_id,
                    Some(self.start),
                    Some(self.end),
                    None
                ).await.map_err(|e| e.to_string())?;
                Ok(QueryResult::Events(events))
            }
            "flood" => {
                // flood(events) - In aw-query, flood fills short gaps between
                // events. Gap-filling is not implemented here yet, so this is
                // a pass-through of the given events; accepting the call keeps
                // queries written for aw-server from erroring out.
                let args = self.parse_args(args_str)?;
                if args.is_empty() {
                    return Err("flood requires 1 argument".to_string());
                }
                let events = self.get_events_from_arg(&args[0]).await?;
                Ok(QueryResult::Events(events))
            }
            "filter_keyvals" => {
                // filter_keyvals(events, key, values) - Filter events by data key
                let args = self.parse_args(args_str)?;
                if args.len() < 3 {
                    return Err("filter_keyvals requires 3 arguments".to_string());
                }
                let events = self.get_events_from_arg(&args[0]).await?;
                let key = args[1].trim_matches('"');
                let values: Vec<&str> = args[2].trim_matches(|c| c == '[' || c == ']')
                    .split(',')
                    .map(|s| s.trim().trim_matches('"'))
                    .collect();

                let filtered: Vec<Event> = events.into_iter()
                    .filter(|e| {
                        if let Some(val) = e.data.get(key) {
                            values.iter().any(|v| {
                                val.as_str().map(|s| s.contains(v)).unwrap_or(false)
                            })
                        } else {
                            false
                        }
                    })
                    .collect();
                Ok(QueryResult::Events(filtered))
            }
            "exclude_keyvals" => {
                // exclude_keyvals(events, key, values) - Exclude events by data key
                let args = self.parse_args(args_str)?;
                if args.len() < 3 {
                    return Err("exclude_keyvals requires 3 arguments".to_string());
                }
                let events = self.get_events_from_arg(&args[0]).await?;
                let key = args[1].trim_matches('"');
                let values: Vec<&str> = args[2].trim_matches(|c| c == '[' || c == ']')
                    .split(',')
                    .map(|s| s.trim().trim_matches('"'))
                    .collect();

                let filtered: Vec<Event> = events.into_iter()
                    .filter(|e| {
                        if let Some(val) = e.data.get(key) {
                            !values.iter().any(|v| {
                                val.as_str().map(|s| s.contains(v)).unwrap_or(false)
                            })
                        } else {
                            true
                        }
                    })
                    .collect();
                Ok(QueryResult::Events(filtered))
            }
            "merge_events_by_keys" => {
                // merge_events_by_keys(events, keys) - Merge consecutive events with same keys
                let args = self.parse_args(args_str)?;
                if args.len() < 2 {
                    return Err("merge_events_by_keys requires 2 arguments".to_string());
                }
                let events = self.get_events_from_arg(&args[0]).await?;
                let keys: Vec<&str> = args[1].trim_matches(|c| c == '[' || c == ']')
                    .split(',')
                    .map(|s| s.trim().trim_matches('"'))
                    .collect();

                let merged = self.merge_events_by_keys(events, &keys);
                Ok(QueryResult::Events(merged))
            }
            "sort_by_duration" => {
                // sort_by_duration(events) - Sort events by duration descending
                let args = self.parse_args(args_str)?;
                if args.is_empty() {
                    return
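                    // NB: durations are f64 (not Ord), so the sort below
                    // relies on partial_cmp with an Equal fallback for NaN;
                    // f64::total_cmp would be a stricter choice.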
Err("sort_by_duration requires 1 argument".to_string()); + } + let mut events = self.get_events_from_arg(&args[0]).await?; + events.sort_by(|a, b| b.duration.partial_cmp(&a.duration).unwrap_or(std::cmp::Ordering::Equal)); + Ok(QueryResult::Events(events)) + } + "sort_by_timestamp" => { + // sort_by_timestamp(events) - Sort events by timestamp + let args = self.parse_args(args_str)?; + if args.is_empty() { + return Err("sort_by_timestamp requires 1 argument".to_string()); + } + let mut events = self.get_events_from_arg(&args[0]).await?; + events.sort_by(|a, b| a.timestamp.cmp(&b.timestamp)); + Ok(QueryResult::Events(events)) + } + "limit_events" => { + // limit_events(events, count) - Limit number of events + let args = self.parse_args(args_str)?; + if args.len() < 2 { + return Err("limit_events requires 2 arguments".to_string()); + } + let events = self.get_events_from_arg(&args[0]).await?; + let limit: usize = args[1].trim().parse().map_err(|_| "Invalid limit")?; + Ok(QueryResult::Events(events.into_iter().take(limit).collect())) + } + "sum_durations" => { + // sum_durations(events) - Sum all event durations + let args = self.parse_args(args_str)?; + if args.is_empty() { + return Err("sum_durations requires 1 argument".to_string()); + } + let events = self.get_events_from_arg(&args[0]).await?; + let total: f64 = events.iter().map(|e| e.duration).sum(); + Ok(QueryResult::Number(total)) + } + "categorize" => { + // categorize(events, categories) - Categorize events using rules + let args = self.parse_args(args_str)?; + if args.len() < 2 { + return Err("categorize requires 2 arguments".to_string()); + } + let events = self.get_events_from_arg(&args[0]).await?; + let rules: Vec = serde_json::from_str(&args[1]) + .map_err(|e| format!("Invalid category rules: {}", e))?; + + let categorized = self.categorize_events(&events, &rules); + Ok(QueryResult::Categories(categorized)) + } + "summarize_by_app" | "summarize_events" => { + // summarize_by_app(events) - Summarize events by app name + let args = self.parse_args(args_str)?; + if args.is_empty() { + return Err("summarize_by_app requires 1 argument".to_string()); + } + let events = self.get_events_from_arg(&args[0]).await?; + + let mut summary: HashMap = HashMap::new(); + for event in events { + let app = event.data.get("app") + .and_then(|v| v.as_str()) + .unwrap_or("Unknown") + .to_string(); + *summary.entry(app).or_insert(0.0) += event.duration; + } + + let mut sorted: Vec<(String, f64)> = summary.into_iter().collect(); + sorted.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal)); + Ok(QueryResult::Summary(sorted)) + } + "concat" => { + // concat(events1, events2, ...) 
- Concatenate event lists + let args = self.parse_args(args_str)?; + let mut all_events = Vec::new(); + for arg in args { + let events = self.get_events_from_arg(&arg).await?; + all_events.extend(events); + } + Ok(QueryResult::Events(all_events)) + } + "nop" => { + // nop() - No operation, returns empty events + Ok(QueryResult::Events(vec![])) + } + _ => Err(format!("Unknown function: {}", name)) + } + } + + /// Parse function arguments + fn parse_args(&self, args_str: &str) -> Result, String> { + let mut args = Vec::new(); + let mut current = String::new(); + let mut depth = 0; + let mut in_string = false; + let mut escape = false; + + for c in args_str.chars() { + if escape { + current.push(c); + escape = false; + continue; + } + + match c { + '\\' => escape = true, + '"' => { + in_string = !in_string; + current.push(c); + } + '(' | '[' | '{' if !in_string => { + depth += 1; + current.push(c); + } + ')' | ']' | '}' if !in_string => { + depth -= 1; + current.push(c); + } + ',' if !in_string && depth == 0 => { + args.push(current.trim().to_string()); + current = String::new(); + } + _ => current.push(c), + } + } + + if !current.trim().is_empty() { + args.push(current.trim().to_string()); + } + + Ok(args) + } + + /// Get events from an argument (variable reference or expression) + async fn get_events_from_arg(&mut self, arg: &str) -> Result, String> { + let arg = arg.trim(); + + // Check if it's a variable reference + if let Some(QueryResult::Events(events)) = self.variables.get(arg) { + return Ok(events.clone()); + } + + // Try to evaluate as expression + match self.evaluate(arg).await? { + QueryResult::Events(events) => Ok(events), + _ => Err(format!("Expected events, got other type for: {}", arg)) + } + } + + /// Merge consecutive events with same key values + fn merge_events_by_keys(&self, events: Vec, keys: &[&str]) -> Vec { + if events.is_empty() { + return events; + } + + let mut merged = Vec::new(); + let mut current = events[0].clone(); + + for event in events.into_iter().skip(1) { + let same_keys = keys.iter().all(|key| { + current.data.get(*key) == event.data.get(*key) + }); + + if same_keys { + // Extend duration + current.duration += event.duration; + } else { + merged.push(current); + current = event; + } + } + + merged.push(current); + merged + } + + /// Categorize events using rules + fn categorize_events(&self, events: &[Event], rules: &[CategoryRule]) -> HashMap { + let mut categories: HashMap = HashMap::new(); + + for event in events { + let app = event.data.get("app").and_then(|v| v.as_str()).unwrap_or(""); + let title = event.data.get("title").and_then(|v| v.as_str()).unwrap_or(""); + + let mut matched = false; + for rule in rules { + if self.matches_rule(app, title, &rule.rule) { + let category = if rule.categories.is_empty() { + rule.name.clone() + } else { + rule.categories.join(" > ") + }; + *categories.entry(category).or_insert(0.0) += event.duration; + matched = true; + break; + } + } + + if !matched { + *categories.entry("Uncategorized".to_string()).or_insert(0.0) += event.duration; + } + } + + categories + } + + /// Check if app/title matches a rule + fn matches_rule(&self, app: &str, title: &str, rule: &CategoryRuleType) -> bool { + match rule { + CategoryRuleType::Regex { pattern, ignore_case } => { + let regex = if *ignore_case { + regex::RegexBuilder::new(pattern) + .case_insensitive(true) + .build() + } else { + regex::Regex::new(pattern) + }; + match regex { + Ok(re) => re.is_match(app) || re.is_match(title), + Err(_) => false, + } + } + 
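            // Note: the Glob arm below builds its regex by direct string
            // substitution, so regex metacharacters in the glob ('.', '+',
            // '(') pass through unescaped and the match is unanchored. A
            // stricter sketch, if full-string glob semantics are wanted:
            //
            //   let escaped = regex::escape(pattern)
            //       .replace(r"\*", ".*")
            //       .replace(r"\?", ".");
            //   let re = regex::Regex::new(&format!("^{}$", escaped));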
CategoryRuleType::Glob { pattern } => { + let pattern = pattern.replace("*", ".*").replace("?", "."); + if let Ok(re) = regex::Regex::new(&pattern) { + re.is_match(app) || re.is_match(title) + } else { + false + } + } + CategoryRuleType::Exact { app: rule_app, title: rule_title } => { + let app_match = rule_app.as_ref().map(|a| a == app).unwrap_or(true); + let title_match = rule_title.as_ref().map(|t| t == title).unwrap_or(true); + app_match && title_match + } + } + } +} + +/// Parse a time period string "start/end" into two DateTime values +pub fn parse_timeperiod(period: &str) -> Result<(DateTime, DateTime), String> { + let parts: Vec<&str> = period.split('/').collect(); + if parts.len() != 2 { + return Err("Invalid time period format, expected 'start/end'".to_string()); + } + + let start = DateTime::parse_from_rfc3339(parts[0]) + .map_err(|e| format!("Invalid start time: {}", e))? + .with_timezone(&Utc); + + let end = DateTime::parse_from_rfc3339(parts[1]) + .map_err(|e| format!("Invalid end time: {}", e))? + .with_timezone(&Utc); + + Ok((start, end)) +} + +/// Execute a query request +pub async fn execute_query( + db: Arc, + request: QueryRequest, +) -> Result, String> { + let mut results = Vec::new(); + + for period in &request.timeperiods { + let (start, end) = parse_timeperiod(period)?; + let mut ctx = QueryContext::new(db.clone(), start, end); + + let mut last_result = QueryResult::Events(vec![]); + for statement in &request.query { + last_result = ctx.execute(statement).await?; + } + results.push(last_result); + } + + Ok(results) +} + +// ========== Tauri Commands ========== + +#[tauri::command] +pub async fn aw_query( + aw_db: tauri::State<'_, crate::AwDb>, + query: Vec, + timeperiods: Vec, +) -> Result, String> { + let request = QueryRequest { query, timeperiods }; + let results = execute_query(aw_db.inner().clone(), request).await?; + + // Convert results to JSON values + let json_results: Vec = results.into_iter().map(|r| { + match r { + QueryResult::Events(events) => serde_json::to_value(events).unwrap_or(Value::Null), + QueryResult::Summary(summary) => serde_json::to_value(summary).unwrap_or(Value::Null), + QueryResult::Categories(cats) => serde_json::to_value(cats).unwrap_or(Value::Null), + QueryResult::Number(n) => json!(n), + QueryResult::String(s) => json!(s), + QueryResult::Value(v) => v, + } + }).collect(); + + Ok(json_results) +} + +#[tauri::command] +pub async fn aw_categorize( + aw_db: tauri::State<'_, crate::AwDb>, + bucket_id: String, + rules: Vec, + start: Option, + end: Option, +) -> Result, String> { + let start_dt = start.as_ref() + .and_then(|s| DateTime::parse_from_rfc3339(s).ok()) + .map(|dt| dt.with_timezone(&Utc)) + .unwrap_or_else(|| Utc::now() - Duration::days(1)); + + let end_dt = end.as_ref() + .and_then(|s| DateTime::parse_from_rfc3339(s).ok()) + .map(|dt| dt.with_timezone(&Utc)) + .unwrap_or_else(Utc::now); + + let events = aw_db.get_events(&bucket_id, Some(start_dt), Some(end_dt), None) + .await + .map_err(|e| e.to_string())?; + + let ctx = QueryContext::new(aw_db.inner().clone(), start_dt, end_dt); + Ok(ctx.categorize_events(&events, &rules)) +} + +#[tauri::command] +pub async fn aw_summarize( + aw_db: tauri::State<'_, crate::AwDb>, + bucket_id: String, + group_by: String, + start: Option, + end: Option, +) -> Result, String> { + let start_dt = start.as_ref() + .and_then(|s| DateTime::parse_from_rfc3339(s).ok()) + .map(|dt| dt.with_timezone(&Utc)) + .unwrap_or_else(|| Utc::now() - Duration::days(1)); + + let end_dt = end.as_ref() + .and_then(|s| 
DateTime::parse_from_rfc3339(s).ok()) + .map(|dt| dt.with_timezone(&Utc)) + .unwrap_or_else(Utc::now); + + let events = aw_db.get_events(&bucket_id, Some(start_dt), Some(end_dt), None) + .await + .map_err(|e| e.to_string())?; + + let mut summary: HashMap = HashMap::new(); + for event in events { + let key = event.data.get(&group_by) + .and_then(|v| v.as_str()) + .unwrap_or("Unknown") + .to_string(); + *summary.entry(key).or_insert(0.0) += event.duration; + } + + let mut sorted: Vec<(String, f64)> = summary.into_iter().collect(); + sorted.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal)); + Ok(sorted) +} diff --git a/desktop-app/src-tauri/src/aw_server.rs b/desktop-app/src-tauri/src/aw_server.rs index 0fd6eb6..d989138 100644 --- a/desktop-app/src-tauri/src/aw_server.rs +++ b/desktop-app/src-tauri/src/aw_server.rs @@ -15,6 +15,7 @@ use tower_http::cors::{Any, CorsLayer}; use crate::aw_database::AwDatabase; use crate::aw_models::{Bucket, Event, GetEventsParams, Heartbeat, ServerInfo}; +use crate::aw_query::{QueryRequest, execute_query}; /// Shared state for the API server #[derive(Clone)] @@ -59,6 +60,9 @@ pub async fn start_server(aw_db: Arc, hostname: String, device_id: S // Export endpoints .route("/api/0/buckets/:bucket_id/export", get(export_bucket)) .route("/api/0/export", get(export_all)) + // Query endpoint + .route("/api/0/query", post(query)) + .route("/api/0/query/", post(query)) // Add CORS and state .layer(cors) .with_state(state); @@ -322,3 +326,28 @@ async fn export_all( Ok(Json(exports)) } + +// ========== Query Endpoint ========== + +async fn query( + State(state): State, + Json(request): Json, +) -> Result>, (StatusCode, String)> { + let results = execute_query(state.aw_db.clone(), request) + .await + .map_err(|e| (StatusCode::BAD_REQUEST, e))?; + + // Convert results to JSON values + let json_results: Vec = results.into_iter().map(|r| { + match r { + crate::aw_query::QueryResult::Events(events) => serde_json::to_value(events).unwrap_or(JsonValue::Null), + crate::aw_query::QueryResult::Summary(summary) => serde_json::to_value(summary).unwrap_or(JsonValue::Null), + crate::aw_query::QueryResult::Categories(cats) => serde_json::to_value(cats).unwrap_or(JsonValue::Null), + crate::aw_query::QueryResult::Number(n) => json!(n), + crate::aw_query::QueryResult::String(s) => json!(s), + crate::aw_query::QueryResult::Value(v) => v, + } + }).collect(); + + Ok(Json(json_results)) +} diff --git a/desktop-app/src-tauri/src/lib.rs b/desktop-app/src-tauri/src/lib.rs index 920e892..b6dbb70 100644 --- a/desktop-app/src-tauri/src/lib.rs +++ b/desktop-app/src-tauri/src/lib.rs @@ -8,6 +8,8 @@ pub mod aw_models; pub mod aw_database; pub mod aw_commands; pub mod aw_server; +pub mod aw_query; +pub mod neon; use blocking::BLOCKING_SYSTEM; diff --git a/desktop-app/src-tauri/src/main.rs b/desktop-app/src-tauri/src/main.rs index bd0a419..1d337f5 100644 --- a/desktop-app/src-tauri/src/main.rs +++ b/desktop-app/src-tauri/src/main.rs @@ -247,6 +247,17 @@ fn main() { app_lib::aw_commands::aw_set_setting, app_lib::aw_commands::aw_export_bucket, app_lib::aw_commands::aw_export_all, + // Query API commands + app_lib::aw_query::aw_query, + app_lib::aw_query::aw_categorize, + app_lib::aw_query::aw_summarize, + // Neon cloud sync commands + app_lib::neon::neon_test_connection, + app_lib::neon::neon_init_schema, + app_lib::neon::neon_sync_events, + app_lib::neon::neon_sync_bucket, + app_lib::neon::neon_get_events, + app_lib::neon::neon_get_buckets, minimize_to_tray, test_command ]) diff 
--git a/desktop-app/src-tauri/src/neon.rs b/desktop-app/src-tauri/src/neon.rs new file mode 100644 index 0000000..024532f --- /dev/null +++ b/desktop-app/src-tauri/src/neon.rs @@ -0,0 +1,372 @@ +use anyhow::Result; +use reqwest::Client; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use chrono::{DateTime, Utc}; + +use crate::aw_models::Event; + +/// Neon event record for cloud storage +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NeonEvent { + pub id: Option, + pub bucket_id: String, + pub device_id: String, + pub timestamp: String, + pub duration: f64, + pub data: Value, + pub synced_at: Option, +} + +impl NeonEvent { + pub fn from_event(event: &Event, bucket_id: &str, device_id: &str) -> Self { + Self { + id: event.id, + bucket_id: bucket_id.to_string(), + device_id: device_id.to_string(), + timestamp: event.timestamp.to_rfc3339(), + duration: event.duration, + data: event.data.clone(), + synced_at: None, + } + } +} + +/// Neon bucket record for cloud storage +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NeonBucket { + pub id: String, + pub device_id: String, + pub name: Option, + pub bucket_type: String, + pub client: String, + pub hostname: String, + pub created: String, + pub data: Option, +} + +/// Neon client for cloud sync using Neon's serverless driver HTTP API +pub struct NeonClient { + client: Client, + connection_string: String, +} + +impl NeonClient { + pub fn new(connection_string: String) -> Self { + let client = Client::new(); + Self { + client, + connection_string, + } + } + + /// Execute a SQL query via Neon's HTTP API + async fn execute(&self, query: &str, params: &[Value]) -> Result> { + // Neon serverless driver uses a specific HTTP endpoint + // Format: https:///sql + let url = format!("{}/sql", self.connection_string.replace("postgres://", "https://").replace("postgresql://", "https://")); + + let body = serde_json::json!({ + "query": query, + "params": params + }); + + let response = self.client + .post(&url) + .header("Content-Type", "application/json") + .header("Neon-Connection-String", &self.connection_string) + .json(&body) + .send() + .await?; + + let status = response.status(); + let text = response.text().await?; + + if !status.is_success() { + return Err(anyhow::anyhow!("Neon request failed: {}", text)); + } + + let result: Value = serde_json::from_str(&text)?; + + // Extract rows from response + if let Some(rows) = result.get("rows").and_then(|r| r.as_array()) { + Ok(rows.clone()) + } else { + Ok(vec![]) + } + } + + /// Initialize the database schema + pub async fn init_schema(&self) -> Result<()> { + // Create buckets table + self.execute( + r#" + CREATE TABLE IF NOT EXISTS buckets ( + id TEXT PRIMARY KEY, + device_id TEXT NOT NULL, + name TEXT, + bucket_type TEXT NOT NULL, + client TEXT NOT NULL, + hostname TEXT NOT NULL, + created TIMESTAMPTZ NOT NULL, + data JSONB, + created_at TIMESTAMPTZ DEFAULT NOW() + ) + "#, + &[], + ).await?; + + // Create events table + self.execute( + r#" + CREATE TABLE IF NOT EXISTS events ( + id BIGSERIAL PRIMARY KEY, + local_id BIGINT, + bucket_id TEXT NOT NULL REFERENCES buckets(id) ON DELETE CASCADE, + device_id TEXT NOT NULL, + timestamp TIMESTAMPTZ NOT NULL, + duration DOUBLE PRECISION NOT NULL DEFAULT 0, + data JSONB NOT NULL, + synced_at TIMESTAMPTZ DEFAULT NOW(), + UNIQUE(bucket_id, device_id, local_id) + ) + "#, + &[], + ).await?; + + // Create indexes + self.execute( + "CREATE INDEX IF NOT EXISTS idx_events_bucket_id ON events(bucket_id)", + &[], + ).await?; + + 
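        // The UNIQUE(bucket_id, device_id, local_id) constraint created above
        // is what makes sync idempotent: sync_events targets it with
        // ON CONFLICT ... DO UPDATE, so re-running a sync updates existing
        // rows in place instead of duplicating them.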
self.execute( + "CREATE INDEX IF NOT EXISTS idx_events_timestamp ON events(timestamp)", + &[], + ).await?; + + self.execute( + "CREATE INDEX IF NOT EXISTS idx_events_device_id ON events(device_id)", + &[], + ).await?; + + Ok(()) + } + + /// Test the connection + pub async fn test_connection(&self) -> Result<()> { + self.execute("SELECT 1", &[]).await?; + Ok(()) + } + + /// Sync a bucket to Neon + pub async fn sync_bucket(&self, bucket: &NeonBucket) -> Result<()> { + self.execute( + r#" + INSERT INTO buckets (id, device_id, name, bucket_type, client, hostname, created, data) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + ON CONFLICT (id) DO UPDATE SET + name = EXCLUDED.name, + data = EXCLUDED.data + "#, + &[ + Value::String(bucket.id.clone()), + Value::String(bucket.device_id.clone()), + bucket.name.clone().map(Value::String).unwrap_or(Value::Null), + Value::String(bucket.bucket_type.clone()), + Value::String(bucket.client.clone()), + Value::String(bucket.hostname.clone()), + Value::String(bucket.created.clone()), + bucket.data.clone().unwrap_or(Value::Null), + ], + ).await?; + Ok(()) + } + + /// Sync events to Neon + pub async fn sync_events(&self, events: &[NeonEvent]) -> Result { + if events.is_empty() { + return Ok(0); + } + + let mut synced = 0; + for event in events { + let result = self.execute( + r#" + INSERT INTO events (local_id, bucket_id, device_id, timestamp, duration, data) + VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT (bucket_id, device_id, local_id) DO UPDATE SET + duration = EXCLUDED.duration, + data = EXCLUDED.data + "#, + &[ + event.id.map(|id| Value::Number(id.into())).unwrap_or(Value::Null), + Value::String(event.bucket_id.clone()), + Value::String(event.device_id.clone()), + Value::String(event.timestamp.clone()), + Value::Number(serde_json::Number::from_f64(event.duration).unwrap_or(serde_json::Number::from(0))), + event.data.clone(), + ], + ).await; + + if result.is_ok() { + synced += 1; + } + } + + Ok(synced) + } + + /// Get events from Neon for a specific device + pub async fn get_events( + &self, + device_id: &str, + bucket_id: Option<&str>, + start: Option>, + end: Option>, + limit: Option, + ) -> Result> { + let mut query = String::from( + "SELECT local_id as id, bucket_id, device_id, timestamp, duration, data, synced_at FROM events WHERE device_id = $1" + ); + let mut params: Vec = vec![Value::String(device_id.to_string())]; + let mut param_idx = 2; + + if let Some(bid) = bucket_id { + query.push_str(&format!(" AND bucket_id = ${}", param_idx)); + params.push(Value::String(bid.to_string())); + param_idx += 1; + } + + if let Some(s) = start { + query.push_str(&format!(" AND timestamp >= ${}", param_idx)); + params.push(Value::String(s.to_rfc3339())); + param_idx += 1; + } + + if let Some(e) = end { + query.push_str(&format!(" AND timestamp <= ${}", param_idx)); + params.push(Value::String(e.to_rfc3339())); + param_idx += 1; + } + + query.push_str(" ORDER BY timestamp DESC"); + + if let Some(l) = limit { + query.push_str(&format!(" LIMIT {}", l)); + } + + let rows = self.execute(&query, ¶ms).await?; + + let events: Vec = rows.iter().filter_map(|row| { + serde_json::from_value(row.clone()).ok() + }).collect(); + + Ok(events) + } + + /// Get buckets for a device + pub async fn get_buckets(&self, device_id: &str) -> Result> { + let rows = self.execute( + "SELECT id, device_id, name, bucket_type, client, hostname, created, data FROM buckets WHERE device_id = $1", + &[Value::String(device_id.to_string())], + ).await?; + + let buckets: Vec = rows.iter().filter_map(|row| 
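            // serde-based row decoding: any row whose JSON shape does not
            // match NeonBucket is silently dropped by the `.ok()` below, so
            // the column names in the SELECT above must line up exactly with
            // the struct's field names.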
{ + serde_json::from_value(row.clone()).ok() + }).collect(); + + Ok(buckets) + } + + /// Get last sync timestamp for a device + pub async fn get_last_sync(&self, device_id: &str) -> Result>> { + let rows = self.execute( + "SELECT MAX(synced_at) as last_sync FROM events WHERE device_id = $1", + &[Value::String(device_id.to_string())], + ).await?; + + if let Some(row) = rows.first() { + if let Some(last_sync) = row.get("last_sync").and_then(|v| v.as_str()) { + if let Ok(dt) = DateTime::parse_from_rfc3339(last_sync) { + return Ok(Some(dt.with_timezone(&Utc))); + } + } + } + + Ok(None) + } + + /// Delete events older than a certain date + pub async fn delete_old_events(&self, device_id: &str, before: DateTime) -> Result { + let rows = self.execute( + "DELETE FROM events WHERE device_id = $1 AND timestamp < $2 RETURNING id", + &[ + Value::String(device_id.to_string()), + Value::String(before.to_rfc3339()), + ], + ).await?; + + Ok(rows.len() as u64) + } +} + +// ========== Tauri Commands ========== + +#[tauri::command] +pub async fn neon_test_connection(connection_string: String) -> Result<(), String> { + let client = NeonClient::new(connection_string); + client.test_connection().await.map_err(|e| e.to_string()) +} + +#[tauri::command] +pub async fn neon_init_schema(connection_string: String) -> Result<(), String> { + let client = NeonClient::new(connection_string); + client.init_schema().await.map_err(|e| e.to_string()) +} + +#[tauri::command] +pub async fn neon_sync_events( + connection_string: String, + events: Vec, +) -> Result { + let client = NeonClient::new(connection_string); + client.sync_events(&events).await.map_err(|e| e.to_string()) +} + +#[tauri::command] +pub async fn neon_sync_bucket( + connection_string: String, + bucket: NeonBucket, +) -> Result<(), String> { + let client = NeonClient::new(connection_string); + client.sync_bucket(&bucket).await.map_err(|e| e.to_string()) +} + +#[tauri::command] +pub async fn neon_get_events( + connection_string: String, + device_id: String, + bucket_id: Option, + start: Option, + end: Option, + limit: Option, +) -> Result, String> { + let client = NeonClient::new(connection_string); + + let start_dt = start.and_then(|s| DateTime::parse_from_rfc3339(&s).ok().map(|dt| dt.with_timezone(&Utc))); + let end_dt = end.and_then(|s| DateTime::parse_from_rfc3339(&s).ok().map(|dt| dt.with_timezone(&Utc))); + + client.get_events(&device_id, bucket_id.as_deref(), start_dt, end_dt, limit) + .await + .map_err(|e| e.to_string()) +} + +#[tauri::command] +pub async fn neon_get_buckets( + connection_string: String, + device_id: String, +) -> Result, String> { + let client = NeonClient::new(connection_string); + client.get_buckets(&device_id).await.map_err(|e| e.to_string()) +} diff --git a/desktop-app/src-tauri/src/supabase.rs b/desktop-app/src-tauri/src/supabase.rs index 1ebba82..1429065 100644 --- a/desktop-app/src-tauri/src/supabase.rs +++ b/desktop-app/src-tauri/src/supabase.rs @@ -1,3 +1,7 @@ +//! DEPRECATED: This module is being replaced by neon.rs for cloud sync. +//! Supabase sync will be removed in a future version. +//! Please migrate to use the Neon cloud sync functionality instead. 
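+//!
+//! Migration sketch: where the frontend previously invoked
+//! `sync_data_command`, the Neon flow is `neon_init_schema` once, then
+//! `neon_sync_bucket` and `neon_sync_events` on each sync, passing the
+//! user-supplied connection string (how the UI persists that string is not
+//! prescribed here).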
+ use anyhow::Result; use reqwest::Client; use serde::{Deserialize, Serialize}; diff --git a/desktop-app/src/hooks/useNeonSync.ts b/desktop-app/src/hooks/useNeonSync.ts new file mode 100644 index 0000000..34eb32f --- /dev/null +++ b/desktop-app/src/hooks/useNeonSync.ts @@ -0,0 +1,220 @@ +import { useState, useCallback, useRef } from 'react'; +import { invoke } from '@tauri-apps/api/core'; +import type { NeonEvent, NeonBucket, NeonSyncStatus } from '@/types'; + +/** + * Hook for syncing data with Neon cloud database + */ +export function useNeonSync() { + const [loading, setLoading] = useState(false); + const [error, setError] = useState(null); + const [syncStatus, setSyncStatus] = useState({ + connected: false, + pendingEvents: 0, + }); + const connectionStringRef = useRef(null); + + // ========== Connection ========== + + const setConnectionString = useCallback((connectionString: string) => { + connectionStringRef.current = connectionString; + }, []); + + const testConnection = useCallback(async (connectionString?: string): Promise => { + const connStr = connectionString || connectionStringRef.current; + if (!connStr) { + setError('No connection string provided'); + return false; + } + + try { + setLoading(true); + setError(null); + await invoke('neon_test_connection', { connectionString: connStr }); + setSyncStatus(prev => ({ ...prev, connected: true, error: undefined })); + if (connectionString) { + connectionStringRef.current = connectionString; + } + return true; + } catch (e) { + const errorMsg = e instanceof Error ? e.message : String(e); + setError(errorMsg); + setSyncStatus(prev => ({ ...prev, connected: false, error: errorMsg })); + return false; + } finally { + setLoading(false); + } + }, []); + + const initSchema = useCallback(async (connectionString?: string): Promise => { + const connStr = connectionString || connectionStringRef.current; + if (!connStr) { + setError('No connection string provided'); + return false; + } + + try { + setLoading(true); + setError(null); + await invoke('neon_init_schema', { connectionString: connStr }); + return true; + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return false; + } finally { + setLoading(false); + } + }, []); + + // ========== Sync Operations ========== + + const syncBucket = useCallback(async ( + bucket: NeonBucket, + connectionString?: string + ): Promise => { + const connStr = connectionString || connectionStringRef.current; + if (!connStr) { + setError('No connection string provided'); + return false; + } + + try { + setLoading(true); + setError(null); + await invoke('neon_sync_bucket', { connectionString: connStr, bucket }); + return true; + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return false; + } finally { + setLoading(false); + } + }, []); + + const syncEvents = useCallback(async ( + events: NeonEvent[], + connectionString?: string + ): Promise => { + const connStr = connectionString || connectionStringRef.current; + if (!connStr) { + setError('No connection string provided'); + return 0; + } + + try { + setLoading(true); + setError(null); + const synced = await invoke('neon_sync_events', { + connectionString: connStr, + events, + }); + setSyncStatus(prev => ({ + ...prev, + lastSync: new Date().toISOString(), + pendingEvents: Math.max(0, prev.pendingEvents - synced), + })); + return synced; + } catch (e) { + setError(e instanceof Error ? 
e.message : String(e)); + return 0; + } finally { + setLoading(false); + } + }, []); + + // ========== Query Operations ========== + + const getEvents = useCallback(async ( + deviceId: string, + options?: { + bucketId?: string; + start?: string; + end?: string; + limit?: number; + }, + connectionString?: string + ): Promise => { + const connStr = connectionString || connectionStringRef.current; + if (!connStr) { + setError('No connection string provided'); + return []; + } + + try { + setLoading(true); + setError(null); + return await invoke('neon_get_events', { + connectionString: connStr, + deviceId, + bucketId: options?.bucketId, + start: options?.start, + end: options?.end, + limit: options?.limit, + }); + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return []; + } finally { + setLoading(false); + } + }, []); + + const getBuckets = useCallback(async ( + deviceId: string, + connectionString?: string + ): Promise => { + const connStr = connectionString || connectionStringRef.current; + if (!connStr) { + setError('No connection string provided'); + return []; + } + + try { + setLoading(true); + setError(null); + return await invoke('neon_get_buckets', { + connectionString: connStr, + deviceId, + }); + } catch (e) { + setError(e instanceof Error ? e.message : String(e)); + return []; + } finally { + setLoading(false); + } + }, []); + + // ========== Utility ========== + + const clearError = useCallback(() => { + setError(null); + }, []); + + const updatePendingCount = useCallback((count: number) => { + setSyncStatus(prev => ({ ...prev, pendingEvents: count })); + }, []); + + return { + // State + loading, + error, + syncStatus, + + // Connection + setConnectionString, + testConnection, + initSchema, + + // Sync + syncBucket, + syncEvents, + + // Query + getEvents, + getBuckets, + + // Utility + clearError, + updatePendingCount, + }; +} diff --git a/desktop-app/src/hooks/useQuery.ts b/desktop-app/src/hooks/useQuery.ts new file mode 100644 index 0000000..cd25fa9 --- /dev/null +++ b/desktop-app/src/hooks/useQuery.ts @@ -0,0 +1,249 @@ +import { useState, useCallback } from 'react'; +import { invoke } from '@tauri-apps/api/core'; +import type { AWEvent } from '@/types'; + +/** + * Category rule for classifying events + */ +export interface CategoryRule { + name: string; + categories?: string[]; // Hierarchical categories like ["Work", "Development"] + rule: CategoryRuleType; +} + +export type CategoryRuleType = + | { type: 'regex'; pattern: string; ignore_case?: boolean } + | { type: 'glob'; pattern: string } + | { type: 'exact'; app?: string; title?: string }; + +/** + * Query result types + */ +export type QueryResult = AWEvent[] | [string, number][] | Record | number | string; + +/** + * Hook for running ActivityWatch-compatible queries + */ +export function useQuery() { + const [loading, setLoading] = useState(false); + const [error, setError] = useState(null); + + /** + * Run a query with the ActivityWatch query language + * @param query Array of query statements + * @param timeperiods Array of time periods in "start/end" ISO format + */ + const runQuery = useCallback(async ( + query: string[], + timeperiods: string[] + ): Promise => { + try { + setLoading(true); + setError(null); + return await invoke('aw_query', { query, timeperiods }); + } catch (e) { + const errorMsg = e instanceof Error ? 
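      // Example, with an illustrative bucket id - per-app totals for one day:
      //
      //   const [summary] = await runQuery(
      //     ['events = query_bucket("aw-watcher-window_myhost")',
      //      'RETURN summarize_by_app(events)'],
      //     ['2026-02-04T00:00:00Z/2026-02-05T00:00:00Z'],
      //   );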
e.message : String(e); + setError(errorMsg); + return []; + } finally { + setLoading(false); + } + }, []); + + /** + * Categorize events using rules + * @param bucketId The bucket to get events from + * @param rules Array of category rules + * @param start Optional start time (ISO string) + * @param end Optional end time (ISO string) + */ + const categorize = useCallback(async ( + bucketId: string, + rules: CategoryRule[], + start?: string, + end?: string + ): Promise> => { + try { + setLoading(true); + setError(null); + return await invoke>('aw_categorize', { + bucketId, + rules, + start, + end, + }); + } catch (e) { + const errorMsg = e instanceof Error ? e.message : String(e); + setError(errorMsg); + return {}; + } finally { + setLoading(false); + } + }, []); + + /** + * Summarize events by a specific field + * @param bucketId The bucket to get events from + * @param groupBy The field to group by (e.g., "app", "title") + * @param start Optional start time (ISO string) + * @param end Optional end time (ISO string) + */ + const summarize = useCallback(async ( + bucketId: string, + groupBy: string, + start?: string, + end?: string + ): Promise<[string, number][]> => { + try { + setLoading(true); + setError(null); + return await invoke<[string, number][]>('aw_summarize', { + bucketId, + groupBy, + start, + end, + }); + } catch (e) { + const errorMsg = e instanceof Error ? e.message : String(e); + setError(errorMsg); + return []; + } finally { + setLoading(false); + } + }, []); + + /** + * Get events from a bucket for a time period + */ + const getEventsForPeriod = useCallback(async ( + bucketId: string, + start: string, + end: string + ): Promise => { + const query = [ + `events = query_bucket("${bucketId}")`, + 'RETURN events' + ]; + const timeperiod = `${start}/${end}`; + const results = await runQuery(query, [timeperiod]); + return (results[0] as AWEvent[]) || []; + }, [runQuery]); + + /** + * Get usage summary (time per app) for a time period + */ + const getUsageSummary = useCallback(async ( + bucketId: string, + start: string, + end: string + ): Promise<[string, number][]> => { + const query = [ + `events = query_bucket("${bucketId}")`, + 'merged = merge_events_by_keys(events, ["app"])', + 'summary = summarize_by_app(merged)', + 'RETURN summary' + ]; + const timeperiod = `${start}/${end}`; + const results = await runQuery(query, [timeperiod]); + return (results[0] as [string, number][]) || []; + }, [runQuery]); + + /** + * Get total active time for a period + */ + const getTotalTime = useCallback(async ( + bucketId: string, + start: string, + end: string + ): Promise => { + const query = [ + `events = query_bucket("${bucketId}")`, + 'total = sum_durations(events)', + 'RETURN total' + ]; + const timeperiod = `${start}/${end}`; + const results = await runQuery(query, [timeperiod]); + return (results[0] as number) || 0; + }, [runQuery]); + + /** + * Filter events by app name + */ + const filterByApp = useCallback(async ( + bucketId: string, + appNames: string[], + start: string, + end: string + ): Promise => { + const appsJson = JSON.stringify(appNames); + const query = [ + `events = query_bucket("${bucketId}")`, + `filtered = filter_keyvals(events, "app", ${appsJson})`, + 'RETURN filtered' + ]; + const timeperiod = `${start}/${end}`; + const results = await runQuery(query, [timeperiod]); + return (results[0] as AWEvent[]) || []; + }, [runQuery]); + + /** + * Exclude events by app name + */ + const excludeApps = useCallback(async ( + bucketId: string, + appNames: string[], + start: 
string, + end: string + ): Promise => { + const appsJson = JSON.stringify(appNames); + const query = [ + `events = query_bucket("${bucketId}")`, + `filtered = exclude_keyvals(events, "app", ${appsJson})`, + 'RETURN filtered' + ]; + const timeperiod = `${start}/${end}`; + const results = await runQuery(query, [timeperiod]); + return (results[0] as AWEvent[]) || []; + }, [runQuery]); + + /** + * Get top apps by usage time + */ + const getTopApps = useCallback(async ( + bucketId: string, + start: string, + end: string, + limit: number = 10 + ): Promise<[string, number][]> => { + const query = [ + `events = query_bucket("${bucketId}")`, + 'merged = merge_events_by_keys(events, ["app"])', + 'sorted = sort_by_duration(merged)', + `limited = limit_events(sorted, ${limit})`, + 'summary = summarize_by_app(limited)', + 'RETURN summary' + ]; + const timeperiod = `${start}/${end}`; + const results = await runQuery(query, [timeperiod]); + return (results[0] as [string, number][]) || []; + }, [runQuery]); + + return { + // State + loading, + error, + + // Raw query + runQuery, + + // Higher-level functions + categorize, + summarize, + getEventsForPeriod, + getUsageSummary, + getTotalTime, + filterByApp, + excludeApps, + getTopApps, + }; +} diff --git a/desktop-app/src/hooks/useSync.ts b/desktop-app/src/hooks/useSync.ts index de28895..87bed5b 100644 --- a/desktop-app/src/hooks/useSync.ts +++ b/desktop-app/src/hooks/useSync.ts @@ -1,38 +1,36 @@ -import { useState, useEffect, useCallback } from 'react'; +import { useState, useEffect, useCallback, useRef } from 'react'; import { invoke } from '@tauri-apps/api/core'; -import { useUser } from '@/contexts/UserContext'; import { logError } from '@/lib/errorHandling'; +import type { AWBucket, AWEvent, NeonBucket, NeonEvent } from '@/types'; export interface SyncStatus { isSyncing: boolean; lastSyncTime: string | null; unsyncedCount: number; error: string | null; + connected: boolean; } -// Add Session interface for unsynced sessions -export interface Session { - id: string; - device_id: string; - user_id: string | null; - app_name: string; - window_title: string; - start_time: string; - end_time: string | null; - duration_sec: number; - created_at: string; +export interface SyncConfig { + connectionString: string; + autoSync?: boolean; + syncInterval?: number; // milliseconds, default 30000 } -export function useSync() { - const { user, session } = useUser(); +/** + * Hook for syncing ActivityWatch data with Neon cloud database + */ +export function useSync(config?: SyncConfig) { const [syncStatus, setSyncStatus] = useState({ isSyncing: false, lastSyncTime: null, unsyncedCount: 0, error: null, + connected: false, }); const [deviceId, setDeviceId] = useState(''); + const connectionStringRef = useRef(config?.connectionString || null); // Get device ID on mount useEffect(() => { @@ -47,32 +45,193 @@ export function useSync() { fetchDeviceId(); }, []); - // Check for unsynced sessions - const checkUnsyncedSessions = useCallback(async () => { + // Update connection string when config changes + useEffect(() => { + if (config?.connectionString) { + connectionStringRef.current = config.connectionString; + } + }, [config?.connectionString]); + + // Set connection string + const setConnectionString = useCallback((connectionString: string) => { + connectionStringRef.current = connectionString; + }, []); + + // Test Neon connection + const testConnection = useCallback(async (connectionString?: string) => { + const connStr = connectionString || connectionStringRef.current; + 
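    // An explicit argument wins over the stored SyncConfig string, so a
    // settings screen can probe a candidate connection string before
    // persisting it.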
if (!connStr) { + setSyncStatus(prev => ({ + ...prev, + error: 'No Neon connection string configured', + connected: false, + })); + return false; + } + + try { + await invoke('neon_test_connection', { connectionString: connStr }); + if (connectionString) { + connectionStringRef.current = connectionString; + } + setSyncStatus(prev => ({ + ...prev, + error: null, + connected: true, + })); + return true; + } catch (e) { + logError(e, 'testConnection'); + setSyncStatus(prev => ({ + ...prev, + error: 'Failed to connect to Neon', + connected: false, + })); + return false; + } + }, []); + + // Initialize Neon schema + const initSchema = useCallback(async () => { + const connStr = connectionStringRef.current; + if (!connStr) { + setSyncStatus(prev => ({ + ...prev, + error: 'No Neon connection string configured', + })); + return false; + } + + try { + await invoke('neon_init_schema', { connectionString: connStr }); + return true; + } catch (e) { + logError(e, 'initSchema'); + setSyncStatus(prev => ({ + ...prev, + error: 'Failed to initialize Neon schema', + })); + return false; + } + }, []); + + // Get unsynced events count from local AW database + const checkUnsyncedEvents = useCallback(async () => { if (!deviceId) return; try { - const unsyncedSessions = await invoke('get_unsynced_sessions_command', { deviceId }); + // Get buckets and count events + const buckets = await invoke>('aw_get_buckets'); + let totalEvents = 0; + + for (const bucketId of Object.keys(buckets)) { + const count = await invoke('aw_get_event_count', { bucketId }); + totalEvents += count; + } + setSyncStatus(prev => ({ ...prev, - unsyncedCount: unsyncedSessions.length, + unsyncedCount: totalEvents, error: null, })); } catch (e) { - logError(e, 'checkUnsyncedSessions'); + logError(e, 'checkUnsyncedEvents'); setSyncStatus(prev => ({ ...prev, - error: 'Failed to check unsynced sessions', + error: 'Failed to check unsynced events', })); } }, [deviceId]); - // Manual sync function + // Sync buckets to Neon + const syncBuckets = useCallback(async () => { + const connStr = connectionStringRef.current; + if (!connStr || !deviceId) { + return false; + } + + try { + const buckets = await invoke>('aw_get_buckets'); + + for (const bucket of Object.values(buckets)) { + const neonBucket: NeonBucket = { + id: bucket.id, + device_id: deviceId, + name: bucket.name, + bucket_type: bucket.type, + client: bucket.client, + hostname: bucket.hostname, + created: bucket.created, + data: bucket.data, + }; + + await invoke('neon_sync_bucket', { + connectionString: connStr, + bucket: neonBucket, + }); + } + return true; + } catch (e) { + logError(e, 'syncBuckets'); + return false; + } + }, [deviceId]); + + // Sync events to Neon + const syncEvents = useCallback(async (bucketId?: string, limit?: number) => { + const connStr = connectionStringRef.current; + if (!connStr || !deviceId) { + setSyncStatus(prev => ({ + ...prev, + error: 'Not configured for sync', + })); + return 0; + } + + try { + const buckets = await invoke>('aw_get_buckets'); + const bucketIds = bucketId ? 
[bucketId] : Object.keys(buckets); + let totalSynced = 0; + + for (const bid of bucketIds) { + const events = await invoke('aw_get_events', { + bucketId: bid, + limit: limit || 1000, + }); + + if (events.length === 0) continue; + + const neonEvents: NeonEvent[] = events.map(event => ({ + id: event.id, + bucket_id: bid, + device_id: deviceId, + timestamp: event.timestamp, + duration: event.duration, + data: event.data, + })); + + const synced = await invoke('neon_sync_events', { + connectionString: connStr, + events: neonEvents, + }); + + totalSynced += synced; + } + + return totalSynced; + } catch (e) { + logError(e, 'syncEvents'); + return 0; + } + }, [deviceId]); + + // Full sync function const syncData = useCallback(async () => { - if (!user || !deviceId || !session) { + const connStr = connectionStringRef.current; + if (!connStr || !deviceId) { setSyncStatus(prev => ({ ...prev, - error: 'User not authenticated or device ID not available', + error: 'Device ID or connection string not available', })); return; } @@ -84,39 +243,26 @@ export function useSync() { })); try { - // Get Supabase credentials from environment - const supabaseUrl = process.env.NEXT_PUBLIC_SUPABASE_URL; - const supabaseKey = process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY; - - if (!supabaseUrl || !supabaseKey) { - throw new Error('Supabase configuration not found'); + // Sync buckets first + const bucketsOk = await syncBuckets(); + if (!bucketsOk) { + throw new Error('Failed to sync buckets'); } - // Get the user's access token for authenticated requests - const accessToken = session.access_token; - - // Call the sync command with the access token - await invoke('sync_data_command', { - deviceId, - userId: user.id, - supabaseUrl, - supabaseKey, - accessToken, - }); + // Sync events + const synced = await syncEvents(); - // Update sync status setSyncStatus(prev => ({ ...prev, isSyncing: false, lastSyncTime: new Date().toISOString(), - unsyncedCount: 0, error: null, })); - // After sync, check for any remaining unsynced sessions to update the UI - await checkUnsyncedSessions(); + // Check remaining unsynced + await checkUnsyncedEvents(); - console.log('Sync completed successfully'); + console.log(`Sync completed: ${synced} events synced`); } catch (e) { logError(e, 'syncData'); setSyncStatus(prev => ({ @@ -125,64 +271,83 @@ export function useSync() { error: e instanceof Error ? 
e.message : 'Sync failed', })); } - }, [user, deviceId, session, checkUnsyncedSessions]); + }, [deviceId, syncBuckets, syncEvents, checkUnsyncedEvents]); - // Test Supabase connection - const testConnection = async () => { - const supabaseUrl = process.env.NEXT_PUBLIC_SUPABASE_URL; - const supabaseKey = process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY; - - if (!supabaseUrl || !supabaseKey) { - setSyncStatus(prev => ({ - ...prev, - error: 'Supabase configuration not found', - })); - return false; + // Get events from Neon (cloud) + const getCloudEvents = useCallback(async ( + options?: { + bucketId?: string; + start?: string; + end?: string; + limit?: number; + } + ): Promise => { + const connStr = connectionStringRef.current; + if (!connStr || !deviceId) { + return []; } try { - await invoke('test_supabase_connection_command', { - supabaseUrl, - supabaseKey, + return await invoke('neon_get_events', { + connectionString: connStr, + deviceId, + bucketId: options?.bucketId, + start: options?.start, + end: options?.end, + limit: options?.limit, }); - setSyncStatus(prev => ({ - ...prev, - error: null, - })); - return true; } catch (e) { - logError(e, 'testConnection'); - setSyncStatus(prev => ({ - ...prev, - error: 'Failed to connect to Supabase', - })); - return false; + logError(e, 'getCloudEvents'); + return []; } - }; + }, [deviceId]); - // Auto-sync when user logs in - useEffect(() => { - if (user && deviceId) { - checkUnsyncedSessions(); + // Get buckets from Neon (cloud) + const getCloudBuckets = useCallback(async (): Promise => { + const connStr = connectionStringRef.current; + if (!connStr || !deviceId) { + return []; + } + + try { + return await invoke('neon_get_buckets', { + connectionString: connStr, + deviceId, + }); + } catch (e) { + logError(e, 'getCloudBuckets'); + return []; } - }, [user, deviceId, checkUnsyncedSessions]); + }, [deviceId]); - // Periodic batch sync (every 30 seconds) + // Auto-sync setup useEffect(() => { - if (!user || !deviceId) return; + if (!config?.autoSync || !connectionStringRef.current || !deviceId) return; + + // Initial check + checkUnsyncedEvents(); const interval = setInterval(() => { syncData(); - }, 30000); + }, config.syncInterval || 30000); return () => clearInterval(interval); - }, [user, deviceId, syncData]); + }, [config?.autoSync, config?.syncInterval, deviceId, syncData, checkUnsyncedEvents]); return { syncStatus, - syncData, - testConnection, - checkUnsyncedSessions, deviceId, + // Configuration + setConnectionString, + // Actions + testConnection, + initSchema, + syncData, + syncBuckets, + syncEvents, + checkUnsyncedEvents, + // Cloud queries + getCloudEvents, + getCloudBuckets, }; -} \ No newline at end of file +} diff --git a/desktop-app/src/types/index.ts b/desktop-app/src/types/index.ts index 31bf4a4..47a311e 100644 --- a/desktop-app/src/types/index.ts +++ b/desktop-app/src/types/index.ts @@ -129,6 +129,47 @@ export const AW_CLIENTS = { AWATCHER: 'awatcher', } as const; +// ============================================================ +// Neon Cloud Sync types +// ============================================================ + +/** + * Neon event record for cloud storage + */ +export interface NeonEvent { + id?: number; + bucket_id: string; + device_id: string; + timestamp: string; // ISO8601 timestamp + duration: number; + data: Record; + synced_at?: string; +} + +/** + * Neon bucket record for cloud storage + */ +export interface NeonBucket { + id: string; + device_id: string; + name?: string; + bucket_type: string; + client: string; + 
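+  // NeonBucket uses `bucket_type` where AWBucket uses `type` (`type` is a
+  // keyword on the Rust side); useSync's syncBuckets maps between the two.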
hostname: string; + created: string; + data?: Record; +} + +/** + * Neon sync status + */ +export interface NeonSyncStatus { + connected: boolean; + lastSync?: string; + pendingEvents: number; + error?: string; +} + // ============================================================ // Legacy types (for backwards compatibility) // ============================================================ From 78efcc414d12b8fd4baf81a026bab3d9ba3aa69b Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 5 Feb 2026 03:43:13 +0000 Subject: [PATCH 04/18] chore: Update Cargo.lock with new dependencies https://claude.ai/code/session_01QYNePFqXFj4AaRtTDJVe6J --- desktop-app/src-tauri/Cargo.lock | 151 +++++++++++++++++++++++++++++-- 1 file changed, 143 insertions(+), 8 deletions(-) diff --git a/desktop-app/src-tauri/Cargo.lock b/desktop-app/src-tauri/Cargo.lock index ea8055f..3c5e9a2 100644 --- a/desktop-app/src-tauri/Cargo.lock +++ b/desktop-app/src-tauri/Cargo.lock @@ -111,6 +111,17 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +[[package]] +name = "async-trait" +version = "0.1.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "atk" version = "0.18.2" @@ -155,6 +166,61 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "backtrace" version = "0.3.75" @@ -1390,6 +1456,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "gethostname" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc3655aa6818d65bc620d6911f05aa7b6aeb596291e1e9f79e52df85583d1e30" +dependencies = [ + "rustix 0.38.44", + "windows-targets 0.52.6", +] + [[package]] name = "getrandom" version = "0.1.16" @@ -1727,6 +1803,12 @@ version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + [[package]] name = "hyper" version = "1.6.0" @@ -1740,6 +1822,7 @@ 
dependencies = [ "http", "http-body", "httparse", + "httpdate", "itoa", "pin-project-lite", "smallvec", @@ -2183,6 +2266,12 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + [[package]] name = "linux-raw-sys" version = "0.9.4" @@ -2219,12 +2308,15 @@ name = "loopd" version = "0.1.0" dependencies = [ "anyhow", + "axum", "chrono", "cocoa", "core-foundation 0.9.4", + "gethostname 0.5.0", "log", "objc", "once_cell", + "regex", "reqwest", "serde", "serde_json", @@ -2236,6 +2328,8 @@ dependencies = [ "tauri-plugin-store", "tauri-plugin-updater", "tokio", + "tower", + "tower-http", "uuid", "windows 0.52.0", "x11rb", @@ -2294,6 +2388,12 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "md-5" version = "0.10.6" @@ -3662,6 +3762,19 @@ dependencies = [ "semver", ] +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", +] + [[package]] name = "rustix" version = "1.0.7" @@ -3671,7 +3784,7 @@ dependencies = [ "bitflags 2.9.1", "errno", "libc", - "linux-raw-sys", + "linux-raw-sys 0.9.4", "windows-sys 0.59.0", ] @@ -3843,10 +3956,11 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ + "serde_core", "serde_derive", ] @@ -3861,11 +3975,20 @@ dependencies = [ "typeid", ] +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", @@ -3895,6 +4018,17 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" +dependencies = [ + "itoa", + "serde", + "serde_core", +] + [[package]] name = "serde_repr" version = "0.1.20" @@ -4895,7 +5029,7 @@ dependencies = [ "fastrand", "getrandom 0.3.3", "once_cell", - "rustix", + "rustix 1.0.7", "windows-sys 0.59.0", ] @@ -5157,6 +5291,7 @@ dependencies = [ "tokio", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -6312,7 +6447,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b1641b26d4dec61337c35a1b1aaf9e3cba8f46f0b43636c609ab0291a648040a" dependencies = [ - "gethostname", + "gethostname 0.3.0", "nix", "winapi", "winapi-wsapoll", @@ -6335,7 +6470,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af3a19837351dc82ba89f8a125e22a3c475f05aba604acc023d62b2739ae2909" dependencies = [ "libc", - "rustix", + "rustix 1.0.7", ] [[package]] From 52696a22cb4071ba6d6bc0283b6cc185b93e1a91 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 5 Feb 2026 06:40:01 +0000 Subject: [PATCH 05/18] fix: Address PR review feedback for ActivityWatch compatibility - Fix heartbeat merge logic to check time_diff >= 0 to prevent event data corruption from clock drift causing negative time differences - Fix migration timestamps to use RFC3339 format (strftime with 'T' separator and 'Z' suffix) for proper lexicographic comparison with new events - Fix glob-to-regex conversion to properly escape regex metacharacters before converting glob wildcards, preventing patterns like "test.app" from incorrectly matching "testXapp" https://claude.ai/code/session_01QYNePFqXFj4AaRtTDJVe6J --- .../src-tauri/migrations/0003_activitywatch_compat.sql | 8 +++++--- desktop-app/src-tauri/src/aw_database.rs | 3 ++- desktop-app/src-tauri/src/aw_query.rs | 10 ++++++++-- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/desktop-app/src-tauri/migrations/0003_activitywatch_compat.sql b/desktop-app/src-tauri/migrations/0003_activitywatch_compat.sql index 3fc8be7..28cc315 100644 --- a/desktop-app/src-tauri/migrations/0003_activitywatch_compat.sql +++ b/desktop-app/src-tauri/migrations/0003_activitywatch_compat.sql @@ -43,6 +43,7 @@ CREATE TABLE IF NOT EXISTS key_value ( -- Migrate existing sessions to events format -- First, create a default bucket for existing data +-- Note: Using strftime to produce RFC3339/ISO8601 format with 'T' separator and 'Z' suffix INSERT OR IGNORE INTO buckets (id, name, type, client, hostname, created, data, last_updated) SELECT 'aw-watcher-window_' || COALESCE( @@ -56,19 +57,20 @@ SELECT (SELECT name FROM devices LIMIT 1), 'localhost' ), - datetime('now'), + strftime('%Y-%m-%dT%H:%M:%SZ', 'now'), '{}', - datetime('now') + strftime('%Y-%m-%dT%H:%M:%SZ', 'now') WHERE EXISTS (SELECT 1 FROM sessions LIMIT 1); -- Migrate sessions to events +-- Using strftime for RFC3339 format to ensure proper timestamp comparison with new events INSERT OR IGNORE INTO events (bucket_id, timestamp, duration, data) SELECT 'aw-watcher-window_' || COALESCE( (SELECT name FROM devices LIMIT 1), 'localhost' ), - datetime(start_time, 'unixepoch'), + strftime('%Y-%m-%dT%H:%M:%SZ', start_time, 'unixepoch'), COALESCE(duration_sec, 0), json_object('app', app_name, 'title', COALESCE(window_title, '')) FROM sessions diff --git a/desktop-app/src-tauri/src/aw_database.rs b/desktop-app/src-tauri/src/aw_database.rs index ed7aa55..d2252da 100644 --- a/desktop-app/src-tauri/src/aw_database.rs +++ b/desktop-app/src-tauri/src/aw_database.rs @@ -275,7 +275,8 @@ impl AwDatabase { let time_diff = (heartbeat.timestamp - last_end).num_milliseconds() as f64 / 1000.0; // If data matches and within pulsetime, merge - if time_diff <= pulsetime && last_data == heartbeat.data { + // Note: time_diff >= 0 check prevents merging when clock drift causes negative differences + if time_diff >= 0.0 && time_diff <= pulsetime && last_data == heartbeat.data { // Extend the last event let new_duration = (heartbeat.timestamp - last_timestamp).num_milliseconds() as f64 / 1000.0 + heartbeat.duration; diff --git 
a/desktop-app/src-tauri/src/aw_query.rs b/desktop-app/src-tauri/src/aw_query.rs index 20d4275..b8ae8ab 100644 --- a/desktop-app/src-tauri/src/aw_query.rs +++ b/desktop-app/src-tauri/src/aw_query.rs @@ -422,8 +422,14 @@ impl QueryContext { } } CategoryRuleType::Glob { pattern } => { - let pattern = pattern.replace("*", ".*").replace("?", "."); - if let Ok(re) = regex::Regex::new(&pattern) { + // Convert glob pattern to regex by: + // 1. First escape all regex metacharacters (except * and ?) + // 2. Then convert glob wildcards to regex equivalents + let escaped = regex::escape(pattern) + .replace(r"\*", ".*") // glob * -> regex .* + .replace(r"\?", "."); // glob ? -> regex . + let anchored = format!("^{}$", escaped); // Anchor for full match + if let Ok(re) = regex::Regex::new(&anchored) { re.is_match(app) || re.is_match(title) } else { false From 67ff4b63be410e683333c2fd78df110d87c1939a Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 5 Feb 2026 20:49:48 +0000 Subject: [PATCH 06/18] fix: Address additional PR review feedback - Fix parse_datetime to log warnings and return epoch time instead of current time on parse failure, preventing silent data corruption - Enable SQLite foreign key constraints with PRAGMA foreign_keys = ON to ensure ON DELETE CASCADE works properly for bucket/event cleanup https://claude.ai/code/session_01QYNePFqXFj4AaRtTDJVe6J --- desktop-app/src-tauri/src/aw_database.rs | 27 +++++++++++++++++------- desktop-app/src-tauri/src/main.rs | 13 ++++++++++-- 2 files changed, 30 insertions(+), 10 deletions(-) diff --git a/desktop-app/src-tauri/src/aw_database.rs b/desktop-app/src-tauri/src/aw_database.rs index d2252da..8b26538 100644 --- a/desktop-app/src-tauri/src/aw_database.rs +++ b/desktop-app/src-tauri/src/aw_database.rs @@ -400,15 +400,26 @@ impl AwDatabase { } /// Helper to parse datetime from SQLite string +/// Supports both RFC3339 format (new events) and SQLite datetime format (legacy) fn parse_datetime(s: &str) -> DateTime { - DateTime::parse_from_rfc3339(s) - .map(|dt| dt.with_timezone(&Utc)) - .unwrap_or_else(|_| { - // Try parsing as SQLite datetime format - chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S") - .map(|ndt| DateTime::from_naive_utc_and_offset(ndt, Utc)) - .unwrap_or_else(|_| Utc::now()) - }) + // First try RFC3339 format (e.g., "2024-01-15T10:30:00Z") + if let Ok(dt) = DateTime::parse_from_rfc3339(s) { + return dt.with_timezone(&Utc); + } + + // Try SQLite datetime format (e.g., "2024-01-15 10:30:00") + if let Ok(ndt) = chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S") { + return DateTime::from_naive_utc_and_offset(ndt, Utc); + } + + // Log warning for unparseable timestamps - this indicates data corruption + eprintln!( + "[AW-DATABASE] Warning: Failed to parse timestamp '{}', using epoch time. 
This may indicate data corruption.", + s + ); + // Return Unix epoch (1970-01-01) instead of current time to make the issue visible + // and avoid corrupting historical data ordering + DateTime::from_timestamp(0, 0).unwrap_or_else(Utc::now) } #[cfg(test)] diff --git a/desktop-app/src-tauri/src/main.rs b/desktop-app/src-tauri/src/main.rs index 1d337f5..cd7f258 100644 --- a/desktop-app/src-tauri/src/main.rs +++ b/desktop-app/src-tauri/src/main.rs @@ -108,9 +108,18 @@ fn main() { // Connect the pool (will create the file if missing) let pool = tauri::async_runtime::block_on(async { - SqlitePool::connect(&db_url) + let pool = SqlitePool::connect(&db_url) .await - .expect("Failed to connect to SQLite database") + .expect("Failed to connect to SQLite database"); + + // Enable foreign key constraints - SQLite requires this to be explicitly enabled + // This ensures ON DELETE CASCADE works properly for bucket/event relationships + sqlx::query("PRAGMA foreign_keys = ON") + .execute(&pool) + .await + .expect("Failed to enable foreign key constraints"); + + pool }); // Note: tauri_plugin_sql handles migrations automatically From 6adbd19e43208f2d40028c1ec19a6393fd1b8782 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 5 Feb 2026 23:21:15 +0000 Subject: [PATCH 07/18] fix: Address additional PR review feedback - Fix Neon URL construction to properly handle query parameters by inserting /sql before the query string instead of naively appending - Fix query parser to check for function calls before variable assignment, preventing misparse of function arguments containing '=' characters - Fix heartbeat duration inflation by using duration: 0.0 instead of 1.0, since actual duration is calculated from timestamp differences during merge https://claude.ai/code/session_01QYNePFqXFj4AaRtTDJVe6J --- desktop-app/src-tauri/src/aw_query.rs | 29 ++++++++++++++++++--------- desktop-app/src-tauri/src/lib.rs | 2 +- desktop-app/src-tauri/src/neon.rs | 14 ++++++++++++- 3 files changed, 33 insertions(+), 12 deletions(-) diff --git a/desktop-app/src-tauri/src/aw_query.rs b/desktop-app/src-tauri/src/aw_query.rs index b8ae8ab..cc45137 100644 --- a/desktop-app/src-tauri/src/aw_query.rs +++ b/desktop-app/src-tauri/src/aw_query.rs @@ -71,21 +71,30 @@ impl QueryContext { pub async fn execute(&mut self, query: &str) -> Result { let query = query.trim(); - // Handle variable assignment - if let Some(eq_pos) = query.find('=') { - let var_name = query[..eq_pos].trim(); - let expr = query[eq_pos + 1..].trim(); - let result = self.evaluate(expr).await?; - self.variables.insert(var_name.to_string(), result.clone()); - return Ok(result); - } - - // Handle return statement + // Handle return statement first if query.starts_with("RETURN") || query.starts_with("return") { let expr = query[6..].trim(); return self.evaluate(expr).await; } + // Handle variable assignment - but only if '=' appears before any '(' + // This prevents misparse of function arguments containing '=' like ["param=value"] + if let Some(eq_pos) = query.find('=') { + let paren_pos = query.find('('); + // Only treat as assignment if '=' comes before '(' or there's no '(' + // AND the left side is a valid identifier (alphanumeric/underscore only) + if paren_pos.map_or(true, |p| eq_pos < p) { + let var_name = query[..eq_pos].trim(); + // Validate that var_name is a valid identifier (no special chars except underscore) + if !var_name.is_empty() && var_name.chars().all(|c| c.is_alphanumeric() || c == '_') { + let expr = query[eq_pos + 1..].trim(); + let result = 
self.evaluate(expr).await?; + self.variables.insert(var_name.to_string(), result.clone()); + return Ok(result); + } + } + } + self.evaluate(query).await } diff --git a/desktop-app/src-tauri/src/lib.rs b/desktop-app/src-tauri/src/lib.rs index b6dbb70..5395e3b 100644 --- a/desktop-app/src-tauri/src/lib.rs +++ b/desktop-app/src-tauri/src/lib.rs @@ -351,7 +351,7 @@ pub fn start_tracking(db: Db, aw_db: AwDb, app_handle: tauri::AppHandle) { let heartbeat = aw_models::Heartbeat { timestamp: chrono::Utc::now(), - duration: 1.0, // 1 second heartbeat + duration: 0.0, // Duration is calculated from timestamp diff during merge data: serde_json::json!({ "app": app_name, "title": title diff --git a/desktop-app/src-tauri/src/neon.rs b/desktop-app/src-tauri/src/neon.rs index 024532f..2f3f359 100644 --- a/desktop-app/src-tauri/src/neon.rs +++ b/desktop-app/src-tauri/src/neon.rs @@ -64,7 +64,19 @@ impl NeonClient { async fn execute(&self, query: &str, params: &[Value]) -> Result> { // Neon serverless driver uses a specific HTTP endpoint // Format: https:///sql - let url = format!("{}/sql", self.connection_string.replace("postgres://", "https://").replace("postgresql://", "https://")); + // Must handle query parameters properly (e.g., ?sslmode=require) + let base_url = self.connection_string + .replace("postgres://", "https://") + .replace("postgresql://", "https://"); + + // Parse URL to properly insert /sql before query string + let url = if let Some(query_start) = base_url.find('?') { + // Insert /sql before the query string + format!("{}/sql{}", &base_url[..query_start], &base_url[query_start..]) + } else { + // No query string, just append /sql + format!("{}/sql", base_url) + }; let body = serde_json::json!({ "query": query, From a4083cbcd0d028510467ab65aaba4cc10a137ac5 Mon Sep 17 00:00:00 2001 From: Claude Date: Thu, 5 Feb 2026 23:31:26 +0000 Subject: [PATCH 08/18] fix: Address additional PR review feedback - Fix foreign key PRAGMA to apply to all pool connections by using SqliteConnectOptions with .pragma() instead of executing PRAGMA on a single connection after pool creation - Fix empty title fallback by skipping heartbeat when window title can't be retrieved, preventing event fragmentation from empty title comparisons failing https://claude.ai/code/session_01QYNePFqXFj4AaRtTDJVe6J --- desktop-app/src-tauri/src/lib.rs | 8 ++++++-- desktop-app/src-tauri/src/main.rs | 21 ++++++++++----------- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/desktop-app/src-tauri/src/lib.rs b/desktop-app/src-tauri/src/lib.rs index 5395e3b..a9e6cf3 100644 --- a/desktop-app/src-tauri/src/lib.rs +++ b/desktop-app/src-tauri/src/lib.rs @@ -343,10 +343,14 @@ pub fn start_tracking(db: Db, aw_db: AwDb, app_handle: tauri::AppHandle) { let bucket_id_clone = bucket_id.clone(); let app_name = current_app_name.clone(); tauri::async_runtime::spawn(async move { - // Get the current window title + // Get the current window title - skip heartbeat if we can't get it + // Using empty string would cause heartbeat comparison to fail and fragment events let title = match crate::usage::get_active_app_with_title().await { Ok(active) => active.title, - Err(_) => String::new(), + Err(e) => { + log::debug!("Skipping heartbeat - couldn't get window title: {}", e); + return; // Skip this heartbeat, next one will succeed + } }; let heartbeat = aw_models::Heartbeat { diff --git a/desktop-app/src-tauri/src/main.rs b/desktop-app/src-tauri/src/main.rs index cd7f258..9c9e57d 100644 --- a/desktop-app/src-tauri/src/main.rs +++ 
b/desktop-app/src-tauri/src/main.rs @@ -4,6 +4,8 @@ use tauri::Manager; use std::sync::Arc; use sqlx::SqlitePool; +use sqlx::sqlite::SqliteConnectOptions; +use std::str::FromStr; use app_lib::{database::Database, aw_database::AwDatabase, start_tracking, aw_server, get_or_create_device_id}; use std::path::PathBuf; use tauri_plugin_sql::{Builder, Migration, MigrationKind}; @@ -106,20 +108,17 @@ fn main() { // The plugin will resolve "sqlite:usage.db" relative to AppConfig, so we do the same. let db_url = format!("sqlite://{}", db_path.to_string_lossy()); - // Connect the pool (will create the file if missing) + // Connect the pool with foreign key constraints enabled for ALL connections + // Using SqliteConnectOptions ensures every connection in the pool has foreign keys ON let pool = tauri::async_runtime::block_on(async { - let pool = SqlitePool::connect(&db_url) - .await - .expect("Failed to connect to SQLite database"); + let options = SqliteConnectOptions::from_str(&db_url) + .expect("Failed to parse database URL") + .create_if_missing(true) + .pragma("foreign_keys", "ON"); // Applied to every connection - // Enable foreign key constraints - SQLite requires this to be explicitly enabled - // This ensures ON DELETE CASCADE works properly for bucket/event relationships - sqlx::query("PRAGMA foreign_keys = ON") - .execute(&pool) + SqlitePool::connect_with(options) .await - .expect("Failed to enable foreign key constraints"); - - pool + .expect("Failed to connect to SQLite database") }); // Note: tauri_plugin_sql handles migrations automatically From 7d16c9d77ad350a3d1f2a858bde41830ca3e0540 Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 6 Feb 2026 00:51:56 +0000 Subject: [PATCH 09/18] fix: Address additional PR review feedback - Remove duplicate BucketExport struct from aw_server.rs, import from aw_models - Fix TOCTOU race condition in get_or_create_bucket using INSERT OR IGNORE - Wrap insert_events in a transaction for atomic all-or-nothing insertion - Change heartbeat pulsetime from 5 seconds to 10 minutes for better consecutive event merging https://claude.ai/code/session_01QYNePFqXFj4AaRtTDJVe6J --- desktop-app/src-tauri/src/aw_database.rs | 49 ++++++++++++++++++++---- desktop-app/src-tauri/src/aw_server.rs | 9 +---- desktop-app/src-tauri/src/lib.rs | 8 ++-- 3 files changed, 46 insertions(+), 20 deletions(-) diff --git a/desktop-app/src-tauri/src/aw_database.rs b/desktop-app/src-tauri/src/aw_database.rs index 8b26538..42ccb91 100644 --- a/desktop-app/src-tauri/src/aw_database.rs +++ b/desktop-app/src-tauri/src/aw_database.rs @@ -93,12 +93,31 @@ impl AwDatabase { } /// Create bucket if it doesn't exist, return existing or new bucket + /// Uses INSERT OR IGNORE to avoid TOCTOU race conditions pub async fn get_or_create_bucket(&self, bucket: &Bucket) -> Result { - if let Some(existing) = self.get_bucket(&bucket.id).await? 
{ - return Ok(existing); - } - self.create_bucket(bucket).await?; - Ok(bucket.clone()) + let data_json = bucket.data.as_ref() + .map(|d| serde_json::to_string(d).unwrap_or_else(|_| "{}".to_string())); + + // Use INSERT OR IGNORE to atomically create if not exists + sqlx::query( + "INSERT OR IGNORE INTO buckets (id, name, type, client, hostname, created, data, last_updated) + VALUES (?, ?, ?, ?, ?, ?, ?, ?)" + ) + .bind(&bucket.id) + .bind(&bucket.name) + .bind(&bucket.bucket_type) + .bind(&bucket.client) + .bind(&bucket.hostname) + .bind(bucket.created.to_rfc3339()) + .bind(&data_json) + .bind(bucket.last_updated.map(|dt| dt.to_rfc3339())) + .execute(&self.pool) + .await?; + + // Now fetch the bucket (either newly created or existing) + self.get_bucket(&bucket.id).await?.ok_or_else(|| { + sqlx::Error::RowNotFound + }) } /// Delete a bucket and all its events @@ -191,7 +210,14 @@ impl AwDatabase { } /// Insert multiple events into a bucket + /// Uses a transaction to ensure all-or-nothing insertion pub async fn insert_events(&self, bucket_id: &str, events: &[Event]) -> Result, sqlx::Error> { + if events.is_empty() { + return Ok(Vec::new()); + } + + // Start a transaction for atomic batch insert + let mut tx = self.pool.begin().await?; let mut inserted = Vec::new(); for event in events { @@ -204,7 +230,7 @@ impl AwDatabase { .bind(event.timestamp.to_rfc3339()) .bind(event.duration) .bind(&data_json) - .execute(&self.pool) + .execute(&mut *tx) .await?; let mut new_event = event.clone(); @@ -213,8 +239,15 @@ impl AwDatabase { inserted.push(new_event); } - // Update bucket's last_updated - self.update_bucket_last_updated(bucket_id).await?; + // Update bucket's last_updated within the same transaction + sqlx::query("UPDATE buckets SET last_updated = ? WHERE id = ?") + .bind(Utc::now().to_rfc3339()) + .bind(bucket_id) + .execute(&mut *tx) + .await?; + + // Commit the transaction + tx.commit().await?; Ok(inserted) } diff --git a/desktop-app/src-tauri/src/aw_server.rs b/desktop-app/src-tauri/src/aw_server.rs index d989138..27dbaaf 100644 --- a/desktop-app/src-tauri/src/aw_server.rs +++ b/desktop-app/src-tauri/src/aw_server.rs @@ -14,7 +14,7 @@ use std::sync::Arc; use tower_http::cors::{Any, CorsLayer}; use crate::aw_database::AwDatabase; -use crate::aw_models::{Bucket, Event, GetEventsParams, Heartbeat, ServerInfo}; +use crate::aw_models::{Bucket, BucketExport, Event, GetEventsParams, Heartbeat, ServerInfo}; use crate::aw_query::{QueryRequest, execute_query}; /// Shared state for the API server @@ -281,13 +281,6 @@ async fn heartbeat( // ========== Export Endpoints ========== -#[derive(Debug, Serialize)] -struct BucketExport { - #[serde(flatten)] - bucket: Bucket, - events: Vec, -} - async fn export_bucket( State(state): State, Path(bucket_id): Path, diff --git a/desktop-app/src-tauri/src/lib.rs b/desktop-app/src-tauri/src/lib.rs index a9e6cf3..b9b7822 100644 --- a/desktop-app/src-tauri/src/lib.rs +++ b/desktop-app/src-tauri/src/lib.rs @@ -319,8 +319,8 @@ pub fn start_tracking(db: Db, aw_db: AwDb, app_handle: tauri::AppHandle) { "title": title_str }), }; - // Use 5 second pulsetime for merging events - if let Err(e) = aw_db_clone.heartbeat(&bucket_id_clone, &heartbeat, 5.0).await { + // Use 10 minute pulsetime for merging consecutive events + if let Err(e) = aw_db_clone.heartbeat(&bucket_id_clone, &heartbeat, 600.0).await { log::warn!("Failed to send AW heartbeat: {}", e); } }); @@ -361,8 +361,8 @@ pub fn start_tracking(db: Db, aw_db: AwDb, app_handle: tauri::AppHandle) { "title": title }), }; - // 
Use 5 second pulsetime for merging events - if let Err(e) = aw_db_clone.heartbeat(&bucket_id_clone, &heartbeat, 5.0).await { + // Use 10 minute pulsetime for merging consecutive events + if let Err(e) = aw_db_clone.heartbeat(&bucket_id_clone, &heartbeat, 600.0).await { log::warn!("Failed to send AW heartbeat: {}", e); } }); From 37e5ad5ba0a605359430769794da9924d77e29c1 Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 6 Feb 2026 01:07:34 +0000 Subject: [PATCH 10/18] fix: Fix heartbeat app/title timing mismatch Fetch app and title together from get_active_app_with_title() to ensure consistency. Verify that the app hasn't changed before sending heartbeat to prevent corrupted event data when user switches apps mid-heartbeat. https://claude.ai/code/session_01QYNePFqXFj4AaRtTDJVe6J --- desktop-app/src-tauri/src/lib.rs | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/desktop-app/src-tauri/src/lib.rs b/desktop-app/src-tauri/src/lib.rs index b9b7822..2d4af22 100644 --- a/desktop-app/src-tauri/src/lib.rs +++ b/desktop-app/src-tauri/src/lib.rs @@ -341,24 +341,31 @@ pub fn start_tracking(db: Db, aw_db: AwDb, app_handle: tauri::AppHandle) { if let Some(current_app_name) = ¤t_app { let aw_db_clone = aw_db.clone(); let bucket_id_clone = bucket_id.clone(); - let app_name = current_app_name.clone(); + let expected_app = current_app_name.clone(); tauri::async_runtime::spawn(async move { - // Get the current window title - skip heartbeat if we can't get it - // Using empty string would cause heartbeat comparison to fail and fragment events - let title = match crate::usage::get_active_app_with_title().await { - Ok(active) => active.title, + // Get fresh app and title together to ensure consistency + // This prevents mismatched app/title if user switches apps mid-heartbeat + let active = match crate::usage::get_active_app_with_title().await { + Ok(active) => active, Err(e) => { - log::debug!("Skipping heartbeat - couldn't get window title: {}", e); - return; // Skip this heartbeat, next one will succeed + log::debug!("Skipping heartbeat - couldn't get window info: {}", e); + return; } }; + // Verify the app hasn't changed - if it has, skip heartbeat + // The change will be detected on the next tracking cycle + if active.name != expected_app { + log::debug!("Skipping heartbeat - app changed from {} to {}", expected_app, active.name); + return; + } + let heartbeat = aw_models::Heartbeat { timestamp: chrono::Utc::now(), duration: 0.0, // Duration is calculated from timestamp diff during merge data: serde_json::json!({ - "app": app_name, - "title": title + "app": active.name, + "title": active.title }), }; // Use 10 minute pulsetime for merging consecutive events From a94c20819b35bf03b6438e2fca099b636cac97bb Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 6 Feb 2026 01:23:21 +0000 Subject: [PATCH 11/18] fix: Fix Neon URL construction to extract host correctly Parse connection string to extract just the host component, stripping user:pass credentials and database path. Neon's HTTP API expects the endpoint at https://host/sql, not with database path included. 
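
For example, a minimal sketch of the intended transformation (the endpoint
host, the credentials, and the NeonClient::new constructor here are
illustrative assumptions, not values from this codebase):

    // Hypothetical Neon connection string: credentials and the /neondb
    // path must be stripped, keeping only the host.
    let client = NeonClient::new(
        "postgres://user:pass@ep-example-123456.us-east-2.aws.neon.tech/neondb?sslmode=require",
    );
    assert_eq!(
        client.build_neon_api_url().unwrap(),
        "https://ep-example-123456.us-east-2.aws.neon.tech/sql",
    );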
https://claude.ai/code/session_01QYNePFqXFj4AaRtTDJVe6J --- desktop-app/src-tauri/src/neon.rs | 45 ++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/desktop-app/src-tauri/src/neon.rs b/desktop-app/src-tauri/src/neon.rs index 2f3f359..66658e9 100644 --- a/desktop-app/src-tauri/src/neon.rs +++ b/desktop-app/src-tauri/src/neon.rs @@ -60,23 +60,42 @@ impl NeonClient { } } + /// Build the Neon HTTP API URL from the connection string + /// Extracts just the host from postgres://user:pass@host/database?params + /// and constructs https://host/sql + fn build_neon_api_url(&self) -> Result { + let conn = &self.connection_string; + + // Remove protocol prefix + let without_protocol = conn + .strip_prefix("postgres://") + .or_else(|| conn.strip_prefix("postgresql://")) + .ok_or_else(|| anyhow::anyhow!("Invalid connection string: missing postgres:// prefix"))?; + + // Find the host - it comes after optional user:pass@ and before /database or ?params + let after_auth = if let Some(at_pos) = without_protocol.find('@') { + &without_protocol[at_pos + 1..] + } else { + without_protocol + }; + + // Extract just the host (before any / or ?) + let host = after_auth + .split('/') + .next() + .and_then(|s| s.split('?').next()) + .ok_or_else(|| anyhow::anyhow!("Invalid connection string: couldn't extract host"))?; + + Ok(format!("https://{}/sql", host)) + } + /// Execute a SQL query via Neon's HTTP API async fn execute(&self, query: &str, params: &[Value]) -> Result> { // Neon serverless driver uses a specific HTTP endpoint // Format: https:///sql - // Must handle query parameters properly (e.g., ?sslmode=require) - let base_url = self.connection_string - .replace("postgres://", "https://") - .replace("postgresql://", "https://"); - - // Parse URL to properly insert /sql before query string - let url = if let Some(query_start) = base_url.find('?') { - // Insert /sql before the query string - format!("{}/sql{}", &base_url[..query_start], &base_url[query_start..]) - } else { - // No query string, just append /sql - format!("{}/sql", base_url) - }; + // Connection string format: postgres://user:pass@host/database?params + // We need to extract just the host and use https://host/sql + let url = self.build_neon_api_url()?; let body = serde_json::json!({ "query": query, From fb455ea3022866115e650fc4a34799486f7ae8e2 Mon Sep 17 00:00:00 2001 From: Claude Date: Fri, 6 Feb 2026 07:42:32 +0000 Subject: [PATCH 12/18] fix: Fix Vercel deployment and redesign landing page - Fix font loading issue by using CSS-based Google Fonts instead of next/font - Update Supabase clients to handle missing env vars gracefully during build - Completely redesign landing page with modern aesthetic: - Interactive mouse-following gradient background - Bento grid layout for features section - Dashboard preview mockup in hero - Improved animations and micro-interactions - New violet/fuchsia/cyan color scheme - Platform stats and status indicators - Update globals.css with new design system tokens and utilities https://claude.ai/code/session_01QYNePFqXFj4AaRtTDJVe6J --- webclient/src/app/auth/confirm/route.ts | 4 + webclient/src/app/globals.css | 198 +++-- webclient/src/app/layout.tsx | 10 +- webclient/src/components/CustomAuthForm.tsx | 102 ++- webclient/src/components/LandingPage.tsx | 897 +++++++++++--------- webclient/src/contexts/UserContext.tsx | 18 +- webclient/src/lib/supabase/client.ts | 27 +- webclient/src/lib/supabase/server.ts | 15 +- 8 files changed, 768 insertions(+), 503 deletions(-) diff 
--git a/webclient/src/app/auth/confirm/route.ts b/webclient/src/app/auth/confirm/route.ts index 46cee4c..18a0eb7 100644 --- a/webclient/src/app/auth/confirm/route.ts +++ b/webclient/src/app/auth/confirm/route.ts @@ -50,6 +50,10 @@ export async function GET(request: NextRequest) { const supabase = await createClient() + if (!supabase) { + redirect('/login?message=' + encodeURIComponent('Authentication service unavailable')) + } + const { data, error } = await supabase.auth.verifyOtp({ type, token_hash, diff --git a/webclient/src/app/globals.css b/webclient/src/app/globals.css index e66c483..14dbfb2 100644 --- a/webclient/src/app/globals.css +++ b/webclient/src/app/globals.css @@ -4,27 +4,26 @@ @layer base { :root { - --background: 220 27% 18%; + --background: 0 0% 4%; --foreground: 0 0% 98%; - --card: 220 27% 15%; + --card: 0 0% 6%; --card-foreground: 0 0% 98%; - --popover: 220 27% 15%; + --popover: 0 0% 6%; --popover-foreground: 0 0% 98%; - --primary: 239 84% 67%; + --primary: 263 70% 50%; --primary-foreground: 0 0% 98%; - --secondary: 220 27% 20%; + --secondary: 0 0% 10%; --secondary-foreground: 0 0% 98%; - --muted: 220 27% 20%; - --muted-foreground: 215 20.2% 65.1%; - --accent: 220 27% 20%; + --muted: 0 0% 15%; + --muted-foreground: 0 0% 60%; + --accent: 263 70% 50%; --accent-foreground: 0 0% 98%; --destructive: 0 84% 60%; --destructive-foreground: 0 0% 98%; - --border: 220 27% 20%; - --input: 220 27% 20%; - --ring: 239 84% 67%; + --border: 0 0% 15%; + --input: 0 0% 15%; + --ring: 263 70% 50%; --radius: 0.75rem; - --font-inter: 'Inter', sans-serif; --sidebar-background: 0 0% 98%; --sidebar-foreground: 240 5.3% 26.1%; --sidebar-primary: 240 5.9% 10%; @@ -40,109 +39,194 @@ * { @apply border-border; } + + html { + scroll-behavior: smooth; + } + body { - @apply bg-background text-foreground; - background: #1e293b; - background-attachment: fixed; + @apply bg-background text-foreground antialiased; + font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Fira Sans', 'Droid Sans', 'Helvetica Neue', sans-serif; + background: #0a0a0f; min-height: 100vh; - font-family: var(--font-inter); + } + + ::selection { + background: rgba(139, 92, 246, 0.3); + color: white; } } @layer components { + /* Card styles */ .card-dark { - background: #1a2332; + background: rgba(255, 255, 255, 0.03); backdrop-filter: blur(10px); - border: 1px solid rgba(51, 65, 85, 0.3); + border: 1px solid rgba(255, 255, 255, 0.08); } .sidebar-dark { - background: rgba(15, 23, 42, 0.8); - backdrop-filter: blur(10px); - border-right: 1px solid rgba(51, 65, 85, 0.3); + background: rgba(10, 10, 15, 0.9); + backdrop-filter: blur(20px); + border-right: 1px solid rgba(255, 255, 255, 0.05); } .glass-effect { - background: #1a2332; + background: rgba(255, 255, 255, 0.03); backdrop-filter: blur(10px); - border: 1px solid rgba(51, 65, 85, 0.2); + border: 1px solid rgba(255, 255, 255, 0.08); } + /* Gradient button */ .gradient-button { - background: linear-gradient(135deg, #6366f1 0%, #8b5cf6 50%, #a855f7 100%); + background: linear-gradient(135deg, #8b5cf6 0%, #d946ef 50%, #ec4899 100%); } .gradient-button:hover { - background: linear-gradient(135deg, #5b21b6 0%, #7c3aed 50%, #9333ea 100%); + background: linear-gradient(135deg, #7c3aed 0%, #c026d3 50%, #db2777 100%); } - .animate-pulse-slow { - animation: pulse 3s cubic-bezier(0.4, 0, 0.6, 1) infinite; + /* Text gradients */ + .gradient-text { + background: linear-gradient(135deg, #a78bfa 0%, #f472b6 50%, #22d3ee 100%); + 
-webkit-background-clip: text; + -webkit-text-fill-color: transparent; + background-clip: text; } - .animate-float { - animation: float 6s ease-in-out infinite; + /* Chart gradient */ + .chart-gradient { + background: linear-gradient(180deg, rgba(139, 92, 246, 0.2) 0%, rgba(139, 92, 246, 0.05) 100%); + } + + /* Glow effects */ + .animate-glow { + animation: glow 2s ease-in-out infinite alternate; + } + + @keyframes glow { + from { + box-shadow: 0 0 20px rgba(139, 92, 246, 0.3); + } + to { + box-shadow: 0 0 40px rgba(139, 92, 246, 0.5); + } + } + + /* Animations */ + .animate-fade-in { + animation: fadeIn 0.6s ease-out forwards; } .animate-slide-up { animation: slideUp 0.6s ease-out forwards; + opacity: 0; } - .animate-fade-in { - animation: fadeIn 0.5s ease-out forwards; + .animate-float { + animation: float 6s ease-in-out infinite; } - @keyframes float { - 0%, - 100% { - transform: translateY(0px); - } - 50% { - transform: translateY(-10px); - } + .animate-pulse-slow { + animation: pulse 3s cubic-bezier(0.4, 0, 0.6, 1) infinite; } - @keyframes slideUp { + @keyframes fadeIn { from { opacity: 0; - transform: translateY(20px); } to { opacity: 1; - transform: translateY(0); } } - @keyframes fadeIn { + @keyframes slideUp { from { opacity: 0; + transform: translateY(30px); } to { opacity: 1; + transform: translateY(0); } } - .animate-glow { - animation: glow 2s ease-in-out infinite alternate; + @keyframes float { + 0%, 100% { + transform: translateY(0px); + } + 50% { + transform: translateY(-10px); + } } - @keyframes glow { - from { - box-shadow: 0 0 20px rgba(99, 102, 241, 0.3); + /* Delay utilities for staggered animations */ + .delay-100 { animation-delay: 100ms; } + .delay-200 { animation-delay: 200ms; } + .delay-300 { animation-delay: 300ms; } + .delay-500 { animation-delay: 500ms; } + .delay-700 { animation-delay: 700ms; } + .delay-1000 { animation-delay: 1000ms; } + + /* Scrollbar styling */ + ::-webkit-scrollbar { + width: 8px; + height: 8px; + } + + ::-webkit-scrollbar-track { + background: rgba(255, 255, 255, 0.03); + } + + ::-webkit-scrollbar-thumb { + background: rgba(255, 255, 255, 0.1); + border-radius: 4px; + } + + ::-webkit-scrollbar-thumb:hover { + background: rgba(255, 255, 255, 0.2); + } + + /* Focus styles */ + .focus-ring { + @apply focus:outline-none focus:ring-2 focus:ring-violet-500/50 focus:ring-offset-2 focus:ring-offset-[#0a0a0f]; + } + + /* Shimmer effect for loading states */ + .shimmer { + background: linear-gradient( + 90deg, + rgba(255, 255, 255, 0) 0%, + rgba(255, 255, 255, 0.05) 50%, + rgba(255, 255, 255, 0) 100% + ); + background-size: 200% 100%; + animation: shimmer 1.5s infinite; + } + + @keyframes shimmer { + 0% { + background-position: -200% 0; } - to { - box-shadow: 0 0 30px rgba(99, 102, 241, 0.5); + 100% { + background-position: 200% 0; } } +} - .chart-gradient { - background: linear-gradient(180deg, rgba(99, 102, 241, 0.2) 0%, rgba(99, 102, 241, 0.05) 100%); +/* Utility classes */ +@layer utilities { + .text-balance { + text-wrap: balance; } - .gradient-text { - background: linear-gradient(135deg, #6366f1 0%, #8b5cf6 50%, #a855f7 100%); - -webkit-background-clip: text; - -webkit-text-fill-color: transparent; - background-clip: text; + .bg-grid { + background-image: linear-gradient(rgba(255, 255, 255, 0.03) 1px, transparent 1px), + linear-gradient(90deg, rgba(255, 255, 255, 0.03) 1px, transparent 1px); + background-size: 50px 50px; + } + + .bg-radial-gradient { + background: radial-gradient(ellipse at center, rgba(139, 92, 246, 0.15) 0%, transparent 
70%); } } diff --git a/webclient/src/app/layout.tsx b/webclient/src/app/layout.tsx index 73953bf..c4700ef 100644 --- a/webclient/src/app/layout.tsx +++ b/webclient/src/app/layout.tsx @@ -1,11 +1,8 @@ import type React from "react" import type { Metadata } from "next" -import { Inter } from "next/font/google" import "./globals.css" import ClientLayout from "./ClientLayout" -const inter = Inter({ subsets: ["latin"] }) - export const metadata: Metadata = { title: "Loopd - Digital Wellness Platform", description: "Take control of your digital habits with intelligent tracking and goal-driven blocking.", @@ -18,7 +15,12 @@ export default function RootLayout({ }) { return ( - + + + + + + {children} diff --git a/webclient/src/components/CustomAuthForm.tsx b/webclient/src/components/CustomAuthForm.tsx index 27db7e6..be26b22 100644 --- a/webclient/src/components/CustomAuthForm.tsx +++ b/webclient/src/components/CustomAuthForm.tsx @@ -1,24 +1,47 @@ 'use client'; -import { useState, useEffect, Suspense } from 'react'; +import { useState, useEffect, Suspense, useMemo } from 'react'; import { useSearchParams } from 'next/navigation'; import { createClient } from '@/lib/supabase/client'; import { Button } from '@/components/ui/button'; import { Input } from '@/components/ui/input'; import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui/card'; -import { Shield, Mail, Lock, Eye, EyeOff } from 'lucide-react'; +import { Shield, Mail, Lock, Eye, EyeOff, AlertTriangle } from 'lucide-react'; import { posthog } from '@/lib/posthog'; -const supabase = createClient(); - interface ValidationErrors { email?: string; password?: string; confirmPassword?: string; } +function AuthUnavailable() { + return ( +
+    <div className="min-h-screen flex items-center justify-center p-4">
+      <Card className="w-full max-w-md">
+        <CardHeader className="text-center">
+          <div className="mx-auto mb-2 flex h-12 w-12 items-center justify-center rounded-full bg-yellow-500/10">
+            <AlertTriangle className="h-6 w-6 text-yellow-500" />
+          </div>
+          <CardTitle>Authentication Unavailable</CardTitle>
+          <CardDescription>
+            Authentication services are currently unavailable. Please try again later or download the desktop app.
+          </CardDescription>
+        </CardHeader>
+      </Card>
+    </div>
+ ); +} + function CustomAuthFormInner() { const searchParams = useSearchParams(); + const supabase = useMemo(() => createClient(), []); const [email, setEmail] = useState(''); const [password, setPassword] = useState(''); const [confirmPassword, setConfirmPassword] = useState(''); @@ -42,12 +65,12 @@ function CustomAuthFormInner() { // Real-time validation useEffect(() => { const errors: ValidationErrors = {}; - + // Email validation if (email && !/^[^\s@]+@[^\s@]+\.[^\s@]+$/.test(email)) { errors.email = 'Please enter a valid email address'; } - + // Password validation if (password) { if (password.length < 6) { @@ -56,15 +79,20 @@ function CustomAuthFormInner() { errors.password = 'Password must contain uppercase, lowercase, and number'; } } - + // Confirm password validation (only for sign-up) if (mode === 'sign-up' && confirmPassword && password !== confirmPassword) { errors.confirmPassword = 'Passwords do not match'; } - + setValidationErrors(errors); }, [email, password, confirmPassword, mode]); + // If Supabase is not available, show a message + if (!supabase) { + return ; + } + const handleModeToggle = () => { setIsTransitioning(true); setError(''); @@ -73,7 +101,7 @@ function CustomAuthFormInner() { setEmail(''); setPassword(''); setConfirmPassword(''); - + setTimeout(() => { setMode(mode === 'sign-in' ? 'sign-up' : 'sign-in'); setIsTransitioning(false); @@ -82,35 +110,35 @@ function CustomAuthFormInner() { const handleAuth = async (e: React.FormEvent) => { e.preventDefault(); - + // Check for validation errors if (Object.keys(validationErrors).length > 0) { setError('Please fix the validation errors above'); return; } - + setLoading(true); setError(''); setSuccess(''); - + try { if (mode === 'sign-in') { const { error } = await supabase.auth.signInWithPassword({ email, password }); if (error) { - posthog?.capture('login_error', { + posthog?.capture('login_error', { error_message: error.message, method: 'email_password' }); setError(getErrorMessage(error.message)); } else { - posthog?.capture('login_success', { + posthog?.capture('login_success', { method: 'email_password' }); setSuccess('Signed in successfully!'); } } else { - const { error } = await supabase.auth.signUp({ - email, + const { error } = await supabase.auth.signUp({ + email, password, options: { emailRedirectTo: `${window.location.origin}/auth/confirm`, @@ -120,20 +148,20 @@ function CustomAuthFormInner() { } }); if (error) { - posthog?.capture('signup_error', { + posthog?.capture('signup_error', { error_message: error.message, method: 'email_password' }); setError(getErrorMessage(error.message)); } else { - posthog?.capture('signup_success', { + posthog?.capture('signup_success', { method: 'email_password' }); setSuccess('Check your email to verify your account!'); } } } catch (err) { - posthog?.capture('error_occurred', { + posthog?.capture('error_occurred', { error_type: 'authentication_exception', error_message: err instanceof Error ? err.message : 'Unknown error', method: mode === 'sign-in' ? 
'email_password_login' : 'email_password_signup' @@ -147,7 +175,7 @@ function CustomAuthFormInner() { const handleSocialLogin = async (provider: 'google' | 'apple') => { setLoading(true); setError(''); - + try { const { error } = await supabase.auth.signInWithOAuth({ provider, @@ -155,20 +183,20 @@ function CustomAuthFormInner() { redirectTo: `${window.location.origin}/` } }); - + if (error) { - posthog?.capture('login_error', { + posthog?.capture('login_error', { error_message: error.message, method: `oauth_${provider}` }); setError(getErrorMessage(error.message)); } else { - posthog?.capture('login_success', { + posthog?.capture('login_success', { method: `oauth_${provider}` }); } } catch (err) { - posthog?.capture('error_occurred', { + posthog?.capture('error_occurred', { error_type: 'social_login_exception', error_message: err instanceof Error ? err.message : 'Unknown error', method: `oauth_${provider}` @@ -187,13 +215,13 @@ function CustomAuthFormInner() { 'Password should be at least 6 characters': 'Password must be at least 6 characters long.', 'Unable to validate email address: invalid format': 'Please enter a valid email address.' }; - + return errorMap[message] || message; }; - const isFormValid = Object.keys(validationErrors).length === 0 && - email && - password && + const isFormValid = Object.keys(validationErrors).length === 0 && + email && + password && (mode === 'sign-in' || (mode === 'sign-up' && confirmPassword)); return ( @@ -216,14 +244,14 @@ function CustomAuthFormInner() { - + {error && (

            <div className="text-sm text-red-400">
              {error}
            </div>
          )}
-
+
          {success && (
            <div className="text-sm text-green-400">
              {success}
            </div>
          )}
@@ -282,8 +310,8 @@ function CustomAuthFormInner() { type="email" placeholder="Email address" className={`w-full pl-10 bg-white/5 text-white border transition-all duration-200 focus:outline-none focus:ring-2 focus:ring-blue-500/50 placeholder:text-gray-400 text-base ${ - validationErrors.email - ? 'border-red-500/50 focus:border-red-500/50' + validationErrors.email + ? 'border-red-500/50 focus:border-red-500/50' : 'border-white/10 focus:border-blue-500/50' }`} value={email} @@ -311,8 +339,8 @@ function CustomAuthFormInner() { type={showPassword ? 'text' : 'password'} placeholder="Password" className={`w-full pl-10 pr-10 bg-white/5 text-white border transition-all duration-200 focus:outline-none focus:ring-2 focus:ring-blue-500/50 placeholder:text-gray-400 text-base ${ - validationErrors.password - ? 'border-red-500/50 focus:border-red-500/50' + validationErrors.password + ? 'border-red-500/50 focus:border-red-500/50' : 'border-white/10 focus:border-blue-500/50' }`} value={password} @@ -348,8 +376,8 @@ function CustomAuthFormInner() { type={showConfirmPassword ? 'text' : 'password'} placeholder="Confirm password" className={`w-full pl-10 pr-10 bg-white/5 text-white border transition-all duration-200 focus:outline-none focus:ring-2 focus:ring-blue-500/50 placeholder:text-gray-400 text-base ${ - validationErrors.confirmPassword - ? 'border-red-500/50 focus:border-red-500/50' + validationErrors.confirmPassword + ? 'border-red-500/50 focus:border-red-500/50' : 'border-white/10 focus:border-blue-500/50' }`} value={confirmPassword} @@ -426,4 +454,4 @@ export default function CustomAuthForm() { ); -} \ No newline at end of file +} diff --git a/webclient/src/components/LandingPage.tsx b/webclient/src/components/LandingPage.tsx index b1fbc9e..913d081 100644 --- a/webclient/src/components/LandingPage.tsx +++ b/webclient/src/components/LandingPage.tsx @@ -1,8 +1,7 @@ "use client"; -import { useState } from "react" +import { useState, useEffect } from "react" import { useRouter } from 'next/navigation'; -import { useEffect } from 'react'; import { useUser } from '@/contexts/UserContext'; import { usePostHog } from '@/hooks/usePostHog'; import { Button } from "@/components/ui/button" @@ -11,29 +10,30 @@ import { Badge } from "@/components/ui/badge" import { Input } from "@/components/ui/input" import { Textarea } from "@/components/ui/textarea" import { Label } from "@/components/ui/label" -import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select" import { Shield, Target, BarChart3, - Users, Smartphone, Monitor, Clock, TrendingUp, CheckCircle, - Star, ArrowRight, Menu, X, - AlertTriangle, Mail, - MessageSquare, Send, Download, UserCheck, Zap, - Heart, + Sparkles, + Eye, + Lock, + Activity, + Layers, + Globe, + Timer, } from "lucide-react" export default function LandingPage() { @@ -46,6 +46,23 @@ export default function LandingPage() { email: '', message: '' }) + const [mousePosition, setMousePosition] = useState({ x: 0, y: 0 }); + const [scrollY, setScrollY] = useState(0); + + // Track mouse for interactive effects + useEffect(() => { + const handleMouseMove = (e: MouseEvent) => { + setMousePosition({ x: e.clientX, y: e.clientY }); + }; + const handleScroll = () => setScrollY(window.scrollY); + + window.addEventListener('mousemove', handleMouseMove); + window.addEventListener('scroll', handleScroll); + return () => { + window.removeEventListener('mousemove', handleMouseMove); + window.removeEventListener('scroll', handleScroll); + }; + }, []); // Redirect 
authenticated users to dashboard useEffect(() => { @@ -57,10 +74,14 @@ export default function LandingPage() { // Show loading state while checking authentication if (loading) { return ( -
-      <div className="min-h-screen flex items-center justify-center">
-        <div className="animate-spin rounded-full h-8 w-8 border-b-2 border-indigo-500"></div>
-        <p className="ml-3 text-gray-400">Loading...</p>
-      </div>
+      <div className="min-h-screen bg-[#0a0a0f] flex items-center justify-center">
+        <div className="flex flex-col items-center gap-4">
+          <div className="relative h-10 w-10">
+            <div className="absolute inset-0 rounded-full border-2 border-violet-500/20"></div>
+            <div className="absolute inset-0 animate-spin rounded-full border-2 border-transparent border-t-violet-500"></div>
+          </div>
+          <span className="text-sm text-gray-400">Loading</span>
+        </div>
+      </div>
); @@ -77,7 +98,7 @@ export default function LandingPage() { source: source || "landing_page", page_name: "landing", }) - + if (page === "signin") { router.push('/login'); } else if (page === "signup") { @@ -110,14 +131,13 @@ export default function LandingPage() { action: 'submit', page_name: "landing", }) - - // Submit to Formspree (replace with your endpoint) + const formData = new FormData(); formData.append('name', contactForm.name); formData.append('email', contactForm.email); formData.append('message', contactForm.message); formData.append('formType', 'general_contact'); - + fetch('https://formspree.io/f/YOUR_FORMSPREE_ENDPOINT', { method: 'POST', body: formData, @@ -140,46 +160,70 @@ export default function LandingPage() { } return ( -
+    <div className="min-h-screen bg-[#0a0a0f] text-white overflow-x-hidden">
+      {/* Animated Background */}
+      <div className="fixed inset-0 pointer-events-none">
+        <div
+          className="absolute h-[500px] w-[500px] rounded-full bg-violet-600/20 blur-3xl"
+          style={{ left: mousePosition.x - 250, top: mousePosition.y - 250 }}
+        />
+        <div className="absolute top-0 left-1/4 h-96 w-96 rounded-full bg-fuchsia-600/10 blur-3xl animate-float" />
+        <div className="absolute bottom-0 right-1/4 h-96 w-96 rounded-full bg-cyan-600/10 blur-3xl animate-float" />
+      </div>
+
+      {/* Grid Pattern Overlay */}
+      <div className="fixed inset-0 bg-grid pointer-events-none" />
+
+      {/* Navigation */}
-