From 1a49b82eb8ed0dcb09c5e006d8f47e9d6e48b8cb Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 13:57:09 -0700 Subject: [PATCH 01/22] feat(cloud): add cloud feature flag and dependencies Add optional dependencies for the Term Cloud SDK behind a `cloud` feature flag: - reqwest: HTTP client for API communication - ring: Cryptographic operations - rusqlite: Local SQLite storage for metrics buffering - directories: Cross-platform app directory detection Users who don't need cloud functionality won't have to compile these dependencies. Co-Authored-By: Claude Opus 4.5 --- Cargo.lock | 124 ++++++++++++++++++++++++++++++++++++++++++ term-guard/Cargo.toml | 10 +++- 2 files changed, 133 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 4de40d9..772e2b0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1705,6 +1705,27 @@ dependencies = [ "subtle", ] +[[package]] +name = "directories" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + [[package]] name = "displaydoc" version = "0.2.5" @@ -1722,6 +1743,15 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + [[package]] name = "enum-as-inner" version = "0.6.1" @@ -2199,6 +2229,23 @@ dependencies = [ "tokio", "tokio-rustls", 
"tower-service", + "webpki-roots", +] + +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", ] [[package]] @@ -2220,9 +2267,11 @@ dependencies = [ "percent-encoding", "pin-project-lite", "socket2 0.6.1", + "system-configuration", "tokio", "tower-service", "tracing", + "windows-registry", ] [[package]] @@ -2573,6 +2622,7 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" dependencies = [ + "cc", "pkg-config", "vcpkg", ] @@ -2699,6 +2749,12 @@ version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + [[package]] name = "miniz_oxide" version = "0.8.9" @@ -3054,6 +3110,12 @@ dependencies = [ "tokio-stream", ] +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + [[package]] name = "ordered-float" version = "2.10.1" @@ -3633,6 +3695,17 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.16", + "libredox", + "thiserror 1.0.69", +] + [[package]] name = "regex" version = "1.12.2" @@ -3679,6 +3752,7 @@ checksum = 
"eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ "base64", "bytes", + "encoding_rs", "futures-core", "futures-util", "h2", @@ -3687,9 +3761,12 @@ dependencies = [ "http-body-util", "hyper", "hyper-rustls", + "hyper-tls", "hyper-util", "js-sys", "log", + "mime", + "native-tls", "percent-encoding", "pin-project-lite", "quinn", @@ -3701,6 +3778,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper", "tokio", + "tokio-native-tls", "tokio-rustls", "tokio-util", "tower", @@ -3711,6 +3789,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", + "webpki-roots", ] [[package]] @@ -4326,6 +4405,27 @@ dependencies = [ "syn 2.0.114", ] +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags", + "core-foundation 0.9.4", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tap" version = "1.0.1" @@ -4372,6 +4472,7 @@ dependencies = [ "criterion", "datafusion", "datafusion-table-providers", + "directories", "futures", "glob", "hex", @@ -4384,6 +4485,9 @@ dependencies = [ "proptest", "rand 0.9.2", "regex", + "reqwest", + "ring", + "rusqlite", "serde", "serde_json", "sha2", @@ -5120,6 +5224,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12bed680863276c63889429bfd6cab3b99943659923822de1c8a39c49e4d722c" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "whoami" version = "1.6.1" @@ -5209,6 +5322,17 @@ version = "0.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +[[package]] +name = "windows-registry" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720" +dependencies = [ + "windows-link", + "windows-result", + "windows-strings", +] + [[package]] name = "windows-result" version = "0.4.1" diff --git a/term-guard/Cargo.toml b/term-guard/Cargo.toml index c29278b..8f6fd59 100644 --- a/term-guard/Cargo.toml +++ b/term-guard/Cargo.toml @@ -28,11 +28,15 @@ postgres = ["database", "datafusion-table-providers/postgres"] mysql = ["database", "datafusion-table-providers/mysql"] sqlite = ["database", "datafusion-table-providers/sqlite"] all-databases = ["postgres", "mysql", "sqlite"] +# Cloud storage features (S3, GCS, Azure) all-cloud = ["s3", "gcs", "azure"] azure = ["cloud-storage", "object_store/azure"] cloud-storage = ["dep:object_store", "dep:url"] gcs = ["cloud-storage", "object_store/gcp"] s3 = ["cloud-storage", "object_store/aws"] +# Term Cloud SDK +cloud = ["dep:directories", "dep:reqwest", "dep:ring", "dep:rusqlite"] +# Observability telemetry = ["dep:tracing-opentelemetry", "dep:opentelemetry", "dep:opentelemetry_sdk"] test-utils = ["dep:rand", "dep:parquet"] @@ -42,8 +46,9 @@ async-trait = "0.1" base64 = "0.22" chrono = {version = "0.4", features = ["serde"]} datafusion = "50.3" -futures = "0.3" datafusion-table-providers = { version = "0.8.2", optional = true } +directories = { version = "5.0", optional = true } +futures = "0.3" glob = "0.3" hex = "0.4" num_cpus = "1.16" @@ -54,6 +59,9 @@ opentelemetry_sdk = {version = "0.31", optional = true} parquet = {version = "56.2", optional = true} rand = {version = "0.9", optional = true} regex = "1" +reqwest = { version = "0.12", features = ["json", "rustls-tls"], optional = true } +ring = { version = "0.17", optional = true } +rusqlite = 
{ version = "0.32", features = ["bundled"], optional = true } serde = {version = "1", features = ["derive"]} serde_json = "1" sha2 = "0.10" From e48e946462df8e7586398993ab0341bc2c20b168 Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 14:08:40 -0700 Subject: [PATCH 02/22] feat(cloud): add CloudConfig with builder pattern Add the CloudConfig struct for configuring Term Cloud SDK connections. This includes: - Builder pattern with sensible defaults - Configurable endpoint, timeout, max retries, buffer size - Support for batch uploads with configurable batch size - Flush interval for background uploads - Optional offline cache path Co-Authored-By: Claude Opus 4.5 --- term-guard/src/cloud/mod.rs | 8 ++ term-guard/src/cloud/types.rs | 144 ++++++++++++++++++++++++++++++++++ term-guard/src/lib.rs | 2 + 3 files changed, 154 insertions(+) create mode 100644 term-guard/src/cloud/mod.rs create mode 100644 term-guard/src/cloud/types.rs diff --git a/term-guard/src/cloud/mod.rs b/term-guard/src/cloud/mod.rs new file mode 100644 index 0000000..3c8d930 --- /dev/null +++ b/term-guard/src/cloud/mod.rs @@ -0,0 +1,8 @@ +//! Term Cloud SDK for metrics persistence and observability. +//! +//! This module provides integration with the Term Cloud platform, +//! enabling centralized metrics storage, alerting, and historical analysis. + +mod types; + +pub use types::CloudConfig; diff --git a/term-guard/src/cloud/types.rs b/term-guard/src/cloud/types.rs new file mode 100644 index 0000000..ce99ed5 --- /dev/null +++ b/term-guard/src/cloud/types.rs @@ -0,0 +1,144 @@ +use std::time::Duration; + +use serde::{Deserialize, Serialize}; + +/// Configuration for connecting to Term Cloud. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CloudConfig { + api_key: String, + endpoint: String, + timeout: Duration, + max_retries: u32, + buffer_size: usize, + batch_size: usize, + flush_interval: Duration, + offline_cache_path: Option, +} + +impl CloudConfig { + /// Create a new CloudConfig with the given API key. + pub fn new(api_key: impl Into) -> Self { + Self { + api_key: api_key.into(), + endpoint: "https://api.term.dev".to_string(), + timeout: Duration::from_secs(30), + max_retries: 3, + buffer_size: 1000, + batch_size: 100, + flush_interval: Duration::from_secs(5), + offline_cache_path: None, + } + } + + /// Set a custom API endpoint. + pub fn with_endpoint(mut self, endpoint: impl Into) -> Self { + self.endpoint = endpoint.into(); + self + } + + /// Set the HTTP request timeout. + pub fn with_timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } + + /// Set the maximum number of retry attempts. + pub fn with_max_retries(mut self, max_retries: u32) -> Self { + self.max_retries = max_retries; + self + } + + /// Set the in-memory buffer size (number of metrics). + pub fn with_buffer_size(mut self, buffer_size: usize) -> Self { + self.buffer_size = buffer_size; + self + } + + /// Set the batch size for uploads. + pub fn with_batch_size(mut self, batch_size: usize) -> Self { + self.batch_size = batch_size; + self + } + + /// Set the flush interval for background uploads. + pub fn with_flush_interval(mut self, interval: Duration) -> Self { + self.flush_interval = interval; + self + } + + /// Set a custom path for offline cache storage. + pub fn with_offline_cache_path(mut self, path: impl Into) -> Self { + self.offline_cache_path = Some(path.into()); + self + } + + /// Get the API key. + pub fn api_key(&self) -> &str { + &self.api_key + } + + /// Get the API endpoint. + pub fn endpoint(&self) -> &str { + &self.endpoint + } + + /// Get the HTTP request timeout. 
+ pub fn timeout(&self) -> Duration { + self.timeout + } + + /// Get the maximum number of retry attempts. + pub fn max_retries(&self) -> u32 { + self.max_retries + } + + /// Get the in-memory buffer size. + pub fn buffer_size(&self) -> usize { + self.buffer_size + } + + /// Get the batch size for uploads. + pub fn batch_size(&self) -> usize { + self.batch_size + } + + /// Get the flush interval. + pub fn flush_interval(&self) -> Duration { + self.flush_interval + } + + /// Get the offline cache path. + pub fn offline_cache_path(&self) -> Option<&str> { + self.offline_cache_path.as_deref() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_cloud_config_default() { + let config = CloudConfig::new("test-api-key"); + + assert_eq!(config.api_key(), "test-api-key"); + assert_eq!(config.endpoint(), "https://api.term.dev"); + assert_eq!(config.timeout(), Duration::from_secs(30)); + assert_eq!(config.max_retries(), 3); + assert_eq!(config.buffer_size(), 1000); + } + + #[test] + fn test_cloud_config_builder() { + let config = CloudConfig::new("key") + .with_endpoint("https://custom.endpoint") + .with_timeout(Duration::from_secs(60)) + .with_max_retries(5) + .with_buffer_size(5000); + + assert_eq!(config.endpoint(), "https://custom.endpoint"); + assert_eq!(config.timeout(), Duration::from_secs(60)); + assert_eq!(config.max_retries(), 5); + assert_eq!(config.buffer_size(), 5000); + } +} diff --git a/term-guard/src/lib.rs b/term-guard/src/lib.rs index 4c2a859..952c791 100644 --- a/term-guard/src/lib.rs +++ b/term-guard/src/lib.rs @@ -166,6 +166,8 @@ //! 
``` pub mod analyzers; +#[cfg(feature = "cloud")] +pub mod cloud; pub mod constraints; pub mod core; pub mod error; From fc95b8434014e66fa6b2e68e693f8ee91976a39a Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 14:23:48 -0700 Subject: [PATCH 03/22] fix(cloud): use SecureString for API key security - Replace plain String with SecureString for api_key field to prevent credential leakage in logs via Debug output - Remove Serialize/Deserialize derives to prevent accidental serialization of secrets - Change offline_cache_path from Option to Option for type-safe path handling - Update api_key getter to return &SecureString with documented expose() usage - Add tests to verify API key is not leaked in debug output Co-Authored-By: Claude Opus 4.5 --- term-guard/src/cloud/types.rs | 49 ++++++++++++++++++++++++++++------- 1 file changed, 39 insertions(+), 10 deletions(-) diff --git a/term-guard/src/cloud/types.rs b/term-guard/src/cloud/types.rs index ce99ed5..883d63a 100644 --- a/term-guard/src/cloud/types.rs +++ b/term-guard/src/cloud/types.rs @@ -1,25 +1,26 @@ +use std::path::{Path, PathBuf}; use std::time::Duration; -use serde::{Deserialize, Serialize}; +use crate::security::SecureString; /// Configuration for connecting to Term Cloud. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone)] pub struct CloudConfig { - api_key: String, + api_key: SecureString, endpoint: String, timeout: Duration, max_retries: u32, buffer_size: usize, batch_size: usize, flush_interval: Duration, - offline_cache_path: Option, + offline_cache_path: Option, } impl CloudConfig { /// Create a new CloudConfig with the given API key. pub fn new(api_key: impl Into) -> Self { Self { - api_key: api_key.into(), + api_key: SecureString::new(api_key.into()), endpoint: "https://api.term.dev".to_string(), timeout: Duration::from_secs(30), max_retries: 3, @@ -67,13 +68,17 @@ impl CloudConfig { } /// Set a custom path for offline cache storage. 
- pub fn with_offline_cache_path(mut self, path: impl Into) -> Self { - self.offline_cache_path = Some(path.into()); + pub fn with_offline_cache_path(mut self, path: impl AsRef) -> Self { + self.offline_cache_path = Some(path.as_ref().to_path_buf()); self } /// Get the API key. - pub fn api_key(&self) -> &str { + /// + /// # Security + /// Returns a reference to the secure string. Use `expose()` to access + /// the underlying value. Avoid storing or logging the exposed value. + pub fn api_key(&self) -> &SecureString { &self.api_key } @@ -108,7 +113,7 @@ impl CloudConfig { } /// Get the offline cache path. - pub fn offline_cache_path(&self) -> Option<&str> { + pub fn offline_cache_path(&self) -> Option<&Path> { self.offline_cache_path.as_deref() } } @@ -121,7 +126,7 @@ mod tests { fn test_cloud_config_default() { let config = CloudConfig::new("test-api-key"); - assert_eq!(config.api_key(), "test-api-key"); + assert_eq!(config.api_key().expose(), "test-api-key"); assert_eq!(config.endpoint(), "https://api.term.dev"); assert_eq!(config.timeout(), Duration::from_secs(30)); assert_eq!(config.max_retries(), 3); @@ -141,4 +146,28 @@ mod tests { assert_eq!(config.max_retries(), 5); assert_eq!(config.buffer_size(), 5000); } + + #[test] + fn test_api_key_not_leaked_in_debug() { + let secret_key = "super-secret-api-key-12345"; + let config = CloudConfig::new(secret_key); + + let debug_output = format!("{:?}", config); + + assert!( + !debug_output.contains(secret_key), + "API key should not appear in debug output" + ); + assert!( + debug_output.contains("SecureString(***)"), + "Debug output should show masked SecureString" + ); + } + + #[test] + fn test_offline_cache_path_with_pathbuf() { + let config = CloudConfig::new("key").with_offline_cache_path("/tmp/cache"); + + assert_eq!(config.offline_cache_path(), Some(Path::new("/tmp/cache"))); + } } From c0a1a4211a4b304b70e7a7b63c1ee3b9ab8bfd47 Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 14:57:14 -0700 
Subject: [PATCH 04/22] feat(cloud): add wire format types for cloud API Add serializable types for the Term Cloud wire protocol: - CloudMetric: Main type for transmitting metrics - CloudResultKey: Identifies a set of metrics with dataset date and tags - CloudMetricValue: Tagged union for metric values (double, long, string, boolean, histogram) - CloudHistogram and CloudHistogramBucket: Histogram data structures - CloudMetadata: Collection metadata including timestamps and term version - CloudValidationResult and CloudValidationIssue: Validation summary types All types derive Serialize/Deserialize for JSON serialization. Co-Authored-By: Claude Opus 4.5 --- term-guard/src/cloud/mod.rs | 5 +- term-guard/src/cloud/types.rs | 117 ++++++++++++++++++++++++++++++++++ 2 files changed, 121 insertions(+), 1 deletion(-) diff --git a/term-guard/src/cloud/mod.rs b/term-guard/src/cloud/mod.rs index 3c8d930..2759616 100644 --- a/term-guard/src/cloud/mod.rs +++ b/term-guard/src/cloud/mod.rs @@ -5,4 +5,7 @@ mod types; -pub use types::CloudConfig; +pub use types::{ + CloudConfig, CloudHistogram, CloudHistogramBucket, CloudMetadata, CloudMetric, + CloudMetricValue, CloudResultKey, CloudValidationIssue, CloudValidationResult, +}; diff --git a/term-guard/src/cloud/types.rs b/term-guard/src/cloud/types.rs index 883d63a..6dc5919 100644 --- a/term-guard/src/cloud/types.rs +++ b/term-guard/src/cloud/types.rs @@ -1,6 +1,9 @@ +use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::time::Duration; +use serde::{Deserialize, Serialize}; + use crate::security::SecureString; /// Configuration for connecting to Term Cloud. @@ -118,6 +121,93 @@ impl CloudConfig { } } +/// A metric ready for transmission to Term Cloud. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CloudMetric { + pub result_key: CloudResultKey, + pub metrics: HashMap, + pub metadata: CloudMetadata, + #[serde(skip_serializing_if = "Option::is_none")] + pub validation_result: Option, +} + +/// Key identifying a set of metrics. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct CloudResultKey { + pub dataset_date: i64, + pub tags: HashMap, +} + +/// A metric value in wire format. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "type", content = "value")] +pub enum CloudMetricValue { + #[serde(rename = "double")] + Double(f64), + #[serde(rename = "long")] + Long(i64), + #[serde(rename = "string")] + String(String), + #[serde(rename = "boolean")] + Boolean(bool), + #[serde(rename = "histogram")] + Histogram(CloudHistogram), +} + +/// Histogram data in wire format. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CloudHistogram { + pub buckets: Vec, + pub total_count: u64, + pub min: Option, + pub max: Option, + pub mean: Option, + pub std_dev: Option, +} + +/// A single histogram bucket. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CloudHistogramBucket { + pub lower_bound: f64, + pub upper_bound: f64, + pub count: u64, +} + +/// Metadata about the metrics collection. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CloudMetadata { + #[serde(skip_serializing_if = "Option::is_none")] + pub dataset_name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub start_time: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub end_time: Option, + pub term_version: String, + #[serde(default)] + pub custom: HashMap, +} + +/// Validation result summary. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CloudValidationResult { + pub status: String, + pub total_checks: usize, + pub passed_checks: usize, + pub failed_checks: usize, + pub issues: Vec, +} + +/// A single validation issue. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CloudValidationIssue { + pub check_name: String, + pub constraint_name: String, + pub level: String, + pub message: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub metric: Option, +} + #[cfg(test)] mod tests { use super::*; @@ -170,4 +260,31 @@ mod tests { assert_eq!(config.offline_cache_path(), Some(Path::new("/tmp/cache"))); } + + #[test] + fn test_cloud_metric_serialization() { + let metric = CloudMetric { + result_key: CloudResultKey { + dataset_date: 1704931200000, + tags: vec![("env".to_string(), "prod".to_string())] + .into_iter() + .collect(), + }, + metrics: vec![("completeness.id".to_string(), CloudMetricValue::Double(1.0))] + .into_iter() + .collect(), + metadata: CloudMetadata { + dataset_name: Some("orders".to_string()), + start_time: Some("2024-01-10T12:00:00Z".to_string()), + end_time: Some("2024-01-10T12:05:00Z".to_string()), + term_version: "0.0.2".to_string(), + custom: Default::default(), + }, + validation_result: None, + }; + + let json = serde_json::to_string(&metric).unwrap(); + assert!(json.contains("completeness.id")); + assert!(json.contains("1704931200000")); + } } From ddd2b7956c458a01aa0cd10584c3a1ecbb0ac01b Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 15:09:11 -0700 Subject: [PATCH 05/22] feat(cloud): add CloudError with retry semantics Add CloudError enum for Term Cloud operations with error variants for: - Authentication failures - Network errors - Rate limiting - Server errors - Request validation - Serialization issues - Buffer overflow - Cache errors - Configuration errors The key feature is the is_retryable() method which helps the upload worker decide whether to retry failed operations. Network errors, rate limiting, and server errors (5xx) are considered retryable. 
Co-Authored-By: Claude Opus 4.5 --- term-guard/src/cloud/error.rs | 101 ++++++++++++++++++++++++++++++++++ term-guard/src/cloud/mod.rs | 2 + 2 files changed, 103 insertions(+) create mode 100644 term-guard/src/cloud/error.rs diff --git a/term-guard/src/cloud/error.rs b/term-guard/src/cloud/error.rs new file mode 100644 index 0000000..d735d47 --- /dev/null +++ b/term-guard/src/cloud/error.rs @@ -0,0 +1,101 @@ +use thiserror::Error; + +/// Errors that can occur when interacting with Term Cloud. +#[derive(Debug, Error)] +pub enum CloudError { + /// Authentication failed (invalid or expired API key). + #[error("Authentication failed: {message}")] + Authentication { message: String }, + + /// Network error (connection failed, timeout, etc.). + #[error("Network error: {message}")] + Network { message: String }, + + /// Rate limited by the server. + #[error("Rate limited. Retry after {retry_after_secs:?} seconds")] + RateLimited { retry_after_secs: Option }, + + /// Server returned an error. + #[error("Server error ({status}): {message}")] + ServerError { status: u16, message: String }, + + /// Request validation failed. + #[error("Invalid request: {message}")] + InvalidRequest { message: String }, + + /// Serialization/deserialization error. + #[error("Serialization error: {message}")] + Serialization { message: String }, + + /// Buffer overflow (too many pending metrics). + #[error("Buffer overflow: {pending_count} metrics pending, max is {max_size}")] + BufferOverflow { + pending_count: usize, + max_size: usize, + }, + + /// Offline cache error. + #[error("Cache error: {message}")] + CacheError { message: String }, + + /// Configuration error. + #[error("Configuration error: {message}")] + Configuration { message: String }, +} + +impl CloudError { + /// Returns true if this error is transient and the operation should be retried. + pub fn is_retryable(&self) -> bool { + match self { + CloudError::Network { .. } => true, + CloudError::RateLimited { .. 
} => true, + CloudError::ServerError { status, .. } => *status >= 500, + _ => false, + } + } + + /// Returns the suggested retry delay in seconds, if available. + pub fn retry_after(&self) -> Option { + match self { + CloudError::RateLimited { retry_after_secs } => *retry_after_secs, + _ => None, + } + } +} + +/// Result type for cloud operations. +pub type CloudResult = std::result::Result; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_cloud_error_display() { + let err = CloudError::Authentication { + message: "Invalid API key".to_string(), + }; + assert!(err.to_string().contains("Invalid API key")); + } + + #[test] + fn test_cloud_error_is_retryable() { + assert!(!CloudError::Authentication { + message: "test".to_string() + } + .is_retryable()); + assert!(CloudError::Network { + message: "timeout".to_string() + } + .is_retryable()); + assert!(CloudError::RateLimited { + retry_after_secs: Some(60) + } + .is_retryable()); + assert!(CloudError::ServerError { + status: 500, + message: "internal".to_string() + } + .is_retryable()); + } +} diff --git a/term-guard/src/cloud/mod.rs b/term-guard/src/cloud/mod.rs index 2759616..b7175ee 100644 --- a/term-guard/src/cloud/mod.rs +++ b/term-guard/src/cloud/mod.rs @@ -3,8 +3,10 @@ //! This module provides integration with the Term Cloud platform, //! enabling centralized metrics storage, alerting, and historical analysis. 
+mod error; mod types; +pub use error::{CloudError, CloudResult}; pub use types::{ CloudConfig, CloudHistogram, CloudHistogramBucket, CloudMetadata, CloudMetric, CloudMetricValue, CloudResultKey, CloudValidationIssue, CloudValidationResult, From e046e36bfb44f3db24180398b7e5d4b5d961cbbb Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 15:19:09 -0700 Subject: [PATCH 06/22] feat(cloud): add HTTP client with HMAC signing Add TermCloudClient for communicating with the Term Cloud API: - HMAC-SHA256 request signing using the API key - Async HTTP operations using reqwest - Error mapping from HTTP status codes to CloudError variants - Methods for health check, ingest, query, and delete operations Co-Authored-By: Claude Opus 4.5 --- term-guard/src/cloud/client.rs | 239 +++++++++++++++++++++++++++++++++ term-guard/src/cloud/mod.rs | 4 + 2 files changed, 243 insertions(+) create mode 100644 term-guard/src/cloud/client.rs diff --git a/term-guard/src/cloud/client.rs b/term-guard/src/cloud/client.rs new file mode 100644 index 0000000..a55b8a7 --- /dev/null +++ b/term-guard/src/cloud/client.rs @@ -0,0 +1,239 @@ +use std::sync::Arc; + +use reqwest::Client; +use ring::hmac; +use serde::{Deserialize, Serialize}; + +use crate::cloud::{CloudConfig, CloudError, CloudMetric, CloudResult, CloudResultKey}; + +/// HTTP client for Term Cloud API. +#[derive(Clone)] +pub struct TermCloudClient { + config: Arc, + client: Client, + signing_key: hmac::Key, +} + +/// Response from the metrics ingestion endpoint. +#[derive(Debug, Deserialize)] +pub struct IngestResponse { + pub accepted: usize, + pub rejected: usize, + #[serde(default)] + pub errors: Vec, +} + +/// Response from the health check endpoint. +#[derive(Debug, Deserialize)] +pub struct HealthResponse { + pub status: String, + pub version: String, +} + +/// Query parameters for listing metrics. 
+#[derive(Debug, Default, Serialize)] +pub struct MetricsQuery { + #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub before: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub cursor: Option, + #[serde(flatten)] + pub tags: std::collections::HashMap, +} + +/// Paginated response from metrics query. +#[derive(Debug, Deserialize)] +pub struct MetricsResponse { + pub results: Vec, + pub pagination: Pagination, +} + +#[derive(Debug, Deserialize)] +pub struct Pagination { + pub next_cursor: Option, + pub has_more: bool, +} + +impl TermCloudClient { + /// Create a new client with the given configuration. + pub fn new(config: CloudConfig) -> CloudResult { + let client = Client::builder() + .timeout(config.timeout()) + .build() + .map_err(|e| CloudError::Configuration { + message: format!("Failed to create HTTP client: {}", e), + })?; + + let signing_key = hmac::Key::new(hmac::HMAC_SHA256, config.api_key().expose().as_bytes()); + + Ok(Self { + config: Arc::new(config), + client, + signing_key, + }) + } + + /// Check if the Term Cloud API is reachable. + pub async fn health_check(&self) -> CloudResult { + let url = format!("{}/v1/health", self.config.endpoint()); + + let response = self + .client + .get(&url) + .send() + .await + .map_err(|e| CloudError::Network { + message: e.to_string(), + })?; + + self.handle_response(response).await + } + + /// Send metrics to Term Cloud. 
+ pub async fn ingest(&self, metrics: &[CloudMetric]) -> CloudResult { + let url = format!("{}/v1/metrics", self.config.endpoint()); + let body = serde_json::to_vec(metrics).map_err(|e| CloudError::Serialization { + message: e.to_string(), + })?; + + let signature = self.sign_request(&body); + + let response = self + .client + .post(&url) + .header("Content-Type", "application/json") + .header("X-Term-Signature", signature) + .header("X-Term-Api-Key", self.config.api_key().expose()) + .body(body) + .send() + .await + .map_err(|e| CloudError::Network { + message: e.to_string(), + })?; + + self.handle_response(response).await + } + + /// Query metrics from Term Cloud. + pub async fn query(&self, query: MetricsQuery) -> CloudResult { + let url = format!("{}/v1/metrics", self.config.endpoint()); + + let response = self + .client + .get(&url) + .header("X-Term-Api-Key", self.config.api_key().expose()) + .query(&query) + .send() + .await + .map_err(|e| CloudError::Network { + message: e.to_string(), + })?; + + self.handle_response(response).await + } + + /// Delete metrics by key. + pub async fn delete(&self, key: &CloudResultKey) -> CloudResult<()> { + let url = format!("{}/v1/metrics/{}", self.config.endpoint(), key.dataset_date); + + let response = self + .client + .delete(&url) + .header("X-Term-Api-Key", self.config.api_key().expose()) + .query(&key.tags) + .send() + .await + .map_err(|e| CloudError::Network { + message: e.to_string(), + })?; + + if response.status().is_success() { + Ok(()) + } else { + self.handle_error_response(response).await + } + } + + /// Sign a request body using HMAC-SHA256. + fn sign_request(&self, body: &[u8]) -> String { + let tag = hmac::sign(&self.signing_key, body); + hex::encode(tag.as_ref()) + } + + /// Handle a successful or error response. 
+ async fn handle_response( + &self, + response: reqwest::Response, + ) -> CloudResult { + let status = response.status(); + + if status.is_success() { + response + .json::() + .await + .map_err(|e| CloudError::Serialization { + message: e.to_string(), + }) + } else { + self.handle_error_response(response).await + } + } + + /// Convert an error response to a CloudError. + async fn handle_error_response(&self, response: reqwest::Response) -> CloudResult { + let status = response.status(); + let retry_after = response + .headers() + .get("Retry-After") + .and_then(|v| v.to_str().ok()) + .and_then(|v| v.parse().ok()); + + let body = response.text().await.unwrap_or_default(); + + match status.as_u16() { + 401 => Err(CloudError::Authentication { message: body }), + 429 => Err(CloudError::RateLimited { + retry_after_secs: retry_after, + }), + 400 => Err(CloudError::InvalidRequest { message: body }), + status if status >= 500 => Err(CloudError::ServerError { + status, + message: body, + }), + _ => Err(CloudError::ServerError { + status: status.as_u16(), + message: body, + }), + } + } + + /// Get the configuration. + pub fn config(&self) -> &CloudConfig { + &self.config + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_client_creation() { + let config = CloudConfig::new("test-api-key"); + let client = TermCloudClient::new(config); + assert!(client.is_ok()); + } + + #[tokio::test] + async fn test_client_health_check_invalid_endpoint() { + let config = CloudConfig::new("test-key").with_endpoint("http://localhost:1"); + let client = TermCloudClient::new(config).unwrap(); + + let result = client.health_check().await; + assert!(result.is_err()); + } +} diff --git a/term-guard/src/cloud/mod.rs b/term-guard/src/cloud/mod.rs index b7175ee..2b59106 100644 --- a/term-guard/src/cloud/mod.rs +++ b/term-guard/src/cloud/mod.rs @@ -3,9 +3,13 @@ //! This module provides integration with the Term Cloud platform, //! 
enabling centralized metrics storage, alerting, and historical analysis. +mod client; mod error; mod types; +pub use client::{ + HealthResponse, IngestResponse, MetricsQuery, MetricsResponse, Pagination, TermCloudClient, +}; pub use error::{CloudError, CloudResult}; pub use types::{ CloudConfig, CloudHistogram, CloudHistogramBucket, CloudMetadata, CloudMetric, From 8e3621f15b87c98dc0592a317d37aefd151afdf4 Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 15:26:29 -0700 Subject: [PATCH 07/22] feat(cloud): add in-memory metrics buffer --- term-guard/src/cloud/buffer.rs | 213 +++++++++++++++++++++++++++++++++ term-guard/src/cloud/mod.rs | 2 + 2 files changed, 215 insertions(+) create mode 100644 term-guard/src/cloud/buffer.rs diff --git a/term-guard/src/cloud/buffer.rs b/term-guard/src/cloud/buffer.rs new file mode 100644 index 0000000..783b9e6 --- /dev/null +++ b/term-guard/src/cloud/buffer.rs @@ -0,0 +1,213 @@ +use std::collections::VecDeque; +use std::sync::Arc; +use tokio::sync::Mutex; + +use crate::cloud::{CloudError, CloudMetric, CloudResult}; + +/// Entry in the metrics buffer with retry metadata. +#[derive(Debug, Clone)] +pub struct BufferEntry { + pub metric: CloudMetric, + pub retry_count: u32, + pub queued_at: std::time::Instant, +} + +/// In-memory buffer for pending metrics uploads. +pub struct MetricsBuffer { + entries: Arc<Mutex<VecDeque<BufferEntry>>>, + max_size: usize, +} + +impl MetricsBuffer { + /// Create a new buffer with the given maximum size. + pub fn new(max_size: usize) -> Self { + Self { + entries: Arc::new(Mutex::new(VecDeque::with_capacity(max_size))), + max_size, + } + } + + /// Push a metric to the buffer. 
+ pub async fn push(&self, metric: CloudMetric) -> CloudResult<()> { + let mut entries = self.entries.lock().await; + + if entries.len() >= self.max_size { + return Err(CloudError::BufferOverflow { + pending_count: entries.len(), + max_size: self.max_size, + }); + } + + entries.push_back(BufferEntry { + metric, + retry_count: 0, + queued_at: std::time::Instant::now(), + }); + + Ok(()) + } + + /// Push a metric for retry (increments retry count). + pub async fn push_retry(&self, mut entry: BufferEntry) -> CloudResult<()> { + let mut entries = self.entries.lock().await; + + if entries.len() >= self.max_size { + return Err(CloudError::BufferOverflow { + pending_count: entries.len(), + max_size: self.max_size, + }); + } + + entry.retry_count += 1; + entries.push_back(entry); + + Ok(()) + } + + /// Drain up to `count` entries from the buffer. + pub async fn drain(&self, count: usize) -> Vec<BufferEntry> { + let mut entries = self.entries.lock().await; + let drain_count = std::cmp::min(count, entries.len()); + entries.drain(..drain_count).collect() + } + + /// Get the current number of entries in the buffer. + pub async fn len(&self) -> usize { + self.entries.lock().await.len() + } + + /// Check if the buffer is empty. + pub async fn is_empty(&self) -> bool { + self.entries.lock().await.is_empty() + } + + /// Get all entries without removing them (for persistence). + pub async fn peek_all(&self) -> Vec<BufferEntry> { + self.entries.lock().await.iter().cloned().collect() + } + + /// Clear the buffer and return all entries. 
+ pub async fn clear(&self) -> Vec { + let mut entries = self.entries.lock().await; + std::mem::take(&mut *entries).into_iter().collect() + } +} + +impl Clone for MetricsBuffer { + fn clone(&self) -> Self { + Self { + entries: Arc::clone(&self.entries), + max_size: self.max_size, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::cloud::{CloudMetadata, CloudResultKey}; + use std::collections::HashMap; + + fn make_test_metric() -> CloudMetric { + CloudMetric { + result_key: CloudResultKey { + dataset_date: 1704931200000, + tags: HashMap::new(), + }, + metrics: HashMap::new(), + metadata: CloudMetadata { + dataset_name: Some("test".to_string()), + start_time: None, + end_time: None, + term_version: "0.0.2".to_string(), + custom: HashMap::new(), + }, + validation_result: None, + } + } + + #[tokio::test] + async fn test_buffer_push_and_drain() { + let buffer = MetricsBuffer::new(100); + + buffer.push(make_test_metric()).await.unwrap(); + buffer.push(make_test_metric()).await.unwrap(); + + assert_eq!(buffer.len().await, 2); + + let drained = buffer.drain(10).await; + assert_eq!(drained.len(), 2); + assert_eq!(buffer.len().await, 0); + } + + #[tokio::test] + async fn test_buffer_overflow() { + let buffer = MetricsBuffer::new(2); + + buffer.push(make_test_metric()).await.unwrap(); + buffer.push(make_test_metric()).await.unwrap(); + + let result = buffer.push(make_test_metric()).await; + assert!(matches!(result, Err(CloudError::BufferOverflow { .. 
}))); + } + + #[tokio::test] + async fn test_buffer_push_retry() { + let buffer = MetricsBuffer::new(10); + + buffer.push(make_test_metric()).await.unwrap(); + let mut drained = buffer.drain(1).await; + let entry = drained.pop().unwrap(); + assert_eq!(entry.retry_count, 0); + + buffer.push_retry(entry).await.unwrap(); + let mut drained = buffer.drain(1).await; + let entry = drained.pop().unwrap(); + assert_eq!(entry.retry_count, 1); + } + + #[tokio::test] + async fn test_buffer_peek_all() { + let buffer = MetricsBuffer::new(100); + + buffer.push(make_test_metric()).await.unwrap(); + buffer.push(make_test_metric()).await.unwrap(); + + let peeked = buffer.peek_all().await; + assert_eq!(peeked.len(), 2); + assert_eq!(buffer.len().await, 2); + } + + #[tokio::test] + async fn test_buffer_clear() { + let buffer = MetricsBuffer::new(100); + + buffer.push(make_test_metric()).await.unwrap(); + buffer.push(make_test_metric()).await.unwrap(); + + let cleared = buffer.clear().await; + assert_eq!(cleared.len(), 2); + assert!(buffer.is_empty().await); + } + + #[tokio::test] + async fn test_buffer_clone_shares_state() { + let buffer1 = MetricsBuffer::new(100); + let buffer2 = buffer1.clone(); + + buffer1.push(make_test_metric()).await.unwrap(); + assert_eq!(buffer2.len().await, 1); + + buffer2.push(make_test_metric()).await.unwrap(); + assert_eq!(buffer1.len().await, 2); + } + + #[tokio::test] + async fn test_buffer_is_empty() { + let buffer = MetricsBuffer::new(100); + assert!(buffer.is_empty().await); + + buffer.push(make_test_metric()).await.unwrap(); + assert!(!buffer.is_empty().await); + } +} diff --git a/term-guard/src/cloud/mod.rs b/term-guard/src/cloud/mod.rs index 2b59106..b373911 100644 --- a/term-guard/src/cloud/mod.rs +++ b/term-guard/src/cloud/mod.rs @@ -3,10 +3,12 @@ //! This module provides integration with the Term Cloud platform, //! enabling centralized metrics storage, alerting, and historical analysis. 
+mod buffer; mod client; mod error; mod types; +pub use buffer::{BufferEntry, MetricsBuffer}; pub use client::{ HealthResponse, IngestResponse, MetricsQuery, MetricsResponse, Pagination, TermCloudClient, }; From 001ef5c591e6169632adc3683e8bc21e6ebcfc03 Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 15:33:10 -0700 Subject: [PATCH 08/22] feat(cloud): add background upload worker Add UploadWorker for asynchronous metrics upload with: - Configurable batch size and flush interval - Graceful shutdown with final flush - WorkerStats tracking (metrics_uploaded, metrics_failed, batches_sent, retries) - Exponential backoff with jitter for retries (capped at 32x) - Non-blocking retry handling to prevent flush cycle blocking - Fallible new() returning CloudResult instead of panicking Co-Authored-By: Claude Opus 4.5 --- term-guard/Cargo.toml | 2 +- term-guard/src/cloud/mod.rs | 2 + term-guard/src/cloud/worker.rs | 264 +++++++++++++++++++++++++++++++++ 3 files changed, 267 insertions(+), 1 deletion(-) create mode 100644 term-guard/src/cloud/worker.rs diff --git a/term-guard/Cargo.toml b/term-guard/Cargo.toml index 8f6fd59..d10c09b 100644 --- a/term-guard/Cargo.toml +++ b/term-guard/Cargo.toml @@ -35,7 +35,7 @@ cloud-storage = ["dep:object_store", "dep:url"] gcs = ["cloud-storage", "object_store/gcp"] s3 = ["cloud-storage", "object_store/aws"] # Term Cloud SDK -cloud = ["dep:directories", "dep:reqwest", "dep:ring", "dep:rusqlite"] +cloud = ["dep:directories", "dep:rand", "dep:reqwest", "dep:ring", "dep:rusqlite"] # Observability telemetry = ["dep:tracing-opentelemetry", "dep:opentelemetry", "dep:opentelemetry_sdk"] test-utils = ["dep:rand", "dep:parquet"] diff --git a/term-guard/src/cloud/mod.rs b/term-guard/src/cloud/mod.rs index b373911..5039930 100644 --- a/term-guard/src/cloud/mod.rs +++ b/term-guard/src/cloud/mod.rs @@ -7,6 +7,7 @@ mod buffer; mod client; mod error; mod types; +mod worker; pub use buffer::{BufferEntry, MetricsBuffer}; pub use client::{ @@ 
-17,3 +18,4 @@ pub use types::{ CloudConfig, CloudHistogram, CloudHistogramBucket, CloudMetadata, CloudMetric, CloudMetricValue, CloudResultKey, CloudValidationIssue, CloudValidationResult, }; +pub use worker::{UploadWorker, WorkerStats}; diff --git a/term-guard/src/cloud/worker.rs b/term-guard/src/cloud/worker.rs new file mode 100644 index 0000000..900d6eb --- /dev/null +++ b/term-guard/src/cloud/worker.rs @@ -0,0 +1,264 @@ +use std::time::Duration; + +use rand::Rng; +use tokio::sync::watch; +use tokio::time::interval; +use tracing::{debug, error, info, instrument, warn}; + +use crate::cloud::{ + BufferEntry, CloudConfig, CloudError, CloudMetric, CloudResult, MetricsBuffer, TermCloudClient, +}; + +/// Background worker for uploading metrics to Term Cloud. +pub struct UploadWorker { + client: TermCloudClient, + buffer: MetricsBuffer, + shutdown: watch::Receiver<bool>, + batch_size: usize, + flush_interval: Duration, + max_retries: u32, + stats: WorkerStats, +} + +/// Statistics from the upload worker. +#[derive(Debug, Clone, Default)] +pub struct WorkerStats { + pub metrics_uploaded: u64, + pub metrics_failed: u64, + pub batches_sent: u64, + pub retries: u64, +} + +impl UploadWorker { + /// Create a new upload worker. + /// + /// # Errors + /// + /// Returns an error if the cloud client cannot be created. + pub fn new( + config: CloudConfig, + buffer: MetricsBuffer, + shutdown: watch::Receiver<bool>, + ) -> CloudResult<Self> { + let client = TermCloudClient::new(config.clone())?; + + Ok(Self { + batch_size: config.batch_size(), + flush_interval: config.flush_interval(), + max_retries: config.max_retries(), + client, + buffer, + shutdown, + stats: WorkerStats::default(), + }) + } + + /// Run the upload worker until shutdown. + /// + /// Returns the accumulated statistics from the worker's operation. + #[instrument(skip(self))] + pub async fn run(mut self) -> WorkerStats { + info!("Upload worker started"); + let mut interval = interval(self.flush_interval); + + loop { + tokio::select! 
{ + _ = interval.tick() => { + self.flush().await; + } + Ok(()) = self.shutdown.changed() => { + if *self.shutdown.borrow() { + info!("Shutdown signal received, flushing remaining metrics"); + self.flush_all().await; + break; + } + } + } + } + + info!( + "Upload worker stopped: {} metrics uploaded, {} failed, {} batches, {} retries", + self.stats.metrics_uploaded, + self.stats.metrics_failed, + self.stats.batches_sent, + self.stats.retries + ); + self.stats + } + + /// Flush a batch of metrics. + async fn flush(&mut self) { + let entries = self.buffer.drain(self.batch_size).await; + if entries.is_empty() { + return; + } + + debug!("Flushing {} metrics", entries.len()); + self.upload_batch(entries).await; + } + + /// Flush all remaining metrics (used during shutdown). + async fn flush_all(&mut self) { + loop { + let entries = self.buffer.drain(self.batch_size).await; + if entries.is_empty() { + break; + } + self.upload_batch(entries).await; + } + } + + /// Upload a batch of metrics, handling retries. + async fn upload_batch(&mut self, entries: Vec) { + let metrics: Vec = entries.iter().map(|e| e.metric.clone()).collect(); + let batch_size = entries.len() as u64; + + match self.client.ingest(&metrics).await { + Ok(response) => { + debug!( + "Batch uploaded: {} accepted, {} rejected", + response.accepted, response.rejected + ); + self.stats.metrics_uploaded += response.accepted as u64; + self.stats.metrics_failed += response.rejected as u64; + self.stats.batches_sent += 1; + if !response.errors.is_empty() { + warn!("Upload errors: {:?}", response.errors); + } + } + Err(e) if e.is_retryable() => { + warn!("Retryable error uploading batch: {}", e); + self.handle_retry(entries, &e).await; + } + Err(e) => { + error!("Non-retryable error uploading batch: {}", e); + self.stats.metrics_failed += batch_size; + } + } + } + + /// Handle retrying failed entries. + /// + /// Re-queues entries immediately without blocking. 
The buffer's flush_interval + /// provides natural throttling, and exponential backoff is applied based on + /// each entry's retry count when it's eventually processed again. + async fn handle_retry(&mut self, entries: Vec<BufferEntry>, _error: &CloudError) { + for entry in entries { + if entry.retry_count < self.max_retries { + self.stats.retries += 1; + if let Err(e) = self.buffer.push_retry(entry).await { + warn!("Failed to requeue metric for retry: {}", e); + self.stats.metrics_failed += 1; + } + } else { + warn!("Dropping metric after {} retries", entry.retry_count); + self.stats.metrics_failed += 1; + } + } + } + + /// Calculate exponential backoff delay with jitter. + /// + /// Uses the formula: base_delay * 2^retry_count + jitter + /// where retry_count is capped at 5 (max 32x multiplier). + #[allow(dead_code)] + fn calculate_backoff(&self, retry_count: u32, retry_after: Option<u64>) -> Duration { + let base_delay = retry_after.unwrap_or(1); + let capped_retry = retry_count.min(5); + let backoff_secs = base_delay * (1 << capped_retry); + + let jitter_ms = rand::rng().random_range(0..1000); + Duration::from_secs(backoff_secs) + Duration::from_millis(jitter_ms) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_worker_shutdown() { + let config = CloudConfig::new("test-key") + .with_endpoint("http://localhost:1") + .with_flush_interval(Duration::from_millis(100)); + + let (shutdown_tx, shutdown_rx) = watch::channel(false); + let buffer = MetricsBuffer::new(100); + + let worker = UploadWorker::new(config, buffer.clone(), shutdown_rx).unwrap(); + + let handle = tokio::spawn(async move { worker.run().await }); + + shutdown_tx.send(true).unwrap(); + + let result = tokio::time::timeout(Duration::from_secs(1), handle).await; + assert!(result.is_ok()); + let stats = result.unwrap().unwrap(); + assert_eq!(stats.metrics_uploaded, 0); + } + + #[tokio::test] + async fn test_worker_returns_stats() { + let config = CloudConfig::new("test-key") + 
.with_endpoint("http://localhost:1") + .with_flush_interval(Duration::from_millis(50)); + + let (shutdown_tx, shutdown_rx) = watch::channel(false); + let buffer = MetricsBuffer::new(100); + + let worker = UploadWorker::new(config, buffer.clone(), shutdown_rx).unwrap(); + + let handle = tokio::spawn(async move { worker.run().await }); + + tokio::time::sleep(Duration::from_millis(100)).await; + shutdown_tx.send(true).unwrap(); + + let result = tokio::time::timeout(Duration::from_secs(1), handle).await; + assert!(result.is_ok()); + let stats = result.unwrap().unwrap(); + assert_eq!(stats.batches_sent, 0); + assert_eq!(stats.retries, 0); + } + + #[test] + fn test_calculate_backoff() { + let config = CloudConfig::new("test-key").with_endpoint("http://localhost:1"); + + let (_, shutdown_rx) = watch::channel(false); + let buffer = MetricsBuffer::new(100); + let worker = UploadWorker::new(config, buffer, shutdown_rx).unwrap(); + + let delay0 = worker.calculate_backoff(0, Some(1)); + assert!(delay0 >= Duration::from_secs(1)); + assert!(delay0 < Duration::from_secs(2)); + + let delay1 = worker.calculate_backoff(1, Some(1)); + assert!(delay1 >= Duration::from_secs(2)); + assert!(delay1 < Duration::from_secs(3)); + + let delay5 = worker.calculate_backoff(5, Some(1)); + assert!(delay5 >= Duration::from_secs(32)); + assert!(delay5 < Duration::from_secs(33)); + + let delay_capped = worker.calculate_backoff(10, Some(1)); + assert!(delay_capped >= Duration::from_secs(32)); + assert!(delay_capped < Duration::from_secs(33)); + } + + #[test] + fn test_calculate_backoff_uses_retry_after() { + let config = CloudConfig::new("test-key").with_endpoint("http://localhost:1"); + + let (_, shutdown_rx) = watch::channel(false); + let buffer = MetricsBuffer::new(100); + let worker = UploadWorker::new(config, buffer, shutdown_rx).unwrap(); + + let delay = worker.calculate_backoff(0, Some(5)); + assert!(delay >= Duration::from_secs(5)); + assert!(delay < Duration::from_secs(6)); + + let 
delay_with_backoff = worker.calculate_backoff(2, Some(5)); + assert!(delay_with_backoff >= Duration::from_secs(20)); + assert!(delay_with_backoff < Duration::from_secs(21)); + } +} From becd3d0fb1ca78aa7e2da1922dafee89aaa36a6b Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 16:10:33 -0700 Subject: [PATCH 09/22] fix(cloud): integrate exponential backoff into retry flow The calculate_backoff() method existed but was never called, meaning retries occurred immediately without any backoff delay. This fix: - Add ready_at: Instant field to BufferEntry to track when entries can be retried - Modify drain() to only return entries past their ready_at time - Call calculate_backoff() in handle_retry() to set proper backoff delays - Remove #[allow(dead_code)] from calculate_backoff() since it's now used This ensures exponential backoff with jitter is actually applied during the retry flow, preventing thundering herd issues after transient failures. Co-Authored-By: Claude Opus 4.5 --- term-guard/src/cloud/buffer.rs | 88 ++++++++++++++++++++++++++++++---- term-guard/src/cloud/worker.rs | 20 ++++---- 2 files changed, 92 insertions(+), 16 deletions(-) diff --git a/term-guard/src/cloud/buffer.rs b/term-guard/src/cloud/buffer.rs index 783b9e6..bbf5a63 100644 --- a/term-guard/src/cloud/buffer.rs +++ b/term-guard/src/cloud/buffer.rs @@ -1,5 +1,6 @@ use std::collections::VecDeque; use std::sync::Arc; +use std::time::Instant; use tokio::sync::Mutex; use crate::cloud::{CloudError, CloudMetric, CloudResult}; @@ -9,7 +10,8 @@ use crate::cloud::{CloudError, CloudMetric, CloudResult}; pub struct BufferEntry { pub metric: CloudMetric, pub retry_count: u32, - pub queued_at: std::time::Instant, + pub queued_at: Instant, + pub ready_at: Instant, } /// In-memory buffer for pending metrics uploads. 
@@ -38,17 +40,22 @@ impl MetricsBuffer { }); } + let now = Instant::now(); entries.push_back(BufferEntry { metric, retry_count: 0, - queued_at: std::time::Instant::now(), + queued_at: now, + ready_at: now, }); Ok(()) } - /// Push a metric for retry (increments retry count). - pub async fn push_retry(&self, mut entry: BufferEntry) -> CloudResult<()> { + /// Push a metric for retry with a backoff delay. + /// + /// Increments retry count and sets `ready_at` to delay processing until + /// the backoff period has elapsed. + pub async fn push_retry(&self, mut entry: BufferEntry, ready_at: Instant) -> CloudResult<()> { let mut entries = self.entries.lock().await; if entries.len() >= self.max_size { @@ -59,16 +66,34 @@ impl MetricsBuffer { } entry.retry_count += 1; + entry.ready_at = ready_at; entries.push_back(entry); Ok(()) } - /// Drain up to `count` entries from the buffer. + /// Drain up to `count` ready entries from the buffer. + /// + /// Only drains entries where `ready_at` has passed, respecting backoff delays + /// for retried entries. Entries not yet ready remain in the buffer. pub async fn drain(&self, count: usize) -> Vec { let mut entries = self.entries.lock().await; - let drain_count = std::cmp::min(count, entries.len()); - entries.drain(..drain_count).collect() + let now = Instant::now(); + + let mut result = Vec::with_capacity(count); + let mut i = 0; + + while i < entries.len() && result.len() < count { + if entries[i].ready_at <= now { + if let Some(entry) = entries.remove(i) { + result.push(entry); + } + } else { + i += 1; + } + } + + result } /// Get the current number of entries in the buffer. 
@@ -160,7 +185,10 @@ mod tests { let entry = drained.pop().unwrap(); assert_eq!(entry.retry_count, 0); - buffer.push_retry(entry).await.unwrap(); + buffer + .push_retry(entry, std::time::Instant::now()) + .await + .unwrap(); let mut drained = buffer.drain(1).await; let entry = drained.pop().unwrap(); assert_eq!(entry.retry_count, 1); @@ -210,4 +238,48 @@ mod tests { buffer.push(make_test_metric()).await.unwrap(); assert!(!buffer.is_empty().await); } + + #[tokio::test] + async fn test_drain_respects_ready_at() { + use std::time::Duration; + + let buffer = MetricsBuffer::new(10); + + buffer.push(make_test_metric()).await.unwrap(); + let mut drained = buffer.drain(1).await; + let entry = drained.pop().unwrap(); + + let future_ready = Instant::now() + Duration::from_secs(60); + buffer.push_retry(entry, future_ready).await.unwrap(); + + assert_eq!(buffer.len().await, 1); + let drained = buffer.drain(10).await; + assert_eq!(drained.len(), 0); + assert_eq!(buffer.len().await, 1); + } + + #[tokio::test] + async fn test_drain_returns_ready_entries_only() { + use std::time::Duration; + + let buffer = MetricsBuffer::new(10); + + buffer.push(make_test_metric()).await.unwrap(); + buffer.push(make_test_metric()).await.unwrap(); + + let mut drained = buffer.drain(2).await; + let entry1 = drained.pop().unwrap(); + let entry2 = drained.pop().unwrap(); + + buffer.push_retry(entry1, Instant::now()).await.unwrap(); + buffer + .push_retry(entry2, Instant::now() + Duration::from_secs(60)) + .await + .unwrap(); + + assert_eq!(buffer.len().await, 2); + let drained = buffer.drain(10).await; + assert_eq!(drained.len(), 1); + assert_eq!(buffer.len().await, 1); + } } diff --git a/term-guard/src/cloud/worker.rs b/term-guard/src/cloud/worker.rs index 900d6eb..ed01ab0 100644 --- a/term-guard/src/cloud/worker.rs +++ b/term-guard/src/cloud/worker.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::time::{Duration, Instant}; use rand::Rng; use tokio::sync::watch; @@ -137,16 +137,21 @@ impl 
UploadWorker { } } - /// Handle retrying failed entries. + /// Handle retrying failed entries with exponential backoff. /// - /// Re-queues entries immediately without blocking. The buffer's flush_interval - /// provides natural throttling, and exponential backoff is applied based on - /// each entry's retry count when it's eventually processed again. - async fn handle_retry(&mut self, entries: Vec, _error: &CloudError) { + /// Re-queues entries with a `ready_at` timestamp calculated via exponential + /// backoff. The buffer's `drain()` method respects this timestamp, ensuring + /// entries are not retried until their backoff period has elapsed. + async fn handle_retry(&mut self, entries: Vec, error: &CloudError) { + let retry_after = error.retry_after(); + for entry in entries { if entry.retry_count < self.max_retries { + let backoff = self.calculate_backoff(entry.retry_count, retry_after); + let ready_at = Instant::now() + backoff; + self.stats.retries += 1; - if let Err(e) = self.buffer.push_retry(entry).await { + if let Err(e) = self.buffer.push_retry(entry, ready_at).await { warn!("Failed to requeue metric for retry: {}", e); self.stats.metrics_failed += 1; } @@ -161,7 +166,6 @@ impl UploadWorker { /// /// Uses the formula: base_delay * 2^retry_count + jitter /// where retry_count is capped at 5 (max 32x multiplier). - #[allow(dead_code)] fn calculate_backoff(&self, retry_count: u32, retry_after: Option) -> Duration { let base_delay = retry_after.unwrap_or(1); let capped_retry = retry_count.min(5); From 10cd54412662291f24ffd84e7f916aa2e1b71395 Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 19:04:47 -0700 Subject: [PATCH 10/22] feat(cloud): add SQLite offline cache for metrics persistence Add OfflineCache struct backed by SQLite for persisting metrics when network is unavailable. The cache stores metrics as JSON with retry counts for later retry attempts. 
Features: - File-based or in-memory cache creation - Thread-safe with Mutex - Serialize/deserialize CloudMetric to JSON - Track retry counts for backoff strategy Co-Authored-By: Claude Opus 4.5 --- term-guard/src/cloud/cache.rs | 236 ++++++++++++++++++++++++++++++++++ term-guard/src/cloud/mod.rs | 2 + 2 files changed, 238 insertions(+) create mode 100644 term-guard/src/cloud/cache.rs diff --git a/term-guard/src/cloud/cache.rs b/term-guard/src/cloud/cache.rs new file mode 100644 index 0000000..70b5112 --- /dev/null +++ b/term-guard/src/cloud/cache.rs @@ -0,0 +1,236 @@ +//! Offline cache for metrics persistence when network is unavailable. + +use std::path::Path; +use std::sync::Mutex; +use std::time::Instant; + +use rusqlite::Connection; + +use crate::cloud::{BufferEntry, CloudError, CloudMetric, CloudResult}; + +/// SQLite-backed offline cache for metrics persistence. +pub struct OfflineCache { + conn: Mutex<Connection>, +} + +impl OfflineCache { + /// Create or open a cache at the given file path. + pub fn new(path: &Path) -> CloudResult<Self> { + let conn = Connection::open(path).map_err(|e| CloudError::CacheError { + message: format!("Failed to open cache database: {e}"), + })?; + + let cache = Self { + conn: Mutex::new(conn), + }; + cache.init_schema()?; + Ok(cache) + } + + /// Create an in-memory cache for testing. 
+ pub fn in_memory() -> CloudResult { + let conn = Connection::open_in_memory().map_err(|e| CloudError::CacheError { + message: format!("Failed to create in-memory cache: {e}"), + })?; + + let cache = Self { + conn: Mutex::new(conn), + }; + cache.init_schema()?; + Ok(cache) + } + + fn init_schema(&self) -> CloudResult<()> { + let conn = self.conn.lock().map_err(|e| CloudError::CacheError { + message: format!("Failed to acquire lock: {e}"), + })?; + + conn.execute( + "CREATE TABLE IF NOT EXISTS pending_metrics ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + metric_json TEXT NOT NULL, + retry_count INTEGER NOT NULL DEFAULT 0, + created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')) + )", + [], + ) + .map_err(|e| CloudError::CacheError { + message: format!("Failed to create schema: {e}"), + })?; + + Ok(()) + } + + /// Save a metric to the cache. + pub fn save(&self, metric: &CloudMetric, retry_count: u32) -> CloudResult<()> { + let metric_json = serde_json::to_string(metric).map_err(|e| CloudError::CacheError { + message: format!("Failed to serialize metric: {e}"), + })?; + + let conn = self.conn.lock().map_err(|e| CloudError::CacheError { + message: format!("Failed to acquire lock: {e}"), + })?; + + conn.execute( + "INSERT INTO pending_metrics (metric_json, retry_count) VALUES (?1, ?2)", + rusqlite::params![metric_json, retry_count], + ) + .map_err(|e| CloudError::CacheError { + message: format!("Failed to save metric: {e}"), + })?; + + Ok(()) + } + + /// Load all pending metrics from the cache. 
+ pub fn load_all(&self) -> CloudResult<Vec<BufferEntry>> { + let conn = self.conn.lock().map_err(|e| CloudError::CacheError { + message: format!("Failed to acquire lock: {e}"), + })?; + + let mut stmt = conn + .prepare("SELECT metric_json, retry_count FROM pending_metrics ORDER BY id") + .map_err(|e| CloudError::CacheError { + message: format!("Failed to prepare query: {e}"), + })?; + + let now = Instant::now(); + let entries = stmt + .query_map([], |row| { + let metric_json: String = row.get(0)?; + let retry_count: u32 = row.get(1)?; + Ok((metric_json, retry_count)) + }) + .map_err(|e| CloudError::CacheError { + message: format!("Failed to query metrics: {e}"), + })? + .filter_map(|result| { + result.ok().and_then(|(json, retry_count)| { + serde_json::from_str::<CloudMetric>(&json) + .ok() + .map(|metric| BufferEntry { + metric, + retry_count, + queued_at: now, + ready_at: now, + }) + }) + }) + .collect(); + + Ok(entries) + } + + /// Remove all cached entries. + pub fn clear(&self) -> CloudResult<()> { + let conn = self.conn.lock().map_err(|e| CloudError::CacheError { + message: format!("Failed to acquire lock: {e}"), + })?; + + conn.execute("DELETE FROM pending_metrics", []) + .map_err(|e| CloudError::CacheError { + message: format!("Failed to clear cache: {e}"), + })?; + + Ok(()) + } + + /// Get count of pending metrics. 
+ pub fn count(&self) -> CloudResult { + let conn = self.conn.lock().map_err(|e| CloudError::CacheError { + message: format!("Failed to acquire lock: {e}"), + })?; + + let count: i64 = conn + .query_row("SELECT COUNT(*) FROM pending_metrics", [], |row| row.get(0)) + .map_err(|e| CloudError::CacheError { + message: format!("Failed to count metrics: {e}"), + })?; + + Ok(count as usize) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::cloud::{CloudMetadata, CloudResultKey}; + use std::collections::HashMap; + + fn make_test_metric() -> CloudMetric { + CloudMetric { + result_key: CloudResultKey { + dataset_date: 1704931200000, + tags: HashMap::new(), + }, + metrics: HashMap::new(), + metadata: CloudMetadata { + dataset_name: Some("test".to_string()), + start_time: None, + end_time: None, + term_version: "0.0.2".to_string(), + custom: HashMap::new(), + }, + validation_result: None, + } + } + + #[test] + fn test_cache_save_and_load() { + let cache = OfflineCache::in_memory().unwrap(); + + let metric1 = make_test_metric(); + let metric2 = make_test_metric(); + + cache.save(&metric1, 0).unwrap(); + cache.save(&metric2, 2).unwrap(); + + assert_eq!(cache.count().unwrap(), 2); + + let entries = cache.load_all().unwrap(); + assert_eq!(entries.len(), 2); + assert_eq!(entries[0].retry_count, 0); + assert_eq!(entries[1].retry_count, 2); + } + + #[test] + fn test_cache_clear() { + let cache = OfflineCache::in_memory().unwrap(); + + cache.save(&make_test_metric(), 0).unwrap(); + cache.save(&make_test_metric(), 0).unwrap(); + + assert_eq!(cache.count().unwrap(), 2); + + cache.clear().unwrap(); + + assert_eq!(cache.count().unwrap(), 0); + assert!(cache.load_all().unwrap().is_empty()); + } + + #[test] + fn test_cache_file_persistence() { + let temp_dir = tempfile::tempdir().unwrap(); + let cache_path = temp_dir.path().join("metrics.db"); + + { + let cache = OfflineCache::new(&cache_path).unwrap(); + cache.save(&make_test_metric(), 1).unwrap(); + 
assert_eq!(cache.count().unwrap(), 1); + } + + { + let cache = OfflineCache::new(&cache_path).unwrap(); + assert_eq!(cache.count().unwrap(), 1); + let entries = cache.load_all().unwrap(); + assert_eq!(entries[0].retry_count, 1); + } + } + + #[test] + fn test_cache_empty_load() { + let cache = OfflineCache::in_memory().unwrap(); + let entries = cache.load_all().unwrap(); + assert!(entries.is_empty()); + } +} diff --git a/term-guard/src/cloud/mod.rs b/term-guard/src/cloud/mod.rs index 5039930..f395efe 100644 --- a/term-guard/src/cloud/mod.rs +++ b/term-guard/src/cloud/mod.rs @@ -4,12 +4,14 @@ //! enabling centralized metrics storage, alerting, and historical analysis. mod buffer; +mod cache; mod client; mod error; mod types; mod worker; pub use buffer::{BufferEntry, MetricsBuffer}; +pub use cache::OfflineCache; pub use client::{ HealthResponse, IngestResponse, MetricsQuery, MetricsResponse, Pagination, TermCloudClient, }; From b6b44373ac8fe3096642d834f104d7568113f185 Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 19:11:01 -0700 Subject: [PATCH 11/22] fix(cloud): add error logging and selective deletion to offline cache - Add tracing::warn! 
for deserialization and row read failures in load_all() to prevent silent data loss - Create CacheEntry struct that wraps BufferEntry with database ID - Update load_all() to return Vec for tracking entry IDs - Add delete_ids() method for selective deletion of uploaded entries - Add tests for delete_ids() including empty case - Export CacheEntry from cloud module Co-Authored-By: Claude Opus 4.5 --- term-guard/src/cloud/cache.rs | 126 +++++++++++++++++++++++++++++----- term-guard/src/cloud/mod.rs | 2 +- 2 files changed, 108 insertions(+), 20 deletions(-) diff --git a/term-guard/src/cloud/cache.rs b/term-guard/src/cloud/cache.rs index 70b5112..1db5810 100644 --- a/term-guard/src/cloud/cache.rs +++ b/term-guard/src/cloud/cache.rs @@ -5,9 +5,19 @@ use std::sync::Mutex; use std::time::Instant; use rusqlite::Connection; +use tracing::warn; use crate::cloud::{BufferEntry, CloudError, CloudMetric, CloudResult}; +/// Entry loaded from the cache, with its database ID for selective deletion. +#[derive(Debug)] +pub struct CacheEntry { + /// Database ID for this entry, used with `delete_ids()`. + pub id: i64, + /// The buffered metric entry. + pub entry: BufferEntry, +} + /// SQLite-backed offline cache for metrics persistence. pub struct OfflineCache { conn: Mutex, @@ -83,13 +93,15 @@ impl OfflineCache { } /// Load all pending metrics from the cache. - pub fn load_all(&self) -> CloudResult> { + /// + /// Returns entries with their database IDs for selective deletion after successful upload. 
+ pub fn load_all(&self) -> CloudResult> { let conn = self.conn.lock().map_err(|e| CloudError::CacheError { message: format!("Failed to acquire lock: {e}"), })?; let mut stmt = conn - .prepare("SELECT metric_json, retry_count FROM pending_metrics ORDER BY id") + .prepare("SELECT id, metric_json, retry_count FROM pending_metrics ORDER BY id") .map_err(|e| CloudError::CacheError { message: format!("Failed to prepare query: {e}"), })?; @@ -97,30 +109,73 @@ impl OfflineCache { let now = Instant::now(); let entries = stmt .query_map([], |row| { - let metric_json: String = row.get(0)?; - let retry_count: u32 = row.get(1)?; - Ok((metric_json, retry_count)) + let id: i64 = row.get(0)?; + let metric_json: String = row.get(1)?; + let retry_count: u32 = row.get(2)?; + Ok((id, metric_json, retry_count)) }) .map_err(|e| CloudError::CacheError { message: format!("Failed to query metrics: {e}"), })? - .filter_map(|result| { - result.ok().and_then(|(json, retry_count)| { - serde_json::from_str::(&json) - .ok() - .map(|metric| BufferEntry { - metric, - retry_count, - queued_at: now, - ready_at: now, - }) - }) + .filter_map(|result| match result { + Ok((id, json, retry_count)) => { + match serde_json::from_str::(&json) { + Ok(metric) => Some(CacheEntry { + id, + entry: BufferEntry { + metric, + retry_count, + queued_at: now, + ready_at: now, + }, + }), + Err(e) => { + warn!("Failed to deserialize cached metric (id={}): {}", id, e); + None + } + } + } + Err(e) => { + warn!("Failed to read cache row: {}", e); + None + } }) .collect(); Ok(entries) } + /// Delete specific entries by their database IDs. + /// + /// Returns the number of entries deleted. 
+ pub fn delete_ids(&self, ids: &[i64]) -> CloudResult { + if ids.is_empty() { + return Ok(0); + } + + let conn = self.conn.lock().map_err(|e| CloudError::CacheError { + message: format!("Failed to acquire lock: {e}"), + })?; + + let placeholders: Vec<_> = ids.iter().map(|_| "?").collect(); + let sql = format!( + "DELETE FROM pending_metrics WHERE id IN ({})", + placeholders.join(", ") + ); + + let mut stmt = conn.prepare(&sql).map_err(|e| CloudError::CacheError { + message: format!("Failed to prepare delete query: {e}"), + })?; + + let deleted = stmt + .execute(rusqlite::params_from_iter(ids.iter())) + .map_err(|e| CloudError::CacheError { + message: format!("Failed to delete metrics: {e}"), + })?; + + Ok(deleted) + } + /// Remove all cached entries. pub fn clear(&self) -> CloudResult<()> { let conn = self.conn.lock().map_err(|e| CloudError::CacheError { @@ -189,8 +244,9 @@ mod tests { let entries = cache.load_all().unwrap(); assert_eq!(entries.len(), 2); - assert_eq!(entries[0].retry_count, 0); - assert_eq!(entries[1].retry_count, 2); + assert_eq!(entries[0].entry.retry_count, 0); + assert_eq!(entries[1].entry.retry_count, 2); + assert!(entries[0].id < entries[1].id); } #[test] @@ -223,10 +279,42 @@ mod tests { let cache = OfflineCache::new(&cache_path).unwrap(); assert_eq!(cache.count().unwrap(), 1); let entries = cache.load_all().unwrap(); - assert_eq!(entries[0].retry_count, 1); + assert_eq!(entries[0].entry.retry_count, 1); } } + #[test] + fn test_cache_delete_ids() { + let cache = OfflineCache::in_memory().unwrap(); + + cache.save(&make_test_metric(), 0).unwrap(); + cache.save(&make_test_metric(), 1).unwrap(); + cache.save(&make_test_metric(), 2).unwrap(); + + assert_eq!(cache.count().unwrap(), 3); + + let entries = cache.load_all().unwrap(); + let ids_to_delete: Vec = vec![entries[0].id, entries[2].id]; + + let deleted = cache.delete_ids(&ids_to_delete).unwrap(); + assert_eq!(deleted, 2); + assert_eq!(cache.count().unwrap(), 1); + + let remaining = 
cache.load_all().unwrap(); + assert_eq!(remaining.len(), 1); + assert_eq!(remaining[0].entry.retry_count, 1); + } + + #[test] + fn test_cache_delete_ids_empty() { + let cache = OfflineCache::in_memory().unwrap(); + cache.save(&make_test_metric(), 0).unwrap(); + + let deleted = cache.delete_ids(&[]).unwrap(); + assert_eq!(deleted, 0); + assert_eq!(cache.count().unwrap(), 1); + } + #[test] fn test_cache_empty_load() { let cache = OfflineCache::in_memory().unwrap(); diff --git a/term-guard/src/cloud/mod.rs b/term-guard/src/cloud/mod.rs index f395efe..2c080bf 100644 --- a/term-guard/src/cloud/mod.rs +++ b/term-guard/src/cloud/mod.rs @@ -11,7 +11,7 @@ mod types; mod worker; pub use buffer::{BufferEntry, MetricsBuffer}; -pub use cache::OfflineCache; +pub use cache::{CacheEntry, OfflineCache}; pub use client::{ HealthResponse, IngestResponse, MetricsQuery, MetricsResponse, Pagination, TermCloudClient, }; From 8cd39fd4953c4f612c9b16975fa55307cbb616af Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 19:20:14 -0700 Subject: [PATCH 12/22] feat(cloud): add TermCloudRepository implementation Add the main repository implementation that ties together all cloud components for metrics persistence to Term Cloud: - Implements MetricsRepository trait for integration with existing code - Uses MetricsBuffer for local buffering with async background upload - Supports offline operation with automatic sync via OfflineCache - Integrates UploadWorker for asynchronous batch uploads - Provides graceful shutdown with worker stats and cache persistence - Includes health check for Term Cloud connectivity - Converts AnalyzerContext metrics to CloudMetric wire format Key methods: - new(): Create repository and start background worker - setup_cache(): Configure offline cache at default or custom path - pending_count(): Get number of pending metrics - flush(): Force flush buffered metrics - shutdown(): Graceful shutdown with cache persistence - health_check(): Check Term Cloud 
connectivity - sync_offline_cache(): Upload cached offline metrics Co-Authored-By: Claude Opus 4.5 --- term-guard/src/cloud/mod.rs | 2 + term-guard/src/cloud/repository.rs | 784 +++++++++++++++++++++++++++++ 2 files changed, 786 insertions(+) create mode 100644 term-guard/src/cloud/repository.rs diff --git a/term-guard/src/cloud/mod.rs b/term-guard/src/cloud/mod.rs index 2c080bf..af6d5fb 100644 --- a/term-guard/src/cloud/mod.rs +++ b/term-guard/src/cloud/mod.rs @@ -7,6 +7,7 @@ mod buffer; mod cache; mod client; mod error; +mod repository; mod types; mod worker; @@ -16,6 +17,7 @@ pub use client::{ HealthResponse, IngestResponse, MetricsQuery, MetricsResponse, Pagination, TermCloudClient, }; pub use error::{CloudError, CloudResult}; +pub use repository::TermCloudRepository; pub use types::{ CloudConfig, CloudHistogram, CloudHistogramBucket, CloudMetadata, CloudMetric, CloudMetricValue, CloudResultKey, CloudValidationIssue, CloudValidationResult, diff --git a/term-guard/src/cloud/repository.rs b/term-guard/src/cloud/repository.rs new file mode 100644 index 0000000..e7dade3 --- /dev/null +++ b/term-guard/src/cloud/repository.rs @@ -0,0 +1,784 @@ +//! TermCloudRepository - Main repository implementation for Term Cloud. +//! +//! This module provides the primary interface for persisting metrics to Term Cloud, +//! implementing the MetricsRepository trait with support for: +//! - Asynchronous background uploads via UploadWorker +//! - Offline operation with automatic sync via OfflineCache +//! - Local buffering via MetricsBuffer +//! +//! # Example +//! +//! ```rust,ignore +//! use term_guard::cloud::{CloudConfig, TermCloudRepository}; +//! use term_guard::repository::ResultKey; +//! use term_guard::analyzers::AnalyzerContext; +//! +//! let config = CloudConfig::new("your-api-key"); +//! let repository = TermCloudRepository::new(config)?; +//! +//! // Save metrics +//! let key = ResultKey::now().with_tag("env", "production"); +//! 
let context = AnalyzerContext::new(); +//! repository.save(key, context).await?; +//! +//! // Graceful shutdown +//! repository.shutdown().await?; +//! ``` + +use std::collections::HashMap; +use std::path::Path; +use std::sync::Arc; + +use async_trait::async_trait; +use directories::ProjectDirs; +use tokio::sync::{watch, RwLock}; +use tracing::{debug, error, info, instrument, warn}; + +use crate::analyzers::context::AnalyzerContext; +use crate::analyzers::types::MetricValue; +use crate::cloud::{ + BufferEntry, CloudConfig, CloudError, CloudMetadata, CloudMetric, CloudMetricValue, + CloudResult, CloudResultKey, MetricsBuffer, OfflineCache, TermCloudClient, UploadWorker, + WorkerStats, +}; +use crate::error::{Result, TermError}; +use crate::repository::{MetricsQuery, MetricsRepository, RepositoryMetadata, ResultKey}; + +/// Main repository implementation for persisting metrics to Term Cloud. +/// +/// TermCloudRepository provides a complete solution for metrics persistence with: +/// - Local buffering for high-throughput scenarios +/// - Background upload worker for asynchronous transmission +/// - Offline cache for resilience against network failures +/// - Automatic sync when connectivity is restored +/// +/// # Architecture +/// +/// ```text +/// ┌─────────────────┐ +/// │ Application │ +/// └────────┬────────┘ +/// │ save() +/// ▼ +/// ┌─────────────────┐ +/// │ MetricsBuffer │ (in-memory) +/// └────────┬────────┘ +/// │ +/// ▼ +/// ┌─────────────────┐ ┌─────────────────┐ +/// │ UploadWorker │────▶│ TermCloudClient │ +/// └────────┬────────┘ └────────┬────────┘ +/// │ │ +/// │ (on failure) │ +/// ▼ ▼ +/// ┌─────────────────┐ ┌─────────────────┐ +/// │ OfflineCache │ │ Term Cloud │ +/// │ (SQLite) │ │ API │ +/// └─────────────────┘ └─────────────────┘ +/// ``` +pub struct TermCloudRepository { + config: Arc, + client: TermCloudClient, + buffer: MetricsBuffer, + cache: Option, + shutdown_tx: watch::Sender, + worker_handle: Option>>>, +} + +impl TermCloudRepository 
{ + /// Creates a new TermCloudRepository and starts the background upload worker. + /// + /// # Arguments + /// + /// * `config` - Configuration for connecting to Term Cloud + /// + /// # Errors + /// + /// Returns an error if the HTTP client or upload worker cannot be created. + /// + /// # Example + /// + /// ```rust,ignore + /// use term_guard::cloud::{CloudConfig, TermCloudRepository}; + /// + /// let config = CloudConfig::new("your-api-key") + /// .with_buffer_size(5000) + /// .with_batch_size(100); + /// + /// let repository = TermCloudRepository::new(config)?; + /// ``` + #[instrument(skip(config), fields(endpoint = %config.endpoint()))] + pub fn new(config: CloudConfig) -> CloudResult { + let config = Arc::new(config); + let client = TermCloudClient::new((*config).clone())?; + let buffer = MetricsBuffer::new(config.buffer_size()); + let (shutdown_tx, shutdown_rx) = watch::channel(false); + + let worker = UploadWorker::new((*config).clone(), buffer.clone(), shutdown_rx)?; + let worker_handle = tokio::spawn(async move { worker.run().await }); + + info!("TermCloudRepository initialized with background worker"); + + Ok(Self { + config, + client, + buffer, + cache: None, + shutdown_tx, + worker_handle: Some(RwLock::new(Some(worker_handle))), + }) + } + + /// Sets up the offline cache for persisting metrics during network failures. + /// + /// If no path is provided in the config, uses the default platform-specific + /// cache directory (e.g., `~/.cache/term/metrics.db` on Linux). + /// + /// # Arguments + /// + /// * `path` - Optional custom path for the cache database + /// + /// # Errors + /// + /// Returns an error if the cache database cannot be created or opened. 
+ /// + /// # Example + /// + /// ```rust,ignore + /// let mut repository = TermCloudRepository::new(config)?; + /// + /// // Use default cache location + /// repository.setup_cache(None)?; + /// + /// // Or specify a custom path + /// repository.setup_cache(Some("/var/cache/myapp/metrics.db"))?; + /// ``` + #[instrument(skip(self, path))] + pub fn setup_cache(&mut self, path: Option<&Path>) -> CloudResult<()> { + let cache_path = if let Some(p) = path { + p.to_path_buf() + } else if let Some(p) = self.config.offline_cache_path() { + p.to_path_buf() + } else { + Self::default_cache_path()? + }; + + if let Some(parent) = cache_path.parent() { + std::fs::create_dir_all(parent).map_err(|e| CloudError::CacheError { + message: format!("Failed to create cache directory: {e}"), + })?; + } + + let cache = OfflineCache::new(&cache_path)?; + info!(path = %cache_path.display(), "Offline cache initialized"); + self.cache = Some(cache); + Ok(()) + } + + /// Returns the default platform-specific cache path. + fn default_cache_path() -> CloudResult { + ProjectDirs::from("dev", "term", "term-guard") + .map(|dirs| dirs.cache_dir().join("metrics.db")) + .ok_or_else(|| CloudError::Configuration { + message: "Could not determine cache directory".to_string(), + }) + } + + /// Returns the number of metrics currently pending in the buffer. + /// + /// # Example + /// + /// ```rust,ignore + /// let pending = repository.pending_count().await; + /// println!("Pending metrics: {}", pending); + /// ``` + pub async fn pending_count(&self) -> usize { + self.buffer.len().await + } + + /// Forces an immediate flush of all buffered metrics. + /// + /// This method drains the buffer and attempts to upload all metrics directly, + /// bypassing the background worker. Failed uploads are saved to the offline + /// cache if available. + /// + /// # Errors + /// + /// Returns an error if the upload fails and no cache is available. 
+ /// + /// # Example + /// + /// ```rust,ignore + /// // Force upload before application shutdown + /// repository.flush().await?; + /// ``` + #[instrument(skip(self))] + pub async fn flush(&self) -> CloudResult<()> { + let entries = self.buffer.clear().await; + if entries.is_empty() { + return Ok(()); + } + + info!(count = entries.len(), "Flushing metrics"); + self.upload_entries(entries).await + } + + /// Performs a graceful shutdown of the repository. + /// + /// This method: + /// 1. Signals the background worker to stop + /// 2. Waits for the worker to finish processing + /// 3. Saves any remaining buffered metrics to the offline cache + /// + /// # Errors + /// + /// Returns an error if remaining metrics cannot be saved. + /// + /// # Example + /// + /// ```rust,ignore + /// // Graceful shutdown + /// let stats = repository.shutdown().await?; + /// println!("Uploaded {} metrics during operation", stats.metrics_uploaded); + /// ``` + #[instrument(skip(self))] + pub async fn shutdown(&self) -> CloudResult> { + info!("Initiating graceful shutdown"); + + self.shutdown_tx.send(true).map_err(|e| CloudError::Configuration { + message: format!("Failed to send shutdown signal: {e}"), + })?; + + let stats = if let Some(ref handle_lock) = self.worker_handle { + let mut guard = handle_lock.write().await; + if let Some(handle) = guard.take() { + match handle.await { + Ok(stats) => { + info!( + uploaded = stats.metrics_uploaded, + failed = stats.metrics_failed, + "Worker shutdown complete" + ); + Some(stats) + } + Err(e) => { + error!("Worker task failed: {}", e); + None + } + } + } else { + None + } + } else { + None + }; + + let remaining = self.buffer.clear().await; + if !remaining.is_empty() { + warn!(count = remaining.len(), "Saving remaining metrics to cache"); + self.save_to_cache(&remaining)?; + } + + Ok(stats) + } + + /// Checks connectivity to Term Cloud. + /// + /// # Errors + /// + /// Returns an error if the health check fails. 
+ /// + /// # Example + /// + /// ```rust,ignore + /// match repository.health_check().await { + /// Ok(response) => println!("Connected to Term Cloud v{}", response.version), + /// Err(e) => eprintln!("Connection failed: {}", e), + /// } + /// ``` + #[instrument(skip(self))] + pub async fn health_check(&self) -> CloudResult { + self.client.health_check().await + } + + /// Synchronizes offline cached metrics to Term Cloud. + /// + /// Loads all cached metrics and attempts to upload them. Successfully + /// uploaded metrics are removed from the cache. + /// + /// # Returns + /// + /// Returns the number of metrics successfully synchronized. + /// + /// # Errors + /// + /// Returns an error if no cache is configured or if cache operations fail. + /// + /// # Example + /// + /// ```rust,ignore + /// // Check if we have cached metrics and sync them + /// let synced = repository.sync_offline_cache().await?; + /// println!("Synced {} cached metrics", synced); + /// ``` + #[instrument(skip(self))] + pub async fn sync_offline_cache(&self) -> CloudResult { + let cache = self.cache.as_ref().ok_or_else(|| CloudError::Configuration { + message: "Offline cache not configured".to_string(), + })?; + + let entries = cache.load_all()?; + if entries.is_empty() { + debug!("No cached metrics to sync"); + return Ok(0); + } + + info!(count = entries.len(), "Syncing cached metrics"); + + let mut synced = 0; + let mut synced_ids = Vec::new(); + + for cache_entry in entries { + let metrics = vec![cache_entry.entry.metric.clone()]; + match self.client.ingest(&metrics).await { + Ok(response) => { + synced += response.accepted; + synced_ids.push(cache_entry.id); + } + Err(e) if e.is_retryable() => { + warn!("Retryable error during sync, will try again later: {}", e); + break; + } + Err(e) => { + error!("Non-retryable error during sync: {}", e); + synced_ids.push(cache_entry.id); + } + } + } + + if !synced_ids.is_empty() { + cache.delete_ids(&synced_ids)?; + } + + info!(synced = synced, "Cache 
sync complete"); + Ok(synced) + } + + /// Converts a ResultKey and AnalyzerContext to a CloudMetric. + fn to_cloud_metric(key: &ResultKey, context: &AnalyzerContext) -> CloudMetric { + let mut cloud_metrics = HashMap::new(); + + for (metric_key, value) in context.all_metrics() { + let cloud_value = match value { + MetricValue::Double(v) => CloudMetricValue::Double(*v), + MetricValue::Long(v) => CloudMetricValue::Long(*v), + MetricValue::String(v) => CloudMetricValue::String(v.clone()), + MetricValue::Boolean(v) => CloudMetricValue::Boolean(*v), + MetricValue::Histogram(h) => CloudMetricValue::Histogram( + crate::cloud::CloudHistogram { + buckets: h + .buckets + .iter() + .map(|b| crate::cloud::CloudHistogramBucket { + lower_bound: b.lower_bound, + upper_bound: b.upper_bound, + count: b.count, + }) + .collect(), + total_count: h.total_count, + min: h.min, + max: h.max, + mean: h.mean, + std_dev: h.std_dev, + }, + ), + MetricValue::Vector(_) | MetricValue::Map(_) => { + continue; + } + }; + cloud_metrics.insert(metric_key.clone(), cloud_value); + } + + let metadata = context.metadata(); + CloudMetric { + result_key: CloudResultKey { + dataset_date: key.timestamp, + tags: key.tags.clone(), + }, + metrics: cloud_metrics, + metadata: CloudMetadata { + dataset_name: metadata.dataset_name.clone(), + start_time: metadata.start_time.map(|t| t.to_rfc3339()), + end_time: metadata.end_time.map(|t| t.to_rfc3339()), + term_version: env!("CARGO_PKG_VERSION").to_string(), + custom: metadata.custom.clone(), + }, + validation_result: None, + } + } + + /// Uploads entries directly to Term Cloud. 
+ async fn upload_entries(&self, entries: Vec) -> CloudResult<()> { + let metrics: Vec = entries.iter().map(|e| e.metric.clone()).collect(); + + match self.client.ingest(&metrics).await { + Ok(response) => { + debug!( + accepted = response.accepted, + rejected = response.rejected, + "Direct upload complete" + ); + Ok(()) + } + Err(e) => { + warn!("Direct upload failed: {}, saving to cache", e); + self.save_to_cache(&entries)?; + Ok(()) + } + } + } + + /// Saves entries to the offline cache. + fn save_to_cache(&self, entries: &[BufferEntry]) -> CloudResult<()> { + if let Some(ref cache) = self.cache { + for entry in entries { + cache.save(&entry.metric, entry.retry_count)?; + } + Ok(()) + } else { + Err(CloudError::CacheError { + message: "Offline cache not configured, metrics will be lost".to_string(), + }) + } + } + + /// Returns a reference to the underlying client. + pub fn client(&self) -> &TermCloudClient { + &self.client + } + + /// Returns a reference to the configuration. + pub fn config(&self) -> &CloudConfig { + &self.config + } +} + +#[async_trait] +impl MetricsRepository for TermCloudRepository { + /// Saves metrics to the buffer for asynchronous upload. + /// + /// Metrics are buffered locally and uploaded by the background worker. + /// If the buffer is full, returns a BufferOverflow error. + #[instrument(skip(self, metrics), fields(key.timestamp = %key.timestamp, repository_type = "term_cloud"))] + async fn save(&self, key: ResultKey, metrics: AnalyzerContext) -> Result<()> { + if let Err(validation_error) = key.validate_tags() { + return Err(TermError::repository_validation( + "tags", + validation_error, + key.to_string(), + )); + } + + let cloud_metric = Self::to_cloud_metric(&key, &metrics); + + self.buffer.push(cloud_metric).await.map_err(|e| { + TermError::repository("term_cloud", "save", e.to_string()) + })?; + + debug!("Metric queued for upload"); + Ok(()) + } + + /// Creates a query builder for retrieving metrics from Term Cloud. 
+ /// + /// Note: Query execution requires network access to Term Cloud. + #[instrument(skip(self))] + async fn load(&self) -> MetricsQuery { + MetricsQuery::new(Arc::new(TermCloudQueryAdapter { + client: self.client.clone(), + })) + } + + /// Deletes metrics by key from Term Cloud. + #[instrument(skip(self), fields(key.timestamp = %key.timestamp, repository_type = "term_cloud"))] + async fn delete(&self, key: ResultKey) -> Result<()> { + let cloud_key = CloudResultKey { + dataset_date: key.timestamp, + tags: key.tags.clone(), + }; + + self.client + .delete(&cloud_key) + .await + .map_err(|e| TermError::repository("term_cloud", "delete", e.to_string())) + } + + /// Returns metadata about the repository. + #[instrument(skip(self))] + async fn metadata(&self) -> Result { + let pending = self.buffer.len().await; + let cached = self.cache.as_ref().map(|c| c.count().unwrap_or(0)).unwrap_or(0); + + Ok(RepositoryMetadata::new("term_cloud") + .with_config("endpoint", self.config.endpoint()) + .with_config("pending_metrics", pending.to_string()) + .with_config("cached_metrics", cached.to_string())) + } +} + +/// Adapter for executing queries via TermCloudClient. 
+struct TermCloudQueryAdapter { + client: TermCloudClient, +} + +#[async_trait] +impl MetricsRepository for TermCloudQueryAdapter { + async fn save(&self, _key: ResultKey, _metrics: AnalyzerContext) -> Result<()> { + Err(TermError::NotSupported( + "save not supported on query adapter".to_string(), + )) + } + + async fn load(&self) -> MetricsQuery { + MetricsQuery::new(Arc::new(Self { + client: self.client.clone(), + })) + } + + async fn delete(&self, _key: ResultKey) -> Result<()> { + Err(TermError::NotSupported( + "delete not supported on query adapter".to_string(), + )) + } + + async fn list_keys(&self) -> Result> { + let query = crate::cloud::MetricsQuery::default(); + let response = self + .client + .query(query) + .await + .map_err(|e| TermError::repository("term_cloud", "list_keys", e.to_string()))?; + + Ok(response + .results + .into_iter() + .map(|m| { + ResultKey::new(m.result_key.dataset_date).with_tags(m.result_key.tags) + }) + .collect()) + } + + async fn get(&self, key: &ResultKey) -> Result> { + let query = crate::cloud::MetricsQuery { + after: Some(key.timestamp), + before: Some(key.timestamp + 1), + tags: key.tags.clone(), + limit: Some(1), + ..Default::default() + }; + + let response = self + .client + .query(query) + .await + .map_err(|e| TermError::repository("term_cloud", "get", e.to_string()))?; + + Ok(response.results.into_iter().next().map(|m| { + let mut context = AnalyzerContext::new(); + for (metric_key, value) in m.metrics { + let metric_value = match value { + CloudMetricValue::Double(v) => MetricValue::Double(v), + CloudMetricValue::Long(v) => MetricValue::Long(v), + CloudMetricValue::String(v) => MetricValue::String(v), + CloudMetricValue::Boolean(v) => MetricValue::Boolean(v), + CloudMetricValue::Histogram(_) => continue, + }; + context.store_metric(metric_key, metric_value); + } + context + })) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::time::Duration; + + fn make_test_config() -> CloudConfig { + 
CloudConfig::new("test-api-key") + .with_endpoint("http://localhost:1") + .with_buffer_size(100) + .with_flush_interval(Duration::from_millis(50)) + } + + #[tokio::test] + async fn test_repository_creation() { + let config = make_test_config(); + let result = TermCloudRepository::new(config); + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_repository_save_queues_metric() { + let config = make_test_config(); + let repository = TermCloudRepository::new(config).unwrap(); + + let key = ResultKey::new(1704931200000).with_tag("env", "test"); + let context = AnalyzerContext::new(); + + let result = repository.save(key, context).await; + assert!(result.is_ok()); + + assert_eq!(repository.pending_count().await, 1); + + let _ = repository.shutdown().await; + } + + #[tokio::test] + async fn test_repository_save_validates_tags() { + let config = make_test_config(); + let repository = TermCloudRepository::new(config).unwrap(); + + let key = ResultKey::new(1704931200000).with_tag("", "invalid"); + let context = AnalyzerContext::new(); + + let result = repository.save(key, context).await; + assert!(result.is_err()); + + let _ = repository.shutdown().await; + } + + #[tokio::test] + async fn test_repository_pending_count() { + let config = make_test_config(); + let repository = TermCloudRepository::new(config).unwrap(); + + assert_eq!(repository.pending_count().await, 0); + + for i in 0..5 { + let key = ResultKey::new(1704931200000 + i).with_tag("index", i.to_string()); + let context = AnalyzerContext::new(); + repository.save(key, context).await.unwrap(); + } + + assert_eq!(repository.pending_count().await, 5); + + let _ = repository.shutdown().await; + } + + #[tokio::test] + async fn test_repository_shutdown_returns_stats() { + let config = make_test_config(); + let repository = TermCloudRepository::new(config).unwrap(); + + let result = repository.shutdown().await; + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_repository_metadata() { + let 
config = make_test_config(); + let repository = TermCloudRepository::new(config).unwrap(); + + let metadata = repository.metadata().await.unwrap(); + assert_eq!(metadata.backend_type, Some("term_cloud".to_string())); + assert!(metadata.config.contains_key("endpoint")); + assert!(metadata.config.contains_key("pending_metrics")); + + let _ = repository.shutdown().await; + } + + #[tokio::test] + async fn test_to_cloud_metric_conversion() { + let key = ResultKey::new(1704931200000) + .with_tag("env", "prod") + .with_tag("region", "us-east-1"); + + let mut context = AnalyzerContext::with_dataset("test_dataset"); + context.store_metric("completeness.col1", MetricValue::Double(0.98)); + context.store_metric("size", MetricValue::Long(1000)); + context.store_metric("is_valid", MetricValue::Boolean(true)); + + let cloud_metric = TermCloudRepository::to_cloud_metric(&key, &context); + + assert_eq!(cloud_metric.result_key.dataset_date, 1704931200000); + assert_eq!(cloud_metric.result_key.tags.get("env"), Some(&"prod".to_string())); + assert_eq!(cloud_metric.metadata.dataset_name, Some("test_dataset".to_string())); + assert!(cloud_metric.metrics.contains_key("completeness.col1")); + assert!(cloud_metric.metrics.contains_key("size")); + assert!(cloud_metric.metrics.contains_key("is_valid")); + } + + #[tokio::test] + async fn test_repository_cache_setup() { + let config = make_test_config(); + let mut repository = TermCloudRepository::new(config).unwrap(); + + let temp_dir = tempfile::tempdir().unwrap(); + let cache_path = temp_dir.path().join("test_cache.db"); + + let result = repository.setup_cache(Some(&cache_path)); + assert!(result.is_ok()); + + let _ = repository.shutdown().await; + } + + #[tokio::test] + async fn test_repository_flush() { + let config = make_test_config(); + let mut repository = TermCloudRepository::new(config).unwrap(); + + let temp_dir = tempfile::tempdir().unwrap(); + let cache_path = temp_dir.path().join("flush_test.db"); + 
repository.setup_cache(Some(&cache_path)).unwrap(); + + let key = ResultKey::new(1704931200000).with_tag("env", "test"); + let context = AnalyzerContext::new(); + repository.save(key, context).await.unwrap(); + + assert_eq!(repository.pending_count().await, 1); + + let result = repository.flush().await; + assert!(result.is_ok()); + + assert_eq!(repository.pending_count().await, 0); + + let _ = repository.shutdown().await; + } + + #[tokio::test] + async fn test_repository_sync_without_cache() { + let config = make_test_config(); + let repository = TermCloudRepository::new(config).unwrap(); + + let result = repository.sync_offline_cache().await; + assert!(result.is_err()); + + let _ = repository.shutdown().await; + } + + #[tokio::test] + async fn test_repository_sync_empty_cache() { + let config = make_test_config(); + let mut repository = TermCloudRepository::new(config).unwrap(); + + let temp_dir = tempfile::tempdir().unwrap(); + let cache_path = temp_dir.path().join("sync_test.db"); + repository.setup_cache(Some(&cache_path)).unwrap(); + + let result = repository.sync_offline_cache().await; + assert!(result.is_ok()); + assert_eq!(result.unwrap(), 0); + + let _ = repository.shutdown().await; + } + + #[tokio::test] + async fn test_default_cache_path() { + let result = TermCloudRepository::default_cache_path(); + assert!(result.is_ok()); + let path = result.unwrap(); + assert!(path.to_string_lossy().contains("term")); + } +} From 7089d4d1a8304090c84f64a2abab2ba3e492ccf9 Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 19:33:32 -0700 Subject: [PATCH 13/22] feat(cloud): add webhook alerting support Add webhook-based alerting for validation failures with the following components: - WebhookConfig: Builder pattern for URL, headers, details, severity threshold, and HMAC secret configuration - AlertSeverity: Info, Warning, Critical enum with Ord for comparison - AlertPayload: Structured alert with title, severity, dataset, environment, summary, details, 
timestamp, and dashboard URL - AlertSummary: total_checks, passed, failed, status - AlertDetail: check, constraint, level, message, metric - WebhookClient: HTTP client with severity filtering, optional HMAC-SHA256 signing, and configurable detail inclusion Includes comprehensive tests for config validation, payload creation, severity ordering, and HMAC signing. Co-Authored-By: Claude Opus 4.5 --- term-guard/src/cloud/alerting.rs | 631 +++++++++++++++++++++++++++++++ term-guard/src/cloud/mod.rs | 5 + 2 files changed, 636 insertions(+) create mode 100644 term-guard/src/cloud/alerting.rs diff --git a/term-guard/src/cloud/alerting.rs b/term-guard/src/cloud/alerting.rs new file mode 100644 index 0000000..3ebbe30 --- /dev/null +++ b/term-guard/src/cloud/alerting.rs @@ -0,0 +1,631 @@ +//! Webhook-based alerting for validation failures. +//! +//! This module provides webhook alerting capabilities that can be triggered +//! when validation checks fail. Supports custom headers, HMAC signing, and +//! severity-based filtering. + +use std::collections::HashMap; +use std::time::Duration; + +use chrono::{DateTime, Utc}; +use ring::hmac; +use serde::{Deserialize, Serialize}; + +use super::error::{CloudError, CloudResult}; +use super::types::{CloudValidationIssue, CloudValidationResult}; +use crate::security::SecureString; + +/// Alert severity levels for filtering webhook notifications. +#[derive( + Debug, Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, +)] +#[serde(rename_all = "lowercase")] +pub enum AlertSeverity { + Info, + #[default] + Warning, + Critical, +} + +impl std::fmt::Display for AlertSeverity { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Info => write!(f, "info"), + Self::Warning => write!(f, "warning"), + Self::Critical => write!(f, "critical"), + } + } +} + +/// Configuration for webhook alerting. 
+#[derive(Debug, Clone)] +pub struct WebhookConfig { + url: String, + headers: HashMap, + include_details: bool, + min_severity: AlertSeverity, + secret: Option, + timeout: Duration, +} + +impl WebhookConfig { + /// Create a new WebhookConfig with the given URL. + pub fn new(url: impl Into) -> Self { + Self { + url: url.into(), + headers: HashMap::new(), + include_details: false, + min_severity: AlertSeverity::default(), + secret: None, + timeout: Duration::from_secs(10), + } + } + + /// Add a custom header to be sent with webhook requests. + pub fn with_header(mut self, key: impl Into, value: impl Into) -> Self { + self.headers.insert(key.into(), value.into()); + self + } + + /// Set whether to include full validation details in the alert payload. + pub fn with_details(mut self, include: bool) -> Self { + self.include_details = include; + self + } + + /// Set the minimum severity level required to trigger an alert. + pub fn with_min_severity(mut self, severity: AlertSeverity) -> Self { + self.min_severity = severity; + self + } + + /// Set a secret for HMAC-SHA256 signing of payloads. + pub fn with_secret(mut self, secret: impl Into) -> Self { + self.secret = Some(SecureString::new(secret.into())); + self + } + + /// Set the request timeout duration. + pub fn with_timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } + + /// Validate the webhook configuration. 
+ pub fn validate(&self) -> CloudResult<()> { + if self.url.is_empty() { + return Err(CloudError::Configuration { + message: "Webhook URL cannot be empty".to_string(), + }); + } + + let url_lower = self.url.to_lowercase(); + if !url_lower.starts_with("http://") && !url_lower.starts_with("https://") { + return Err(CloudError::Configuration { + message: "Webhook URL must start with http:// or https://".to_string(), + }); + } + + if reqwest::Url::parse(&self.url).is_err() { + return Err(CloudError::Configuration { + message: format!("Invalid webhook URL: {}", self.url), + }); + } + + Ok(()) + } + + /// Get the webhook URL. + pub fn url(&self) -> &str { + &self.url + } + + /// Get the custom headers. + pub fn headers(&self) -> &HashMap { + &self.headers + } + + /// Check if details should be included. + pub fn include_details(&self) -> bool { + self.include_details + } + + /// Get the minimum severity level. + pub fn min_severity(&self) -> AlertSeverity { + self.min_severity + } + + /// Get the signing secret. + pub fn secret(&self) -> Option<&SecureString> { + self.secret.as_ref() + } + + /// Get the timeout duration. + pub fn timeout(&self) -> Duration { + self.timeout + } +} + +/// Summary information about the validation result. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AlertSummary { + pub total_checks: usize, + pub passed: usize, + pub failed: usize, + pub status: String, +} + +/// Details about a specific validation failure. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AlertDetail { + pub check: String, + pub constraint: String, + pub level: String, + pub message: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub metric: Option, +} + +impl From<&CloudValidationIssue> for AlertDetail { + fn from(issue: &CloudValidationIssue) -> Self { + Self { + check: issue.check_name.clone(), + constraint: issue.constraint_name.clone(), + level: issue.level.clone(), + message: issue.message.clone(), + metric: issue.metric, + } + } +} + +/// Payload sent to the webhook endpoint. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AlertPayload { + pub title: String, + pub severity: AlertSeverity, + pub dataset: String, + pub environment: String, + pub summary: AlertSummary, + #[serde(skip_serializing_if = "Option::is_none")] + pub details: Option>, + pub timestamp: DateTime, + #[serde(skip_serializing_if = "Option::is_none")] + pub dashboard_url: Option, +} + +impl AlertPayload { + /// Create an alert payload from a validation result. + pub fn from_validation_result( + result: &CloudValidationResult, + dataset: impl Into, + environment: impl Into, + ) -> Self { + let severity = Self::determine_severity(result); + let title = Self::generate_title(result, &severity); + + let summary = AlertSummary { + total_checks: result.total_checks, + passed: result.passed_checks, + failed: result.failed_checks, + status: result.status.clone(), + }; + + let details: Option> = if result.issues.is_empty() { + None + } else { + Some(result.issues.iter().map(AlertDetail::from).collect()) + }; + + Self { + title, + severity, + dataset: dataset.into(), + environment: environment.into(), + summary, + details, + timestamp: Utc::now(), + dashboard_url: None, + } + } + + /// Set the dashboard URL for the alert. 
+    pub fn with_dashboard_url(mut self, url: impl Into<String>) -> Self {
+        self.dashboard_url = Some(url.into());
+        self
+    }
+
+    fn determine_severity(result: &CloudValidationResult) -> AlertSeverity {
+        if result.failed_checks == 0 {
+            return AlertSeverity::Info;
+        }
+
+        let failure_rate = result.failed_checks as f64 / result.total_checks.max(1) as f64;
+
+        if failure_rate >= 0.5 || result.status == "error" {
+            AlertSeverity::Critical
+        } else if result.failed_checks > 0 {
+            AlertSeverity::Warning
+        } else {
+            AlertSeverity::Info
+        }
+    }
+
+    fn generate_title(result: &CloudValidationResult, severity: &AlertSeverity) -> String {
+        match severity {
+            AlertSeverity::Info => "Validation Passed".to_string(),
+            AlertSeverity::Warning => format!(
+                "Validation Warning: {} of {} checks failed",
+                result.failed_checks, result.total_checks
+            ),
+            AlertSeverity::Critical => format!(
+                "Validation Critical: {} of {} checks failed",
+                result.failed_checks, result.total_checks
+            ),
+        }
+    }
+}
+
+/// Client for sending webhook alerts.
+pub struct WebhookClient {
+    client: reqwest::Client,
+    config: WebhookConfig,
+}
+
+impl WebhookClient {
+    /// Create a new WebhookClient with the given configuration.
+    pub fn new(config: WebhookConfig) -> CloudResult<Self> {
+        config.validate()?;
+
+        let client = reqwest::Client::builder()
+            .timeout(config.timeout)
+            .build()
+            .map_err(|e| CloudError::Configuration {
+                message: format!("Failed to build HTTP client: {e}"),
+            })?;
+
+        Ok(Self { client, config })
+    }
+
+    /// Send an alert to the configured webhook endpoint.
+ pub async fn send(&self, payload: &AlertPayload) -> CloudResult<()> { + if payload.severity < self.config.min_severity { + tracing::debug!( + severity = %payload.severity, + min_severity = %self.config.min_severity, + "Alert severity below threshold, skipping" + ); + return Ok(()); + } + + let mut payload_to_send = payload.clone(); + if !self.config.include_details { + payload_to_send.details = None; + } + + let body = + serde_json::to_string(&payload_to_send).map_err(|e| CloudError::Serialization { + message: e.to_string(), + })?; + + let mut request = self + .client + .post(&self.config.url) + .header("Content-Type", "application/json"); + + for (key, value) in &self.config.headers { + request = request.header(key, value); + } + + if let Some(secret) = &self.config.secret { + let signature = Self::sign_payload(&body, secret.expose()); + request = request.header("X-Signature-256", format!("sha256={signature}")); + } + + let response = request + .body(body) + .send() + .await + .map_err(|e| CloudError::Network { + message: e.to_string(), + })?; + + if !response.status().is_success() { + let status = response.status().as_u16(); + let message = response + .text() + .await + .unwrap_or_else(|_| "Unknown error".to_string()); + return Err(CloudError::ServerError { status, message }); + } + + tracing::info!( + dataset = %payload.dataset, + severity = %payload.severity, + "Alert sent successfully" + ); + + Ok(()) + } + + /// Sign a payload using HMAC-SHA256. 
+ pub fn sign_payload(body: &str, secret: &str) -> String { + let key = hmac::Key::new(hmac::HMAC_SHA256, secret.as_bytes()); + let signature = hmac::sign(&key, body.as_bytes()); + hex::encode(signature.as_ref()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_alert_severity_ordering() { + assert!(AlertSeverity::Info < AlertSeverity::Warning); + assert!(AlertSeverity::Warning < AlertSeverity::Critical); + assert!(AlertSeverity::Info < AlertSeverity::Critical); + } + + #[test] + fn test_alert_severity_default() { + assert_eq!(AlertSeverity::default(), AlertSeverity::Warning); + } + + #[test] + fn test_alert_severity_display() { + assert_eq!(AlertSeverity::Info.to_string(), "info"); + assert_eq!(AlertSeverity::Warning.to_string(), "warning"); + assert_eq!(AlertSeverity::Critical.to_string(), "critical"); + } + + #[test] + fn test_webhook_config_new() { + let config = WebhookConfig::new("https://example.com/webhook"); + assert_eq!(config.url(), "https://example.com/webhook"); + assert!(config.headers().is_empty()); + assert!(!config.include_details()); + assert_eq!(config.min_severity(), AlertSeverity::Warning); + assert!(config.secret().is_none()); + assert_eq!(config.timeout(), Duration::from_secs(10)); + } + + #[test] + fn test_webhook_config_builder() { + let config = WebhookConfig::new("https://example.com/webhook") + .with_header("Authorization", "Bearer token123") + .with_header("X-Custom", "value") + .with_details(true) + .with_min_severity(AlertSeverity::Critical) + .with_secret("my-secret"); + + assert_eq!(config.url(), "https://example.com/webhook"); + assert_eq!( + config.headers().get("Authorization"), + Some(&"Bearer token123".to_string()) + ); + assert_eq!(config.headers().get("X-Custom"), Some(&"value".to_string())); + assert!(config.include_details()); + assert_eq!(config.min_severity(), AlertSeverity::Critical); + assert_eq!(config.secret().map(|s| s.expose()), Some("my-secret")); + assert_eq!(config.timeout(), 
Duration::from_secs(10)); + } + + #[test] + fn test_webhook_config_with_timeout() { + let config = + WebhookConfig::new("https://example.com/webhook").with_timeout(Duration::from_secs(30)); + assert_eq!(config.timeout(), Duration::from_secs(30)); + } + + #[test] + fn test_webhook_config_validate_success() { + let config = WebhookConfig::new("https://example.com/webhook"); + assert!(config.validate().is_ok()); + + let config = WebhookConfig::new("http://localhost:8080/hook"); + assert!(config.validate().is_ok()); + } + + #[test] + fn test_webhook_config_validate_empty_url() { + let config = WebhookConfig::new(""); + let result = config.validate(); + assert!(result.is_err()); + match result.unwrap_err() { + CloudError::Configuration { message } => { + assert!(message.contains("cannot be empty")); + } + _ => panic!("Expected Configuration error"), + } + } + + #[test] + fn test_webhook_config_validate_invalid_scheme() { + let config = WebhookConfig::new("ftp://example.com/webhook"); + let result = config.validate(); + assert!(result.is_err()); + match result.unwrap_err() { + CloudError::Configuration { message } => { + assert!(message.contains("http://") || message.contains("https://")); + } + _ => panic!("Expected Configuration error"), + } + } + + #[test] + fn test_webhook_config_validate_invalid_url() { + let config = WebhookConfig::new("https://"); + let result = config.validate(); + assert!(result.is_err()); + } + + #[test] + fn test_alert_payload_from_validation_result_success() { + let result = CloudValidationResult { + status: "success".to_string(), + total_checks: 10, + passed_checks: 10, + failed_checks: 0, + issues: vec![], + }; + + let payload = AlertPayload::from_validation_result(&result, "orders", "production"); + + assert_eq!(payload.severity, AlertSeverity::Info); + assert_eq!(payload.dataset, "orders"); + assert_eq!(payload.environment, "production"); + assert_eq!(payload.summary.total_checks, 10); + assert_eq!(payload.summary.passed, 10); + 
assert_eq!(payload.summary.failed, 0); + assert!(payload.details.is_none()); + } + + #[test] + fn test_alert_payload_from_validation_result_warning() { + let result = CloudValidationResult { + status: "warning".to_string(), + total_checks: 10, + passed_checks: 8, + failed_checks: 2, + issues: vec![CloudValidationIssue { + check_name: "DataQuality".to_string(), + constraint_name: "Completeness".to_string(), + level: "warning".to_string(), + message: "Column 'email' has nulls".to_string(), + metric: Some(0.95), + }], + }; + + let payload = AlertPayload::from_validation_result(&result, "users", "staging"); + + assert_eq!(payload.severity, AlertSeverity::Warning); + assert!(payload.title.contains("Warning")); + assert!(payload.details.is_some()); + let details = payload.details.unwrap(); + assert_eq!(details.len(), 1); + assert_eq!(details[0].check, "DataQuality"); + assert_eq!(details[0].metric, Some(0.95)); + } + + #[test] + fn test_alert_payload_from_validation_result_critical() { + let result = CloudValidationResult { + status: "error".to_string(), + total_checks: 10, + passed_checks: 3, + failed_checks: 7, + issues: vec![], + }; + + let payload = AlertPayload::from_validation_result(&result, "orders", "production"); + + assert_eq!(payload.severity, AlertSeverity::Critical); + assert!(payload.title.contains("Critical")); + } + + #[test] + fn test_alert_payload_with_dashboard_url() { + let result = CloudValidationResult { + status: "success".to_string(), + total_checks: 1, + passed_checks: 1, + failed_checks: 0, + issues: vec![], + }; + + let payload = AlertPayload::from_validation_result(&result, "test", "dev") + .with_dashboard_url("https://dashboard.example.com/run/123"); + + assert_eq!( + payload.dashboard_url, + Some("https://dashboard.example.com/run/123".to_string()) + ); + } + + #[test] + fn test_alert_detail_from_cloud_validation_issue() { + let issue = CloudValidationIssue { + check_name: "MyCheck".to_string(), + constraint_name: 
"MyConstraint".to_string(), + level: "error".to_string(), + message: "Something went wrong".to_string(), + metric: Some(0.5), + }; + + let detail = AlertDetail::from(&issue); + + assert_eq!(detail.check, "MyCheck"); + assert_eq!(detail.constraint, "MyConstraint"); + assert_eq!(detail.level, "error"); + assert_eq!(detail.message, "Something went wrong"); + assert_eq!(detail.metric, Some(0.5)); + } + + #[test] + fn test_sign_payload() { + let body = r#"{"title":"Test Alert"}"#; + let secret = "test-secret"; + + let signature1 = WebhookClient::sign_payload(body, secret); + let signature2 = WebhookClient::sign_payload(body, secret); + + assert_eq!(signature1, signature2); + assert!(!signature1.is_empty()); + assert_eq!(signature1.len(), 64); + } + + #[test] + fn test_sign_payload_different_secrets() { + let body = r#"{"title":"Test Alert"}"#; + + let signature1 = WebhookClient::sign_payload(body, "secret1"); + let signature2 = WebhookClient::sign_payload(body, "secret2"); + + assert_ne!(signature1, signature2); + } + + #[test] + fn test_webhook_client_new_valid_config() { + let config = WebhookConfig::new("https://example.com/webhook"); + let client = WebhookClient::new(config); + assert!(client.is_ok()); + } + + #[test] + fn test_webhook_client_new_invalid_config() { + let config = WebhookConfig::new(""); + let client = WebhookClient::new(config); + assert!(client.is_err()); + } + + #[test] + fn test_alert_severity_serialization() { + let severity = AlertSeverity::Warning; + let json = serde_json::to_string(&severity).unwrap(); + assert_eq!(json, "\"warning\""); + + let deserialized: AlertSeverity = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized, AlertSeverity::Warning); + } + + #[test] + fn test_alert_payload_serialization() { + let result = CloudValidationResult { + status: "success".to_string(), + total_checks: 5, + passed_checks: 5, + failed_checks: 0, + issues: vec![], + }; + + let payload = AlertPayload::from_validation_result(&result, 
"test_dataset", "test_env"); + let json = serde_json::to_string(&payload).unwrap(); + + assert!(json.contains("\"title\"")); + assert!(json.contains("\"severity\":\"info\"")); + assert!(json.contains("\"dataset\":\"test_dataset\"")); + assert!(json.contains("\"environment\":\"test_env\"")); + assert!(json.contains("\"summary\"")); + assert!(!json.contains("\"details\"")); // None should be skipped + } +} diff --git a/term-guard/src/cloud/mod.rs b/term-guard/src/cloud/mod.rs index af6d5fb..fdc60cd 100644 --- a/term-guard/src/cloud/mod.rs +++ b/term-guard/src/cloud/mod.rs @@ -3,6 +3,7 @@ //! This module provides integration with the Term Cloud platform, //! enabling centralized metrics storage, alerting, and historical analysis. +mod alerting; mod buffer; mod cache; mod client; @@ -23,3 +24,7 @@ pub use types::{ CloudMetricValue, CloudResultKey, CloudValidationIssue, CloudValidationResult, }; pub use worker::{UploadWorker, WorkerStats}; + +pub use alerting::{ + AlertDetail, AlertPayload, AlertSeverity, AlertSummary, WebhookClient, WebhookConfig, +}; From f9e4136189b0a41e98aa6b0d86f2e5ea406eaa90 Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 19:45:43 -0700 Subject: [PATCH 14/22] test: add integration tests for Term Cloud SDK Add comprehensive integration tests covering: - Full cloud flow with repository creation, metric saving, and graceful shutdown - CloudMetric wire format serialization and JSON structure verification - Webhook alert generation from validation results with severity levels - CloudConfig builder pattern with all configuration options - Multiple metrics saving and pending count verification - CloudValidationResult serialization roundtrip - Alert severity level determination (Info, Warning, Critical) Co-Authored-By: Claude Opus 4.5 --- term-guard/tests/cloud_integration.rs | 267 ++++++++++++++++++++++++++ 1 file changed, 267 insertions(+) create mode 100644 term-guard/tests/cloud_integration.rs diff --git 
a/term-guard/tests/cloud_integration.rs b/term-guard/tests/cloud_integration.rs new file mode 100644 index 0000000..9b3571e --- /dev/null +++ b/term-guard/tests/cloud_integration.rs @@ -0,0 +1,267 @@ +#![cfg(feature = "cloud")] + +use std::collections::HashMap; +use std::time::Duration; + +use term_guard::analyzers::{AnalyzerContext, MetricValue}; +use term_guard::cloud::{ + AlertPayload, AlertSeverity, CloudConfig, CloudMetadata, CloudMetric, CloudMetricValue, + CloudResultKey, CloudValidationIssue, CloudValidationResult, TermCloudRepository, +}; +use term_guard::repository::{MetricsRepository, ResultKey}; + +#[tokio::test] +async fn test_full_cloud_flow() { + let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); + let cache_path = temp_dir.path().join("test_cache.db"); + + let config = CloudConfig::new("test-api-key-12345") + .with_endpoint("http://localhost:1") + .with_buffer_size(100) + .with_flush_interval(Duration::from_millis(100)); + + let mut repository = TermCloudRepository::new(config).expect("Failed to create repository"); + repository + .setup_cache(Some(&cache_path)) + .expect("Failed to setup cache"); + + let mut context = AnalyzerContext::with_dataset("test_dataset"); + context.store_metric("completeness.user_id", MetricValue::Double(0.98)); + context.store_metric("size", MetricValue::Long(1000)); + context.store_metric("is_valid", MetricValue::Boolean(true)); + + let key = ResultKey::now() + .with_tag("env", "test") + .with_tag("version", "1.0.0"); + + repository + .save(key, context) + .await + .expect("Failed to save metrics"); + + assert_eq!(repository.pending_count().await, 1); + + let stats = repository.shutdown().await.expect("Failed to shutdown"); + assert!(stats.is_some()); +} + +#[test] +fn test_cloud_metric_wire_format() { + let metric = CloudMetric { + result_key: CloudResultKey { + dataset_date: 1704931200000, + tags: vec![ + ("env".to_string(), "production".to_string()), + ("region".to_string(), 
"us-east-1".to_string()), + ] + .into_iter() + .collect(), + }, + metrics: vec![ + ( + "completeness.user_id".to_string(), + CloudMetricValue::Double(0.98), + ), + ("size".to_string(), CloudMetricValue::Long(5000)), + ("is_valid".to_string(), CloudMetricValue::Boolean(true)), + ] + .into_iter() + .collect(), + metadata: CloudMetadata { + dataset_name: Some("orders_table".to_string()), + start_time: Some("2024-01-10T12:00:00Z".to_string()), + end_time: Some("2024-01-10T12:05:00Z".to_string()), + term_version: "0.0.2".to_string(), + custom: HashMap::new(), + }, + validation_result: None, + }; + + let json = serde_json::to_string(&metric).expect("Failed to serialize metric"); + + assert!(json.contains("result_key")); + assert!(json.contains("dataset_date")); + assert!(json.contains("1704931200000")); + assert!(json.contains("metrics")); + assert!(json.contains("completeness.user_id")); + assert!(json.contains("metadata")); + assert!(json.contains("dataset_name")); + assert!(json.contains("orders_table")); + assert!(json.contains("term_version")); + + let parsed: serde_json::Value = serde_json::from_str(&json).expect("Failed to parse JSON"); + + assert!(parsed["result_key"]["dataset_date"].is_number()); + assert!(parsed["result_key"]["tags"].is_object()); + assert!(parsed["metrics"].is_object()); + assert!(parsed["metadata"]["dataset_name"].is_string()); +} + +#[test] +fn test_webhook_alert_generation() { + let validation_result = CloudValidationResult { + status: "error".to_string(), + total_checks: 10, + passed_checks: 3, + failed_checks: 7, + issues: vec![ + CloudValidationIssue { + check_name: "DataQuality".to_string(), + constraint_name: "Completeness".to_string(), + level: "error".to_string(), + message: "Column 'user_id' has 15% null values".to_string(), + metric: Some(0.85), + }, + CloudValidationIssue { + check_name: "DataQuality".to_string(), + constraint_name: "Uniqueness".to_string(), + level: "error".to_string(), + message: "Column 'email' has duplicate 
values".to_string(), + metric: Some(0.92), + }, + ], + }; + + let payload = + AlertPayload::from_validation_result(&validation_result, "orders_table", "production"); + + assert_eq!(payload.severity, AlertSeverity::Critical); + assert!(payload.title.contains("Critical") || payload.title.contains("Failed")); + assert_eq!(payload.dataset, "orders_table"); + assert_eq!(payload.environment, "production"); + assert_eq!(payload.summary.total_checks, 10); + assert_eq!(payload.summary.passed, 3); + assert_eq!(payload.summary.failed, 7); + + assert!(payload.details.is_some()); + let details = payload.details.unwrap(); + assert_eq!(details.len(), 2); + assert_eq!(details[0].check, "DataQuality"); + assert_eq!(details[0].constraint, "Completeness"); + assert_eq!(details[0].metric, Some(0.85)); +} + +#[test] +fn test_config_builder() { + let config = CloudConfig::new("my-api-key") + .with_endpoint("https://custom.endpoint.com") + .with_timeout(Duration::from_secs(60)) + .with_max_retries(5) + .with_buffer_size(5000) + .with_batch_size(200) + .with_flush_interval(Duration::from_secs(10)) + .with_offline_cache_path("/tmp/test_cache.db"); + + assert_eq!(config.api_key().expose(), "my-api-key"); + assert_eq!(config.endpoint(), "https://custom.endpoint.com"); + assert_eq!(config.timeout(), Duration::from_secs(60)); + assert_eq!(config.max_retries(), 5); + assert_eq!(config.buffer_size(), 5000); + assert_eq!(config.batch_size(), 200); + assert_eq!(config.flush_interval(), Duration::from_secs(10)); + assert_eq!( + config.offline_cache_path(), + Some(std::path::Path::new("/tmp/test_cache.db")) + ); +} + +#[tokio::test] +async fn test_repository_with_multiple_metrics() { + let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); + let cache_path = temp_dir.path().join("multi_metrics_cache.db"); + + let config = CloudConfig::new("test-key") + .with_endpoint("http://localhost:1") + .with_buffer_size(50); + + let mut repository = 
TermCloudRepository::new(config).expect("Failed to create repository"); + repository + .setup_cache(Some(&cache_path)) + .expect("Failed to setup cache"); + + for i in 0..5 { + let mut context = AnalyzerContext::with_dataset(format!("dataset_{}", i)); + context.store_metric( + "completeness.col1", + MetricValue::Double(0.9 + (i as f64) * 0.01), + ); + context.store_metric("row_count", MetricValue::Long((i + 1) * 1000)); + + let key = ResultKey::new(1704931200000 + i) + .with_tag("batch", i.to_string()) + .with_tag("env", "test"); + + repository + .save(key, context) + .await + .expect("Failed to save metrics"); + } + + assert_eq!(repository.pending_count().await, 5); + + let _ = repository.shutdown().await; +} + +#[test] +fn test_cloud_validation_result_serialization() { + let result = CloudValidationResult { + status: "warning".to_string(), + total_checks: 5, + passed_checks: 4, + failed_checks: 1, + issues: vec![CloudValidationIssue { + check_name: "QualityCheck".to_string(), + constraint_name: "PatternMatch".to_string(), + level: "warning".to_string(), + message: "Pattern mismatch in 2% of rows".to_string(), + metric: Some(0.98), + }], + }; + + let json = serde_json::to_string(&result).expect("Failed to serialize"); + + assert!(json.contains("warning")); + assert!(json.contains("total_checks")); + assert!(json.contains("issues")); + + let deserialized: CloudValidationResult = + serde_json::from_str(&json).expect("Failed to deserialize"); + + assert_eq!(deserialized.status, "warning"); + assert_eq!(deserialized.total_checks, 5); + assert_eq!(deserialized.failed_checks, 1); + assert_eq!(deserialized.issues.len(), 1); +} + +#[test] +fn test_alert_severity_levels() { + let info_result = CloudValidationResult { + status: "success".to_string(), + total_checks: 10, + passed_checks: 10, + failed_checks: 0, + issues: vec![], + }; + let info_payload = AlertPayload::from_validation_result(&info_result, "test", "dev"); + assert_eq!(info_payload.severity, 
AlertSeverity::Info); + + let warning_result = CloudValidationResult { + status: "warning".to_string(), + total_checks: 10, + passed_checks: 8, + failed_checks: 2, + issues: vec![], + }; + let warning_payload = AlertPayload::from_validation_result(&warning_result, "test", "dev"); + assert_eq!(warning_payload.severity, AlertSeverity::Warning); + + let critical_result = CloudValidationResult { + status: "error".to_string(), + total_checks: 10, + passed_checks: 3, + failed_checks: 7, + issues: vec![], + }; + let critical_payload = AlertPayload::from_validation_result(&critical_result, "test", "dev"); + assert_eq!(critical_payload.severity, AlertSeverity::Critical); +} From 856269e9a18a8befaf07f8f58b00d86c4db9b04e Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 19:50:41 -0700 Subject: [PATCH 15/22] docs(cloud): add how-to guide for Term Cloud SDK Add comprehensive documentation for Term Cloud SDK following the Diataxis how-to guide format. Covers configuration, tagging metrics, webhook alerting, offline support, and graceful shutdown. Co-Authored-By: Claude Opus 4.5 --- docs/how-to/use-term-cloud.md | 363 ++++++++++++++++++++++++++++++++++ 1 file changed, 363 insertions(+) create mode 100644 docs/how-to/use-term-cloud.md diff --git a/docs/how-to/use-term-cloud.md b/docs/how-to/use-term-cloud.md new file mode 100644 index 0000000..d922d9b --- /dev/null +++ b/docs/how-to/use-term-cloud.md @@ -0,0 +1,363 @@ +# How to Use the Term Cloud SDK + +> **Type**: How-To Guide (Task-oriented) +> **Audience**: Practitioners using Term +> **Goal**: Persist validation metrics to Term Cloud for centralized monitoring and alerting + +## Goal + +Send validation metrics from Term to Term Cloud for centralized storage, historical analysis, and webhook-based alerting. 
+ +## Prerequisites + +Before you begin, ensure you have: +- [ ] Term v0.0.2 or later installed +- [ ] A Term Cloud API key (obtain from [Term Cloud Dashboard](https://cloud.term.dev)) +- [ ] The `cloud` feature enabled in your `Cargo.toml` + +## Enable the Cloud Feature + +Add the `cloud` feature to your `Cargo.toml`: + +```toml +[dependencies] +term-guard = { version = "0.0.2", features = ["cloud"] } +``` + +## Quick Start + +```rust,ignore +use std::time::Duration; +use term_guard::cloud::{CloudConfig, TermCloudRepository}; +use term_guard::repository::{MetricsRepository, ResultKey}; +use term_guard::analyzers::AnalyzerContext; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Configure the cloud connection + let config = CloudConfig::new("your-api-key") + .with_buffer_size(1000) + .with_flush_interval(Duration::from_secs(5)); + + // Create the repository + let repository = TermCloudRepository::new(config)?; + + // Create metrics with tags + let key = ResultKey::now() + .with_tag("environment", "production") + .with_tag("dataset", "orders"); + + let mut context = AnalyzerContext::with_dataset("orders_table"); + context.store_metric("completeness.user_id", term_guard::analyzers::MetricValue::Double(0.98)); + + // Save metrics (buffered, uploaded asynchronously) + repository.save(key, context).await?; + + // Always shutdown gracefully + repository.shutdown().await?; + + Ok(()) +} +``` + +## Configuration Options + +The `CloudConfig` builder provides the following options: + +| Option | Default | Description | +|--------|---------|-------------| +| `api_key` | Required | Your Term Cloud API key | +| `endpoint` | `https://api.term.dev` | Custom API endpoint | +| `timeout` | 30 seconds | HTTP request timeout | +| `max_retries` | 3 | Maximum retry attempts for failed uploads | +| `buffer_size` | 1000 | Maximum metrics to buffer in memory | +| `batch_size` | 100 | Number of metrics per upload batch | +| `flush_interval` | 5 seconds | How often to flush 
buffered metrics | +| `offline_cache_path` | Platform default | Path for offline cache database | + +### Full Configuration Example + +```rust,ignore +use std::time::Duration; +use term_guard::cloud::CloudConfig; + +let config = CloudConfig::new("your-api-key") + .with_endpoint("https://custom.api.endpoint.com") + .with_timeout(Duration::from_secs(60)) + .with_max_retries(5) + .with_buffer_size(5000) + .with_batch_size(200) + .with_flush_interval(Duration::from_secs(10)) + .with_offline_cache_path("/var/cache/myapp/term_metrics.db"); +``` + +## Tagging Metrics + +Use `ResultKey` to add tags that help organize and filter metrics in Term Cloud: + +```rust,ignore +use term_guard::repository::ResultKey; + +// Create a key with the current timestamp +let key = ResultKey::now() + .with_tag("environment", "production") + .with_tag("dataset", "users_table") + .with_tag("pipeline", "daily-etl") + .with_tag("version", "1.2.3"); + +// Or create a key with a specific timestamp +let key = ResultKey::new(1704931200000) // Unix millis + .with_tag("env", "staging"); + +// Add multiple tags at once +use std::collections::HashMap; +let mut tags = HashMap::new(); +tags.insert("env".to_string(), "prod".to_string()); +tags.insert("region".to_string(), "us-east-1".to_string()); + +let key = ResultKey::now().with_tags(tags); +``` + +### Tag Validation Rules + +Tags are validated before upload: +- Tag keys cannot be empty +- Tag keys must be 256 characters or fewer +- Tag values must be 1024 characters or fewer +- Maximum 100 tags per key +- Control characters and null bytes are not allowed + +## Webhook Alerts + +Configure webhook notifications for validation failures: + +```rust,ignore +use std::time::Duration; +use term_guard::cloud::{ + AlertPayload, AlertSeverity, CloudValidationResult, WebhookClient, WebhookConfig, +}; + +// Configure the webhook +let webhook_config = WebhookConfig::new("https://your-webhook.example.com/alerts") + .with_header("Authorization", "Bearer your-token") + 
.with_min_severity(AlertSeverity::Warning) // Only alert on Warning or Critical + .with_details(true) // Include validation details + .with_secret("your-hmac-secret") // HMAC-SHA256 signing + .with_timeout(Duration::from_secs(10)); + +// Create the webhook client +let webhook_client = WebhookClient::new(webhook_config)?; + +// Create an alert from validation results +let validation_result = CloudValidationResult { + status: "error".to_string(), + total_checks: 10, + passed_checks: 7, + failed_checks: 3, + issues: vec![], // Add specific issues here +}; + +let payload = AlertPayload::from_validation_result( + &validation_result, + "orders_table", + "production" +).with_dashboard_url("https://cloud.term.dev/run/123"); + +// Send the alert +webhook_client.send(&payload).await?; +``` + +### Alert Severity Levels + +| Severity | Triggered When | +|----------|----------------| +| `Info` | All checks passed | +| `Warning` | Some checks failed (less than 50%) | +| `Critical` | Many checks failed (50%+) or status is "error" | + +### Webhook Payload Structure + +The webhook receives a JSON payload: + +```json +{ + "title": "Validation Critical: 3 of 10 checks failed", + "severity": "critical", + "dataset": "orders_table", + "environment": "production", + "summary": { + "total_checks": 10, + "passed": 7, + "failed": 3, + "status": "error" + }, + "details": [ + { + "check": "DataQuality", + "constraint": "Completeness", + "level": "error", + "message": "Column 'user_id' has 15% null values", + "metric": 0.85 + } + ], + "timestamp": "2024-01-10T12:00:00Z", + "dashboard_url": "https://cloud.term.dev/run/123" +} +``` + +### Webhook Signature Verification + +When `with_secret()` is configured, requests include an `X-Signature-256` header with an HMAC-SHA256 signature. 
Verify it server-side: + +```rust,ignore +use term_guard::cloud::WebhookClient; + +// Verify the signature on your server +fn verify_signature(body: &str, signature: &str, secret: &str) -> bool { + let expected = WebhookClient::sign_payload(body, secret); + format!("sha256={}", expected) == signature +} +``` + +## Offline Support + +The Term Cloud SDK includes an offline cache for resilience against network failures. + +### Enable Offline Cache + +```rust,ignore +use term_guard::cloud::{CloudConfig, TermCloudRepository}; + +let config = CloudConfig::new("your-api-key") + .with_offline_cache_path("/var/cache/myapp/term_metrics.db"); + +let mut repository = TermCloudRepository::new(config)?; + +// Initialize the cache (creates the SQLite database) +repository.setup_cache(None)?; // Uses path from config + +// Or specify a custom path at setup time +repository.setup_cache(Some(std::path::Path::new("/custom/path/cache.db")))?; +``` + +### Sync Cached Metrics + +After recovering from a network outage, sync cached metrics: + +```rust,ignore +// Check connectivity first +match repository.health_check().await { + Ok(response) => { + println!("Connected to Term Cloud v{}", response.version); + + // Sync any cached metrics + let synced = repository.sync_offline_cache().await?; + println!("Synced {} cached metrics", synced); + } + Err(e) => { + eprintln!("Not connected: {}", e); + } +} +``` + +### How Offline Mode Works + +1. Metrics are first buffered in memory +2. A background worker uploads buffered metrics in batches +3. If upload fails, metrics are saved to the SQLite cache +4. On next successful connection, cached metrics are uploaded +5. During shutdown, any remaining buffered metrics go to cache + +## Graceful Shutdown + +Always call `shutdown()` to ensure all metrics are uploaded or cached: + +```rust,ignore +use term_guard::cloud::{CloudConfig, TermCloudRepository}; + +let repository = TermCloudRepository::new(config)?; + +// ... save metrics ... 
+ +// Graceful shutdown: waits for uploads, caches remaining metrics +let stats = repository.shutdown().await?; + +if let Some(s) = stats { + println!("Uploaded {} metrics, {} failed", s.metrics_uploaded, s.metrics_failed); +} +``` + +### Monitoring Buffer Status + +Check the current buffer state before shutdown: + +```rust,ignore +let pending = repository.pending_count().await; +println!("Pending metrics: {}", pending); + +// Force immediate flush if needed +repository.flush().await?; +``` + +## Troubleshooting + +### Problem: "Offline cache not configured" error +**Solution:** Call `setup_cache()` after creating the repository: +```rust,ignore +let mut repository = TermCloudRepository::new(config)?; +repository.setup_cache(None)?; +``` + +### Problem: Metrics not appearing in Term Cloud +**Solution:** +1. Check that you're calling `shutdown()` or `flush()` before the program exits +2. Verify your API key is correct +3. Check network connectivity with `health_check()` + +### Problem: BufferOverflow error +**Solution:** Increase the buffer size or reduce the flush interval: +```rust,ignore +let config = CloudConfig::new("key") + .with_buffer_size(10000) + .with_flush_interval(Duration::from_secs(1)); +``` + +### Problem: Tag validation errors +**Solution:** Ensure tags meet the validation rules: +- Keys cannot be empty +- Keys must be <= 256 characters +- Values must be <= 1024 characters +- No control characters or null bytes + +### Problem: Webhook not receiving alerts +**Solution:** +1. Verify the webhook URL is accessible +2. Check that the alert severity meets `min_severity` threshold +3. 
Ensure HTTPS certificate is valid (or use HTTP for testing) + +## Security Considerations + +- Store API keys securely (environment variables, secrets manager) +- API keys are never logged; they use `SecureString` internally +- Use HTTPS endpoints in production +- Configure webhook secrets for payload verification +- The offline cache stores metrics locally; secure the cache file appropriately + +## Related Guides + +- [How to Configure Logging](configure-logging.md) - Enable tracing for debugging +- [How to Optimize Performance](optimize-performance.md) - Performance tuning +- [Reference: Metrics Repository](../reference/metrics-repository.md) - Full API reference + +--- + + From 791e65b94c9eb5f8eaed4b328bd2105667187447 Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Mon, 12 Jan 2026 20:02:28 -0700 Subject: [PATCH 16/22] chore: add docs/plans to gitignore and format cloud modules Co-Authored-By: Claude Opus 4.5 --- .gitignore | 3 ++ term-guard/src/cloud/cache.rs | 30 ++++++++--------- term-guard/src/cloud/repository.rs | 52 +++++++++++++++++++----------- 3 files changed, 50 insertions(+), 35 deletions(-) diff --git a/.gitignore b/.gitignore index 6ea83a0..4e4b790 100644 --- a/.gitignore +++ b/.gitignore @@ -45,3 +45,6 @@ CLAUDE.md # Logs directory logs/ + +# Implementation plans (not checked in) +docs/plans/ diff --git a/term-guard/src/cloud/cache.rs b/term-guard/src/cloud/cache.rs index 1db5810..b4a75ab 100644 --- a/term-guard/src/cloud/cache.rs +++ b/term-guard/src/cloud/cache.rs @@ -118,23 +118,21 @@ impl OfflineCache { message: format!("Failed to query metrics: {e}"), })? 
.filter_map(|result| match result { - Ok((id, json, retry_count)) => { - match serde_json::from_str::(&json) { - Ok(metric) => Some(CacheEntry { - id, - entry: BufferEntry { - metric, - retry_count, - queued_at: now, - ready_at: now, - }, - }), - Err(e) => { - warn!("Failed to deserialize cached metric (id={}): {}", id, e); - None - } + Ok((id, json, retry_count)) => match serde_json::from_str::(&json) { + Ok(metric) => Some(CacheEntry { + id, + entry: BufferEntry { + metric, + retry_count, + queued_at: now, + ready_at: now, + }, + }), + Err(e) => { + warn!("Failed to deserialize cached metric (id={}): {}", id, e); + None } - } + }, Err(e) => { warn!("Failed to read cache row: {}", e); None diff --git a/term-guard/src/cloud/repository.rs b/term-guard/src/cloud/repository.rs index e7dade3..c11218f 100644 --- a/term-guard/src/cloud/repository.rs +++ b/term-guard/src/cloud/repository.rs @@ -245,9 +245,11 @@ impl TermCloudRepository { pub async fn shutdown(&self) -> CloudResult> { info!("Initiating graceful shutdown"); - self.shutdown_tx.send(true).map_err(|e| CloudError::Configuration { - message: format!("Failed to send shutdown signal: {e}"), - })?; + self.shutdown_tx + .send(true) + .map_err(|e| CloudError::Configuration { + message: format!("Failed to send shutdown signal: {e}"), + })?; let stats = if let Some(ref handle_lock) = self.worker_handle { let mut guard = handle_lock.write().await; @@ -323,9 +325,12 @@ impl TermCloudRepository { /// ``` #[instrument(skip(self))] pub async fn sync_offline_cache(&self) -> CloudResult { - let cache = self.cache.as_ref().ok_or_else(|| CloudError::Configuration { - message: "Offline cache not configured".to_string(), - })?; + let cache = self + .cache + .as_ref() + .ok_or_else(|| CloudError::Configuration { + message: "Offline cache not configured".to_string(), + })?; let entries = cache.load_all()?; if entries.is_empty() { @@ -374,8 +379,8 @@ impl TermCloudRepository { MetricValue::Long(v) => CloudMetricValue::Long(*v), 
MetricValue::String(v) => CloudMetricValue::String(v.clone()), MetricValue::Boolean(v) => CloudMetricValue::Boolean(*v), - MetricValue::Histogram(h) => CloudMetricValue::Histogram( - crate::cloud::CloudHistogram { + MetricValue::Histogram(h) => { + CloudMetricValue::Histogram(crate::cloud::CloudHistogram { buckets: h .buckets .iter() @@ -390,8 +395,8 @@ impl TermCloudRepository { max: h.max, mean: h.mean, std_dev: h.std_dev, - }, - ), + }) + } MetricValue::Vector(_) | MetricValue::Map(_) => { continue; } @@ -481,9 +486,10 @@ impl MetricsRepository for TermCloudRepository { let cloud_metric = Self::to_cloud_metric(&key, &metrics); - self.buffer.push(cloud_metric).await.map_err(|e| { - TermError::repository("term_cloud", "save", e.to_string()) - })?; + self.buffer + .push(cloud_metric) + .await + .map_err(|e| TermError::repository("term_cloud", "save", e.to_string()))?; debug!("Metric queued for upload"); Ok(()) @@ -517,7 +523,11 @@ impl MetricsRepository for TermCloudRepository { #[instrument(skip(self))] async fn metadata(&self) -> Result { let pending = self.buffer.len().await; - let cached = self.cache.as_ref().map(|c| c.count().unwrap_or(0)).unwrap_or(0); + let cached = self + .cache + .as_ref() + .map(|c| c.count().unwrap_or(0)) + .unwrap_or(0); Ok(RepositoryMetadata::new("term_cloud") .with_config("endpoint", self.config.endpoint()) @@ -562,9 +572,7 @@ impl MetricsRepository for TermCloudQueryAdapter { Ok(response .results .into_iter() - .map(|m| { - ResultKey::new(m.result_key.dataset_date).with_tags(m.result_key.tags) - }) + .map(|m| ResultKey::new(m.result_key.dataset_date).with_tags(m.result_key.tags)) .collect()) } @@ -703,8 +711,14 @@ mod tests { let cloud_metric = TermCloudRepository::to_cloud_metric(&key, &context); assert_eq!(cloud_metric.result_key.dataset_date, 1704931200000); - assert_eq!(cloud_metric.result_key.tags.get("env"), Some(&"prod".to_string())); - assert_eq!(cloud_metric.metadata.dataset_name, Some("test_dataset".to_string())); + 
assert_eq!( + cloud_metric.result_key.tags.get("env"), + Some(&"prod".to_string()) + ); + assert_eq!( + cloud_metric.metadata.dataset_name, + Some("test_dataset".to_string()) + ); assert!(cloud_metric.metrics.contains_key("completeness.col1")); assert!(cloud_metric.metrics.contains_key("size")); assert!(cloud_metric.metrics.contains_key("is_valid")); From b22adef39a37938520fbf1b432efd7ccf4883dc6 Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Tue, 13 Jan 2026 13:41:58 -0700 Subject: [PATCH 17/22] feat(nexus): add Term Nexus SDK for centralized metrics persistence Implements complete SDK for sending validation metrics to Term Nexus: - NexusConfig: Builder pattern configuration with secure API key handling - NexusClient: HTTP client with HMAC-SHA256 request signing - NexusRepository: MetricsRepository implementation with buffered uploads - MetricsBuffer: Thread-safe in-memory buffer with configurable limits - UploadWorker: Background worker with batching, retries, exponential backoff - OfflineCache: SQLite-backed persistence for network outage resilience Key features: - SecureString for API key (zeroization, masked Debug/Display) - Graceful shutdown with flush and cache persistence - Health check and metrics query endpoints - Tag validation with sensible limits - Comprehensive error types with retry guidance Rebranded from 'cloud' to 'nexus' for clearer product identity. 
Co-Authored-By: Claude Opus 4.5 --- .gitignore | 3 + .../{use-term-cloud.md => use-nexus.md} | 157 +---- docs/reference/nexus-api-spec.md | 490 ++++++++++++++ term-guard/Cargo.toml | 4 +- term-guard/src/cloud/alerting.rs | 631 ------------------ term-guard/src/cloud/mod.rs | 30 - term-guard/src/lib.rs | 4 +- term-guard/src/{cloud => nexus}/buffer.rs | 24 +- term-guard/src/{cloud => nexus}/cache.rs | 64 +- term-guard/src/{cloud => nexus}/client.rs | 68 +- term-guard/src/{cloud => nexus}/error.rs | 32 +- term-guard/src/nexus/mod.rs | 25 + term-guard/src/{cloud => nexus}/repository.rs | 252 ++++--- term-guard/src/{cloud => nexus}/types.rs | 66 +- term-guard/src/{cloud => nexus}/worker.rs | 28 +- ...ud_integration.rs => nexus_integration.rs} | 119 +--- 16 files changed, 838 insertions(+), 1159 deletions(-) rename docs/how-to/{use-term-cloud.md => use-nexus.md} (59%) create mode 100644 docs/reference/nexus-api-spec.md delete mode 100644 term-guard/src/cloud/alerting.rs delete mode 100644 term-guard/src/cloud/mod.rs rename term-guard/src/{cloud => nexus}/buffer.rs (93%) rename term-guard/src/{cloud => nexus}/cache.rs (83%) rename term-guard/src/{cloud => nexus}/client.rs (75%) rename term-guard/src/{cloud => nexus}/error.rs (75%) create mode 100644 term-guard/src/nexus/mod.rs rename term-guard/src/{cloud => nexus}/repository.rs (75%) rename term-guard/src/{cloud => nexus}/types.rs (83%) rename term-guard/src/{cloud => nexus}/worker.rs (92%) rename term-guard/tests/{cloud_integration.rs => nexus_integration.rs} (59%) diff --git a/.gitignore b/.gitignore index 4e4b790..c68d8ad 100644 --- a/.gitignore +++ b/.gitignore @@ -48,3 +48,6 @@ logs/ # Implementation plans (not checked in) docs/plans/ + +# Node.js bindings (build artifacts) +node/ diff --git a/docs/how-to/use-term-cloud.md b/docs/how-to/use-nexus.md similarity index 59% rename from docs/how-to/use-term-cloud.md rename to docs/how-to/use-nexus.md index d922d9b..68a8e5c 100644 --- a/docs/how-to/use-term-cloud.md +++ 
b/docs/how-to/use-nexus.md @@ -1,46 +1,46 @@ -# How to Use the Term Cloud SDK +# How to Use the Term Nexus SDK > **Type**: How-To Guide (Task-oriented) > **Audience**: Practitioners using Term -> **Goal**: Persist validation metrics to Term Cloud for centralized monitoring and alerting +> **Goal**: Persist validation metrics to Term Nexus for centralized monitoring ## Goal -Send validation metrics from Term to Term Cloud for centralized storage, historical analysis, and webhook-based alerting. +Send validation metrics from Term to Term Nexus for centralized storage and historical analysis. ## Prerequisites Before you begin, ensure you have: - [ ] Term v0.0.2 or later installed -- [ ] A Term Cloud API key (obtain from [Term Cloud Dashboard](https://cloud.term.dev)) -- [ ] The `cloud` feature enabled in your `Cargo.toml` +- [ ] A Term Nexus API key (obtain from [Term Dashboard](https://app.withterm.com)) +- [ ] The `nexus` feature enabled in your `Cargo.toml` -## Enable the Cloud Feature +## Enable the Nexus Feature -Add the `cloud` feature to your `Cargo.toml`: +Add the `nexus` feature to your `Cargo.toml`: ```toml [dependencies] -term-guard = { version = "0.0.2", features = ["cloud"] } +term-guard = { version = "0.0.2", features = ["nexus"] } ``` ## Quick Start ```rust,ignore use std::time::Duration; -use term_guard::cloud::{CloudConfig, TermCloudRepository}; +use term_guard::nexus::{NexusConfig, NexusRepository}; use term_guard::repository::{MetricsRepository, ResultKey}; use term_guard::analyzers::AnalyzerContext; #[tokio::main] async fn main() -> Result<(), Box> { - // Configure the cloud connection - let config = CloudConfig::new("your-api-key") + // Configure the nexus connection + let config = NexusConfig::new("your-api-key") .with_buffer_size(1000) .with_flush_interval(Duration::from_secs(5)); // Create the repository - let repository = TermCloudRepository::new(config)?; + let repository = NexusRepository::new(config)?; // Create metrics with tags let key = 
ResultKey::now() @@ -62,12 +62,12 @@ async fn main() -> Result<(), Box> { ## Configuration Options -The `CloudConfig` builder provides the following options: +The `NexusConfig` builder provides the following options: | Option | Default | Description | |--------|---------|-------------| -| `api_key` | Required | Your Term Cloud API key | -| `endpoint` | `https://api.term.dev` | Custom API endpoint | +| `api_key` | Required | Your Term Nexus API key | +| `endpoint` | `https://api.withterm.com` | Custom API endpoint | | `timeout` | 30 seconds | HTTP request timeout | | `max_retries` | 3 | Maximum retry attempts for failed uploads | | `buffer_size` | 1000 | Maximum metrics to buffer in memory | @@ -79,10 +79,10 @@ The `CloudConfig` builder provides the following options: ```rust,ignore use std::time::Duration; -use term_guard::cloud::CloudConfig; +use term_guard::nexus::NexusConfig; -let config = CloudConfig::new("your-api-key") - .with_endpoint("https://custom.api.endpoint.com") +let config = NexusConfig::new("your-api-key") + .with_endpoint("https://api.withterm.com") .with_timeout(Duration::from_secs(60)) .with_max_retries(5) .with_buffer_size(5000) @@ -93,7 +93,7 @@ let config = CloudConfig::new("your-api-key") ## Tagging Metrics -Use `ResultKey` to add tags that help organize and filter metrics in Term Cloud: +Use `ResultKey` to add tags that help organize and filter metrics in Term Nexus: ```rust,ignore use term_guard::repository::ResultKey; @@ -127,111 +127,19 @@ Tags are validated before upload: - Maximum 100 tags per key - Control characters and null bytes are not allowed -## Webhook Alerts - -Configure webhook notifications for validation failures: - -```rust,ignore -use std::time::Duration; -use term_guard::cloud::{ - AlertPayload, AlertSeverity, CloudValidationResult, WebhookClient, WebhookConfig, -}; - -// Configure the webhook -let webhook_config = WebhookConfig::new("https://your-webhook.example.com/alerts") - .with_header("Authorization", "Bearer 
your-token") - .with_min_severity(AlertSeverity::Warning) // Only alert on Warning or Critical - .with_details(true) // Include validation details - .with_secret("your-hmac-secret") // HMAC-SHA256 signing - .with_timeout(Duration::from_secs(10)); - -// Create the webhook client -let webhook_client = WebhookClient::new(webhook_config)?; - -// Create an alert from validation results -let validation_result = CloudValidationResult { - status: "error".to_string(), - total_checks: 10, - passed_checks: 7, - failed_checks: 3, - issues: vec![], // Add specific issues here -}; - -let payload = AlertPayload::from_validation_result( - &validation_result, - "orders_table", - "production" -).with_dashboard_url("https://cloud.term.dev/run/123"); - -// Send the alert -webhook_client.send(&payload).await?; -``` - -### Alert Severity Levels - -| Severity | Triggered When | -|----------|----------------| -| `Info` | All checks passed | -| `Warning` | Some checks failed (less than 50%) | -| `Critical` | Many checks failed (50%+) or status is "error" | - -### Webhook Payload Structure - -The webhook receives a JSON payload: - -```json -{ - "title": "Validation Critical: 3 of 10 checks failed", - "severity": "critical", - "dataset": "orders_table", - "environment": "production", - "summary": { - "total_checks": 10, - "passed": 7, - "failed": 3, - "status": "error" - }, - "details": [ - { - "check": "DataQuality", - "constraint": "Completeness", - "level": "error", - "message": "Column 'user_id' has 15% null values", - "metric": 0.85 - } - ], - "timestamp": "2024-01-10T12:00:00Z", - "dashboard_url": "https://cloud.term.dev/run/123" -} -``` - -### Webhook Signature Verification - -When `with_secret()` is configured, requests include an `X-Signature-256` header with an HMAC-SHA256 signature. 
Verify it server-side: - -```rust,ignore -use term_guard::cloud::WebhookClient; - -// Verify the signature on your server -fn verify_signature(body: &str, signature: &str, secret: &str) -> bool { - let expected = WebhookClient::sign_payload(body, secret); - format!("sha256={}", expected) == signature -} -``` - ## Offline Support -The Term Cloud SDK includes an offline cache for resilience against network failures. +The Term Nexus SDK includes an offline cache for resilience against network failures. ### Enable Offline Cache ```rust,ignore -use term_guard::cloud::{CloudConfig, TermCloudRepository}; +use term_guard::nexus::{NexusConfig, NexusRepository}; -let config = CloudConfig::new("your-api-key") +let config = NexusConfig::new("your-api-key") .with_offline_cache_path("/var/cache/myapp/term_metrics.db"); -let mut repository = TermCloudRepository::new(config)?; +let mut repository = NexusRepository::new(config)?; // Initialize the cache (creates the SQLite database) repository.setup_cache(None)?; // Uses path from config @@ -248,7 +156,7 @@ After recovering from a network outage, sync cached metrics: // Check connectivity first match repository.health_check().await { Ok(response) => { - println!("Connected to Term Cloud v{}", response.version); + println!("Connected to Term Nexus v{}", response.version); // Sync any cached metrics let synced = repository.sync_offline_cache().await?; @@ -273,9 +181,9 @@ match repository.health_check().await { Always call `shutdown()` to ensure all metrics are uploaded or cached: ```rust,ignore -use term_guard::cloud::{CloudConfig, TermCloudRepository}; +use term_guard::nexus::{NexusConfig, NexusRepository}; -let repository = TermCloudRepository::new(config)?; +let repository = NexusRepository::new(config)?; // ... save metrics ... 
@@ -304,11 +212,11 @@ repository.flush().await?; ### Problem: "Offline cache not configured" error **Solution:** Call `setup_cache()` after creating the repository: ```rust,ignore -let mut repository = TermCloudRepository::new(config)?; +let mut repository = NexusRepository::new(config)?; repository.setup_cache(None)?; ``` -### Problem: Metrics not appearing in Term Cloud +### Problem: Metrics not appearing in Term Nexus **Solution:** 1. Check that you're calling `shutdown()` or `flush()` before the program exits 2. Verify your API key is correct @@ -317,7 +225,7 @@ repository.setup_cache(None)?; ### Problem: BufferOverflow error **Solution:** Increase the buffer size or reduce the flush interval: ```rust,ignore -let config = CloudConfig::new("key") +let config = NexusConfig::new("key") .with_buffer_size(10000) .with_flush_interval(Duration::from_secs(1)); ``` @@ -329,18 +237,11 @@ let config = CloudConfig::new("key") - Values must be <= 1024 characters - No control characters or null bytes -### Problem: Webhook not receiving alerts -**Solution:** -1. Verify the webhook URL is accessible -2. Check that the alert severity meets `min_severity` threshold -3. 
Ensure HTTPS certificate is valid (or use HTTP for testing)
-
 ## Security Considerations
 
 - Store API keys securely (environment variables, secrets manager)
 - API keys are never logged; they use `SecureString` internally
 - Use HTTPS endpoints in production
-- Configure webhook secrets for payload verification
 - The offline cache stores metrics locally; secure the cache file appropriately
 
 ## Related Guides
diff --git a/docs/reference/nexus-api-spec.md b/docs/reference/nexus-api-spec.md
new file mode 100644
index 0000000..27adb42
--- /dev/null
+++ b/docs/reference/nexus-api-spec.md
@@ -0,0 +1,490 @@
+# Term Nexus API Specification
+
+> **Type**: Reference Documentation
+> **Audience**: Backend developers implementing Term Nexus
+> **Version**: 1.0
+
+## Overview
+
+This document specifies the HTTP API that Term Nexus must implement to support the Term SDK. The API provides endpoints for metrics ingestion, querying, and deletion.
+
+**Base URL**: `https://api.withterm.com`
+
+## Authentication
+
+All authenticated endpoints require two headers:
+
+| Header | Description |
+|--------|-------------|
+| `X-Term-Api-Key` | The API key identifying the client |
+| `X-Term-Signature` | HMAC-SHA256 signature of the request body (`POST` requests only) |
+
+### HMAC Signature
+
+For body-carrying write operations (`POST`), the SDK signs the request body using HMAC-SHA256:
+
+```
+signature = HMAC-SHA256(api_key, request_body)
+```
+
+The signature is sent as a lowercase hex-encoded string in the `X-Term-Signature` header.
+
+**Verification pseudocode:**
+```python
+expected = hmac.new(
+    key=api_key.encode('utf-8'),
+    msg=request_body,
+    digestmod='sha256'
+).hexdigest()
+
+if not hmac.compare_digest(expected, request_signature):
+    return 401 Unauthorized
+```
+
+---
+
+## Endpoints
+
+### Health Check
+
+Check API availability and version.
+ +``` +GET /v1/health +``` + +**Authentication**: None required + +**Response**: `200 OK` +```json +{ + "status": "ok", + "version": "1.0.0" +} +``` + +| Field | Type | Description | +|-------|------|-------------| +| `status` | string | Service status (`"ok"`, `"degraded"`, etc.) | +| `version` | string | API version | + +--- + +### Ingest Metrics + +Submit validation metrics for storage. + +``` +POST /v1/metrics +``` + +**Authentication**: Required (`X-Term-Api-Key` + `X-Term-Signature`) + +**Headers**: +``` +Content-Type: application/json +X-Term-Api-Key: +X-Term-Signature: +``` + +**Request Body**: Array of `NexusMetric` objects + +```json +[ + { + "result_key": { + "dataset_date": 1704931200000, + "tags": { + "environment": "production", + "dataset": "orders", + "pipeline": "daily-etl" + } + }, + "metrics": { + "completeness.user_id": { + "type": "double", + "value": 0.98 + }, + "row_count": { + "type": "long", + "value": 1000000 + }, + "is_valid": { + "type": "boolean", + "value": true + } + }, + "metadata": { + "dataset_name": "orders_table", + "start_time": "2024-01-10T12:00:00Z", + "end_time": "2024-01-10T12:05:00Z", + "term_version": "0.0.2", + "custom": { + "spark_job_id": "job-12345" + } + }, + "validation_result": { + "status": "warning", + "total_checks": 10, + "passed_checks": 9, + "failed_checks": 1, + "issues": [ + { + "check_name": "QualityCheck", + "constraint_name": "PatternMatch", + "level": "warning", + "message": "Pattern mismatch in 2% of rows", + "metric": 0.98 + } + ] + } + } +] +``` + +**Response**: `200 OK` +```json +{ + "accepted": 1, + "rejected": 0, + "errors": [] +} +``` + +| Field | Type | Description | +|-------|------|-------------| +| `accepted` | integer | Number of metrics successfully stored | +| `rejected` | integer | Number of metrics rejected | +| `errors` | array[string] | Error messages for rejected metrics | + +--- + +### Query Metrics + +Retrieve stored metrics with filtering and pagination. 
+ +``` +GET /v1/metrics +``` + +**Authentication**: Required (`X-Term-Api-Key`) + +**Query Parameters**: + +| Parameter | Type | Description | +|-----------|------|-------------| +| `after` | integer | Filter: dataset_date > after (Unix millis) | +| `before` | integer | Filter: dataset_date < before (Unix millis) | +| `limit` | integer | Maximum results to return (default: 100) | +| `cursor` | string | Pagination cursor from previous response | +| `{tag_key}` | string | Filter by tag value (e.g., `environment=production`) | + +**Example Request**: +``` +GET /v1/metrics?after=1704844800000&environment=production&limit=50 +``` + +**Response**: `200 OK` +```json +{ + "results": [ + { + "result_key": { + "dataset_date": 1704931200000, + "tags": { + "environment": "production", + "dataset": "orders" + } + }, + "metrics": { + "completeness.user_id": { + "type": "double", + "value": 0.98 + } + }, + "metadata": { + "dataset_name": "orders_table", + "term_version": "0.0.2", + "custom": {} + } + } + ], + "pagination": { + "next_cursor": "eyJkYXRhc2V0X2RhdGUiOjE3MDQ5MzEyMDAwMDB9", + "has_more": true + } +} +``` + +| Field | Type | Description | +|-------|------|-------------| +| `results` | array[NexusMetric] | Matching metrics | +| `pagination.next_cursor` | string | Cursor for next page (null if no more results) | +| `pagination.has_more` | boolean | Whether more results exist | + +--- + +### Delete Metrics + +Delete metrics by dataset_date and optional tag filters. 
+ +``` +DELETE /v1/metrics/{dataset_date} +``` + +**Authentication**: Required (`X-Term-Api-Key`) + +**Path Parameters**: + +| Parameter | Type | Description | +|-----------|------|-------------| +| `dataset_date` | integer | Unix timestamp in milliseconds | + +**Query Parameters**: + +| Parameter | Type | Description | +|-----------|------|-------------| +| `{tag_key}` | string | Filter by tag value | + +**Example Request**: +``` +DELETE /v1/metrics/1704931200000?environment=staging +``` + +**Response**: `204 No Content` + +--- + +## Data Types + +### NexusMetric + +The primary data structure for validation metrics. + +```json +{ + "result_key": NexusResultKey, + "metrics": { [string]: NexusMetricValue }, + "metadata": NexusMetadata, + "validation_result": NexusValidationResult | null +} +``` + +### NexusResultKey + +Unique identifier for a metrics collection. + +```json +{ + "dataset_date": 1704931200000, + "tags": { + "key1": "value1", + "key2": "value2" + } +} +``` + +| Field | Type | Description | +|-------|------|-------------| +| `dataset_date` | integer | Unix timestamp in milliseconds | +| `tags` | object | Key-value pairs for filtering/grouping | + +**Tag Validation Rules**: +- Keys: non-empty, max 256 characters +- Values: max 1024 characters +- Maximum 100 tags per key +- No control characters or null bytes + +### NexusMetricValue + +Tagged union for metric values. + +```json +// Double +{ "type": "double", "value": 0.98 } + +// Long (integer) +{ "type": "long", "value": 1000000 } + +// String +{ "type": "string", "value": "some text" } + +// Boolean +{ "type": "boolean", "value": true } + +// Histogram +{ + "type": "histogram", + "value": { + "buckets": [ + { "lower_bound": 0.0, "upper_bound": 10.0, "count": 150 }, + { "lower_bound": 10.0, "upper_bound": 20.0, "count": 300 } + ], + "total_count": 450, + "min": 0.5, + "max": 19.8, + "mean": 12.3, + "std_dev": 4.5 + } +} +``` + +### NexusHistogram + +Statistical distribution data. 
+ +| Field | Type | Description | +|-------|------|-------------| +| `buckets` | array | Histogram buckets | +| `buckets[].lower_bound` | float | Bucket lower bound (inclusive) | +| `buckets[].upper_bound` | float | Bucket upper bound (exclusive) | +| `buckets[].count` | integer | Count in this bucket | +| `total_count` | integer | Total observations | +| `min` | float? | Minimum value (optional) | +| `max` | float? | Maximum value (optional) | +| `mean` | float? | Mean value (optional) | +| `std_dev` | float? | Standard deviation (optional) | + +### NexusMetadata + +Context about the metrics collection. + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `dataset_name` | string | No | Human-readable dataset identifier | +| `start_time` | string | No | ISO 8601 timestamp when collection started | +| `end_time` | string | No | ISO 8601 timestamp when collection ended | +| `term_version` | string | Yes | Version of the Term library | +| `custom` | object | No | User-defined key-value metadata | + +### NexusValidationResult + +Summary of validation check results. + +| Field | Type | Description | +|-------|------|-------------| +| `status` | string | Overall status (`"success"`, `"warning"`, `"error"`) | +| `total_checks` | integer | Total number of checks run | +| `passed_checks` | integer | Number of checks that passed | +| `failed_checks` | integer | Number of checks that failed | +| `issues` | array | List of validation issues | + +### NexusValidationIssue + +Details about a validation failure. + +| Field | Type | Description | +|-------|------|-------------| +| `check_name` | string | Name of the check (e.g., `"CompletenessCheck"`) | +| `constraint_name` | string | Name of the constraint (e.g., `"isComplete"`) | +| `level` | string | Severity (`"warning"`, `"error"`) | +| `message` | string | Human-readable description | +| `metric` | float? 
| Associated metric value (optional) | + +--- + +## Error Handling + +### HTTP Status Codes + +| Status | Meaning | SDK Behavior | +|--------|---------|--------------| +| `200` | Success | Process response | +| `204` | Success (no content) | Operation complete | +| `400` | Bad Request | Do not retry, fix request | +| `401` | Unauthorized | Do not retry, check API key | +| `429` | Rate Limited | Retry after `Retry-After` header | +| `500+` | Server Error | Retry with exponential backoff | + +### Error Response Format + +For `4xx` and `5xx` errors, return a plain text or JSON error message in the body: + +```json +{ + "error": "Invalid API key", + "code": "INVALID_API_KEY" +} +``` + +Or plain text: +``` +Invalid API key +``` + +### Rate Limiting + +When rate limited, include the `Retry-After` header: + +``` +HTTP/1.1 429 Too Many Requests +Retry-After: 60 +``` + +The SDK respects this header and waits the specified number of seconds before retrying. + +--- + +## SDK Retry Behavior + +The SDK implements the following retry strategy: + +1. **Retryable errors**: Network errors, 429, 5xx responses +2. **Non-retryable errors**: 400, 401 (fail immediately) +3. **Backoff**: Exponential with jitter + - Base delay: 1 second + - Max delay: 32 seconds + - Formula: `min(32, 2^attempt) + random_jitter` +4. **Max retries**: Configurable (default: 3) +5. **Retry-After**: When present, overrides calculated backoff + +--- + +## Example: Complete Ingest Flow + +**1. Client sends request:** +```http +POST /v1/metrics HTTP/1.1 +Host: api.withterm.com +Content-Type: application/json +X-Term-Api-Key: tk_live_abc123 +X-Term-Signature: 7f83b1657ff1fc53b92dc18148a1d65dfc2d4b1fa3d677284addd200126d9069 + +[{"result_key":{"dataset_date":1704931200000,"tags":{"env":"prod"}},...}] +``` + +**2. Server validates:** +- Verify API key exists and is active +- Verify HMAC signature matches +- Parse and validate JSON schema +- Check tag validation rules + +**3. 
Server responds:** +```http +HTTP/1.1 200 OK +Content-Type: application/json + +{"accepted":1,"rejected":0,"errors":[]} +``` + +--- + +## Implementation Checklist + +- [ ] `GET /v1/health` - No auth required +- [ ] `POST /v1/metrics` - Verify API key + HMAC signature +- [ ] `GET /v1/metrics` - Verify API key, support query params +- [ ] `DELETE /v1/metrics/{dataset_date}` - Verify API key +- [ ] HMAC-SHA256 signature verification +- [ ] Rate limiting with `Retry-After` header +- [ ] Tag validation (length limits, character restrictions) +- [ ] Cursor-based pagination for queries +- [ ] Proper HTTP status codes for all error conditions + +--- + +## Related Documentation + +- [How to Use the Term Nexus SDK](../how-to/use-nexus.md) - Client usage guide +- [Explanation: Nexus Architecture](../explanation/nexus-architecture.md) - Design decisions + diff --git a/term-guard/Cargo.toml b/term-guard/Cargo.toml index d10c09b..dbd589c 100644 --- a/term-guard/Cargo.toml +++ b/term-guard/Cargo.toml @@ -34,8 +34,8 @@ azure = ["cloud-storage", "object_store/azure"] cloud-storage = ["dep:object_store", "dep:url"] gcs = ["cloud-storage", "object_store/gcp"] s3 = ["cloud-storage", "object_store/aws"] -# Term Cloud SDK -cloud = ["dep:directories", "dep:rand", "dep:reqwest", "dep:ring", "dep:rusqlite"] +# Term Nexus SDK +nexus = ["dep:directories", "dep:rand", "dep:reqwest", "dep:ring", "dep:rusqlite"] # Observability telemetry = ["dep:tracing-opentelemetry", "dep:opentelemetry", "dep:opentelemetry_sdk"] test-utils = ["dep:rand", "dep:parquet"] diff --git a/term-guard/src/cloud/alerting.rs b/term-guard/src/cloud/alerting.rs deleted file mode 100644 index 3ebbe30..0000000 --- a/term-guard/src/cloud/alerting.rs +++ /dev/null @@ -1,631 +0,0 @@ -//! Webhook-based alerting for validation failures. -//! -//! This module provides webhook alerting capabilities that can be triggered -//! when validation checks fail. Supports custom headers, HMAC signing, and -//! severity-based filtering. 
- -use std::collections::HashMap; -use std::time::Duration; - -use chrono::{DateTime, Utc}; -use ring::hmac; -use serde::{Deserialize, Serialize}; - -use super::error::{CloudError, CloudResult}; -use super::types::{CloudValidationIssue, CloudValidationResult}; -use crate::security::SecureString; - -/// Alert severity levels for filtering webhook notifications. -#[derive( - Debug, Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, -)] -#[serde(rename_all = "lowercase")] -pub enum AlertSeverity { - Info, - #[default] - Warning, - Critical, -} - -impl std::fmt::Display for AlertSeverity { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::Info => write!(f, "info"), - Self::Warning => write!(f, "warning"), - Self::Critical => write!(f, "critical"), - } - } -} - -/// Configuration for webhook alerting. -#[derive(Debug, Clone)] -pub struct WebhookConfig { - url: String, - headers: HashMap, - include_details: bool, - min_severity: AlertSeverity, - secret: Option, - timeout: Duration, -} - -impl WebhookConfig { - /// Create a new WebhookConfig with the given URL. - pub fn new(url: impl Into) -> Self { - Self { - url: url.into(), - headers: HashMap::new(), - include_details: false, - min_severity: AlertSeverity::default(), - secret: None, - timeout: Duration::from_secs(10), - } - } - - /// Add a custom header to be sent with webhook requests. - pub fn with_header(mut self, key: impl Into, value: impl Into) -> Self { - self.headers.insert(key.into(), value.into()); - self - } - - /// Set whether to include full validation details in the alert payload. - pub fn with_details(mut self, include: bool) -> Self { - self.include_details = include; - self - } - - /// Set the minimum severity level required to trigger an alert. - pub fn with_min_severity(mut self, severity: AlertSeverity) -> Self { - self.min_severity = severity; - self - } - - /// Set a secret for HMAC-SHA256 signing of payloads. 
- pub fn with_secret(mut self, secret: impl Into) -> Self { - self.secret = Some(SecureString::new(secret.into())); - self - } - - /// Set the request timeout duration. - pub fn with_timeout(mut self, timeout: Duration) -> Self { - self.timeout = timeout; - self - } - - /// Validate the webhook configuration. - pub fn validate(&self) -> CloudResult<()> { - if self.url.is_empty() { - return Err(CloudError::Configuration { - message: "Webhook URL cannot be empty".to_string(), - }); - } - - let url_lower = self.url.to_lowercase(); - if !url_lower.starts_with("http://") && !url_lower.starts_with("https://") { - return Err(CloudError::Configuration { - message: "Webhook URL must start with http:// or https://".to_string(), - }); - } - - if reqwest::Url::parse(&self.url).is_err() { - return Err(CloudError::Configuration { - message: format!("Invalid webhook URL: {}", self.url), - }); - } - - Ok(()) - } - - /// Get the webhook URL. - pub fn url(&self) -> &str { - &self.url - } - - /// Get the custom headers. - pub fn headers(&self) -> &HashMap { - &self.headers - } - - /// Check if details should be included. - pub fn include_details(&self) -> bool { - self.include_details - } - - /// Get the minimum severity level. - pub fn min_severity(&self) -> AlertSeverity { - self.min_severity - } - - /// Get the signing secret. - pub fn secret(&self) -> Option<&SecureString> { - self.secret.as_ref() - } - - /// Get the timeout duration. - pub fn timeout(&self) -> Duration { - self.timeout - } -} - -/// Summary information about the validation result. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct AlertSummary { - pub total_checks: usize, - pub passed: usize, - pub failed: usize, - pub status: String, -} - -/// Details about a specific validation failure. 
-#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct AlertDetail { - pub check: String, - pub constraint: String, - pub level: String, - pub message: String, - #[serde(skip_serializing_if = "Option::is_none")] - pub metric: Option, -} - -impl From<&CloudValidationIssue> for AlertDetail { - fn from(issue: &CloudValidationIssue) -> Self { - Self { - check: issue.check_name.clone(), - constraint: issue.constraint_name.clone(), - level: issue.level.clone(), - message: issue.message.clone(), - metric: issue.metric, - } - } -} - -/// Payload sent to the webhook endpoint. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct AlertPayload { - pub title: String, - pub severity: AlertSeverity, - pub dataset: String, - pub environment: String, - pub summary: AlertSummary, - #[serde(skip_serializing_if = "Option::is_none")] - pub details: Option>, - pub timestamp: DateTime, - #[serde(skip_serializing_if = "Option::is_none")] - pub dashboard_url: Option, -} - -impl AlertPayload { - /// Create an alert payload from a validation result. - pub fn from_validation_result( - result: &CloudValidationResult, - dataset: impl Into, - environment: impl Into, - ) -> Self { - let severity = Self::determine_severity(result); - let title = Self::generate_title(result, &severity); - - let summary = AlertSummary { - total_checks: result.total_checks, - passed: result.passed_checks, - failed: result.failed_checks, - status: result.status.clone(), - }; - - let details: Option> = if result.issues.is_empty() { - None - } else { - Some(result.issues.iter().map(AlertDetail::from).collect()) - }; - - Self { - title, - severity, - dataset: dataset.into(), - environment: environment.into(), - summary, - details, - timestamp: Utc::now(), - dashboard_url: None, - } - } - - /// Set the dashboard URL for the alert. 
- pub fn with_dashboard_url(mut self, url: impl Into) -> Self { - self.dashboard_url = Some(url.into()); - self - } - - fn determine_severity(result: &CloudValidationResult) -> AlertSeverity { - if result.failed_checks == 0 { - return AlertSeverity::Info; - } - - let failure_rate = result.failed_checks as f64 / result.total_checks.max(1) as f64; - - if failure_rate >= 0.5 || result.status == "error" { - AlertSeverity::Critical - } else if result.failed_checks > 0 { - AlertSeverity::Warning - } else { - AlertSeverity::Info - } - } - - fn generate_title(result: &CloudValidationResult, severity: &AlertSeverity) -> String { - match severity { - AlertSeverity::Info => "Validation Passed".to_string(), - AlertSeverity::Warning => format!( - "Validation Warning: {} of {} checks failed", - result.failed_checks, result.total_checks - ), - AlertSeverity::Critical => format!( - "Validation Critical: {} of {} checks failed", - result.failed_checks, result.total_checks - ), - } - } -} - -/// Client for sending webhook alerts. -pub struct WebhookClient { - client: reqwest::Client, - config: WebhookConfig, -} - -impl WebhookClient { - /// Create a new WebhookClient with the given configuration. - pub fn new(config: WebhookConfig) -> CloudResult { - config.validate()?; - - let client = reqwest::Client::builder() - .timeout(config.timeout) - .build() - .map_err(|e| CloudError::Configuration { - message: format!("Failed to build HTTP client: {e}"), - })?; - - Ok(Self { client, config }) - } - - /// Send an alert to the configured webhook endpoint. 
- pub async fn send(&self, payload: &AlertPayload) -> CloudResult<()> { - if payload.severity < self.config.min_severity { - tracing::debug!( - severity = %payload.severity, - min_severity = %self.config.min_severity, - "Alert severity below threshold, skipping" - ); - return Ok(()); - } - - let mut payload_to_send = payload.clone(); - if !self.config.include_details { - payload_to_send.details = None; - } - - let body = - serde_json::to_string(&payload_to_send).map_err(|e| CloudError::Serialization { - message: e.to_string(), - })?; - - let mut request = self - .client - .post(&self.config.url) - .header("Content-Type", "application/json"); - - for (key, value) in &self.config.headers { - request = request.header(key, value); - } - - if let Some(secret) = &self.config.secret { - let signature = Self::sign_payload(&body, secret.expose()); - request = request.header("X-Signature-256", format!("sha256={signature}")); - } - - let response = request - .body(body) - .send() - .await - .map_err(|e| CloudError::Network { - message: e.to_string(), - })?; - - if !response.status().is_success() { - let status = response.status().as_u16(); - let message = response - .text() - .await - .unwrap_or_else(|_| "Unknown error".to_string()); - return Err(CloudError::ServerError { status, message }); - } - - tracing::info!( - dataset = %payload.dataset, - severity = %payload.severity, - "Alert sent successfully" - ); - - Ok(()) - } - - /// Sign a payload using HMAC-SHA256. 
- pub fn sign_payload(body: &str, secret: &str) -> String { - let key = hmac::Key::new(hmac::HMAC_SHA256, secret.as_bytes()); - let signature = hmac::sign(&key, body.as_bytes()); - hex::encode(signature.as_ref()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_alert_severity_ordering() { - assert!(AlertSeverity::Info < AlertSeverity::Warning); - assert!(AlertSeverity::Warning < AlertSeverity::Critical); - assert!(AlertSeverity::Info < AlertSeverity::Critical); - } - - #[test] - fn test_alert_severity_default() { - assert_eq!(AlertSeverity::default(), AlertSeverity::Warning); - } - - #[test] - fn test_alert_severity_display() { - assert_eq!(AlertSeverity::Info.to_string(), "info"); - assert_eq!(AlertSeverity::Warning.to_string(), "warning"); - assert_eq!(AlertSeverity::Critical.to_string(), "critical"); - } - - #[test] - fn test_webhook_config_new() { - let config = WebhookConfig::new("https://example.com/webhook"); - assert_eq!(config.url(), "https://example.com/webhook"); - assert!(config.headers().is_empty()); - assert!(!config.include_details()); - assert_eq!(config.min_severity(), AlertSeverity::Warning); - assert!(config.secret().is_none()); - assert_eq!(config.timeout(), Duration::from_secs(10)); - } - - #[test] - fn test_webhook_config_builder() { - let config = WebhookConfig::new("https://example.com/webhook") - .with_header("Authorization", "Bearer token123") - .with_header("X-Custom", "value") - .with_details(true) - .with_min_severity(AlertSeverity::Critical) - .with_secret("my-secret"); - - assert_eq!(config.url(), "https://example.com/webhook"); - assert_eq!( - config.headers().get("Authorization"), - Some(&"Bearer token123".to_string()) - ); - assert_eq!(config.headers().get("X-Custom"), Some(&"value".to_string())); - assert!(config.include_details()); - assert_eq!(config.min_severity(), AlertSeverity::Critical); - assert_eq!(config.secret().map(|s| s.expose()), Some("my-secret")); - assert_eq!(config.timeout(), 
Duration::from_secs(10)); - } - - #[test] - fn test_webhook_config_with_timeout() { - let config = - WebhookConfig::new("https://example.com/webhook").with_timeout(Duration::from_secs(30)); - assert_eq!(config.timeout(), Duration::from_secs(30)); - } - - #[test] - fn test_webhook_config_validate_success() { - let config = WebhookConfig::new("https://example.com/webhook"); - assert!(config.validate().is_ok()); - - let config = WebhookConfig::new("http://localhost:8080/hook"); - assert!(config.validate().is_ok()); - } - - #[test] - fn test_webhook_config_validate_empty_url() { - let config = WebhookConfig::new(""); - let result = config.validate(); - assert!(result.is_err()); - match result.unwrap_err() { - CloudError::Configuration { message } => { - assert!(message.contains("cannot be empty")); - } - _ => panic!("Expected Configuration error"), - } - } - - #[test] - fn test_webhook_config_validate_invalid_scheme() { - let config = WebhookConfig::new("ftp://example.com/webhook"); - let result = config.validate(); - assert!(result.is_err()); - match result.unwrap_err() { - CloudError::Configuration { message } => { - assert!(message.contains("http://") || message.contains("https://")); - } - _ => panic!("Expected Configuration error"), - } - } - - #[test] - fn test_webhook_config_validate_invalid_url() { - let config = WebhookConfig::new("https://"); - let result = config.validate(); - assert!(result.is_err()); - } - - #[test] - fn test_alert_payload_from_validation_result_success() { - let result = CloudValidationResult { - status: "success".to_string(), - total_checks: 10, - passed_checks: 10, - failed_checks: 0, - issues: vec![], - }; - - let payload = AlertPayload::from_validation_result(&result, "orders", "production"); - - assert_eq!(payload.severity, AlertSeverity::Info); - assert_eq!(payload.dataset, "orders"); - assert_eq!(payload.environment, "production"); - assert_eq!(payload.summary.total_checks, 10); - assert_eq!(payload.summary.passed, 10); - 
assert_eq!(payload.summary.failed, 0); - assert!(payload.details.is_none()); - } - - #[test] - fn test_alert_payload_from_validation_result_warning() { - let result = CloudValidationResult { - status: "warning".to_string(), - total_checks: 10, - passed_checks: 8, - failed_checks: 2, - issues: vec![CloudValidationIssue { - check_name: "DataQuality".to_string(), - constraint_name: "Completeness".to_string(), - level: "warning".to_string(), - message: "Column 'email' has nulls".to_string(), - metric: Some(0.95), - }], - }; - - let payload = AlertPayload::from_validation_result(&result, "users", "staging"); - - assert_eq!(payload.severity, AlertSeverity::Warning); - assert!(payload.title.contains("Warning")); - assert!(payload.details.is_some()); - let details = payload.details.unwrap(); - assert_eq!(details.len(), 1); - assert_eq!(details[0].check, "DataQuality"); - assert_eq!(details[0].metric, Some(0.95)); - } - - #[test] - fn test_alert_payload_from_validation_result_critical() { - let result = CloudValidationResult { - status: "error".to_string(), - total_checks: 10, - passed_checks: 3, - failed_checks: 7, - issues: vec![], - }; - - let payload = AlertPayload::from_validation_result(&result, "orders", "production"); - - assert_eq!(payload.severity, AlertSeverity::Critical); - assert!(payload.title.contains("Critical")); - } - - #[test] - fn test_alert_payload_with_dashboard_url() { - let result = CloudValidationResult { - status: "success".to_string(), - total_checks: 1, - passed_checks: 1, - failed_checks: 0, - issues: vec![], - }; - - let payload = AlertPayload::from_validation_result(&result, "test", "dev") - .with_dashboard_url("https://dashboard.example.com/run/123"); - - assert_eq!( - payload.dashboard_url, - Some("https://dashboard.example.com/run/123".to_string()) - ); - } - - #[test] - fn test_alert_detail_from_cloud_validation_issue() { - let issue = CloudValidationIssue { - check_name: "MyCheck".to_string(), - constraint_name: 
"MyConstraint".to_string(), - level: "error".to_string(), - message: "Something went wrong".to_string(), - metric: Some(0.5), - }; - - let detail = AlertDetail::from(&issue); - - assert_eq!(detail.check, "MyCheck"); - assert_eq!(detail.constraint, "MyConstraint"); - assert_eq!(detail.level, "error"); - assert_eq!(detail.message, "Something went wrong"); - assert_eq!(detail.metric, Some(0.5)); - } - - #[test] - fn test_sign_payload() { - let body = r#"{"title":"Test Alert"}"#; - let secret = "test-secret"; - - let signature1 = WebhookClient::sign_payload(body, secret); - let signature2 = WebhookClient::sign_payload(body, secret); - - assert_eq!(signature1, signature2); - assert!(!signature1.is_empty()); - assert_eq!(signature1.len(), 64); - } - - #[test] - fn test_sign_payload_different_secrets() { - let body = r#"{"title":"Test Alert"}"#; - - let signature1 = WebhookClient::sign_payload(body, "secret1"); - let signature2 = WebhookClient::sign_payload(body, "secret2"); - - assert_ne!(signature1, signature2); - } - - #[test] - fn test_webhook_client_new_valid_config() { - let config = WebhookConfig::new("https://example.com/webhook"); - let client = WebhookClient::new(config); - assert!(client.is_ok()); - } - - #[test] - fn test_webhook_client_new_invalid_config() { - let config = WebhookConfig::new(""); - let client = WebhookClient::new(config); - assert!(client.is_err()); - } - - #[test] - fn test_alert_severity_serialization() { - let severity = AlertSeverity::Warning; - let json = serde_json::to_string(&severity).unwrap(); - assert_eq!(json, "\"warning\""); - - let deserialized: AlertSeverity = serde_json::from_str(&json).unwrap(); - assert_eq!(deserialized, AlertSeverity::Warning); - } - - #[test] - fn test_alert_payload_serialization() { - let result = CloudValidationResult { - status: "success".to_string(), - total_checks: 5, - passed_checks: 5, - failed_checks: 0, - issues: vec![], - }; - - let payload = AlertPayload::from_validation_result(&result, 
"test_dataset", "test_env"); - let json = serde_json::to_string(&payload).unwrap(); - - assert!(json.contains("\"title\"")); - assert!(json.contains("\"severity\":\"info\"")); - assert!(json.contains("\"dataset\":\"test_dataset\"")); - assert!(json.contains("\"environment\":\"test_env\"")); - assert!(json.contains("\"summary\"")); - assert!(!json.contains("\"details\"")); // None should be skipped - } -} diff --git a/term-guard/src/cloud/mod.rs b/term-guard/src/cloud/mod.rs deleted file mode 100644 index fdc60cd..0000000 --- a/term-guard/src/cloud/mod.rs +++ /dev/null @@ -1,30 +0,0 @@ -//! Term Cloud SDK for metrics persistence and observability. -//! -//! This module provides integration with the Term Cloud platform, -//! enabling centralized metrics storage, alerting, and historical analysis. - -mod alerting; -mod buffer; -mod cache; -mod client; -mod error; -mod repository; -mod types; -mod worker; - -pub use buffer::{BufferEntry, MetricsBuffer}; -pub use cache::{CacheEntry, OfflineCache}; -pub use client::{ - HealthResponse, IngestResponse, MetricsQuery, MetricsResponse, Pagination, TermCloudClient, -}; -pub use error::{CloudError, CloudResult}; -pub use repository::TermCloudRepository; -pub use types::{ - CloudConfig, CloudHistogram, CloudHistogramBucket, CloudMetadata, CloudMetric, - CloudMetricValue, CloudResultKey, CloudValidationIssue, CloudValidationResult, -}; -pub use worker::{UploadWorker, WorkerStats}; - -pub use alerting::{ - AlertDetail, AlertPayload, AlertSeverity, AlertSummary, WebhookClient, WebhookConfig, -}; diff --git a/term-guard/src/lib.rs b/term-guard/src/lib.rs index 952c791..35dc90e 100644 --- a/term-guard/src/lib.rs +++ b/term-guard/src/lib.rs @@ -166,13 +166,13 @@ //! 
``` pub mod analyzers; -#[cfg(feature = "cloud")] -pub mod cloud; pub mod constraints; pub mod core; pub mod error; pub mod formatters; pub mod logging; +#[cfg(feature = "nexus")] +pub mod nexus; pub mod optimizer; pub mod prelude; pub mod repository; diff --git a/term-guard/src/cloud/buffer.rs b/term-guard/src/nexus/buffer.rs similarity index 93% rename from term-guard/src/cloud/buffer.rs rename to term-guard/src/nexus/buffer.rs index bbf5a63..0ebba61 100644 --- a/term-guard/src/cloud/buffer.rs +++ b/term-guard/src/nexus/buffer.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use std::time::Instant; use tokio::sync::Mutex; -use crate::cloud::{CloudError, CloudMetric, CloudResult}; +use crate::nexus::{NexusError, NexusMetric, NexusResult}; /// Entry in the metrics buffer with retry metadata. #[derive(Debug, Clone)] pub struct BufferEntry { - pub metric: CloudMetric, + pub metric: NexusMetric, pub retry_count: u32, pub queued_at: Instant, pub ready_at: Instant, @@ -30,11 +30,11 @@ impl MetricsBuffer { } /// Push a metric to the buffer. - pub async fn push(&self, metric: CloudMetric) -> CloudResult<()> { + pub async fn push(&self, metric: NexusMetric) -> NexusResult<()> { let mut entries = self.entries.lock().await; if entries.len() >= self.max_size { - return Err(CloudError::BufferOverflow { + return Err(NexusError::BufferOverflow { pending_count: entries.len(), max_size: self.max_size, }); @@ -55,11 +55,11 @@ impl MetricsBuffer { /// /// Increments retry count and sets `ready_at` to delay processing until /// the backoff period has elapsed. 
- pub async fn push_retry(&self, mut entry: BufferEntry, ready_at: Instant) -> CloudResult<()> { + pub async fn push_retry(&self, mut entry: BufferEntry, ready_at: Instant) -> NexusResult<()> { let mut entries = self.entries.lock().await; if entries.len() >= self.max_size { - return Err(CloudError::BufferOverflow { + return Err(NexusError::BufferOverflow { pending_count: entries.len(), max_size: self.max_size, }); @@ -130,17 +130,17 @@ impl Clone for MetricsBuffer { #[cfg(test)] mod tests { use super::*; - use crate::cloud::{CloudMetadata, CloudResultKey}; + use crate::nexus::{NexusMetadata, NexusResultKey}; use std::collections::HashMap; - fn make_test_metric() -> CloudMetric { - CloudMetric { - result_key: CloudResultKey { + fn make_test_metric() -> NexusMetric { + NexusMetric { + result_key: NexusResultKey { dataset_date: 1704931200000, tags: HashMap::new(), }, metrics: HashMap::new(), - metadata: CloudMetadata { + metadata: NexusMetadata { dataset_name: Some("test".to_string()), start_time: None, end_time: None, @@ -173,7 +173,7 @@ mod tests { buffer.push(make_test_metric()).await.unwrap(); let result = buffer.push(make_test_metric()).await; - assert!(matches!(result, Err(CloudError::BufferOverflow { .. }))); + assert!(matches!(result, Err(NexusError::BufferOverflow { .. }))); } #[tokio::test] diff --git a/term-guard/src/cloud/cache.rs b/term-guard/src/nexus/cache.rs similarity index 83% rename from term-guard/src/cloud/cache.rs rename to term-guard/src/nexus/cache.rs index b4a75ab..85be073 100644 --- a/term-guard/src/cloud/cache.rs +++ b/term-guard/src/nexus/cache.rs @@ -7,7 +7,7 @@ use std::time::Instant; use rusqlite::Connection; use tracing::warn; -use crate::cloud::{BufferEntry, CloudError, CloudMetric, CloudResult}; +use crate::nexus::{BufferEntry, NexusError, NexusMetric, NexusResult}; /// Entry loaded from the cache, with its database ID for selective deletion. 
#[derive(Debug)] @@ -25,8 +25,8 @@ pub struct OfflineCache { impl OfflineCache { /// Create or open a cache at the given file path. - pub fn new(path: &Path) -> CloudResult { - let conn = Connection::open(path).map_err(|e| CloudError::CacheError { + pub fn new(path: &Path) -> NexusResult { + let conn = Connection::open(path).map_err(|e| NexusError::CacheError { message: format!("Failed to open cache database: {e}"), })?; @@ -38,8 +38,8 @@ impl OfflineCache { } /// Create an in-memory cache for testing. - pub fn in_memory() -> CloudResult { - let conn = Connection::open_in_memory().map_err(|e| CloudError::CacheError { + pub fn in_memory() -> NexusResult { + let conn = Connection::open_in_memory().map_err(|e| NexusError::CacheError { message: format!("Failed to create in-memory cache: {e}"), })?; @@ -50,8 +50,8 @@ impl OfflineCache { Ok(cache) } - fn init_schema(&self) -> CloudResult<()> { - let conn = self.conn.lock().map_err(|e| CloudError::CacheError { + fn init_schema(&self) -> NexusResult<()> { + let conn = self.conn.lock().map_err(|e| NexusError::CacheError { message: format!("Failed to acquire lock: {e}"), })?; @@ -64,7 +64,7 @@ impl OfflineCache { )", [], ) - .map_err(|e| CloudError::CacheError { + .map_err(|e| NexusError::CacheError { message: format!("Failed to create schema: {e}"), })?; @@ -72,12 +72,12 @@ impl OfflineCache { } /// Save a metric to the cache. 
- pub fn save(&self, metric: &CloudMetric, retry_count: u32) -> CloudResult<()> { - let metric_json = serde_json::to_string(metric).map_err(|e| CloudError::CacheError { + pub fn save(&self, metric: &NexusMetric, retry_count: u32) -> NexusResult<()> { + let metric_json = serde_json::to_string(metric).map_err(|e| NexusError::CacheError { message: format!("Failed to serialize metric: {e}"), })?; - let conn = self.conn.lock().map_err(|e| CloudError::CacheError { + let conn = self.conn.lock().map_err(|e| NexusError::CacheError { message: format!("Failed to acquire lock: {e}"), })?; @@ -85,7 +85,7 @@ impl OfflineCache { "INSERT INTO pending_metrics (metric_json, retry_count) VALUES (?1, ?2)", rusqlite::params![metric_json, retry_count], ) - .map_err(|e| CloudError::CacheError { + .map_err(|e| NexusError::CacheError { message: format!("Failed to save metric: {e}"), })?; @@ -95,14 +95,14 @@ impl OfflineCache { /// Load all pending metrics from the cache. /// /// Returns entries with their database IDs for selective deletion after successful upload. - pub fn load_all(&self) -> CloudResult> { - let conn = self.conn.lock().map_err(|e| CloudError::CacheError { + pub fn load_all(&self) -> NexusResult> { + let conn = self.conn.lock().map_err(|e| NexusError::CacheError { message: format!("Failed to acquire lock: {e}"), })?; let mut stmt = conn .prepare("SELECT id, metric_json, retry_count FROM pending_metrics ORDER BY id") - .map_err(|e| CloudError::CacheError { + .map_err(|e| NexusError::CacheError { message: format!("Failed to prepare query: {e}"), })?; @@ -114,11 +114,11 @@ impl OfflineCache { let retry_count: u32 = row.get(2)?; Ok((id, metric_json, retry_count)) }) - .map_err(|e| CloudError::CacheError { + .map_err(|e| NexusError::CacheError { message: format!("Failed to query metrics: {e}"), })? 
.filter_map(|result| match result { - Ok((id, json, retry_count)) => match serde_json::from_str::(&json) { + Ok((id, json, retry_count)) => match serde_json::from_str::(&json) { Ok(metric) => Some(CacheEntry { id, entry: BufferEntry { @@ -146,12 +146,12 @@ impl OfflineCache { /// Delete specific entries by their database IDs. /// /// Returns the number of entries deleted. - pub fn delete_ids(&self, ids: &[i64]) -> CloudResult { + pub fn delete_ids(&self, ids: &[i64]) -> NexusResult { if ids.is_empty() { return Ok(0); } - let conn = self.conn.lock().map_err(|e| CloudError::CacheError { + let conn = self.conn.lock().map_err(|e| NexusError::CacheError { message: format!("Failed to acquire lock: {e}"), })?; @@ -161,13 +161,13 @@ impl OfflineCache { placeholders.join(", ") ); - let mut stmt = conn.prepare(&sql).map_err(|e| CloudError::CacheError { + let mut stmt = conn.prepare(&sql).map_err(|e| NexusError::CacheError { message: format!("Failed to prepare delete query: {e}"), })?; let deleted = stmt .execute(rusqlite::params_from_iter(ids.iter())) - .map_err(|e| CloudError::CacheError { + .map_err(|e| NexusError::CacheError { message: format!("Failed to delete metrics: {e}"), })?; @@ -175,13 +175,13 @@ impl OfflineCache { } /// Remove all cached entries. - pub fn clear(&self) -> CloudResult<()> { - let conn = self.conn.lock().map_err(|e| CloudError::CacheError { + pub fn clear(&self) -> NexusResult<()> { + let conn = self.conn.lock().map_err(|e| NexusError::CacheError { message: format!("Failed to acquire lock: {e}"), })?; conn.execute("DELETE FROM pending_metrics", []) - .map_err(|e| CloudError::CacheError { + .map_err(|e| NexusError::CacheError { message: format!("Failed to clear cache: {e}"), })?; @@ -189,14 +189,14 @@ impl OfflineCache { } /// Get count of pending metrics. 
- pub fn count(&self) -> CloudResult { - let conn = self.conn.lock().map_err(|e| CloudError::CacheError { + pub fn count(&self) -> NexusResult { + let conn = self.conn.lock().map_err(|e| NexusError::CacheError { message: format!("Failed to acquire lock: {e}"), })?; let count: i64 = conn .query_row("SELECT COUNT(*) FROM pending_metrics", [], |row| row.get(0)) - .map_err(|e| CloudError::CacheError { + .map_err(|e| NexusError::CacheError { message: format!("Failed to count metrics: {e}"), })?; @@ -207,17 +207,17 @@ impl OfflineCache { #[cfg(test)] mod tests { use super::*; - use crate::cloud::{CloudMetadata, CloudResultKey}; + use crate::nexus::{NexusMetadata, NexusResultKey}; use std::collections::HashMap; - fn make_test_metric() -> CloudMetric { - CloudMetric { - result_key: CloudResultKey { + fn make_test_metric() -> NexusMetric { + NexusMetric { + result_key: NexusResultKey { dataset_date: 1704931200000, tags: HashMap::new(), }, metrics: HashMap::new(), - metadata: CloudMetadata { + metadata: NexusMetadata { dataset_name: Some("test".to_string()), start_time: None, end_time: None, diff --git a/term-guard/src/cloud/client.rs b/term-guard/src/nexus/client.rs similarity index 75% rename from term-guard/src/cloud/client.rs rename to term-guard/src/nexus/client.rs index a55b8a7..fef8c29 100644 --- a/term-guard/src/cloud/client.rs +++ b/term-guard/src/nexus/client.rs @@ -4,12 +4,12 @@ use reqwest::Client; use ring::hmac; use serde::{Deserialize, Serialize}; -use crate::cloud::{CloudConfig, CloudError, CloudMetric, CloudResult, CloudResultKey}; +use crate::nexus::{NexusConfig, NexusError, NexusMetric, NexusResult, NexusResultKey}; -/// HTTP client for Term Cloud API. +/// HTTP client for Term Nexus API. #[derive(Clone)] -pub struct TermCloudClient { - config: Arc, +pub struct NexusClient { + config: Arc, client: Client, signing_key: hmac::Key, } @@ -48,7 +48,7 @@ pub struct MetricsQuery { /// Paginated response from metrics query. 
#[derive(Debug, Deserialize)] pub struct MetricsResponse { - pub results: Vec, + pub results: Vec, pub pagination: Pagination, } @@ -58,13 +58,13 @@ pub struct Pagination { pub has_more: bool, } -impl TermCloudClient { +impl NexusClient { /// Create a new client with the given configuration. - pub fn new(config: CloudConfig) -> CloudResult { + pub fn new(config: NexusConfig) -> NexusResult { let client = Client::builder() .timeout(config.timeout()) .build() - .map_err(|e| CloudError::Configuration { + .map_err(|e| NexusError::Configuration { message: format!("Failed to create HTTP client: {}", e), })?; @@ -77,8 +77,8 @@ impl TermCloudClient { }) } - /// Check if the Term Cloud API is reachable. - pub async fn health_check(&self) -> CloudResult { + /// Check if the Term Nexus API is reachable. + pub async fn health_check(&self) -> NexusResult { let url = format!("{}/v1/health", self.config.endpoint()); let response = self @@ -86,17 +86,17 @@ impl TermCloudClient { .get(&url) .send() .await - .map_err(|e| CloudError::Network { + .map_err(|e| NexusError::Network { message: e.to_string(), })?; self.handle_response(response).await } - /// Send metrics to Term Cloud. - pub async fn ingest(&self, metrics: &[CloudMetric]) -> CloudResult { + /// Send metrics to Term Nexus. + pub async fn ingest(&self, metrics: &[NexusMetric]) -> NexusResult { let url = format!("{}/v1/metrics", self.config.endpoint()); - let body = serde_json::to_vec(metrics).map_err(|e| CloudError::Serialization { + let body = serde_json::to_vec(metrics).map_err(|e| NexusError::Serialization { message: e.to_string(), })?; @@ -111,15 +111,15 @@ impl TermCloudClient { .body(body) .send() .await - .map_err(|e| CloudError::Network { + .map_err(|e| NexusError::Network { message: e.to_string(), })?; self.handle_response(response).await } - /// Query metrics from Term Cloud. - pub async fn query(&self, query: MetricsQuery) -> CloudResult { + /// Query metrics from Term Nexus. 
+ pub async fn query(&self, query: MetricsQuery) -> NexusResult { let url = format!("{}/v1/metrics", self.config.endpoint()); let response = self @@ -129,7 +129,7 @@ impl TermCloudClient { .query(&query) .send() .await - .map_err(|e| CloudError::Network { + .map_err(|e| NexusError::Network { message: e.to_string(), })?; @@ -137,7 +137,7 @@ impl TermCloudClient { } /// Delete metrics by key. - pub async fn delete(&self, key: &CloudResultKey) -> CloudResult<()> { + pub async fn delete(&self, key: &NexusResultKey) -> NexusResult<()> { let url = format!("{}/v1/metrics/{}", self.config.endpoint(), key.dataset_date); let response = self @@ -147,7 +147,7 @@ impl TermCloudClient { .query(&key.tags) .send() .await - .map_err(|e| CloudError::Network { + .map_err(|e| NexusError::Network { message: e.to_string(), })?; @@ -168,14 +168,14 @@ impl TermCloudClient { async fn handle_response( &self, response: reqwest::Response, - ) -> CloudResult { + ) -> NexusResult { let status = response.status(); if status.is_success() { response .json::() .await - .map_err(|e| CloudError::Serialization { + .map_err(|e| NexusError::Serialization { message: e.to_string(), }) } else { @@ -183,8 +183,8 @@ impl TermCloudClient { } } - /// Convert an error response to a CloudError. - async fn handle_error_response(&self, response: reqwest::Response) -> CloudResult { + /// Convert an error response to a NexusError. 
+ async fn handle_error_response(&self, response: reqwest::Response) -> NexusResult { let status = response.status(); let retry_after = response .headers() @@ -195,16 +195,16 @@ impl TermCloudClient { let body = response.text().await.unwrap_or_default(); match status.as_u16() { - 401 => Err(CloudError::Authentication { message: body }), - 429 => Err(CloudError::RateLimited { + 401 => Err(NexusError::Authentication { message: body }), + 429 => Err(NexusError::RateLimited { retry_after_secs: retry_after, }), - 400 => Err(CloudError::InvalidRequest { message: body }), - status if status >= 500 => Err(CloudError::ServerError { + 400 => Err(NexusError::InvalidRequest { message: body }), + status if status >= 500 => Err(NexusError::ServerError { status, message: body, }), - _ => Err(CloudError::ServerError { + _ => Err(NexusError::ServerError { status: status.as_u16(), message: body, }), @@ -212,7 +212,7 @@ impl TermCloudClient { } /// Get the configuration. - pub fn config(&self) -> &CloudConfig { + pub fn config(&self) -> &NexusConfig { &self.config } } @@ -223,15 +223,15 @@ mod tests { #[test] fn test_client_creation() { - let config = CloudConfig::new("test-api-key"); - let client = TermCloudClient::new(config); + let config = NexusConfig::new("test-api-key"); + let client = NexusClient::new(config); assert!(client.is_ok()); } #[tokio::test] async fn test_client_health_check_invalid_endpoint() { - let config = CloudConfig::new("test-key").with_endpoint("http://localhost:1"); - let client = TermCloudClient::new(config).unwrap(); + let config = NexusConfig::new("test-key").with_endpoint("http://localhost:1"); + let client = NexusClient::new(config).unwrap(); let result = client.health_check().await; assert!(result.is_err()); diff --git a/term-guard/src/cloud/error.rs b/term-guard/src/nexus/error.rs similarity index 75% rename from term-guard/src/cloud/error.rs rename to term-guard/src/nexus/error.rs index d735d47..014685f 100644 --- a/term-guard/src/cloud/error.rs +++ 
b/term-guard/src/nexus/error.rs @@ -1,8 +1,8 @@ use thiserror::Error; -/// Errors that can occur when interacting with Term Cloud. +/// Errors that can occur when interacting with Term Nexus. #[derive(Debug, Error)] -pub enum CloudError { +pub enum NexusError { /// Authentication failed (invalid or expired API key). #[error("Authentication failed: {message}")] Authentication { message: String }, @@ -43,13 +43,13 @@ pub enum CloudError { Configuration { message: String }, } -impl CloudError { +impl NexusError { /// Returns true if this error is transient and the operation should be retried. pub fn is_retryable(&self) -> bool { match self { - CloudError::Network { .. } => true, - CloudError::RateLimited { .. } => true, - CloudError::ServerError { status, .. } => *status >= 500, + NexusError::Network { .. } => true, + NexusError::RateLimited { .. } => true, + NexusError::ServerError { status, .. } => *status >= 500, _ => false, } } @@ -57,42 +57,42 @@ impl CloudError { /// Returns the suggested retry delay in seconds, if available. pub fn retry_after(&self) -> Option { match self { - CloudError::RateLimited { retry_after_secs } => *retry_after_secs, + NexusError::RateLimited { retry_after_secs } => *retry_after_secs, _ => None, } } } -/// Result type for cloud operations. -pub type CloudResult = std::result::Result; +/// Result type for nexus operations. 
+pub type NexusResult = std::result::Result; #[cfg(test)] mod tests { use super::*; #[test] - fn test_cloud_error_display() { - let err = CloudError::Authentication { + fn test_nexus_error_display() { + let err = NexusError::Authentication { message: "Invalid API key".to_string(), }; assert!(err.to_string().contains("Invalid API key")); } #[test] - fn test_cloud_error_is_retryable() { - assert!(!CloudError::Authentication { + fn test_nexus_error_is_retryable() { + assert!(!NexusError::Authentication { message: "test".to_string() } .is_retryable()); - assert!(CloudError::Network { + assert!(NexusError::Network { message: "timeout".to_string() } .is_retryable()); - assert!(CloudError::RateLimited { + assert!(NexusError::RateLimited { retry_after_secs: Some(60) } .is_retryable()); - assert!(CloudError::ServerError { + assert!(NexusError::ServerError { status: 500, message: "internal".to_string() } diff --git a/term-guard/src/nexus/mod.rs b/term-guard/src/nexus/mod.rs new file mode 100644 index 0000000..331e570 --- /dev/null +++ b/term-guard/src/nexus/mod.rs @@ -0,0 +1,25 @@ +//! Term Nexus SDK for metrics persistence and observability. +//! +//! This module provides integration with the Term Nexus platform, +//! enabling centralized metrics storage and historical analysis. 
+ +mod buffer; +mod cache; +mod client; +mod error; +mod repository; +mod types; +mod worker; + +pub use buffer::{BufferEntry, MetricsBuffer}; +pub use cache::{CacheEntry, OfflineCache}; +pub use client::{ + HealthResponse, IngestResponse, MetricsQuery, MetricsResponse, NexusClient, Pagination, +}; +pub use error::{NexusError, NexusResult}; +pub use repository::NexusRepository; +pub use types::{ + NexusConfig, NexusHistogram, NexusHistogramBucket, NexusMetadata, NexusMetric, + NexusMetricValue, NexusResultKey, NexusValidationIssue, NexusValidationResult, +}; +pub use worker::{UploadWorker, WorkerStats}; diff --git a/term-guard/src/cloud/repository.rs b/term-guard/src/nexus/repository.rs similarity index 75% rename from term-guard/src/cloud/repository.rs rename to term-guard/src/nexus/repository.rs index c11218f..4990965 100644 --- a/term-guard/src/cloud/repository.rs +++ b/term-guard/src/nexus/repository.rs @@ -1,6 +1,6 @@ -//! TermCloudRepository - Main repository implementation for Term Cloud. +//! NexusRepository - Main repository implementation for Term Nexus. //! -//! This module provides the primary interface for persisting metrics to Term Cloud, +//! This module provides the primary interface for persisting metrics to Term Nexus, //! implementing the MetricsRepository trait with support for: //! - Asynchronous background uploads via UploadWorker //! - Offline operation with automatic sync via OfflineCache @@ -9,12 +9,12 @@ //! # Example //! //! ```rust,ignore -//! use term_guard::cloud::{CloudConfig, TermCloudRepository}; +//! use term_guard::nexus::{NexusConfig, NexusRepository}; //! use term_guard::repository::ResultKey; //! use term_guard::analyzers::AnalyzerContext; //! -//! let config = CloudConfig::new("your-api-key"); -//! let repository = TermCloudRepository::new(config)?; +//! let config = NexusConfig::new("your-api-key"); +//! let repository = NexusRepository::new(config)?; //! //! // Save metrics //! 
let key = ResultKey::now().with_tag("env", "production"); @@ -36,17 +36,17 @@ use tracing::{debug, error, info, instrument, warn}; use crate::analyzers::context::AnalyzerContext; use crate::analyzers::types::MetricValue; -use crate::cloud::{ - BufferEntry, CloudConfig, CloudError, CloudMetadata, CloudMetric, CloudMetricValue, - CloudResult, CloudResultKey, MetricsBuffer, OfflineCache, TermCloudClient, UploadWorker, - WorkerStats, -}; use crate::error::{Result, TermError}; +use crate::nexus::{ + BufferEntry, MetricsBuffer, NexusClient, NexusConfig, NexusError, NexusHistogram, + NexusHistogramBucket, NexusMetadata, NexusMetric, NexusMetricValue, NexusResult, + NexusResultKey, OfflineCache, UploadWorker, WorkerStats, +}; use crate::repository::{MetricsQuery, MetricsRepository, RepositoryMetadata, ResultKey}; -/// Main repository implementation for persisting metrics to Term Cloud. +/// Main repository implementation for persisting metrics to Term Nexus. /// -/// TermCloudRepository provides a complete solution for metrics persistence with: +/// NexusRepository provides a complete solution for metrics persistence with: /// - Local buffering for high-throughput scenarios /// - Background upload worker for asynchronous transmission /// - Offline cache for resilience against network failures @@ -66,31 +66,31 @@ use crate::repository::{MetricsQuery, MetricsRepository, RepositoryMetadata, Res /// │ /// ▼ /// ┌─────────────────┐ ┌─────────────────┐ -/// │ UploadWorker │────▶│ TermCloudClient │ +/// │ UploadWorker │────▶│ NexusClient │ /// └────────┬────────┘ └────────┬────────┘ /// │ │ /// │ (on failure) │ /// ▼ ▼ /// ┌─────────────────┐ ┌─────────────────┐ -/// │ OfflineCache │ │ Term Cloud │ +/// │ OfflineCache │ │ Term Nexus │ /// │ (SQLite) │ │ API │ /// └─────────────────┘ └─────────────────┘ /// ``` -pub struct TermCloudRepository { - config: Arc, - client: TermCloudClient, +pub struct NexusRepository { + config: Arc, + client: NexusClient, buffer: MetricsBuffer, 
cache: Option, shutdown_tx: watch::Sender, worker_handle: Option>>>, } -impl TermCloudRepository { - /// Creates a new TermCloudRepository and starts the background upload worker. +impl NexusRepository { + /// Creates a new NexusRepository and starts the background upload worker. /// /// # Arguments /// - /// * `config` - Configuration for connecting to Term Cloud + /// * `config` - Configuration for connecting to Term Nexus /// /// # Errors /// @@ -99,25 +99,25 @@ impl TermCloudRepository { /// # Example /// /// ```rust,ignore - /// use term_guard::cloud::{CloudConfig, TermCloudRepository}; + /// use term_guard::nexus::{NexusConfig, NexusRepository}; /// - /// let config = CloudConfig::new("your-api-key") + /// let config = NexusConfig::new("your-api-key") /// .with_buffer_size(5000) /// .with_batch_size(100); /// - /// let repository = TermCloudRepository::new(config)?; + /// let repository = NexusRepository::new(config)?; /// ``` #[instrument(skip(config), fields(endpoint = %config.endpoint()))] - pub fn new(config: CloudConfig) -> CloudResult { + pub fn new(config: NexusConfig) -> NexusResult { let config = Arc::new(config); - let client = TermCloudClient::new((*config).clone())?; + let client = NexusClient::new((*config).clone())?; let buffer = MetricsBuffer::new(config.buffer_size()); let (shutdown_tx, shutdown_rx) = watch::channel(false); let worker = UploadWorker::new((*config).clone(), buffer.clone(), shutdown_rx)?; let worker_handle = tokio::spawn(async move { worker.run().await }); - info!("TermCloudRepository initialized with background worker"); + info!("NexusRepository initialized with background worker"); Ok(Self { config, @@ -145,7 +145,7 @@ impl TermCloudRepository { /// # Example /// /// ```rust,ignore - /// let mut repository = TermCloudRepository::new(config)?; + /// let mut repository = NexusRepository::new(config)?; /// /// // Use default cache location /// repository.setup_cache(None)?; @@ -154,7 +154,7 @@ impl TermCloudRepository { /// 
repository.setup_cache(Some("/var/cache/myapp/metrics.db"))?; /// ``` #[instrument(skip(self, path))] - pub fn setup_cache(&mut self, path: Option<&Path>) -> CloudResult<()> { + pub fn setup_cache(&mut self, path: Option<&Path>) -> NexusResult<()> { let cache_path = if let Some(p) = path { p.to_path_buf() } else if let Some(p) = self.config.offline_cache_path() { @@ -164,7 +164,7 @@ impl TermCloudRepository { }; if let Some(parent) = cache_path.parent() { - std::fs::create_dir_all(parent).map_err(|e| CloudError::CacheError { + std::fs::create_dir_all(parent).map_err(|e| NexusError::CacheError { message: format!("Failed to create cache directory: {e}"), })?; } @@ -176,10 +176,10 @@ impl TermCloudRepository { } /// Returns the default platform-specific cache path. - fn default_cache_path() -> CloudResult { + fn default_cache_path() -> NexusResult { ProjectDirs::from("dev", "term", "term-guard") .map(|dirs| dirs.cache_dir().join("metrics.db")) - .ok_or_else(|| CloudError::Configuration { + .ok_or_else(|| NexusError::Configuration { message: "Could not determine cache directory".to_string(), }) } @@ -213,7 +213,7 @@ impl TermCloudRepository { /// repository.flush().await?; /// ``` #[instrument(skip(self))] - pub async fn flush(&self) -> CloudResult<()> { + pub async fn flush(&self) -> NexusResult<()> { let entries = self.buffer.clear().await; if entries.is_empty() { return Ok(()); @@ -242,12 +242,12 @@ impl TermCloudRepository { /// println!("Uploaded {} metrics during operation", stats.metrics_uploaded); /// ``` #[instrument(skip(self))] - pub async fn shutdown(&self) -> CloudResult> { + pub async fn shutdown(&self) -> NexusResult> { info!("Initiating graceful shutdown"); self.shutdown_tx .send(true) - .map_err(|e| CloudError::Configuration { + .map_err(|e| NexusError::Configuration { message: format!("Failed to send shutdown signal: {e}"), })?; @@ -284,7 +284,7 @@ impl TermCloudRepository { Ok(stats) } - /// Checks connectivity to Term Cloud. 
+ /// Checks connectivity to Term Nexus. /// /// # Errors /// @@ -294,16 +294,16 @@ impl TermCloudRepository { /// /// ```rust,ignore /// match repository.health_check().await { - /// Ok(response) => println!("Connected to Term Cloud v{}", response.version), + /// Ok(response) => println!("Connected to Term Nexus v{}", response.version), /// Err(e) => eprintln!("Connection failed: {}", e), /// } /// ``` #[instrument(skip(self))] - pub async fn health_check(&self) -> CloudResult { + pub async fn health_check(&self) -> NexusResult { self.client.health_check().await } - /// Synchronizes offline cached metrics to Term Cloud. + /// Synchronizes offline cached metrics to Term Nexus. /// /// Loads all cached metrics and attempts to upload them. Successfully /// uploaded metrics are removed from the cache. @@ -324,11 +324,11 @@ impl TermCloudRepository { /// println!("Synced {} cached metrics", synced); /// ``` #[instrument(skip(self))] - pub async fn sync_offline_cache(&self) -> CloudResult { + pub async fn sync_offline_cache(&self) -> NexusResult { let cache = self .cache .as_ref() - .ok_or_else(|| CloudError::Configuration { + .ok_or_else(|| NexusError::Configuration { message: "Offline cache not configured".to_string(), })?; @@ -369,49 +369,47 @@ impl TermCloudRepository { Ok(synced) } - /// Converts a ResultKey and AnalyzerContext to a CloudMetric. - fn to_cloud_metric(key: &ResultKey, context: &AnalyzerContext) -> CloudMetric { - let mut cloud_metrics = HashMap::new(); + /// Converts a ResultKey and AnalyzerContext to a NexusMetric. 
+ fn to_nexus_metric(key: &ResultKey, context: &AnalyzerContext) -> NexusMetric { + let mut nexus_metrics = HashMap::new(); for (metric_key, value) in context.all_metrics() { - let cloud_value = match value { - MetricValue::Double(v) => CloudMetricValue::Double(*v), - MetricValue::Long(v) => CloudMetricValue::Long(*v), - MetricValue::String(v) => CloudMetricValue::String(v.clone()), - MetricValue::Boolean(v) => CloudMetricValue::Boolean(*v), - MetricValue::Histogram(h) => { - CloudMetricValue::Histogram(crate::cloud::CloudHistogram { - buckets: h - .buckets - .iter() - .map(|b| crate::cloud::CloudHistogramBucket { - lower_bound: b.lower_bound, - upper_bound: b.upper_bound, - count: b.count, - }) - .collect(), - total_count: h.total_count, - min: h.min, - max: h.max, - mean: h.mean, - std_dev: h.std_dev, - }) - } + let nexus_value = match value { + MetricValue::Double(v) => NexusMetricValue::Double(*v), + MetricValue::Long(v) => NexusMetricValue::Long(*v), + MetricValue::String(v) => NexusMetricValue::String(v.clone()), + MetricValue::Boolean(v) => NexusMetricValue::Boolean(*v), + MetricValue::Histogram(h) => NexusMetricValue::Histogram(NexusHistogram { + buckets: h + .buckets + .iter() + .map(|b| NexusHistogramBucket { + lower_bound: b.lower_bound, + upper_bound: b.upper_bound, + count: b.count, + }) + .collect(), + total_count: h.total_count, + min: h.min, + max: h.max, + mean: h.mean, + std_dev: h.std_dev, + }), MetricValue::Vector(_) | MetricValue::Map(_) => { continue; } }; - cloud_metrics.insert(metric_key.clone(), cloud_value); + nexus_metrics.insert(metric_key.clone(), nexus_value); } let metadata = context.metadata(); - CloudMetric { - result_key: CloudResultKey { + NexusMetric { + result_key: NexusResultKey { dataset_date: key.timestamp, tags: key.tags.clone(), }, - metrics: cloud_metrics, - metadata: CloudMetadata { + metrics: nexus_metrics, + metadata: NexusMetadata { dataset_name: metadata.dataset_name.clone(), start_time: metadata.start_time.map(|t| 
t.to_rfc3339()), end_time: metadata.end_time.map(|t| t.to_rfc3339()), @@ -422,9 +420,9 @@ impl TermCloudRepository { } } - /// Uploads entries directly to Term Cloud. - async fn upload_entries(&self, entries: Vec) -> CloudResult<()> { - let metrics: Vec = entries.iter().map(|e| e.metric.clone()).collect(); + /// Uploads entries directly to Term Nexus. + async fn upload_entries(&self, entries: Vec) -> NexusResult<()> { + let metrics: Vec = entries.iter().map(|e| e.metric.clone()).collect(); match self.client.ingest(&metrics).await { Ok(response) => { @@ -444,37 +442,37 @@ impl TermCloudRepository { } /// Saves entries to the offline cache. - fn save_to_cache(&self, entries: &[BufferEntry]) -> CloudResult<()> { + fn save_to_cache(&self, entries: &[BufferEntry]) -> NexusResult<()> { if let Some(ref cache) = self.cache { for entry in entries { cache.save(&entry.metric, entry.retry_count)?; } Ok(()) } else { - Err(CloudError::CacheError { + Err(NexusError::CacheError { message: "Offline cache not configured, metrics will be lost".to_string(), }) } } /// Returns a reference to the underlying client. - pub fn client(&self) -> &TermCloudClient { + pub fn client(&self) -> &NexusClient { &self.client } /// Returns a reference to the configuration. - pub fn config(&self) -> &CloudConfig { + pub fn config(&self) -> &NexusConfig { &self.config } } #[async_trait] -impl MetricsRepository for TermCloudRepository { +impl MetricsRepository for NexusRepository { /// Saves metrics to the buffer for asynchronous upload. /// /// Metrics are buffered locally and uploaded by the background worker. /// If the buffer is full, returns a BufferOverflow error. 
- #[instrument(skip(self, metrics), fields(key.timestamp = %key.timestamp, repository_type = "term_cloud"))] + #[instrument(skip(self, metrics), fields(key.timestamp = %key.timestamp, repository_type = "nexus"))] async fn save(&self, key: ResultKey, metrics: AnalyzerContext) -> Result<()> { if let Err(validation_error) = key.validate_tags() { return Err(TermError::repository_validation( @@ -484,39 +482,39 @@ impl MetricsRepository for TermCloudRepository { )); } - let cloud_metric = Self::to_cloud_metric(&key, &metrics); + let nexus_metric = Self::to_nexus_metric(&key, &metrics); self.buffer - .push(cloud_metric) + .push(nexus_metric) .await - .map_err(|e| TermError::repository("term_cloud", "save", e.to_string()))?; + .map_err(|e| TermError::repository("nexus", "save", e.to_string()))?; debug!("Metric queued for upload"); Ok(()) } - /// Creates a query builder for retrieving metrics from Term Cloud. + /// Creates a query builder for retrieving metrics from Term Nexus. /// - /// Note: Query execution requires network access to Term Cloud. + /// Note: Query execution requires network access to Term Nexus. #[instrument(skip(self))] async fn load(&self) -> MetricsQuery { - MetricsQuery::new(Arc::new(TermCloudQueryAdapter { + MetricsQuery::new(Arc::new(NexusQueryAdapter { client: self.client.clone(), })) } - /// Deletes metrics by key from Term Cloud. - #[instrument(skip(self), fields(key.timestamp = %key.timestamp, repository_type = "term_cloud"))] + /// Deletes metrics by key from Term Nexus. 
+ #[instrument(skip(self), fields(key.timestamp = %key.timestamp, repository_type = "nexus"))] async fn delete(&self, key: ResultKey) -> Result<()> { - let cloud_key = CloudResultKey { + let nexus_key = NexusResultKey { dataset_date: key.timestamp, tags: key.tags.clone(), }; self.client - .delete(&cloud_key) + .delete(&nexus_key) .await - .map_err(|e| TermError::repository("term_cloud", "delete", e.to_string())) + .map_err(|e| TermError::repository("nexus", "delete", e.to_string())) } /// Returns metadata about the repository. @@ -529,20 +527,20 @@ impl MetricsRepository for TermCloudRepository { .map(|c| c.count().unwrap_or(0)) .unwrap_or(0); - Ok(RepositoryMetadata::new("term_cloud") + Ok(RepositoryMetadata::new("nexus") .with_config("endpoint", self.config.endpoint()) .with_config("pending_metrics", pending.to_string()) .with_config("cached_metrics", cached.to_string())) } } -/// Adapter for executing queries via TermCloudClient. -struct TermCloudQueryAdapter { - client: TermCloudClient, +/// Adapter for executing queries via NexusClient. 
+struct NexusQueryAdapter { + client: NexusClient, } #[async_trait] -impl MetricsRepository for TermCloudQueryAdapter { +impl MetricsRepository for NexusQueryAdapter { async fn save(&self, _key: ResultKey, _metrics: AnalyzerContext) -> Result<()> { Err(TermError::NotSupported( "save not supported on query adapter".to_string(), @@ -562,12 +560,12 @@ impl MetricsRepository for TermCloudQueryAdapter { } async fn list_keys(&self) -> Result> { - let query = crate::cloud::MetricsQuery::default(); + let query = crate::nexus::MetricsQuery::default(); let response = self .client .query(query) .await - .map_err(|e| TermError::repository("term_cloud", "list_keys", e.to_string()))?; + .map_err(|e| TermError::repository("nexus", "list_keys", e.to_string()))?; Ok(response .results @@ -577,7 +575,7 @@ impl MetricsRepository for TermCloudQueryAdapter { } async fn get(&self, key: &ResultKey) -> Result> { - let query = crate::cloud::MetricsQuery { + let query = crate::nexus::MetricsQuery { after: Some(key.timestamp), before: Some(key.timestamp + 1), tags: key.tags.clone(), @@ -589,17 +587,17 @@ impl MetricsRepository for TermCloudQueryAdapter { .client .query(query) .await - .map_err(|e| TermError::repository("term_cloud", "get", e.to_string()))?; + .map_err(|e| TermError::repository("nexus", "get", e.to_string()))?; Ok(response.results.into_iter().next().map(|m| { let mut context = AnalyzerContext::new(); for (metric_key, value) in m.metrics { let metric_value = match value { - CloudMetricValue::Double(v) => MetricValue::Double(v), - CloudMetricValue::Long(v) => MetricValue::Long(v), - CloudMetricValue::String(v) => MetricValue::String(v), - CloudMetricValue::Boolean(v) => MetricValue::Boolean(v), - CloudMetricValue::Histogram(_) => continue, + NexusMetricValue::Double(v) => MetricValue::Double(v), + NexusMetricValue::Long(v) => MetricValue::Long(v), + NexusMetricValue::String(v) => MetricValue::String(v), + NexusMetricValue::Boolean(v) => MetricValue::Boolean(v), + 
NexusMetricValue::Histogram(_) => continue, }; context.store_metric(metric_key, metric_value); } @@ -613,8 +611,8 @@ mod tests { use super::*; use std::time::Duration; - fn make_test_config() -> CloudConfig { - CloudConfig::new("test-api-key") + fn make_test_config() -> NexusConfig { + NexusConfig::new("test-api-key") .with_endpoint("http://localhost:1") .with_buffer_size(100) .with_flush_interval(Duration::from_millis(50)) @@ -623,14 +621,14 @@ mod tests { #[tokio::test] async fn test_repository_creation() { let config = make_test_config(); - let result = TermCloudRepository::new(config); + let result = NexusRepository::new(config); assert!(result.is_ok()); } #[tokio::test] async fn test_repository_save_queues_metric() { let config = make_test_config(); - let repository = TermCloudRepository::new(config).unwrap(); + let repository = NexusRepository::new(config).unwrap(); let key = ResultKey::new(1704931200000).with_tag("env", "test"); let context = AnalyzerContext::new(); @@ -646,7 +644,7 @@ mod tests { #[tokio::test] async fn test_repository_save_validates_tags() { let config = make_test_config(); - let repository = TermCloudRepository::new(config).unwrap(); + let repository = NexusRepository::new(config).unwrap(); let key = ResultKey::new(1704931200000).with_tag("", "invalid"); let context = AnalyzerContext::new(); @@ -660,7 +658,7 @@ mod tests { #[tokio::test] async fn test_repository_pending_count() { let config = make_test_config(); - let repository = TermCloudRepository::new(config).unwrap(); + let repository = NexusRepository::new(config).unwrap(); assert_eq!(repository.pending_count().await, 0); @@ -678,7 +676,7 @@ mod tests { #[tokio::test] async fn test_repository_shutdown_returns_stats() { let config = make_test_config(); - let repository = TermCloudRepository::new(config).unwrap(); + let repository = NexusRepository::new(config).unwrap(); let result = repository.shutdown().await; assert!(result.is_ok()); @@ -687,10 +685,10 @@ mod tests { #[tokio::test] 
async fn test_repository_metadata() { let config = make_test_config(); - let repository = TermCloudRepository::new(config).unwrap(); + let repository = NexusRepository::new(config).unwrap(); let metadata = repository.metadata().await.unwrap(); - assert_eq!(metadata.backend_type, Some("term_cloud".to_string())); + assert_eq!(metadata.backend_type, Some("nexus".to_string())); assert!(metadata.config.contains_key("endpoint")); assert!(metadata.config.contains_key("pending_metrics")); @@ -698,7 +696,7 @@ mod tests { } #[tokio::test] - async fn test_to_cloud_metric_conversion() { + async fn test_to_nexus_metric_conversion() { let key = ResultKey::new(1704931200000) .with_tag("env", "prod") .with_tag("region", "us-east-1"); @@ -708,26 +706,26 @@ mod tests { context.store_metric("size", MetricValue::Long(1000)); context.store_metric("is_valid", MetricValue::Boolean(true)); - let cloud_metric = TermCloudRepository::to_cloud_metric(&key, &context); + let nexus_metric = NexusRepository::to_nexus_metric(&key, &context); - assert_eq!(cloud_metric.result_key.dataset_date, 1704931200000); + assert_eq!(nexus_metric.result_key.dataset_date, 1704931200000); assert_eq!( - cloud_metric.result_key.tags.get("env"), + nexus_metric.result_key.tags.get("env"), Some(&"prod".to_string()) ); assert_eq!( - cloud_metric.metadata.dataset_name, + nexus_metric.metadata.dataset_name, Some("test_dataset".to_string()) ); - assert!(cloud_metric.metrics.contains_key("completeness.col1")); - assert!(cloud_metric.metrics.contains_key("size")); - assert!(cloud_metric.metrics.contains_key("is_valid")); + assert!(nexus_metric.metrics.contains_key("completeness.col1")); + assert!(nexus_metric.metrics.contains_key("size")); + assert!(nexus_metric.metrics.contains_key("is_valid")); } #[tokio::test] async fn test_repository_cache_setup() { let config = make_test_config(); - let mut repository = TermCloudRepository::new(config).unwrap(); + let mut repository = NexusRepository::new(config).unwrap(); let temp_dir 
= tempfile::tempdir().unwrap(); let cache_path = temp_dir.path().join("test_cache.db"); @@ -741,7 +739,7 @@ mod tests { #[tokio::test] async fn test_repository_flush() { let config = make_test_config(); - let mut repository = TermCloudRepository::new(config).unwrap(); + let mut repository = NexusRepository::new(config).unwrap(); let temp_dir = tempfile::tempdir().unwrap(); let cache_path = temp_dir.path().join("flush_test.db"); @@ -764,7 +762,7 @@ mod tests { #[tokio::test] async fn test_repository_sync_without_cache() { let config = make_test_config(); - let repository = TermCloudRepository::new(config).unwrap(); + let repository = NexusRepository::new(config).unwrap(); let result = repository.sync_offline_cache().await; assert!(result.is_err()); @@ -775,7 +773,7 @@ mod tests { #[tokio::test] async fn test_repository_sync_empty_cache() { let config = make_test_config(); - let mut repository = TermCloudRepository::new(config).unwrap(); + let mut repository = NexusRepository::new(config).unwrap(); let temp_dir = tempfile::tempdir().unwrap(); let cache_path = temp_dir.path().join("sync_test.db"); @@ -790,7 +788,7 @@ mod tests { #[tokio::test] async fn test_default_cache_path() { - let result = TermCloudRepository::default_cache_path(); + let result = NexusRepository::default_cache_path(); assert!(result.is_ok()); let path = result.unwrap(); assert!(path.to_string_lossy().contains("term")); diff --git a/term-guard/src/cloud/types.rs b/term-guard/src/nexus/types.rs similarity index 83% rename from term-guard/src/cloud/types.rs rename to term-guard/src/nexus/types.rs index 6dc5919..a4cdcc1 100644 --- a/term-guard/src/cloud/types.rs +++ b/term-guard/src/nexus/types.rs @@ -6,9 +6,9 @@ use serde::{Deserialize, Serialize}; use crate::security::SecureString; -/// Configuration for connecting to Term Cloud. +/// Configuration for connecting to Term Nexus. 
#[derive(Debug, Clone)] -pub struct CloudConfig { +pub struct NexusConfig { api_key: SecureString, endpoint: String, timeout: Duration, @@ -19,12 +19,12 @@ pub struct CloudConfig { offline_cache_path: Option, } -impl CloudConfig { - /// Create a new CloudConfig with the given API key. +impl NexusConfig { + /// Create a new NexusConfig with the given API key. pub fn new(api_key: impl Into) -> Self { Self { api_key: SecureString::new(api_key.into()), - endpoint: "https://api.term.dev".to_string(), + endpoint: "https://api.withterm.com".to_string(), timeout: Duration::from_secs(30), max_retries: 3, buffer_size: 1000, @@ -121,19 +121,19 @@ impl CloudConfig { } } -/// A metric ready for transmission to Term Cloud. +/// A metric ready for transmission to Term Nexus. #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct CloudMetric { - pub result_key: CloudResultKey, - pub metrics: HashMap, - pub metadata: CloudMetadata, +pub struct NexusMetric { + pub result_key: NexusResultKey, + pub metrics: HashMap, + pub metadata: NexusMetadata, #[serde(skip_serializing_if = "Option::is_none")] - pub validation_result: Option, + pub validation_result: Option, } /// Key identifying a set of metrics. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] -pub struct CloudResultKey { +pub struct NexusResultKey { pub dataset_date: i64, pub tags: HashMap, } @@ -141,7 +141,7 @@ pub struct CloudResultKey { /// A metric value in wire format. #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(tag = "type", content = "value")] -pub enum CloudMetricValue { +pub enum NexusMetricValue { #[serde(rename = "double")] Double(f64), #[serde(rename = "long")] @@ -151,13 +151,13 @@ pub enum CloudMetricValue { #[serde(rename = "boolean")] Boolean(bool), #[serde(rename = "histogram")] - Histogram(CloudHistogram), + Histogram(NexusHistogram), } /// Histogram data in wire format. 
#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct CloudHistogram { - pub buckets: Vec, +pub struct NexusHistogram { + pub buckets: Vec, pub total_count: u64, pub min: Option, pub max: Option, @@ -167,7 +167,7 @@ pub struct CloudHistogram { /// A single histogram bucket. #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct CloudHistogramBucket { +pub struct NexusHistogramBucket { pub lower_bound: f64, pub upper_bound: f64, pub count: u64, @@ -175,7 +175,7 @@ pub struct CloudHistogramBucket { /// Metadata about the metrics collection. #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct CloudMetadata { +pub struct NexusMetadata { #[serde(skip_serializing_if = "Option::is_none")] pub dataset_name: Option, #[serde(skip_serializing_if = "Option::is_none")] @@ -189,17 +189,17 @@ pub struct CloudMetadata { /// Validation result summary. #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct CloudValidationResult { +pub struct NexusValidationResult { pub status: String, pub total_checks: usize, pub passed_checks: usize, pub failed_checks: usize, - pub issues: Vec, + pub issues: Vec, } /// A single validation issue. 
#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct CloudValidationIssue { +pub struct NexusValidationIssue { pub check_name: String, pub constraint_name: String, pub level: String, @@ -213,19 +213,19 @@ mod tests { use super::*; #[test] - fn test_cloud_config_default() { - let config = CloudConfig::new("test-api-key"); + fn test_nexus_config_default() { + let config = NexusConfig::new("test-api-key"); assert_eq!(config.api_key().expose(), "test-api-key"); - assert_eq!(config.endpoint(), "https://api.term.dev"); + assert_eq!(config.endpoint(), "https://api.withterm.com"); assert_eq!(config.timeout(), Duration::from_secs(30)); assert_eq!(config.max_retries(), 3); assert_eq!(config.buffer_size(), 1000); } #[test] - fn test_cloud_config_builder() { - let config = CloudConfig::new("key") + fn test_nexus_config_builder() { + let config = NexusConfig::new("key") .with_endpoint("https://custom.endpoint") .with_timeout(Duration::from_secs(60)) .with_max_retries(5) @@ -240,7 +240,7 @@ mod tests { #[test] fn test_api_key_not_leaked_in_debug() { let secret_key = "super-secret-api-key-12345"; - let config = CloudConfig::new(secret_key); + let config = NexusConfig::new(secret_key); let debug_output = format!("{:?}", config); @@ -256,24 +256,24 @@ mod tests { #[test] fn test_offline_cache_path_with_pathbuf() { - let config = CloudConfig::new("key").with_offline_cache_path("/tmp/cache"); + let config = NexusConfig::new("key").with_offline_cache_path("/tmp/cache"); assert_eq!(config.offline_cache_path(), Some(Path::new("/tmp/cache"))); } #[test] - fn test_cloud_metric_serialization() { - let metric = CloudMetric { - result_key: CloudResultKey { + fn test_nexus_metric_serialization() { + let metric = NexusMetric { + result_key: NexusResultKey { dataset_date: 1704931200000, tags: vec![("env".to_string(), "prod".to_string())] .into_iter() .collect(), }, - metrics: vec![("completeness.id".to_string(), CloudMetricValue::Double(1.0))] + metrics: 
vec![("completeness.id".to_string(), NexusMetricValue::Double(1.0))] .into_iter() .collect(), - metadata: CloudMetadata { + metadata: NexusMetadata { dataset_name: Some("orders".to_string()), start_time: Some("2024-01-10T12:00:00Z".to_string()), end_time: Some("2024-01-10T12:05:00Z".to_string()), diff --git a/term-guard/src/cloud/worker.rs b/term-guard/src/nexus/worker.rs similarity index 92% rename from term-guard/src/cloud/worker.rs rename to term-guard/src/nexus/worker.rs index ed01ab0..4b09a4a 100644 --- a/term-guard/src/cloud/worker.rs +++ b/term-guard/src/nexus/worker.rs @@ -5,13 +5,13 @@ use tokio::sync::watch; use tokio::time::interval; use tracing::{debug, error, info, instrument, warn}; -use crate::cloud::{ - BufferEntry, CloudConfig, CloudError, CloudMetric, CloudResult, MetricsBuffer, TermCloudClient, +use crate::nexus::{ + BufferEntry, MetricsBuffer, NexusClient, NexusConfig, NexusError, NexusMetric, NexusResult, }; -/// Background worker for uploading metrics to Term Cloud. +/// Background worker for uploading metrics to Term Nexus. pub struct UploadWorker { - client: TermCloudClient, + client: NexusClient, buffer: MetricsBuffer, shutdown: watch::Receiver, batch_size: usize, @@ -34,13 +34,13 @@ impl UploadWorker { /// /// # Errors /// - /// Returns an error if the cloud client cannot be created. + /// Returns an error if the nexus client cannot be created. pub fn new( - config: CloudConfig, + config: NexusConfig, buffer: MetricsBuffer, shutdown: watch::Receiver, - ) -> CloudResult { - let client = TermCloudClient::new(config.clone())?; + ) -> NexusResult { + let client = NexusClient::new(config.clone())?; Ok(Self { batch_size: config.batch_size(), @@ -110,7 +110,7 @@ impl UploadWorker { /// Upload a batch of metrics, handling retries. 
async fn upload_batch(&mut self, entries: Vec) { - let metrics: Vec = entries.iter().map(|e| e.metric.clone()).collect(); + let metrics: Vec = entries.iter().map(|e| e.metric.clone()).collect(); let batch_size = entries.len() as u64; match self.client.ingest(&metrics).await { @@ -142,7 +142,7 @@ impl UploadWorker { /// Re-queues entries with a `ready_at` timestamp calculated via exponential /// backoff. The buffer's `drain()` method respects this timestamp, ensuring /// entries are not retried until their backoff period has elapsed. - async fn handle_retry(&mut self, entries: Vec, error: &CloudError) { + async fn handle_retry(&mut self, entries: Vec, error: &NexusError) { let retry_after = error.retry_after(); for entry in entries { @@ -182,7 +182,7 @@ mod tests { #[tokio::test] async fn test_worker_shutdown() { - let config = CloudConfig::new("test-key") + let config = NexusConfig::new("test-key") .with_endpoint("http://localhost:1") .with_flush_interval(Duration::from_millis(100)); @@ -203,7 +203,7 @@ mod tests { #[tokio::test] async fn test_worker_returns_stats() { - let config = CloudConfig::new("test-key") + let config = NexusConfig::new("test-key") .with_endpoint("http://localhost:1") .with_flush_interval(Duration::from_millis(50)); @@ -226,7 +226,7 @@ mod tests { #[test] fn test_calculate_backoff() { - let config = CloudConfig::new("test-key").with_endpoint("http://localhost:1"); + let config = NexusConfig::new("test-key").with_endpoint("http://localhost:1"); let (_, shutdown_rx) = watch::channel(false); let buffer = MetricsBuffer::new(100); @@ -251,7 +251,7 @@ mod tests { #[test] fn test_calculate_backoff_uses_retry_after() { - let config = CloudConfig::new("test-key").with_endpoint("http://localhost:1"); + let config = NexusConfig::new("test-key").with_endpoint("http://localhost:1"); let (_, shutdown_rx) = watch::channel(false); let buffer = MetricsBuffer::new(100); diff --git a/term-guard/tests/cloud_integration.rs b/term-guard/tests/nexus_integration.rs 
similarity index 59% rename from term-guard/tests/cloud_integration.rs rename to term-guard/tests/nexus_integration.rs index 9b3571e..f3cf6e0 100644 --- a/term-guard/tests/cloud_integration.rs +++ b/term-guard/tests/nexus_integration.rs @@ -1,26 +1,26 @@ -#![cfg(feature = "cloud")] +#![cfg(feature = "nexus")] use std::collections::HashMap; use std::time::Duration; use term_guard::analyzers::{AnalyzerContext, MetricValue}; -use term_guard::cloud::{ - AlertPayload, AlertSeverity, CloudConfig, CloudMetadata, CloudMetric, CloudMetricValue, - CloudResultKey, CloudValidationIssue, CloudValidationResult, TermCloudRepository, +use term_guard::nexus::{ + NexusConfig, NexusMetadata, NexusMetric, NexusMetricValue, NexusRepository, NexusResultKey, + NexusValidationIssue, NexusValidationResult, }; use term_guard::repository::{MetricsRepository, ResultKey}; #[tokio::test] -async fn test_full_cloud_flow() { +async fn test_full_nexus_flow() { let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); let cache_path = temp_dir.path().join("test_cache.db"); - let config = CloudConfig::new("test-api-key-12345") + let config = NexusConfig::new("test-api-key-12345") .with_endpoint("http://localhost:1") .with_buffer_size(100) .with_flush_interval(Duration::from_millis(100)); - let mut repository = TermCloudRepository::new(config).expect("Failed to create repository"); + let mut repository = NexusRepository::new(config).expect("Failed to create repository"); repository .setup_cache(Some(&cache_path)) .expect("Failed to setup cache"); @@ -46,9 +46,9 @@ async fn test_full_cloud_flow() { } #[test] -fn test_cloud_metric_wire_format() { - let metric = CloudMetric { - result_key: CloudResultKey { +fn test_nexus_metric_wire_format() { + let metric = NexusMetric { + result_key: NexusResultKey { dataset_date: 1704931200000, tags: vec![ ("env".to_string(), "production".to_string()), @@ -60,14 +60,14 @@ fn test_cloud_metric_wire_format() { metrics: vec![ ( 
"completeness.user_id".to_string(), - CloudMetricValue::Double(0.98), + NexusMetricValue::Double(0.98), ), - ("size".to_string(), CloudMetricValue::Long(5000)), - ("is_valid".to_string(), CloudMetricValue::Boolean(true)), + ("size".to_string(), NexusMetricValue::Long(5000)), + ("is_valid".to_string(), NexusMetricValue::Boolean(true)), ] .into_iter() .collect(), - metadata: CloudMetadata { + metadata: NexusMetadata { dataset_name: Some("orders_table".to_string()), start_time: Some("2024-01-10T12:00:00Z".to_string()), end_time: Some("2024-01-10T12:05:00Z".to_string()), @@ -97,53 +97,9 @@ fn test_cloud_metric_wire_format() { assert!(parsed["metadata"]["dataset_name"].is_string()); } -#[test] -fn test_webhook_alert_generation() { - let validation_result = CloudValidationResult { - status: "error".to_string(), - total_checks: 10, - passed_checks: 3, - failed_checks: 7, - issues: vec![ - CloudValidationIssue { - check_name: "DataQuality".to_string(), - constraint_name: "Completeness".to_string(), - level: "error".to_string(), - message: "Column 'user_id' has 15% null values".to_string(), - metric: Some(0.85), - }, - CloudValidationIssue { - check_name: "DataQuality".to_string(), - constraint_name: "Uniqueness".to_string(), - level: "error".to_string(), - message: "Column 'email' has duplicate values".to_string(), - metric: Some(0.92), - }, - ], - }; - - let payload = - AlertPayload::from_validation_result(&validation_result, "orders_table", "production"); - - assert_eq!(payload.severity, AlertSeverity::Critical); - assert!(payload.title.contains("Critical") || payload.title.contains("Failed")); - assert_eq!(payload.dataset, "orders_table"); - assert_eq!(payload.environment, "production"); - assert_eq!(payload.summary.total_checks, 10); - assert_eq!(payload.summary.passed, 3); - assert_eq!(payload.summary.failed, 7); - - assert!(payload.details.is_some()); - let details = payload.details.unwrap(); - assert_eq!(details.len(), 2); - assert_eq!(details[0].check, 
"DataQuality"); - assert_eq!(details[0].constraint, "Completeness"); - assert_eq!(details[0].metric, Some(0.85)); -} - #[test] fn test_config_builder() { - let config = CloudConfig::new("my-api-key") + let config = NexusConfig::new("my-api-key") .with_endpoint("https://custom.endpoint.com") .with_timeout(Duration::from_secs(60)) .with_max_retries(5) @@ -170,11 +126,11 @@ async fn test_repository_with_multiple_metrics() { let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); let cache_path = temp_dir.path().join("multi_metrics_cache.db"); - let config = CloudConfig::new("test-key") + let config = NexusConfig::new("test-key") .with_endpoint("http://localhost:1") .with_buffer_size(50); - let mut repository = TermCloudRepository::new(config).expect("Failed to create repository"); + let mut repository = NexusRepository::new(config).expect("Failed to create repository"); repository .setup_cache(Some(&cache_path)) .expect("Failed to setup cache"); @@ -203,13 +159,13 @@ async fn test_repository_with_multiple_metrics() { } #[test] -fn test_cloud_validation_result_serialization() { - let result = CloudValidationResult { +fn test_nexus_validation_result_serialization() { + let result = NexusValidationResult { status: "warning".to_string(), total_checks: 5, passed_checks: 4, failed_checks: 1, - issues: vec![CloudValidationIssue { + issues: vec![NexusValidationIssue { check_name: "QualityCheck".to_string(), constraint_name: "PatternMatch".to_string(), level: "warning".to_string(), @@ -224,7 +180,7 @@ fn test_cloud_validation_result_serialization() { assert!(json.contains("total_checks")); assert!(json.contains("issues")); - let deserialized: CloudValidationResult = + let deserialized: NexusValidationResult = serde_json::from_str(&json).expect("Failed to deserialize"); assert_eq!(deserialized.status, "warning"); @@ -232,36 +188,3 @@ fn test_cloud_validation_result_serialization() { assert_eq!(deserialized.failed_checks, 1); assert_eq!(deserialized.issues.len(), 
1); } - -#[test] -fn test_alert_severity_levels() { - let info_result = CloudValidationResult { - status: "success".to_string(), - total_checks: 10, - passed_checks: 10, - failed_checks: 0, - issues: vec![], - }; - let info_payload = AlertPayload::from_validation_result(&info_result, "test", "dev"); - assert_eq!(info_payload.severity, AlertSeverity::Info); - - let warning_result = CloudValidationResult { - status: "warning".to_string(), - total_checks: 10, - passed_checks: 8, - failed_checks: 2, - issues: vec![], - }; - let warning_payload = AlertPayload::from_validation_result(&warning_result, "test", "dev"); - assert_eq!(warning_payload.severity, AlertSeverity::Warning); - - let critical_result = CloudValidationResult { - status: "error".to_string(), - total_checks: 10, - passed_checks: 3, - failed_checks: 7, - issues: vec![], - }; - let critical_payload = AlertPayload::from_validation_result(&critical_result, "test", "dev"); - assert_eq!(critical_payload.severity, AlertSeverity::Critical); -} From 1f4261d2bfb78cc6c30800f9bf24689f5cba9fa9 Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Wed, 14 Jan 2026 08:15:40 -0700 Subject: [PATCH 18/22] feat(examples): add nexus-repository example scaffold --- docs/examples/nexus-repository/Cargo.toml | 11 +++++++ docs/examples/nexus-repository/README.md | 33 +++++++++++++++++++ docs/examples/nexus-repository/data/items.csv | 11 +++++++ 3 files changed, 55 insertions(+) create mode 100644 docs/examples/nexus-repository/Cargo.toml create mode 100644 docs/examples/nexus-repository/README.md create mode 100644 docs/examples/nexus-repository/data/items.csv diff --git a/docs/examples/nexus-repository/Cargo.toml b/docs/examples/nexus-repository/Cargo.toml new file mode 100644 index 0000000..feaa28a --- /dev/null +++ b/docs/examples/nexus-repository/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "nexus-repository-example" +version = "0.0.1" +edition = "2021" + +[dependencies] +term-guard = { path = "../../../term-guard", features = 
["nexus"] } +tokio = { version = "1.0", features = ["full"] } +datafusion = "50.3" +anyhow = "1.0" +chrono = "0.4" diff --git a/docs/examples/nexus-repository/README.md b/docs/examples/nexus-repository/README.md new file mode 100644 index 0000000..349760d --- /dev/null +++ b/docs/examples/nexus-repository/README.md @@ -0,0 +1,33 @@ +# Nexus Repository Example + +This example demonstrates how to persist validation metrics to Term Nexus and query historical results, similar to Deequ's MetricsRepository. + +## What This Example Shows + +- Connecting to Term Nexus with API key authentication +- Running validation checks and storing metrics with tags +- Querying historical metrics by time range and tags +- Comparing metrics across validation runs for anomaly detection + +## Prerequisites + +- Term Nexus API running locally at `http://localhost:8080` +- Set the `TERM_API_KEY` environment variable + +## Running the Example + +```bash +# Start the Nexus API (if not already running) +# cd nexus && cargo run + +# Run the example +cd docs/examples/nexus-repository +TERM_API_KEY=your-api-key cargo run +``` + +## Key Concepts Demonstrated + +1. **NexusRepository**: Implements `MetricsRepository` for cloud persistence +2. **ResultKey**: Unique identifier with timestamp and tags for organization +3. **Tag-based Filtering**: Query metrics by environment, pipeline, or custom tags +4. 
**Historical Comparison**: Load previous metrics for trend analysis diff --git a/docs/examples/nexus-repository/data/items.csv b/docs/examples/nexus-repository/data/items.csv new file mode 100644 index 0000000..c9108b3 --- /dev/null +++ b/docs/examples/nexus-repository/data/items.csv @@ -0,0 +1,11 @@ +id,name,description,priority,category,price,quantity,in_stock +1,Widget A,High quality widget,high,electronics,29.99,100,true +2,Widget B,Standard widget,medium,electronics,19.99,250,true +3,Gadget X,Premium gadget,high,gadgets,49.99,50,true +4,Gadget Y,,low,gadgets,9.99,500,true +5,Tool Alpha,Professional tool,high,tools,99.99,25,true +6,Tool Beta,Basic tool,medium,tools,24.99,150,true +7,,Missing name item,low,misc,4.99,1000,true +8,Item 8,Regular item,medium,misc,14.99,200,false +9,Item 9,Another item,low,misc,7.99,300,true +10,Premium Z,Top tier product,high,premium,199.99,10,true From 2b243f3f3ac955497fcf25c4c67628ee1e2eaa19 Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Wed, 14 Jan 2026 08:23:38 -0700 Subject: [PATCH 19/22] feat(examples): implement basic nexus metrics storage Add main.rs with core functionality: - Health check to verify Nexus connectivity - Load CSV data and run validation checks using ValidationSuite - Store metrics to Nexus with tagged ResultKey - Graceful shutdown with worker stats Also add [workspace] to Cargo.toml to exclude from parent workspace. 
Co-Authored-By: Claude Opus 4.5 --- docs/examples/nexus-repository/Cargo.toml | 2 + docs/examples/nexus-repository/src/main.rs | 174 +++++++++++++++++++++ 2 files changed, 176 insertions(+) create mode 100644 docs/examples/nexus-repository/src/main.rs diff --git a/docs/examples/nexus-repository/Cargo.toml b/docs/examples/nexus-repository/Cargo.toml index feaa28a..447711f 100644 --- a/docs/examples/nexus-repository/Cargo.toml +++ b/docs/examples/nexus-repository/Cargo.toml @@ -3,6 +3,8 @@ name = "nexus-repository-example" version = "0.0.1" edition = "2021" +[workspace] + [dependencies] term-guard = { path = "../../../term-guard", features = ["nexus"] } tokio = { version = "1.0", features = ["full"] } diff --git a/docs/examples/nexus-repository/src/main.rs b/docs/examples/nexus-repository/src/main.rs new file mode 100644 index 0000000..cefbe7a --- /dev/null +++ b/docs/examples/nexus-repository/src/main.rs @@ -0,0 +1,174 @@ +//! Nexus Repository Example - Demonstrating metrics persistence with Term Nexus +//! +//! This example shows how to: +//! 1. Connect to Term Nexus and verify connectivity +//! 2. Run validation checks and store metrics with tags +//! 3. 
Query historical metrics for trend analysis + +use anyhow::Result; +use chrono::Utc; +use datafusion::prelude::SessionContext; +use std::env; +use term_guard::analyzers::context::AnalyzerContext; +use term_guard::analyzers::types::MetricValue; +use term_guard::constraints::Assertion; +use term_guard::core::{Check, ConstraintOptions, Level, ValidationResult, ValidationSuite}; +use term_guard::nexus::{NexusConfig, NexusRepository}; +use term_guard::repository::{MetricsRepository, ResultKey}; +use term_guard::sources::{CsvSource, DataSource}; + +#[tokio::main] +async fn main() -> Result<()> { + println!("=== Term Nexus Repository Example ===\n"); + + // Step 1: Configure the Nexus connection + let api_key = env::var("TERM_API_KEY").unwrap_or_else(|_| "demo-api-key".to_string()); + + let config = NexusConfig::new(&api_key) + .with_endpoint("http://localhost:8080") + .with_buffer_size(100) + .with_batch_size(10); + + println!("Connecting to Term Nexus..."); + + // Step 2: Create repository and verify connectivity + let repository = NexusRepository::new(config)?; + + match repository.health_check().await { + Ok(health) => { + println!("Connected to Term Nexus v{}", health.version); + } + Err(e) => { + eprintln!("Failed to connect to Nexus: {}", e); + eprintln!("\nMake sure the Nexus API is running at http://localhost:8080"); + return Ok(()); + } + } + println!(); + + // Step 3: Load sample data + println!("Loading item data..."); + let ctx = SessionContext::new(); + + let source = CsvSource::new("data/items.csv")?; + source.register(&ctx, "items").await?; + println!("Loaded items table\n"); + + // Step 4: Create validation suite + let suite = ValidationSuite::builder("item_quality_checks") + .description("Data quality checks for item inventory") + .table_name("items") + .check( + Check::builder("completeness") + .level(Level::Error) + .completeness("id", ConstraintOptions::new().with_threshold(1.0)) + .completeness("name", ConstraintOptions::new().with_threshold(1.0)) + 
.completeness("price", ConstraintOptions::new().with_threshold(1.0)) + .build(), + ) + .check( + Check::builder("validity") + .level(Level::Error) + .has_min("price", Assertion::GreaterThanOrEqual(0.01)) + .has_min("quantity", Assertion::GreaterThanOrEqual(0.0)) + .build(), + ) + .check( + Check::builder("uniqueness") + .level(Level::Error) + .validates_uniqueness(vec!["id"], 1.0) + .build(), + ) + .build(); + + // Step 5: Run validation + println!("Running validation checks..."); + let results = suite.run(&ctx).await?; + + // Step 6: Display results + let (passed, total, report) = match &results { + ValidationResult::Success { metrics, report } => { + (metrics.passed_checks, metrics.total_checks, report) + } + ValidationResult::Failure { report } => { + (report.metrics.passed_checks, report.metrics.total_checks, report) + } + }; + + println!( + "\nValidation complete: {}/{} checks passed\n", + passed, total + ); + + for issue in &report.issues { + let icon = match issue.level { + Level::Error => "X", + Level::Warning => "!", + Level::Info => "i", + }; + println!("[{}] {}: {}", icon, issue.check_name, issue.message); + if let Some(metric) = issue.metric { + println!(" metric: {:.2}", metric); + } + } + + // Step 7: Create result key with tags for this validation run + let result_key = ResultKey::new(Utc::now().timestamp_millis()) + .with_tag("environment", "development") + .with_tag("pipeline", "daily-inventory") + .with_tag("dataset", "items"); + + println!("\n--- Storing Metrics to Nexus ---"); + println!("Result Key: {}", result_key.timestamp); + println!("Tags: {:?}", result_key.tags); + + // Step 8: Convert results to AnalyzerContext and save + let mut context = AnalyzerContext::with_dataset("items"); + + // Store summary metrics + context.store_metric( + "validation.passed_checks", + MetricValue::Long(passed as i64), + ); + context.store_metric( + "validation.total_checks", + MetricValue::Long(total as i64), + ); + context.store_metric( + 
"validation.success_rate", + MetricValue::Double(if total > 0 { + (passed as f64 / total as f64) * 100.0 + } else { + 100.0 + }), + ); + + // Store issue count by level + let error_count = report.issues.iter().filter(|i| i.level == Level::Error).count(); + let warning_count = report.issues.iter().filter(|i| i.level == Level::Warning).count(); + context.store_metric("validation.error_count", MetricValue::Long(error_count as i64)); + context.store_metric("validation.warning_count", MetricValue::Long(warning_count as i64)); + + // Save to Nexus + repository.save(result_key.clone(), context).await?; + println!("Metrics queued for upload"); + + // Force flush to ensure metrics are sent + repository.flush().await?; + println!("Metrics uploaded to Nexus\n"); + + // Step 9: Graceful shutdown + let stats = repository.shutdown().await?; + if let Some(s) = stats { + println!( + "Worker stats: {} uploaded, {} failed", + s.metrics_uploaded, s.metrics_failed + ); + } + + println!("\nExample complete! 
Metrics are now stored in Term Nexus."); + println!("You can query them using the Nexus API or run this example again"); + println!("to see historical comparison."); + + Ok(()) +} From 0301cefe159e2b4780050b427f856f4016b61390 Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Wed, 14 Jan 2026 08:33:03 -0700 Subject: [PATCH 20/22] feat(examples): add historical metrics query and comparison Co-Authored-By: Claude Opus 4.5 --- docs/examples/nexus-repository/src/main.rs | 72 +++++++++++++++++++++- 1 file changed, 71 insertions(+), 1 deletion(-) diff --git a/docs/examples/nexus-repository/src/main.rs b/docs/examples/nexus-repository/src/main.rs index cefbe7a..aea54fb 100644 --- a/docs/examples/nexus-repository/src/main.rs +++ b/docs/examples/nexus-repository/src/main.rs @@ -157,7 +157,77 @@ async fn main() -> Result<()> { repository.flush().await?; println!("Metrics uploaded to Nexus\n"); - // Step 9: Graceful shutdown + // Step 9: Query historical metrics + println!("--- Querying Historical Metrics ---\n"); + + // Load recent metrics for this dataset + let query = repository.load().await; + let historical = query + .with_tag("dataset", "items") + .with_tag("pipeline", "daily-inventory") + .after(Utc::now().timestamp_millis() - 86400000) // Last 24 hours + .limit(10) + .execute() + .await; + + match historical { + Ok(metrics) => { + if metrics.is_empty() { + println!("No historical metrics found (this is the first run)"); + } else { + println!("Found {} historical result(s):\n", metrics.len()); + + for (key, ctx) in &metrics { + println!(" Timestamp: {}", key.timestamp); + println!(" Tags: {:?}", key.tags); + + // Show key metrics + if let Some(passed) = ctx.get_metric("validation.passed_checks") { + println!(" Passed checks: {:?}", passed); + } + println!(); + } + + // Compare with previous run + // Default sort is descending, so index 0 is current run, index 1 is previous + if metrics.len() > 1 { + if let Some((_, current_ctx)) = metrics.get(0) { + if let 
Some(MetricValue::Long(current_passed)) = + current_ctx.get_metric("validation.passed_checks") + { + if let Some((prev_key, prev_ctx)) = metrics.get(1) { + println!("--- Comparison with Previous Run ---\n"); + println!("Previous run timestamp: {}", prev_key.timestamp); + + if let Some(MetricValue::Long(prev_passed)) = + prev_ctx.get_metric("validation.passed_checks") + { + let diff = *current_passed as f64 - *prev_passed as f64; + if diff > 0.0 { + println!("Improvement: {} more checks passing", diff); + } else if diff < 0.0 { + println!("Regression: {} fewer checks passing", diff.abs()); + } else { + println!("No change in passing checks"); + } + } + } + } + } + } else { + println!("Only one run found - no previous run to compare with"); + } + } + } + Err(e) => { + println!("Could not query historical metrics: {}", e); + println!("(This is expected if the Nexus API doesn't support queries yet)"); + } + } + + println!(); + + // Step 10: Graceful shutdown let stats = repository.shutdown().await?; if let Some(s) = stats { println!( From a75ca0eb3d9e6070df10181d88bc0ddb7f1e2aa2 Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Wed, 14 Jan 2026 08:39:26 -0700 Subject: [PATCH 21/22] fix(examples): add offline cache setup and fix clippy warning - Set up offline cache for resilience when storing metrics to Nexus - Fix clippy::get_first warning by using .first() instead of .get(0) Co-Authored-By: Claude Opus 4.5 --- docs/examples/nexus-repository/Cargo.lock | 4219 ++++++++++++++++++++ docs/examples/nexus-repository/src/main.rs | 9 +- 2 files changed, 4226 insertions(+), 2 deletions(-) create mode 100644 docs/examples/nexus-repository/Cargo.lock diff --git a/docs/examples/nexus-repository/Cargo.lock b/docs/examples/nexus-repository/Cargo.lock new file mode 100644 index 0000000..e79c38e --- /dev/null +++ b/docs/examples/nexus-repository/Cargo.lock @@ -0,0 +1,4219 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "const-random", + "getrandom 0.3.4", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +dependencies = [ + "memchr", +] + +[[package]] +name = "alloc-no-stdlib" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" + +[[package]] +name = "alloc-stdlib" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" +dependencies = [ + "alloc-no-stdlib", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anyhow" +version = "1.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" + +[[package]] +name = "ar_archive_writer" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f0c269894b6fe5e9d7ada0cf69b5bf847ff35bc25fc271f08e1d080fce80339a" +dependencies = [ + "object", +] + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = "arrow" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e833808ff2d94ed40d9379848a950d995043c7fb3e81a30b383f4c6033821cc" +dependencies = [ + "arrow-arith", + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-csv", + "arrow-data", + "arrow-ipc", + "arrow-json", + "arrow-ord", + "arrow-row", + "arrow-schema", + "arrow-select", + "arrow-string", +] + +[[package]] +name = "arrow-arith" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad08897b81588f60ba983e3ca39bda2b179bdd84dced378e7df81a5313802ef8" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "chrono", + "num", +] + +[[package]] +name = "arrow-array" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8548ca7c070d8db9ce7aa43f37393e4bfcf3f2d3681df278490772fd1673d08d" +dependencies = [ + "ahash", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "chrono", + "chrono-tz", + "half", + "hashbrown 0.16.1", + "num", +] + +[[package]] +name = "arrow-buffer" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e003216336f70446457e280807a73899dd822feaf02087d31febca1363e2fccc" +dependencies = [ + "bytes", + "half", + "num", +] + +[[package]] +name = "arrow-cast" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"919418a0681298d3a77d1a315f625916cb5678ad0d74b9c60108eb15fd083023" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "atoi", + "base64", + "chrono", + "comfy-table", + "half", + "lexical-core", + "num", + "ryu", +] + +[[package]] +name = "arrow-csv" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa9bf02705b5cf762b6f764c65f04ae9082c7cfc4e96e0c33548ee3f67012eb" +dependencies = [ + "arrow-array", + "arrow-cast", + "arrow-schema", + "chrono", + "csv", + "csv-core", + "regex", +] + +[[package]] +name = "arrow-data" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5c64fff1d142f833d78897a772f2e5b55b36cb3e6320376f0961ab0db7bd6d0" +dependencies = [ + "arrow-buffer", + "arrow-schema", + "half", + "num", +] + +[[package]] +name = "arrow-ipc" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d3594dcddccc7f20fd069bc8e9828ce37220372680ff638c5e00dea427d88f5" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "flatbuffers", + "lz4_flex", + "zstd", +] + +[[package]] +name = "arrow-json" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88cf36502b64a127dc659e3b305f1d993a544eab0d48cce704424e62074dc04b" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-schema", + "chrono", + "half", + "indexmap", + "lexical-core", + "memchr", + "num", + "serde", + "serde_json", + "simdutf8", +] + +[[package]] +name = "arrow-ord" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c8f82583eb4f8d84d4ee55fd1cb306720cddead7596edce95b50ee418edf66f" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", +] + +[[package]] +name = "arrow-row" +version = 
"56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d07ba24522229d9085031df6b94605e0f4b26e099fb7cdeec37abd941a73753" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "half", +] + +[[package]] +name = "arrow-schema" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3aa9e59c611ebc291c28582077ef25c97f1975383f1479b12f3b9ffee2ffabe" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "arrow-select" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c41dbbd1e97bfcaee4fcb30e29105fb2c75e4d82ae4de70b792a5d3f66b2e7a" +dependencies = [ + "ahash", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "num", +] + +[[package]] +name = "arrow-string" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53f5183c150fbc619eede22b861ea7c0eebed8eaac0333eaa7f6da5205fd504d" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "memchr", + "num", + "regex", + "regex-syntax", +] + +[[package]] +name = "async-compression" +version = "0.4.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06575e6a9673580f52661c92107baabffbf41e2141373441cbcdc47cb733003c" +dependencies = [ + "bzip2 0.5.2", + "flate2", + "futures-core", + "memchr", + "pin-project-lite", + "tokio", + "xz2", + "zstd", + "zstd-safe", +] + +[[package]] +name = "async-trait" +version = "0.1.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" 
+dependencies = [ + "num-traits", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bigdecimal" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d6867f1565b3aad85681f1015055b087fcfd840d6aeee6eee7f2da317603695" +dependencies = [ + "autocfg", + "libm", + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "bitflags" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + +[[package]] +name = "blake3" +version = "1.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2468ef7d57b3fb7e16b576e8377cdbde2320c60e1491e961d11da40fc4f02a2d" +dependencies = [ + "arrayref", + "arrayvec", + "cc", + "cfg-if", + "constant_time_eq", + "cpufeatures", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "brotli" +version = "8.0.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bd8b9603c7aa97359dbd97ecf258968c95f3adddd6db2f7e7a5bef101c84560" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", +] + +[[package]] +name = "brotli-decompressor" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "874bb8112abecc98cbd6d81ea4fa7e94fb9449648c93cc89aa40c81c24d7de03" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", +] + +[[package]] +name = "bumpalo" +version = "3.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" + +[[package]] +name = "bzip2" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49ecfb22d906f800d4fe833b6282cf4dc1c298f5057ca0b5445e5c209735ca47" +dependencies = [ + "bzip2-sys", +] + +[[package]] +name = "bzip2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a53fac24f34a81bc9954b5d6cfce0c21e18ec6959f44f56e8e90e4bb7c346c" +dependencies = [ + "libbz2-rs-sys", +] + +[[package]] +name = "bzip2-sys" +version = "0.1.13+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" +dependencies = [ + "cc", + "pkg-config", +] + +[[package]] +name = "cc" +version = "1.2.52" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd4932aefd12402b36c60956a4fe0035421f544799057659ff86f923657aada3" 
+dependencies = [ + "find-msvc-tools", + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "chrono" +version = "0.4.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" +dependencies = [ + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-link", +] + +[[package]] +name = "chrono-tz" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6139a8597ed92cf816dfb33f5dd6cf0bb93a6adc938f11039f371bc5bcd26c3" +dependencies = [ + "chrono", + "phf", +] + +[[package]] +name = "comfy-table" +version = "7.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0d05af1e006a2407bedef5af410552494ce5be9090444dbbcb57258c1af3d56" +dependencies = [ + "strum", + "strum_macros", + "unicode-width", +] + +[[package]] +name = "const-random" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom 0.2.17", + "once_cell", + "tiny-keccak", +] + +[[package]] +name = "constant_time_eq" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3d52eff69cd5e647efe296129160853a42795992097e8af39800e1060caeea9b" + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crc32fast" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + +[[package]] +name = "crypto-common" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "csv" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52cd9d68cf7efc6ddfaaee42e7288d3a99d613d4b50f76ce9827ae0c6e14f938" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde_core", +] + +[[package]] +name = "csv-core" +version = "0.1.13" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "704a3c26996a80471189265814dbc2c257598b96b8a7feae2d31ace646bb9782" +dependencies = [ + "memchr", +] + +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "datafusion" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2af15bb3c6ffa33011ef579f6b0bcbe7c26584688bd6c994f548e44df67f011a" +dependencies = [ + "arrow", + "arrow-ipc", + "arrow-schema", + "async-trait", + "bytes", + "bzip2 0.6.1", + "chrono", + "datafusion-catalog", + "datafusion-catalog-listing", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-datasource", + "datafusion-datasource-csv", + "datafusion-datasource-json", + "datafusion-datasource-parquet", + "datafusion-execution", + "datafusion-expr", + "datafusion-expr-common", + "datafusion-functions", + "datafusion-functions-aggregate", + "datafusion-functions-nested", + "datafusion-functions-table", + "datafusion-functions-window", + "datafusion-optimizer", + "datafusion-physical-expr", + "datafusion-physical-expr-adapter", + "datafusion-physical-expr-common", + "datafusion-physical-optimizer", + "datafusion-physical-plan", + "datafusion-session", + "datafusion-sql", + "flate2", + "futures", + "itertools", + "log", + "object_store", + "parking_lot", + "parquet", + "rand", + "regex", + "sqlparser", + "tempfile", + "tokio", + "url", + "uuid", + "xz2", + "zstd", +] + +[[package]] +name = "datafusion-catalog" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "187622262ad8f7d16d3be9202b4c1e0116f1c9aa387e5074245538b755261621" +dependencies = [ + "arrow", + "async-trait", + "dashmap", + 
"datafusion-common", + "datafusion-common-runtime", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr", + "datafusion-physical-plan", + "datafusion-session", + "datafusion-sql", + "futures", + "itertools", + "log", + "object_store", + "parking_lot", + "tokio", +] + +[[package]] +name = "datafusion-catalog-listing" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9657314f0a32efd0382b9a46fdeb2d233273ece64baa68a7c45f5a192daf0f83" +dependencies = [ + "arrow", + "async-trait", + "datafusion-catalog", + "datafusion-common", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-session", + "futures", + "log", + "object_store", + "tokio", +] + +[[package]] +name = "datafusion-common" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a83760d9a13122d025fbdb1d5d5aaf93dd9ada5e90ea229add92aa30898b2d1" +dependencies = [ + "ahash", + "arrow", + "arrow-ipc", + "base64", + "chrono", + "half", + "hashbrown 0.14.5", + "indexmap", + "libc", + "log", + "object_store", + "parquet", + "paste", + "recursive", + "sqlparser", + "tokio", + "web-time", +] + +[[package]] +name = "datafusion-common-runtime" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b6234a6c7173fe5db1c6c35c01a12b2aa0f803a3007feee53483218817f8b1e" +dependencies = [ + "futures", + "log", + "tokio", +] + +[[package]] +name = "datafusion-datasource" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7256c9cb27a78709dd42d0c80f0178494637209cac6e29d5c93edd09b6721b86" +dependencies = [ + "arrow", + "async-compression", + "async-trait", + "bytes", + "bzip2 0.6.1", + "chrono", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-execution", + 
"datafusion-expr", + "datafusion-physical-expr", + "datafusion-physical-expr-adapter", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-session", + "flate2", + "futures", + "glob", + "itertools", + "log", + "object_store", + "parquet", + "rand", + "tempfile", + "tokio", + "tokio-util", + "url", + "xz2", + "zstd", +] + +[[package]] +name = "datafusion-datasource-csv" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64533a90f78e1684bfb113d200b540f18f268134622d7c96bbebc91354d04825" +dependencies = [ + "arrow", + "async-trait", + "bytes", + "datafusion-catalog", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-session", + "futures", + "object_store", + "regex", + "tokio", +] + +[[package]] +name = "datafusion-datasource-json" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d7ebeb12c77df0aacad26f21b0d033aeede423a64b2b352f53048a75bf1d6e6" +dependencies = [ + "arrow", + "async-trait", + "bytes", + "datafusion-catalog", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-session", + "futures", + "object_store", + "serde_json", + "tokio", +] + +[[package]] +name = "datafusion-datasource-parquet" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09e783c4c7d7faa1199af2df4761c68530634521b176a8d1331ddbc5a5c75133" +dependencies = [ + "arrow", + "async-trait", + "bytes", + "datafusion-catalog", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + 
"datafusion-functions-aggregate", + "datafusion-physical-expr", + "datafusion-physical-expr-adapter", + "datafusion-physical-expr-common", + "datafusion-physical-optimizer", + "datafusion-physical-plan", + "datafusion-pruning", + "datafusion-session", + "futures", + "itertools", + "log", + "object_store", + "parking_lot", + "parquet", + "rand", + "tokio", +] + +[[package]] +name = "datafusion-doc" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99ee6b1d9a80d13f9deb2291f45c07044b8e62fb540dbde2453a18be17a36429" + +[[package]] +name = "datafusion-execution" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4cec0a57653bec7b933fb248d3ffa3fa3ab3bd33bd140dc917f714ac036f531" +dependencies = [ + "arrow", + "async-trait", + "dashmap", + "datafusion-common", + "datafusion-expr", + "futures", + "log", + "object_store", + "parking_lot", + "rand", + "tempfile", + "url", +] + +[[package]] +name = "datafusion-expr" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef76910bdca909722586389156d0aa4da4020e1631994d50fadd8ad4b1aa05fe" +dependencies = [ + "arrow", + "async-trait", + "chrono", + "datafusion-common", + "datafusion-doc", + "datafusion-expr-common", + "datafusion-functions-aggregate-common", + "datafusion-functions-window-common", + "datafusion-physical-expr-common", + "indexmap", + "paste", + "recursive", + "serde_json", + "sqlparser", +] + +[[package]] +name = "datafusion-expr-common" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d155ccbda29591ca71a1344dd6bed26c65a4438072b400df9db59447f590bb6" +dependencies = [ + "arrow", + "datafusion-common", + "indexmap", + "itertools", + "paste", +] + +[[package]] +name = "datafusion-functions" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7de2782136bd6014670fd84fe3b0ca3b3e4106c96403c3ae05c0598577139977" +dependencies = [ + "arrow", + "arrow-buffer", + "base64", + "blake2", + "blake3", + "chrono", + "datafusion-common", + "datafusion-doc", + "datafusion-execution", + "datafusion-expr", + "datafusion-expr-common", + "datafusion-macros", + "hex", + "itertools", + "log", + "md-5", + "rand", + "regex", + "sha2", + "unicode-segmentation", + "uuid", +] + +[[package]] +name = "datafusion-functions-aggregate" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07331fc13603a9da97b74fd8a273f4238222943dffdbbed1c4c6f862a30105bf" +dependencies = [ + "ahash", + "arrow", + "datafusion-common", + "datafusion-doc", + "datafusion-execution", + "datafusion-expr", + "datafusion-functions-aggregate-common", + "datafusion-macros", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "half", + "log", + "paste", +] + +[[package]] +name = "datafusion-functions-aggregate-common" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5951e572a8610b89968a09b5420515a121fbc305c0258651f318dc07c97ab17" +dependencies = [ + "ahash", + "arrow", + "datafusion-common", + "datafusion-expr-common", + "datafusion-physical-expr-common", +] + +[[package]] +name = "datafusion-functions-nested" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdacca9302c3d8fc03f3e94f338767e786a88a33f5ebad6ffc0e7b50364b9ea3" +dependencies = [ + "arrow", + "arrow-ord", + "datafusion-common", + "datafusion-doc", + "datafusion-execution", + "datafusion-expr", + "datafusion-functions", + "datafusion-functions-aggregate", + "datafusion-functions-aggregate-common", + "datafusion-macros", + "datafusion-physical-expr-common", + "itertools", + "log", + "paste", +] + +[[package]] +name = "datafusion-functions-table" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8c37ff8a99434fbbad604a7e0669717c58c7c4f14c472d45067c4b016621d981" +dependencies = [ + "arrow", + "async-trait", + "datafusion-catalog", + "datafusion-common", + "datafusion-expr", + "datafusion-physical-plan", + "parking_lot", + "paste", +] + +[[package]] +name = "datafusion-functions-window" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48e2aea7c79c926cffabb13dc27309d4eaeb130f4a21c8ba91cdd241c813652b" +dependencies = [ + "arrow", + "datafusion-common", + "datafusion-doc", + "datafusion-expr", + "datafusion-functions-window-common", + "datafusion-macros", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "log", + "paste", +] + +[[package]] +name = "datafusion-functions-window-common" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fead257ab5fd2ffc3b40fda64da307e20de0040fe43d49197241d9de82a487f" +dependencies = [ + "datafusion-common", + "datafusion-physical-expr-common", +] + +[[package]] +name = "datafusion-macros" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec6f637bce95efac05cdfb9b6c19579ed4aa5f6b94d951cfa5bb054b7bb4f730" +dependencies = [ + "datafusion-expr", + "quote", + "syn", +] + +[[package]] +name = "datafusion-optimizer" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6583ef666ae000a613a837e69e456681a9faa96347bf3877661e9e89e141d8a" +dependencies = [ + "arrow", + "chrono", + "datafusion-common", + "datafusion-expr", + "datafusion-expr-common", + "datafusion-physical-expr", + "indexmap", + "itertools", + "log", + "recursive", + "regex", + "regex-syntax", +] + +[[package]] +name = "datafusion-physical-expr" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8668103361a272cbbe3a61f72eca60c9b7c706e87cc3565bcf21e2b277b84f6" +dependencies = [ + "ahash", + "arrow", + "datafusion-common", 
+ "datafusion-expr", + "datafusion-expr-common", + "datafusion-functions-aggregate-common", + "datafusion-physical-expr-common", + "half", + "hashbrown 0.14.5", + "indexmap", + "itertools", + "log", + "parking_lot", + "paste", + "petgraph", +] + +[[package]] +name = "datafusion-physical-expr-adapter" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "815acced725d30601b397e39958e0e55630e0a10d66ef7769c14ae6597298bb0" +dependencies = [ + "arrow", + "datafusion-common", + "datafusion-expr", + "datafusion-functions", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "itertools", +] + +[[package]] +name = "datafusion-physical-expr-common" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6652fe7b5bf87e85ed175f571745305565da2c0b599d98e697bcbedc7baa47c3" +dependencies = [ + "ahash", + "arrow", + "datafusion-common", + "datafusion-expr-common", + "hashbrown 0.14.5", + "itertools", +] + +[[package]] +name = "datafusion-physical-optimizer" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49b7d623eb6162a3332b564a0907ba00895c505d101b99af78345f1acf929b5c" +dependencies = [ + "arrow", + "datafusion-common", + "datafusion-execution", + "datafusion-expr", + "datafusion-expr-common", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-pruning", + "itertools", + "log", + "recursive", +] + +[[package]] +name = "datafusion-physical-plan" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2f7f778a1a838dec124efb96eae6144237d546945587557c9e6936b3414558c" +dependencies = [ + "ahash", + "arrow", + "arrow-ord", + "arrow-schema", + "async-trait", + "chrono", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-execution", + "datafusion-expr", + "datafusion-functions-aggregate-common", + 
"datafusion-functions-window-common", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "futures", + "half", + "hashbrown 0.14.5", + "indexmap", + "itertools", + "log", + "parking_lot", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "datafusion-pruning" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd1e59e2ca14fe3c30f141600b10ad8815e2856caa59ebbd0e3e07cd3d127a65" +dependencies = [ + "arrow", + "arrow-schema", + "datafusion-common", + "datafusion-datasource", + "datafusion-expr-common", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "itertools", + "log", +] + +[[package]] +name = "datafusion-session" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21ef8e2745583619bd7a49474e8f45fbe98ebb31a133f27802217125a7b3d58d" +dependencies = [ + "arrow", + "async-trait", + "dashmap", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr", + "datafusion-physical-plan", + "datafusion-sql", + "futures", + "itertools", + "log", + "object_store", + "parking_lot", + "tokio", +] + +[[package]] +name = "datafusion-sql" +version = "50.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89abd9868770386fede29e5a4b14f49c0bf48d652c3b9d7a8a0332329b87d50b" +dependencies = [ + "arrow", + "bigdecimal", + "datafusion-common", + "datafusion-expr", + "indexmap", + "log", + "recursive", + "regex", + "sqlparser", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + +[[package]] +name = "directories" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + +[[package]] +name = "fastrand" +version = 
"2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "find-msvc-tools" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f449e6c6c08c865631d4890cfacf252b3d396c9bcc83adb6623cdb02a8336c41" + +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + +[[package]] +name = "flatbuffers" +version = "25.12.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35f6839d7b3b98adde531effaf34f0c2badc6f4735d26fe74709d8e513a96ef3" +dependencies = [ + "bitflags", + "rustc_version", +] + +[[package]] +name = "flate2" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369" +dependencies = [ + "crc32fast", + "miniz_oxide", + "zlib-rs", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasip2", + "wasm-bindgen", +] + +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + +[[package]] +name = "h2" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "half" +version = "2.7.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" +dependencies = [ + "cfg-if", + "crunchy", + "num-traits", + "zerocopy", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", + "allocator-api2", +] + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "foldhash", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "http" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +dependencies = [ + "bytes", + "itoa", +] + +[[package]] +name = "http-body" 
+version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "humantime" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" + +[[package]] +name = "hyper" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" +dependencies = [ + "atomic-waker", + "bytes", + "futures-channel", + "futures-core", + "h2", + "http", + "http-body", + "httparse", + "itoa", + "pin-project-lite", + "pin-utils", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +dependencies = [ + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", + "webpki-roots", +] + +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper", + "hyper-util", + "native-tls", + "tokio", + 
"tokio-native-tls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" +dependencies = [ + "base64", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2", + "system-configuration", + "tokio", + "tower-service", + "tracing", + "windows-registry", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + 
"icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + +[[package]] +name = "icu_properties" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "2.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" +dependencies = [ + "equivalent", + "hashbrown 0.16.1", +] + +[[package]] +name = "integer-encoding" 
+version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" + +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.4", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "lexical-core" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d8d125a277f807e55a77304455eb7b1cb52f2b18c143b60e766c120bd64a594" +dependencies = [ + "lexical-parse-float", + 
"lexical-parse-integer", + "lexical-util", + "lexical-write-float", + "lexical-write-integer", +] + +[[package]] +name = "lexical-parse-float" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52a9f232fbd6f550bc0137dcb5f99ab674071ac2d690ac69704593cb4abbea56" +dependencies = [ + "lexical-parse-integer", + "lexical-util", +] + +[[package]] +name = "lexical-parse-integer" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a7a039f8fb9c19c996cd7b2fcce303c1b2874fe1aca544edc85c4a5f8489b34" +dependencies = [ + "lexical-util", +] + +[[package]] +name = "lexical-util" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2604dd126bb14f13fb5d1bd6a66155079cb9fa655b37f875b3a742c705dbed17" + +[[package]] +name = "lexical-write-float" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50c438c87c013188d415fbabbb1dceb44249ab81664efbd31b14ae55dabb6361" +dependencies = [ + "lexical-util", + "lexical-write-integer", +] + +[[package]] +name = "lexical-write-integer" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "409851a618475d2d5796377cad353802345cba92c867d9fbcde9cf4eac4e14df" +dependencies = [ + "lexical-util", +] + +[[package]] +name = "libbz2-rs-sys" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c4a545a15244c7d945065b5d392b2d2d7f21526fba56ce51467b06ed445e8f7" + +[[package]] +name = "libc" +version = "0.2.180" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" + +[[package]] +name = "libm" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "libredox" +version = "0.1.12" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" +dependencies = [ + "bitflags", + "libc", +] + +[[package]] +name = "libsqlite3-sys" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + +[[package]] +name = "litemap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "lz4_flex" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08ab2867e3eeeca90e844d1940eab391c9dc5228783db2ed999acbc0a9ed375a" +dependencies = [ + "twox-hash", +] + +[[package]] +name = "lzma-sys" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fda04ab3764e6cde78b9974eec4f779acaba7c4e84b36eca3cf77c581b85d27" +dependencies = [ + "cc", + "libc", + "pkg-config", +] + +[[package]] +name = 
"matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest", +] + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", + "simd-adler32", +] + +[[package]] +name = "mio" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.61.2", +] + +[[package]] +name = "native-tls" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "nexus-repository-example" +version = "0.0.1" +dependencies = [ + "anyhow", + "chrono", + "datafusion", + "term-guard", + "tokio", +] + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + 
"libm", +] + +[[package]] +name = "num_cpus" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "memchr", +] + +[[package]] +name = "object_store" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c1be0c6c22ec0817cdc77d3842f721a17fd30ab6965001415b5402a74e6b740" +dependencies = [ + "async-trait", + "bytes", + "chrono", + "futures", + "http", + "humantime", + "itertools", + "parking_lot", + "percent-encoding", + "thiserror 2.0.17", + "tokio", + "tracing", + "url", + "walkdir", + "wasm-bindgen-futures", + "web-time", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "openssl" +version = "0.10.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + +[[package]] +name = "openssl-sys" +version = 
"0.9.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-link", +] + +[[package]] +name = "parquet" +version = "56.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dbd48ad52d7dccf8ea1b90a3ddbfaea4f69878dd7683e51c507d4bc52b5b27" +dependencies = [ + "ahash", + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-ipc", + "arrow-schema", + "arrow-select", + "base64", + "brotli", + "bytes", + "chrono", + "flate2", + "futures", + "half", + "hashbrown 0.16.1", + "lz4_flex", + "num", + "num-bigint", + "object_store", + "paste", + "ring", + "seq-macro", + "simdutf8", + "snap", + "thrift", + "tokio", + "twox-hash", + "zstd", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "petgraph" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455" +dependencies = [ + "fixedbitset", + "hashbrown 0.15.5", + "indexmap", + "serde", +] + +[[package]] +name = "phf" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "913273894cec178f401a31ec4b656318d95473527be05c0752cc41cdc32be8b7" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_shared" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06005508882fb681fd97892ecff4b7fd0fee13ef1aa569f8695dae7ab9099981" +dependencies = [ + "siphasher", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "potential_utf" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "proc-macro2" +version = "1.0.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "psm" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d11f2fedc3b7dafdc2851bc52f277377c5473d378859be234bc7ebb593144d01" +dependencies = [ + "ar_archive_writer", + "cc", +] + +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2", + "thiserror 2.0.17", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.17", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.60.2", +] + +[[package]] +name = "quote" +version = "1.0.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = 
"5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" +dependencies = [ + "getrandom 0.3.4", +] + +[[package]] +name = "recursive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0786a43debb760f491b1bc0269fe5e84155353c67482b9e60d0cfb596054b43e" +dependencies = [ + "recursive-proc-macro-impl", + "stacker", +] + +[[package]] +name = "recursive-proc-macro-impl" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76009fbe0614077fc1a2ce255e3a1881a2e3a3527097d5dc6d8212c585e7e38b" +dependencies = [ + "quote", + "syn", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags", +] + +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.17", + "libredox", + "thiserror 1.0.69", +] + +[[package]] +name = "regex" +version = "1.12.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" + +[[package]] +name = "reqwest" +version = "0.12.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" +dependencies = [ + "base64", + "bytes", + "encoding_rs", + "futures-core", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-tls", + "hyper-util", + "js-sys", + "log", + "mime", + "native-tls", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-native-tls", + "tokio-rustls", + "tower", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.17", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rusqlite" +version = "0.32.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7753b721174eb8ff87a9a0e799e2d7bc3749323e773db92e0984debb00019d6e" +dependencies = [ 
+ "bitflags", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", +] + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rustix" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls" +version = "0.23.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" +dependencies = [ + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pki-types" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" +dependencies = [ + "web-time", + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "ryu" +version = "1.0.22" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "schannel" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" + +[[package]] +name = "seq-macro" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc711410fbe7399f390ca1c3b60ad0f53f80e95c5eb935e52268a0e2cd49acc" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" +dependencies = [ + "errno", + "libc", +] + +[[package]] +name = "simd-adler32" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" + +[[package]] +name = "simdutf8" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" + +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + +[[package]] +name = "slab" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "snap" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" + +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "sqlparser" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec4b661c54b1e4b603b37873a18c59920e4c51ea8ea2cf527d925424dbd4437c" +dependencies = [ + "log", + "recursive", + "sqlparser_derive", +] + +[[package]] +name = "sqlparser_derive" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "da5fc6819faabb412da764b99d3b713bb55083c11e7e0c00144d386cd6a1939c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "stacker" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1f8b29fb42aafcea4edeeb6b2f2d7ecd0d969c48b4cf0d2e64aafc471dd6e59" +dependencies = [ + "cc", + "cfg-if", + "libc", + "psm", + "windows-sys 0.59.0", +] + +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.114" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "tempfile" +version = "3.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" +dependencies = [ + "fastrand", + "getrandom 0.3.4", + "once_cell", + "rustix", + "windows-sys 0.61.2", +] + +[[package]] +name = "term-guard" +version = "0.0.2" +dependencies = [ + "arrow", + "async-trait", + "base64", + "chrono", + "datafusion", + "directories", + "futures", + "glob", + "hex", + "num_cpus", + "once_cell", + "rand", + "regex", + "reqwest", + "ring", + "rusqlite", + "serde", + "serde_json", + "sha2", + "thiserror 2.0.17", + "tokio", + "tracing", + "tracing-subscriber", + "zeroize", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +dependencies = [ + "thiserror-impl 2.0.17", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "thrift" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e54bc85fc7faa8bc175c4bab5b92ba8d9a3ce893d0e9f42cc455c8ab16a9e09" +dependencies = [ + "byteorder", + "integer-encoding", + "ordered-float", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.49.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" +dependencies = [ + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-macros" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tower" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +dependencies = [ + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + 
"iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" +dependencies = [ + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "twox-hash" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ea3136b675547379c4bd395ca6b938e5ad3c3d20fad76e7fe85f9e0d011419c" + +[[package]] +name = "typenum" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + +[[package]] +name = "unicode-width" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + 
+[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "uuid" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" +dependencies = [ + "getrandom 0.3.4", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", 
+] + +[[package]] +name = "wasm-bindgen" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "web-sys" +version = "0.3.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" 
+version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12bed680863276c63889429bfd6cab3b99943659923822de1c8a39c49e4d722c" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-registry" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720" +dependencies = [ + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + 
"windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "wit-bindgen" 
+version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "xz2" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388c44dc09d76f1536602ead6d325eb532f5c122f17782bd57fb47baeeb767e2" +dependencies = [ + "lzma-sys", +] + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zlib-rs" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40990edd51aae2c2b6907af74ffb635029d5788228222c4bb811e9351c0caad3" + +[[package]] +name = "zmij" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd8f3f50b848df28f887acb68e41201b5aea6bc8a8dacc00fb40635ff9a72fea" + +[[package]] +name = "zstd" +version = "0.13.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.16+zstd.1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" +dependencies = [ + "cc", + "pkg-config", +] diff --git a/docs/examples/nexus-repository/src/main.rs b/docs/examples/nexus-repository/src/main.rs index aea54fb..5ad520c 100644 --- a/docs/examples/nexus-repository/src/main.rs +++ b/docs/examples/nexus-repository/src/main.rs @@ -32,7 +32,12 @@ async fn main() -> Result<()> { println!("Connecting to Term Nexus..."); // Step 2: Create repository and verify connectivity - let repository = NexusRepository::new(config)?; + let mut repository = NexusRepository::new(config)?; + + // Set up offline cache for resilience (optional but recommended) + if let Err(e) = repository.setup_cache(None) { + println!("Note: Offline cache not available: {}", e); + } match repository.health_check().await { Ok(health) => { @@ -191,7 +196,7 @@ async fn main() -> Result<()> { // Compare with previous run // Default sort is descending, so index 0 is current run, index 1 is previous if metrics.len() > 1 { - if let Some((_, current_ctx)) = metrics.get(0) { + if let Some((_, current_ctx)) = metrics.first() { if let Some(MetricValue::Long(current_passed)) = current_ctx.get_metric("validation.passed_checks") { From ad836dfae5f4705ebc2f914bd8bc3dd8adb9b529 Mon Sep 17 00:00:00 2001 From: ericpsimon Date: Wed, 14 Jan 2026 08:42:53 -0700 Subject: [PATCH 22/22] docs(examples): finalize nexus-repository example with output - Add Files section listing 
example components - Add Example Output section showing typical run output - Apply cargo fmt formatting to main.rs Co-Authored-By: Claude Opus 4.5 --- docs/examples/nexus-repository/README.md | 54 ++++++++++++++++++++++ docs/examples/nexus-repository/src/main.rs | 40 ++++++++++------ 2 files changed, 79 insertions(+), 15 deletions(-) diff --git a/docs/examples/nexus-repository/README.md b/docs/examples/nexus-repository/README.md index 349760d..f5020b9 100644 --- a/docs/examples/nexus-repository/README.md +++ b/docs/examples/nexus-repository/README.md @@ -14,6 +14,12 @@ This example demonstrates how to persist validation metrics to Term Nexus and qu - Term Nexus API running locally at `http://localhost:8080` - Set the `TERM_API_KEY` environment variable +## Files + +- `src/main.rs` - Nexus repository integration logic +- `data/items.csv` - Sample item inventory data +- `Cargo.toml` - Dependencies + ## Running the Example ```bash @@ -25,6 +31,54 @@ cd docs/examples/nexus-repository TERM_API_KEY=your-api-key cargo run ``` +## Example Output + +``` +=== Term Nexus Repository Example === + +Connecting to Term Nexus... +Connected to Term Nexus v0.1.0 + +Loading item data... +Loaded items table + +Running validation checks... 
+ +Validation complete: 5/6 checks passed + +[X] completeness: Completeness check on 'name' failed (0.90 < 1.00) + metric: 0.90 + +--- Storing Metrics to Nexus --- +Result Key: 1736870400000 +Tags: {"dataset": "items", "environment": "development", "pipeline": "daily-inventory"} +Metrics queued for upload +Metrics uploaded to Nexus + +--- Querying Historical Metrics --- + +Found 2 historical result(s): + + Timestamp: 1736870400000 + Tags: {"dataset": "items", "environment": "development", "pipeline": "daily-inventory"} + Passed checks: Long(5) + + Timestamp: 1736784000000 + Tags: {"dataset": "items", "environment": "development", "pipeline": "daily-inventory"} + Passed checks: Long(5) + +--- Comparison with Previous Run --- + +Previous run timestamp: 1736784000000 +No change in passing checks + +Worker stats: 5 uploaded, 0 failed + +Example complete! Metrics are now stored in Term Nexus. +You can query them using the Nexus API or run this example again +to see historical comparison. +``` + ## Key Concepts Demonstrated 1. 
**NexusRepository**: Implements `MetricsRepository` for cloud persistence diff --git a/docs/examples/nexus-repository/src/main.rs b/docs/examples/nexus-repository/src/main.rs index 5ad520c..55a902e 100644 --- a/docs/examples/nexus-repository/src/main.rs +++ b/docs/examples/nexus-repository/src/main.rs @@ -95,9 +95,11 @@ async fn main() -> Result<()> { ValidationResult::Success { metrics, report } => { (metrics.passed_checks, metrics.total_checks, report) } - ValidationResult::Failure { report } => { - (report.metrics.passed_checks, report.metrics.total_checks, report) - } + ValidationResult::Failure { report } => ( + report.metrics.passed_checks, + report.metrics.total_checks, + report, + ), }; println!( @@ -131,14 +133,8 @@ async fn main() -> Result<()> { let mut context = AnalyzerContext::with_dataset("items"); // Store summary metrics - context.store_metric( - "validation.passed_checks", - MetricValue::Long(passed as i64), - ); - context.store_metric( - "validation.total_checks", - MetricValue::Long(total as i64), - ); + context.store_metric("validation.passed_checks", MetricValue::Long(passed as i64)); + context.store_metric("validation.total_checks", MetricValue::Long(total as i64)); context.store_metric( "validation.success_rate", MetricValue::Double(if total > 0 { @@ -149,10 +145,24 @@ async fn main() -> Result<()> { ); // Store issue count by level - let error_count = report.issues.iter().filter(|i| i.level == Level::Error).count(); - let warning_count = report.issues.iter().filter(|i| i.level == Level::Warning).count(); - context.store_metric("validation.error_count", MetricValue::Long(error_count as i64)); - context.store_metric("validation.warning_count", MetricValue::Long(warning_count as i64)); + let error_count = report + .issues + .iter() + .filter(|i| i.level == Level::Error) + .count(); + let warning_count = report + .issues + .iter() + .filter(|i| i.level == Level::Warning) + .count(); + context.store_metric( + "validation.error_count", + 
MetricValue::Long(error_count as i64), + ); + context.store_metric( + "validation.warning_count", + MetricValue::Long(warning_count as i64), + ); // Save to Nexus repository.save(result_key.clone(), context).await?;