From d78ba32ff9e056b7f54b15cd9ff0a389e2bbf4c2 Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 14:53:21 +0100 Subject: [PATCH 01/40] cache value for default 30s to allow for mobile retries --- src/http_relay.rs | 194 ++++++++++++++++++++++++++++++++++++++++++-- src/waiting_list.rs | 55 ++++++++++++- 2 files changed, 243 insertions(+), 6 deletions(-) diff --git a/src/http_relay.rs b/src/http_relay.rs index 52d50c9..ff0c0d3 100644 --- a/src/http_relay.rs +++ b/src/http_relay.rs @@ -27,6 +27,9 @@ use crate::waiting_list::WaitingList; /// This is to prevent memory leaks and to keep the server responsive. const DEFAULT_REQUEST_TIMEOUT: Duration = Duration::from_secs(10 * 60); +/// The default time-to-live for cached values after first consumer retrieves them. +const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(30); + #[derive(Clone)] struct AppState { pub config: Config, @@ -46,6 +49,8 @@ impl AppState { struct Config { pub http_port: u16, pub request_timeout: Duration, + /// How long to keep values cached after the first consumer retrieves them. + pub cache_ttl: Duration, } impl Default for Config { @@ -53,6 +58,7 @@ impl Default for Config { Self { http_port: 0, request_timeout: DEFAULT_REQUEST_TIMEOUT, + cache_ttl: DEFAULT_CACHE_TTL, } } } @@ -69,6 +75,15 @@ impl HttpRelayBuilder { self } + /// Configure the TTL for cached values (default: 30 seconds). + /// Values remain available for this duration after the first consumer + /// retrieves them. + pub fn cache_ttl(mut self, ttl: Duration) -> Self { + self.0.cache_ttl = ttl; + + self + } + /// Start running an HTTP relay. 
pub async fn run(self) -> Result { HttpRelay::start(self.0).await @@ -100,7 +115,7 @@ impl HttpRelay { } async fn start(config: Config) -> Result { - let (app, _) = Self::create_app(config.clone())?; + let (app, app_state) = Self::create_app(config.clone())?; let http_handle = Handle::new(); let shutdown_handle = http_handle.clone(); @@ -116,6 +131,20 @@ impl HttpRelay { .map_err(|error| tracing::error!(?error, "HttpRelay http server error")) }); + // Spawn background task to clean up expired cache entries + let cleanup_interval = Duration::from_secs(1); + let pending_list = app_state.pending_list.clone(); + tokio::spawn(async move { + loop { + tokio::time::sleep(cleanup_interval).await; + let mut list = pending_list.lock().await; + let removed = list.cleanup_expired_cache(); + if removed > 0 { + tracing::debug!(removed, "Cleaned up expired cache entries"); + } + } + }); + Ok(Self { http_handle: shutdown_handle, http_address, @@ -178,13 +207,20 @@ mod link { ) -> impl IntoResponse { let mut pending_list = state.pending_list.lock().await; + // First, check if there's a cached value + if let Some(cached_body) = pending_list.get_cached(&id) { + return (StatusCode::OK, cached_body); + } + if let Some(producer) = pending_list.remove_producer(&id) { - // Producer is ready to send data + // Producer is ready to send data - cache it for future consumers + let body = producer.body.clone(); + pending_list.insert_cached(&id, body, state.config.cache_ttl); let _ = producer.completion.send(()); return (StatusCode::OK, producer.body); }; - // No producer ready. Insert consumer into pending list and wait for producer to send data. + // No producer ready. Insert consumer into pending list and wait. 
let receiver = pending_list.insert_consumer(&id); drop(pending_list); @@ -209,13 +245,17 @@ mod link { ) -> impl IntoResponse { let mut pending_list = state.pending_list.lock().await; + // If there's a cached value, remove it (new POST overwrites) + pending_list.cache.remove(&channel); + if let Some(consumer) = pending_list.remove_consumer(&channel) { - // Consumer is ready to receive data + // Consumer is ready to receive data - also cache it + pending_list.insert_cached(&channel, body.clone(), state.config.cache_ttl); let _ = consumer.message_sender.send(body); return (StatusCode::OK, Bytes::new()); }; - // No consumer ready. Insert producer into pending list and wait for consumer to request data. + // No consumer ready. Insert producer into pending list and wait. let receiver = pending_list.insert_producer(&channel, body); drop(pending_list); match tokio::time::timeout(state.config.request_timeout, receiver).await { @@ -302,4 +342,148 @@ mod tests { assert_eq!(response.text(), "Request timed out"); assert!(state.pending_list.lock().await.is_empty()); } + + #[tokio::test] + async fn test_cached_value_multiple_consumers() { + let config = Config { + cache_ttl: Duration::from_secs(5), + ..Config::default() + }; + let (app, state) = HttpRelay::create_app(config).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + + // Producer sends data + let producer = async { + let body = axum::body::Bytes::from_static(b"cached data"); + let response = server.post("/link/cache-test").bytes(body).await; + assert_eq!(response.status_code(), 200); + }; + + // First consumer receives it + let first_consumer = async { + tokio::time::sleep(Duration::from_millis(50)).await; + let response = server.get("/link/cache-test").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "cached data"); + }; + + tokio::join!(producer, first_consumer); + + // Value should now be cached + assert_eq!(state.pending_list.lock().await.cache_len(), 1); + + // 
Second consumer can get the same cached value + let response = server.get("/link/cache-test").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "cached data"); + + // Third consumer also gets it + let response = server.get("/link/cache-test").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "cached data"); + } + + #[tokio::test] + async fn test_cache_expires() { + let config = Config { + cache_ttl: Duration::from_millis(50), + request_timeout: Duration::from_millis(100), + ..Config::default() + }; + let (app, state) = HttpRelay::create_app(config).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + + // Producer sends, consumer receives (value gets cached) + let producer = async { + let body = axum::body::Bytes::from_static(b"ephemeral"); + server.post("/link/expire-test").bytes(body).await; + }; + let consumer = async { + tokio::time::sleep(Duration::from_millis(10)).await; + let response = server.get("/link/expire-test").await; + assert_eq!(response.text(), "ephemeral"); + }; + tokio::join!(producer, consumer); + + // Value is cached + assert!(state.pending_list.lock().await.get_cached("expire-test").is_some()); + + // Wait for cache to expire + tokio::time::sleep(Duration::from_millis(1500)).await; + + // Value should be expired (get_cached returns None) + assert!(state.pending_list.lock().await.get_cached("expire-test").is_none()); + } + + #[tokio::test] + async fn test_post_overwrites_cache() { + let config = Config { + cache_ttl: Duration::from_secs(5), + ..Config::default() + }; + let (app, state) = HttpRelay::create_app(config).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + + // First producer-consumer pair + let producer1 = async { + let body = axum::body::Bytes::from_static(b"first value"); + server.post("/link/overwrite-test").bytes(body).await; + }; + let consumer1 = async { + tokio::time::sleep(Duration::from_millis(50)).await; + 
server.get("/link/overwrite-test").await + }; + tokio::join!(producer1, consumer1); + + // Verify first value is cached + let cached = state.pending_list.lock().await.get_cached("overwrite-test"); + assert_eq!(cached.unwrap().as_ref(), b"first value"); + + // Second producer posts new value - should overwrite and wait + let producer2 = async { + let body = axum::body::Bytes::from_static(b"second value"); + server.post("/link/overwrite-test").bytes(body).await; + }; + let consumer2 = async { + tokio::time::sleep(Duration::from_millis(50)).await; + server.get("/link/overwrite-test").await + }; + let (_, response) = tokio::join!(producer2, consumer2); + assert_eq!(response.text(), "second value"); + + // Verify new value is cached + let cached = state.pending_list.lock().await.get_cached("overwrite-test"); + assert_eq!(cached.unwrap().as_ref(), b"second value"); + } + + #[tokio::test] + async fn test_consumer_first_then_producer_caches() { + let config = Config { + cache_ttl: Duration::from_secs(5), + ..Config::default() + }; + let (app, state) = HttpRelay::create_app(config).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + + // Consumer waits first, then producer sends + let consumer = async { + let response = server.get("/link/consumer-first").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "delayed data"); + }; + + let producer = async { + tokio::time::sleep(Duration::from_millis(100)).await; + let body = axum::body::Bytes::from_static(b"delayed data"); + let response = server.post("/link/consumer-first").bytes(body).await; + assert_eq!(response.status_code(), 200); + }; + + tokio::join!(consumer, producer); + + // Value should be cached for subsequent consumers + assert_eq!(state.pending_list.lock().await.cache_len(), 1); + let response = server.get("/link/consumer-first").await; + assert_eq!(response.text(), "delayed data"); + } } diff --git a/src/waiting_list.rs b/src/waiting_list.rs index aac64a4..4b334c1 
100644 --- a/src/waiting_list.rs +++ b/src/waiting_list.rs @@ -1,13 +1,38 @@ use std::collections::HashMap; +use std::time::Instant; use axum::body::Bytes; use tokio::sync::oneshot; -/// A list of waiting producers and consumers. +/// A cached value with its expiration time. +pub struct CachedValue { + /// The cached payload. + pub body: Bytes, + /// When this cached value expires. + pub expires_at: Instant, +} + +impl CachedValue { + /// Creates a new cached value that expires after the given duration. + pub fn new(body: Bytes, ttl: std::time::Duration) -> Self { + Self { + body, + expires_at: Instant::now() + ttl, + } + } + + /// Returns true if this cached value has expired. + pub fn is_expired(&self) -> bool { + Instant::now() >= self.expires_at + } +} + +/// A list of waiting producers and consumers, plus cached values. #[derive(Default)] pub struct WaitingList { pub pending_producers: HashMap, pub pending_consumers: HashMap, + pub cache: HashMap, } impl WaitingList { @@ -30,6 +55,30 @@ impl WaitingList { self.pending_consumers.insert(id.to_string(), consumer); message_receiver } + + /// Gets a cached value if it exists and hasn't expired. + pub fn get_cached(&self, id: &str) -> Option { + self.cache.get(id).and_then(|cached| { + if cached.is_expired() { + None + } else { + Some(cached.body.clone()) + } + }) + } + + /// Inserts a value into the cache with the given TTL. + pub fn insert_cached(&mut self, id: &str, body: Bytes, ttl: std::time::Duration) { + self.cache + .insert(id.to_string(), CachedValue::new(body, ttl)); + } + + /// Removes expired entries from the cache. Returns the number removed. 
+ pub fn cleanup_expired_cache(&mut self) -> usize { + let before = self.cache.len(); + self.cache.retain(|_, v| !v.is_expired()); + before - self.cache.len() + } } #[cfg(test)] @@ -37,6 +86,10 @@ impl WaitingList { pub fn is_empty(&self) -> bool { self.pending_producers.is_empty() && self.pending_consumers.is_empty() } + + pub fn cache_len(&self) -> usize { + self.cache.len() + } } /// A producer that is waiting for a consumer to request data. From 33a917df33fce69c935490b22d88a695e1c03781 Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 15:37:38 +0100 Subject: [PATCH 02/40] added link2 with shorter timeout --- Cargo.toml | 6 + src/http_relay.rs | 489 --------------------------- src/http_relay/link.rs | 223 ++++++++++++ src/http_relay/link2.rs | 309 +++++++++++++++++ src/http_relay/mod.rs | 11 + src/http_relay/server.rs | 227 +++++++++++++ src/{ => http_relay}/waiting_list.rs | 51 ++- src/lib.rs | 4 +- src/main.rs | 92 +++++ 9 files changed, 908 insertions(+), 504 deletions(-) delete mode 100644 src/http_relay.rs create mode 100644 src/http_relay/link.rs create mode 100644 src/http_relay/link2.rs create mode 100644 src/http_relay/mod.rs create mode 100644 src/http_relay/server.rs rename src/{ => http_relay}/waiting_list.rs (70%) create mode 100644 src/main.rs diff --git a/Cargo.toml b/Cargo.toml index a6bd626..e958baf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,11 +14,17 @@ categories = ["web-programming"] anyhow = "1.0.99" axum = "0.8.6" axum-server = "0.7.2" +clap = { version = "4", features = ["derive"] } futures-util = "0.3.31" tokio = { version = "1.47.1", features = ["full"] } tracing = "0.1.41" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } url = "2.5.4" tower-http = { version = "0.6.6", features = ["cors", "trace"] } +[[bin]] +name = "http-relay" +path = "src/main.rs" + [dev-dependencies] axum-test = "17.3.0" diff --git a/src/http_relay.rs b/src/http_relay.rs deleted file mode 100644 index ff0c0d3..0000000 --- 
a/src/http_relay.rs +++ /dev/null @@ -1,489 +0,0 @@ -//! https://httprelay.io/features/link/ - -use std::{ - net::{SocketAddr, TcpListener}, - sync::Arc, - time::Duration, -}; - -use anyhow::Result; - -use axum::{ - body::Bytes, - extract::{Path, State}, - response::IntoResponse, - routing::get, - Router, -}; -use axum_server::Handle; -use tokio::sync::Mutex; - -use tower_http::{cors::CorsLayer, trace::TraceLayer}; -use url::Url; - -use crate::waiting_list::WaitingList; - -/// The timeout for a request to be considered unused. -/// This is to prevent memory leaks and to keep the server responsive. -const DEFAULT_REQUEST_TIMEOUT: Duration = Duration::from_secs(10 * 60); - -/// The default time-to-live for cached values after first consumer retrieves them. -const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(30); - -#[derive(Clone)] -struct AppState { - pub config: Config, - pub pending_list: Arc>, -} - -impl AppState { - pub fn new(config: Config) -> Self { - Self { - config, - pending_list: Arc::new(Mutex::new(WaitingList::default())), - } - } -} - -#[derive(Debug, Clone)] -struct Config { - pub http_port: u16, - pub request_timeout: Duration, - /// How long to keep values cached after the first consumer retrieves them. - pub cache_ttl: Duration, -} - -impl Default for Config { - fn default() -> Self { - Self { - http_port: 0, - request_timeout: DEFAULT_REQUEST_TIMEOUT, - cache_ttl: DEFAULT_CACHE_TTL, - } - } -} - -/// Builder for [HttpRelay]. -#[derive(Debug, Default)] -pub struct HttpRelayBuilder(Config); - -impl HttpRelayBuilder { - /// Configure the port used for HTTP server. - pub fn http_port(mut self, port: u16) -> Self { - self.0.http_port = port; - - self - } - - /// Configure the TTL for cached values (default: 30 seconds). - /// Values remain available for this duration after the first consumer - /// retrieves them. - pub fn cache_ttl(mut self, ttl: Duration) -> Self { - self.0.cache_ttl = ttl; - - self - } - - /// Start running an HTTP relay. 
- pub async fn run(self) -> Result { - HttpRelay::start(self.0).await - } -} - -/// An implementation of _some_ of [Http relay spec](https://httprelay.io/). -pub struct HttpRelay { - pub(crate) http_handle: Handle, - http_address: SocketAddr, -} - -impl HttpRelay { - /// Creates the HTTP router for the HTTP relay. - /// Extracted as its own function to make it easier to test. - fn create_app(config: Config) -> Result<(Router, AppState)> { - let app_state = AppState::new(config); - - let app = Router::new() - .route( - "/link/{id}", - get(link::get_handler).post(link::post_handler), - ) - .layer(CorsLayer::very_permissive()) - .layer(TraceLayer::new_for_http()) - .with_state(app_state.clone()); - - Ok((app, app_state)) - } - - async fn start(config: Config) -> Result { - let (app, app_state) = Self::create_app(config.clone())?; - - let http_handle = Handle::new(); - let shutdown_handle = http_handle.clone(); - - let http_listener = TcpListener::bind(SocketAddr::from(([0, 0, 0, 0], config.http_port)))?; - let http_address = http_listener.local_addr()?; - - tokio::spawn(async move { - axum_server::from_tcp(http_listener) - .handle(http_handle.clone()) - .serve(app.into_make_service()) - .await - .map_err(|error| tracing::error!(?error, "HttpRelay http server error")) - }); - - // Spawn background task to clean up expired cache entries - let cleanup_interval = Duration::from_secs(1); - let pending_list = app_state.pending_list.clone(); - tokio::spawn(async move { - loop { - tokio::time::sleep(cleanup_interval).await; - let mut list = pending_list.lock().await; - let removed = list.cleanup_expired_cache(); - if removed > 0 { - tracing::debug!(removed, "Cleaned up expired cache entries"); - } - } - }); - - Ok(Self { - http_handle: shutdown_handle, - http_address, - }) - } - - /// Create [HttpRelayBuilder]. - pub fn builder() -> HttpRelayBuilder { - HttpRelayBuilder::default() - } - - /// Returns the HTTP address of this http relay. 
- pub fn http_address(&self) -> SocketAddr { - self.http_address - } - - /// Returns the localhost Url of this server. - pub fn local_url(&self) -> Url { - Url::parse(&format!("http://localhost:{}", self.http_address.port())) - .expect("local_url should be formatted fine") - } - - /// Returns the localhost URL of Link endpoints - pub fn local_link_url(&self) -> Url { - let mut url = self.local_url(); - - let mut segments = url - .path_segments_mut() - .expect("HttpRelay::local_link_url path_segments_mut"); - - segments.push("link"); - - drop(segments); - - url - } - - /// Gracefully shuts down the HTTP relay. - pub async fn shutdown(self) -> anyhow::Result<()> { - self.http_handle - .graceful_shutdown(Some(Duration::from_secs(1))); - Ok(()) - } -} - -impl Drop for HttpRelay { - fn drop(&mut self) { - self.http_handle.shutdown(); - } -} - -mod link { - use super::*; - use axum::http::StatusCode; - - /// A consumer requests data using GET method. - pub async fn get_handler( - Path(id): Path, - State(state): State, - ) -> impl IntoResponse { - let mut pending_list = state.pending_list.lock().await; - - // First, check if there's a cached value - if let Some(cached_body) = pending_list.get_cached(&id) { - return (StatusCode::OK, cached_body); - } - - if let Some(producer) = pending_list.remove_producer(&id) { - // Producer is ready to send data - cache it for future consumers - let body = producer.body.clone(); - pending_list.insert_cached(&id, body, state.config.cache_ttl); - let _ = producer.completion.send(()); - return (StatusCode::OK, producer.body); - }; - - // No producer ready. Insert consumer into pending list and wait. - let receiver = pending_list.insert_consumer(&id); - drop(pending_list); - - // Wait for the producer, but with a timeout - match tokio::time::timeout(state.config.request_timeout, receiver).await { - Ok(Ok(message)) => (StatusCode::OK, message), - Ok(Err(_)) => (StatusCode::NOT_FOUND, "Not Found".into()), - Err(_) => { - // Timeout. 
Remove the consumer from the pending list again - let mut pending_list = state.pending_list.lock().await; - pending_list.remove_consumer(&id); - (StatusCode::REQUEST_TIMEOUT, "Request timed out".into()) - } - } - } - - /// A producer sends data using POST method. - pub async fn post_handler( - Path(channel): Path, - State(state): State, - body: Bytes, - ) -> impl IntoResponse { - let mut pending_list = state.pending_list.lock().await; - - // If there's a cached value, remove it (new POST overwrites) - pending_list.cache.remove(&channel); - - if let Some(consumer) = pending_list.remove_consumer(&channel) { - // Consumer is ready to receive data - also cache it - pending_list.insert_cached(&channel, body.clone(), state.config.cache_ttl); - let _ = consumer.message_sender.send(body); - return (StatusCode::OK, Bytes::new()); - }; - - // No consumer ready. Insert producer into pending list and wait. - let receiver = pending_list.insert_producer(&channel, body); - drop(pending_list); - match tokio::time::timeout(state.config.request_timeout, receiver).await { - Ok(_) => (StatusCode::OK, Bytes::new()), - Err(_) => { - // Timeout. 
Remove the producer from the pending list again - let mut pending_list = state.pending_list.lock().await; - pending_list.remove_producer(&channel); - (StatusCode::REQUEST_TIMEOUT, "Request timed out".into()) - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[tokio::test] - async fn test_delayed_producer() { - let (app, state) = HttpRelay::create_app(Config::default()).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - let consumer = async { - let response = server.get("/link/123").await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), "Hello, world!"); - }; - - let producer = async { - tokio::time::sleep(Duration::from_millis(200)).await; // Delayed produce to ensure consumer is waiting - let body = axum::body::Bytes::from_static(b"Hello, world!"); - let response = server.post("/link/123").bytes(body).await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), ""); - }; - - tokio::join!(consumer, producer); - assert!(state.pending_list.lock().await.is_empty()); - } - - #[tokio::test] - async fn test_delayed_consumer() { - let (app, state) = HttpRelay::create_app(Config::default()).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - let consumer = async { - tokio::time::sleep(Duration::from_millis(200)).await; // Delayed consumer to ensure producer is waiting - let response = server.get("/link/123").await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), "Hello, world!"); - }; - - let producer = async { - let body = axum::body::Bytes::from_static(b"Hello, world!"); - let response = server.post("/link/123").bytes(body).await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), ""); - }; - - tokio::join!(consumer, producer); - assert!(state.pending_list.lock().await.is_empty()); - } - - #[tokio::test] - async fn test_request_timeout() { - let config = Config { - request_timeout: Duration::from_millis(50), - ..Config::default() 
- }; - let (app, state) = HttpRelay::create_app(config).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - // Consumer request timed out - let response = server.get("/link/123").await; - assert_eq!(response.status_code(), 408); - assert_eq!(response.text(), "Request timed out"); - assert!(state.pending_list.lock().await.is_empty()); - - // Producer request timed out - let body = axum::body::Bytes::from_static(b"Hello, world!"); - let response = server.post("/link/123").bytes(body).await; - assert_eq!(response.status_code(), 408); - assert_eq!(response.text(), "Request timed out"); - assert!(state.pending_list.lock().await.is_empty()); - } - - #[tokio::test] - async fn test_cached_value_multiple_consumers() { - let config = Config { - cache_ttl: Duration::from_secs(5), - ..Config::default() - }; - let (app, state) = HttpRelay::create_app(config).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - // Producer sends data - let producer = async { - let body = axum::body::Bytes::from_static(b"cached data"); - let response = server.post("/link/cache-test").bytes(body).await; - assert_eq!(response.status_code(), 200); - }; - - // First consumer receives it - let first_consumer = async { - tokio::time::sleep(Duration::from_millis(50)).await; - let response = server.get("/link/cache-test").await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), "cached data"); - }; - - tokio::join!(producer, first_consumer); - - // Value should now be cached - assert_eq!(state.pending_list.lock().await.cache_len(), 1); - - // Second consumer can get the same cached value - let response = server.get("/link/cache-test").await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), "cached data"); - - // Third consumer also gets it - let response = server.get("/link/cache-test").await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), "cached data"); - } - - #[tokio::test] - async fn 
test_cache_expires() { - let config = Config { - cache_ttl: Duration::from_millis(50), - request_timeout: Duration::from_millis(100), - ..Config::default() - }; - let (app, state) = HttpRelay::create_app(config).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - // Producer sends, consumer receives (value gets cached) - let producer = async { - let body = axum::body::Bytes::from_static(b"ephemeral"); - server.post("/link/expire-test").bytes(body).await; - }; - let consumer = async { - tokio::time::sleep(Duration::from_millis(10)).await; - let response = server.get("/link/expire-test").await; - assert_eq!(response.text(), "ephemeral"); - }; - tokio::join!(producer, consumer); - - // Value is cached - assert!(state.pending_list.lock().await.get_cached("expire-test").is_some()); - - // Wait for cache to expire - tokio::time::sleep(Duration::from_millis(1500)).await; - - // Value should be expired (get_cached returns None) - assert!(state.pending_list.lock().await.get_cached("expire-test").is_none()); - } - - #[tokio::test] - async fn test_post_overwrites_cache() { - let config = Config { - cache_ttl: Duration::from_secs(5), - ..Config::default() - }; - let (app, state) = HttpRelay::create_app(config).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - // First producer-consumer pair - let producer1 = async { - let body = axum::body::Bytes::from_static(b"first value"); - server.post("/link/overwrite-test").bytes(body).await; - }; - let consumer1 = async { - tokio::time::sleep(Duration::from_millis(50)).await; - server.get("/link/overwrite-test").await - }; - tokio::join!(producer1, consumer1); - - // Verify first value is cached - let cached = state.pending_list.lock().await.get_cached("overwrite-test"); - assert_eq!(cached.unwrap().as_ref(), b"first value"); - - // Second producer posts new value - should overwrite and wait - let producer2 = async { - let body = axum::body::Bytes::from_static(b"second value"); - 
server.post("/link/overwrite-test").bytes(body).await; - }; - let consumer2 = async { - tokio::time::sleep(Duration::from_millis(50)).await; - server.get("/link/overwrite-test").await - }; - let (_, response) = tokio::join!(producer2, consumer2); - assert_eq!(response.text(), "second value"); - - // Verify new value is cached - let cached = state.pending_list.lock().await.get_cached("overwrite-test"); - assert_eq!(cached.unwrap().as_ref(), b"second value"); - } - - #[tokio::test] - async fn test_consumer_first_then_producer_caches() { - let config = Config { - cache_ttl: Duration::from_secs(5), - ..Config::default() - }; - let (app, state) = HttpRelay::create_app(config).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - // Consumer waits first, then producer sends - let consumer = async { - let response = server.get("/link/consumer-first").await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), "delayed data"); - }; - - let producer = async { - tokio::time::sleep(Duration::from_millis(100)).await; - let body = axum::body::Bytes::from_static(b"delayed data"); - let response = server.post("/link/consumer-first").bytes(body).await; - assert_eq!(response.status_code(), 200); - }; - - tokio::join!(consumer, producer); - - // Value should be cached for subsequent consumers - assert_eq!(state.pending_list.lock().await.cache_len(), 1); - let response = server.get("/link/consumer-first").await; - assert_eq!(response.text(), "delayed data"); - } -} diff --git a/src/http_relay/link.rs b/src/http_relay/link.rs new file mode 100644 index 0000000..45c264d --- /dev/null +++ b/src/http_relay/link.rs @@ -0,0 +1,223 @@ +//! Standard link endpoint without caching - closer to httprelay.io spec. 
+ +use axum::{ + body::Bytes, + extract::{Path, State}, + http::{header, HeaderMap, StatusCode}, + response::{IntoResponse, Response}, +}; + +use super::waiting_list::Message; + +use super::AppState; + +/// Build a response with optional Content-Type header. +fn build_response(status: StatusCode, body: Bytes, content_type: Option) -> Response { + let mut response = (status, body).into_response(); + if let Some(ct) = content_type { + if let Ok(value) = ct.parse() { + response.headers_mut().insert(header::CONTENT_TYPE, value); + } + } + response +} + +/// A consumer requests data using GET method. +pub async fn get_handler( + Path(id): Path, + State(state): State, +) -> Response { + let mut pending_list = state.pending_list.lock().await; + + if let Some(producer) = pending_list.remove_producer(&id) { + let _ = producer.completion.send(()); + return build_response(StatusCode::OK, producer.body, producer.content_type); + }; + + // No producer ready. Insert consumer into pending list and wait. + let receiver = pending_list.insert_consumer(&id); + drop(pending_list); + + // Wait for the producer, but with a timeout + match tokio::time::timeout(state.config.request_timeout, receiver).await { + Ok(Ok(msg)) => build_response(StatusCode::OK, msg.body, msg.content_type), + Ok(Err(_)) => build_response(StatusCode::NOT_FOUND, "Not Found".into(), None), + Err(_) => { + // Timeout. Remove the consumer from the pending list again + let mut pending_list = state.pending_list.lock().await; + pending_list.remove_consumer(&id); + build_response(StatusCode::REQUEST_TIMEOUT, "Request timed out".into(), None) + } + } +} + +/// A producer sends data using POST method. 
+pub async fn post_handler( + Path(channel): Path, + State(state): State, + headers: HeaderMap, + body: Bytes, +) -> impl IntoResponse { + let content_type = headers + .get(header::CONTENT_TYPE) + .and_then(|v| v.to_str().ok()) + .map(|s| s.to_string()); + + let mut pending_list = state.pending_list.lock().await; + + if let Some(consumer) = pending_list.remove_consumer(&channel) { + let msg = Message { + body, + content_type, + }; + let _ = consumer.message_sender.send(msg); + return (StatusCode::OK, Bytes::new()); + }; + + // No consumer ready. Insert producer into pending list and wait. + let receiver = pending_list.insert_producer(&channel, body, content_type); + drop(pending_list); + match tokio::time::timeout(state.config.request_timeout, receiver).await { + Ok(_) => (StatusCode::OK, Bytes::new()), + Err(_) => { + // Timeout. Remove the producer from the pending list again + let mut pending_list = state.pending_list.lock().await; + pending_list.remove_producer(&channel); + (StatusCode::REQUEST_TIMEOUT, "Request timed out".into()) + } + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use crate::http_relay::{Config, HttpRelay}; + + #[tokio::test] + async fn test_delayed_producer() { + let (app, state) = HttpRelay::create_app(Config::default()).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + + let consumer = async { + let response = server.get("/link/123").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "Hello, world!"); + }; + + let producer = async { + tokio::time::sleep(Duration::from_millis(200)).await; + let body = axum::body::Bytes::from_static(b"Hello, world!"); + let response = server.post("/link/123").bytes(body).await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), ""); + }; + + tokio::join!(consumer, producer); + assert!(state.pending_list.lock().await.is_empty()); + } + + #[tokio::test] + async fn test_delayed_consumer() { + let (app, state) = 
HttpRelay::create_app(Config::default()).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + + let consumer = async { + tokio::time::sleep(Duration::from_millis(200)).await; + let response = server.get("/link/123").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "Hello, world!"); + }; + + let producer = async { + let body = axum::body::Bytes::from_static(b"Hello, world!"); + let response = server.post("/link/123").bytes(body).await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), ""); + }; + + tokio::join!(consumer, producer); + assert!(state.pending_list.lock().await.is_empty()); + } + + #[tokio::test] + async fn test_request_timeout() { + let config = Config { + request_timeout: Duration::from_millis(50), + ..Config::default() + }; + let (app, state) = HttpRelay::create_app(config).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + + // Consumer request timed out + let response = server.get("/link/123").await; + assert_eq!(response.status_code(), 408); + assert_eq!(response.text(), "Request timed out"); + assert!(state.pending_list.lock().await.is_empty()); + + // Producer request timed out + let body = axum::body::Bytes::from_static(b"Hello, world!"); + let response = server.post("/link/123").bytes(body).await; + assert_eq!(response.status_code(), 408); + assert_eq!(response.text(), "Request timed out"); + assert!(state.pending_list.lock().await.is_empty()); + } + + #[tokio::test] + async fn test_no_caching() { + let config = Config { + cache_ttl: Duration::from_secs(5), + request_timeout: Duration::from_millis(100), + ..Config::default() + }; + let (app, state) = HttpRelay::create_app(config).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + + // Producer sends, consumer receives - but no caching on /link/ + let producer = async { + let body = axum::body::Bytes::from_static(b"no cache data"); + server.post("/link/no-cache-test").bytes(body).await; + }; 
+ let consumer = async { + tokio::time::sleep(Duration::from_millis(10)).await; + let response = server.get("/link/no-cache-test").await; + assert_eq!(response.text(), "no cache data"); + }; + tokio::join!(producer, consumer); + + // Value should NOT be cached for /link/ + assert_eq!(state.pending_list.lock().await.cache_len(), 0); + + // Second consumer should timeout since no caching + let response = server.get("/link/no-cache-test").await; + assert_eq!(response.status_code(), 408); + } + + #[tokio::test] + async fn test_content_type_forwarding() { + let (app, _state) = HttpRelay::create_app(Config::default()).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + + let consumer = async { + let response = server.get("/link/ct-test").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), r#"{"key":"value"}"#); + assert_eq!( + response.header("content-type").to_str().unwrap(), + "application/json" + ); + }; + + let producer = async { + tokio::time::sleep(Duration::from_millis(50)).await; + let body = axum::body::Bytes::from_static(br#"{"key":"value"}"#); + server + .post("/link/ct-test") + .content_type("application/json") + .bytes(body) + .await; + }; + + tokio::join!(consumer, producer); + } +} diff --git a/src/http_relay/link2.rs b/src/http_relay/link2.rs new file mode 100644 index 0000000..0343eb2 --- /dev/null +++ b/src/http_relay/link2.rs @@ -0,0 +1,309 @@ +//! Link endpoint with caching enabled for mobile retry support. + +use axum::{ + body::Bytes, + extract::{Path, State}, + http::{header, HeaderMap, StatusCode}, + response::{IntoResponse, Response}, +}; + +use super::waiting_list::Message; + +use super::AppState; + +/// Build a response with optional Content-Type header. 
+fn build_response(status: StatusCode, body: Bytes, content_type: Option) -> Response { + let mut response = (status, body).into_response(); + if let Some(ct) = content_type { + if let Ok(value) = ct.parse() { + response.headers_mut().insert(header::CONTENT_TYPE, value); + } + } + response +} + +/// A consumer requests data using GET method. +pub async fn get_handler( + Path(id): Path, + State(state): State, +) -> Response { + let mut pending_list = state.pending_list.lock().await; + + // First, check if there's a cached value + if let Some(cached) = pending_list.get_cached(&id) { + return build_response(StatusCode::OK, cached.body, cached.content_type); + } + + if let Some(producer) = pending_list.remove_producer(&id) { + // Producer is ready to send data - cache it for future consumers + let body = producer.body.clone(); + let content_type = producer.content_type.clone(); + pending_list.insert_cached(&id, body, content_type.clone(), state.config.cache_ttl); + let _ = producer.completion.send(()); + return build_response(StatusCode::OK, producer.body, content_type); + }; + + // No producer ready. Insert consumer into pending list and wait. + let receiver = pending_list.insert_consumer(&id); + drop(pending_list); + + // Wait for the producer, but with a timeout to avoid proxy timeouts + match tokio::time::timeout(state.config.link2_timeout, receiver).await { + Ok(Ok(msg)) => build_response(StatusCode::OK, msg.body, msg.content_type), + Ok(Err(_)) => build_response(StatusCode::NOT_FOUND, "Not Found".into(), None), + Err(_) => { + // Timeout. Remove the consumer from the pending list again + let mut pending_list = state.pending_list.lock().await; + pending_list.remove_consumer(&id); + build_response(StatusCode::REQUEST_TIMEOUT, "Request timed out".into(), None) + } + } +} + +/// A producer sends data using POST method. 
+pub async fn post_handler( + Path(channel): Path, + State(state): State, + headers: HeaderMap, + body: Bytes, +) -> impl IntoResponse { + let content_type = headers + .get(header::CONTENT_TYPE) + .and_then(|v| v.to_str().ok()) + .map(|s| s.to_string()); + + let mut pending_list = state.pending_list.lock().await; + + // If there's a cached value, remove it (new POST overwrites) + pending_list.cache.remove(&channel); + + if let Some(consumer) = pending_list.remove_consumer(&channel) { + // Consumer is ready to receive data - also cache it + pending_list.insert_cached(&channel, body.clone(), content_type.clone(), state.config.cache_ttl); + let msg = Message { + body, + content_type, + }; + let _ = consumer.message_sender.send(msg); + return (StatusCode::OK, Bytes::new()); + }; + + // No consumer ready. Insert producer into pending list and wait. + let receiver = pending_list.insert_producer(&channel, body, content_type); + drop(pending_list); + match tokio::time::timeout(state.config.link2_timeout, receiver).await { + Ok(_) => (StatusCode::OK, Bytes::new()), + Err(_) => { + // Timeout. 
Remove the producer from the pending list again + let mut pending_list = state.pending_list.lock().await; + pending_list.remove_producer(&channel); + (StatusCode::REQUEST_TIMEOUT, "Request timed out".into()) + } + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use crate::http_relay::{Config, HttpRelay}; + + #[tokio::test] + async fn test_cached_value_multiple_consumers() { + let config = Config { + cache_ttl: Duration::from_secs(5), + ..Config::default() + }; + let (app, state) = HttpRelay::create_app(config).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + + // Producer sends data + let producer = async { + let body = axum::body::Bytes::from_static(b"cached data"); + let response = server.post("/link2/cache-test").bytes(body).await; + assert_eq!(response.status_code(), 200); + }; + + // First consumer receives it + let first_consumer = async { + tokio::time::sleep(Duration::from_millis(50)).await; + let response = server.get("/link2/cache-test").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "cached data"); + }; + + tokio::join!(producer, first_consumer); + + // Value should now be cached + assert_eq!(state.pending_list.lock().await.cache_len(), 1); + + // Second consumer can get the same cached value + let response = server.get("/link2/cache-test").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "cached data"); + + // Third consumer also gets it + let response = server.get("/link2/cache-test").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "cached data"); + } + + #[tokio::test] + async fn test_cache_expires() { + let config = Config { + cache_ttl: Duration::from_millis(50), + link2_timeout: Duration::from_millis(100), + ..Config::default() + }; + let (app, state) = HttpRelay::create_app(config).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + + // Producer sends, consumer receives (value gets cached) + let 
producer = async { + let body = axum::body::Bytes::from_static(b"ephemeral"); + server.post("/link2/expire-test").bytes(body).await; + }; + let consumer = async { + tokio::time::sleep(Duration::from_millis(10)).await; + let response = server.get("/link2/expire-test").await; + assert_eq!(response.text(), "ephemeral"); + }; + tokio::join!(producer, consumer); + + // Value is cached + assert!(state + .pending_list + .lock() + .await + .get_cached("expire-test") + .is_some()); + + // Wait for cache to expire + tokio::time::sleep(Duration::from_millis(1500)).await; + + // Value should be expired (get_cached returns None) + assert!(state + .pending_list + .lock() + .await + .get_cached("expire-test") + .is_none()); + } + + #[tokio::test] + async fn test_post_overwrites_cache() { + let config = Config { + cache_ttl: Duration::from_secs(5), + ..Config::default() + }; + let (app, state) = HttpRelay::create_app(config).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + + // First producer-consumer pair + let producer1 = async { + let body = axum::body::Bytes::from_static(b"first value"); + server.post("/link2/overwrite-test").bytes(body).await; + }; + let consumer1 = async { + tokio::time::sleep(Duration::from_millis(50)).await; + server.get("/link2/overwrite-test").await + }; + tokio::join!(producer1, consumer1); + + // Verify first value is cached + let cached = state + .pending_list + .lock() + .await + .get_cached("overwrite-test"); + assert_eq!(cached.unwrap().body.as_ref(), b"first value"); + + // Second producer posts new value - should overwrite and wait + let producer2 = async { + let body = axum::body::Bytes::from_static(b"second value"); + server.post("/link2/overwrite-test").bytes(body).await; + }; + let consumer2 = async { + tokio::time::sleep(Duration::from_millis(50)).await; + server.get("/link2/overwrite-test").await + }; + let (_, response) = tokio::join!(producer2, consumer2); + assert_eq!(response.text(), "second value"); + + // Verify 
new value is cached + let cached = state + .pending_list + .lock() + .await + .get_cached("overwrite-test"); + assert_eq!(cached.unwrap().body.as_ref(), b"second value"); + } + + #[tokio::test] + async fn test_producer_timeout_does_not_cache() { + let config = Config { + link2_timeout: Duration::from_millis(50), + ..Config::default() + }; + let (app, state) = HttpRelay::create_app(config).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + + // Producer posts without any consumer waiting - should timeout + let body = axum::body::Bytes::from_static(b"should not cache"); + let response = server.post("/link2/timeout-test").bytes(body).await; + assert_eq!(response.status_code(), 408); // REQUEST_TIMEOUT + + // Value should NOT be cached + assert!(state + .pending_list + .lock() + .await + .get_cached("timeout-test") + .is_none()); + } + + #[tokio::test] + async fn test_consumer_timeout() { + let config = Config { + link2_timeout: Duration::from_millis(50), + ..Config::default() + }; + let (app, _state) = HttpRelay::create_app(config).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + + // Consumer waits without any producer - should timeout + let response = server.get("/link2/consumer-timeout-test").await; + assert_eq!(response.status_code(), 408); // REQUEST_TIMEOUT + } + + #[tokio::test] + async fn test_consumer_first_then_producer_caches() { + let config = Config { + cache_ttl: Duration::from_secs(5), + ..Config::default() + }; + let (app, state) = HttpRelay::create_app(config).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + + // Consumer waits first, then producer sends + let consumer = async { + let response = server.get("/link2/consumer-first").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "delayed data"); + }; + + let producer = async { + tokio::time::sleep(Duration::from_millis(100)).await; + let body = axum::body::Bytes::from_static(b"delayed data"); + let response = 
server.post("/link2/consumer-first").bytes(body).await; + assert_eq!(response.status_code(), 200); + }; + + tokio::join!(consumer, producer); + + // Value should be cached for subsequent consumers + assert_eq!(state.pending_list.lock().await.cache_len(), 1); + let response = server.get("/link2/consumer-first").await; + assert_eq!(response.text(), "delayed data"); + } +} diff --git a/src/http_relay/mod.rs b/src/http_relay/mod.rs new file mode 100644 index 0000000..d74a257 --- /dev/null +++ b/src/http_relay/mod.rs @@ -0,0 +1,11 @@ +//! HTTP relay server and configuration. + +pub(crate) mod link; +pub(crate) mod link2; +mod server; +mod waiting_list; + +pub use server::{HttpRelay, HttpRelayBuilder}; +pub(crate) use server::AppState; +#[cfg(test)] +pub(crate) use server::Config; diff --git a/src/http_relay/server.rs b/src/http_relay/server.rs new file mode 100644 index 0000000..c812dd2 --- /dev/null +++ b/src/http_relay/server.rs @@ -0,0 +1,227 @@ +//! HTTP relay server implementation. + +use std::{ + net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener}, + sync::Arc, + time::Duration, +}; + +use anyhow::Result; + +use axum::{routing::get, Router}; +use axum_server::Handle; +use tokio::sync::Mutex; + +use tower_http::{cors::CorsLayer, trace::TraceLayer}; +use url::Url; + +use super::{link, link2}; +use super::waiting_list::WaitingList; + +/// The timeout for a request to be considered unused. +const DEFAULT_REQUEST_TIMEOUT: Duration = Duration::from_secs(10 * 60); + +/// The default time-to-live for cached values after first consumer retrieves them. +const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(30); + +/// The default timeout for link2 endpoints (shorter to avoid proxy timeouts like nginx). 
+const DEFAULT_LINK2_TIMEOUT: Duration = Duration::from_secs(25); + +#[derive(Clone)] +pub(crate) struct AppState { + pub config: Config, + pub pending_list: Arc>, +} + +impl AppState { + pub fn new(config: Config) -> Self { + Self { + config, + pending_list: Arc::new(Mutex::new(WaitingList::default())), + } + } +} + +#[derive(Debug, Clone)] +pub(crate) struct Config { + pub bind_address: IpAddr, + pub http_port: u16, + pub request_timeout: Duration, + /// How long to keep values cached after the first consumer retrieves them. + pub cache_ttl: Duration, + /// Timeout for link2 endpoints (shorter to avoid proxy timeouts). + pub link2_timeout: Duration, +} + +impl Default for Config { + fn default() -> Self { + Self { + bind_address: IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), + http_port: 0, + request_timeout: DEFAULT_REQUEST_TIMEOUT, + cache_ttl: DEFAULT_CACHE_TTL, + link2_timeout: DEFAULT_LINK2_TIMEOUT, + } + } +} + +/// Builder for [HttpRelay]. +#[derive(Debug, Default)] +pub struct HttpRelayBuilder(Config); + +impl HttpRelayBuilder { + /// Configure the address to bind to (default: 0.0.0.0). + pub fn bind_address(mut self, addr: IpAddr) -> Self { + self.0.bind_address = addr; + self + } + + /// Configure the port used for HTTP server. + pub fn http_port(mut self, port: u16) -> Self { + self.0.http_port = port; + self + } + + /// Configure the TTL for cached values (default: 30 seconds). + /// Values remain available for this duration after the first consumer + /// retrieves them. + pub fn cache_ttl(mut self, ttl: Duration) -> Self { + self.0.cache_ttl = ttl; + self + } + + /// Configure the timeout for link2 endpoints (default: 25 seconds). + /// Shorter than the default request timeout to avoid proxy timeouts. + pub fn link2_timeout(mut self, timeout: Duration) -> Self { + self.0.link2_timeout = timeout; + self + } + + /// Start running an HTTP relay. 
+ pub async fn run(self) -> Result { + HttpRelay::start(self.0).await + } +} + +/// An implementation of _some_ of [Http relay spec](https://httprelay.io/). +pub struct HttpRelay { + pub(crate) http_handle: Handle, + http_address: SocketAddr, +} + +impl HttpRelay { + /// Creates the HTTP router for the HTTP relay. + #[cfg(test)] + pub(crate) fn create_app(config: Config) -> Result<(Router, AppState)> { + let app_state = AppState::new(config); + + let app = Router::new() + .route( + "/link/{id}", + get(link::get_handler).post(link::post_handler), + ) + .route( + "/link2/{id}", + get(link2::get_handler).post(link2::post_handler), + ) + .layer(CorsLayer::very_permissive()) + .layer(TraceLayer::new_for_http()) + .with_state(app_state.clone()); + + Ok((app, app_state)) + } + + async fn start(config: Config) -> Result { + let app_state = AppState::new(config.clone()); + + let app = Router::new() + .route( + "/link/{id}", + get(link::get_handler).post(link::post_handler), + ) + .route( + "/link2/{id}", + get(link2::get_handler).post(link2::post_handler), + ) + .layer(CorsLayer::very_permissive()) + .layer(TraceLayer::new_for_http()) + .with_state(app_state.clone()); + + let http_handle = Handle::new(); + let shutdown_handle = http_handle.clone(); + + let http_listener = TcpListener::bind(SocketAddr::new(config.bind_address, config.http_port))?; + let http_address = http_listener.local_addr()?; + + tokio::spawn(async move { + axum_server::from_tcp(http_listener) + .handle(http_handle.clone()) + .serve(app.into_make_service()) + .await + .map_err(|error| tracing::error!(?error, "HttpRelay http server error")) + }); + + // Spawn background task to clean up expired cache entries + let cleanup_interval = Duration::from_secs(1); + let pending_list = app_state.pending_list.clone(); + tokio::spawn(async move { + loop { + tokio::time::sleep(cleanup_interval).await; + let mut list = pending_list.lock().await; + let removed = list.cleanup_expired_cache(); + if removed > 0 { + 
tracing::debug!(removed, "Cleaned up expired cache entries"); + } + } + }); + + Ok(Self { + http_handle: shutdown_handle, + http_address, + }) + } + + /// Create [HttpRelayBuilder]. + pub fn builder() -> HttpRelayBuilder { + HttpRelayBuilder::default() + } + + /// Returns the HTTP address of this http relay. + pub fn http_address(&self) -> SocketAddr { + self.http_address + } + + /// Returns the localhost Url of this server. + pub fn local_url(&self) -> Url { + Url::parse(&format!("http://localhost:{}", self.http_address.port())) + .expect("local_url should be formatted fine") + } + + /// Returns the localhost URL of Link endpoints + pub fn local_link_url(&self) -> Url { + let mut url = self.local_url(); + + let mut segments = url + .path_segments_mut() + .expect("HttpRelay::local_link_url path_segments_mut"); + + segments.push("link"); + + drop(segments); + + url + } + + /// Gracefully shuts down the HTTP relay. + pub async fn shutdown(self) -> anyhow::Result<()> { + self.http_handle + .graceful_shutdown(Some(Duration::from_secs(1))); + Ok(()) + } +} + +impl Drop for HttpRelay { + fn drop(&mut self) { + self.http_handle.shutdown(); + } +} diff --git a/src/waiting_list.rs b/src/http_relay/waiting_list.rs similarity index 70% rename from src/waiting_list.rs rename to src/http_relay/waiting_list.rs index 4b334c1..1f4ba6b 100644 --- a/src/waiting_list.rs +++ b/src/http_relay/waiting_list.rs @@ -4,19 +4,29 @@ use std::time::Instant; use axum::body::Bytes; use tokio::sync::oneshot; +/// A message containing body and optional content type. +#[derive(Clone)] +pub struct Message { + pub body: Bytes, + pub content_type: Option, +} + /// A cached value with its expiration time. pub struct CachedValue { /// The cached payload. pub body: Bytes, + /// The content type of the cached payload. + pub content_type: Option, /// When this cached value expires. pub expires_at: Instant, } impl CachedValue { /// Creates a new cached value that expires after the given duration. 
- pub fn new(body: Bytes, ttl: std::time::Duration) -> Self { + pub fn new(body: Bytes, content_type: Option, ttl: std::time::Duration) -> Self { Self { body, + content_type, expires_at: Instant::now() + ttl, } } @@ -40,8 +50,13 @@ impl WaitingList { self.pending_producers.remove(id) } - pub fn insert_producer(&mut self, id: &str, body: Bytes) -> oneshot::Receiver<()> { - let (producer, completion_receiver) = WaitingProducer::new(body); + pub fn insert_producer( + &mut self, + id: &str, + body: Bytes, + content_type: Option, + ) -> oneshot::Receiver<()> { + let (producer, completion_receiver) = WaitingProducer::new(body, content_type); self.pending_producers.insert(id.to_string(), producer); completion_receiver } @@ -50,27 +65,36 @@ impl WaitingList { self.pending_consumers.remove(id) } - pub fn insert_consumer(&mut self, id: &str) -> oneshot::Receiver { + pub fn insert_consumer(&mut self, id: &str) -> oneshot::Receiver { let (consumer, message_receiver) = WaitingConsumer::new(); self.pending_consumers.insert(id.to_string(), consumer); message_receiver } /// Gets a cached value if it exists and hasn't expired. - pub fn get_cached(&self, id: &str) -> Option { + pub fn get_cached(&self, id: &str) -> Option { self.cache.get(id).and_then(|cached| { if cached.is_expired() { None } else { - Some(cached.body.clone()) + Some(Message { + body: cached.body.clone(), + content_type: cached.content_type.clone(), + }) } }) } /// Inserts a value into the cache with the given TTL. - pub fn insert_cached(&mut self, id: &str, body: Bytes, ttl: std::time::Duration) { + pub fn insert_cached( + &mut self, + id: &str, + body: Bytes, + content_type: Option, + ttl: std::time::Duration, + ) { self.cache - .insert(id.to_string(), CachedValue::new(body, ttl)); + .insert(id.to_string(), CachedValue::new(body, content_type, ttl)); } /// Removes expired entries from the cache. Returns the number removed. 
@@ -94,18 +118,21 @@ impl WaitingList { /// A producer that is waiting for a consumer to request data. pub struct WaitingProducer { - /// The payload of the producer + /// The payload of the producer. pub body: Bytes, + /// The content type of the payload. + pub content_type: Option, /// The sender to notify the producer that the request has been resolved. pub completion: oneshot::Sender<()>, } impl WaitingProducer { - fn new(body: Bytes) -> (Self, oneshot::Receiver<()>) { + fn new(body: Bytes, content_type: Option) -> (Self, oneshot::Receiver<()>) { let (completion_sender, completion_receiver) = oneshot::channel(); ( Self { body, + content_type, completion: completion_sender, }, completion_receiver, @@ -116,11 +143,11 @@ impl WaitingProducer { /// A consumer that is waiting for a producer to send data. pub struct WaitingConsumer { /// The sender to notify the consumer that the request has been resolved. - pub message_sender: oneshot::Sender, + pub message_sender: oneshot::Sender, } impl WaitingConsumer { - fn new() -> (Self, oneshot::Receiver) { + fn new() -> (Self, oneshot::Receiver) { let (message_sender, message_receiver) = oneshot::channel(); (Self { message_sender }, message_receiver) } diff --git a/src/lib.rs b/src/lib.rs index 2852abd..d42c85d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,11 +1,9 @@ //! A Rust implementation of _some_ of [Http relay spec](https://httprelay.io/). -//! #![deny(missing_docs)] #![deny(rustdoc::broken_intra_doc_links)] #![cfg_attr(any(), deny(clippy::unwrap_used))] mod http_relay; -mod waiting_list; -pub use http_relay::*; +pub use http_relay::{HttpRelay, HttpRelayBuilder}; diff --git a/src/main.rs b/src/main.rs new file mode 100644 index 0000000..7d3e3f8 --- /dev/null +++ b/src/main.rs @@ -0,0 +1,92 @@ +//! HTTP Relay server executable. 
+ +use std::{net::IpAddr, time::Duration}; + +use anyhow::Result; +use clap::Parser; +use http_relay::HttpRelayBuilder; +use tracing::level_filters::LevelFilter; +use tracing_subscriber::EnvFilter; + +#[derive(Parser, Debug)] +#[command(name = "http-relay")] +#[command(about = "HTTP relay server for asynchronous producer/consumer communication")] +#[command(version)] +struct Args { + /// Address to bind to + #[arg(short, long, default_value = "0.0.0.0")] + bind: IpAddr, + + /// Port to listen on (0 = random available port) + #[arg(short, long, default_value_t = 8080)] + port: u16, + + /// Cache TTL in seconds for retry support + #[arg(long, default_value_t = 30)] + cache_ttl: u64, + + /// Link2 endpoint timeout in seconds (shorter to avoid proxy timeouts) + #[arg(long, default_value_t = 25)] + link2_timeout: u64, + + /// Verbosity level: -v (info), -vv (debug), -vvv (trace) + #[arg(short, long, action = clap::ArgAction::Count)] + verbose: u8, + + /// Silence all output + #[arg(short, long)] + quiet: bool, +} + +#[tokio::main] +async fn main() -> Result<()> { + let args = Args::parse(); + + init_tracing(args.verbose, args.quiet); + + let relay = HttpRelayBuilder::default() + .bind_address(args.bind) + .http_port(args.port) + .cache_ttl(Duration::from_secs(args.cache_ttl)) + .link2_timeout(Duration::from_secs(args.link2_timeout)) + .run() + .await?; + + tracing::info!( + address = %relay.http_address(), + "HTTP relay server started" + ); + tracing::info!( + link = %relay.local_link_url(), + "Link endpoint available at /link/{{id}} and /link2/{{id}}" + ); + + tokio::signal::ctrl_c().await?; + + tracing::info!("Shutting down..."); + relay.shutdown().await?; + + Ok(()) +} + +fn init_tracing(verbose: u8, quiet: bool) { + let level = if quiet { + LevelFilter::OFF + } else { + match verbose { + 0 => LevelFilter::WARN, + 1 => LevelFilter::INFO, + 2 => LevelFilter::DEBUG, + _ => LevelFilter::TRACE, + } + }; + + let filter = EnvFilter::builder() + 
.with_default_directive(level.into()) + .from_env_lossy(); + + tracing_subscriber::fmt() + .with_env_filter(filter) + .with_target(false) + .init(); +} From 9f6e7d23a500bc0a566108e12e96640b91d59c0d Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 15:55:09 +0100 Subject: [PATCH 03/40] refactored --- src/http_relay/link.rs | 223 --------------- src/http_relay/link2.rs | 309 -------------------- src/http_relay/link_handler.rs | 501 +++++++++++++++++++++++++++++++++ src/http_relay/mod.rs | 5 +- src/http_relay/response.rs | 68 +++++ src/http_relay/server.rs | 39 ++- src/http_relay/waiting_list.rs | 11 +- 7 files changed, 599 insertions(+), 557 deletions(-) delete mode 100644 src/http_relay/link.rs delete mode 100644 src/http_relay/link2.rs create mode 100644 src/http_relay/link_handler.rs create mode 100644 src/http_relay/response.rs diff --git a/src/http_relay/link.rs b/src/http_relay/link.rs deleted file mode 100644 index 45c264d..0000000 --- a/src/http_relay/link.rs +++ /dev/null @@ -1,223 +0,0 @@ -//! Standard link endpoint without caching - closer to httprelay.io spec. - -use axum::{ - body::Bytes, - extract::{Path, State}, - http::{header, HeaderMap, StatusCode}, - response::{IntoResponse, Response}, -}; - -use super::waiting_list::Message; - -use super::AppState; - -/// Build a response with optional Content-Type header. -fn build_response(status: StatusCode, body: Bytes, content_type: Option) -> Response { - let mut response = (status, body).into_response(); - if let Some(ct) = content_type { - if let Ok(value) = ct.parse() { - response.headers_mut().insert(header::CONTENT_TYPE, value); - } - } - response -} - -/// A consumer requests data using GET method. 
-pub async fn get_handler( - Path(id): Path, - State(state): State, -) -> Response { - let mut pending_list = state.pending_list.lock().await; - - if let Some(producer) = pending_list.remove_producer(&id) { - let _ = producer.completion.send(()); - return build_response(StatusCode::OK, producer.body, producer.content_type); - }; - - // No producer ready. Insert consumer into pending list and wait. - let receiver = pending_list.insert_consumer(&id); - drop(pending_list); - - // Wait for the producer, but with a timeout - match tokio::time::timeout(state.config.request_timeout, receiver).await { - Ok(Ok(msg)) => build_response(StatusCode::OK, msg.body, msg.content_type), - Ok(Err(_)) => build_response(StatusCode::NOT_FOUND, "Not Found".into(), None), - Err(_) => { - // Timeout. Remove the consumer from the pending list again - let mut pending_list = state.pending_list.lock().await; - pending_list.remove_consumer(&id); - build_response(StatusCode::REQUEST_TIMEOUT, "Request timed out".into(), None) - } - } -} - -/// A producer sends data using POST method. -pub async fn post_handler( - Path(channel): Path, - State(state): State, - headers: HeaderMap, - body: Bytes, -) -> impl IntoResponse { - let content_type = headers - .get(header::CONTENT_TYPE) - .and_then(|v| v.to_str().ok()) - .map(|s| s.to_string()); - - let mut pending_list = state.pending_list.lock().await; - - if let Some(consumer) = pending_list.remove_consumer(&channel) { - let msg = Message { - body, - content_type, - }; - let _ = consumer.message_sender.send(msg); - return (StatusCode::OK, Bytes::new()); - }; - - // No consumer ready. Insert producer into pending list and wait. - let receiver = pending_list.insert_producer(&channel, body, content_type); - drop(pending_list); - match tokio::time::timeout(state.config.request_timeout, receiver).await { - Ok(_) => (StatusCode::OK, Bytes::new()), - Err(_) => { - // Timeout. 
Remove the producer from the pending list again - let mut pending_list = state.pending_list.lock().await; - pending_list.remove_producer(&channel); - (StatusCode::REQUEST_TIMEOUT, "Request timed out".into()) - } - } -} - -#[cfg(test)] -mod tests { - use std::time::Duration; - - use crate::http_relay::{Config, HttpRelay}; - - #[tokio::test] - async fn test_delayed_producer() { - let (app, state) = HttpRelay::create_app(Config::default()).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - let consumer = async { - let response = server.get("/link/123").await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), "Hello, world!"); - }; - - let producer = async { - tokio::time::sleep(Duration::from_millis(200)).await; - let body = axum::body::Bytes::from_static(b"Hello, world!"); - let response = server.post("/link/123").bytes(body).await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), ""); - }; - - tokio::join!(consumer, producer); - assert!(state.pending_list.lock().await.is_empty()); - } - - #[tokio::test] - async fn test_delayed_consumer() { - let (app, state) = HttpRelay::create_app(Config::default()).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - let consumer = async { - tokio::time::sleep(Duration::from_millis(200)).await; - let response = server.get("/link/123").await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), "Hello, world!"); - }; - - let producer = async { - let body = axum::body::Bytes::from_static(b"Hello, world!"); - let response = server.post("/link/123").bytes(body).await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), ""); - }; - - tokio::join!(consumer, producer); - assert!(state.pending_list.lock().await.is_empty()); - } - - #[tokio::test] - async fn test_request_timeout() { - let config = Config { - request_timeout: Duration::from_millis(50), - ..Config::default() - }; - let (app, state) = 
HttpRelay::create_app(config).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - // Consumer request timed out - let response = server.get("/link/123").await; - assert_eq!(response.status_code(), 408); - assert_eq!(response.text(), "Request timed out"); - assert!(state.pending_list.lock().await.is_empty()); - - // Producer request timed out - let body = axum::body::Bytes::from_static(b"Hello, world!"); - let response = server.post("/link/123").bytes(body).await; - assert_eq!(response.status_code(), 408); - assert_eq!(response.text(), "Request timed out"); - assert!(state.pending_list.lock().await.is_empty()); - } - - #[tokio::test] - async fn test_no_caching() { - let config = Config { - cache_ttl: Duration::from_secs(5), - request_timeout: Duration::from_millis(100), - ..Config::default() - }; - let (app, state) = HttpRelay::create_app(config).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - // Producer sends, consumer receives - but no caching on /link/ - let producer = async { - let body = axum::body::Bytes::from_static(b"no cache data"); - server.post("/link/no-cache-test").bytes(body).await; - }; - let consumer = async { - tokio::time::sleep(Duration::from_millis(10)).await; - let response = server.get("/link/no-cache-test").await; - assert_eq!(response.text(), "no cache data"); - }; - tokio::join!(producer, consumer); - - // Value should NOT be cached for /link/ - assert_eq!(state.pending_list.lock().await.cache_len(), 0); - - // Second consumer should timeout since no caching - let response = server.get("/link/no-cache-test").await; - assert_eq!(response.status_code(), 408); - } - - #[tokio::test] - async fn test_content_type_forwarding() { - let (app, _state) = HttpRelay::create_app(Config::default()).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - let consumer = async { - let response = server.get("/link/ct-test").await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), 
r#"{"key":"value"}"#); - assert_eq!( - response.header("content-type").to_str().unwrap(), - "application/json" - ); - }; - - let producer = async { - tokio::time::sleep(Duration::from_millis(50)).await; - let body = axum::body::Bytes::from_static(br#"{"key":"value"}"#); - server - .post("/link/ct-test") - .content_type("application/json") - .bytes(body) - .await; - }; - - tokio::join!(consumer, producer); - } -} diff --git a/src/http_relay/link2.rs b/src/http_relay/link2.rs deleted file mode 100644 index 0343eb2..0000000 --- a/src/http_relay/link2.rs +++ /dev/null @@ -1,309 +0,0 @@ -//! Link endpoint with caching enabled for mobile retry support. - -use axum::{ - body::Bytes, - extract::{Path, State}, - http::{header, HeaderMap, StatusCode}, - response::{IntoResponse, Response}, -}; - -use super::waiting_list::Message; - -use super::AppState; - -/// Build a response with optional Content-Type header. -fn build_response(status: StatusCode, body: Bytes, content_type: Option) -> Response { - let mut response = (status, body).into_response(); - if let Some(ct) = content_type { - if let Ok(value) = ct.parse() { - response.headers_mut().insert(header::CONTENT_TYPE, value); - } - } - response -} - -/// A consumer requests data using GET method. 
-pub async fn get_handler( - Path(id): Path, - State(state): State, -) -> Response { - let mut pending_list = state.pending_list.lock().await; - - // First, check if there's a cached value - if let Some(cached) = pending_list.get_cached(&id) { - return build_response(StatusCode::OK, cached.body, cached.content_type); - } - - if let Some(producer) = pending_list.remove_producer(&id) { - // Producer is ready to send data - cache it for future consumers - let body = producer.body.clone(); - let content_type = producer.content_type.clone(); - pending_list.insert_cached(&id, body, content_type.clone(), state.config.cache_ttl); - let _ = producer.completion.send(()); - return build_response(StatusCode::OK, producer.body, content_type); - }; - - // No producer ready. Insert consumer into pending list and wait. - let receiver = pending_list.insert_consumer(&id); - drop(pending_list); - - // Wait for the producer, but with a timeout to avoid proxy timeouts - match tokio::time::timeout(state.config.link2_timeout, receiver).await { - Ok(Ok(msg)) => build_response(StatusCode::OK, msg.body, msg.content_type), - Ok(Err(_)) => build_response(StatusCode::NOT_FOUND, "Not Found".into(), None), - Err(_) => { - // Timeout. Remove the consumer from the pending list again - let mut pending_list = state.pending_list.lock().await; - pending_list.remove_consumer(&id); - build_response(StatusCode::REQUEST_TIMEOUT, "Request timed out".into(), None) - } - } -} - -/// A producer sends data using POST method. 
-pub async fn post_handler( - Path(channel): Path, - State(state): State, - headers: HeaderMap, - body: Bytes, -) -> impl IntoResponse { - let content_type = headers - .get(header::CONTENT_TYPE) - .and_then(|v| v.to_str().ok()) - .map(|s| s.to_string()); - - let mut pending_list = state.pending_list.lock().await; - - // If there's a cached value, remove it (new POST overwrites) - pending_list.cache.remove(&channel); - - if let Some(consumer) = pending_list.remove_consumer(&channel) { - // Consumer is ready to receive data - also cache it - pending_list.insert_cached(&channel, body.clone(), content_type.clone(), state.config.cache_ttl); - let msg = Message { - body, - content_type, - }; - let _ = consumer.message_sender.send(msg); - return (StatusCode::OK, Bytes::new()); - }; - - // No consumer ready. Insert producer into pending list and wait. - let receiver = pending_list.insert_producer(&channel, body, content_type); - drop(pending_list); - match tokio::time::timeout(state.config.link2_timeout, receiver).await { - Ok(_) => (StatusCode::OK, Bytes::new()), - Err(_) => { - // Timeout. 
Remove the producer from the pending list again - let mut pending_list = state.pending_list.lock().await; - pending_list.remove_producer(&channel); - (StatusCode::REQUEST_TIMEOUT, "Request timed out".into()) - } - } -} - -#[cfg(test)] -mod tests { - use std::time::Duration; - - use crate::http_relay::{Config, HttpRelay}; - - #[tokio::test] - async fn test_cached_value_multiple_consumers() { - let config = Config { - cache_ttl: Duration::from_secs(5), - ..Config::default() - }; - let (app, state) = HttpRelay::create_app(config).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - // Producer sends data - let producer = async { - let body = axum::body::Bytes::from_static(b"cached data"); - let response = server.post("/link2/cache-test").bytes(body).await; - assert_eq!(response.status_code(), 200); - }; - - // First consumer receives it - let first_consumer = async { - tokio::time::sleep(Duration::from_millis(50)).await; - let response = server.get("/link2/cache-test").await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), "cached data"); - }; - - tokio::join!(producer, first_consumer); - - // Value should now be cached - assert_eq!(state.pending_list.lock().await.cache_len(), 1); - - // Second consumer can get the same cached value - let response = server.get("/link2/cache-test").await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), "cached data"); - - // Third consumer also gets it - let response = server.get("/link2/cache-test").await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), "cached data"); - } - - #[tokio::test] - async fn test_cache_expires() { - let config = Config { - cache_ttl: Duration::from_millis(50), - link2_timeout: Duration::from_millis(100), - ..Config::default() - }; - let (app, state) = HttpRelay::create_app(config).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - // Producer sends, consumer receives (value gets cached) - let 
producer = async { - let body = axum::body::Bytes::from_static(b"ephemeral"); - server.post("/link2/expire-test").bytes(body).await; - }; - let consumer = async { - tokio::time::sleep(Duration::from_millis(10)).await; - let response = server.get("/link2/expire-test").await; - assert_eq!(response.text(), "ephemeral"); - }; - tokio::join!(producer, consumer); - - // Value is cached - assert!(state - .pending_list - .lock() - .await - .get_cached("expire-test") - .is_some()); - - // Wait for cache to expire - tokio::time::sleep(Duration::from_millis(1500)).await; - - // Value should be expired (get_cached returns None) - assert!(state - .pending_list - .lock() - .await - .get_cached("expire-test") - .is_none()); - } - - #[tokio::test] - async fn test_post_overwrites_cache() { - let config = Config { - cache_ttl: Duration::from_secs(5), - ..Config::default() - }; - let (app, state) = HttpRelay::create_app(config).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - // First producer-consumer pair - let producer1 = async { - let body = axum::body::Bytes::from_static(b"first value"); - server.post("/link2/overwrite-test").bytes(body).await; - }; - let consumer1 = async { - tokio::time::sleep(Duration::from_millis(50)).await; - server.get("/link2/overwrite-test").await - }; - tokio::join!(producer1, consumer1); - - // Verify first value is cached - let cached = state - .pending_list - .lock() - .await - .get_cached("overwrite-test"); - assert_eq!(cached.unwrap().body.as_ref(), b"first value"); - - // Second producer posts new value - should overwrite and wait - let producer2 = async { - let body = axum::body::Bytes::from_static(b"second value"); - server.post("/link2/overwrite-test").bytes(body).await; - }; - let consumer2 = async { - tokio::time::sleep(Duration::from_millis(50)).await; - server.get("/link2/overwrite-test").await - }; - let (_, response) = tokio::join!(producer2, consumer2); - assert_eq!(response.text(), "second value"); - - // Verify 
new value is cached - let cached = state - .pending_list - .lock() - .await - .get_cached("overwrite-test"); - assert_eq!(cached.unwrap().body.as_ref(), b"second value"); - } - - #[tokio::test] - async fn test_producer_timeout_does_not_cache() { - let config = Config { - link2_timeout: Duration::from_millis(50), - ..Config::default() - }; - let (app, state) = HttpRelay::create_app(config).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - // Producer posts without any consumer waiting - should timeout - let body = axum::body::Bytes::from_static(b"should not cache"); - let response = server.post("/link2/timeout-test").bytes(body).await; - assert_eq!(response.status_code(), 408); // REQUEST_TIMEOUT - - // Value should NOT be cached - assert!(state - .pending_list - .lock() - .await - .get_cached("timeout-test") - .is_none()); - } - - #[tokio::test] - async fn test_consumer_timeout() { - let config = Config { - link2_timeout: Duration::from_millis(50), - ..Config::default() - }; - let (app, _state) = HttpRelay::create_app(config).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - // Consumer waits without any producer - should timeout - let response = server.get("/link2/consumer-timeout-test").await; - assert_eq!(response.status_code(), 408); // REQUEST_TIMEOUT - } - - #[tokio::test] - async fn test_consumer_first_then_producer_caches() { - let config = Config { - cache_ttl: Duration::from_secs(5), - ..Config::default() - }; - let (app, state) = HttpRelay::create_app(config).unwrap(); - let server = axum_test::TestServer::new(app).unwrap(); - - // Consumer waits first, then producer sends - let consumer = async { - let response = server.get("/link2/consumer-first").await; - assert_eq!(response.status_code(), 200); - assert_eq!(response.text(), "delayed data"); - }; - - let producer = async { - tokio::time::sleep(Duration::from_millis(100)).await; - let body = axum::body::Bytes::from_static(b"delayed data"); - let response = 
server.post("/link2/consumer-first").bytes(body).await; - assert_eq!(response.status_code(), 200); - }; - - tokio::join!(consumer, producer); - - // Value should be cached for subsequent consumers - assert_eq!(state.pending_list.lock().await.cache_len(), 1); - let response = server.get("/link2/consumer-first").await; - assert_eq!(response.text(), "delayed data"); - } -} diff --git a/src/http_relay/link_handler.rs b/src/http_relay/link_handler.rs new file mode 100644 index 0000000..75b8d8c --- /dev/null +++ b/src/http_relay/link_handler.rs @@ -0,0 +1,501 @@ +//! Generic link handler that can operate with or without caching. + +use std::time::Duration; + +use axum::{ + body::Bytes, + extract::{Path, State}, + http::{header, HeaderMap, StatusCode}, + response::{IntoResponse, Response}, +}; + +use super::response::{await_consumer_message, await_producer_completion, build_response}; +use super::waiting_list::Message; +use super::AppState; + +/// Configuration for link handler behavior. +#[derive(Clone, Copy)] +pub struct LinkConfig { + /// Whether to use caching for this endpoint. + pub caching_enabled: bool, +} + +impl LinkConfig { + /// Standard link endpoint without caching. + pub const STANDARD: Self = Self { + caching_enabled: false, + }; + + /// Link2 endpoint with caching enabled. + pub const WITH_CACHE: Self = Self { + caching_enabled: true, + }; +} + +/// Returns the timeout duration based on link config. +fn get_timeout(state: &AppState, config: LinkConfig) -> Duration { + if config.caching_enabled { + state.config.link2_timeout + } else { + state.config.request_timeout + } +} + +/// A consumer requests data using GET method. 
+pub async fn get_handler( + Path(id): Path, + State(state): State, + config: LinkConfig, +) -> Response { + let mut pending_list = state.pending_list.lock().await; + + // Check cache if caching is enabled + if config.caching_enabled { + if let Some(cached) = pending_list.get_cached(&id) { + return build_response(StatusCode::OK, cached.body, cached.content_type); + } + } + + if let Some(producer) = pending_list.remove_producer(&id) { + // Cache the response if caching is enabled + if config.caching_enabled { + pending_list.insert_cached( + &id, + producer.body.clone(), + producer.content_type.clone(), + state.config.cache_ttl, + ); + } + let _ = producer.completion.send(()); + return build_response(StatusCode::OK, producer.body, producer.content_type); + }; + + // No producer ready. Insert consumer into pending list and wait. + let receiver = pending_list.insert_consumer(&id); + drop(pending_list); + + let timeout = get_timeout(&state, config); + let pending_list = state.pending_list.clone(); + await_consumer_message(receiver, timeout, || async move { + pending_list.lock().await.remove_consumer(&id); + }) + .await +} + +/// A producer sends data using POST method. 
+pub async fn post_handler( + Path(channel): Path, + State(state): State, + headers: HeaderMap, + body: Bytes, + config: LinkConfig, +) -> impl IntoResponse { + let content_type = headers + .get(header::CONTENT_TYPE) + .and_then(|v| v.to_str().ok()) + .map(|s| s.to_string()); + + let mut pending_list = state.pending_list.lock().await; + + // Invalidate cache if caching is enabled + if config.caching_enabled { + pending_list.remove_cached(&channel); + } + + if let Some(consumer) = pending_list.remove_consumer(&channel) { + // Cache the response if caching is enabled + if config.caching_enabled { + pending_list.insert_cached( + &channel, + body.clone(), + content_type.clone(), + state.config.cache_ttl, + ); + } + let msg = Message { + body, + content_type, + }; + let _ = consumer.message_sender.send(msg); + return (StatusCode::OK, Bytes::new()); + }; + + // No consumer ready. Insert producer into pending list and wait. + let receiver = pending_list.insert_producer(&channel, body, content_type); + drop(pending_list); + + let timeout = get_timeout(&state, config); + let pending_list = state.pending_list.clone(); + await_producer_completion(receiver, timeout, || async move { + pending_list.lock().await.remove_producer(&channel); + }) + .await +} + +// Thin wrapper handlers for the /link/ endpoint (no caching) +pub mod link { + use super::*; + + pub async fn get_handler(path: Path, state: State) -> Response { + super::get_handler(path, state, LinkConfig::STANDARD).await + } + + pub async fn post_handler( + path: Path, + state: State, + headers: HeaderMap, + body: Bytes, + ) -> impl IntoResponse { + super::post_handler(path, state, headers, body, LinkConfig::STANDARD).await + } +} + +// Thin wrapper handlers for the /link2/ endpoint (with caching) +pub mod link2 { + use super::*; + + pub async fn get_handler(path: Path, state: State) -> Response { + super::get_handler(path, state, LinkConfig::WITH_CACHE).await + } + + pub async fn post_handler( + path: Path, + state: 
State, + headers: HeaderMap, + body: Bytes, + ) -> impl IntoResponse { + super::post_handler(path, state, headers, body, LinkConfig::WITH_CACHE).await + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use crate::http_relay::{Config, HttpRelay}; + + // Tests for standard link endpoint (no caching) + mod link_tests { + use super::*; + + #[tokio::test] + async fn test_delayed_producer() { + let (server, state) = HttpRelay::create_test_server(Config::default()); + + let consumer = async { + let response = server.get("/link/123").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "Hello, world!"); + }; + + let producer = async { + tokio::time::sleep(Duration::from_millis(200)).await; + let body = axum::body::Bytes::from_static(b"Hello, world!"); + let response = server.post("/link/123").bytes(body).await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), ""); + }; + + tokio::join!(consumer, producer); + assert!(state.pending_list.lock().await.is_empty()); + } + + #[tokio::test] + async fn test_delayed_consumer() { + let (server, state) = HttpRelay::create_test_server(Config::default()); + + let consumer = async { + tokio::time::sleep(Duration::from_millis(200)).await; + let response = server.get("/link/123").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "Hello, world!"); + }; + + let producer = async { + let body = axum::body::Bytes::from_static(b"Hello, world!"); + let response = server.post("/link/123").bytes(body).await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), ""); + }; + + tokio::join!(consumer, producer); + assert!(state.pending_list.lock().await.is_empty()); + } + + #[tokio::test] + async fn test_request_timeout() { + let config = Config { + request_timeout: Duration::from_millis(50), + ..Config::default() + }; + let (server, state) = HttpRelay::create_test_server(config); + + // Consumer request timed out + let response = 
server.get("/link/123").await; + assert_eq!(response.status_code(), 408); + assert_eq!(response.text(), "Request timed out"); + assert!(state.pending_list.lock().await.is_empty()); + + // Producer request timed out + let body = axum::body::Bytes::from_static(b"Hello, world!"); + let response = server.post("/link/123").bytes(body).await; + assert_eq!(response.status_code(), 408); + assert_eq!(response.text(), "Request timed out"); + assert!(state.pending_list.lock().await.is_empty()); + } + + #[tokio::test] + async fn test_no_caching() { + let config = Config { + cache_ttl: Duration::from_secs(5), + request_timeout: Duration::from_millis(100), + ..Config::default() + }; + let (server, state) = HttpRelay::create_test_server(config); + + // Producer sends, consumer receives - but no caching on /link/ + let producer = async { + let body = axum::body::Bytes::from_static(b"no cache data"); + server.post("/link/no-cache-test").bytes(body).await; + }; + let consumer = async { + tokio::time::sleep(Duration::from_millis(10)).await; + let response = server.get("/link/no-cache-test").await; + assert_eq!(response.text(), "no cache data"); + }; + tokio::join!(producer, consumer); + + // Value should NOT be cached for /link/ + assert_eq!(state.pending_list.lock().await.cache_len(), 0); + + // Second consumer should timeout since no caching + let response = server.get("/link/no-cache-test").await; + assert_eq!(response.status_code(), 408); + } + + #[tokio::test] + async fn test_content_type_forwarding() { + let (server, _state) = HttpRelay::create_test_server(Config::default()); + + let consumer = async { + let response = server.get("/link/ct-test").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), r#"{"key":"value"}"#); + assert_eq!( + response.header("content-type").to_str().unwrap(), + "application/json" + ); + }; + + let producer = async { + tokio::time::sleep(Duration::from_millis(50)).await; + let body = 
axum::body::Bytes::from_static(br#"{"key":"value"}"#); + server + .post("/link/ct-test") + .content_type("application/json") + .bytes(body) + .await; + }; + + tokio::join!(consumer, producer); + } + } + + // Tests for link2 endpoint (with caching) + mod link2_tests { + use super::*; + + #[tokio::test] + async fn test_cached_value_multiple_consumers() { + let config = Config { + cache_ttl: Duration::from_secs(5), + ..Config::default() + }; + let (server, state) = HttpRelay::create_test_server(config); + + // Producer sends data + let producer = async { + let body = axum::body::Bytes::from_static(b"cached data"); + let response = server.post("/link2/cache-test").bytes(body).await; + assert_eq!(response.status_code(), 200); + }; + + // First consumer receives it + let first_consumer = async { + tokio::time::sleep(Duration::from_millis(50)).await; + let response = server.get("/link2/cache-test").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "cached data"); + }; + + tokio::join!(producer, first_consumer); + + // Value should now be cached + assert_eq!(state.pending_list.lock().await.cache_len(), 1); + + // Second consumer can get the same cached value + let response = server.get("/link2/cache-test").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "cached data"); + + // Third consumer also gets it + let response = server.get("/link2/cache-test").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "cached data"); + } + + #[tokio::test] + async fn test_cache_expires() { + let config = Config { + cache_ttl: Duration::from_millis(50), + link2_timeout: Duration::from_millis(100), + ..Config::default() + }; + let (server, state) = HttpRelay::create_test_server(config); + + // Producer sends, consumer receives (value gets cached) + let producer = async { + let body = axum::body::Bytes::from_static(b"ephemeral"); + server.post("/link2/expire-test").bytes(body).await; + }; + let consumer 
= async { + tokio::time::sleep(Duration::from_millis(10)).await; + let response = server.get("/link2/expire-test").await; + assert_eq!(response.text(), "ephemeral"); + }; + tokio::join!(producer, consumer); + + // Value is cached + assert!(state + .pending_list + .lock() + .await + .get_cached("expire-test") + .is_some()); + + // Wait for cache to expire + tokio::time::sleep(Duration::from_millis(1500)).await; + + // Value should be expired (get_cached returns None) + assert!(state + .pending_list + .lock() + .await + .get_cached("expire-test") + .is_none()); + } + + #[tokio::test] + async fn test_post_overwrites_cache() { + let config = Config { + cache_ttl: Duration::from_secs(5), + ..Config::default() + }; + let (server, state) = HttpRelay::create_test_server(config); + + // First producer-consumer pair + let producer1 = async { + let body = axum::body::Bytes::from_static(b"first value"); + server.post("/link2/overwrite-test").bytes(body).await; + }; + let consumer1 = async { + tokio::time::sleep(Duration::from_millis(50)).await; + server.get("/link2/overwrite-test").await + }; + tokio::join!(producer1, consumer1); + + // Verify first value is cached + let cached = state + .pending_list + .lock() + .await + .get_cached("overwrite-test"); + assert_eq!(cached.unwrap().body.as_ref(), b"first value"); + + // Second producer posts new value - should overwrite and wait + let producer2 = async { + let body = axum::body::Bytes::from_static(b"second value"); + server.post("/link2/overwrite-test").bytes(body).await; + }; + let consumer2 = async { + tokio::time::sleep(Duration::from_millis(50)).await; + server.get("/link2/overwrite-test").await + }; + let (_, response) = tokio::join!(producer2, consumer2); + assert_eq!(response.text(), "second value"); + + // Verify new value is cached + let cached = state + .pending_list + .lock() + .await + .get_cached("overwrite-test"); + assert_eq!(cached.unwrap().body.as_ref(), b"second value"); + } + + #[tokio::test] + async fn 
test_producer_timeout_does_not_cache() { + let config = Config { + link2_timeout: Duration::from_millis(50), + ..Config::default() + }; + let (server, state) = HttpRelay::create_test_server(config); + + // Producer posts without any consumer waiting - should timeout + let body = axum::body::Bytes::from_static(b"should not cache"); + let response = server.post("/link2/timeout-test").bytes(body).await; + assert_eq!(response.status_code(), 408); // REQUEST_TIMEOUT + + // Value should NOT be cached + assert!(state + .pending_list + .lock() + .await + .get_cached("timeout-test") + .is_none()); + } + + #[tokio::test] + async fn test_consumer_timeout() { + let config = Config { + link2_timeout: Duration::from_millis(50), + ..Config::default() + }; + let (server, _state) = HttpRelay::create_test_server(config); + + // Consumer waits without any producer - should timeout + let response = server.get("/link2/consumer-timeout-test").await; + assert_eq!(response.status_code(), 408); // REQUEST_TIMEOUT + } + + #[tokio::test] + async fn test_consumer_first_then_producer_caches() { + let config = Config { + cache_ttl: Duration::from_secs(5), + ..Config::default() + }; + let (server, state) = HttpRelay::create_test_server(config); + + // Consumer waits first, then producer sends + let consumer = async { + let response = server.get("/link2/consumer-first").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "delayed data"); + }; + + let producer = async { + tokio::time::sleep(Duration::from_millis(100)).await; + let body = axum::body::Bytes::from_static(b"delayed data"); + let response = server.post("/link2/consumer-first").bytes(body).await; + assert_eq!(response.status_code(), 200); + }; + + tokio::join!(consumer, producer); + + // Value should be cached for subsequent consumers + assert_eq!(state.pending_list.lock().await.cache_len(), 1); + let response = server.get("/link2/consumer-first").await; + assert_eq!(response.text(), "delayed data"); + } + } 
+} diff --git a/src/http_relay/mod.rs b/src/http_relay/mod.rs index d74a257..ff26fec 100644 --- a/src/http_relay/mod.rs +++ b/src/http_relay/mod.rs @@ -1,10 +1,11 @@ //! HTTP relay server and configuration. -pub(crate) mod link; -pub(crate) mod link2; +mod link_handler; +mod response; mod server; mod waiting_list; +pub(crate) use link_handler::{link, link2}; pub use server::{HttpRelay, HttpRelayBuilder}; pub(crate) use server::AppState; #[cfg(test)] diff --git a/src/http_relay/response.rs b/src/http_relay/response.rs new file mode 100644 index 0000000..4fd6314 --- /dev/null +++ b/src/http_relay/response.rs @@ -0,0 +1,68 @@ +//! Shared response utilities for HTTP relay endpoints. + +use std::future::Future; +use std::time::Duration; + +use axum::{ + body::Bytes, + http::{header, StatusCode}, + response::{IntoResponse, Response}, +}; +use tokio::sync::oneshot; + +use super::waiting_list::Message; + +/// Build a response with optional Content-Type header. +pub fn build_response(status: StatusCode, body: Bytes, content_type: Option) -> Response { + let mut response = (status, body).into_response(); + if let Some(ct) = content_type { + if let Ok(value) = ct.parse() { + response.headers_mut().insert(header::CONTENT_TYPE, value); + } + } + response +} + +/// Awaits a message from the producer with a timeout. +/// On success, returns OK with the message body and content type. +/// On channel close, returns NOT_FOUND. +/// On timeout, calls the async cleanup function and returns REQUEST_TIMEOUT. 
+pub async fn await_consumer_message( + receiver: oneshot::Receiver, + timeout: Duration, + on_timeout: F, +) -> Response +where + F: FnOnce() -> Fut, + Fut: Future, +{ + match tokio::time::timeout(timeout, receiver).await { + Ok(Ok(msg)) => build_response(StatusCode::OK, msg.body, msg.content_type), + Ok(Err(_)) => build_response(StatusCode::NOT_FOUND, "Not Found".into(), None), + Err(_) => { + on_timeout().await; + build_response(StatusCode::REQUEST_TIMEOUT, "Request timed out".into(), None) + } + } +} + +/// Awaits completion from the consumer with a timeout. +/// On success, returns OK with empty body. +/// On timeout, calls the async cleanup function and returns REQUEST_TIMEOUT. +pub async fn await_producer_completion( + receiver: oneshot::Receiver<()>, + timeout: Duration, + on_timeout: F, +) -> (StatusCode, Bytes) +where + F: FnOnce() -> Fut, + Fut: Future, +{ + match tokio::time::timeout(timeout, receiver).await { + Ok(_) => (StatusCode::OK, Bytes::new()), + Err(_) => { + on_timeout().await; + (StatusCode::REQUEST_TIMEOUT, "Request timed out".into()) + } + } +} diff --git a/src/http_relay/server.rs b/src/http_relay/server.rs index c812dd2..8cf43fe 100644 --- a/src/http_relay/server.rs +++ b/src/http_relay/server.rs @@ -110,12 +110,9 @@ pub struct HttpRelay { } impl HttpRelay { - /// Creates the HTTP router for the HTTP relay. - #[cfg(test)] - pub(crate) fn create_app(config: Config) -> Result<(Router, AppState)> { - let app_state = AppState::new(config); - - let app = Router::new() + /// Builds the router with all routes and middleware. + fn build_router(state: AppState) -> Router { + Router::new() .route( "/link/{id}", get(link::get_handler).post(link::post_handler), @@ -126,26 +123,28 @@ impl HttpRelay { ) .layer(CorsLayer::very_permissive()) .layer(TraceLayer::new_for_http()) - .with_state(app_state.clone()); + .with_state(state) + } + /// Creates the HTTP router for the HTTP relay. 
+ #[cfg(test)] + pub(crate) fn create_app(config: Config) -> Result<(Router, AppState)> { + let app_state = AppState::new(config); + let app = Self::build_router(app_state.clone()); Ok((app, app_state)) } + /// Creates a test server with the given config. Returns both the server and app state. + #[cfg(test)] + pub(crate) fn create_test_server(config: Config) -> (axum_test::TestServer, AppState) { + let (app, state) = Self::create_app(config).unwrap(); + let server = axum_test::TestServer::new(app).unwrap(); + (server, state) + } + async fn start(config: Config) -> Result { let app_state = AppState::new(config.clone()); - - let app = Router::new() - .route( - "/link/{id}", - get(link::get_handler).post(link::post_handler), - ) - .route( - "/link2/{id}", - get(link2::get_handler).post(link2::post_handler), - ) - .layer(CorsLayer::very_permissive()) - .layer(TraceLayer::new_for_http()) - .with_state(app_state.clone()); + let app = Self::build_router(app_state.clone()); let http_handle = Handle::new(); let shutdown_handle = http_handle.clone(); diff --git a/src/http_relay/waiting_list.rs b/src/http_relay/waiting_list.rs index 1f4ba6b..56ac25a 100644 --- a/src/http_relay/waiting_list.rs +++ b/src/http_relay/waiting_list.rs @@ -40,9 +40,9 @@ impl CachedValue { /// A list of waiting producers and consumers, plus cached values. #[derive(Default)] pub struct WaitingList { - pub pending_producers: HashMap, - pub pending_consumers: HashMap, - pub cache: HashMap, + pending_producers: HashMap, + pending_consumers: HashMap, + cache: HashMap, } impl WaitingList { @@ -97,6 +97,11 @@ impl WaitingList { .insert(id.to_string(), CachedValue::new(body, content_type, ttl)); } + /// Removes a value from the cache and returns it if it existed. + pub fn remove_cached(&mut self, id: &str) -> Option { + self.cache.remove(id) + } + /// Removes expired entries from the cache. Returns the number removed. 
pub fn cleanup_expired_cache(&mut self) -> usize { let before = self.cache.len(); From 195e0e25818e69c3c2933d20f90234636f04528b Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 16:11:34 +0100 Subject: [PATCH 04/40] improved readme --- LICENSE | 21 ++++ README.md | 210 ++++++++++++++++++++++++++++++++++++--- openapi.yaml | 162 ++++++++++++++++++++++++++++++ src/http_relay/server.rs | 4 +- src/main.rs | 8 +- 5 files changed, 386 insertions(+), 19 deletions(-) create mode 100644 LICENSE create mode 100644 openapi.yaml diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..7cb6a47 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2026 Pubky + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index e6c6e42..d628ada 100644 --- a/README.md +++ b/README.md @@ -1,26 +1,210 @@ -# HTTP Relay +# pubky-http-relay -A Rust implementation of _some_ of [Http relay spec](https://httprelay.io/). 
+A Rust implementation of a subset of the [HTTP Relay spec](https://httprelay.io/) +for asynchronous producer/consumer message passing. +## What is this? -## Example +An HTTP relay enables decoupled communication between distributed services. +Instead of direct synchronous calls, producers and consumers communicate through +relay endpoints, each waiting for their counterpart to arrive. + +**Use cases:** +- Connecting services that can't communicate directly +- Mobile apps that need resilient message delivery with retry support +- Decoupled microservice communication + +## Features + +- **Async producer/consumer model** - Producers POST data, consumers GET it +- **Two endpoint variants:** + - `/link/{id}` - Standard relay (10 min timeout) + - `/link2/{id}` - Mobile-friendly with caching (25s timeout, 5 min cache TTL) +- **Mobile resilience** - Cached responses allow retries after connection drops +- **Content-Type preservation** - Forwards producer's Content-Type to consumer +- **Configurable timeouts and caching** + +## Installation + +```bash +cargo install --path . 
+``` + +Or add as a dependency: + +```toml +[dependencies] +pubky-http-relay = { git = "https://github.com/pubky/pubky-http-relay" } +``` + +## Usage + +### As CLI + +```bash +# Default: bind to 0.0.0.0:8080 +pubky-http-relay + +# Custom configuration +pubky-http-relay --bind 127.0.0.1 --port 15412 --link2-cache-ttl 300 --link2-timeout 25 -vv +``` + +**Options:** + +| Flag | Description | Default | +|------|-------------|---------| +| `--bind ` | Bind address | `0.0.0.0` | +| `--port ` | HTTP port (0 = random) | `8080` | +| `--link2-cache-ttl ` | Cache TTL for link2 | `300` | +| `--link2-timeout ` | Link2 endpoint timeout | `25` | +| `-v` | Verbosity (repeat for more) | warn | +| `-q, --quiet` | Silence output | - | + +### As Library ```rust +use pubky_http_relay::HttpRelayBuilder; + #[tokio::main] -async fn main() { - let http_relay = http_relay::HttpRelay::builder() +async fn main() -> anyhow::Result<()> { + let relay = HttpRelayBuilder::default() .http_port(15412) .run() - .await - .unwrap(); + .await?; + + println!("Running at {}", relay.local_link_url()); + + tokio::signal::ctrl_c().await?; + relay.shutdown().await +} +``` + +## API + +### POST `/link/{id}` or `/link2/{id}` - println!( - "Running http relay {}", - http_relay.local_link_url().as_str() - ); +Producer sends a message. Waits for a consumer to retrieve it. + +```bash +curl -X POST http://localhost:8080/link/my-channel \ + -H "Content-Type: application/json" \ + -d '{"hello": "world"}' +``` + +**Responses:** +- `200 OK` - Consumer received the message +- `408 Request Timeout` - No consumer arrived in time + +### GET `/link/{id}` or `/link2/{id}` + +Consumer retrieves a message. Waits for a producer to send one. 
+ +```bash +curl http://localhost:8080/link/my-channel +``` - tokio::signal::ctrl_c().await.unwrap(); +**Responses:** +- `200 OK` - Returns producer's payload with original Content-Type +- `408 Request Timeout` - No producer arrived in time - http_relay.shutdown().await.unwrap(); +### Endpoint Differences + +| Aspect | `/link/{id}` | `/link2/{id}` | +|--------|--------------|---------------| +| Timeout | 10 minutes | 25 seconds | +| Caching | No | Yes (5 min TTL) | +| Use case | Backwards compatibility | **Recommended** | + +**Use `/link2` for new integrations.** It handles proxy timeouts gracefully and +supports retries via caching. The `/link` endpoint remains available for +backwards compatibility with existing clients. + +### Why Link2 Exists + +The original `/link` endpoint has a problem on mobile devices. When a mobile +app requests data from the relay and then gets backgrounded or killed by the +OS, the HTTP response never actually arrives on the device. From the relay's +perspective, the value was consumed successfully. But the mobile app never +received it—and when the user reopens the app, the value is gone. + +`/link2` solves this with two changes: + +1. **Caching**: After a successful delivery, the value is cached for 5 minutes. + If the mobile app was killed, it can retry and still receive the value. + +2. **Shorter timeout**: The 25-second timeout stays safely under typical proxy + timeouts (nginx, Cloudflare often use 30s), preventing unexpected connection drops. + +## Client Implementation Patterns + +Because `/link2` has a short timeout (25s), clients should implement retry +loops. The producer/consumer may not connect on the first attempt, and that's +expected behavior. 
+ +### Consumer: Retry Until Value Received + +The consumer loops until it successfully receives the producer's payload: + +```javascript +async function consumeFromRelay(channelId) { + while (true) { + const response = await fetch(`http://relay.example.com/link2/${channelId}`); + + if (response.status === 200) { + return await response.text(); // Success - got the value + } + + if (response.status === 408) { + continue; // Timeout - producer hasn't arrived yet, retry + } + + throw new Error(`Unexpected status: ${response.status}`); + } } ``` + +### Producer: Retry Until Consumer Receives + +The producer loops until a consumer successfully retrieves the message: + +```javascript +async function produceToRelay(channelId, data) { + while (true) { + const response = await fetch(`http://relay.example.com/link2/${channelId}`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(data), + }); + + if (response.status === 200) { + return; // Success - consumer got the value + } + + if (response.status === 408) { + continue; // Timeout - no consumer yet, retry + } + + throw new Error(`Unexpected status: ${response.status}`); + } +} +``` + +### Why Retry Loops? + +- **Short timeouts are intentional**: Proxies (nginx, cloudflare) often + have 30s timeouts. The 25s link2 timeout stays safely under this limit. +- **Cache enables resilience**: Once delivered, the value is cached for 5 min. + If a consumer's connection drops, they can retry and still receive it. +- **408 is not an error**: It just means the counterpart hasn't arrived yet. + Keep trying until they do. 
+ +## Development + +```bash +# Run tests +cargo test + +# Run with debug logging +RUST_LOG=debug cargo run +``` + diff --git a/openapi.yaml b/openapi.yaml new file mode 100644 index 0000000..8e4341f --- /dev/null +++ b/openapi.yaml @@ -0,0 +1,162 @@ +openapi: 3.0.3 +info: + title: pubky-http-relay + description: | + An HTTP relay for asynchronous producer/consumer message passing. + + Producers POST data to a channel, consumers GET it. Each party waits + for their counterpart to arrive. The relay facilitates decoupled + communication between distributed services. + + Two endpoint variants are available: + - `/link/{id}` - Standard relay with 10 minute timeout + - `/link2/{id}` - Mobile-friendly with caching (25s timeout, 5 min cache TTL) + version: 0.1.0 + license: + name: MIT + url: https://opensource.org/licenses/MIT + +servers: + - url: http://localhost:8080 + description: Local development server + +paths: + /link/{id}: + get: + summary: Consume a message + description: | + Consumer retrieves a message from the channel. Blocks until a producer + sends data or the timeout (10 minutes) is reached. + operationId: linkGet + tags: + - link + parameters: + - $ref: '#/components/parameters/channelId' + responses: + '200': + $ref: '#/components/responses/MessageReceived' + '408': + $ref: '#/components/responses/Timeout' + + post: + summary: Produce a message + description: | + Producer sends a message to the channel. Blocks until a consumer + retrieves the data or the timeout (10 minutes) is reached. + operationId: linkPost + tags: + - link + parameters: + - $ref: '#/components/parameters/channelId' + requestBody: + $ref: '#/components/requestBodies/Message' + responses: + '200': + $ref: '#/components/responses/MessageDelivered' + '408': + $ref: '#/components/responses/Timeout' + + /link2/{id}: + get: + summary: Consume a message (with caching) + description: | + Consumer retrieves a message from the channel. If a cached response + exists, returns immediately. 
Otherwise blocks until a producer sends + data or the timeout (25 seconds) is reached. + + Cached responses persist for 5 minutes (configurable), enabling + retries after connection drops. + operationId: link2Get + tags: + - link2 + parameters: + - $ref: '#/components/parameters/channelId' + responses: + '200': + $ref: '#/components/responses/MessageReceived' + '408': + $ref: '#/components/responses/Timeout' + + post: + summary: Produce a message (invalidates cache) + description: | + Producer sends a message to the channel. Invalidates any existing + cached response. Blocks until a consumer retrieves the data or the + timeout (25 seconds) is reached. + + After the first consumer retrieves the message, it is cached for + subsequent consumers (5 minute TTL). + operationId: link2Post + tags: + - link2 + parameters: + - $ref: '#/components/parameters/channelId' + requestBody: + $ref: '#/components/requestBodies/Message' + responses: + '200': + $ref: '#/components/responses/MessageDelivered' + '408': + $ref: '#/components/responses/Timeout' + +components: + parameters: + channelId: + name: id + in: path + required: true + description: Unique channel identifier. Producers and consumers using the same ID are paired. + schema: + type: string + example: my-channel + + requestBodies: + Message: + description: Message payload to send + required: true + content: + application/octet-stream: + schema: + type: string + format: binary + application/json: + schema: + type: object + example: + hello: world + text/plain: + schema: + type: string + example: Hello, world! 
+ + responses: + MessageReceived: + description: Message successfully retrieved from producer + headers: + Content-Type: + description: Content-Type from the producer's request (if provided) + schema: + type: string + content: + application/octet-stream: + schema: + type: string + format: binary + application/json: + schema: + type: object + text/plain: + schema: + type: string + + MessageDelivered: + description: Message successfully delivered to consumer + + Timeout: + description: Timeout waiting for counterpart (producer or consumer) + +tags: + - name: link + description: Standard relay endpoints (10 minute timeout, no caching) + - name: link2 + description: Mobile-friendly endpoints (25 second timeout, 5 minute cache TTL) diff --git a/src/http_relay/server.rs b/src/http_relay/server.rs index 8cf43fe..0eac272 100644 --- a/src/http_relay/server.rs +++ b/src/http_relay/server.rs @@ -22,7 +22,7 @@ use super::waiting_list::WaitingList; const DEFAULT_REQUEST_TIMEOUT: Duration = Duration::from_secs(10 * 60); /// The default time-to-live for cached values after first consumer retrieves them. -const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(30); +const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(5 * 60); /// The default timeout for link2 endpoints (shorter to avoid proxy timeouts like nginx). const DEFAULT_LINK2_TIMEOUT: Duration = Duration::from_secs(25); @@ -82,7 +82,7 @@ impl HttpRelayBuilder { self } - /// Configure the TTL for cached values (default: 30 seconds). + /// Configure the TTL for cached values (default: 5 minutes). /// Values remain available for this duration after the first consumer /// retrieves them. 
pub fn cache_ttl(mut self, ttl: Duration) -> Self { diff --git a/src/main.rs b/src/main.rs index 7d3e3f8..c077b85 100644 --- a/src/main.rs +++ b/src/main.rs @@ -21,9 +21,9 @@ struct Args { #[arg(short, long, default_value_t = 8080)] port: u16, - /// Cache TTL in seconds for retry support - #[arg(long, default_value_t = 30)] - cache_ttl: u64, + /// Cache TTL in seconds for retry support (link2 endpoint) + #[arg(long, default_value_t = 300)] + link2_cache_ttl: u64, /// Link2 endpoint timeout in seconds (shorter to avoid proxy timeouts) #[arg(long, default_value_t = 25)] @@ -47,7 +47,7 @@ async fn main() -> Result<()> { let relay = HttpRelayBuilder::default() .bind_address(args.bind) .http_port(args.port) - .cache_ttl(Duration::from_secs(args.cache_ttl)) + .cache_ttl(Duration::from_secs(args.link2_cache_ttl)) .link2_timeout(Duration::from_secs(args.link2_timeout)) .run() .await?; From 9607fba507f43a922a52cbfcc3afee14016fa07f Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 16:14:03 +0100 Subject: [PATCH 05/40] updated docs --- openapi.yaml | 37 ++++++++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/openapi.yaml b/openapi.yaml index 8e4341f..1c9b729 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -8,9 +8,15 @@ info: for their counterpart to arrive. The relay facilitates decoupled communication between distributed services. 
- Two endpoint variants are available: - - `/link/{id}` - Standard relay with 10 minute timeout - - `/link2/{id}` - Mobile-friendly with caching (25s timeout, 5 min cache TTL) + **Two endpoint variants are available:** + - `/link2/{id}` - **Recommended.** Short timeout (25s) with caching (5 min TTL) + - `/link/{id}` - Backwards compatibility only (10 min timeout, no caching) + + **Use `/link2` for new integrations.** The original `/link` endpoint has a + problem on mobile devices: when an app gets backgrounded or killed while + waiting, the response never arrives, but the relay considers it consumed. + `/link2` solves this with caching (retry and still get the value) and a + shorter timeout that works with proxy timeouts (nginx, Cloudflare use 30s). version: 0.1.0 license: name: MIT @@ -23,8 +29,10 @@ servers: paths: /link/{id}: get: - summary: Consume a message + summary: Consume a message (legacy) description: | + Legacy endpoint for backwards compatibility. Use `/link2` for new integrations. + Consumer retrieves a message from the channel. Blocks until a producer sends data or the timeout (10 minutes) is reached. operationId: linkGet @@ -39,8 +47,10 @@ paths: $ref: '#/components/responses/Timeout' post: - summary: Produce a message + summary: Produce a message (legacy) description: | + Legacy endpoint for backwards compatibility. Use `/link2` for new integrations. + Producer sends a message to the channel. Blocks until a consumer retrieves the data or the timeout (10 minutes) is reached. operationId: linkPost @@ -58,14 +68,16 @@ paths: /link2/{id}: get: - summary: Consume a message (with caching) + summary: Consume a message (recommended) description: | + **Recommended endpoint for new integrations.** + Consumer retrieves a message from the channel. If a cached response exists, returns immediately. Otherwise blocks until a producer sends data or the timeout (25 seconds) is reached. 
Cached responses persist for 5 minutes (configurable), enabling - retries after connection drops. + retries after connection drops (e.g., mobile app backgrounded). operationId: link2Get tags: - link2 @@ -78,14 +90,17 @@ paths: $ref: '#/components/responses/Timeout' post: - summary: Produce a message (invalidates cache) + summary: Produce a message (recommended) description: | + **Recommended endpoint for new integrations.** + Producer sends a message to the channel. Invalidates any existing cached response. Blocks until a consumer retrieves the data or the timeout (25 seconds) is reached. After the first consumer retrieves the message, it is cached for - subsequent consumers (5 minute TTL). + subsequent consumers (5 minute TTL), enabling retries after connection + drops (e.g., mobile app backgrounded). operationId: link2Post tags: - link2 @@ -157,6 +172,6 @@ components: tags: - name: link - description: Standard relay endpoints (10 minute timeout, no caching) + description: Legacy relay endpoints for backwards compatibility (10 min timeout, no caching) - name: link2 - description: Mobile-friendly endpoints (25 second timeout, 5 minute cache TTL) + description: Recommended endpoints with retry support (25s timeout, 5 min cache TTL) From 441f31bb9a2cc78a4e70a6430d00046ecece0ce4 Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 16:37:13 +0100 Subject: [PATCH 06/40] add security limits --- Cargo.toml | 1 + README.md | 5 ++ src/http_relay/link_handler.rs | 116 ++++++++++++++++++++++++++++++++- src/http_relay/server.rs | 43 +++++++++++- src/http_relay/waiting_list.rs | 76 +++++++++++++++++---- src/main.rs | 15 +++++ 6 files changed, 238 insertions(+), 18 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e958baf..72800b6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,7 @@ axum = "0.8.6" axum-server = "0.7.2" clap = { version = "4", features = ["derive"] } futures-util = "0.3.31" +lru = "0.12" tokio = { version = "1.47.1", features = ["full"] 
}
tracing = "0.1.41"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
diff --git a/README.md b/README.md
index d628ada..354f747 100644
--- a/README.md
+++ b/README.md
@@ -57,6 +57,9 @@ pubky-http-relay --bind 127.0.0.1 --port 15412 --link2-cache-ttl 300 --link2-tim
 | `--port <port>` | HTTP port (0 = random) | `8080` |
 | `--link2-cache-ttl <seconds>` | Cache TTL for link2 | `300` |
 | `--link2-timeout <seconds>` | Link2 endpoint timeout | `25` |
+| `--max-body-size <bytes>` | Max request body size | `10240` (10KB) |
+| `--max-pending <count>` | Max pending requests | `10000` |
+| `--max-cache <count>` | Max cached entries | `10000` |
 | `-v` | Verbosity (repeat for more) | warn |
 | `-q, --quiet` | Silence output | - |
 
@@ -94,6 +97,7 @@ curl -X POST http://localhost:8080/link/my-channel \
 **Responses:**
 - `200 OK` - Consumer received the message
 - `408 Request Timeout` - No consumer arrived in time
+- `503 Service Unavailable` - Server at capacity (max pending reached)
 
 ### GET `/link/{id}` or `/link2/{id}`
 
@@ -106,6 +110,7 @@ curl http://localhost:8080/link/my-channel
 **Responses:**
 - `200 OK` - Returns producer's payload with original Content-Type
 - `408 Request Timeout` - No producer arrived in time
+- `503 Service Unavailable` - Server at capacity (max pending reached)
 
 ### Endpoint Differences
 
diff --git a/src/http_relay/link_handler.rs b/src/http_relay/link_handler.rs
index 75b8d8c..1a7e851 100644
--- a/src/http_relay/link_handler.rs
+++ b/src/http_relay/link_handler.rs
@@ -10,7 +10,7 @@ use axum::{
 };
 
 use super::response::{await_consumer_message, await_producer_completion, build_response};
-use super::waiting_list::Message;
+use super::waiting_list::{LimitError, Message};
 use super::AppState;
 
 /// Configuration for link handler behavior.
@@ -71,7 +71,16 @@ pub async fn get_handler(
     };
 
     // No producer ready. Insert consumer into pending list and wait.
- let receiver = pending_list.insert_consumer(&id); + let receiver = match pending_list.insert_consumer(&id) { + Ok(r) => r, + Err(LimitError::PendingLimitReached) => { + return build_response( + StatusCode::SERVICE_UNAVAILABLE, + "Server at capacity".into(), + None, + ); + } + }; drop(pending_list); let timeout = get_timeout(&state, config); @@ -121,7 +130,12 @@ pub async fn post_handler( }; // No consumer ready. Insert producer into pending list and wait. - let receiver = pending_list.insert_producer(&channel, body, content_type); + let receiver = match pending_list.insert_producer(&channel, body, content_type) { + Ok(r) => r, + Err(LimitError::PendingLimitReached) => { + return (StatusCode::SERVICE_UNAVAILABLE, Bytes::from("Server at capacity")); + } + }; drop(pending_list); let timeout = get_timeout(&state, config); @@ -498,4 +512,100 @@ mod tests { assert_eq!(response.text(), "delayed data"); } } + + // Tests for resource limits + mod limit_tests { + use crate::http_relay::waiting_list::{LimitError, WaitingList}; + use axum::body::Bytes; + + #[test] + fn test_pending_limit_consumer() { + let mut list = WaitingList::new(2, 10); + + // First two consumers succeed + assert!(list.insert_consumer("c1").is_ok()); + assert!(list.insert_consumer("c2").is_ok()); + assert_eq!(list.pending_count(), 2); + + // Third consumer fails + assert_eq!( + list.insert_consumer("c3").unwrap_err(), + LimitError::PendingLimitReached + ); + } + + #[test] + fn test_pending_limit_producer() { + let mut list = WaitingList::new(2, 10); + + // First two producers succeed + assert!(list.insert_producer("p1", Bytes::new(), None).is_ok()); + assert!(list.insert_producer("p2", Bytes::new(), None).is_ok()); + assert_eq!(list.pending_count(), 2); + + // Third producer fails + assert_eq!( + list.insert_producer("p3", Bytes::new(), None).unwrap_err(), + LimitError::PendingLimitReached + ); + } + + #[test] + fn test_pending_limit_mixed() { + let mut list = WaitingList::new(2, 10); + + // One 
consumer and one producer + assert!(list.insert_consumer("c1").is_ok()); + assert!(list.insert_producer("p1", Bytes::new(), None).is_ok()); + assert_eq!(list.pending_count(), 2); + + // Both consumer and producer fail at limit + assert_eq!( + list.insert_consumer("c2").unwrap_err(), + LimitError::PendingLimitReached + ); + assert_eq!( + list.insert_producer("p2", Bytes::new(), None).unwrap_err(), + LimitError::PendingLimitReached + ); + } + + #[test] + fn test_cache_lru_eviction() { + let mut list = WaitingList::new(10, 2); + let ttl = std::time::Duration::from_secs(60); + + // Insert two entries + list.insert_cached("k1", Bytes::from("first"), None, ttl); + list.insert_cached("k2", Bytes::from("second"), None, ttl); + assert_eq!(list.cache_len(), 2); + + // Access k1 to make it recently used (k2 is now LRU) + assert!(list.get_cached("k1").is_some()); + + // Third insert should evict k2 (least recently used) + list.insert_cached("k3", Bytes::from("third"), None, ttl); + assert_eq!(list.cache_len(), 2); + + // k2 should be gone (was LRU), k1 and k3 should remain + assert!(list.get_cached("k1").is_some()); + assert!(list.get_cached("k2").is_none()); + assert!(list.get_cached("k3").is_some()); + } + + #[test] + fn test_removing_frees_capacity() { + let mut list = WaitingList::new(1, 10); + + // Insert consumer at limit + assert!(list.insert_consumer("c1").is_ok()); + assert!(list.insert_consumer("c2").is_err()); + + // Remove it + list.remove_consumer("c1"); + + // Now we can insert again + assert!(list.insert_consumer("c2").is_ok()); + } + } } diff --git a/src/http_relay/server.rs b/src/http_relay/server.rs index 0eac272..8964429 100644 --- a/src/http_relay/server.rs +++ b/src/http_relay/server.rs @@ -8,7 +8,7 @@ use std::{ use anyhow::Result; -use axum::{routing::get, Router}; +use axum::{extract::DefaultBodyLimit, routing::get, Router}; use axum_server::Handle; use tokio::sync::Mutex; @@ -27,6 +27,15 @@ const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(5 * 
60); /// The default timeout for link2 endpoints (shorter to avoid proxy timeouts like nginx). const DEFAULT_LINK2_TIMEOUT: Duration = Duration::from_secs(25); +/// Default maximum request body size (10KB). +const DEFAULT_MAX_BODY_SIZE: usize = 10 * 1024; + +/// Default maximum pending requests (producers + consumers). +const DEFAULT_MAX_PENDING: usize = 10_000; + +/// Default maximum cached entries. +const DEFAULT_MAX_CACHE: usize = 10_000; + #[derive(Clone)] pub(crate) struct AppState { pub config: Config, @@ -35,9 +44,10 @@ pub(crate) struct AppState { impl AppState { pub fn new(config: Config) -> Self { + let waiting_list = WaitingList::new(config.max_pending, config.max_cache); Self { config, - pending_list: Arc::new(Mutex::new(WaitingList::default())), + pending_list: Arc::new(Mutex::new(waiting_list)), } } } @@ -51,6 +61,12 @@ pub(crate) struct Config { pub cache_ttl: Duration, /// Timeout for link2 endpoints (shorter to avoid proxy timeouts). pub link2_timeout: Duration, + /// Maximum request body size in bytes. + pub max_body_size: usize, + /// Maximum pending requests (producers + consumers combined). + pub max_pending: usize, + /// Maximum cached entries. + pub max_cache: usize, } impl Default for Config { @@ -61,6 +77,9 @@ impl Default for Config { request_timeout: DEFAULT_REQUEST_TIMEOUT, cache_ttl: DEFAULT_CACHE_TTL, link2_timeout: DEFAULT_LINK2_TIMEOUT, + max_body_size: DEFAULT_MAX_BODY_SIZE, + max_pending: DEFAULT_MAX_PENDING, + max_cache: DEFAULT_MAX_CACHE, } } } @@ -97,6 +116,24 @@ impl HttpRelayBuilder { self } + /// Configure the maximum request body size (default: 10KB). + pub fn max_body_size(mut self, size: usize) -> Self { + self.0.max_body_size = size; + self + } + + /// Configure the maximum pending requests (default: 10000). + pub fn max_pending(mut self, max: usize) -> Self { + self.0.max_pending = max; + self + } + + /// Configure the maximum cached entries (default: 10000). 
+    pub fn max_cache(mut self, max: usize) -> Self {
+        self.0.max_cache = max;
+        self
+    }
+
     /// Start running an HTTP relay.
     pub async fn run(self) -> Result<HttpRelay> {
         HttpRelay::start(self.0).await
@@ -112,6 +149,7 @@ pub struct HttpRelay {
 impl HttpRelay {
     /// Builds the router with all routes and middleware.
     fn build_router(state: AppState) -> Router {
+        let max_body_size = state.config.max_body_size;
         Router::new()
             .route(
                 "/link/{id}",
@@ -121,6 +159,7 @@ impl HttpRelay {
                 "/link2/{id}",
                 get(link2::get_handler).post(link2::post_handler),
             )
+            .layer(DefaultBodyLimit::max(max_body_size))
             .layer(CorsLayer::very_permissive())
             .layer(TraceLayer::new_for_http())
             .with_state(state)
diff --git a/src/http_relay/waiting_list.rs b/src/http_relay/waiting_list.rs
index 56ac25a..05d32ef 100644
--- a/src/http_relay/waiting_list.rs
+++ b/src/http_relay/waiting_list.rs
@@ -1,11 +1,13 @@
 use std::collections::HashMap;
+use std::num::NonZeroUsize;
 use std::time::Instant;
 
 use axum::body::Bytes;
+use lru::LruCache;
 use tokio::sync::oneshot;
 
 /// A message containing body and optional content type.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct Message {
     pub body: Bytes,
     pub content_type: Option<String>,
@@ -38,14 +40,44 @@ impl CachedValue {
     }
 }
 
 /// A list of waiting producers and consumers, plus cached values.
-#[derive(Default)]
 pub struct WaitingList {
     pending_producers: HashMap<String, WaitingProducer>,
     pending_consumers: HashMap<String, WaitingConsumer>,
-    cache: HashMap<String, CachedValue>,
+    cache: LruCache<String, CachedValue>,
+    max_pending: usize,
+}
+
+impl Default for WaitingList {
+    fn default() -> Self {
+        Self::new(10_000, 10_000)
+    }
+}
+
+/// Error returned when a limit is reached.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum LimitError {
+    /// Maximum pending requests reached.
+    PendingLimitReached,
 }
 
 impl WaitingList {
+    /// Creates a new WaitingList with the specified limits.
+    pub fn new(max_pending: usize, max_cache: usize) -> Self {
+        // LruCache requires NonZeroUsize, use 1 as minimum if 0 is passed
+        let cache_cap = NonZeroUsize::new(max_cache).unwrap_or(NonZeroUsize::MIN);
+        Self {
+            pending_producers: HashMap::new(),
+            pending_consumers: HashMap::new(),
+            cache: LruCache::new(cache_cap),
+            max_pending,
+        }
+    }
+
+    /// Returns the current count of pending requests (producers + consumers).
+    pub fn pending_count(&self) -> usize {
+        self.pending_producers.len() + self.pending_consumers.len()
+    }
+
     pub fn remove_producer(&mut self, id: &str) -> Option<WaitingProducer> {
         self.pending_producers.remove(id)
     }
@@ -55,24 +87,31 @@ impl WaitingList {
         id: &str,
         body: Bytes,
         content_type: Option<String>,
-    ) -> oneshot::Receiver<()> {
+    ) -> Result<oneshot::Receiver<()>, LimitError> {
+        if self.pending_count() >= self.max_pending {
+            return Err(LimitError::PendingLimitReached);
+        }
         let (producer, completion_receiver) = WaitingProducer::new(body, content_type);
         self.pending_producers.insert(id.to_string(), producer);
-        completion_receiver
+        Ok(completion_receiver)
     }
 
     pub fn remove_consumer(&mut self, id: &str) -> Option<WaitingConsumer> {
         self.pending_consumers.remove(id)
     }
 
-    pub fn insert_consumer(&mut self, id: &str) -> oneshot::Receiver<Message> {
+    pub fn insert_consumer(&mut self, id: &str) -> Result<oneshot::Receiver<Message>, LimitError> {
+        if self.pending_count() >= self.max_pending {
+            return Err(LimitError::PendingLimitReached);
+        }
         let (consumer, message_receiver) = WaitingConsumer::new();
         self.pending_consumers.insert(id.to_string(), consumer);
-        message_receiver
+        Ok(message_receiver)
     }
 
     /// Gets a cached value if it exists and hasn't expired.
-    pub fn get_cached(&self, id: &str) -> Option<Message> {
+    /// Accessing a value promotes it in the LRU cache.
+    pub fn get_cached(&mut self, id: &str) -> Option<Message> {
         self.cache.get(id).and_then(|cached| {
             if cached.is_expired() {
                 None
@@ -86,6 +125,7 @@
     }
 
     /// Inserts a value into the cache with the given TTL.
+    /// If the cache is full, automatically evicts the least recently used entry.
     pub fn insert_cached(
         &mut self,
         id: &str,
@@ -94,19 +134,29 @@
         ttl: std::time::Duration,
     ) {
         self.cache
-            .insert(id.to_string(), CachedValue::new(body, content_type, ttl));
+            .push(id.to_string(), CachedValue::new(body, content_type, ttl));
     }
 
     /// Removes a value from the cache and returns it if it existed.
     pub fn remove_cached(&mut self, id: &str) -> Option<CachedValue> {
-        self.cache.remove(id)
+        self.cache.pop(id)
     }
 
     /// Removes expired entries from the cache. Returns the number removed.
     pub fn cleanup_expired_cache(&mut self) -> usize {
-        let before = self.cache.len();
-        self.cache.retain(|_, v| !v.is_expired());
-        before - self.cache.len()
+        // Collect expired keys first to avoid borrow issues
+        let expired_keys: Vec<String> = self
+            .cache
+            .iter()
+            .filter(|(_, v)| v.is_expired())
+            .map(|(k, _)| k.clone())
+            .collect();
+
+        let count = expired_keys.len();
+        for key in expired_keys {
+            self.cache.pop(&key);
+        }
+        count
     }
 }
diff --git a/src/main.rs b/src/main.rs
index c077b85..e44dab7 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -29,6 +29,18 @@ struct Args {
     #[arg(long, default_value_t = 25)]
     link2_timeout: u64,
 
+    /// Maximum request body size in bytes (default: 10KB)
+    #[arg(long, default_value_t = 10 * 1024)]
+    max_body_size: usize,
+
+    /// Maximum pending requests (producers + consumers combined, default: 10000)
+    #[arg(long, default_value_t = 10_000)]
+    max_pending: usize,
+
+    /// Maximum cached entries (default: 10000)
+    #[arg(long, default_value_t = 10_000)]
+    max_cache: usize,
+
     /// Verbosity level: -v (info), -vv (debug), -vvv (trace)
     #[arg(short, long, action = clap::ArgAction::Count)]
     verbose: u8,
@@ -49,6 +61,9 @@ async fn main() -> Result<()> {
         .http_port(args.port)
         .cache_ttl(Duration::from_secs(args.link2_cache_ttl))
         .link2_timeout(Duration::from_secs(args.link2_timeout))
+        .max_body_size(args.max_body_size)
+        .max_pending(args.max_pending)
+
.max_cache(args.max_cache) .run() .await?; From e04995ad764c52245d9096bfab5942514f9ce69c Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 16:45:16 +0100 Subject: [PATCH 07/40] added some docs and id length limit --- src/http_relay/link_handler.rs | 48 ++++++++++++++++++++++++++++++++++ src/http_relay/server.rs | 7 +++++ 2 files changed, 55 insertions(+) diff --git a/src/http_relay/link_handler.rs b/src/http_relay/link_handler.rs index 1a7e851..d5cb4ff 100644 --- a/src/http_relay/link_handler.rs +++ b/src/http_relay/link_handler.rs @@ -1,4 +1,14 @@ //! Generic link handler that can operate with or without caching. +//! +//! # Security +//! +//! Channel IDs act as shared secrets. Anyone who knows an ID can read/write to that +//! channel. IDs must be cryptographically random (e.g., 128-bit UUIDs). Predictable +//! IDs allow attackers to intercept messages. +//! +//! When caching is enabled (`/link2`), delivered messages stay in plaintext memory +//! for the TTL duration. Multiple consumers can retrieve the same cached value. +//! Do not relay sensitive one-time credentials via cached endpoints. use std::time::Duration; @@ -13,6 +23,10 @@ use super::response::{await_consumer_message, await_producer_completion, build_r use super::waiting_list::{LimitError, Message}; use super::AppState; +/// Maximum allowed length for channel IDs (in bytes). +/// Prevents DoS via extremely long IDs used as HashMap keys. +const MAX_CHANNEL_ID_LENGTH: usize = 256; + /// Configuration for link handler behavior. 
#[derive(Clone, Copy)] pub struct LinkConfig { @@ -47,6 +61,10 @@ pub async fn get_handler( State(state): State, config: LinkConfig, ) -> Response { + if id.len() > MAX_CHANNEL_ID_LENGTH { + return build_response(StatusCode::BAD_REQUEST, "Channel ID too long".into(), None); + } + let mut pending_list = state.pending_list.lock().await; // Check cache if caching is enabled @@ -99,6 +117,10 @@ pub async fn post_handler( body: Bytes, config: LinkConfig, ) -> impl IntoResponse { + if channel.len() > MAX_CHANNEL_ID_LENGTH { + return (StatusCode::BAD_REQUEST, Bytes::from("Channel ID too long")); + } + let content_type = headers .get(header::CONTENT_TYPE) .and_then(|v| v.to_str().ok()) @@ -515,6 +537,8 @@ mod tests { // Tests for resource limits mod limit_tests { + use super::*; + use crate::http_relay::link_handler::MAX_CHANNEL_ID_LENGTH; use crate::http_relay::waiting_list::{LimitError, WaitingList}; use axum::body::Bytes; @@ -607,5 +631,29 @@ mod tests { // Now we can insert again assert!(list.insert_consumer("c2").is_ok()); } + + #[tokio::test] + async fn test_channel_id_too_long() { + let (server, _state) = HttpRelay::create_test_server(Config::default()); + + // Create an ID that exceeds the limit + let long_id = "x".repeat(MAX_CHANNEL_ID_LENGTH + 1); + + // GET should reject long IDs + let response = server.get(&format!("/link/{}", long_id)).await; + assert_eq!(response.status_code(), 400); + assert_eq!(response.text(), "Channel ID too long"); + + // POST should reject long IDs + let body = axum::body::Bytes::from_static(b"test"); + let response = server.post(&format!("/link/{}", long_id)).bytes(body).await; + assert_eq!(response.status_code(), 400); + assert_eq!(response.text(), "Channel ID too long"); + + // IDs at the limit should work + let ok_id = "x".repeat(MAX_CHANNEL_ID_LENGTH); + let response = server.get(&format!("/link2/{}", ok_id)).await; + assert_ne!(response.status_code(), 400); // Should timeout, not reject + } } } diff --git 
a/src/http_relay/server.rs b/src/http_relay/server.rs index 8964429..063a4e2 100644 --- a/src/http_relay/server.rs +++ b/src/http_relay/server.rs @@ -1,4 +1,11 @@ //! HTTP relay server implementation. +//! +//! # CORS Configuration +//! +//! This server uses permissive CORS (`Access-Control-Allow-Origin: *`) to allow +//! web browsers to communicate from any origin. This is intentional for public +//! relay deployments. For restricted environments, modify `CorsLayer::very_permissive()` +//! in `build_router()`. use std::{ net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener}, From 39a55d632ee5cea54a0eaac8cb22d74bb2d0475a Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 16:52:55 +0100 Subject: [PATCH 08/40] added github checks --- .github/workflows/ci.yml | 41 ++++++++++++++++++++++++++++++++++ src/http_relay/link_handler.rs | 22 ++++++------------ src/http_relay/mod.rs | 2 +- src/http_relay/response.rs | 6 ++++- src/http_relay/server.rs | 5 +++-- 5 files changed, 57 insertions(+), 19 deletions(-) create mode 100644 .github/workflows/ci.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..e1279c6 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,41 @@ +name: CI + +on: + push: + branches: [main, master] + pull_request: + branches: [main, master] + +env: + CARGO_TERM_COLOR: always + +jobs: + test: + name: Test + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + - run: cargo test + + format: + name: Format + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt + - run: cargo fmt --check + + clippy: + name: Clippy + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + components: clippy + - uses: Swatinem/rust-cache@v2 + - run: cargo clippy -- -D warnings diff --git 
a/src/http_relay/link_handler.rs b/src/http_relay/link_handler.rs index d5cb4ff..f775389 100644 --- a/src/http_relay/link_handler.rs +++ b/src/http_relay/link_handler.rs @@ -143,10 +143,7 @@ pub async fn post_handler( state.config.cache_ttl, ); } - let msg = Message { - body, - content_type, - }; + let msg = Message { body, content_type }; let _ = consumer.message_sender.send(msg); return (StatusCode::OK, Bytes::new()); }; @@ -155,7 +152,10 @@ pub async fn post_handler( let receiver = match pending_list.insert_producer(&channel, body, content_type) { Ok(r) => r, Err(LimitError::PendingLimitReached) => { - return (StatusCode::SERVICE_UNAVAILABLE, Bytes::from("Server at capacity")); + return ( + StatusCode::SERVICE_UNAVAILABLE, + Bytes::from("Server at capacity"), + ); } }; drop(pending_list); @@ -441,11 +441,7 @@ mod tests { tokio::join!(producer1, consumer1); // Verify first value is cached - let cached = state - .pending_list - .lock() - .await - .get_cached("overwrite-test"); + let cached = state.pending_list.lock().await.get_cached("overwrite-test"); assert_eq!(cached.unwrap().body.as_ref(), b"first value"); // Second producer posts new value - should overwrite and wait @@ -461,11 +457,7 @@ mod tests { assert_eq!(response.text(), "second value"); // Verify new value is cached - let cached = state - .pending_list - .lock() - .await - .get_cached("overwrite-test"); + let cached = state.pending_list.lock().await.get_cached("overwrite-test"); assert_eq!(cached.unwrap().body.as_ref(), b"second value"); } diff --git a/src/http_relay/mod.rs b/src/http_relay/mod.rs index ff26fec..626700c 100644 --- a/src/http_relay/mod.rs +++ b/src/http_relay/mod.rs @@ -6,7 +6,7 @@ mod server; mod waiting_list; pub(crate) use link_handler::{link, link2}; -pub use server::{HttpRelay, HttpRelayBuilder}; pub(crate) use server::AppState; #[cfg(test)] pub(crate) use server::Config; +pub use server::{HttpRelay, HttpRelayBuilder}; diff --git a/src/http_relay/response.rs 
b/src/http_relay/response.rs index 4fd6314..5aa45e0 100644 --- a/src/http_relay/response.rs +++ b/src/http_relay/response.rs @@ -41,7 +41,11 @@ where Ok(Err(_)) => build_response(StatusCode::NOT_FOUND, "Not Found".into(), None), Err(_) => { on_timeout().await; - build_response(StatusCode::REQUEST_TIMEOUT, "Request timed out".into(), None) + build_response( + StatusCode::REQUEST_TIMEOUT, + "Request timed out".into(), + None, + ) } } } diff --git a/src/http_relay/server.rs b/src/http_relay/server.rs index 063a4e2..2690135 100644 --- a/src/http_relay/server.rs +++ b/src/http_relay/server.rs @@ -22,8 +22,8 @@ use tokio::sync::Mutex; use tower_http::{cors::CorsLayer, trace::TraceLayer}; use url::Url; -use super::{link, link2}; use super::waiting_list::WaitingList; +use super::{link, link2}; /// The timeout for a request to be considered unused. const DEFAULT_REQUEST_TIMEOUT: Duration = Duration::from_secs(10 * 60); @@ -195,7 +195,8 @@ impl HttpRelay { let http_handle = Handle::new(); let shutdown_handle = http_handle.clone(); - let http_listener = TcpListener::bind(SocketAddr::new(config.bind_address, config.http_port))?; + let http_listener = + TcpListener::bind(SocketAddr::new(config.bind_address, config.http_port))?; let http_address = http_listener.local_addr()?; tokio::spawn(async move { From 57633f14952053017e886456c2c22d3096390983 Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 17:18:16 +0100 Subject: [PATCH 09/40] a few fixes --- src/http_relay/link_handler.rs | 67 +++++++++++++++------------------- 1 file changed, 29 insertions(+), 38 deletions(-) diff --git a/src/http_relay/link_handler.rs b/src/http_relay/link_handler.rs index f775389..4171a9d 100644 --- a/src/http_relay/link_handler.rs +++ b/src/http_relay/link_handler.rs @@ -21,6 +21,7 @@ use axum::{ use super::response::{await_consumer_message, await_producer_completion, build_response}; use super::waiting_list::{LimitError, Message}; +use super::server::Config; use super::AppState; /// 
Maximum allowed length for channel IDs (in bytes). @@ -28,30 +29,29 @@ use super::AppState; const MAX_CHANNEL_ID_LENGTH: usize = 256; /// Configuration for link handler behavior. -#[derive(Clone, Copy)] +#[derive(Clone)] pub struct LinkConfig { /// Whether to use caching for this endpoint. pub caching_enabled: bool, + /// Timeout for requests on this endpoint. + pub timeout: Duration, } impl LinkConfig { /// Standard link endpoint without caching. - pub const STANDARD: Self = Self { - caching_enabled: false, - }; + pub fn standard(config: &Config) -> Self { + Self { + caching_enabled: false, + timeout: config.request_timeout, + } + } /// Link2 endpoint with caching enabled. - pub const WITH_CACHE: Self = Self { - caching_enabled: true, - }; -} - -/// Returns the timeout duration based on link config. -fn get_timeout(state: &AppState, config: LinkConfig) -> Duration { - if config.caching_enabled { - state.config.link2_timeout - } else { - state.config.request_timeout + pub fn with_cache(config: &Config) -> Self { + Self { + caching_enabled: true, + timeout: config.link2_timeout, + } } } @@ -67,7 +67,6 @@ pub async fn get_handler( let mut pending_list = state.pending_list.lock().await; - // Check cache if caching is enabled if config.caching_enabled { if let Some(cached) = pending_list.get_cached(&id) { return build_response(StatusCode::OK, cached.body, cached.content_type); @@ -75,7 +74,6 @@ pub async fn get_handler( } if let Some(producer) = pending_list.remove_producer(&id) { - // Cache the response if caching is enabled if config.caching_enabled { pending_list.insert_cached( &id, @@ -88,7 +86,6 @@ pub async fn get_handler( return build_response(StatusCode::OK, producer.body, producer.content_type); }; - // No producer ready. Insert consumer into pending list and wait. 
let receiver = match pending_list.insert_consumer(&id) { Ok(r) => r, Err(LimitError::PendingLimitReached) => { @@ -101,9 +98,8 @@ pub async fn get_handler( }; drop(pending_list); - let timeout = get_timeout(&state, config); let pending_list = state.pending_list.clone(); - await_consumer_message(receiver, timeout, || async move { + await_consumer_message(receiver, config.timeout, || async move { pending_list.lock().await.remove_consumer(&id); }) .await @@ -128,13 +124,11 @@ pub async fn post_handler( let mut pending_list = state.pending_list.lock().await; - // Invalidate cache if caching is enabled if config.caching_enabled { pending_list.remove_cached(&channel); } if let Some(consumer) = pending_list.remove_consumer(&channel) { - // Cache the response if caching is enabled if config.caching_enabled { pending_list.insert_cached( &channel, @@ -148,7 +142,6 @@ pub async fn post_handler( return (StatusCode::OK, Bytes::new()); }; - // No consumer ready. Insert producer into pending list and wait. 
let receiver = match pending_list.insert_producer(&channel, body, content_type) { Ok(r) => r, Err(LimitError::PendingLimitReached) => { @@ -160,47 +153,48 @@ pub async fn post_handler( }; drop(pending_list); - let timeout = get_timeout(&state, config); let pending_list = state.pending_list.clone(); - await_producer_completion(receiver, timeout, || async move { + await_producer_completion(receiver, config.timeout, || async move { pending_list.lock().await.remove_producer(&channel); }) .await } -// Thin wrapper handlers for the /link/ endpoint (no caching) pub mod link { use super::*; - pub async fn get_handler(path: Path, state: State) -> Response { - super::get_handler(path, state, LinkConfig::STANDARD).await + pub async fn get_handler(path: Path, State(state): State) -> Response { + let config = LinkConfig::standard(&state.config); + super::get_handler(path, State(state), config).await } pub async fn post_handler( path: Path, - state: State, + State(state): State, headers: HeaderMap, body: Bytes, ) -> impl IntoResponse { - super::post_handler(path, state, headers, body, LinkConfig::STANDARD).await + let config = LinkConfig::standard(&state.config); + super::post_handler(path, State(state), headers, body, config).await } } -// Thin wrapper handlers for the /link2/ endpoint (with caching) pub mod link2 { use super::*; - pub async fn get_handler(path: Path, state: State) -> Response { - super::get_handler(path, state, LinkConfig::WITH_CACHE).await + pub async fn get_handler(path: Path, State(state): State) -> Response { + let config = LinkConfig::with_cache(&state.config); + super::get_handler(path, State(state), config).await } pub async fn post_handler( path: Path, - state: State, + State(state): State, headers: HeaderMap, body: Bytes, ) -> impl IntoResponse { - super::post_handler(path, state, headers, body, LinkConfig::WITH_CACHE).await + let config = LinkConfig::with_cache(&state.config); + super::post_handler(path, State(state), headers, body, config).await } } 
@@ -210,7 +204,6 @@ mod tests { use crate::http_relay::{Config, HttpRelay}; - // Tests for standard link endpoint (no caching) mod link_tests { use super::*; @@ -337,7 +330,6 @@ mod tests { } } - // Tests for link2 endpoint (with caching) mod link2_tests { use super::*; @@ -527,7 +519,6 @@ mod tests { } } - // Tests for resource limits mod limit_tests { use super::*; use crate::http_relay::link_handler::MAX_CHANNEL_ID_LENGTH; From ff725d06fd68f54382af1617c58cc64e07f9e121 Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 17:22:08 +0100 Subject: [PATCH 10/40] fmt --- src/http_relay/link_handler.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/http_relay/link_handler.rs b/src/http_relay/link_handler.rs index 4171a9d..b5c881d 100644 --- a/src/http_relay/link_handler.rs +++ b/src/http_relay/link_handler.rs @@ -20,8 +20,8 @@ use axum::{ }; use super::response::{await_consumer_message, await_producer_completion, build_response}; -use super::waiting_list::{LimitError, Message}; use super::server::Config; +use super::waiting_list::{LimitError, Message}; use super::AppState; /// Maximum allowed length for channel IDs (in bytes). From 777bcff5d6877fd7713cef496f6d7c2c044f9368 Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 17:34:00 +0100 Subject: [PATCH 11/40] dependency updates --- Cargo.toml | 22 +++++++++++----------- README.md | 12 ++++++------ src/http_relay/server.rs | 3 ++- 3 files changed, 19 insertions(+), 18 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 72800b6..99141de 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,9 +1,9 @@ [package] name = "http-relay" description = "A Rust implementation of _some_ of [Http relay spec](https://httprelay.io/)." 
-version = "0.5.1" +version = "0.6.0" edition = "2021" -authors = ["SeverinAlexB", "SHAcollision", "Nuh"] +authors = ["SeverinAlexB"] license = "MIT" homepage = "https://github.com/pubky/pubky-http-relay" repository = "https://github.com/pubky/pubky-http-relay" @@ -11,21 +11,21 @@ keywords = ["httprelay", "http", "relay"] categories = ["web-programming"] [dependencies] -anyhow = "1.0.99" -axum = "0.8.6" -axum-server = "0.7.2" +anyhow = "1.0.100" +axum = "0.8.8" +axum-server = "0.8.0" clap = { version = "4", features = ["derive"] } futures-util = "0.3.31" -lru = "0.12" -tokio = { version = "1.47.1", features = ["full"] } -tracing = "0.1.41" +lru = "0.16.3" +tokio = { version = "1.49.0", features = ["full"] } +tracing = "0.1.44" tracing-subscriber = { version = "0.3", features = ["env-filter"] } -url = "2.5.4" -tower-http = { version = "0.6.6", features = ["cors", "trace"] } +url = "2.5.8" +tower-http = { version = "0.6.8", features = ["cors", "trace"] } [[bin]] name = "http-relay" path = "src/main.rs" [dev-dependencies] -axum-test = "17.3.0" +axum-test = "18.7.0" diff --git a/README.md b/README.md index 354f747..2f0c0d9 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# pubky-http-relay +# http-relay A Rust implementation of a subset of the [HTTP Relay spec](https://httprelay.io/) for asynchronous producer/consumer message passing. @@ -27,14 +27,14 @@ relay endpoints, each waiting for their counterpart to arrive. ## Installation ```bash -cargo install --path . 
+cargo install http-relay ``` Or add as a dependency: ```toml [dependencies] -pubky-http-relay = { git = "https://github.com/pubky/pubky-http-relay" } +http-relay = "0.6" ``` ## Usage @@ -43,10 +43,10 @@ pubky-http-relay = { git = "https://github.com/pubky/pubky-http-relay" } ```bash # Default: bind to 0.0.0.0:8080 -pubky-http-relay +http-relay # Custom configuration -pubky-http-relay --bind 127.0.0.1 --port 15412 --link2-cache-ttl 300 --link2-timeout 25 -vv +http-relay --bind 127.0.0.1 --port 15412 --link2-cache-ttl 300 --link2-timeout 25 -vv ``` **Options:** @@ -66,7 +66,7 @@ pubky-http-relay --bind 127.0.0.1 --port 15412 --link2-cache-ttl 300 --link2-tim ### As Library ```rust -use pubky_http_relay::HttpRelayBuilder; +use http_relay::HttpRelayBuilder; #[tokio::main] async fn main() -> anyhow::Result<()> { diff --git a/src/http_relay/server.rs b/src/http_relay/server.rs index 2690135..1d07753 100644 --- a/src/http_relay/server.rs +++ b/src/http_relay/server.rs @@ -149,7 +149,7 @@ impl HttpRelayBuilder { /// An implementation of _some_ of [Http relay spec](https://httprelay.io/). 
pub struct HttpRelay { - pub(crate) http_handle: Handle, + pub(crate) http_handle: Handle, http_address: SocketAddr, } @@ -201,6 +201,7 @@ impl HttpRelay { tokio::spawn(async move { axum_server::from_tcp(http_listener) + .expect("Failed to create server from TCP listener") .handle(http_handle.clone()) .serve(app.into_make_service()) .await From d2e4a28e00817b10c399ae65c0cdf8ee4ed8a7fe Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 17:36:59 +0100 Subject: [PATCH 12/40] removed expect --- src/http_relay/server.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/http_relay/server.rs b/src/http_relay/server.rs index 1d07753..96e6c40 100644 --- a/src/http_relay/server.rs +++ b/src/http_relay/server.rs @@ -199,9 +199,9 @@ impl HttpRelay { TcpListener::bind(SocketAddr::new(config.bind_address, config.http_port))?; let http_address = http_listener.local_addr()?; + let server = axum_server::from_tcp(http_listener)?; tokio::spawn(async move { - axum_server::from_tcp(http_listener) - .expect("Failed to create server from TCP listener") + server .handle(http_handle.clone()) .serve(app.into_make_service()) .await From 0f91f65b176afaaacf82c71ceaa1d6d21c5c731d Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 17:44:38 +0100 Subject: [PATCH 13/40] added release file --- .github/workflows/release.yml | 82 +++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 .github/workflows/release.yml diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..7130378 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,82 @@ +name: Release + +on: + push: + tags: ['v*'] + pull_request: + branches: [main, master] + +env: + CARGO_TERM_COLOR: always + +jobs: + build: + name: Build ${{ matrix.target }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - target: x86_64-unknown-linux-gnu + os: ubuntu-latest + - 
target: x86_64-apple-darwin + os: macos-latest + - target: aarch64-apple-darwin + os: macos-latest + - target: x86_64-pc-windows-msvc + os: windows-latest + + steps: + - uses: actions/checkout@v4 + + - uses: dtolnay/rust-toolchain@stable + with: + targets: ${{ matrix.target }} + + - uses: Swatinem/rust-cache@v2 + with: + key: ${{ matrix.target }} + + - name: Build + run: cargo build --release --target ${{ matrix.target }} + + - name: Package (Unix) + if: matrix.os != 'windows-latest' + run: | + cd target/${{ matrix.target }}/release + tar -czvf ../../../http-relay-${{ matrix.target }}.tar.gz http-relay + + - name: Package (Windows) + if: matrix.os == 'windows-latest' + run: | + cd target/${{ matrix.target }}/release + 7z a ../../../http-relay-${{ matrix.target }}.zip http-relay.exe + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: http-relay-${{ matrix.target }} + path: http-relay-${{ matrix.target }}.* + + release: + name: Create Release + needs: build + runs-on: ubuntu-latest + if: startsWith(github.ref, 'refs/tags/') + permissions: + contents: write + + steps: + - uses: actions/checkout@v4 + + - name: Download artifacts + uses: actions/download-artifact@v4 + with: + path: artifacts + merge-multiple: true + + - name: Create Release + uses: softprops/action-gh-release@v2 + with: + files: artifacts/* + generate_release_notes: true From 6da72877bb59052657c533eb8a3675019c1bc948 Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 17:49:17 +0100 Subject: [PATCH 14/40] more release targets --- .github/workflows/release.yml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7130378..bfebf48 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -19,12 +19,20 @@ jobs: include: - target: x86_64-unknown-linux-gnu os: ubuntu-latest + - target: x86_64-unknown-linux-musl + os: ubuntu-latest + musl: true + - target: 
aarch64-unknown-linux-gnu + os: ubuntu-latest + cross: true - target: x86_64-apple-darwin os: macos-latest - target: aarch64-apple-darwin os: macos-latest - target: x86_64-pc-windows-msvc os: windows-latest + - target: aarch64-pc-windows-msvc + os: windows-latest steps: - uses: actions/checkout@v4 @@ -37,6 +45,17 @@ jobs: with: key: ${{ matrix.target }} + - name: Install musl-tools + if: matrix.musl + run: sudo apt-get update && sudo apt-get install -y musl-tools + + - name: Install cross-compiler (Linux ARM) + if: matrix.cross + run: | + sudo apt-get update + sudo apt-get install -y gcc-aarch64-linux-gnu + echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc" >> $GITHUB_ENV + - name: Build run: cargo build --release --target ${{ matrix.target }} From 50e0fba4c8a7603407d19e0930aa1b4499eae9b4 Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 18:13:22 +0100 Subject: [PATCH 15/40] added docker file --- .github/workflows/release.yml | 17 ++++++++++++----- Cargo.toml | 4 ++-- Dockerfile | 17 +++++++++++++++++ src/http_relay/server.rs | 15 +++++++++++++++ 4 files changed, 46 insertions(+), 7 deletions(-) create mode 100644 Dockerfile diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index bfebf48..1010345 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -11,7 +11,7 @@ env: jobs: build: - name: Build ${{ matrix.target }} + name: Build ${{ matrix.name }} runs-on: ${{ matrix.os }} strategy: fail-fast: false @@ -19,20 +19,27 @@ jobs: include: - target: x86_64-unknown-linux-gnu os: ubuntu-latest + name: linux-x64 - target: x86_64-unknown-linux-musl os: ubuntu-latest + name: linux-x64-musl musl: true - target: aarch64-unknown-linux-gnu os: ubuntu-latest + name: linux-arm64 cross: true - target: x86_64-apple-darwin os: macos-latest + name: macos-x64 - target: aarch64-apple-darwin os: macos-latest + name: macos-arm64 - target: x86_64-pc-windows-msvc os: windows-latest + name: 
windows-x64 - target: aarch64-pc-windows-msvc os: windows-latest + name: windows-arm64 steps: - uses: actions/checkout@v4 @@ -63,19 +70,19 @@ jobs: if: matrix.os != 'windows-latest' run: | cd target/${{ matrix.target }}/release - tar -czvf ../../../http-relay-${{ matrix.target }}.tar.gz http-relay + tar -czvf ../../../http-relay-${{ matrix.name }}.tar.gz http-relay - name: Package (Windows) if: matrix.os == 'windows-latest' run: | cd target/${{ matrix.target }}/release - 7z a ../../../http-relay-${{ matrix.target }}.zip http-relay.exe + 7z a ../../../http-relay-${{ matrix.name }}.zip http-relay.exe - name: Upload artifact uses: actions/upload-artifact@v4 with: - name: http-relay-${{ matrix.target }} - path: http-relay-${{ matrix.target }}.* + name: http-relay-${{ matrix.name }} + path: http-relay-${{ matrix.name }}.* release: name: Create Release diff --git a/Cargo.toml b/Cargo.toml index 99141de..ce7fa21 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,8 +5,8 @@ version = "0.6.0" edition = "2021" authors = ["SeverinAlexB"] license = "MIT" -homepage = "https://github.com/pubky/pubky-http-relay" -repository = "https://github.com/pubky/pubky-http-relay" +homepage = "https://github.com/pubky/http-relay" +repository = "https://github.com/pubky/http-relay" keywords = ["httprelay", "http", "relay"] categories = ["web-programming"] diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..87b4d20 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,17 @@ +FROM rust:1.84-alpine AS builder + +RUN apk add --no-cache musl-dev + +WORKDIR /app +COPY Cargo.toml Cargo.lock ./ +COPY src ./src + +RUN cargo build --release + +FROM scratch + +COPY --from=builder /app/target/release/http-relay /http-relay + +EXPOSE 8080 + +ENTRYPOINT ["/http-relay"] diff --git a/src/http_relay/server.rs b/src/http_relay/server.rs index 96e6c40..25016b2 100644 --- a/src/http_relay/server.rs +++ b/src/http_relay/server.rs @@ -158,6 +158,7 @@ impl HttpRelay { fn build_router(state: AppState) -> Router 
{ let max_body_size = state.config.max_body_size; Router::new() + .route("/", get(|| async { "Http Relay" })) .route( "/link/{id}", get(link::get_handler).post(link::post_handler), @@ -197,6 +198,7 @@ impl HttpRelay { let http_listener = TcpListener::bind(SocketAddr::new(config.bind_address, config.http_port))?; + http_listener.set_nonblocking(true)?; let http_address = http_listener.local_addr()?; let server = axum_server::from_tcp(http_listener)?; @@ -272,3 +274,16 @@ impl Drop for HttpRelay { self.http_handle.shutdown(); } } + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_root_returns_http_relay() { + let (server, _state) = HttpRelay::create_test_server(Config::default()); + let response = server.get("/").await; + assert_eq!(response.status_code(), 200); + assert_eq!(response.text(), "Http Relay"); + } +} From 722c600e2a87a62482e486da461c7676fb483c4b Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 18:13:44 +0100 Subject: [PATCH 16/40] removed build on PR --- .github/workflows/release.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1010345..4432070 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -3,8 +3,6 @@ name: Release on: push: tags: ['v*'] - pull_request: - branches: [main, master] env: CARGO_TERM_COLOR: always From faa9d4ecb7f047fad13c6fcb8dc208814d1c2b31 Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 18:30:32 +0100 Subject: [PATCH 17/40] added badges to readme --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 2f0c0d9..109940b 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,10 @@ # http-relay +[![Crates.io](https://img.shields.io/crates/v/http-relay.svg)](https://crates.io/crates/http-relay) 
+[![CI](https://github.com/pubky/http-relay/actions/workflows/ci.yml/badge.svg)](https://github.com/pubky/http-relay/actions/workflows/ci.yml) +[![Documentation](https://docs.rs/http-relay/badge.svg)](https://docs.rs/http-relay) +[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) + A Rust implementation of a subset of the [HTTP Relay spec](https://httprelay.io/) for asynchronous producer/consumer message passing. From 119bfdd887116cd94713c11787189b701be8b6dc Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Mon, 2 Feb 2026 18:36:01 +0100 Subject: [PATCH 18/40] bind to 127.0.0.1 by default --- README.md | 14 ++++++++++---- src/http_relay/server.rs | 4 ++-- src/main.rs | 2 +- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 109940b..5487582 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ [![Documentation](https://docs.rs/http-relay/badge.svg)](https://docs.rs/http-relay) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) -A Rust implementation of a subset of the [HTTP Relay spec](https://httprelay.io/) +A Rust implementation of the `/link` endpoint from the [HTTP Relay spec](https://httprelay.io/) for asynchronous producer/consumer message passing. ## What is this? @@ -14,6 +14,9 @@ An HTTP relay enables decoupled communication between distributed services. Instead of direct synchronous calls, producers and consumers communicate through relay endpoints, each waiting for their counterpart to arrive. +**Non-standard extension:** Adds a `/link2` endpoint optimized for mobile +clients with caching and shorter timeouts. 
+ **Use cases:** - Connecting services that can't communicate directly - Mobile apps that need resilient message delivery with retry support @@ -47,18 +50,21 @@ http-relay = "0.6" ### As CLI ```bash -# Default: bind to 0.0.0.0:8080 +# Default: bind to 127.0.0.1:8080 (localhost only) http-relay +# Bind to all interfaces (for production/Docker) +http-relay --bind 0.0.0.0 + # Custom configuration -http-relay --bind 127.0.0.1 --port 15412 --link2-cache-ttl 300 --link2-timeout 25 -vv +http-relay --bind 0.0.0.0 --port 15412 --link2-cache-ttl 300 --link2-timeout 25 -vv ``` **Options:** | Flag | Description | Default | |------|-------------|---------| -| `--bind ` | Bind address | `0.0.0.0` | +| `--bind ` | Bind address | `127.0.0.1` | | `--port ` | HTTP port (0 = random) | `8080` | | `--link2-cache-ttl ` | Cache TTL for link2 | `300` | | `--link2-timeout ` | Link2 endpoint timeout | `25` | diff --git a/src/http_relay/server.rs b/src/http_relay/server.rs index 25016b2..cee1211 100644 --- a/src/http_relay/server.rs +++ b/src/http_relay/server.rs @@ -79,7 +79,7 @@ pub(crate) struct Config { impl Default for Config { fn default() -> Self { Self { - bind_address: IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), + bind_address: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), http_port: 0, request_timeout: DEFAULT_REQUEST_TIMEOUT, cache_ttl: DEFAULT_CACHE_TTL, @@ -96,7 +96,7 @@ impl Default for Config { pub struct HttpRelayBuilder(Config); impl HttpRelayBuilder { - /// Configure the address to bind to (default: 0.0.0.0). + /// Configure the address to bind to (default: 127.0.0.1). 
pub fn bind_address(mut self, addr: IpAddr) -> Self { self.0.bind_address = addr; self diff --git a/src/main.rs b/src/main.rs index e44dab7..4e1e762 100644 --- a/src/main.rs +++ b/src/main.rs @@ -14,7 +14,7 @@ use tracing_subscriber::EnvFilter; #[command(version)] struct Args { /// Address to bind to - #[arg(short, long, default_value = "0.0.0.0")] + #[arg(short, long, default_value = "127.0.0.1")] bind: IpAddr, /// Port to listen on (0 = random available port) From b55fdf7e7dcec4d3227c364803d1f35f0c5bf12e Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Tue, 3 Feb 2026 06:52:18 +0100 Subject: [PATCH 19/40] fix license --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 7cb6a47..537fe61 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2026 Pubky +Copyright (c) 2026 Synonym Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal From 816901a82c8c03fd888d814361ec04614a90999a Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Tue, 3 Feb 2026 07:08:51 +0100 Subject: [PATCH 20/40] improved test duration --- src/http_relay/link_handler.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/http_relay/link_handler.rs b/src/http_relay/link_handler.rs index b5c881d..4104372 100644 --- a/src/http_relay/link_handler.rs +++ b/src/http_relay/link_handler.rs @@ -402,7 +402,7 @@ mod tests { .is_some()); // Wait for cache to expire - tokio::time::sleep(Duration::from_millis(1500)).await; + tokio::time::sleep(Duration::from_millis(60)).await; // Value should be expired (get_cached returns None) assert!(state From 96dc7c2b812f3933ec82a0534285df9f107431a3 Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Tue, 3 Feb 2026 07:17:12 +0100 Subject: [PATCH 21/40] renamed request_timeout to link_timeout --- src/http_relay/link_handler.rs | 8 ++++---- src/http_relay/server.rs | 9 +++++---- 2 
files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/http_relay/link_handler.rs b/src/http_relay/link_handler.rs index 4104372..6209b8b 100644 --- a/src/http_relay/link_handler.rs +++ b/src/http_relay/link_handler.rs @@ -42,7 +42,7 @@ impl LinkConfig { pub fn standard(config: &Config) -> Self { Self { caching_enabled: false, - timeout: config.request_timeout, + timeout: config.link_timeout, } } @@ -252,9 +252,9 @@ mod tests { } #[tokio::test] - async fn test_request_timeout() { + async fn test_link_timeout() { let config = Config { - request_timeout: Duration::from_millis(50), + link_timeout: Duration::from_millis(50), ..Config::default() }; let (server, state) = HttpRelay::create_test_server(config); @@ -277,7 +277,7 @@ mod tests { async fn test_no_caching() { let config = Config { cache_ttl: Duration::from_secs(5), - request_timeout: Duration::from_millis(100), + link_timeout: Duration::from_millis(100), ..Config::default() }; let (server, state) = HttpRelay::create_test_server(config); diff --git a/src/http_relay/server.rs b/src/http_relay/server.rs index cee1211..4515f3c 100644 --- a/src/http_relay/server.rs +++ b/src/http_relay/server.rs @@ -25,8 +25,8 @@ use url::Url; use super::waiting_list::WaitingList; use super::{link, link2}; -/// The timeout for a request to be considered unused. -const DEFAULT_REQUEST_TIMEOUT: Duration = Duration::from_secs(10 * 60); +/// The default timeout for link (v1) endpoints. +const DEFAULT_LINK_TIMEOUT: Duration = Duration::from_secs(10 * 60); /// The default time-to-live for cached values after first consumer retrieves them. const DEFAULT_CACHE_TTL: Duration = Duration::from_secs(5 * 60); @@ -63,7 +63,8 @@ impl AppState { pub(crate) struct Config { pub bind_address: IpAddr, pub http_port: u16, - pub request_timeout: Duration, + /// Timeout for link (v1) endpoints. + pub link_timeout: Duration, /// How long to keep values cached after the first consumer retrieves them. 
pub cache_ttl: Duration, /// Timeout for link2 endpoints (shorter to avoid proxy timeouts). @@ -81,7 +82,7 @@ impl Default for Config { Self { bind_address: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), http_port: 0, - request_timeout: DEFAULT_REQUEST_TIMEOUT, + link_timeout: DEFAULT_LINK_TIMEOUT, cache_ttl: DEFAULT_CACHE_TTL, link2_timeout: DEFAULT_LINK2_TIMEOUT, max_body_size: DEFAULT_MAX_BODY_SIZE, From 3a6d208b0c36438149adbd7ceae43748218dad5a Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Tue, 3 Feb 2026 09:00:23 +0100 Subject: [PATCH 22/40] increased cache cleanup time to 15s --- src/http_relay/server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/http_relay/server.rs b/src/http_relay/server.rs index 4515f3c..7443af4 100644 --- a/src/http_relay/server.rs +++ b/src/http_relay/server.rs @@ -212,7 +212,7 @@ impl HttpRelay { }); // Spawn background task to clean up expired cache entries - let cleanup_interval = Duration::from_secs(1); + let cleanup_interval = Duration::from_secs(15); let pending_list = app_state.pending_list.clone(); tokio::spawn(async move { loop { From 4ab768c9edcbbdf7a615508477652bee1e69887b Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Tue, 3 Feb 2026 09:15:50 +0100 Subject: [PATCH 23/40] added deprecation warning to /link/ --- README.md | 12 ++++++------ openapi.yaml | 14 ++++++++------ src/http_relay/link_handler.rs | 21 ++++++++++++++++++--- src/main.rs | 2 +- 4 files changed, 33 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 5487582..e69030d 100644 --- a/README.md +++ b/README.md @@ -26,8 +26,8 @@ clients with caching and shorter timeouts. 
- **Async producer/consumer model** - Producers POST data, consumers GET it - **Two endpoint variants:** - - `/link/{id}` - Standard relay (10 min timeout) - - `/link2/{id}` - Mobile-friendly with caching (25s timeout, 5 min cache TTL) + - `/link/{id}` - **Deprecated.** Standard relay (10 min timeout) + - `/link2/{id}` - **Recommended.** Mobile-friendly with caching (25s timeout, 5 min cache TTL) - **Mobile resilience** - Cached responses allow retries after connection drops - **Content-Type preservation** - Forwards producer's Content-Type to consumer - **Configurable timeouts and caching** @@ -127,13 +127,13 @@ curl http://localhost:8080/link/my-channel | Aspect | `/link/{id}` | `/link2/{id}` | |--------|--------------|---------------| +| Status | **Deprecated** | **Recommended** | | Timeout | 10 minutes | 25 seconds | | Caching | No | Yes (5 min TTL) | -| Use case | Backwards compatibility | **Recommended** | -**Use `/link2` for new integrations.** It handles proxy timeouts gracefully and -supports retries via caching. The `/link` endpoint remains available for -backwards compatibility with existing clients. +**Use `/link2` for all integrations.** It handles proxy timeouts gracefully and +supports retries via caching. The `/link` endpoint is deprecated and remains +only for backwards compatibility with existing clients. 
### Why Link2 Exists diff --git a/openapi.yaml b/openapi.yaml index 1c9b729..b5376bd 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -10,7 +10,7 @@ info: **Two endpoint variants are available:** - `/link2/{id}` - **Recommended.** Short timeout (25s) with caching (5 min TTL) - - `/link/{id}` - Backwards compatibility only (10 min timeout, no caching) + - `/link/{id}` - **Deprecated.** (10 min timeout, no caching) **Use `/link2` for new integrations.** The original `/link` endpoint has a problem on mobile devices: when an app gets backgrounded or killed while @@ -29,9 +29,10 @@ servers: paths: /link/{id}: get: - summary: Consume a message (legacy) + summary: Consume a message (deprecated) + deprecated: true description: | - Legacy endpoint for backwards compatibility. Use `/link2` for new integrations. + **Deprecated.** Use `/link2` for all integrations. Consumer retrieves a message from the channel. Blocks until a producer sends data or the timeout (10 minutes) is reached. @@ -47,9 +48,10 @@ paths: $ref: '#/components/responses/Timeout' post: - summary: Produce a message (legacy) + summary: Produce a message (deprecated) + deprecated: true description: | - Legacy endpoint for backwards compatibility. Use `/link2` for new integrations. + **Deprecated.** Use `/link2` for all integrations. Producer sends a message to the channel. Blocks until a consumer retrieves the data or the timeout (10 minutes) is reached. @@ -172,6 +174,6 @@ components: tags: - name: link - description: Legacy relay endpoints for backwards compatibility (10 min timeout, no caching) + description: Deprecated relay endpoints (10 min timeout, no caching). Use link2 instead. 
- name: link2 description: Recommended endpoints with retry support (25s timeout, 5 min cache TTL) diff --git a/src/http_relay/link_handler.rs b/src/http_relay/link_handler.rs index 6209b8b..998e441 100644 --- a/src/http_relay/link_handler.rs +++ b/src/http_relay/link_handler.rs @@ -160,12 +160,20 @@ pub async fn post_handler( .await } +/// Deprecated: Use `/link2/` instead. +/// +/// The `/link/` endpoint lacks caching support and has a longer default timeout +/// that can cause issues with reverse proxies. Use `/link2/` for new integrations. pub mod link { use super::*; pub async fn get_handler(path: Path, State(state): State) -> Response { let config = LinkConfig::standard(&state.config); - super::get_handler(path, State(state), config).await + let mut response = super::get_handler(path, State(state), config).await; + response + .headers_mut() + .insert("Deprecation", "true".parse().unwrap()); + response } pub async fn post_handler( @@ -173,9 +181,16 @@ pub mod link { State(state): State, headers: HeaderMap, body: Bytes, - ) -> impl IntoResponse { + ) -> Response { let config = LinkConfig::standard(&state.config); - super::post_handler(path, State(state), headers, body, config).await + let mut response = + super::post_handler(path, State(state), headers, body, config) + .await + .into_response(); + response + .headers_mut() + .insert("Deprecation", "true".parse().unwrap()); + response } } diff --git a/src/main.rs b/src/main.rs index 4e1e762..ccb089a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -73,7 +73,7 @@ async fn main() -> Result<()> { ); tracing::info!( link = %relay.local_link_url(), - "Link endpoint available at /link/{{id}} and /link2/{{id}}" + "Endpoints: /link2/{{id}} (recommended), /link/{{id}} (deprecated)" ); tokio::signal::ctrl_c().await?; From aa4070b4e33d534cf2d971c47863faccc04a8a31 Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Tue, 3 Feb 2026 09:16:43 +0100 Subject: [PATCH 24/40] fmt --- src/http_relay/link_handler.rs | 7 +++---- 1 file 
changed, 3 insertions(+), 4 deletions(-) diff --git a/src/http_relay/link_handler.rs b/src/http_relay/link_handler.rs index 998e441..964e64b 100644 --- a/src/http_relay/link_handler.rs +++ b/src/http_relay/link_handler.rs @@ -183,10 +183,9 @@ pub mod link { body: Bytes, ) -> Response { let config = LinkConfig::standard(&state.config); - let mut response = - super::post_handler(path, State(state), headers, body, config) - .await - .into_response(); + let mut response = super::post_handler(path, State(state), headers, body, config) + .await + .into_response(); response .headers_mut() .insert("Deprecation", "true".parse().unwrap()); From ba1327e2752ceeb7ea43c09ff91f5e39f15519c7 Mon Sep 17 00:00:00 2001 From: Severin Buhler Date: Tue, 3 Feb 2026 09:44:38 +0100 Subject: [PATCH 25/40] added demo --- demo/.gitignore | 6 + demo/README.md | 40 ++++ demo/app/layout.tsx | 18 ++ demo/app/page.tsx | 384 +++++++++++++++++++++++++++++++ demo/package-lock.json | 498 +++++++++++++++++++++++++++++++++++++++++ demo/package.json | 21 ++ demo/tsconfig.json | 20 ++ 7 files changed, 987 insertions(+) create mode 100644 demo/.gitignore create mode 100644 demo/README.md create mode 100644 demo/app/layout.tsx create mode 100644 demo/app/page.tsx create mode 100644 demo/package-lock.json create mode 100644 demo/package.json create mode 100644 demo/tsconfig.json diff --git a/demo/.gitignore b/demo/.gitignore new file mode 100644 index 0000000..2263ed4 --- /dev/null +++ b/demo/.gitignore @@ -0,0 +1,6 @@ +node_modules/ +.next/ +out/ +.env*.local +*.tsbuildinfo +next-env.d.ts diff --git a/demo/README.md b/demo/README.md new file mode 100644 index 0000000..0fc05a9 --- /dev/null +++ b/demo/README.md @@ -0,0 +1,40 @@ +# HTTP Relay Demo + +A simple web UI to test the http-relay `/link2` endpoint. + +## Quick Start + +```bash +# 1. Start the relay (from repo root) +cargo run + +# 2. Start the demo (from this folder) +npm install +npm run dev +``` + +Open http://localhost:3000 + +## Usage + +1. 
**Set Channel ID** - Click "Random" or enter your own +2. **Start Consumer** - Waits for a message on the channel +3. **Send from Producer** - Delivers message to the waiting consumer +4. **Watch the log** - See the request/response flow + +The consumer and producer retry automatically on 408 timeouts until they connect. + +## Sharing Channels + +The channel ID syncs with the URL. Share links like: + +``` +http://localhost:3000?channel=my-channel +``` + +Your friend opens the link → same channel ID is pre-filled → they can immediately start as consumer or producer. + +## Configuration + +- **Relay URL** - Default `http://localhost:8080`, change if your relay runs elsewhere +- **Channel ID** - Any string, shared between consumer and producer diff --git a/demo/app/layout.tsx b/demo/app/layout.tsx new file mode 100644 index 0000000..1dde437 --- /dev/null +++ b/demo/app/layout.tsx @@ -0,0 +1,18 @@ +export const metadata = { + title: 'HTTP Relay Demo', + description: 'Demo for testing http-relay /link2 endpoint', +} + +export default function RootLayout({ + children, +}: { + children: React.ReactNode +}) { + return ( + + + {children} + + + ) +} diff --git a/demo/app/page.tsx b/demo/app/page.tsx new file mode 100644 index 0000000..80f0b2b --- /dev/null +++ b/demo/app/page.tsx @@ -0,0 +1,384 @@ +'use client' + +import { useState, useRef, useCallback, useEffect, Suspense } from 'react' +import { useSearchParams } from 'next/navigation' + +type LogEntry = { + timestamp: Date + type: 'consumer' | 'producer' | 'info' | 'error' + message: string +} + +function generateRandomId() { + return Math.random().toString(36).substring(2, 10) +} + +function HomeContent() { + const searchParams = useSearchParams() + const [relayUrl, setRelayUrl] = useState('http://localhost:8080') + const [endpoint, setEndpoint] = useState<'link' | 'link2'>('link2') + const [channelId, setChannelId] = useState('') + const [producerContent, setProducerContent] = useState('hello world') + const [logs, 
setLogs] = useState([]) + const [consumerRunning, setConsumerRunning] = useState(false) + const [producerRunning, setProducerRunning] = useState(false) + + const consumerAbortRef = useRef(null) + const producerAbortRef = useRef(null) + + // Read channel from URL on mount + useEffect(() => { + const channel = searchParams.get('channel') + if (channel) setChannelId(channel) + }, [searchParams]) + + // Update URL when channel changes + const updateChannelId = useCallback((newId: string) => { + setChannelId(newId) + const url = new URL(window.location.href) + if (newId) { + url.searchParams.set('channel', newId) + } else { + url.searchParams.delete('channel') + } + window.history.replaceState({}, '', url.toString()) + }, []) + + const addLog = useCallback((type: LogEntry['type'], message: string) => { + setLogs(prev => [...prev, { timestamp: new Date(), type, message }]) + }, []) + + const startConsumer = async () => { + if (!channelId.trim()) { + addLog('error', 'Channel ID is required') + return + } + + setConsumerRunning(true) + consumerAbortRef.current = new AbortController() + const id = channelId.trim() + + addLog('consumer', `Starting consumer loop for ID: ${id}`) + + while (true) { + if (consumerAbortRef.current?.signal.aborted) { + addLog('consumer', 'Consumer stopped by user') + break + } + + try { + addLog('consumer', `GET ${relayUrl}/${endpoint}/${id}`) + const response = await fetch(`${relayUrl}/${endpoint}/${id}`, { + signal: consumerAbortRef.current?.signal, + }) + + if (response.status === 200) { + const data = await response.text() + addLog('consumer', `Received: ${data}`) + break + } + + if (response.status === 408) { + addLog('consumer', '408 Timeout - retrying...') + continue + } + + addLog('error', `Unexpected status: ${response.status}`) + break + } catch (err) { + if (err instanceof Error && err.name === 'AbortError') { + addLog('consumer', 'Consumer stopped by user') + break + } + addLog('error', `Consumer error: ${err}`) + break + } + } + + 
setConsumerRunning(false) + } + + const stopConsumer = () => { + consumerAbortRef.current?.abort() + } + + const startProducer = async () => { + if (!channelId.trim()) { + addLog('error', 'Channel ID is required') + return + } + + setProducerRunning(true) + producerAbortRef.current = new AbortController() + const id = channelId.trim() + const content = producerContent + + addLog('producer', `Starting producer loop for ID: ${id}`) + + while (true) { + if (producerAbortRef.current?.signal.aborted) { + addLog('producer', 'Producer stopped by user') + break + } + + try { + addLog('producer', `POST ${relayUrl}/${endpoint}/${id} with: ${content}`) + const response = await fetch(`${relayUrl}/${endpoint}/${id}`, { + method: 'POST', + headers: { 'Content-Type': 'text/plain' }, + body: content, + signal: producerAbortRef.current?.signal, + }) + + if (response.status === 200) { + addLog('producer', 'Consumer received the message!') + break + } + + if (response.status === 408) { + addLog('producer', '408 Timeout - retrying...') + continue + } + + addLog('error', `Unexpected status: ${response.status}`) + break + } catch (err) { + if (err instanceof Error && err.name === 'AbortError') { + addLog('producer', 'Producer stopped by user') + break + } + addLog('error', `Producer error: ${err}`) + break + } + } + + setProducerRunning(false) + } + + const stopProducer = () => { + producerAbortRef.current?.abort() + } + + const clearLogs = () => setLogs([]) + + const sectionStyle: React.CSSProperties = { + backgroundColor: 'white', + padding: '16px', + borderRadius: '8px', + marginBottom: '16px', + boxShadow: '0 1px 3px rgba(0,0,0,0.1)', + } + + const inputStyle: React.CSSProperties = { + padding: '8px 12px', + border: '1px solid #ddd', + borderRadius: '4px', + fontSize: '14px', + width: '100%', + boxSizing: 'border-box', + } + + const buttonStyle = (active: boolean, color: string): React.CSSProperties => ({ + padding: '8px 16px', + backgroundColor: active ? 
'#999' : color, + color: 'white', + border: 'none', + borderRadius: '4px', + cursor: active ? 'not-allowed' : 'pointer', + fontSize: '14px', + marginRight: '8px', + }) + + return ( +
+

HTTP Relay Demo

+ + {/* Config Section */} +
+

Configuration

+
+
+ + setRelayUrl(e.target.value)} + placeholder="http://localhost:8080" + style={inputStyle} + /> +
+
+ +
+ + +
+
+
+
+
+ +
+ updateChannelId(e.target.value)} + placeholder="my-channel" + style={{ ...inputStyle, flex: 1 }} + disabled={consumerRunning || producerRunning} + /> + +
+
+
+
+ + {/* Consumer Section */} +
+

Consumer

+
+ {!consumerRunning ? ( + + ) : ( + + )} + {consumerRunning && ( + + Waiting for producer... + + )} +
+
+ + {/* Producer Section */} +
+

Producer

+
+