Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ tokio-comp = ["redis/tokio-rustls-comp"]
default = ["async-std-comp"]

[dependencies]
redis = { version = "0.32.2" }
redis = { version = "0.32.2", features = ["cluster-async"]}
tokio = { version = "1.45.1", features = ["rt", "time"] }
rand = "0.9.1"
futures = "0.3.31"
Expand Down
28 changes: 26 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ This is an implementation of Redlock, the [distributed locking mechanism](http:/
- Lock extending
- Async runtime support (async-std and tokio)
- Async redis
- Support for both standalone Redis and Redis Cluster

## Install

Expand Down Expand Up @@ -45,9 +46,16 @@ async fn main() {
"redis://127.0.0.1:6382/",
];

// Initialize the LockManager using `new`
// Initialize the LockManager using `new` for standalone Redis
let rl = LockManager::new(uris);

// For Redis Cluster, use:
// let cluster_uris = vec![
// vec!["redis://127.0.0.1:7000/", "redis://127.0.0.1:7001/"],
// vec!["redis://127.0.0.1:7002/", "redis://127.0.0.1:7003/"],
// ];
// let rl = LockManager::new_cluster(cluster_uris)?;

// Acquire a lock
let lock = loop {
if let Ok(lock) = rl
Expand All @@ -73,6 +81,11 @@ async fn main() {
}
```

## Locking Behavior

- **Single cluster**: a plain single-instance Redis lock — no quorum, so it is not fault-tolerant on its own
- **Multiple clusters**: the full distributed Redlock algorithm — a lock is held only when a majority (quorum) of clusters grant it; use an odd number of clusters, typically N ≥ 3

## Extending Locks

Extending a lock effectively renews its duration instead of adding extra time to it. For instance, if a 1000ms lock is extended by 1000ms after 500ms pass, it will only last for a total of 1500ms, not 2000ms. This approach is consistent with the [Node.js Redlock implementation](https://www.npmjs.com/package/redlock). See the [extend script](https://github.com/hexcowboy/rslock/blob/main/src/lock.rs#L22-L30).
Expand All @@ -87,7 +100,9 @@ cargo test --all-features

## Examples

Start the redis servers mentioned in the example code:
### Basic Examples

Start the redis servers:

```bash
docker compose -f examples/docker-compose.yml up -d
Expand All @@ -107,6 +122,15 @@ Stop the redis servers:
docker compose -f examples/docker-compose.yml down
```

### Cluster Examples

Test single-cluster (simple lock) and multi-cluster (Redlock) behavior:

```bash
# Executes both single and multi-cluster examples
docker compose -f examples/docker-compose-cluster.yml up --build
```

## Contribute

If you find bugs or want to help otherwise, please [open an issue](https://github.com/hexcowboy/rslock/issues).
Expand Down
3 changes: 3 additions & 0 deletions examples/Dockerfile.test
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Minimal image for the example test runner: the crate source is bind-mounted
# at /app by docker-compose-cluster.yml, so nothing is copied in here.
FROM rust:alpine
# musl-dev supplies the C toolchain pieces cargo needs to link on Alpine/musl.
RUN apk add --no-cache musl-dev
WORKDIR /app
99 changes: 99 additions & 0 deletions examples/docker-compose-cluster.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
# Three independent 3-node Redis Clusters plus an init job and a test runner.
# NOTE: the `version` key is ignored by Compose v2 (kept only for older clients).
version: '3.8'

networks:
  cluster-net:

# Shared settings for every Redis node, defined once via a Compose extension
# field and a YAML merge anchor instead of nine copy-pasted service bodies.
x-redis-node: &redis-node
  image: redis:7-alpine
  command: redis-server --port 6379 --cluster-enabled yes --cluster-config-file nodes.conf --appendonly yes
  networks: [cluster-net]

services:
  # CLUSTER 1
  c1-n1:
    <<: *redis-node
    container_name: c1-n1
    ports: ["7000:6379"]

  c1-n2:
    <<: *redis-node
    container_name: c1-n2
    ports: ["7001:6379"]

  c1-n3:
    <<: *redis-node
    container_name: c1-n3
    ports: ["7002:6379"]

  # CLUSTER 2
  c2-n1:
    <<: *redis-node
    container_name: c2-n1
    ports: ["7100:6379"]

  c2-n2:
    <<: *redis-node
    container_name: c2-n2
    ports: ["7101:6379"]

  c2-n3:
    <<: *redis-node
    container_name: c2-n3
    ports: ["7102:6379"]

  # CLUSTER 3
  c3-n1:
    <<: *redis-node
    container_name: c3-n1
    ports: ["7200:6379"]

  c3-n2:
    <<: *redis-node
    container_name: c3-n2
    ports: ["7201:6379"]

  c3-n3:
    <<: *redis-node
    container_name: c3-n3
    ports: ["7202:6379"]

  # INIT: stitch the nine nodes into three 3-master clusters.
  cluster-init:
    image: redis:7-alpine
    depends_on: [c1-n1, c1-n2, c1-n3, c2-n1, c2-n2, c2-n3, c3-n1, c3-n2, c3-n3]
    entrypoint: ["/bin/sh", "-c"]
    command:
      - |
        # Wait until every node answers PING instead of a fixed `sleep 5`,
        # which races on slow hosts. `$$` escapes `$` for Compose interpolation.
        for h in c1-n1 c1-n2 c1-n3 c2-n1 c2-n2 c2-n3 c3-n1 c3-n2 c3-n3; do
          until redis-cli -h "$$h" ping > /dev/null 2>&1; do sleep 1; done
        done
        redis-cli --cluster create c1-n1:6379 c1-n2:6379 c1-n3:6379 --cluster-replicas 0 --cluster-yes
        redis-cli --cluster create c2-n1:6379 c2-n2:6379 c2-n3:6379 --cluster-replicas 0 --cluster-yes
        redis-cli --cluster create c3-n1:6379 c3-n2:6379 c3-n3:6379 --cluster-replicas 0 --cluster-yes
    networks: [cluster-net]
    restart: "no"

  # TEST RUNNER: builds the crate in-container and runs both examples;
  # a failing example fails the `&&` chain and thus the service.
  test-runner:
    build:
      context: ..
      dockerfile: examples/Dockerfile.test
    depends_on:
      cluster-init:
        condition: service_completed_successfully
    volumes: ["..:/app"]
    working_dir: /app
    networks: [cluster-net]
    command: sh -c "sleep 2 && cargo run --example single_cluster && cargo run --example multi_cluster"
    restart: "no"
93 changes: 93 additions & 0 deletions examples/multi_cluster.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
use rslock::LockManager;
use std::time::Duration;
use tokio::time::sleep;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("REDLOCK TEST (Three Clusters)\n");

    // One entry per independent Redis Cluster; Redlock quorum here is 2 of 3.
    // Defined once so both managers below are guaranteed to target the same
    // set of clusters (the list was previously duplicated verbatim).
    let cluster_uris = vec![
        vec!["redis://c1-n1:6379"],
        vec!["redis://c2-n1:6379"],
        vec!["redis://c3-n1:6379"],
    ];
    let lm = LockManager::new_cluster(cluster_uris.clone())?;

    // Test 1: Basic Redlock acquire/release round-trip.
    println!("Test 1: Basic Redlock (quorum=2/3)");
    let lock = lm.lock(b"resource:{1}", Duration::from_secs(5)).await?;
    println!("Lock acquired on quorum: validity={}ms", lock.validity_time);
    lm.unlock(&lock).await;
    println!("Lock released from all clusters\n");

    // Test 2: a second, independent manager must NOT reach quorum while the
    // lock is held. This is the core mutual-exclusion guarantee, so a
    // violation is a hard failure (non-zero exit) rather than just a log line
    // — the compose test runner chains these examples with `&&`.
    println!("Test 2: Concurrent Redlock attempt");
    let lock1 = lm.lock(b"resource:{2}", Duration::from_secs(5)).await?;
    println!("Lock1 acquired on quorum");

    let lm2 = LockManager::new_cluster(cluster_uris)?;

    match lm2.lock(b"resource:{2}", Duration::from_secs(5)).await {
        Ok(_) => {
            return Err(
                "mutual exclusion violated: Lock2 acquired quorum while Lock1 was held".into(),
            )
        }
        Err(_) => println!("Lock2 correctly failed (no quorum)"),
    }

    lm.unlock(&lock1).await;
    println!("Lock1 released from all clusters\n");

    // Test 3: let a short-lived lock expire on its own (no unlock call).
    println!("Test 3: Lock expiration (all clusters)");
    let lock = lm.lock(b"resource:{3}", Duration::from_millis(500)).await?;
    println!("Lock acquired on quorum: validity={}ms", lock.validity_time);
    sleep(Duration::from_millis(600)).await;
    println!("Lock expired across all clusters\n");

    // Test 4: the same resource must be acquirable again after expiry.
    println!("Test 4: Re-acquire after expiration");
    let lock = lm.lock(b"resource:{3}", Duration::from_secs(5)).await?;
    println!("Lock re-acquired successfully on quorum");
    lm.unlock(&lock).await;
    println!("Lock released\n");

    // Test 5: locks on distinct resources are independent and may be held
    // simultaneously, each with its own quorum.
    println!("Test 5: Multiple independent Redlocks");
    let lock_a = lm.lock(b"resource:{5a}", Duration::from_secs(5)).await?;
    let lock_b = lm.lock(b"resource:{5b}", Duration::from_secs(5)).await?;
    let lock_c = lm.lock(b"resource:{5c}", Duration::from_secs(5)).await?;
    println!("Three Redlocks acquired (each has quorum)");
    lm.unlock(&lock_a).await;
    lm.unlock(&lock_b).await;
    lm.unlock(&lock_c).await;
    println!("All Redlocks released\n");

    // Test 6: validity should be the requested TTL minus acquisition time and
    // drift allowance, i.e. somewhat under 10s. Logged rather than asserted
    // because the exact margin depends on network latency.
    println!("Test 6: Validity time validation");
    let lock = lm.lock(b"resource:{6}", Duration::from_secs(10)).await?;
    let validity = lock.validity_time;
    println!("Lock acquired: validity={}ms", validity);

    if validity > 9000 && validity < 10000 {
        println!("Validity time is reasonable (9-10s)");
    } else {
        println!("Validity time unexpected: {}ms", validity);
    }

    lm.unlock(&lock).await;
    println!("Lock released\n");

    // Test 7: rapid acquire/release cycles, each on a fresh resource.
    println!("Test 7: Quick successive locks");
    for i in 0..5 {
        let resource = format!("resource:{{7:{}}}", i);
        let lock = lm.lock(resource.as_bytes(), Duration::from_secs(2)).await?;
        println!(" Lock {} acquired", i);
        lm.unlock(&lock).await;
    }
    println!("All successive locks worked\n");

    println!("REDLOCK: All tests passed!\n");
    Ok(())
}
69 changes: 69 additions & 0 deletions examples/single_cluster.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
use rslock::LockManager;
use std::time::Duration;
use tokio::time::sleep;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("SIMPLE REDIS LOCK TEST (Single Cluster)\n");

    // A single cluster entry: rslock degrades to a plain, non-distributed
    // Redis lock (no quorum). Defined once so both managers below target
    // the same cluster (the list was previously duplicated verbatim).
    let cluster_uris = vec![vec!["redis://c1-n1:6379"]];
    let lm = LockManager::new_cluster(cluster_uris.clone())?;

    // Test 1: Basic lock/unlock round-trip.
    println!("Test 1: Basic lock/unlock");
    let lock = lm.lock(b"resource:{1}", Duration::from_secs(5)).await?;
    println!("Lock acquired: validity={}ms", lock.validity_time);
    lm.unlock(&lock).await;
    println!("Lock released\n");

    // Test 2: let a short-lived lock expire on its own (no unlock call).
    println!("Test 2: Lock expiration");
    let lock = lm.lock(b"resource:{2}", Duration::from_millis(500)).await?;
    println!("Lock acquired: validity={}ms", lock.validity_time);
    sleep(Duration::from_millis(600)).await;
    println!("⏰ Lock expired (no unlock needed)\n");

    // Test 3: a second client must NOT acquire a held lock. This is the core
    // mutual-exclusion guarantee, so a violation is a hard failure (non-zero
    // exit) rather than just a log line — the compose test runner chains
    // these examples with `&&`.
    println!("Test 3: Concurrent lock (should fail)");
    let lock1 = lm.lock(b"resource:{3}", Duration::from_secs(5)).await?;
    println!("Lock1 acquired by first client");

    let lm2 = LockManager::new_cluster(cluster_uris)?;

    match lm2.lock(b"resource:{3}", Duration::from_secs(5)).await {
        Ok(_) => {
            return Err(
                "mutual exclusion violated: Lock2 succeeded while Lock1 was held".into(),
            )
        }
        Err(_) => println!("Lock2 correctly rejected"),
    }

    lm.unlock(&lock1).await;
    println!("Lock1 released\n");

    // Test 4: the same resource must be acquirable again after an unlock.
    println!("Test 4: Re-acquire after unlock");
    let lock = lm.lock(b"resource:{4}", Duration::from_secs(5)).await?;
    println!("Lock acquired");
    lm.unlock(&lock).await;
    println!("Lock released");

    let lock2 = lm.lock(b"resource:{4}", Duration::from_secs(5)).await?;
    println!("Lock re-acquired successfully");
    lm.unlock(&lock2).await;
    println!("Lock released\n");

    // Test 5: locks on distinct resources are independent and may be held
    // simultaneously.
    println!("Test 5: Multiple independent resources");
    let lock_a = lm.lock(b"resource:{5a}", Duration::from_secs(5)).await?;
    let lock_b = lm.lock(b"resource:{5b}", Duration::from_secs(5)).await?;
    let lock_c = lm.lock(b"resource:{5c}", Duration::from_secs(5)).await?;
    println!("Three locks acquired simultaneously");
    lm.unlock(&lock_a).await;
    lm.unlock(&lock_b).await;
    lm.unlock(&lock_c).await;
    println!("All locks released\n");

    println!("SIMPLE REDIS LOCK: All tests passed!\n");
    Ok(())
}
Loading