From 7e6c4fd4f3108eb678abb544bcac537fb9925c60 Mon Sep 17 00:00:00 2001 From: Dillon G Date: Tue, 5 Aug 2025 00:25:06 -0700 Subject: [PATCH] Update to Rust Edition 2024 --- akd/Cargo.toml | 4 +- akd/benches/azks.rs | 6 +- akd/benches/directory.rs | 4 +- akd/src/append_only_zks.rs | 30 ++++---- akd/src/auditor.rs | 6 +- akd/src/directory.rs | 14 +++- akd/src/errors.rs | 2 +- akd/src/helper_structs.rs | 2 +- akd/src/lib.rs | 4 +- akd/src/storage/cache/high_parallelism.rs | 6 +- akd/src/storage/cache/tests.rs | 2 +- akd/src/storage/manager/mod.rs | 14 ++-- akd/src/storage/manager/tests.rs | 2 +- akd/src/storage/memory.rs | 4 +- akd/src/storage/mod.rs | 2 +- akd/src/storage/tests.rs | 28 +++---- akd/src/storage/transaction.rs | 6 +- akd/src/tests/mod.rs | 4 +- akd/src/tests/test_core_protocol.rs | 7 +- akd/src/tests/test_errors.rs | 74 ++++++++++--------- akd/src/tests/test_preloads.rs | 4 +- akd/src/tree_node.rs | 34 ++++----- akd/src/utils.rs | 2 +- akd_core/Cargo.toml | 6 +- akd_core/benches/parallel_vrfs.rs | 8 +- akd_core/src/configuration/experimental.rs | 2 +- akd_core/src/configuration/whatsapp_v1.rs | 4 +- akd_core/src/ecvrf/ecvrf_impl.rs | 6 +- akd_core/src/ecvrf/tests.rs | 72 +++++++++--------- akd_core/src/ecvrf/traits.rs | 2 +- akd_core/src/proto/mod.rs | 8 +- akd_core/src/proto/tests.rs | 24 +++--- akd_core/src/types/mod.rs | 2 +- akd_core/src/types/node_label/mod.rs | 2 +- akd_core/src/types/node_label/tests.rs | 24 +++--- akd_core/src/utils.rs | 16 ++-- akd_core/src/verify/history.rs | 4 +- akd_core/src/verify/lookup.rs | 2 +- akd_core/src/verify/mod.rs | 2 +- examples/Cargo.toml | 8 +- .../examples/example_tests.rs | 4 +- examples/src/fixture_generator/generator.rs | 24 +++--- examples/src/fixture_generator/reader/yaml.rs | 4 +- examples/src/fixture_generator/writer/yaml.rs | 2 +- examples/src/mysql_demo/directory_host.rs | 2 +- examples/src/mysql_demo/mod.rs | 4 +- examples/src/mysql_demo/mysql.rs | 6 +- examples/src/mysql_demo/mysql_storables.rs | 43 +++++------ examples/src/mysql_demo/tests/memory_tests.rs | 10 ++- .../src/mysql_demo/tests/mysql_db_tests.rs | 4 +- examples/src/mysql_demo/tests/mysql_tests.rs | 43 +++++------ examples/src/mysql_demo/tests/test_util.rs | 20 ++--- examples/src/test_vectors/mod.rs | 6 +- examples/src/wasm_client/mod.rs | 2 +- examples/src/whatsapp_kt_auditor/auditor.rs | 9 ++- examples/src/whatsapp_kt_auditor/mod.rs | 2 +- xtask/Cargo.toml | 2 +- 57 files changed, 337 insertions(+), 303 deletions(-) diff --git a/akd/Cargo.toml b/akd/Cargo.toml index a283701d..fc7812cc 100644 --- a/akd/Cargo.toml +++ b/akd/Cargo.toml @@ -4,7 +4,7 @@ version = "0.12.0-pre.12" authors = ["akd contributors"] description = "An implementation of an auditable key directory" license = "MIT OR Apache-2.0" -edition = "2021" +edition = "2024" keywords = ["key-transparency", "akd"] repository = "https://github.com/facebook/akd" readme = "../README.md" @@ -68,7 +68,7 @@ colored = { version = "2", optional = true } once_cell = { version = "1", optional = true } paste = { version = "1", optional = true } protobuf = { version = "3", optional = true } -rand = { version = "0.8", optional = true } +rand = { version = "0.9.2", optional = true } serde = { version = "1", features = ["derive"], optional = true } tracing = { version = "0.1.40", optional = true } diff --git a/akd/benches/azks.rs b/akd/benches/azks.rs index 10427dc8..f0d432a0 100644 --- a/akd/benches/azks.rs +++ b/akd/benches/azks.rs @@ -10,11 +10,11 @@ extern crate criterion; mod common; +use akd::NamedConfiguration; 
use akd::append_only_zks::{AzksParallelismConfig, InsertMode}; use akd::auditor; use akd::storage::manager::StorageManager; use akd::storage::memory::AsyncInMemoryDatabase; -use akd::NamedConfiguration; use akd::{Azks, AzksElement, AzksValue, NodeLabel}; use criterion::{BatchSize, Criterion}; use rand::rngs::StdRng; @@ -203,10 +203,10 @@ fn gen_nodes(rng: &mut impl Rng, num_nodes: usize) -> Vec { (0..num_nodes) .map(|_| { let label = NodeLabel { - label_val: rng.gen::<[u8; 32]>(), + label_val: rng.random::<[u8; 32]>(), label_len: 256, }; - let value = AzksValue(rng.gen::<[u8; 32]>()); + let value = AzksValue(rng.random::<[u8; 32]>()); AzksElement { label, value } }) .collect() diff --git a/akd/benches/directory.rs b/akd/benches/directory.rs index 9c7a155a..b0eff93f 100644 --- a/akd/benches/directory.rs +++ b/akd/benches/directory.rs @@ -10,14 +10,14 @@ extern crate criterion; mod common; +use akd::NamedConfiguration; use akd::append_only_zks::AzksParallelismConfig; use akd::ecvrf::HardCodedAkdVRF; use akd::storage::manager::StorageManager; use akd::storage::memory::AsyncInMemoryDatabase; -use akd::NamedConfiguration; use akd::{AkdLabel, AkdValue, Directory}; use criterion::{BatchSize, Criterion}; -use rand::distributions::Alphanumeric; +use rand::distr::Alphanumeric; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; diff --git a/akd/src/append_only_zks.rs b/akd/src/append_only_zks.rs index b0c98c3b..ce96aabf 100644 --- a/akd/src/append_only_zks.rs +++ b/akd/src/append_only_zks.rs @@ -7,21 +7,21 @@ //! An implementation of an append-only zero knowledge set +use crate::Configuration; use crate::hash::EMPTY_DIGEST; use crate::helper_structs::LookupInfo; use crate::log::{debug, info}; use crate::storage::manager::StorageManager; use crate::storage::types::StorageType; use crate::tree_node::{ - new_interior_node, new_leaf_node, new_root_node, node_to_azks_value, node_to_label, - NodeHashingMode, NodeKey, TreeNode, TreeNodeType, + NodeHashingMode, NodeKey, TreeNode, TreeNodeType, new_interior_node, new_leaf_node, + new_root_node, node_to_azks_value, node_to_label, }; -use crate::Configuration; use crate::{ + ARITY, AppendOnlyProof, AzksElement, AzksValue, Digest, Direction, MembershipProof, NodeLabel, + NonMembershipProof, PrefixOrdering, SiblingProof, SingleAppendOnlyProof, SizeOf, errors::{AkdError, DirectoryError, ParallelismError, TreeNodeError}, storage::{Database, Storable}, - AppendOnlyProof, AzksElement, AzksValue, Digest, Direction, MembershipProof, NodeLabel, - NonMembershipProof, PrefixOrdering, SiblingProof, SingleAppendOnlyProof, SizeOf, ARITY, }; use async_recursion::async_recursion; @@ -1320,8 +1320,8 @@ type AppendOnlyHelper = (Vec, Vec); #[cfg(test)] mod tests { use super::*; - use crate::storage::types::DbRecord; use crate::storage::StorageUtil; + use crate::storage::types::DbRecord; use crate::test_config; use crate::tree_node::TreeNodeWithPreviousValue; use crate::utils::byte_arr_from_u64; @@ -1331,7 +1331,7 @@ mod tests { storage::memory::AsyncInMemoryDatabase, }; use itertools::Itertools; - use rand::{rngs::StdRng, seq::SliceRandom, RngCore, SeedableRng}; + use rand::{RngCore, SeedableRng, rngs::StdRng, seq::SliceRandom}; use std::time::Duration; #[cfg(feature = "greedy_lookup_preload")] @@ -1460,9 +1460,9 @@ mod tests { let right_child_hash = leaf_hashes[2 * i + 1].clone(); layer_1_hashes.push(( TC::compute_parent_hash_from_children( - &AzksValue(left_child_hash.0 .0), + &AzksValue(left_child_hash.0.0), &left_child_hash.1, - &AzksValue(right_child_hash.0 .0), + 
&AzksValue(right_child_hash.0.0), &right_child_hash.1, ), NodeLabel::new(byte_arr_from_u64(j << 62), 2u32).value::(), @@ -1475,9 +1475,9 @@ mod tests { let right_child_hash = layer_1_hashes[2 * i + 1].clone(); layer_2_hashes.push(( TC::compute_parent_hash_from_children( - &AzksValue(left_child_hash.0 .0), + &AzksValue(left_child_hash.0.0), &left_child_hash.1, - &AzksValue(right_child_hash.0 .0), + &AzksValue(right_child_hash.0.0), &right_child_hash.1, ), NodeLabel::new(byte_arr_from_u64(j << 63), 1u32).value::(), @@ -1485,9 +1485,9 @@ mod tests { } let expected = TC::compute_root_hash_from_val(&TC::compute_parent_hash_from_children( - &AzksValue(layer_2_hashes[0].0 .0), + &AzksValue(layer_2_hashes[0].0.0), &layer_2_hashes[0].1, - &AzksValue(layer_2_hashes[1].0 .0), + &AzksValue(layer_2_hashes[1].0.0), &layer_2_hashes[1].1, )); @@ -1813,8 +1813,8 @@ mod tests { } test_config!(test_azks_element_set_get_longest_common_prefix); - async fn test_azks_element_set_get_longest_common_prefix( - ) -> Result<(), AkdError> { + async fn test_azks_element_set_get_longest_common_prefix() + -> Result<(), AkdError> { let num_nodes = 10; let database = AsyncInMemoryDatabase::new(); let db = StorageManager::new_no_cache(database); diff --git a/akd/src/auditor.rs b/akd/src/auditor.rs index d0f616a0..0e648371 100644 --- a/akd/src/auditor.rs +++ b/akd/src/auditor.rs @@ -7,16 +7,16 @@ //! Code for an auditor of a authenticated key directory -use akd_core::configuration::Configuration; use akd_core::AzksElement; +use akd_core::configuration::Configuration; -use crate::append_only_zks::AzksParallelismConfig; use crate::AzksValue; +use crate::append_only_zks::AzksParallelismConfig; use crate::{ + AppendOnlyProof, Azks, Digest, SingleAppendOnlyProof, append_only_zks::InsertMode, errors::{AkdError, AuditorError, AzksError}, storage::{manager::StorageManager, memory::AsyncInMemoryDatabase}, - AppendOnlyProof, Azks, Digest, SingleAppendOnlyProof, }; /// Verifies an audit proof, given start and end hashes for a merkle patricia tree. diff --git a/akd/src/directory.rs b/akd/src/directory.rs index f72c8201..a6813d3f 100644 --- a/akd/src/directory.rs +++ b/akd/src/directory.rs @@ -12,9 +12,9 @@ use crate::ecvrf::{VRFKeyStorage, VRFPublicKey}; use crate::errors::{AkdError, DirectoryError, StorageError}; use crate::helper_structs::LookupInfo; use crate::log::{error, info}; +use crate::storage::Database; use crate::storage::manager::StorageManager; use crate::storage::types::{DbRecord, ValueState, ValueStateRetrievalFlag}; -use crate::storage::Database; use crate::{ AkdLabel, AkdValue, AppendOnlyProof, AzksElement, Digest, EpochHash, HistoryProof, LookupProof, UpdateProof, @@ -207,7 +207,9 @@ where } if update_set.is_empty() { - info!("After filtering for duplicated user information, there is no publish which is necessary (0 updates)"); + info!( + "After filtering for duplicated user information, there is no publish which is necessary (0 updates)" + ); // The AZKS has not been updated/mutated at this point, so we can just return the root hash from before let root_hash = current_azks.get_root_hash::(&self.storage).await?; return Ok(EpochHash(current_epoch, root_hash)); @@ -752,7 +754,9 @@ where match got { DbRecord::Azks(azks) => Ok(azks), _ => { - error!("No AZKS can be found. You should re-initialize the directory to create a new one"); + error!( + "No AZKS can be found. 
You should re-initialize the directory to create a new one" + ); Err(AkdError::Storage(StorageError::NotFound( "AZKS not found".to_string(), ))) @@ -1097,7 +1101,9 @@ impl Directory = update_set.to_vec(); if azks_element_set.is_empty() { - info!("After filtering for duplicated user information, there is no publish which is necessary (0 updates)"); + info!( + "After filtering for duplicated user information, there is no publish which is necessary (0 updates)" + ); // The AZKS has not been updated/mutated at this point, so we can just return the root hash from before let root_hash = current_azks.get_root_hash::(&self.storage).await?; return Ok(EpochHash(current_epoch, root_hash)); diff --git a/akd/src/errors.rs b/akd/src/errors.rs index cfb0aece..06062507 100644 --- a/akd/src/errors.rs +++ b/akd/src/errors.rs @@ -8,8 +8,8 @@ //! Errors for various data structure operations. use core::fmt; -use crate::node_label::NodeLabel; use crate::Direction; +use crate::node_label::NodeLabel; /// Symbolizes a AkdError, thrown by the akd. #[cfg_attr(test, derive(PartialEq, Eq))] diff --git a/akd/src/helper_structs.rs b/akd/src/helper_structs.rs index 707e5329..b5b0f072 100644 --- a/akd/src/helper_structs.rs +++ b/akd/src/helper_structs.rs @@ -9,7 +9,7 @@ //! to make it easier to pass arguments around. use crate::Digest; -use crate::{storage::types::ValueState, NodeLabel}; +use crate::{NodeLabel, storage::types::ValueState}; /// Root hash of the tree and its associated epoch #[derive(Debug, Clone, Hash, PartialEq, Eq)] diff --git a/akd/src/lib.rs b/akd/src/lib.rs index 1cbb7016..87db1fac 100644 --- a/akd/src/lib.rs +++ b/akd/src/lib.rs @@ -557,8 +557,8 @@ pub mod log { pub mod local_auditing; pub use akd_core::{ - configuration, configuration::*, ecvrf, hash, hash::Digest, proto, types::*, verify, - verify::history::HistoryParams, ARITY, + ARITY, configuration, configuration::*, ecvrf, hash, hash::Digest, proto, types::*, verify, + verify::history::HistoryParams, }; #[macro_use] diff --git a/akd/src/storage/cache/high_parallelism.rs b/akd/src/storage/cache/high_parallelism.rs index 05fe9d18..16f25c3e 100644 --- a/akd/src/storage/cache/high_parallelism.rs +++ b/akd/src/storage/cache/high_parallelism.rs @@ -15,10 +15,10 @@ use crate::storage::Storable; use akd_core::SizeOf; use dashmap::DashMap; +use std::sync::Arc; #[cfg(feature = "runtime_metrics")] use std::sync::atomic::AtomicU64; use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::RwLock; @@ -86,7 +86,9 @@ impl TimedCache { debug!("Retained cache size is {retained_size} bytes"); if retained_size > memory_limit_bytes { - info!("Retained cache size has exceeded the predefined limit, cleaning old entries"); + info!( + "Retained cache size has exceeded the predefined limit, cleaning old entries" + ); // calculate the percentage we'd need to trim off to get to 100% utilization and take another 5% let percent_clean = 0.05 + 1.0 - (memory_limit_bytes as f64) / (retained_size as f64); diff --git a/akd/src/storage/cache/tests.rs b/akd/src/storage/cache/tests.rs index b8f422fc..83aab1a0 100644 --- a/akd/src/storage/cache/tests.rs +++ b/akd/src/storage/cache/tests.rs @@ -10,8 +10,8 @@ use super::*; use std::time::Duration; -use crate::storage::types::{ValueState, ValueStateKey}; use crate::storage::DbRecord; +use crate::storage::types::{ValueState, ValueStateKey}; use crate::{AkdLabel, AkdValue, NodeLabel}; #[tokio::test] diff --git a/akd/src/storage/manager/mod.rs b/akd/src/storage/manager/mod.rs 
index 7328f5c4..0eed7314 100644 --- a/akd/src/storage/manager/mod.rs +++ b/akd/src/storage/manager/mod.rs @@ -9,28 +9,28 @@ //! to manage interactions with the data layer to optimize things like caching and //! transaction management +use crate::AkdLabel; +use crate::AkdValue; use crate::log::debug; #[cfg(feature = "runtime_metrics")] use crate::log::info; +use crate::storage::Database; +use crate::storage::DbSetState; +use crate::storage::Storable; +use crate::storage::StorageError; use crate::storage::cache::TimedCache; use crate::storage::transaction::Transaction; use crate::storage::types::DbRecord; use crate::storage::types::KeyData; use crate::storage::types::ValueState; -use crate::storage::Database; -use crate::storage::DbSetState; -use crate::storage::Storable; -use crate::storage::StorageError; -use crate::AkdLabel; -use crate::AkdValue; use std::collections::HashMap; use std::collections::HashSet; +use std::sync::Arc; #[cfg(feature = "runtime_metrics")] use std::sync::atomic::AtomicU64; #[cfg(feature = "runtime_metrics")] use std::sync::atomic::Ordering; -use std::sync::Arc; use std::time::Duration; use super::types::ValueStateRetrievalFlag; diff --git a/akd/src/storage/manager/tests.rs b/akd/src/storage/manager/tests.rs index ac890b4e..228892f5 100644 --- a/akd/src/storage/manager/tests.rs +++ b/akd/src/storage/manager/tests.rs @@ -11,7 +11,7 @@ use akd_core::hash::EMPTY_DIGEST; use super::*; use crate::storage::memory::AsyncInMemoryDatabase; -use crate::storage::{types::*, StorageUtil}; +use crate::storage::{StorageUtil, types::*}; use crate::tree_node::{NodeKey, TreeNodeWithPreviousValue}; use crate::*; diff --git a/akd/src/storage/memory.rs b/akd/src/storage/memory.rs index df50abf6..f20bd7d4 100644 --- a/akd/src/storage/memory.rs +++ b/akd/src/storage/memory.rs @@ -224,7 +224,7 @@ impl Database for AsyncInMemoryDatabase { ValueStateRetrievalFlag::SpecificVersion(version) if version == kvp.version => { - return Ok(kvp.clone()) + return Ok(kvp.clone()); } ValueStateRetrievalFlag::LeqEpoch(epoch) if epoch == kvp.epoch => { return Ok(kvp.clone()); @@ -244,7 +244,7 @@ impl Database for AsyncInMemoryDatabase { } } ValueStateRetrievalFlag::SpecificEpoch(epoch) if epoch == kvp.epoch => { - return Ok(kvp.clone()) + return Ok(kvp.clone()); } _ => continue, } diff --git a/akd/src/storage/mod.rs b/akd/src/storage/mod.rs index abc5102a..de5162f4 100644 --- a/akd/src/storage/mod.rs +++ b/akd/src/storage/mod.rs @@ -13,7 +13,7 @@ use crate::{AkdLabel, AkdValue}; use async_trait::async_trait; #[cfg(feature = "serde_serialization")] -use serde::{de::DeserializeOwned, Serialize}; +use serde::{Serialize, de::DeserializeOwned}; use std::collections::HashMap; use std::hash::Hash; use std::marker::{Send, Sync}; diff --git a/akd/src/storage/tests.rs b/akd/src/storage/tests.rs index b841a364..bae66492 100644 --- a/akd/src/storage/tests.rs +++ b/akd/src/storage/tests.rs @@ -7,19 +7,19 @@ //! 
Test utilities of storage layers implementing the storage primatives for AKD +use crate::NodeLabel; use crate::errors::StorageError; -use crate::storage::types::*; use crate::storage::Database; use crate::storage::StorageManager; +use crate::storage::types::*; use crate::tree_node::*; use crate::utils::byte_arr_from_u64; -use crate::NodeLabel; use crate::{AkdLabel, AkdValue}; -use akd_core::hash::EMPTY_DIGEST; use akd_core::AzksValue; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; +use akd_core::hash::EMPTY_DIGEST; +use rand::Rng; +use rand::distr::Alphanumeric; use std::time::{Duration, Instant}; type Azks = crate::append_only_zks::Azks; @@ -134,7 +134,7 @@ async fn test_get_and_set_item(storage: &Ns) { async fn test_batch_get_items(storage: &Ns) { let mut rand_users: Vec> = vec![]; for _ in 0..20 { - let str: String = thread_rng() + let str: String = rand::rng() .sample_iter(&Alphanumeric) .take(30) .map(char::from) @@ -239,7 +239,7 @@ async fn test_batch_get_items(storage: &Ns) { .find(|&x| { if let DbRecord::ValueState(value_state) = &x { return value_state.username == result.0 - && value_state.version == result.1 .0; + && value_state.version == result.1.0; } false }) @@ -253,7 +253,7 @@ async fn test_batch_get_items(storage: &Ns) { }); // assert it matches what was given matches what was retrieved - assert_eq!(Some(result.1 .0), initial_record); + assert_eq!(Some(result.1.0), initial_record); } } } @@ -280,7 +280,7 @@ async fn test_batch_get_items(storage: &Ns) { .find(|&x| { if let DbRecord::ValueState(value_state) = &x { return value_state.username == result.0 - && value_state.version == result.1 .0; + && value_state.version == result.1.0; } false }) @@ -293,7 +293,7 @@ async fn test_batch_get_items(storage: &Ns) { } }); // assert it matches what was given matches what was retrieved - assert_eq!(Some(result.1 .0), initial_record); + assert_eq!(Some(result.1.0), initial_record); } } } @@ -302,7 +302,7 @@ async fn test_batch_get_items(storage: &Ns) { async fn test_transactions(storage: &StorageManager) { let mut rand_users: Vec> = vec![]; for _ in 0..20 { - let str: String = thread_rng() + let str: String = rand::rng() .sample_iter(&Alphanumeric) .take(30) .map(char::from) @@ -380,14 +380,14 @@ async fn test_transactions(storage: &StorageManager) { } async fn test_user_data(storage: &S) { - let rand_user = thread_rng() + let rand_user = rand::rng() .sample_iter(&Alphanumeric) .take(30) .map(char::from) .collect::() .as_bytes() .to_vec(); - let rand_value = thread_rng() + let rand_value = rand::rng() .sample_iter(&Alphanumeric) .take(1028) .map(char::from) @@ -581,7 +581,7 @@ async fn test_user_data(storage: &S) { async fn test_tombstoning_data( storage: &StorageManager, ) -> Result<(), crate::errors::AkdError> { - let rand_user = thread_rng() + let rand_user = rand::rng() .sample_iter(&Alphanumeric) .take(30) .map(char::from) diff --git a/akd/src/storage/transaction.rs b/akd/src/storage/transaction.rs index 3359551d..41ed3506 100644 --- a/akd/src/storage/transaction.rs +++ b/akd/src/storage/transaction.rs @@ -10,17 +10,17 @@ use crate::errors::StorageError; #[cfg(feature = "runtime_metrics")] use crate::log::info; +use crate::storage::Storable; use crate::storage::types::DbRecord; use crate::storage::types::ValueState; use crate::storage::types::ValueStateRetrievalFlag; -use crate::storage::Storable; use dashmap::DashMap; use std::collections::HashMap; +use std::sync::Arc; #[cfg(feature = "runtime_metrics")] use std::sync::atomic::AtomicU64; use 
std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; /// Represents an in-memory transaction, keeping a mutable state /// of the changes. When you "commit" this transaction, you return the @@ -284,7 +284,7 @@ mod tests { use crate::tree_node::*; use crate::utils::byte_arr_from_u64; use crate::{AkdLabel, AkdValue, AzksValue}; - use rand::{rngs::StdRng, seq::SliceRandom, SeedableRng}; + use rand::{SeedableRng, rngs::StdRng, seq::SliceRandom}; #[test] fn test_commit_order() -> Result<(), StorageError> { diff --git a/akd/src/tests/mod.rs b/akd/src/tests/mod.rs index 7dc755f2..1a1f85fb 100644 --- a/akd/src/tests/mod.rs +++ b/akd/src/tests/mod.rs @@ -14,14 +14,14 @@ mod test_preloads; use std::collections::HashMap; use crate::{ + AkdLabel, AkdValue, Azks, errors::StorageError, storage::{ + Database, DbSetState, Storable, memory::AsyncInMemoryDatabase, types::{DbRecord, KeyData, ValueState, ValueStateRetrievalFlag}, - Database, DbSetState, Storable, }, tree_node::TreeNodeWithPreviousValue, - AkdLabel, AkdValue, Azks, }; // Below contains the mock code for constructing a `MockLocalDatabase` diff --git a/akd/src/tests/test_core_protocol.rs b/akd/src/tests/test_core_protocol.rs index 2da31d4a..02eb0ab7 100644 --- a/akd/src/tests/test_core_protocol.rs +++ b/akd/src/tests/test_core_protocol.rs @@ -9,9 +9,11 @@ //! functionality and error handling upon verification use akd_core::{configuration::Configuration, hash::DIGEST_BYTES}; -use rand::{rngs::StdRng, SeedableRng}; +use rand::{SeedableRng, rngs::StdRng}; use crate::{ + AkdLabel, AkdValue, AppendOnlyProof, EpochHash, HistoryParams, HistoryVerificationParams, + VerifyResult, append_only_zks::AzksParallelismConfig, auditor::{audit_verify, verify_consecutive_append_only}, client::{key_history_verify, lookup_verify}, @@ -19,8 +21,7 @@ use crate::{ ecvrf::{HardCodedAkdVRF, VRFKeyStorage}, errors::AkdError, storage::{manager::StorageManager, memory::AsyncInMemoryDatabase}, - test_config, AkdLabel, AkdValue, AppendOnlyProof, EpochHash, HistoryParams, - HistoryVerificationParams, VerifyResult, + test_config, }; // A simple test to ensure that the empty tree hashes to the correct value diff --git a/akd/src/tests/test_errors.rs b/akd/src/tests/test_errors.rs index d598f18f..08be2a60 100644 --- a/akd/src/tests/test_errors.rs +++ b/akd/src/tests/test_errors.rs @@ -15,18 +15,18 @@ use crate::append_only_zks::AzksParallelismConfig; use crate::storage::types::KeyData; use crate::tree_node::TreeNodeWithPreviousValue; use crate::{ + AkdLabel, AkdValue, Azks, EpochHash, HistoryParams, HistoryVerificationParams, NodeLabel, auditor::audit_verify, client::{key_history_verify, lookup_verify}, directory::{Directory, PublishCorruption, ReadOnlyDirectory}, ecvrf::{HardCodedAkdVRF, VRFKeyStorage}, errors::{AkdError, DirectoryError, StorageError}, storage::{ - manager::StorageManager, memory::AsyncInMemoryDatabase, types::DbRecord, types::ValueState, - Database, + Database, manager::StorageManager, memory::AsyncInMemoryDatabase, types::DbRecord, + types::ValueState, }, test_config, - tests::{setup_mocked_db, MockLocalDatabase}, - AkdLabel, AkdValue, Azks, EpochHash, HistoryParams, HistoryVerificationParams, NodeLabel, + tests::{MockLocalDatabase, setup_mocked_db}, }; // This test is meant to test the function poll_for_azks_change @@ -415,7 +415,7 @@ async fn test_key_history_verify_malformed() -> Result<(), Ak let akd = Directory::::new(storage, vrf.clone(), AzksParallelismConfig::default()).await?; - let mut rng = rand::rngs::OsRng; + let mut rng = rand::rng(); 
for _ in 0..100 { let mut updates = vec![]; updates.push(( @@ -465,17 +465,19 @@ async fn test_key_history_verify_malformed() -> Result<(), Ak HistoryParams::MostRecent(6), HistoryParams::default(), ] { - assert!(key_history_verify::( - vrf_pk.as_bytes(), - root_hash, - current_epoch, - target_label.clone(), - key_history_proof.clone(), - HistoryVerificationParams::Default { - history_params: bad_params - }, - ) - .is_err()); + assert!( + key_history_verify::( + vrf_pk.as_bytes(), + root_hash, + current_epoch, + target_label.clone(), + key_history_proof.clone(), + HistoryVerificationParams::Default { + history_params: bad_params + }, + ) + .is_err() + ); } let mut malformed_proof_1 = key_history_proof.clone(); @@ -506,15 +508,17 @@ async fn test_key_history_verify_malformed() -> Result<(), Ak malformed_proof_3, malformed_proof_4, ] { - assert!(key_history_verify::( - vrf_pk.as_bytes(), - root_hash, - current_epoch, - target_label.clone(), - malformed_proof, - correct_verification_params - ) - .is_err()); + assert!( + key_history_verify::( + vrf_pk.as_bytes(), + root_hash, + current_epoch, + target_label.clone(), + malformed_proof, + correct_verification_params + ) + .is_err() + ); } let mut malformed_proof_start_version_is_zero = key_history_proof.clone(); @@ -527,15 +531,17 @@ async fn test_key_history_verify_malformed() -> Result<(), Ak malformed_proof_start_version_is_zero, malformed_proof_end_version_exceeds_epoch, ] { - assert!(key_history_verify::( - vrf_pk.as_bytes(), - root_hash, - current_epoch, - target_label.clone(), - malformed_proof, - correct_verification_params, - ) - .is_err()); + assert!( + key_history_verify::( + vrf_pk.as_bytes(), + root_hash, + current_epoch, + target_label.clone(), + malformed_proof, + correct_verification_params, + ) + .is_err() + ); } Ok(()) diff --git a/akd/src/tests/test_preloads.rs b/akd/src/tests/test_preloads.rs index 3ffca6b0..f4a8d2b5 100644 --- a/akd/src/tests/test_preloads.rs +++ b/akd/src/tests/test_preloads.rs @@ -10,15 +10,15 @@ use akd_core::configuration::Configuration; use crate::{ + AkdLabel, AkdValue, append_only_zks::AzksParallelismConfig, directory::Directory, ecvrf::HardCodedAkdVRF, errors::{AkdError, StorageError}, storage::{manager::StorageManager, memory::AsyncInMemoryDatabase}, test_config, - tests::{setup_mocked_db, MockLocalDatabase}, + tests::{MockLocalDatabase, setup_mocked_db}, tree_node::TreeNodeWithPreviousValue, - AkdLabel, AkdValue, }; test_config!(test_publish_op_makes_no_get_requests); diff --git a/akd/src/tree_node.rs b/akd/src/tree_node.rs index a1057451..1b210937 100644 --- a/akd/src/tree_node.rs +++ b/akd/src/tree_node.rs @@ -7,14 +7,14 @@ //! The implementation of a node for a history patricia tree +use crate::AzksValue; +use crate::PrefixOrdering; use crate::errors::{AkdError, StorageError, TreeNodeError}; use crate::hash::EMPTY_DIGEST; use crate::storage::manager::StorageManager; use crate::storage::types::{DbRecord, StorageType}; use crate::storage::{Database, Storable}; -use crate::AzksValue; -use crate::PrefixOrdering; -use crate::{node_label::*, Direction}; +use crate::{Direction, node_label::*}; use akd_core::configuration::Configuration; #[cfg(feature = "serde_serialization")] use akd_core::utils::serde_helpers::{azks_value_hex_deserialize, azks_value_hex_serialize}; @@ -414,7 +414,7 @@ impl TreeNode { // Set child according to given direction. 
match self.label.get_prefix_ordering(child_node.label) { PrefixOrdering::Invalid => { - return Err(TreeNodeError::NoDirection(child_node.label, None)) + return Err(TreeNodeError::NoDirection(child_node.label, None)); } PrefixOrdering::WithZero => { self.left_child = Some(child_node.label); @@ -561,8 +561,8 @@ mod tests { use akd_core::hash::DIGEST_BYTES; use super::*; - use crate::utils::byte_arr_from_u64; use crate::NodeLabel; + use crate::utils::byte_arr_from_u64; type InMemoryDb = crate::storage::memory::AsyncInMemoryDatabase; use crate::storage::manager::StorageManager; use crate::test_config; @@ -779,9 +779,9 @@ mod tests { let right_child_expected_hash = ( TC::compute_parent_hash_from_children( - &AzksValue(leaf_2_hash.0 .0), + &AzksValue(leaf_2_hash.0.0), &leaf_2_hash.1, - &AzksValue(leaf_1_hash.0 .0), + &AzksValue(leaf_1_hash.0.0), &leaf_1_hash.1, ), NodeLabel::new(byte_arr_from_u64(0b1u64 << 63), 1u32).value::(), @@ -796,9 +796,9 @@ mod tests { }; let expected = TC::compute_root_hash_from_val(&TC::compute_parent_hash_from_children( - &AzksValue(leaf_0_hash.0 .0), + &AzksValue(leaf_0_hash.0.0), &leaf_0_hash.1, - &AzksValue(right_child_expected_hash.0 .0), + &AzksValue(right_child_expected_hash.0.0), &right_child_expected_hash.1, )); assert!(root_digest == expected, "Root hash not equal to expected"); @@ -806,8 +806,8 @@ mod tests { } test_config!(test_insert_single_leaf_below_root_both_sides); - async fn test_insert_single_leaf_below_root_both_sides( - ) -> Result<(), AkdError> { + async fn test_insert_single_leaf_below_root_both_sides() + -> Result<(), AkdError> { let database = InMemoryDb::new(); let db = StorageManager::new_no_cache(database); let mut root = new_root_node::(); @@ -896,9 +896,9 @@ mod tests { // Children: left: leaf2, right: leaf1, label: 1 let right_child_expected_hash = ( TC::compute_parent_hash_from_children( - &AzksValue(leaf_2_hash.0 .0), + &AzksValue(leaf_2_hash.0.0), &leaf_2_hash.1, - &AzksValue(leaf_1_hash.0 .0), + &AzksValue(leaf_1_hash.0.0), &leaf_1_hash.1, ), NodeLabel::new(byte_arr_from_u64(0b1u64 << 63), 1u32).value::(), @@ -907,9 +907,9 @@ mod tests { // Children: left: new_leaf, right: leaf3, label: 0 let left_child_expected_hash = ( TC::compute_parent_hash_from_children( - &AzksValue(leaf_0_hash.0 .0), + &AzksValue(leaf_0_hash.0.0), &leaf_0_hash.1, - &AzksValue(leaf_3_hash.0 .0), + &AzksValue(leaf_3_hash.0.0), &leaf_3_hash.1, ), NodeLabel::new(byte_arr_from_u64(0b0u64), 1u32).value::(), @@ -969,9 +969,9 @@ mod tests { let right_child_hash = leaf_hashes[2 * i + 1].clone(); layer_1_hashes.push(( TC::compute_parent_hash_from_children( - &AzksValue(left_child_hash.0 .0), + &AzksValue(left_child_hash.0.0), &left_child_hash.1, - &AzksValue(right_child_hash.0 .0), + &AzksValue(right_child_hash.0.0), &right_child_hash.1, ), NodeLabel::new(byte_arr_from_u64(j << 62), 2u32).value::(), diff --git a/akd/src/utils.rs b/akd/src/utils.rs index 76fb812a..16352663 100644 --- a/akd/src/utils.rs +++ b/akd/src/utils.rs @@ -24,7 +24,7 @@ pub(crate) fn byte_arr_from_u64(input_int: u64) -> [u8; 32] { #[cfg(any(test, feature = "public_tests"))] pub(crate) fn random_label(rng: &mut impl rand::Rng) -> crate::NodeLabel { crate::NodeLabel { - label_val: rng.gen::<[u8; 32]>(), + label_val: rng.random::<[u8; 32]>(), label_len: 256, } } diff --git a/akd_core/Cargo.toml b/akd_core/Cargo.toml index ff9f539d..a18b0ebd 100644 --- a/akd_core/Cargo.toml +++ b/akd_core/Cargo.toml @@ -4,7 +4,7 @@ version = "0.12.0-pre.12" authors = ["akd contributors"] description = "Core utilities for 
the akd crate" license = "MIT OR Apache-2.0" -edition = "2021" +edition = "2024" keywords = ["key-transparency", "akd"] repository = "https://github.com/facebook/akd" readme = "../README.md" @@ -48,7 +48,7 @@ zeroize = "1" ## Optional dependencies ## blake3 = { version = "1", optional = true, default-features = false } protobuf = { version = "3", optional = true } -rand = { version = "0.8", optional = true } +rand = { version = "0.9.2", optional = true } serde = { version = "1", features = ["derive"], optional = true } serde_bytes = { version = "0.11", optional = true } tokio = { version = "1", features = ["rt"], optional = true } @@ -59,7 +59,7 @@ bincode = "1" itertools = "0.13" proptest = "1" proptest-derive = "0.6" -rand = "0.8" +rand = "0.9.2" serde = { version = "1", features = ["derive"] } criterion = "0.5" diff --git a/akd_core/benches/parallel_vrfs.rs b/akd_core/benches/parallel_vrfs.rs index 454ba8a2..09bf6c9a 100644 --- a/akd_core/benches/parallel_vrfs.rs +++ b/akd_core/benches/parallel_vrfs.rs @@ -9,12 +9,12 @@ extern crate criterion; use self::criterion::*; +use akd_core::VersionFreshness; use akd_core::configuration::NamedConfiguration; use akd_core::ecvrf::{VRFExpandedPrivateKey, VRFPublicKey}; -use akd_core::VersionFreshness; -use akd_core::{ecvrf::VRFKeyStorage, AkdLabel, AkdValue}; -use rand::distributions::Alphanumeric; +use akd_core::{AkdLabel, AkdValue, ecvrf::VRFKeyStorage}; use rand::Rng; +use rand::distr::Alphanumeric; macro_rules! bench_config { ( $x:ident ) => { @@ -71,7 +71,7 @@ fn main() { bench_config!(bench_single_vrf); fn bench_single_vrf(c: &mut Criterion) { - let rng = rand::rngs::OsRng; + let rng = rand::rng(); // Generate a random label let label = AkdLabel::from( diff --git a/akd_core/src/configuration/experimental.rs b/akd_core/src/configuration/experimental.rs index 3127d219..30627bcc 100644 --- a/akd_core/src/configuration/experimental.rs +++ b/akd_core/src/configuration/experimental.rs @@ -11,7 +11,7 @@ use core::marker::PhantomData; use super::traits::DomainLabel; use crate::configuration::Configuration; -use crate::hash::{Digest, DIGEST_BYTES}; +use crate::hash::{DIGEST_BYTES, Digest}; use crate::utils::i2osp_array; use crate::{AkdLabel, AkdValue, AzksValue, AzksValueWithEpoch, NodeLabel, VersionFreshness}; diff --git a/akd_core/src/configuration/whatsapp_v1.rs b/akd_core/src/configuration/whatsapp_v1.rs index 841c875d..fb198caf 100644 --- a/akd_core/src/configuration/whatsapp_v1.rs +++ b/akd_core/src/configuration/whatsapp_v1.rs @@ -8,10 +8,10 @@ //! Defines the WhatsApp v1 configuration use crate::configuration::Configuration; -use crate::hash::{Digest, DIGEST_BYTES}; +use crate::hash::{DIGEST_BYTES, Digest}; use crate::utils::i2osp_array; use crate::{ - AkdLabel, AkdValue, AzksValue, AzksValueWithEpoch, NodeLabel, VersionFreshness, EMPTY_VALUE, + AkdLabel, AkdValue, AzksValue, AzksValueWithEpoch, EMPTY_VALUE, NodeLabel, VersionFreshness, }; #[cfg(feature = "nostd")] diff --git a/akd_core/src/ecvrf/ecvrf_impl.rs b/akd_core/src/ecvrf/ecvrf_impl.rs index d3b4b852..4d264607 100644 --- a/akd_core/src/ecvrf/ecvrf_impl.rs +++ b/akd_core/src/ecvrf/ecvrf_impl.rs @@ -34,11 +34,11 @@ const NODE_LABEL_LEN: usize = 32; * * If you still see the error, you can simply ignore. It's harmless. 
*/ +use ed25519_dalek::SECRET_KEY_LENGTH; use ed25519_dalek::SecretKey as ed25519_PrivateKey; use ed25519_dalek::Sha512; use ed25519_dalek::SigningKey as ed25519_SigningKey; use ed25519_dalek::VerifyingKey as ed25519_PublicKey; -use ed25519_dalek::SECRET_KEY_LENGTH; use ed25519_dalek::{Digest, PUBLIC_KEY_LENGTH}; const SUITE: u8 = 0x03; @@ -214,7 +214,7 @@ impl VRFPublicKey { None => { return Err(VrfError::Verification( "Failed to decompress public key into Edwards point".to_string(), - )) + )); } }; let cprime = hash_points( @@ -358,7 +358,7 @@ impl TryFrom<&[u8]> for Proof { None => { return Err(VrfError::PublicKey( "Failed to decompress public key into Edwards Point".to_string(), - )) + )); } }; diff --git a/akd_core/src/ecvrf/tests.rs b/akd_core/src/ecvrf/tests.rs index 7f366eaf..a3d8d274 100644 --- a/akd_core/src/ecvrf/tests.rs +++ b/akd_core/src/ecvrf/tests.rs @@ -19,7 +19,7 @@ use curve25519_dalek::{ scalar::Scalar as ed25519_Scalar, }; use ed25519_dalek::{ - self, VerifyingKey as ed25519_PublicKey, PUBLIC_KEY_LENGTH, SECRET_KEY_LENGTH, + self, PUBLIC_KEY_LENGTH, SECRET_KEY_LENGTH, VerifyingKey as ed25519_PublicKey, }; #[cfg(feature = "serde_serialization")] use proptest::prelude::*; @@ -142,48 +142,48 @@ struct VRFTestVector { } /// These test vectors are taken from [RFC9381, Section B.3](https://www.ietf.org/rfc/rfc9381.html#name-ecvrf-edwards25519-sha512-t). -const TESTVECTORS : [VRFTestVector; 3] = [ +const TESTVECTORS: [VRFTestVector; 3] = [ // Example 16 VRFTestVector { - SK : "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", - PK : "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a", - alpha : b"", - x : "307c83864f2833cb427a2ef1c00a013cfdff2768d980c0a3a520f006904de94f", + SK: "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + PK: "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a", + alpha: b"", + x: "307c83864f2833cb427a2ef1c00a013cfdff2768d980c0a3a520f006904de94f", // try_and_increment succeeded on ctr = 0 - H : "91bbed02a99461df1ad4c6564a5f5d829d0b90cfc7903e7a5797bd658abf3318", - k : "7100f3d9eadb6dc4743b029736ff283f5be494128df128df2817106f345b8594b6d6da2d6fb0b4c0257eb337675d96eab49cf39e66cc2c9547c2bf8b2a6afae4", - U : "aef27c725be964c6a9bf4c45ca8e35df258c1878b838f37d9975523f09034071", - V : "5016572f71466c646c119443455d6cb9b952f07d060ec8286d678615d55f954f", - pi : "8657106690b5526245a92b003bb079ccd1a92130477671f6fc01ad16f26f723f26f8a57ccaed74ee1b190bed1f479d9727d2d0f9b005a6e456a35d4fb0daab1268a1b0db10836d9826a528ca76567805", - beta : "90cf1df3b703cce59e2a35b925d411164068269d7b2d29f3301c03dd757876ff66b71dda49d2de59d03450451af026798e8f81cd2e333de5cdf4f3e140fdd8ae", + H: "91bbed02a99461df1ad4c6564a5f5d829d0b90cfc7903e7a5797bd658abf3318", + k: "7100f3d9eadb6dc4743b029736ff283f5be494128df128df2817106f345b8594b6d6da2d6fb0b4c0257eb337675d96eab49cf39e66cc2c9547c2bf8b2a6afae4", + U: "aef27c725be964c6a9bf4c45ca8e35df258c1878b838f37d9975523f09034071", + V: "5016572f71466c646c119443455d6cb9b952f07d060ec8286d678615d55f954f", + pi: "8657106690b5526245a92b003bb079ccd1a92130477671f6fc01ad16f26f723f26f8a57ccaed74ee1b190bed1f479d9727d2d0f9b005a6e456a35d4fb0daab1268a1b0db10836d9826a528ca76567805", + beta: "90cf1df3b703cce59e2a35b925d411164068269d7b2d29f3301c03dd757876ff66b71dda49d2de59d03450451af026798e8f81cd2e333de5cdf4f3e140fdd8ae", }, // Example 17 VRFTestVector { - SK : "4ccd089b28ff96da9db6c346ec114e0f5b8a319f35aba624da8cf6ed4fb8a6fb", - PK : "3d4017c3e843895a92b70aa74d1b7ebc9c982ccf2ec4968cc0cd55f12af4660c", - alpha : 
b"\x72", - x : "68bd9ed75882d52815a97585caf4790a7f6c6b3b7f821c5e259a24b02e502e51", + SK: "4ccd089b28ff96da9db6c346ec114e0f5b8a319f35aba624da8cf6ed4fb8a6fb", + PK: "3d4017c3e843895a92b70aa74d1b7ebc9c982ccf2ec4968cc0cd55f12af4660c", + alpha: b"\x72", + x: "68bd9ed75882d52815a97585caf4790a7f6c6b3b7f821c5e259a24b02e502e51", // try_and_increment succeeded on ctr = 1 - H : "5b659fc3d4e9263fd9a4ed1d022d75eaacc20df5e09f9ea937502396598dc551", - k : "42589bbf0c485c3c91c1621bb4bfe04aed7be76ee48f9b00793b2342acb9c167cab856f9f9d4febc311330c20b0a8afd3743d05433e8be8d32522ecdc16cc5ce", - U : "1dcb0a4821a2c48bf53548228b7f170962988f6d12f5439f31987ef41f034ab3", - V : "fd03c0bf498c752161bae4719105a074630a2aa5f200ff7b3995f7bfb1513423", - pi : "f3141cd382dc42909d19ec5110469e4feae18300e94f304590abdced48aed5933bf0864a62558b3ed7f2fea45c92a465301b3bbf5e3e54ddf2d935be3b67926da3ef39226bbc355bdc9850112c8f4b02", - beta : "eb4440665d3891d668e7e0fcaf587f1b4bd7fbfe99d0eb2211ccec90496310eb5e33821bc613efb94db5e5b54c70a848a0bef4553a41befc57663b56373a5031", + H: "5b659fc3d4e9263fd9a4ed1d022d75eaacc20df5e09f9ea937502396598dc551", + k: "42589bbf0c485c3c91c1621bb4bfe04aed7be76ee48f9b00793b2342acb9c167cab856f9f9d4febc311330c20b0a8afd3743d05433e8be8d32522ecdc16cc5ce", + U: "1dcb0a4821a2c48bf53548228b7f170962988f6d12f5439f31987ef41f034ab3", + V: "fd03c0bf498c752161bae4719105a074630a2aa5f200ff7b3995f7bfb1513423", + pi: "f3141cd382dc42909d19ec5110469e4feae18300e94f304590abdced48aed5933bf0864a62558b3ed7f2fea45c92a465301b3bbf5e3e54ddf2d935be3b67926da3ef39226bbc355bdc9850112c8f4b02", + beta: "eb4440665d3891d668e7e0fcaf587f1b4bd7fbfe99d0eb2211ccec90496310eb5e33821bc613efb94db5e5b54c70a848a0bef4553a41befc57663b56373a5031", }, // Example 18 VRFTestVector { - SK : "c5aa8df43f9f837bedb7442f31dcb7b166d38535076f094b85ce3a2e0b4458f7", - PK : "fc51cd8e6218a1a38da47ed00230f0580816ed13ba3303ac5deb911548908025", - alpha : b"\xaf\x82", - x : "909a8b755ed902849023a55b15c23d11ba4d7f4ec5c2f51b1325a181991ea95c", + SK: "c5aa8df43f9f837bedb7442f31dcb7b166d38535076f094b85ce3a2e0b4458f7", + PK: "fc51cd8e6218a1a38da47ed00230f0580816ed13ba3303ac5deb911548908025", + alpha: b"\xaf\x82", + x: "909a8b755ed902849023a55b15c23d11ba4d7f4ec5c2f51b1325a181991ea95c", // try_and_increment succeeded on ctr = 0 - H : "bf4339376f5542811de615e3313d2b36f6f53c0acfebb482159711201192576a", - k : "38b868c335ccda94a088428cbf3ec8bc7955bfaffe1f3bd2aa2c59fc31a0febc59d0e1af3715773ce11b3bbdd7aba8e3505d4b9de6f7e4a96e67e0d6bb6d6c3a", - U : "2bae73e15a64042fcebf062abe7e432b2eca6744f3e8265bc38e009cd577ecd5", - V : "88cba1cb0d4f9b649d9a86026b69de076724a93a65c349c988954f0961c5d506", - pi : "9bc0f79119cc5604bf02d23b4caede71393cedfbb191434dd016d30177ccbf8096bb474e53895c362d8628ee9f9ea3c0e52c7a5c691b6c18c9979866568add7a2d41b00b05081ed0f58ee5e31b3a970e", - beta : "645427e5d00c62a23fb703732fa5d892940935942101e456ecca7bb217c61c452118fec1219202a0edcf038bb6373241578be7217ba85a2687f7a0310b2df19f", + H: "bf4339376f5542811de615e3313d2b36f6f53c0acfebb482159711201192576a", + k: "38b868c335ccda94a088428cbf3ec8bc7955bfaffe1f3bd2aa2c59fc31a0febc59d0e1af3715773ce11b3bbdd7aba8e3505d4b9de6f7e4a96e67e0d6bb6d6c3a", + U: "2bae73e15a64042fcebf062abe7e432b2eca6744f3e8265bc38e009cd577ecd5", + V: "88cba1cb0d4f9b649d9a86026b69de076724a93a65c349c988954f0961c5d506", + pi: "9bc0f79119cc5604bf02d23b4caede71393cedfbb191434dd016d30177ccbf8096bb474e53895c362d8628ee9f9ea3c0e52c7a5c691b6c18c9979866568add7a2d41b00b05081ed0f58ee5e31b3a970e", + beta: 
"645427e5d00c62a23fb703732fa5d892940935942101e456ecca7bb217c61c452118fec1219202a0edcf038bb6373241578be7217ba85a2687f7a0310b2df19f", }, ]; @@ -261,9 +261,11 @@ fn test_prove() { #[test] fn test_verify() { for tv in TESTVECTORS.iter() { - assert!(from_string!(VRFPublicKey, tv.PK) - .verify(&from_string!(Proof, tv.pi), tv.alpha) - .is_ok()); + assert!( + from_string!(VRFPublicKey, tv.PK) + .verify(&from_string!(Proof, tv.pi), tv.alpha) + .is_ok() + ); } } diff --git a/akd_core/src/ecvrf/traits.rs b/akd_core/src/ecvrf/traits.rs index 39839a16..f0d085f7 100644 --- a/akd_core/src/ecvrf/traits.rs +++ b/akd_core/src/ecvrf/traits.rs @@ -183,7 +183,7 @@ pub trait VRFKeyStorage: Clone + Sync + Send { Err(join_err) => { return Err(VrfError::SigningKey(format!( "Parallel VRF join error {join_err}" - ))) + ))); } Ok((node_label, (label, freshness, version, value))) => { results.push(((label, freshness, version, value), node_label)); diff --git a/akd_core/src/proto/mod.rs b/akd_core/src/proto/mod.rs index fc8b8109..820ac714 100644 --- a/akd_core/src/proto/mod.rs +++ b/akd_core/src/proto/mod.rs @@ -15,7 +15,7 @@ pub mod specs; #[cfg(test)] mod tests; -use crate::{hash::Digest, AzksValue, Bit}; +use crate::{AzksValue, Bit, hash::Digest}; use core::convert::{TryFrom, TryInto}; use protobuf::MessageField; @@ -79,9 +79,7 @@ macro_rules! require_messagefield { } macro_rules! hash_from_bytes { - ($obj:expr) => {{ - crate::hash::try_parse_digest($obj).map_err(Self::Error::Deserialization)? - }}; + ($obj:expr) => {{ crate::hash::try_parse_digest($obj).map_err(Self::Error::Deserialization)? }}; } macro_rules! convert_from_vector { @@ -229,7 +227,7 @@ impl TryFrom<&specs::types::SiblingProof> for crate::SiblingProof { _ => { return Err(ConversionError::Deserialization(format!( "Invalid direction: {direction}" - ))) + ))); } }; diff --git a/akd_core/src/proto/tests.rs b/akd_core/src/proto/tests.rs index 1261867f..7ea93c9e 100644 --- a/akd_core/src/proto/tests.rs +++ b/akd_core/src/proto/tests.rs @@ -10,12 +10,12 @@ use super::specs::types::*; use super::*; use crate::{AzksValue, Direction}; -use rand::{thread_rng, Rng}; +use rand::Rng; // ================= Test helpers ================= // fn random_hash() -> [u8; 32] { - thread_rng().gen::<[u8; 32]>() + rand::rng().random::<[u8; 32]>() } fn random_azks_element() -> crate::AzksElement { @@ -28,7 +28,7 @@ fn random_azks_element() -> crate::AzksElement { fn random_label() -> crate::NodeLabel { let label = crate::NodeLabel { label_val: random_hash(), - label_len: thread_rng().gen::() % 257, // Can be up to 256 + label_len: rand::rng().random::() % 257, // Can be up to 256 }; label.get_prefix(label.label_len) } @@ -114,11 +114,11 @@ fn test_convert_non_membership_proof() { #[test] fn test_convert_lookup_proof() { - let mut rng = thread_rng(); + let mut rng = rand::rng(); let original = crate::LookupProof { - epoch: rng.gen(), + epoch: rng.random(), value: crate::AkdValue(random_hash().to_vec()), - version: rng.gen(), + version: rng.random(), existence_vrf_proof: random_hash().to_vec(), existence_proof: crate::MembershipProof { label: random_label(), @@ -163,11 +163,11 @@ fn test_convert_lookup_proof() { #[test] fn test_convert_update_proof() { - let mut rng = thread_rng(); + let mut rng = rand::rng(); let original = crate::UpdateProof { - epoch: rng.gen(), + epoch: rng.random(), value: crate::AkdValue(random_hash().to_vec()), - version: rng.gen(), + version: rng.random(), existence_vrf_proof: random_hash().to_vec(), existence_proof: crate::MembershipProof { label: 
random_label(), @@ -213,11 +213,11 @@ fn non_membership_proof() -> crate::NonMembershipProof { } fn upd_proof() -> crate::UpdateProof { - let mut rng = thread_rng(); + let mut rng = rand::rng(); crate::UpdateProof { - epoch: rng.gen(), + epoch: rng.random(), value: crate::AkdValue(random_hash().to_vec()), - version: rng.gen(), + version: rng.random(), existence_vrf_proof: random_hash().to_vec(), existence_proof: crate::MembershipProof { label: random_label(), diff --git a/akd_core/src/types/mod.rs b/akd_core/src/types/mod.rs index 5c8fdefa..d4fdd951 100644 --- a/akd_core/src/types/mod.rs +++ b/akd_core/src/types/mod.rs @@ -12,13 +12,13 @@ //! 2. Key history //! 3. Audit (append-only) +use crate::ARITY; use crate::hash::Digest; #[cfg(feature = "serde_serialization")] use crate::utils::serde_helpers::{ azks_value_hex_deserialize, azks_value_hex_serialize, bytes_deserialize_hex, bytes_serialize_hex, }; -use crate::ARITY; #[cfg(feature = "nostd")] use alloc::string::{String, ToString}; diff --git a/akd_core/src/types/node_label/mod.rs b/akd_core/src/types/node_label/mod.rs index baf23d61..401c2831 100644 --- a/akd_core/src/types/node_label/mod.rs +++ b/akd_core/src/types/node_label/mod.rs @@ -8,7 +8,7 @@ //! This module contains the specifics for NodeLabel only, other types don't have the //! same level of detail and aren't broken into sub-modules -use crate::{configuration::Configuration, PrefixOrdering, SizeOf}; +use crate::{PrefixOrdering, SizeOf, configuration::Configuration}; #[cfg(feature = "serde_serialization")] use crate::utils::serde_helpers::{bytes_deserialize_hex, bytes_serialize_hex}; diff --git a/akd_core/src/types/node_label/tests.rs b/akd_core/src/types/node_label/tests.rs index 43013311..1717554a 100644 --- a/akd_core/src/types/node_label/tests.rs +++ b/akd_core/src/types/node_label/tests.rs @@ -11,14 +11,14 @@ use super::*; use crate::test_config_sync; #[cfg(feature = "nostd")] use alloc::vec; -use rand::{thread_rng, Rng}; +use rand::Rng; // ================= Test helpers ================= // fn random_label() -> crate::NodeLabel { - let mut rng = thread_rng(); + let mut rng = rand::rng(); crate::NodeLabel { - label_val: rng.gen::<[u8; 32]>(), + label_val: rng.random::<[u8; 32]>(), label_len: 256, } } @@ -180,7 +180,8 @@ fn test_byte_arr_from_u64_medium() { let computed = byte_arr_from_u64(val); assert!( expected == computed, - "{}", "Byte from u64 conversion wrong for medium, ~2 byte u64! Expected {expected:?} and got {computed:?}" + "{}", + "Byte from u64 conversion wrong for medium, ~2 byte u64! Expected {expected:?} and got {computed:?}" ) } @@ -200,7 +201,8 @@ fn test_byte_arr_from_u64_larger() { let computed = byte_arr_from_u64(val); assert!( expected == computed, - "{}", "Byte from u64 conversion wrong for larger, ~3 byte u64! Expected {expected:?} and got {computed:?}" + "{}", + "Byte from u64 conversion wrong for larger, ~3 byte u64! Expected {expected:?} and got {computed:?}" ) } @@ -310,7 +312,8 @@ fn test_node_label_lcp_self_prefix_leading_one() { let computed = label_1.get_longest_common_prefix::(label_2); assert!( computed == expected, - "{}", "Longest common substring with self with leading one, not equal to itself! Expected: {expected:?}, Got: {computed:?}" + "{}", + "Longest common substring with self with leading one, not equal to itself! 
Expected: {expected:?}, Got: {computed:?}" ) } @@ -335,7 +338,8 @@ fn test_node_label_lcp_other_one() { let computed = label_1.get_longest_common_prefix::(label_2); assert!( computed == expected, - "{}", "Longest common substring with other with leading one, not equal to expected! Expected: {expected:?}, Computed: {computed:?}" + "{}", + "Longest common substring with other with leading one, not equal to expected! Expected: {expected:?}, Computed: {computed:?}" ) } @@ -420,7 +424,8 @@ fn test_get_dir_example() { let computed = label_2.get_prefix_ordering(label_1); assert!( computed == expected, - "{}", "Direction not equal to expected. Node = {label_1:?}, prefix = {label_2:?}, computed = {computed:?}" + "{}", + "Direction not equal to expected. Node = {label_1:?}, prefix = {label_2:?}, computed = {computed:?}" ) } @@ -436,7 +441,8 @@ fn test_get_prefix_small() { let computed = label_1.get_prefix(prefix_len); assert!( computed == label_2, - "{}", "Direction not equal to expected. Node = {label_1:?}, prefix = {label_2:?}, computed = {computed:?}" + "{}", + "Direction not equal to expected. Node = {label_1:?}, prefix = {label_2:?}, computed = {computed:?}" ) } diff --git a/akd_core/src/utils.rs b/akd_core/src/utils.rs index 62392758..e91ed6a3 100644 --- a/akd_core/src/utils.rs +++ b/akd_core/src/utils.rs @@ -180,7 +180,9 @@ pub fn get_marker_versions( // function will panic. fn find_max_index_in_skiplist(input: u64) -> usize { if input < MARKER_VERSION_SKIPLIST[0] { - panic!("find_max_index_in_skiplist called with input less than smallest element of MARKER_VERSION_SKIPLIST"); + panic!( + "find_max_index_in_skiplist called with input less than smallest element of MARKER_VERSION_SKIPLIST" + ); } let mut i = 0; while i < MARKER_VERSION_SKIPLIST.len() { @@ -292,7 +294,7 @@ macro_rules! 
test_config_sync { mod tests { use super::*; use alloc::vec; - use rand::{rngs::OsRng, Rng}; + use rand::Rng; #[test] fn test_get_marker_versions() { @@ -331,24 +333,24 @@ mod tests { } fn gen_versions( - rng: &mut OsRng, + rng: &mut rand::rngs::ThreadRng, start_type: &RangeType, end_type: &RangeType, epoch_type: &RangeType, ) -> (u64, u64, u64) { let small_jump = 10; let medium_jump = 1000; - let start_version: u64 = rng.gen_range(match start_type { + let start_version: u64 = rng.random_range(match start_type { RangeType::Small => 1..small_jump, RangeType::Medium => 1..medium_jump, RangeType::Large => 1..u64::MAX - 2 * (small_jump + medium_jump), }); - let end_version: u64 = rng.gen_range(match end_type { + let end_version: u64 = rng.random_range(match end_type { RangeType::Small => start_version..start_version + small_jump, RangeType::Medium => start_version..start_version + medium_jump, RangeType::Large => start_version..u64::MAX - small_jump - medium_jump, }); - let epoch: u64 = rng.gen_range(match epoch_type { + let epoch: u64 = rng.random_range(match epoch_type { RangeType::Small => end_version..end_version + small_jump, RangeType::Medium => end_version..end_version + medium_jump, RangeType::Large => end_version..u64::MAX, @@ -362,7 +364,7 @@ mod tests { let iterations = 10000; let options = [RangeType::Small, RangeType::Medium, RangeType::Large]; - let mut rng = OsRng; + let mut rng = rand::rng(); for (start_type, end_type, epoch_type) in itertools::iproduct!(&options, &options, &options) { for _ in 0..iterations { diff --git a/akd_core/src/verify/history.rs b/akd_core/src/verify/history.rs index 19e4983a..4107a656 100644 --- a/akd_core/src/verify/history.rs +++ b/akd_core/src/verify/history.rs @@ -7,11 +7,11 @@ //! Verification of key history proofs +use super::VerificationError; use super::base::{ verify_existence, verify_existence_with_commitment, verify_existence_with_val, verify_nonexistence, }; -use super::VerificationError; use crate::configuration::Configuration; use crate::hash::Digest; @@ -135,7 +135,7 @@ fn verify_with_history_params( Ordering::Greater => { return Err(VerificationError::HistoryProof(format!( "Expected at most {recency} update proofs, but got {num_proofs} of them", - ))) + ))); } Ordering::Less => { if start_version != 1 { diff --git a/akd_core/src/verify/lookup.rs b/akd_core/src/verify/lookup.rs index fc3a1042..a6d659d6 100644 --- a/akd_core/src/verify/lookup.rs +++ b/akd_core/src/verify/lookup.rs @@ -7,8 +7,8 @@ //! 
Verification of lookup proofs -use super::base::{verify_existence, verify_existence_with_val, verify_nonexistence}; use super::VerificationError; +use super::base::{verify_existence, verify_existence_with_val, verify_nonexistence}; use crate::configuration::Configuration; use crate::hash::Digest; diff --git a/akd_core/src/verify/mod.rs b/akd_core/src/verify/mod.rs index 549d0bc6..1cd5dec8 100644 --- a/akd_core/src/verify/mod.rs +++ b/akd_core/src/verify/mod.rs @@ -82,5 +82,5 @@ impl From for VerificationError { #[cfg(feature = "public_tests")] pub use base::{verify_membership_for_tests_only, verify_nonmembership_for_tests_only}; -pub use history::{key_history_verify, HistoryVerificationParams}; +pub use history::{HistoryVerificationParams, key_history_verify}; pub use lookup::lookup_verify; diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 072d73b6..9ab69e0f 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -3,7 +3,7 @@ name = "examples" version = "0.12.0-pre.12" authors = ["akd contributors"] license = "MIT OR Apache-2.0" -edition = "2021" +edition = "2024" publish = false @@ -28,11 +28,11 @@ hex = "0.4" indicatif = "0.17" log = { version = "0.4", features = ["kv_unstable"] } multi_log = "0.1" -mysql_async = "0.32" -mysql_common = "0.31" +mysql_async = "0.36.1" +mysql_common = "0.35.5" once_cell = "1" protobuf = "3" -rand = "0.8" +rand = "0.9.2" serde = { version = "1", features = ["derive"] } serde_json = "1" thread-id = "4" diff --git a/examples/src/fixture_generator/examples/example_tests.rs b/examples/src/fixture_generator/examples/example_tests.rs index 78a48fee..4f162b3a 100644 --- a/examples/src/fixture_generator/examples/example_tests.rs +++ b/examples/src/fixture_generator/examples/example_tests.rs @@ -10,11 +10,11 @@ use std::fs::File; use akd::{ + NamedConfiguration, append_only_zks::AzksParallelismConfig, directory::Directory, ecvrf::HardCodedAkdVRF, - storage::{memory::AsyncInMemoryDatabase, Database, StorageManager, StorageUtil}, - NamedConfiguration, + storage::{Database, StorageManager, StorageUtil, memory::AsyncInMemoryDatabase}, }; use crate::fixture_generator::reader::Reader; diff --git a/examples/src/fixture_generator/generator.rs b/examples/src/fixture_generator/generator.rs index 835ecd45..a0ccfbce 100644 --- a/examples/src/fixture_generator/generator.rs +++ b/examples/src/fixture_generator/generator.rs @@ -18,14 +18,14 @@ use akd::directory::Directory; use akd::storage::types::DbRecord; use akd::storage::{StorageManager, StorageUtil}; use akd::{AkdLabel, AkdValue, DomainLabel, NamedConfiguration}; -use rand::rngs::StdRng; use rand::Rng; use rand::SeedableRng; +use rand::rngs::StdRng; use serde::{Deserialize, Serialize}; use crate::fixture_generator::parser::Args; -use crate::fixture_generator::writer::yaml::YamlWriter; use crate::fixture_generator::writer::Writer; +use crate::fixture_generator::writer::yaml::YamlWriter; /// Directory state comprises all database records at a particular epoch. 
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -75,14 +75,16 @@ pub(crate) async fn generate(args: &Args // args assertions assert!(args.max_updates >= args.min_updates); - assert!(args - .capture_states - .as_ref() - .is_none_or(|states| states.iter().max().unwrap() <= &args.epochs)); - assert!(args - .capture_deltas - .as_ref() - .is_none_or(|deltas| deltas.iter().max().unwrap() <= &args.epochs)); + assert!( + args.capture_states + .as_ref() + .is_none_or(|states| states.iter().max().unwrap() <= &args.epochs) + ); + assert!( + args.capture_deltas + .as_ref() + .is_none_or(|deltas| deltas.iter().max().unwrap() <= &args.epochs) + ); // process users let mut user_map = HashMap::new(); @@ -152,7 +154,7 @@ pub(crate) async fn generate(args: &Args // generate random key updates if allowed if !args.no_generated_updates { - let num_updates = rng.gen_range(args.min_updates..args.max_updates); + let num_updates = rng.random_range(args.min_updates..args.max_updates); for _ in updates.len()..num_updates as usize { updates.push((AkdLabel::random(&mut rng), AkdValue::random(&mut rng))); } diff --git a/examples/src/fixture_generator/reader/yaml.rs b/examples/src/fixture_generator/reader/yaml.rs index 48f4fada..d03509f2 100644 --- a/examples/src/fixture_generator/reader/yaml.rs +++ b/examples/src/fixture_generator/reader/yaml.rs @@ -15,9 +15,9 @@ use std::result::Result; // import without risk of name clashing use serde::de::DeserializeOwned; +use crate::fixture_generator::YAML_SEPARATOR; use crate::fixture_generator::generator::{Delta, Metadata, State}; use crate::fixture_generator::reader::{Reader, ReaderError}; -use crate::fixture_generator::YAML_SEPARATOR; impl From for ReaderError { fn from(error: std::io::Error) -> Self { @@ -69,7 +69,7 @@ impl YamlFileReader { None => { return Err(ReaderError::Format( "EOF encountered while looking for start of YAML doc".to_string(), - )) + )); } Some(Err(err)) => return Err(ReaderError::Input(err.to_string())), } diff --git a/examples/src/fixture_generator/writer/yaml.rs b/examples/src/fixture_generator/writer/yaml.rs index 933b2f94..4a7bd081 100644 --- a/examples/src/fixture_generator/writer/yaml.rs +++ b/examples/src/fixture_generator/writer/yaml.rs @@ -11,7 +11,7 @@ use std::io::Write; use serde::Serialize; -use crate::fixture_generator::{writer::Writer, YAML_SEPARATOR}; +use crate::fixture_generator::{YAML_SEPARATOR, writer::Writer}; /// YAML format writer. pub(crate) struct YamlWriter { diff --git a/examples/src/mysql_demo/directory_host.rs b/examples/src/mysql_demo/directory_host.rs index 25f0537e..45574c5f 100644 --- a/examples/src/mysql_demo/directory_host.rs +++ b/examples/src/mysql_demo/directory_host.rs @@ -5,10 +5,10 @@ // License, Version 2.0 found in the LICENSE-APACHE file in the root directory // of this source tree. You may select, at your option, one of the above-listed licenses. +use akd::HistoryParams; use akd::configuration::Configuration; use akd::ecvrf::VRFKeyStorage; use akd::storage::Database; -use akd::HistoryParams; use akd::{AkdLabel, AkdValue}; use akd::{Directory, EpochHash}; use log::{error, info}; diff --git a/examples/src/mysql_demo/mod.rs b/examples/src/mysql_demo/mod.rs index 29d15c7a..55ce1bbf 100644 --- a/examples/src/mysql_demo/mod.rs +++ b/examples/src/mysql_demo/mod.rs @@ -7,15 +7,15 @@ //! 
An example tool for running AKD backed by MySQL storage +use akd::Directory; use akd::append_only_zks::AzksParallelismConfig; use akd::ecvrf::HardCodedAkdVRF; use akd::storage::StorageManager; -use akd::Directory; use clap::{Parser, ValueEnum}; use commands::Command; use log::{debug, error, info, warn}; use mysql::AsyncMySqlDatabase; -use rand::distributions::Alphanumeric; +use rand::distr::Alphanumeric; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; use std::convert::From; diff --git a/examples/src/mysql_demo/mysql.rs b/examples/src/mysql_demo/mysql.rs index cc99a47f..daab16ee 100644 --- a/examples/src/mysql_demo/mysql.rs +++ b/examples/src/mysql_demo/mysql.rs @@ -8,12 +8,12 @@ //! This module implements operations for a simple asynchronized mysql database use crate::mysql_demo::mysql_storables::MySqlStorable; +use akd::NodeLabel; use akd::errors::StorageError; use akd::hash::DIGEST_BYTES; use akd::storage::types::{DbRecord, KeyData, StorageType, ValueState, ValueStateRetrievalFlag}; use akd::storage::{Database, Storable}; use akd::tree_node::TreeNodeWithPreviousValue; -use akd::NodeLabel; use akd::{AkdLabel, AkdValue}; use async_trait::async_trait; use log::{debug, error, info, warn}; @@ -539,7 +539,9 @@ impl<'a> AsyncMySqlDatabase { break; } Err(err) => { - warn!("Docker ls returned error \"{err:?}\"\nTrying next possible docker command location"); + warn!( + "Docker ls returned error \"{err:?}\"\nTrying next possible docker command location" + ); } } } diff --git a/examples/src/mysql_demo/mysql_storables.rs b/examples/src/mysql_demo/mysql_storables.rs index 8feccbfb..baf9d248 100644 --- a/examples/src/mysql_demo/mysql_storables.rs +++ b/examples/src/mysql_demo/mysql_storables.rs @@ -9,10 +9,10 @@ use std::convert::TryInto; -use akd::storage::types::{DbRecord, StorageType}; +use akd::NodeLabel; use akd::storage::Storable; +use akd::storage::types::{DbRecord, StorageType}; use akd::tree_node::{NodeKey, TreeNodeWithPreviousValue}; -use akd::NodeLabel; use mysql_async::prelude::*; use mysql_async::*; @@ -24,8 +24,7 @@ pub(crate) const TABLE_USER: &str = "users"; pub(crate) const TEMP_IDS_TABLE: &str = "temp_ids_table"; const SELECT_AZKS_DATA: &str = "`epoch`, `num_nodes`"; -const SELECT_HISTORY_TREE_NODE_DATA: &str = - "`label_len`, `label_val`, `last_epoch`, `least_descendant_ep`, `parent_label_len`, `parent_label_val`, `node_type`, `left_child_len`, `left_child_label_val`, `right_child_len`, `right_child_label_val`, `hash`, `p_last_epoch`, `p_least_descendant_ep`, `p_parent_label_len`, `p_parent_label_val`, `p_node_type`, `p_left_child_len`, `p_left_child_label_val`, `p_right_child_len`, `p_right_child_label_val`, `p_hash`"; +const SELECT_HISTORY_TREE_NODE_DATA: &str = "`label_len`, `label_val`, `last_epoch`, `least_descendant_ep`, `parent_label_len`, `parent_label_val`, `node_type`, `left_child_len`, `left_child_label_val`, `right_child_len`, `right_child_label_val`, `hash`, `p_last_epoch`, `p_least_descendant_ep`, `p_parent_label_len`, `p_parent_label_val`, `p_node_type`, `p_left_child_len`, `p_left_child_label_val`, `p_right_child_len`, `p_right_child_label_val`, `p_hash`"; const SELECT_USER_DATA: &str = "`username`, `epoch`, `version`, `node_label_val`, `node_label_len`, `data`"; @@ -63,12 +62,15 @@ pub(crate) trait MySqlStorable { impl MySqlStorable for DbRecord { fn set_statement(&self) -> String { match &self { - DbRecord::Azks(_) => format!("INSERT INTO `{TABLE_AZKS}` (`key`, {SELECT_AZKS_DATA}) + DbRecord::Azks(_) => format!( + "INSERT INTO `{TABLE_AZKS}` (`key`, 
{SELECT_AZKS_DATA}) VALUES (:key, :epoch, :num_nodes) ON DUPLICATE KEY UPDATE `epoch` = :epoch - , `num_nodes` = :num_nodes"), - DbRecord::TreeNode(_) => format!("INSERT INTO `{TABLE_HISTORY_TREE_NODES}` ({SELECT_HISTORY_TREE_NODE_DATA}) + , `num_nodes` = :num_nodes" + ), + DbRecord::TreeNode(_) => format!( + "INSERT INTO `{TABLE_HISTORY_TREE_NODES}` ({SELECT_HISTORY_TREE_NODE_DATA}) VALUES (:label_len , :label_val , :last_epoch @@ -113,8 +115,11 @@ impl MySqlStorable for DbRecord { , `p_left_child_label_val` = :p_left_child_label_val , `p_right_child_len` = :p_right_child_len , `p_right_child_label_val` = :p_right_child_label_val - , `p_hash` = :p_hash"), - DbRecord::ValueState(_) => format!("INSERT INTO `{TABLE_USER}` ({SELECT_USER_DATA}) VALUES (:username, :epoch, :version, :node_label_val, :node_label_len, :data)"), + , `p_hash` = :p_hash" + ), + DbRecord::ValueState(_) => format!( + "INSERT INTO `{TABLE_USER}` ({SELECT_USER_DATA}) VALUES (:username, :epoch, :version, :node_label_val, :node_label_len, :data)" + ), } } @@ -390,20 +395,12 @@ impl MySqlStorable for DbRecord { fn get_batch_create_temp_table() -> Option { match St::data_type() { StorageType::Azks => None, - StorageType::TreeNode => { - Some( - format!( - "CREATE TEMPORARY TABLE `{TEMP_IDS_TABLE}`(`label_len` INT UNSIGNED NOT NULL, `label_val` VARBINARY(32) NOT NULL, PRIMARY KEY(`label_len`, `label_val`))" - ) - ) - }, - StorageType::ValueState => { - Some( - format!( - "CREATE TEMPORARY TABLE `{TEMP_IDS_TABLE}`(`username` VARCHAR(256) NOT NULL, `epoch` BIGINT UNSIGNED NOT NULL, PRIMARY KEY(`username`, `epoch`))" - ) - ) - }, + StorageType::TreeNode => Some(format!( + "CREATE TEMPORARY TABLE `{TEMP_IDS_TABLE}`(`label_len` INT UNSIGNED NOT NULL, `label_val` VARBINARY(32) NOT NULL, PRIMARY KEY(`label_len`, `label_val`))" + )), + StorageType::ValueState => Some(format!( + "CREATE TEMPORARY TABLE `{TEMP_IDS_TABLE}`(`username` VARCHAR(256) NOT NULL, `epoch` BIGINT UNSIGNED NOT NULL, PRIMARY KEY(`username`, `epoch`))" + )), } } diff --git a/examples/src/mysql_demo/tests/memory_tests.rs b/examples/src/mysql_demo/tests/memory_tests.rs index 0f1c03fe..c823c92b 100644 --- a/examples/src/mysql_demo/tests/memory_tests.rs +++ b/examples/src/mysql_demo/tests/memory_tests.rs @@ -9,7 +9,7 @@ use crate::{ mysql_demo::tests::test_util::{directory_test_suite, log_init}, test_config_serial, }; -use akd::{ecvrf::HardCodedAkdVRF, storage::StorageManager, Configuration}; +use akd::{Configuration, ecvrf::HardCodedAkdVRF, storage::StorageManager}; use log::info; type InMemoryDb = akd::storage::memory::AsyncInMemoryDatabase; @@ -33,7 +33,9 @@ test_config_serial!(test_directory_operations_with_caching); async fn test_directory_operations_with_caching() { log_init(log::Level::Info); - info!("\n\n******** Starting In-Memory Directory Operations (w/caching) Integration Test ********\n\n"); + info!( + "\n\n******** Starting In-Memory Directory Operations (w/caching) Integration Test ********\n\n" + ); let db = InMemoryDb::new(); @@ -41,5 +43,7 @@ async fn test_directory_operations_with_caching() { let storage_manager = StorageManager::new(db, None, None, None); directory_test_suite::(&storage_manager, 500, &vrf).await; - info!("\n\n******** Finished In-Memory Directory Operations (w/caching) Integration Test ********\n\n"); + info!( + "\n\n******** Finished In-Memory Directory Operations (w/caching) Integration Test ********\n\n" + ); } diff --git a/examples/src/mysql_demo/tests/mysql_db_tests.rs b/examples/src/mysql_demo/tests/mysql_db_tests.rs index 
c96a3536..6330065f 100644 --- a/examples/src/mysql_demo/tests/mysql_db_tests.rs +++ b/examples/src/mysql_demo/tests/mysql_db_tests.rs @@ -48,6 +48,8 @@ async fn test_mysql_db() { println!("ERROR: Failed to clean MySQL test database with error {error}"); } } else { - println!("WARN: Skipping MySQL test due to test guard noting that the docker container appears to not be running."); + println!( + "WARN: Skipping MySQL test due to test guard noting that the docker container appears to not be running." + ); } } diff --git a/examples/src/mysql_demo/tests/mysql_tests.rs b/examples/src/mysql_demo/tests/mysql_tests.rs index 888f5afc..eebb0314 100644 --- a/examples/src/mysql_demo/tests/mysql_tests.rs +++ b/examples/src/mysql_demo/tests/mysql_tests.rs @@ -11,7 +11,7 @@ use crate::mysql_demo::tests::test_util::{ }; use crate::test_config_serial; use akd::storage::StorageManager; -use akd::{ecvrf::HardCodedAkdVRF, Configuration}; +use akd::{Configuration, ecvrf::HardCodedAkdVRF}; use log::{error, info, warn}; test_config_serial!(test_directory_operations); @@ -47,7 +47,7 @@ async fn test_directory_operations() { // delete all data from the db if let Err(error) = mysql_db.delete_data().await { - error!("Error cleaning mysql prior to test suite: {}", error); + error!("Error cleaning mysql prior to test suite: {error}"); } let vrf = HardCodedAkdVRF {}; @@ -59,13 +59,12 @@ async fn test_directory_operations() { // clean the test infra if let Err(mysql_async::Error::Server(error)) = storage_manager.get_db().drop_tables().await { - error!( - "ERROR: Failed to clean MySQL test database with error {}", - error - ); + error!("ERROR: Failed to clean MySQL test database with error {error}"); } } else { - warn!("WARN: Skipping MySQL test due to test guard noting that the docker container appears to not be running."); + warn!( + "WARN: Skipping MySQL test due to test guard noting that the docker container appears to not be running." + ); } info!("\n\n******** Completed MySQL Directory Operations Integration Test ********\n\n"); @@ -75,7 +74,9 @@ test_config_serial!(test_directory_operations_with_caching); async fn test_directory_operations_with_caching() { log_init(log::Level::Info); - info!("\n\n******** Starting MySQL Directory Operations (w/caching) Integration Test ********\n\n"); + info!( + "\n\n******** Starting MySQL Directory Operations (w/caching) Integration Test ********\n\n" + ); if AsyncMySqlDatabase::test_guard() { // create the "test" database @@ -104,7 +105,7 @@ async fn test_directory_operations_with_caching() { // delete all data from the db if let Err(error) = mysql_db.delete_data().await { - error!("Error cleaning mysql prior to test suite: {}", error); + error!("Error cleaning mysql prior to test suite: {error}"); } let vrf = HardCodedAkdVRF {}; @@ -116,16 +117,17 @@ async fn test_directory_operations_with_caching() { // clean the test infra if let Err(mysql_async::Error::Server(error)) = storage_manager.get_db().drop_tables().await { - error!( - "ERROR: Failed to clean MySQL test database with error {}", - error - ); + error!("ERROR: Failed to clean MySQL test database with error {error}"); } } else { - warn!("WARN: Skipping MySQL test due to test guard noting that the docker container appears to not be running."); + warn!( + "WARN: Skipping MySQL test due to test guard noting that the docker container appears to not be running." 
+ ); } - info!("\n\n******** Completed MySQL Directory Operations (w/caching) Integration Test ********\n\n"); + info!( + "\n\n******** Completed MySQL Directory Operations (w/caching) Integration Test ********\n\n" + ); } test_config_serial!(test_lookups); @@ -161,7 +163,7 @@ async fn test_lookups() { // delete all data from the db if let Err(error) = mysql_db.delete_data().await { - error!("Error cleaning mysql prior to test suite: {}", error); + error!("Error cleaning mysql prior to test suite: {error}"); } let vrf = HardCodedAkdVRF {}; @@ -172,13 +174,12 @@ async fn test_lookups() { // clean the test infra if let Err(mysql_async::Error::Server(error)) = storage_manager.get_db().drop_tables().await { - error!( - "ERROR: Failed to clean MySQL test database with error {}", - error - ); + error!("ERROR: Failed to clean MySQL test database with error {error}"); } } else { - warn!("WARN: Skipping MySQL test due to test guard noting that the docker container appears to not be running."); + warn!( + "WARN: Skipping MySQL test due to test guard noting that the docker container appears to not be running." + ); } info!("\n\n******** Completed MySQL Lookup Tests ********\n\n"); diff --git a/examples/src/mysql_demo/tests/test_util.rs b/examples/src/mysql_demo/tests/test_util.rs index e3fccca9..4cf0a901 100644 --- a/examples/src/mysql_demo/tests/test_util.rs +++ b/examples/src/mysql_demo/tests/test_util.rs @@ -7,18 +7,18 @@ extern crate thread_id; +use akd::Directory; +use akd::HistoryParams; use akd::append_only_zks::AzksParallelismConfig; use akd::configuration::Configuration; use akd::ecvrf::VRFKeyStorage; use akd::storage::{Database, StorageManager}; -use akd::Directory; -use akd::HistoryParams; use akd::{AkdLabel, AkdValue}; -use log::{info, Level, Metadata, Record}; +use log::{Level, Metadata, Record, info}; use once_cell::sync::OnceCell; -use rand::distributions::Alphanumeric; +use rand::Rng; +use rand::distr::Alphanumeric; use rand::seq::IteratorRandom; -use rand::{thread_rng, Rng}; use std::fs::File; use std::io; use std::io::Write; @@ -131,12 +131,12 @@ pub(crate) async fn test_lookups = vec![]; for _ in 0..num_users { users.push( - thread_rng() + rand::rng() .sample_iter(&Alphanumeric) .take(30) .map(char::from) @@ -169,7 +169,7 @@ pub(crate) async fn test_lookups = vec![]; for _ in 0..num_users { users.push( - thread_rng() + rand::rng() .sample_iter(&Alphanumeric) .take(30) .map(char::from) diff --git a/examples/src/test_vectors/mod.rs b/examples/src/test_vectors/mod.rs index 2e8fcb7d..c10a94e5 100644 --- a/examples/src/test_vectors/mod.rs +++ b/examples/src/test_vectors/mod.rs @@ -8,14 +8,14 @@ //! Produces test vectors for various structs that can be used to verify operations //! in the client against what the server produces. 
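// Editor's note: several hunks above replace positional logging arguments with inlined
// format arguments (`{error}` instead of `"{}", error`). A minimal sketch of the two
// equivalent forms, assuming the `log` crate these examples already depend on; the
// function name `demo_inlined_args` is illustrative and not part of this patch.
fn demo_inlined_args(error: std::io::Error) {
    // Positional form, as removed by this change:
    log::error!("Error cleaning mysql prior to test suite: {}", error);
    // Captured-identifier form, as added (stable since Rust 1.58 and preferred here):
    log::error!("Error cleaning mysql prior to test suite: {error}");
}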
-use crate::fixture_generator::writer::yaml::YamlWriter; use crate::fixture_generator::writer::Writer; +use crate::fixture_generator::writer::yaml::YamlWriter; use akd::append_only_zks::AzksParallelismConfig; use akd::directory::Directory; use akd::ecvrf::HardCodedAkdVRF; use akd::hash::DIGEST_BYTES; -use akd::storage::memory::AsyncInMemoryDatabase; use akd::storage::StorageManager; +use akd::storage::memory::AsyncInMemoryDatabase; use akd::verify::{key_history_verify, lookup_verify}; use akd::{ AkdLabel, AkdValue, DomainLabel, HistoryParams, HistoryVerificationParams, NamedConfiguration, @@ -282,7 +282,7 @@ async fn generate_impl() -> Result { mod tests { use super::*; use crate::{ - fixture_generator::reader::{yaml::YamlFileReader, Reader}, + fixture_generator::reader::{Reader, yaml::YamlFileReader}, test_config, }; diff --git a/examples/src/wasm_client/mod.rs b/examples/src/wasm_client/mod.rs index 3c3a162b..1021051c 100644 --- a/examples/src/wasm_client/mod.rs +++ b/examples/src/wasm_client/mod.rs @@ -187,8 +187,8 @@ pub mod tests { use akd::append_only_zks::AzksParallelismConfig; use akd::errors::AkdError; - use akd::storage::memory::AsyncInMemoryDatabase; use akd::storage::StorageManager; + use akd::storage::memory::AsyncInMemoryDatabase; use akd::{AkdLabel, AkdValue, Directory}; use protobuf::Message; diff --git a/examples/src/whatsapp_kt_auditor/auditor.rs b/examples/src/whatsapp_kt_auditor/auditor.rs index 8460dec5..82813526 100644 --- a/examples/src/whatsapp_kt_auditor/auditor.rs +++ b/examples/src/whatsapp_kt_auditor/auditor.rs @@ -9,11 +9,11 @@ use super::EpochSummary; -use anyhow::{anyhow, bail, Result}; +use anyhow::{Result, anyhow, bail}; use clap::{Parser, Subcommand}; use std::convert::TryFrom; -use xml::reader::XmlEvent; use xml::EventReader; +use xml::reader::XmlEvent; // Constant strings specific to the XML returned from the Cloudfront bucket query const KEY_STR: &str = "Key"; @@ -86,7 +86,10 @@ pub(crate) fn display_audit_proofs_info(info: &mut [EpochSummary]) -> Result
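// Editor's note: the import reshuffling throughout this diff (e.g. `xml::EventReader` before
// `xml::reader::XmlEvent`, and `Result` before `anyhow`/`bail` inside braces) follows the
// rustfmt 2024 style edition, which appears to sort `use` paths in plain ASCII order, so
// capitalized segments precede lowercase module segments. An illustrative module, assuming
// the `anyhow` and `xml-rs` dependencies these examples already use; the module name is
// hypothetical and not part of this patch.
#[allow(unused_imports)]
mod style_edition_2024_import_order {
    // The 2021 style edition would render these as:
    //   use anyhow::{anyhow, bail, Result};
    //   use xml::reader::XmlEvent;
    //   use xml::EventReader;
    use anyhow::{Result, anyhow, bail};
    use xml::EventReader;
    use xml::reader::XmlEvent;
}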