diff --git a/.DS_Store b/.DS_Store
old mode 100755
new mode 100644
diff --git a/.cursorrules b/.cursorrules
deleted file mode 100755
index 74ffc49..0000000
--- a/.cursorrules
+++ /dev/null
@@ -1,155 +0,0 @@
-# Waverless project rules
-
-- Key concepts
-  - Rules
-    This file; it must be kept in sync with the memories
-  - review
-    review.md in the project root, used to describe tasks (problems) and to record design plans and execution records
-  - design.canvas
-    Whenever "the canvas" is mentioned it means this file, since there is currently no other canvas
-    The overall project design diagram; it describes execution flows (data transfer, parallel structure) and the relationships between data structures
-  - Flow diagram | flow structure
-    Use detailed diagrams to express parallel or sequential structure, conditional structure, and data flow
-    A role that blocks during execution should be emphasized as its own block, e.g. a parallel sub-task, rpc caller, rpc handler, or the task pool
-
-- Requirements for modifying the canvas
-  - Before every modification, update the canvas under the project and read its latest content
-  - Do not delete content on your own; apart from the content targeted by the change, everything else must be preserved
-  - Modifications must take the related content already in the canvas into account
-  - Separate the key execution roles, e.g. rpc caller, rpc handler, task pool, parallel sub-task
-  - Reflect code function names and type names at the locations of the related logic
-  - A function's concrete logic must be expressed as a flow-diagram structure, not pasted code
-    - For example, if a function spawns a task, separate the spawned task and the current function into distinct objects (concepts), then show their relationship in the diagram
-    - For example, if several tasks communicate directly through a channel, show the direction of data flow and how each side handles sending and receiving (blocking or non-blocking)
-  - Example:
-    pub async fn batch_transfer(unique_id: Vec,version: u64,target_node: NodeID,data: Arc,view: DataGeneralView,) -> WSResult<()> {
-        let total_size = data.size().await?;
-        let total_blocks = (total_size + DEFAULT_BLOCK_SIZE - 1) / DEFAULT_BLOCK_SIZE;
-        let semaphore = Arc::new(Semaphore::new(32));
-        let mut handles = Vec::new();
-        // send all data blocks
-        for block_idx in 0..total_blocks {
-            // acquire a semaphore permit
-            let permit = semaphore.clone().acquire_owned().await.unwrap();
-            let offset = block_idx as usize * DEFAULT_BLOCK_SIZE;
-            let size = DEFAULT_BLOCK_SIZE.min(total_size - offset);
-            // read the data block
-            let block_data = data.read_chunk(offset, size).await?;
-            // build the request
-            let request = proto::BatchDataRequest {
-                request_id: Some(proto::BatchRequestId {node_id: target_node as u32,sequence: block_idx as u32,}),
-                block_type: data.block_type() as i32,
-                block_index: block_idx as u32,
-                data: block_data,
-                operation: proto::DataOpeType::Write as i32,
-                unique_id: unique_id.clone(),
-                version,
-            };
-            // send the request
-            let view = view.clone();
-            let handle = tokio::spawn(async move {
-                let _permit = permit; // hold the permit until the task finishes
-                let resp = view.data_general().rpc_call_batch_data.call(view.p2p(),target_node,request,Some(Duration::from_secs(30)),).await?;
-                if !resp.success {return Err(WsDataError::BatchTransferFailed {node: target_node,batch: block_idx as u32,reason: resp.error_message,}.into());}
-                Ok(())
-            });
-            handles.push(handle);
-        }
-        // wait for all requests to finish
-        for handle in handles { handle.await??; }
-        Ok(())
-    }
-    Objects (label the key type names)
-    - the current function's task
-    - the spawned tasks
-    - the Semaphore
-    Flow structure
-    - conditions and loops
-    - multiple tasks running in parallel
-    Data flow
-    - the data being sent is moved into the child task
-    - a clone of the semaphore is moved into the child task
-    Operations (label the key function names)
-    - current task: prepare in advance
-    - current task: block while acquiring the semaphore
-    - current task: spawn the child task
-    - child task: rpc_call
-    - child task: release the semaphore
-
-- Canvas update procedure
-  - To edit, update the canvas under the project
-    Run python3 scripts/sync_md_files.py from_s3fs; this fetches the latest edits from the s3fs directory, after which design.canvas can be accessed under the project directory
-  - To feed the latest review changes back, update the s3fs canvas
-    Run python3 scripts/sync_md_files.py to_s3fs; this pushes design.canvas from the project directory to the s3fs directory
-
-- When the user says "I updated the canvas", run python3 scripts/sync_md_files.py from_s3fs
-  Only then is {project root}/design.canvas under the project up to date
-  Then read and analyze the new design
-
-- When a function returns a Result that you do not want to handle, at least log the error
-
-- Use the tracing crate for logging
-
-- Errors are structured as a WSError that contains sub-error structs of the form WsXXXErr; the parent struct derives Error, while the sub-structs only need to implement Debug
-  Sub-structs should fit into the existing categories where possible
-
-- Code modification principles
-  Iterate on code drafts in the review first
-  Once a draft is confirmed, apply it to the current project
-
-## 1. Mandatory wait rule for task execution
-- After making a plan, you must wait for the user's confirmation:
-  - even if the plan looks complete
-  - even if the change is simple
-  - even if it fixes an obvious error
-  - there are no exceptions
-
-- Pre-execution checklist:
-  - [ ] Is the task marked as working?
-  - [ ] Has a modification plan been made?
-  - [ ] Has the plan been confirmed by the user?
-  - [ ] Is the plan recorded in the right place?
-
-- Mandatory execution order:
-  1. Mark the task status
-  2. Make the modification plan
-  3. **Wait for user confirmation**
-  4. Execute after confirmation
-  5. Record the execution result
-  6. Wait for the user's next instruction
-
-## 2. Basic workflow rules
-- When starting an analysis task:
-  First mark the current task or sub-task as (working); only one item should be in the working state at any time
-
-- When handling a task:
-  - If the review has no plan yet, make one
-  - If a plan exists:
-    - plan not yet executed: wait for user confirmation, then execute
-    - plan already executed: wait for the user's instruction
-
-- When writing plans or records back to the review after analysis or execution:
-  Update the content at the corresponding working entry; do not pick an arbitrary place to update
-
-- Compilation:
-  - When the agent needs to build on its own, or the user asks for a build:
-    sudo -E $HOME/.cargo/bin/cargo build 2>&1 | tee compilelog
-  - When the current problem needs analysis, read compilelog first
-
-- Step management:
-  After each major step (updating the plan or executing the plan), wait for the user's next instruction
-
-## 3. Design file modification rules
-- Preparation before modifying:
-  - Always read the latest content of the target file first
-  - Create two timestamped temporary copies:
-    * one for the modification
-    * one as a backup
-
-- Content modification principles:
-  - Do not delete or overwrite existing content on your own
-  - Only modify the content that actually needs updating
-  - Unrelated content must be left exactly as it is
-  - If existing content is being overwritten, state that explicitly
-
-- File management:
-  - Keep a clear file-naming convention that includes the timestamp
-  - Confirm the necessary backups once the modification is done
-
-## 4. Rule synchronization principle
-- When the rules are updated:
-  - The rules file (.cursorrules) and the memories (MEMORIES) must be updated together
-  - Keep the two consistent
-  - Updating only one of them is not allowed
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
old mode 100755
new mode 100644
index 8b3728b..94c1bac
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,3 @@
 target
 *.zip
-Python-3.10.12
-test_temp_dir*
\ No newline at end of file
+Python-3.10.12
\ No newline at end of file
diff --git a/Cargo.lock b/Cargo.lock old mode 100755 new mode 100644 index c174000..fb1396f --- a/Cargo.lock +++ b/Cargo.lock @@ -4,18 +4,18 @@ version = 3 [[package]] name = "addr2line" -version = "0.24.2" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] [[package]] -name = "adler2" -version = "2.0.0" +name = "adler" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aes" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if", "cipher", "cpufeatures", ] [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] [[package]] name = "anyhow" -version = "1.0.93" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" +checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "async-channel" -version = "2.3.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +checksum = "d37875bd9915b7d67c2f117ea2c30a0989874d0b2cb694fe25403c85763c0c9e" dependencies = [ "concurrent-queue", + "event-listener", "event-listener-strategy", "futures-core", "pin-project-lite", ] [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.38", ] [[package]] version = "0.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be22061baf4b10a69b85c37dd7eb542021030bf1b0838eef0987d54b091663a6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.4.1",
"cfg-if", "futures", - "getrandom 0.2.15", + "getrandom 0.2.10", "libc", "path-absolutize", "serde", @@ -122,9 +123,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.4.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" @@ -137,9 +138,9 @@ dependencies = [ "bitflags 1.3.2", "bytes", "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.31", + "http 0.2.10", + "http-body 0.4.5", + "hyper 0.14.27", "itoa", "matchit", "memchr", @@ -152,7 +153,7 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper 0.1.2", + "sync_wrapper", "tokio", "tower", "tower-layer", @@ -168,8 +169,8 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 0.2.12", - "http-body 0.4.6", + "http 0.2.10", + "http-body 0.4.5", "mime", "rustversion", "tower-layer", @@ -178,17 +179,17 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.74" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", + "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", - "windows-targets 0.52.6", ] [[package]] @@ -236,7 +237,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.87", + "syn 2.0.38", ] [[package]] @@ -247,9 +248,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.6.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "block-buffer" @@ -262,9 +263,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.16.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" + +[[package]] +name = "bytecount" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205" [[package]] name = "byteorder" @@ -274,9 +281,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.8.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" dependencies = [ "serde", ] @@ -308,15 +315,45 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02d88a780e6aa14b75d7be99f374d8b5c315aaf9c12ada1e2b1cb281468584c9" +[[package]] +name = "camino" +version = "1.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e34637b3140142bdf929fb439e8aa4ebad7651ebf7b1080b3930aa16ac1459ff" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", +] + [[package]] name = "cc" -version = "1.2.1" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ "jobserver", "libc", - "shlex", ] [[package]] @@ -346,9 +383,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.8.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" dependencies = [ "glob", "libc", @@ -378,7 +415,7 @@ version = "3.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro-error", "proc-macro2", "quote", @@ -396,18 +433,18 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.51" +version = "0.1.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" dependencies = [ "cc", ] [[package]] name = "concurrent-queue" -version = "2.5.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +checksum = "f057a694a54f12365049b0958a1685bb52d567f5593b355fbf685838e873d400" dependencies = [ "crossbeam-utils", ] @@ -439,71 +476,79 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.7" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" -version = "0.2.15" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca741a962e1b0bff6d724a1a0958b686406e853bb14061f218562e1896f95e6" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ + "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.5" +version = "0.8.3" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ + "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.18" +version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ + "autocfg", + "cfg-if", "crossbeam-utils", + "memoffset", + "scopeguard", ] [[package]] name = "crossbeam-skiplist" -version = "0.1.3" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df29de440c58ca2cc6e587ec3d22347551a32435fbde9d2bff64e78a9ffa151b" +checksum = "883a5821d7d079fcf34ac55f27a833ee61678110f6b97637cc74513c0d0b42fc" dependencies = [ + "cfg-if", "crossbeam-epoch", "crossbeam-utils", + "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crypto-common" @@ -550,38 +595,24 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "dashmap" -version = "6.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" -dependencies = [ - "cfg-if", - "crossbeam-utils", - "hashbrown 0.14.5", - "lock_api", - "once_cell", - "parking_lot_core 0.9.10", -] - [[package]] name = "deranged" -version = "0.3.11" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" dependencies = [ "powerfmt", ] [[package]] name = "derive_more" -version = "0.99.18" +version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 1.0.109", ] [[package]] @@ -616,22 +647,11 @@ dependencies = [ "winapi", ] -[[package]] -name = "displaydoc" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", -] - [[package]] name = "downcast-rs" -version = "1.2.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" +checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" [[package]] name = "dtoa" @@ -641,29 +661,29 @@ checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" [[package]] name = "either" -version = "1.13.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "encoding_rs" 
-version = "0.8.35" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if", ] [[package]] name = "enum-as-inner" -version = "0.6.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" +checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.38", ] [[package]] @@ -674,19 +694,28 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.9" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.48.0", +] + +[[package]] +name = "error-chain" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" +dependencies = [ + "version_check", ] [[package]] name = "event-listener" -version = "5.3.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +checksum = "d93877bcde0eb80ca09131a08d23f0a5c18a620b01db137dba666d18cd9b30c2" dependencies = [ "concurrent-queue", "parking", @@ -695,9 +724,9 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +checksum = "d96b852f1345da36d551b9473fa1e2b1eb5c5195585c6c018118bc92a8d91160" dependencies = [ "event-listener", "pin-project-lite", @@ -705,9 +734,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.2.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "fiber-for-wasmedge" @@ -730,9 +759,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.35" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" +checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" dependencies = [ "crc32fast", "miniz_oxide", @@ -761,9 +790,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -780,9 +809,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.31" +version = "0.3.30" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -795,9 +824,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -805,15 +834,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -822,38 +851,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-macro" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.38", ] [[package]] name = "futures-sink" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.31" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -899,9 +928,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "libc", @@ -910,9 +939,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" [[package]] name = "glob" @@ -922,9 +951,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.4.6" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" dependencies = [ "atomic-waker", "bytes", @@ -932,7 +961,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.6.0", + "indexmap 2.0.2", "slab", "tokio", "tokio-util", @@ -947,15 +976,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.14.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" - -[[package]] -name = "hashbrown" -version = "0.15.1" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" +checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" [[package]] name = "heck" @@ -963,12 +986,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" -[[package]] -name = "heck" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" - [[package]] name = "hermit-abi" version = "0.1.19" @@ -980,21 +997,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" - -[[package]] -name = "hermit-abi" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" - -[[package]] -name = "hex" -version = "0.4.3" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] name = "hmac" @@ -1005,11 +1010,20 @@ dependencies = [ "digest", ] +[[package]] +name = "home" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +dependencies = [ + "windows-sys 0.48.0", +] + [[package]] name = "http" -version = "0.2.12" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +checksum = "f95b9abcae896730d42b78e09c155ed4ddf82c07b4de772c64aee5b2d8b7c150" dependencies = [ "bytes", "fnv", @@ -1029,20 +1043,20 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.6" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", - "http 0.2.12", + "http 0.2.10", 
"pin-project-lite", ] [[package]] name = "http-body" -version = "1.0.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ "bytes", "http 1.1.0", @@ -1050,14 +1064,14 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.2" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" dependencies = [ "bytes", - "futures-util", + "futures-core", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "pin-project-lite", ] @@ -1069,9 +1083,9 @@ checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" [[package]] name = "httparse" -version = "1.9.5" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -1081,21 +1095,21 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.31" +version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "http 0.2.12", - "http-body 0.4.6", + "http 0.2.10", + "http-body 0.4.5", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.7", + "socket2 0.4.10", "tokio", "tower-service", "tracing", @@ -1104,16 +1118,16 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" dependencies = [ "bytes", "futures-channel", "futures-util", "h2", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "httparse", "itoa", "pin-project-lite", @@ -1122,23 +1136,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-rustls" -version = "0.27.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" -dependencies = [ - "futures-util", - "http 1.1.0", - "hyper 1.5.0", - "hyper-util", - "rustls 0.23.17", - "rustls-pki-types", - "tokio", - "tokio-rustls", - "tower-service", -] - [[package]] name = "hyper-tls" version = "0.6.0" @@ -1147,7 +1144,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.5.0", + "hyper 1.3.1", "hyper-util", "native-tls", "tokio", @@ -1157,141 +1154,24 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.10" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", - "http-body 1.0.1", - 
"hyper 1.5.0", + "http-body 1.0.0", + "hyper 1.3.1", "pin-project-lite", - "socket2 0.5.7", + "socket2 0.5.5", "tokio", + "tower", "tower-service", "tracing", ] -[[package]] -name = "icu_collections" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" -dependencies = [ - "displaydoc", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_locid" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" -dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", -] - -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" - -[[package]] -name = "icu_normalizer" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_normalizer_data", - "icu_properties", - "icu_provider", - "smallvec", - "utf16_iter", - "utf8_iter", - "write16", - "zerovec", -] - -[[package]] -name = "icu_normalizer_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" - -[[package]] -name = "icu_properties" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" -dependencies = [ - "displaydoc", - "icu_collections", - "icu_locid_transform", - "icu_properties_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_properties_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" - -[[package]] -name = "icu_provider" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr", - "writeable", - "yoke", - "zerofrom", - "zerovec", -] - -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", -] - [[package]] name = "ident_case" version = "1.0.1" @@ -1300,23 +1180,12 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" -dependencies = [ - "idna_adapter", - "smallvec", - "utf8_iter", -] - -[[package]] -name = "idna_adapter" -version = 
"1.2.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ - "icu_normalizer", - "icu_properties", + "unicode-bidi", + "unicode-normalization", ] [[package]] @@ -1331,12 +1200,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.6.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" dependencies = [ "equivalent", - "hashbrown 0.15.1", + "hashbrown 0.14.2", ] [[package]] @@ -1350,9 +1219,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.13" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] @@ -1363,27 +1232,16 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.3.3", "libc", "windows-sys 0.48.0", ] [[package]] name = "ipnet" -version = "2.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" - -[[package]] -name = "is-terminal" -version = "0.4.13" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" -dependencies = [ - "hermit-abi 0.4.0", - "libc", - "windows-sys 0.52.0", -] +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "itertools" @@ -1396,42 +1254,42 @@ dependencies = [ [[package]] name = "itertools" -version = "0.12.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" dependencies = [ "either", ] [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.72" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] [[package]] name = "lazy_static" -version = "1.5.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +checksum = 
"e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "lazycell" @@ -1447,28 +1305,18 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.164" +version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" dependencies = [ "cfg-if", - "windows-targets 0.52.6", -] - -[[package]] -name = "libredox" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" -dependencies = [ - "bitflags 2.6.0", - "libc", + "windows-sys 0.48.0", ] [[package]] @@ -1487,9 +1335,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.20" +version = "1.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" dependencies = [ "cc", "libc", @@ -1505,21 +1353,15 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" - -[[package]] -name = "litemap" -version = "0.7.3" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ "autocfg", "scopeguard", @@ -1527,17 +1369,17 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] -name = "matchers" -version = "0.1.0" +name = "mach2" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" dependencies = [ - "regex-automata 0.1.10", + "libc", ] [[package]] @@ -1547,20 +1389,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] -name = "md-5" -version = "0.10.6" +name = "memchr" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" -dependencies = [ - "cfg-if", - 
"digest", -] +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] -name = "memchr" -version = "2.7.4" +name = "memoffset" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +dependencies = [ + "autocfg", +] [[package]] name = "mime" @@ -1576,38 +1417,38 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ - "adler2", + "adler", ] [[package]] name = "mio" -version = "1.0.2" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" dependencies = [ - "hermit-abi 0.3.9", "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] name = "moka" -version = "0.12.8" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32cf62eb4dd975d2dde76432fb1075c49e3ee2331cf36f1f8fd4b66550d32b6f" +checksum = "d8017ec3548ffe7d4cef7ac0e12b044c01164a74c0f3119420faeaf13490ad8b" dependencies = [ "crossbeam-channel", "crossbeam-epoch", "crossbeam-utils", "once_cell", - "parking_lot 0.12.3", + "parking_lot 0.12.1", "quanta", "rustc_version", + "skeptic", "smallvec", "tagptr", "thiserror", @@ -1624,7 +1465,7 @@ dependencies = [ "bytes", "encoding_rs", "futures-util", - "http 0.2.12", + "http 0.2.10", "httparse", "log", "memchr", @@ -1635,9 +1476,9 @@ dependencies = [ [[package]] name = "multimap" -version = "0.10.0" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "native-tls" @@ -1685,12 +1526,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "num-conv" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" - [[package]] name = "num-derive" version = "0.3.3" @@ -1704,35 +1539,54 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.19" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", ] +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi 0.3.3", + "libc", +] + +[[package]] +name = "num_threads" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +dependencies = [ + "libc", +] + [[package]] name = "object" -version = "0.36.5" +version = "0.32.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.20.2" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "openssl" -version = "0.10.68" +version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" +checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.4.1", "cfg-if", "foreign-types", "libc", @@ -1749,7 +1603,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.38", ] [[package]] @@ -1760,9 +1614,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.104" +version = "0.9.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" +checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" dependencies = [ "cc", "libc", @@ -1784,9 +1638,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "parking" -version = "2.2.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" @@ -1801,12 +1655,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.10", + "parking_lot_core 0.9.8", ] [[package]] @@ -1825,15 +1679,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.7", + "redox_syscall 0.3.5", "smallvec", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -1849,9 +1703,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.15" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "path-absolutize" @@ -1900,45 +1754,45 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = 
"9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "petgraph" -version = "0.6.5" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.6.0", + "indexmap 2.0.2", ] [[package]] name = "pin-project" -version = "1.1.7" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.38", ] [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -1948,9 +1802,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.31" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "powerfmt" @@ -1960,21 +1814,18 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.20" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" -dependencies = [ - "zerocopy", -] +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" -version = "0.2.25" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ "proc-macro2", - "syn 2.0.87", + "syn 2.0.38", ] [[package]] @@ -2003,22 +1854,22 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.89" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] [[package]] name = "prometheus-client" -version = "0.22.3" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" +checksum = "6f87c10af16e0af74010d2a123d202e8363c04db5acfa91d8747f64a8524da3a" dependencies = [ "dtoa", "itoa", - "parking_lot 0.12.3", + "parking_lot 0.12.1", "prometheus-client-derive-encode", ] @@ -2030,7 +1881,7 @@ checksum = 
"440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.38", ] [[package]] @@ -2045,33 +1896,34 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.6" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +checksum = "f4fdd22f3b9c31b53c060df4a0613a1c7f062d4115a2b984dd15b1858f7e340d" dependencies = [ "bytes", - "prost-derive 0.12.6", + "prost-derive 0.12.1", ] [[package]] name = "prost-build" -version = "0.12.6" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" +checksum = "8bdf592881d821b83d471f8af290226c8d51402259e9bb5be7f9f8bdebbb11ac" dependencies = [ "bytes", - "heck 0.5.0", - "itertools 0.12.1", + "heck", + "itertools 0.11.0", "log", "multimap", "once_cell", "petgraph", "prettyplease", - "prost 0.12.6", + "prost 0.12.1", "prost-types", "regex", - "syn 2.0.87", + "syn 2.0.38", "tempfile", + "which", ] [[package]] @@ -2089,24 +1941,35 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.12.6" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32" dependencies = [ "anyhow", - "itertools 0.12.1", + "itertools 0.11.0", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.38", ] [[package]] name = "prost-types" -version = "0.12.6" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" +checksum = "e081b29f63d83a4bc75cfc9f3fe424f9156cf92d8a4f0c9407cce9a1b67327cf" dependencies = [ - "prost 0.12.6", + "prost 0.12.1", +] + +[[package]] +name = "pulldown-cmark" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a1a2f1f0a7ecff9c31abbe177637be0e97a0aef46cf8738ece09327985d998" +dependencies = [ + "bitflags 1.3.2", + "memchr", + "unicase", ] [[package]] @@ -2121,7 +1984,7 @@ dependencies = [ "quinn", "quinn-proto", "rcgen", - "rustls 0.20.9", + "rustls", "serde", "thiserror", "tokio", @@ -2131,12 +1994,13 @@ dependencies = [ [[package]] name = "quanta" -version = "0.12.3" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" +checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" dependencies = [ "crossbeam-utils", "libc", + "mach2", "once_cell", "raw-cpuid", "wasi 0.11.0+wasi-snapshot-preview1", @@ -2155,7 +2019,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.20.9", + "rustls", "thiserror", "tokio", "tracing", @@ -2172,7 +2036,7 @@ dependencies = [ "rand 0.8.5", "ring 0.16.20", "rustc-hash", - "rustls 0.20.9", + "rustls", "slab", "thiserror", "tinyvec", @@ -2195,9 +2059,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.37" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] @@ -2261,7 +2125,7 @@ version = "0.6.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.10", ] [[package]] @@ -2275,18 +2139,18 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "11.2.0" +version = "10.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab240315c661615f2ee9f0f2cd32d5a7343a84d5ebcccb99d46e6637565e7b0" +checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ - "bitflags 2.6.0", + "bitflags 1.3.2", ] [[package]] name = "rayon" -version = "1.10.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" dependencies = [ "either", "rayon-core", @@ -2294,9 +2158,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" dependencies = [ "crossbeam-deque", "crossbeam-utils", @@ -2310,7 +2174,7 @@ checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" dependencies = [ "pem", "ring 0.16.20", - "time 0.3.36", + "time 0.3.30", "yasna", ] @@ -2325,73 +2189,67 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ - "bitflags 2.6.0", + "bitflags 1.3.2", ] [[package]] -name = "redox_users" -version = "0.4.6" +name = "redox_syscall" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ - "getrandom 0.2.15", - "libredox", - "thiserror", + "bitflags 1.3.2", ] [[package]] -name = "regex" -version = "1.11.1" +name = "redox_users" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "aho-corasick", - "memchr", - "regex-automata 0.4.9", - "regex-syntax 0.8.5", + "getrandom 0.2.10", + "redox_syscall 0.2.16", + "thiserror", ] [[package]] -name = "regex-automata" -version = "0.1.10" +name = "regex" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ - "regex-syntax 0.6.29", + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", ] [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", 
- "regex-syntax 0.8.5", + "regex-syntax", ] [[package]] name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - -[[package]] -name = "regex-syntax" -version = "0.8.5" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.12.9" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" dependencies = [ "base64 0.22.1", "bytes", @@ -2400,10 +2258,9 @@ dependencies = [ "futures-util", "h2", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "http-body-util", - "hyper 1.5.0", - "hyper-rustls", + "hyper 1.3.1", "hyper-tls", "hyper-util", "ipnet", @@ -2418,7 +2275,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper", "system-configuration", "tokio", "tokio-native-tls", @@ -2427,7 +2284,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "windows-registry", + "winreg", ] [[package]] @@ -2447,24 +2304,23 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.8" +version = "0.17.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" dependencies = [ "cc", - "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.10", "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] name = "rustc-demangle" -version = "0.1.24" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -2474,9 +2330,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc_version" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ "semver", ] @@ -2497,15 +2353,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.41" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.4.1", "errno", "libc", - "linux-raw-sys 0.4.14", - "windows-sys 0.52.0", + "linux-raw-sys 0.4.10", + "windows-sys 0.48.0", ] [[package]] @@ -2519,56 +2375,33 @@ dependencies = [ "webpki", ] -[[package]] -name = "rustls" -version = "0.23.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f1a745511c54ba6d4465e8d5dfbd81b45791756de28d4981af70d6dca128f1e" -dependencies = [ - "once_cell", - "rustls-pki-types", - "rustls-webpki", - "subtle", - "zeroize", -] - 
[[package]] name = "rustls-pemfile" -version = "2.2.0" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ + "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" - -[[package]] -name = "rustls-webpki" -version = "0.102.8" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" -dependencies = [ - "ring 0.17.8", - "rustls-pki-types", - "untrusted 0.9.0", -] +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] name = "rustversion" -version = "1.0.18" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "ryu" -version = "1.0.18" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "same-file" @@ -2581,11 +2414,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.27" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -2606,17 +2439,17 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.8", + "ring 0.17.5", "untrusted 0.9.0", ] [[package]] name = "security-framework" -version = "2.11.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" dependencies = [ - "bitflags 2.6.0", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -2625,9 +2458,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -2635,47 +2468,49 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +dependencies = [ + "serde", +] [[package]] name = "serde" -version = "1.0.215" +version = "1.0.190" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" +checksum = "91d3c334ca1ee894a2c6f6ad698fe8c435b76d504b13d436f0685d648d6d96f7" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.215" +version = "1.0.190" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" +checksum = "67c5609f394e5c2bd7fc51efda478004ea80ef42fee983d5c67a65e34f32c0e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.38", ] [[package]] name = "serde_json" -version = "1.0.133" +version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" dependencies = [ "itoa", - "memchr", "ryu", "serde", ] [[package]] name = "serde_path_to_error" -version = "0.1.16" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" dependencies = [ "itoa", "serde", @@ -2695,11 +2530,11 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.34+deprecated" +version = "0.9.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +checksum = "3cc7a1570e38322cfe4154732e5110f887ea57e22b76f4bfd32b5bdd3368666c" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.0.2", "itoa", "ryu", "serde", @@ -2745,13 +2580,28 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] +[[package]] +name = "skeptic" +version = "0.13.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" +dependencies = [ + "bytecount", + "cargo_metadata", + "error-chain", + "glob", + "pulldown-cmark", + "tempfile", + "walkdir", +] + [[package]] name = "slab" version = "0.4.9" @@ -2797,15 +2647,15 @@ dependencies = [ [[package]] name = "slog-term" -version = "2.9.1" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6e022d0b998abfe5c3782c1f03551a596269450ccd677ea51c56f8b214610e8" +checksum = "87d29185c55b7b258b4f120eab00f48557d4d9bc814f41713f449d35b0f8977c" dependencies = [ - "is-terminal", + "atty", "slog", "term", "thread_local", - "time 0.3.36", + "time 0.3.30", ] [[package]] @@ -2835,12 +2685,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] @@ -2867,12 +2717,6 @@ dependencies = [ "parking_lot 0.11.2", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "strsim" version = "0.10.0" @@ -2898,9 +2742,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.87" +version = "2.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" +checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" dependencies = [ "proc-macro2", "quote", @@ -2913,31 +2757,11 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" -[[package]] -name = "sync_wrapper" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" -dependencies = [ - "futures-core", -] - -[[package]] -name = "synstructure" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", -] - [[package]] name = "sysinfo" -version = "0.29.11" +version = "0.29.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd727fc423c2060f6c92d9534cef765c65a6ed3f428a03d7def74a8c4348e666" +checksum = "0a18d114d420ada3a891e6bc8e96a2023402203296a47cdd65083377dad18ba5" dependencies = [ "cfg-if", "core-foundation-sys", @@ -2950,20 +2774,20 @@ dependencies = [ [[package]] name = "system-configuration" -version = "0.6.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ - "bitflags 2.6.0", + "bitflags 1.3.2", "core-foundation", "system-configuration-sys", ] [[package]] name = "system-configuration-sys" -version = "0.6.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" dependencies = [ "core-foundation-sys", "libc", @@ -2983,15 +2807,15 @@ checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" [[package]] name = "tempfile" -version = "3.14.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", "fastrand", - "once_cell", - "rustix 0.38.41", - "windows-sys 0.59.0", + "redox_syscall 0.4.1", + "rustix 0.38.21", + "windows-sys 0.48.0", ] [[package]] @@ -3007,44 +2831,44 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.4.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64" dependencies = [ "winapi-util", ] [[package]] name = "textwrap" -version = "0.16.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" +checksum = 
"222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.69" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.69" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.38", ] [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ "cfg-if", "once_cell", @@ -3063,13 +2887,14 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" dependencies = [ "deranged", "itoa", - "num-conv", + "libc", + "num_threads", "powerfmt", "serde", "time-core", @@ -3084,29 +2909,18 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" dependencies = [ - "num-conv", "time-core", ] -[[package]] -name = "tinystr" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" -dependencies = [ - "displaydoc", - "zerovec", -] - [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] @@ -3119,31 +2933,32 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.1" +version = "1.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" +checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" dependencies = [ "backtrace", "bytes", "libc", "mio", - "parking_lot 0.12.3", + "num_cpus", + "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2 0.5.5", "tokio-macros", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.87", + "syn 2.0.38", ] [[package]] @@ -3156,22 +2971,11 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-rustls" -version = "0.26.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" -dependencies = [ - "rustls 0.23.17", - "rustls-pki-types", - "tokio", -] - [[package]] name = "tokio-util" -version = "0.7.12" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", @@ -3202,12 +3006,12 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.4.1", "bytes", "futures-core", "futures-util", - "http 0.2.12", - "http-body 0.4.6", + "http 0.2.10", + "http-body 0.4.5", "http-range-header", "pin-project-lite", "tower-layer", @@ -3216,15 +3020,15 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.3" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" -version = "0.3.3" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" @@ -3246,7 +3050,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.38", ] [[package]] @@ -3271,9 +3075,9 @@ dependencies = [ [[package]] name = "tracing-log" -version = "0.2.0" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" dependencies = [ "log", "once_cell", @@ -3282,18 +3086,14 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ - "matchers", "nu-ansi-term", - "once_cell", - "regex", "sharded-slab", "smallvec", "thread_local", - "tracing", "tracing-core", "tracing-log", ] @@ -3306,9 +3106,9 @@ checksum = "859eb650cfee7434994602c3a68b25d77ad9e68c8a6cd491616ef86661382eb3" [[package]] name = "try-lock" -version = "0.2.5" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "typenum" @@ -3316,29 +3116,53 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "unicase" +version = "2.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" +dependencies = [ + "version_check", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" + [[package]] name = "unicode-ident" -version = "1.0.13" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unicode-normalization" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +dependencies = [ + "tinyvec", +] [[package]] name = "unicode-segmentation" -version = "1.12.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" +checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" [[package]] name = "unicode-width" -version = "0.1.14" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" [[package]] name = "unsafe-libyaml" -version = "0.2.11" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" +checksum = "f28467d3e1d3c6586d8f25fa243f544f5800fec42d97032474e17222c2b75cfa" [[package]] name = "untrusted" @@ -3354,47 +3178,35 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" +checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" dependencies = [ "form_urlencoded", "idna", "percent-encoding", ] -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - -[[package]] -name = "utf8_iter" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" - [[package]] name = "uuid" -version = "1.11.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.10", "rand 0.8.5", "uuid-macro-internal", ] [[package]] name = "uuid-macro-internal" -version = "1.11.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b91f57fe13a38d0ce9e28a03463d8d3c2468ed03d75375110ec71d93b449a08" +checksum = "9881bea7cbe687e36c9ab3b778c36cd0487402e270304e8b1296d5085303c1a2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.38", ] [[package]] @@ -3411,9 +3223,9 @@ checksum = 
"accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.5" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" @@ -3454,35 +3266,34 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.95" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", - "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.38", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.45" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", @@ -3492,9 +3303,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3502,22 +3313,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.38", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "wasm-encoder" @@ -3537,22 +3348,16 @@ dependencies = [ "async-raft", "async-trait", "axum", - "base64 0.22.1", "bincode", "camelpaste", "clap", "crossbeam-skiplist", - "dashmap", "downcast-rs", "enum-as-inner", "futures", - "hex", - "hyper 0.14.31", "lazy_static", - "md-5", "moka", "parking_lot 0.11.2", - "path-absolutize", "prometheus-client", "prost 0.11.9", "prost-build", @@ -3570,7 +3375,6 @@ dependencies = [ "slotmap", "ssh2", "sysinfo", - "tempfile", "thiserror", "tokio", "tower", @@ -3593,7 +3397,7 @@ checksum = "ad17cbd3b8a8ed1cba44755e616495e13baf3f7e7f9576df7d7a357b928c070a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.38", ] [[package]] @@ -3623,7 +3427,7 @@ dependencies = [ 
"fiber-for-wasmedge", "lazy_static", "libc", - "parking_lot 0.12.3", + "parking_lot 0.12.1", "paste", "rand 0.8.5", "scoped-tls", @@ -3675,9 +3479,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.72" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -3689,10 +3493,22 @@ version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" dependencies = [ - "ring 0.17.8", + "ring 0.17.5", "untrusted 0.9.0", ] +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix 0.38.21", +] + [[package]] name = "winapi" version = "0.3.9" @@ -3711,11 +3527,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.9" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" dependencies = [ - "windows-sys 0.59.0", + "winapi", ] [[package]] @@ -3724,36 +3540,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "windows-registry" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" -dependencies = [ - "windows-result", - "windows-strings", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-result" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-strings" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" -dependencies = [ - "windows-result", - "windows-targets 0.52.6", -] - [[package]] name = "windows-sys" version = "0.42.0" @@ -3784,16 +3570,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.52.5", ] [[package]] @@ -3813,18 +3590,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - 
"windows_i686_gnu 0.52.6", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -3841,9 +3618,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -3859,9 +3636,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -3877,15 +3654,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" [[package]] name = "windows_i686_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -3901,9 +3678,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -3919,9 +3696,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -3937,9 +3714,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -3955,21 +3732,19 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = 
"0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - -[[package]] -name = "write16" -version = "1.0.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] -name = "writeable" -version = "0.5.5" +name = "winreg" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] [[package]] name = "ws_derive" @@ -3988,101 +3763,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" dependencies = [ - "time 0.3.36", -] - -[[package]] -name = "yoke" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" -dependencies = [ - "serde", - "stable_deref_trait", - "yoke-derive", - "zerofrom", -] - -[[package]] -name = "yoke-derive" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", - "synstructure", -] - -[[package]] -name = "zerocopy" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" -dependencies = [ - "byteorder", - "zerocopy-derive", -] - -[[package]] -name = "zerocopy-derive" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", -] - -[[package]] -name = "zerofrom" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" -dependencies = [ - "zerofrom-derive", -] - -[[package]] -name = "zerofrom-derive" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", - "synstructure", -] - -[[package]] -name = "zeroize" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" - -[[package]] -name = "zerovec" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" -dependencies = [ - "yoke", - "zerofrom", - "zerovec-derive", -] - -[[package]] -name = "zerovec-derive" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.87", + "time 0.3.30", ] [[package]] @@ -4150,9 +3831,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.13+zstd.1.5.6" 
+version = "2.0.11+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" +checksum = "75652c55c0b6f3e6f12eb786fe1bc960396bf05a1eb3bf1f3691c3610ac2e6d4" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml old mode 100755 new mode 100644 index 59351df..7aa3c7e --- a/Cargo.toml +++ b/Cargo.toml @@ -1,27 +1,22 @@ -[workspace] -members = ["src/main"] -resolver = "2" - - -[workspace.package] -version = "0.9.5" +[package] +name = "wasm_serverless" +version = "0.1.0" edition = "2021" -license = "Apache-2.0" -[workspace.dependencies] +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] qp2p = "0.36.4" #{ path = "qp2p" } tokio = { version = "1.32.0", features = ["full"] } thiserror = "1.0.50" -# snafu = "0.8.5" async-trait = "0.1.74" prost = "0.11" parking_lot = "0.11.1" # raft = { version = "0.7.0", default-features = false, features = [ # "prost-codec", # ] } # tikv raft -async-raft = "0.6.1" #{ path = "async-raft/async-raft" } # +async-raft = "0.6.1" #{ path = "async-raft/async-raft" } # tracing = "0.1.40" -tracing-subscriber = { version = "0.3", features = ["env-filter", "std"] } # openraft = "0.8" serde = "1.0.126" serde_json = "1.0.64" @@ -32,13 +27,14 @@ slog-async = "2.3.0" slog-term = "2.4.0" regex = "1" camelpaste = "0.1.0" +tracing-subscriber = "0.3" ws_derive = { path = "./ws_derive" } clap = { version = "3", features = ["derive"] } downcast-rs = "1.2.0" bincode = "1.3.3" crossbeam-skiplist = "0.1" lazy_static = "1.4.0" -axum = { version = "0.6.20", features = ["multipart"] } +axum = {version="0.6.20",features=["multipart"]} async-channel = "2.1.0" sysinfo = "0.29.10" ssh2 = "0.9.4" @@ -46,8 +42,8 @@ moka = { version = "0.12.1", features = ["sync"] } rand = "0.8.5" slotmap = { version = "1.0" } prometheus-client = "0.22.1" -tower-http = { version = "0.4.0", features = ["cors"] } -tower = "0.4.0" +tower-http = {version="0.4.0", features=["cors"]} +tower= "0.4.0" sled = "0.34.7" enum-as-inner = "0.6.0" reqwest = "0.12.4" @@ -55,13 +51,26 @@ futures = "0.3.30" zip-extract = "0.1.3" zip = "0.5.13" walkdir = "2.5.0" -hyper = { version = "0.14.18", features = ["server"] } -md-5 = "0.10.1" -path-absolutize = "3.0.13" -dashmap = "6.1.0" -base64 = "0.22.1" -hex = "0.4.3" -tempfile="3.8" + +[dependencies.uuid] +version = "1.8.0" +features = [ + "v4", # Lets you generate random UUIDs + "fast-rng", # Use a faster (but still sufficiently random) RNG + "macro-diagnostics", # Enable better diagnostics for compile-time UUIDs +] + +# slog-envlogger = { version = "2.1.0", optional = true } + +[build-dependencies] +prost-build = { version = "0.12" } + + +#[target.'cfg( target_os = "macos" )'.dependencies] +#wasmer = "4.2.5" + +[target.'cfg(target_os = "linux")'.dependencies] +wasmedge-sdk = { version = "0.10.1", features = ["async"] } [profile.test] # 0: no optimizations diff --git a/README.md b/README.md old mode 100755 new mode 100644 diff --git a/async_init_map.md b/async_init_map.md deleted file mode 100644 index 711a124..0000000 --- a/async_init_map.md +++ /dev/null @@ -1,150 +0,0 @@ -# AsyncInitConcurrentMap 封装(基于dashmap) - -## 设计动机 - -在 Rust 异步编程中,我们经常遇到这样的场景:需要一个并发 Map,同时要支持异步初始化。 - -### 现有方案的问题 - -1. **DashMap 的 or_insert 限制**: -```rust -// DashMap 的 or_insert_with 是同步的 -map.entry(key).or_insert_with(|| { - // 这里不能直接用 async 函数 - // 如果在这里调用 block_on 会导致严重问题 -}); -``` - -2. 
**同步调用异步的问题**: - - 如果在同步上下文中调用异步函数(如使用 block_on) - - 当前线程会被阻塞 - - 导致其他异步任务无法调度 - - 可能引发死锁 - -### 解决方案 - -我们的方案是将异步初始化逻辑从 entry 的回调中分离出来: - -```rust -// 不在 or_insert_with 中执行异步初始化 -let entry = map.entry(key).or_insert_with(|| { - // 只创建初始状态 - ValueState::Initializing(tx) -}); - -// 在单独的异步任务中执行初始化 -tokio::spawn(async move { - // 这里可以安全地执行异步操作 - match init_fut.await { - Ok(value) => { - let _ = tx.send(value.clone()); // 先发送值 - inner.insert(key, ValueState::Ready(value)); // 再更新状态 - } - Err(e) => { - inner.remove(&key); - drop(tx); // 通知错误 - } - } -}); -``` - -## 核心实现 - -### 状态管理 - -**设计原因**: -- 使用枚举保证状态转换的类型安全 -- 将通知 channel 绑定到初始化状态,确保生命周期正确 -- 避免使用额外的标志位,保持内存效率 - -```rust -enum ValueState { - Initializing(broadcast::Sender), // channel 直接传递值 - Ready(V), -} -``` - -**关键细节**: -- `Initializing` 持有 `broadcast::Sender` 而不是 `oneshot`,支持多个等待者 -- `Ready` 直接持有值,避免额外的引用计数 -- 枚举设计使得状态检查在编译时完成 - -### 读写分离设计 - -**设计原因**: -- 读操作应该尽可能快速且无阻塞 -- 写操作需要保证原子性,但要最小化锁持有时间 -- 异步等待不能持有任何锁 - -1. **快速路径(读)**: -```rust -if let Some(entry) = self.inner.get(&key) { // 只获取读锁 - match entry.value() { - ValueState::Ready(v) => return Ok(v.clone()), - ValueState::Initializing(tx) => { - let mut rx = tx.subscribe(); - drop(entry); // 立即释放读锁 - return Ok(rx.recv().await?); - } - } -} -``` - -**关键细节**: -- 使用 `get()` 而不是 `entry()`,避免不必要的写锁 -- 获取 subscriber 后立即释放锁,允许其他读者访问 -- 值的克隆在锁外进行,最小化锁持有时间 - -2. **初始化路径(写)**: -```rust -let mut rx = { // 使用代码块控制写锁范围 - let entry = self.inner.entry(key.clone()).or_insert_with(|| { - let (tx, _) = broadcast::channel(1); - // 启动异步初始化... - ValueState::Initializing(tx_clone) - }); - entry.value().as_initializing() - .expect("刚插入的值必定处于初始化状态") - .subscribe() -}; // 写锁在这里释放 -``` - -**关键细节**: -- 使用代码块限制 entry 的生命周期,确保写锁及时释放 -- `or_insert_with` 保证检查和插入的原子性 -- 初始化任务在获取 subscriber 后启动,避免竞态条件 - -### 通过 Channel 传递值 - -**设计原因**: -- 直接通过 channel 传递值,避免等待者重新查询 map -- broadcast channel 支持多个等待者同时等待初始化结果 -- 错误处理更简单,关闭 channel 即可通知所有等待者 - -```rust -// 优化后的设计 -enum ValueState { - Initializing(broadcast::Sender), // channel 直接传递值 - Ready(V), -} - -// 初始化完成时 -match init_fut.await { - Ok(value) => { - let _ = tx.send(value.clone()); // 先发送值 - inner.insert(key, ValueState::Ready(value)); // 再更新状态 - } - // ... -} - -// 等待初始化时 -let mut rx = tx.subscribe(); -drop(entry); -return Ok(rx.recv().await?); // 直接从 channel 获取值,无需再查询 map -``` - -**关键细节**: -- 等待者直接从 channel 接收值,无需再次获取锁查询 map -- 使用 broadcast channel 支持多个等待者,而不是 oneshot -- channel 容量为 1 即可,因为只需要传递一次初始化结果 -- 初始化失败时,直接关闭 channel 通知所有等待者,简化错误处理 diff --git a/batch_data_enhancement_plan.md b/batch_data_enhancement_plan.md deleted file mode 100644 index 5137616..0000000 --- a/batch_data_enhancement_plan.md +++ /dev/null @@ -1,280 +0,0 @@ -# 批量数据处理改进计划 - -## 1. 删除代码 [根据review.md] - -### 1.1 src/main/src/general/data/m_data_general/batch.rs -1. 删除 BatchManager 结构体及其实现 -2. 删除 BatchTransfer 结构体及其实现 - -### 1.2 src/main/src/general/data/m_data_general/mod.rs -1. 删除 DataGeneral 中的 batch_manager 字段 -2. 删除 DataGeneral::new() 中的相关初始化代码 - -## 2. 错误处理增强 [根据review.md] - -### 2.1 修改 src/main/src/result.rs -```rust -pub enum WsDataError { - BatchTransferFailed { - request_id: proto::BatchRequestId, - reason: String, - }, - BatchTransferNotFound { - request_id: proto::BatchRequestId, - }, - BatchTransferError { - request_id: proto::BatchRequestId, - msg: String, - }, - WriteDataFailed { - request_id: proto::BatchRequestId, - }, - SplitTaskFailed { - request_id: proto::BatchRequestId, - idx: DataSplitIdx, - }, - VersionMismatch { - expected: u64, - actual: u64, - }, -} -``` - -## 3. 
新增代码 [根据review.md] - -### 3.1 src/main/src/general/data/m_data_general/task.rs - -#### WriteSplitDataTaskHandle -```rust -pub struct WriteSplitDataTaskHandle { - tx: mpsc::Sender>, - write_type: WriteSplitDataType, - version: u64, -} - -enum WriteSplitDataType { - File { path: PathBuf }, - Mem { shared_mem: SharedMemHolder }, -} -``` - -#### WriteSplitDataTaskGroup -```rust -enum WriteSplitDataTaskGroup { - ToFile { - unique_id: UniqueId, - file_path: PathBuf, - tasks: Vec>, - rx: mpsc::Receiver>, - expected_size: usize, - current_size: usize, - }, - ToMem { - unique_id: UniqueId, - shared_mem: SharedMemHolder, - tasks: Vec>, - rx: mpsc::Receiver>, - expected_size: usize, - current_size: usize, - } -} -``` - -### 3.2 src/main/src/general/data/m_data_general/mod.rs - -#### SharedWithBatchHandler [根据review.md] -```rust -#[derive(Clone)] -struct SharedWithBatchHandler { - responsor: Arc>>>, -} - -impl SharedWithBatchHandler { - fn new() -> Self { - Self { - responsor: Arc::new(Mutex::new(None)), - } - } - - async fn update_responsor(&self, responsor: RPCResponsor) { - let mut guard = self.responsor.lock().await; - if let Some(old_responsor) = guard.take() { - // 旧的responsor直接返回成功 - if let Err(e) = old_responsor.response(Ok(())).await { - tracing::error!("Failed to respond to old request: {}", e); - } - } - *guard = Some(responsor); - } - - async fn get_final_responsor(&self) -> Option> { - self.responsor.lock().await.take() - } -} -``` - -#### BatchReceiveState [根据review.md] -```rust -// 由DataGeneral持有,存储在DashMap中 -// 用于管理每个批量数据传输请求的状态 -struct BatchReceiveState { - handle: WriteSplitDataTaskHandle, // 写入任务句柄 - shared: SharedWithBatchHandler, // 共享响应器 -} -``` - -impl DataGeneral { - pub fn new() -> Self { - Self { - batch_receive_states: DashMap::new(), - // ... 其他字段初始化 - } - } -} - -## 4. 功能实现 [根据design.canvas] - -### 4.1 process_tasks() 实现 [阻塞循环] -```rust -impl WriteSplitDataTaskGroup { - async fn process_tasks(&mut self) -> WSResult { - loop { - // 1. 检查完成状态 - if let Some(item) = self.try_complete() { - return Ok(item); - } - - // 2. 等待新任务或已有任务完成 - tokio::select! { - Some(new_task) = match self { - Self::ToFile { rx, .. } | - Self::ToMem { rx, .. } => rx.recv() - } => { - match self { - Self::ToFile { tasks, .. } | - Self::ToMem { tasks, .. } => { - tasks.push(new_task); - } - } - } - Some(completed_task) = futures::future::select_all(match self { - Self::ToFile { tasks, .. } | - Self::ToMem { tasks, .. } => tasks - }) => { - // 检查任务是否成功完成 - if let Err(e) = completed_task.0 { - tracing::error!("Task failed: {}", e); - return Err(WSError::WsDataError(WsDataError::BatchTransferFailed { - request_id: match self { - Self::ToFile { unique_id, .. } | - Self::ToMem { unique_id, .. } => unique_id.clone() - }, - reason: format!("Task failed: {}", e) - })); - } - // 从任务列表中移除已完成的任务 - match self { - Self::ToFile { tasks, current_size, .. } | - Self::ToMem { tasks, current_size, .. } => { - tasks.remove(completed_task.1); - // 更新当前大小 - *current_size += DEFAULT_BLOCK_SIZE; - } - } - } - None = match self { - Self::ToFile { rx, .. } | - Self::ToMem { rx, .. } => rx.recv() - } => { - // 通道关闭,直接退出 - break; - } - } - } - - Err(WSError::WsDataError(WsDataError::BatchTransferFailed { - request_id: match self { - Self::ToFile { unique_id, .. } | - Self::ToMem { unique_id, .. 
} => unique_id.clone() - }, - reason: "Channel closed".to_string() - })) - } -} -``` - -### 4.2 try_complete() 实现 [同步检查] -```rust -impl WriteSplitDataTaskGroup { - fn try_complete(&self) -> Option { - match self { - Self::ToFile { current_size, expected_size, file_path, .. } => { - if *current_size >= *expected_size { - Some(proto::DataItem::new_file_data(file_path.clone())) - } else { - None - } - } - Self::ToMem { current_size, expected_size, shared_mem, .. } => { - if *current_size >= *expected_size { - Some(proto::DataItem::new_mem_data(shared_mem.clone())) - } else { - None - } - } - } - } -} -``` - -## 5. 日志增强 [根据错误处理规范] - -### 5.1 关键点日志 -```rust -// 文件写入错误 -tracing::error!("Failed to write file data at offset {}: {}", offset, e); - -// 内存写入错误 -tracing::error!("Failed to write memory data at offset {}: {}", offset, e); - -// 任务提交错误 -tracing::error!("Failed to submit task: channel closed, idx: {:?}", idx); - -// 任务组创建 -tracing::debug!( - "Creating new task group: unique_id={:?}, block_type={:?}, version={}", - unique_id, block_type, version -); - -// 响应器更新错误 -tracing::error!("Failed to respond to old request: {}", e); -``` - -## 6. 测试计划 - -### 6.1 单元测试 -1. WriteSplitDataTaskHandle - - 版本号获取 - - 分片任务提交 - - 任务等待 - -2. WriteSplitDataTaskGroup - - 任务组创建 - - 任务处理循环 - - 完成状态检查 - -3. DataItemSource - - 内存数据读取 - - 文件数据读取 - - 块类型判断 - -4. SharedWithBatchHandler - - 响应器更新 - - 旧响应器处理 - - 最终响应器获取 - -### 6.2 集成测试 -1. 文件写入流程 -2. 内存写入流程 -3. 错误处理 -4. 并发控制 diff --git a/src/main/build.rs b/build.rs similarity index 67% rename from src/main/build.rs rename to build.rs index d16dc9e..acd9818 100644 --- a/src/main/build.rs +++ b/build.rs @@ -1,9 +1,6 @@ use std::io::Result; fn main() -> Result<()> { - let mut config = prost_build::Config::new(); - config - .type_attribute("BatchRequestId", "#[derive(Eq, Hash)]"); - config.compile_protos( + prost_build::compile_protos( &[ "src/general/network/proto_src/kv.proto", "src/general/network/proto_src/raft.proto", @@ -11,7 +8,7 @@ fn main() -> Result<()> { "src/general/network/proto_src/metric.proto", "src/general/network/proto_src/remote_sys.proto", "src/general/network/proto_src/data.proto", - "src/general/app/app_shared/process_rpc_proto.proto", + "src/worker/func/shared/process_rpc_proto.proto", ], &["src/"], )?; diff --git a/compilelog b/compilelog deleted file mode 100644 index 15dee7d..0000000 --- a/compilelog +++ /dev/null @@ -1,75 +0,0 @@ -warning: profiles for the non root package will be ignored, specify profiles at the workspace root: -package: /root/prjs/serverless_benchmark_plus/middlewares/waverless/waverless/src/main/Cargo.toml -workspace: /root/prjs/serverless_benchmark_plus/middlewares/waverless/waverless/Cargo.toml -warning: function `path_is_option` is never used - --> ws_derive/src/lib.rs:21:4 - | -21 | fn path_is_option(path: &syn::Path) -> bool { - | ^^^^^^^^^^^^^^ - | - = note: `#[warn(dead_code)]` on by default - -warning: `ws_derive` (lib) generated 1 warning - Compiling wasm_serverless v0.1.0 (/root/prjs/serverless_benchmark_plus/middlewares/waverless/waverless/src/main) -warning: unused import: `crate::util::zip` - --> src/main/src/general/data/m_data_general/dataitem.rs:11:5 - | -11 | use crate::util::zip; - | ^^^^^^^^^^^^^^^^ - | - = note: `#[warn(unused_imports)]` on by default - -warning: unused import: `crate::general::network::proto::DataItem` - --> src/main/src/general/data/m_data_general/mod.rs:10:5 - | -10 | use crate::general::network::proto::DataItem; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -warning: unused import: 
`WriteSplitTaskResult` - --> src/main/src/general/data/m_data_general/mod.rs:11:36 - | -11 | use dataitem::{DataItemArgWrapper, WriteSplitTaskResult}; - | ^^^^^^^^^^^^^^^^^^^^ - -warning: unused imports: `WSError`, `WSResult`, `WsIoErr` - --> src/main/src/general/m_os/zip.rs:2:21 - | -2 | use crate::result::{WSError, WSResult, WsIoErr}; - | ^^^^^^^ ^^^^^^^^ ^^^^^^^ - -warning: unused imports: `Cursor`, `File`, `Read`, `Seek`, `Write`, `os::unix::fs::PermissionsExt`, `path::Path`, `self`, `self` - --> src/main/src/general/m_os/zip.rs:4:10 - | -4 | fs::{self, File}, - | ^^^^ ^^^^ -5 | io::{self, Cursor, Read, Seek, Write}, - | ^^^^ ^^^^^^ ^^^^ ^^^^ ^^^^^ -6 | os::unix::fs::PermissionsExt, - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -7 | path::Path, - | ^^^^^^^^^^ - -warning: unused import: `walkdir::WalkDir` - --> src/main/src/general/m_os/zip.rs:9:5 - | -9 | use walkdir::WalkDir; - | ^^^^^^^^^^^^^^^^ - -warning: unused imports: `result::ZipError`, `write::FileOptions` - --> src/main/src/general/m_os/zip.rs:10:11 - | -10 | use zip::{result::ZipError, write::FileOptions}; - | ^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^ - -warning: unused import: `crate::general::data::m_data_general::CacheModeVisitor` - --> src/main/src/master/data/m_data_master.rs:4:5 - | -4 | use crate::general::data::m_data_general::CacheModeVisitor; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -warning: unused imports: `CACHE_MODE_MAP_COMMON_KV_MASK`, `CACHE_MODE_TIME_FOREVER_MASK` - --> src/main/src/master/data/m_data_master.rs:17:28 - | -17 | EachNodeSplit, CACHE_MODE_MAP_COMMON_KV_MASK, CACHE_MODE_TIME_FOREVER_MASK, - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - diff --git a/design.canvas b/design.canvas deleted file mode 100755 index aca677a..0000000 --- a/design.canvas +++ /dev/null @@ -1,141 +0,0 @@ -{ - "nodes":[ - {"id":"cb82b904dab26671","type":"group","x":-3420,"y":-1000,"width":6580,"height":3720,"label":"data"}, - {"id":"batch_transfer_group","type":"group","x":-1580,"y":80,"width":4700,"height":1960,"label":"Batch数据传输实现"}, - {"id":"batch_receiver_group","type":"group","x":60,"y":140,"width":2940,"height":1820,"label":"接收端 [DataGeneral]"}, - {"id":"7a2427112a116cd3","type":"group","x":-3360,"y":120,"width":1544,"height":2560,"label":"WriteSplitDataTaskGroup"}, - {"id":"batch_sender_group","type":"group","x":-1520,"y":444,"width":1340,"height":1596,"label":"写入端 [DataGeneral]"}, - {"id":"d3ff298bf342a238","type":"group","x":-1490,"y":817,"width":1290,"height":1195,"label":"fn batch_transfer"}, - {"id":"data_write_flow","type":"group","x":-1580,"y":-880,"width":2680,"height":520,"label":"数据写入流程"}, - {"id":"storage_write_flow","type":"group","x":20,"y":-820,"width":1020,"height":400,"label":"存储节点写入流程"}, - {"id":"7127ed217f71f72d","type":"group","x":-3260,"y":1140,"width":1010,"height":375,"label":"fn register_handle("}, - {"id":"handle_lookup","type":"text","text":"# Handle查找 [条件分支]\n\n## batch_receive_states.get()\n- 已存在: 验证version\n- 不存在: 创建新handle\n","x":395,"y":765,"width":410,"height":210,"color":"2"}, - {"id":"rpc_handle_batch_data","type":"text","text":"# DataGeneral::rpc_handle_batch_data\n\n## 处理流程","x":150,"y":478,"width":570,"height":118,"color":"1"}, - {"id":"state_manager","type":"text","text":"# 状态管理器 [DataGeneral.batch_receive_states]\n\n## 核心数据结构\n```rust\nDashMap\n```\n- BatchReceiveState\n\t- handle: WriteSplitDataTaskHandle\n\t- shared: SharedWithBatchHandler\n## 生命周期\n- 创建: 首次接收分片\n- 更新: 每次接收分片\n- 删除: 写入完成","x":840,"y":171,"width":640,"height":486,"color":"1"}, - 
{"id":"write_task_file","type":"text","text":"# ToFile 写入流程 [阻塞执行]\n\n## WriteSplitDataTaskGroup::ToFile\n- file_path: PathBuf\n- tasks: Vec>\n- rx: mpsc::Receiver>\n- expected_size: usize\n- current_size: usize\n\n## 操作流程 [文件IO阻塞]\n1. OpenOptions::new()\n .create(true)\n .write(true)\n2. seek(offset)\n3. write_all(data)\n4. 错误记录:\n tracing::error!(\"Failed to write file data at offset {}\")\n","x":-2236,"y":504,"width":400,"height":400,"color":"1"}, - {"id":"b0205b4457afeb2b","type":"text","text":"## SharedMemOwnedAccess\n- 共享内存所有权控制\n- 访问安全保证\n- 生命周期管理","x":-2350,"y":202,"width":364,"height":178}, - {"id":"batch_request1","type":"text","text":"# BatchDataRequest(1)\n- request_id (1)\n- dataset_unique_id (2)\n- data_item_idx (3)\n- block_type (4)\n- block_index: 0 (5)\n- data (6)\n- operation (7)\n- unique_id (8)\n- version (9)","x":-160,"y":544,"width":250,"height":120,"color":"2"}, - {"id":"4dbe01dc59cea4c2","type":"text","text":"### pub struct WriteSplitDataTaskHandle {\n tx: mpsc::Sender>,\n write_type: WriteSplitDataType,\n}","x":-2572,"y":1660,"width":418,"height":202}, - {"id":"write_task_mem","type":"text","text":"# ToMem 写入流程 [阻塞执行]\n\n## WriteSplitDataTaskGroup::ToMem\n- shared_mem: SharedMemHolder\n- tasks: Vec>\n- rx: mpsc::Receiver>\n- expected_size: usize\n- current_size: usize\n\n## 操作流程 [内存写入阻塞]\n1. shared_mem.write(offset, data)\n2. 错误记录:\n tracing::error!(\"Failed to write memory data at offset {}\")\n","x":-2670,"y":486,"width":400,"height":436,"color":"2"}, - {"id":"general_phase2","type":"text","text":"General阶段2:调度\n- 生成unique_id\n- 发送调度请求\n- 等待决策返回","x":-1540,"y":-660,"width":200,"height":100,"color":"1"}, - {"id":"general_phase3","type":"text","text":"General阶段3:分发\n- 解析调度决策\n- 创建写入任务组\n- 初始化并发控制","x":-1540,"y":-490,"width":200,"height":100,"color":"1"}, - {"id":"general_phase1","type":"text","text":"General阶段1:准备\n- 初始化DataItems\n- 计算数据大小\n- 创建SharedMemHolder","x":-1540,"y":-790,"width":200,"height":100,"color":"1"}, - {"id":"02d1bafb13062e3b","type":"text","text":"### batch 接口要和 write作区分\n#### batch是主动推送完整数据\n#### write是将数据写入到系统\n\n- wirte中也会使用batch接口用来在写入之前并行推送缓存","x":-1514,"y":142,"width":445,"height":228}, - {"id":"batch_initiator","type":"text","text":"# 发起节点 [DataGeneral]\n\n## call_batch_data()\n- 分割数据块(1MB)\n- 创建有界任务池\n- 建议并发数=3\n- 任务队列控制","x":-1470,"y":488,"width":300,"height":290,"color":"1"}, - {"id":"9fa1c2f8d08978bb","type":"text","text":"## 判断还有分片?","x":-935,"y":1404,"width":230,"height":80,"color":"3"}, - {"id":"data_reader","type":"text","text":"# 数据读取器 [DataSource]\n\n- 计算数据范围\n- 读取数据块 [阻塞]\n- 错误传播","x":-970,"y":1163,"width":300,"height":200,"color":"3"}, - {"id":"data_source_interface","type":"text","text":"# DataSource 接口设计\n\n## trait DataSource: Send + Sync + 'static\n```rust\nasync fn size(&self) -> WSResult;\nasync fn read_chunk(&self, offset: usize, size: usize) -> WSResult>;\nfn block_type(&self) -> BatchDataBlockType;\n```\n\n## 实现类型\n1. FileDataSource\n - 文件路径管理\n - 异步IO操作\n - 错误处理\n\n2. 
MemDataSource\n - Arc<[u8]>共享数据\n - 边界检查\n - 零拷贝优化","x":-1459,"y":864,"width":390,"height":646,"color":"4"}, - {"id":"batch_transfer_main","type":"text","text":"# batch_transfer [主控制器]\n\n- 初始化数据源\n- 创建并发控制器\n- 启动传输任务\n- 等待任务完成\n\n[阻塞执行]","x":-970,"y":837,"width":370,"height":294,"color":"1"}, - {"id":"97d3d9fd7432a861","type":"text","text":"# WriteSplitDataTaskHandle::submit_split() 实现 [异步发送]\n\n## match write_type {\n- WriteSplitDataType::File => 文件写入任务\n- WriteSplitDataType::Mem => 内存写入任务\n}\n\n## 发送任务 [channel阻塞]\ntx.send(task).await","x":-2209,"y":1120,"width":347,"height":445}, - {"id":"write_handle_submit","type":"text","text":"# submit_split() [异步发送]\n\n## 执行流程\n1. 根据write_type构造任务\n2. 发送到任务通道\n3. 错误处理和日志\n\n## 阻塞特性\n- File写入: IO阻塞\n- Mem写入: 内存阻塞\n- 通道发送: channel阻塞","x":-2209,"y":1120,"width":347,"height":445,"color":"2"}, - {"id":"f515ecb9aee18fc7","type":"text","text":"# 后续写入 [异步执行]\n\n## 状态管理\n- 写入任务追踪\n- 并发控制\n- 写入顺序保证","x":-2572,"y":1178,"width":302,"height":275}, - {"id":"223edf4677db9339","type":"text","text":"pub struct WriteSplitDataManager {\n // 只存储任务句柄\n handles: DashMap,\n}","x":-3110,"y":960,"width":610,"height":140}, - {"id":"06d4a92778dd83c8","type":"text","text":"# 第一个分片开始写入 [阻塞执行]\n\n## 初始化写入\nfn start_first_split(data: Vec) -> Result<(), WSError> {\n let task = self.build_task(data, 0);\n self.tasks.push(task);\n self.current_size += data.len();\n Ok(())\n}\n\n## 错误处理\n- 写入失败记录日志\n- 返回具体错误类型","x":-3240,"y":1161,"width":455,"height":310}, - {"id":"batch_data_request","type":"text","text":"# Batch RPC Proto定义\n\n## 数据块类型\nenum BatchDataBlockType {\n MEMORY = 0; // 内存数据块\n FILE = 1; // 文件数据块\n}\n\n## 操作类型\nenum DataOpeType {\n Read = 0;\n Write = 1;\n}\n\n## 请求ID\nmessage BatchRequestId {\n uint32 node_id = 1; // 节点ID\n uint64 sequence = 2; // 原子自增序列号\n}\n\n## 请求消息\nmessage BatchDataRequest {\n BatchRequestId request_id = 1; // 请求唯一标识(节点ID + 序列号)\n uint32 dataset_unique_id = 2; // 数据集唯一标识\n uint32 data_item_idx = 3; // 数据项索引\n BatchDataBlockType block_type = 4; // 数据块类型(文件/内存)\n uint32 block_index = 5; // 数据块索引\n bytes data = 6; // 数据块内容\n DataOpeType operation = 7; // 操作类型\n bytes unique_id = 8; // 数据唯一标识\n uint64 version = 9; // 数据版本\n}\n\n## 响应消息\nmessage BatchDataResponse {\n BatchRequestId request_id = 1; // 对应请求ID\n bool success = 2; // 处理状态\n string error_message = 3; // 错误信息\n uint64 version = 4; // 处理后的版本\n}\n","x":-155,"y":1536,"width":550,"height":1184,"color":"2"}, - {"id":"20145fd68e8aaa75","type":"text","text":"# 构造 [同步初始化]\n\n## fn new_task_group 任务组初始化\nfn new_task_group(type_: WriteSplitDataType) -> Self\n### fn calculate_split\n- calculate_spli 根据block size计算出每个split的range\n 支持range 以在分片大小不一时依旧可以用的灵活性\n- ","x":-3220,"y":1520,"width":542,"height":294}, - {"id":"1ec171d545e8995d","type":"text","text":"## SharedMemHolder\n- 共享内存数据访问\n- 资源自动管理","x":-3105,"y":754,"width":300,"height":150}, - {"id":"data_item","type":"text","text":"# 数据项处理\n\n## enum WriteSplitDataTaskGroup\n- 管理数据分片写入任务组\n- 分片合并优化\n- 状态同步\n- 并行控制\n","x":-3010,"y":140,"width":450,"height":280,"color":"3"}, - {"id":"821e415b6438e20d","type":"text","text":"## struct DataSplit\n- 数据分片管理\n- 分片信息维护\n- 分片操作协调\n- 存储节点分配\n- 局部性优化","x":-2952,"y":-132,"width":342,"height":158,"color":"4"}, - {"id":"core_functions","type":"text","text":"## fn write_data\n- 同步/异步写入\n- 数据完整性保证\n- 分片并行写入\n- 缓存节点同步\n- 错误重试机制","x":-2425,"y":-467,"width":280,"height":275,"color":"4"}, - {"id":"data_general_core","type":"text","text":"# 数据管理核心模块\n- 数据流向控制\n- 并行结构管理\n- 错误处理链\n- 
资源管理","x":-3070,"y":-446,"width":330,"height":234,"color":"4"}, - {"id":"133214da264cfe72","type":"text","text":"## struct DataGeneral\n- 提供数据读写接口\n- 管理元数据\n- 协调各子模块功能\n- 错误处理和恢复\n- 资源管理","x":-2780,"y":-720,"width":340,"height":214,"color":"4"}, - {"id":"completion_monitor","type":"text","text":"# 完成监控 [独立任务]\n\n## 1. 等待写入完成\n```rust\nhandle.wait_all_tasks().await?;\n```\n\n## 2. 发送最终响应\n```rust\nif let Some(final_responsor) = \n shared.get_final_responsor().await {\n final_responsor.response(Ok(()))\n .await?;\n}\n```\n\n## 3. 清理状态\n```rust\nbatch_receive_states.remove(&unique_id);\n```","x":1635,"y":1335,"width":445,"height":571,"color":"4"}, - {"id":"2dbde64bc1dbac6a","type":"text","text":"## 响应任务(独立任务)","x":1760,"y":1132,"width":365,"height":110}, - {"id":"b31695207931d96e","type":"text","text":"## fn get_or_del_data\n- 数据检索和删除\n- 资源清理\n- 缓存一致性\n- 并发访问控制","x":-2310,"y":-662,"width":330,"height":156,"color":"4"}, - {"id":"task_spawn_flow","type":"text","text":"# 任务生成流程 [异步执行]\n\n## 1. 提交分片数据handle.submit_split\n```rust\nstate.handle.submit_split(\n request.block_idx * DEFAULT_BLOCK_SIZE,\n request.data\n).await?\n```\n\n## 2. 更新响应器shared.update_responsor\n```rust\nstate.shared.update_responsor(responsor).await;\n```\nupdate时,旧的reponsor要先返回","x":480,"y":1106,"width":405,"height":538,"color":"3"}, - {"id":"e156c034cc9ec24f","type":"text","text":"## responsor send","x":595,"y":1755,"width":250,"height":60}, - {"id":"write_task_handle","type":"text","text":"# 写入任务句柄 [WriteSplitDataTaskHandle]\n\n## 关键对象\n```rust\npub struct WriteSplitDataTaskHandle {\n tx: mpsc::Sender>,\n write_type: WriteSplitDataType,\n}\n```\n\n## 核心函数\n```rust\nasync fn submit_split(\n &self,\n offset: usize,\n data: Vec\n) -> WSResult<()>\n```","x":956,"y":765,"width":505,"height":530,"color":"2"}, - {"id":"task_spawner","type":"text","text":"# tokio::spawn 响应任务\n\n```\n\n## 核心函数\n```rust\nfn spawn_write_task(\n data: Vec,\n offset: usize\n) -> JoinHandle<()>\n```","x":1008,"y":1385,"width":400,"height":400,"color":"3"}, - {"id":"rpc_caller","type":"text","text":"# RPC调用器 [view.rpc_call]\n\n- 构造请求\n- 发送数据 [阻塞]\n- 等待响应 [阻塞]\n- 错误处理","x":-520,"y":1267,"width":300,"height":200,"color":"4"}, - {"id":"parallel_task","type":"text","text":"# 并行任务 \n- 持有信号量许可\n- 执行RPC调用\n- 处理响应\n- 自动释放许可\n\n[独立执行]","x":-520,"y":1579,"width":300,"height":200,"color":"6"}, - {"id":"batch_request3","type":"text","text":"# BatchDataRequest(3)\n- request_id (1)\n- dataset_unique_id (2)\n- data_item_idx (3)\n- block_type (4)\n- block_index: 2 (5)\n- data (6)\n- operation (7)\n- unique_id (8)\n- version (9)","x":-160,"y":784,"width":250,"height":120,"color":"2"}, - {"id":"storage_node_5","type":"text","text":"存储节点3","x":-400,"y":-680,"width":150,"height":60,"color":"3"}, - {"id":"storage_node_4","type":"text","text":"存储节点2","x":-400,"y":-760,"width":150,"height":60,"color":"3"}, - {"id":"cache_node_3","type":"text","text":"缓存节点3","x":-400,"y":-480,"width":150,"height":60,"color":"5"}, - {"id":"cache_node_1","type":"text","text":"缓存节点1","x":-400,"y":-640,"width":150,"height":60,"color":"5"}, - {"id":"cache_node_2","type":"text","text":"缓存节点2","x":-400,"y":-560,"width":150,"height":60,"color":"5"}, - {"id":"storage_node_1","type":"text","text":"存储节点1\n接收层:\n- 接收分片请求\n- 版本号验证\n- 数据完整性校验\n写入任务层:\n- 分片范围验证\n- 并发写入控制\n- 错误重试机制\n本地存储层:\n- 数据持久化\n- 版本管理\n- 空间回收\n结果返回:\n- 写入状态\n- 远程版本号\n- 错误信息","x":60,"y":-780,"width":200,"height":280,"color":"1"}, - {"id":"write_task_1","type":"text","text":"写入任务1\n- 分片范围验证\n- 数据完整性检查\n- 并发写入控制\n- 
错误重试","x":400,"y":-780,"width":200,"height":120,"color":"2"}, - {"id":"batch_data_constants","type":"text","text":"# 批量数据常量定义\n\n## 数据块大小\n```rust\n/// 默认数据块大小 (4MB)\nconst DEFAULT_BLOCK_SIZE: usize = 4 * 1024 * 1024;\n```\n\n## 数据分片索引\n```rust\n/// 数据分片在整体数据中的偏移量\npub type DataSplitIdx = usize;\n```","x":-160,"y":1052,"width":400,"height":380,"color":"4"}, - {"id":"batch_request2","type":"text","text":"# BatchDataRequest(2)\n- request_id (1)\n- dataset_unique_id (2)\n- data_item_idx (3)\n- block_type (4)\n- block_index: 1 (5)\n- data (6)\n- operation (7)\n- unique_id (8)\n- version (9)","x":-160,"y":664,"width":250,"height":120,"color":"2"}, - {"id":"storage_node_3","type":"text","text":"存储节点1","x":-405,"y":-830,"width":150,"height":60,"color":"3"}, - {"id":"master_node","type":"text","text":"Master节点 [DataMaster]\n- schedule_data()\n1. 生成DataSetMeta\n2. 创建DataSplits\n3. 分配存储节点\n4. 返回调度决策","x":-1080,"y":-790,"width":200,"height":160,"color":"2"}, - {"id":"storage_group","type":"text","text":"存储节点组","x":-600,"y":-790,"width":150,"height":60,"color":"3"}, - {"id":"cache_group","type":"text","text":"缓存节点组","x":-600,"y":-590,"width":150,"height":60,"color":"5"}, - {"id":"write_result_1","type":"text","text":"写入结果1\n- 成功/失败\n- 远程版本号\n- 错误信息","x":660,"y":-560,"width":200,"height":100,"color":"4"}, - {"id":"86a8707f54d19c74","type":"text","text":"join all,并返回","x":-1389,"y":1549,"width":250,"height":60}, - {"id":"task_pool","type":"text","text":"# 任务池 [handles]\n\n- 收集任务句柄\n- 等待任务完成 [阻塞]\n- 错误聚合","x":-1414,"y":1732,"width":300,"height":260,"color":"5"}, - {"id":"5009f9e4bcc6ed6c","type":"text","text":"### 加入任务池","x":-920,"y":1902,"width":250,"height":60}, - {"id":"f8ade98240211305","type":"text","text":"### [tokio::spawn]\n","x":-945,"y":1784,"width":250,"height":60}, - {"id":"concurrency_controller","type":"text","text":"# 并发控制器 [Semaphore]\n\n- 最大并发数: 32\n- 许可获取 [阻塞]\n- 许可释放 [非阻塞]\n- RAII风格管理","x":-970,"y":1536,"width":300,"height":200,"color":"2"}, - {"id":"handle_wait_all","type":"text","text":"# handle.wait_all_tasks [异步等待]\n\n## 核心职责\n- 等待所有分片任务完成\n- 处理任务执行结果\n- 清理任务资源\n\n## 实现细节\n```rust\nasync fn wait_all_tasks(&self) -> WSResult<()> {\n // 等待所有任务完成\n while let Some(task) = rx.recv().await {\n task.await??;\n }\n Ok(())\n}\n```\n\n## 调用时机\n1. 外部调用: 批量传输完成检查\n2. 内部调用: process_tasks完成时","x":-2209,"y":1922,"width":320,"height":400}, - {"id":"0dee80a0e2345514","type":"text","text":"# 完成处理 [同步]\n\n## 执行流程\n1. 合并所有分片数据\n2. 构造最终DataItem\n3. 返回Some(item)给process_tasks\n4. process_tasks收到完成信号后退出循环\n\n## 数据流向\nprocess_tasks -> try_complete -> handle.wait_all_tasks","x":-2176,"y":2380,"width":254,"height":260}, - {"id":"e2576a54f3f852b3","type":"text","text":"# process_tasks() 实现 [阻塞循环]\n\n## 循环处理 [select阻塞]\n1. try_complete() 检查完成状态\n2. tokio::select! {\n - rx.recv() => 接收新任务\n - futures::future::select_all(tasks) => 等待任务完成\n}\n\n## 完成条件\n- current_size >= expected_size\n- 返回 proto::DataItem\n\n## 核心职责\n- 作为group的主事件循环\n- 在new group后立即启动\n- 负责接收和处理所有提交的任务\n- 维护任务状态直到完成\n\n## 执行流程\n1. 循环开始前检查完成状态\n2. 使用select等待新任务或已有任务完成\n3. 处理完成的任务并更新状态\n4. 检查是否达到完成条件\n5. 未完成则继续循环\n6. 完成则返回合并后的数据","x":-3272,"y":1892,"width":517,"height":688}, - {"id":"155106edf5eb3cd7","type":"text","text":"# 检查完成状态 try_complete() 实现 [同步检查]\n\n## 核心职责\n- 是process_tasks内部使用的状态检查\n- 判断是否所有分片都完成\n- 返回最终合并的数据\n\n## 检查流程\n1. 验证current_size是否达到expected_size\n2. 检查所有任务是否完成\n3. 合并分片数据\n4. 
返回Option\n\n## 返回值\n- Some(item): 所有分片完成,返回合并数据\n- None: 未完成,继续等待\n\n## 错误处理\n- 分片数据不完整\n- 合并失败\n- 数据损坏","x":-2678,"y":2180,"width":455,"height":400} - ], - "edges":[ - {"id":"master_to_phase2","fromNode":"master_node","fromSide":"left","toNode":"general_phase2","toSide":"right","label":"调度决策\n- version\n- splits\n- nodes"}, - {"id":"phase2_to_phase3","fromNode":"general_phase2","fromSide":"bottom","toNode":"general_phase3","toSide":"top","label":"决策信息"}, - {"id":"phase3_to_storage","fromNode":"general_phase3","fromSide":"right","toNode":"storage_group","toSide":"left","label":"分发存储任务"}, - {"id":"storage_to_nodes","fromNode":"storage_group","fromSide":"right","toNode":"storage_node_3","toSide":"left"}, - {"id":"storage_to_nodes2","fromNode":"storage_group","fromSide":"right","toNode":"storage_node_4","toSide":"left"}, - {"id":"storage_to_nodes3","fromNode":"storage_group","fromSide":"right","toNode":"storage_node_5","toSide":"left"}, - {"id":"phase3_to_cache","fromNode":"general_phase3","fromSide":"right","toNode":"cache_group","toSide":"left","label":"分发缓存任务"}, - {"id":"cache_to_nodes","fromNode":"cache_group","fromSide":"right","toNode":"cache_node_1","toSide":"left"}, - {"id":"cache_to_nodes2","fromNode":"cache_group","fromSide":"right","toNode":"cache_node_2","toSide":"left"}, - {"id":"cache_to_nodes3","fromNode":"cache_group","fromSide":"right","toNode":"cache_node_3","toSide":"left"}, - {"id":"b5a17c0afede8e4a","fromNode":"data_general_core","fromSide":"right","toNode":"133214da264cfe72","toSide":"bottom"}, - {"id":"2ad5991c43fd6098","fromNode":"data_general_core","fromSide":"right","toNode":"821e415b6438e20d","toSide":"top"}, - {"id":"caa45c92a135042c","fromNode":"data_general_core","fromSide":"right","toNode":"core_functions","toSide":"left"}, - {"id":"09c7b9957992d62d","fromNode":"data_general_core","fromSide":"right","toNode":"b31695207931d96e","toSide":"left"}, - {"id":"3d79872a234731c0","fromNode":"cache_node_3","fromSide":"bottom","toNode":"batch_transfer_group","toSide":"top"}, - {"id":"9094221953b6c685","fromNode":"write_task_mem","fromSide":"top","toNode":"b0205b4457afeb2b","toSide":"bottom"}, - {"id":"77ec04f5deef7cee","fromNode":"write_task_mem","fromSide":"left","toNode":"1ec171d545e8995d","toSide":"top"}, - {"id":"7b99fb72410f07d9","fromNode":"06d4a92778dd83c8","fromSide":"bottom","toNode":"20145fd68e8aaa75","toSide":"top"}, - {"id":"df9b4bc9170fdec1","fromNode":"20145fd68e8aaa75","fromSide":"right","toNode":"4dbe01dc59cea4c2","toSide":"left"}, - {"id":"61e0637af4beba94","fromNode":"f515ecb9aee18fc7","fromSide":"left","toNode":"4dbe01dc59cea4c2","toSide":"left"}, - {"id":"f7105db89ffabd1e","fromNode":"20145fd68e8aaa75","fromSide":"bottom","toNode":"e2576a54f3f852b3","toSide":"top"}, - {"id":"7504b1b3a99e992c","fromNode":"4dbe01dc59cea4c2","fromSide":"right","toNode":"97d3d9fd7432a861","toSide":"bottom","label":"获取到handle"}, - {"id":"a993a3f4d7b2211d","fromNode":"97d3d9fd7432a861","fromSide":"left","toNode":"e2576a54f3f852b3","toSide":"right"}, - {"id":"a996588f6c59c88f","fromNode":"e2576a54f3f852b3","fromSide":"bottom","toNode":"155106edf5eb3cd7","toSide":"left"}, - {"id":"a42104592fedd4c7","fromNode":"97d3d9fd7432a861","fromSide":"right","toNode":"write_task_mem","toSide":"bottom"}, - {"id":"c45aaa564ae87a7c","fromNode":"97d3d9fd7432a861","fromSide":"right","toNode":"write_task_file","toSide":"bottom"}, - {"id":"write_flow_1","fromNode":"20145fd68e8aaa75","fromSide":"top","toNode":"06d4a92778dd83c8","toSide":"bottom","label":"初始化完成"}, - 
{"id":"write_flow_2","fromNode":"06d4a92778dd83c8","fromSide":"right","toNode":"f515ecb9aee18fc7","toSide":"left","label":"首个分片写入完成"}, - {"id":"86a2aa913f7bd3d9","fromNode":"223edf4677db9339","fromSide":"bottom","toNode":"06d4a92778dd83c8","toSide":"top"}, - {"id":"a99c309f19fd9853","fromNode":"batch_request1","fromSide":"right","toNode":"rpc_handle_batch_data","toSide":"left"}, - {"id":"batch_data_flow2","fromNode":"batch_data_constants","fromSide":"top","toNode":"batch_request3","toSide":"bottom","label":"使用常量"}, - {"id":"5e772afc67478d04","fromNode":"rpc_handle_batch_data","fromSide":"bottom","toNode":"handle_lookup","toSide":"top"}, - {"id":"concurrency_to_task","fromNode":"concurrency_controller","fromSide":"bottom","toNode":"f8ade98240211305","toSide":"top"}, - {"id":"task_to_rpc","fromNode":"parallel_task","fromSide":"top","toNode":"rpc_caller","toSide":"bottom","label":"调用"}, - {"id":"213831c4b82c9e93","fromNode":"data_source_interface","fromSide":"right","toNode":"data_reader","toSide":"left"}, - {"id":"7218875ebe7967fa","fromNode":"batch_transfer_main","fromSide":"bottom","toNode":"data_reader","toSide":"top"}, - {"id":"4b20152fe7211934","fromNode":"data_reader","fromSide":"bottom","toNode":"9fa1c2f8d08978bb","toSide":"top"}, - {"id":"4da12698f8ee3b63","fromNode":"rpc_caller","fromSide":"top","toNode":"batch_request3","toSide":"left"}, - {"id":"f4671fc434a3d0e1","fromNode":"f8ade98240211305","fromSide":"bottom","toNode":"5009f9e4bcc6ed6c","toSide":"top","label":"\n"}, - {"id":"9f748faecadaaa42","fromNode":"f8ade98240211305","fromSide":"right","toNode":"parallel_task","toSide":"left"}, - {"id":"8115e7d6d539f0c0","fromNode":"5009f9e4bcc6ed6c","fromSide":"right","toNode":"data_reader","toSide":"right"}, - {"id":"9e8cb09dfe630443","fromNode":"9fa1c2f8d08978bb","fromSide":"bottom","toNode":"concurrency_controller","toSide":"top"}, - {"id":"d95b89e25235928f","fromNode":"9fa1c2f8d08978bb","fromSide":"left","toNode":"86a8707f54d19c74","toSide":"right"}, - {"id":"9debe9b97cdaf245","fromNode":"86a8707f54d19c74","fromSide":"bottom","toNode":"task_pool","toSide":"top"}, - {"id":"a63472bc8934c7f9","fromNode":"5009f9e4bcc6ed6c","fromSide":"left","toNode":"task_pool","toSide":"right"}, - {"id":"f3ca63243b2c22f7","fromNode":"batch_initiator","fromSide":"right","toNode":"batch_transfer_main","toSide":"left"}, - {"id":"handle_to_spawner","fromNode":"write_task_handle","fromSide":"bottom","toNode":"task_spawner","toSide":"top","label":"tokio::spawn()"}, - {"id":"lookup_to_submit","fromNode":"handle_lookup","fromSide":"right","toNode":"write_task_handle","toSide":"left","label":"\n"}, - {"id":"9abc95f005b8b2d8","fromNode":"task_spawner","fromSide":"right","toNode":"2dbde64bc1dbac6a","toSide":"left"}, - {"id":"e6bd3dfca32e245b","fromNode":"handle_lookup","fromSide":"bottom","toNode":"task_spawn_flow","toSide":"top"}, - {"id":"3fca8aa5c568a44d","fromNode":"task_spawner","fromSide":"left","toNode":"task_spawn_flow","toSide":"right"}, - {"id":"0a095928ebb7ac26","fromNode":"2dbde64bc1dbac6a","fromSide":"bottom","toNode":"completion_monitor","toSide":"top"}, - {"id":"dcf437aa83674d1a","fromNode":"completion_monitor","fromSide":"left","toNode":"e156c034cc9ec24f","toSide":"right"}, - {"id":"7ae0cf5ea0bc0b06","fromNode":"task_spawn_flow","fromSide":"bottom","toNode":"e156c034cc9ec24f","toSide":"top"}, - {"id":"49b65724e2a3b08f","fromNode":"e156c034cc9ec24f","fromSide":"left","toNode":"batch_request3","toSide":"right"}, - 
{"id":"lookup_to_state","fromNode":"handle_lookup","fromSide":"top","toNode":"state_manager","toSide":"bottom","label":"查找/创建 proto::BatchRequestId"}, - {"id":"monitor_to_state","fromNode":"completion_monitor","fromSide":"right","toNode":"state_manager","toSide":"bottom","label":"清理"}, - {"id":"facc3fcfb55cf19d","fromNode":"batch_data_request","fromSide":"top","toNode":"batch_request3","toSide":"bottom"}, - {"id":"271f79d015a55fdf","fromNode":"batch_data_request","fromSide":"right","toNode":"e156c034cc9ec24f","toSide":"bottom"}, - {"id":"6a7413aedbbca964","fromNode":"155106edf5eb3cd7","fromSide":"top","toNode":"e2576a54f3f852b3","toSide":"right","label":"未完成"}, - {"id":"6604bc585e5ffe59","fromNode":"155106edf5eb3cd7","fromSide":"bottom","toNode":"0dee80a0e2345514","toSide":"bottom","label":"完成"}, - {"id":"handle_wait_flow","fromNode":"0dee80a0e2345514","fromSide":"right","toNode":"handle_wait_all","toSide":"right","label":"通知等待完成"}, - {"id":"e732f2950f5744ff","fromNode":"4dbe01dc59cea4c2","fromSide":"bottom","toNode":"handle_wait_all","toSide":"top"} - ] -} \ No newline at end of file diff --git a/design_of_new_batch.md b/design_of_new_batch.md deleted file mode 100755 index bb6ba00..0000000 --- a/design_of_new_batch.md +++ /dev/null @@ -1,699 +0,0 @@ -# 项目分析与修改计划 - - -### 变更 - -#### 核心接口定义 -```rust - - -#### WriteSplitDataTaskGroup 核心实现 -```rust -// 写入任务相关错误 -#[derive(Debug)] -pub enum WsDataErr { - WriteDataFailed { - unique_id: Vec, - }, - SplitTaskFailed { - idx: DataSplitIdx, - }, -} - -// 写入任务句柄,用于提交新的分片任务 -pub struct WriteSplitDataTaskHandle { - tx: mpsc::Sender>, - write_type: WriteSplitDataType, -} - -// 写入类型 -enum WriteSplitDataType { - File { - path: PathBuf, - }, - Mem { - shared_mem: SharedMemHolder, - }, -} - -impl WriteSplitDataTaskHandle { - // 提交新的分片任务 - pub async fn submit_split(&self, idx: DataSplitIdx, data: proto::DataItem) { - let task = match &self.write_type { - WriteSplitDataType::File { path } => { - let path = path.clone(); - let offset = idx.offset; - let data = data.as_bytes().to_vec(); - tokio::spawn(async move { - if let Err(e) = tokio::fs::OpenOptions::new() - .create(true) - .write(true) - .open(&path) - .await - .and_then(|mut file| async move { - file.seek(SeekFrom::Start(offset)).await?; - file.write_all(&data).await - }) - .await - { - tracing::error!("Failed to write file data at offset {}: {}", offset, e); - } - }) - } - WriteSplitDataType::Mem { shared_mem } => { - let mem = shared_mem.clone(); - let offset = idx.offset as usize; - let data = data.as_bytes().to_vec(); - tokio::spawn(async move { - if let Err(e) = mem.write(offset, &data).await { - tracing::error!("Failed to write memory data at offset {}: {}", offset, e); - } - }) - } - }; - - if let Err(e) = self.tx.send(task).await { - tracing::error!("Failed to submit task: channel closed, idx: {:?}", idx); - } - } -} - -// 写入任务组 -enum WriteSplitDataTaskGroup { - // 文件写入模式 - ToFile { - unique_id: UniqueId, // 任务唯一标识 - file_path: PathBuf, // 文件路径 - tasks: Vec>, // 写入任务列表 - rx: mpsc::Receiver>, // 任务接收通道 - expected_size: usize, // 预期总大小 - current_size: usize, // 当前写入大小 - manager: Arc, // 管理器引用 - }, - // 内存写入模式 - ToMem { - unique_id: UniqueId, // 任务唯一标识 - shared_mem: SharedMemHolder, // 共享内存 - tasks: Vec>, // 写入任务列表 - rx: mpsc::Receiver>, // 任务接收通道 - expected_size: usize, // 预期总大小 - current_size: usize, // 当前写入大小 - manager: Arc, // 管理器引用 - } -} - -impl WriteSplitDataTaskGroup { - // 创建新任务组 - async fn new( - unique_id: UniqueId, - splits: Vec>, - block_type: proto::BatchDataBlockType, - manager: Arc, 
- ) -> (Self, WriteSplitDataTaskHandle) { - // 计算预期总大小 - let expected_size = splits.iter().map(|range| range.len()).sum(); - - // 创建通道 - let (tx, rx) = mpsc::channel(32); - - match block_type { - proto::BatchDataBlockType::File => { - let file_path = PathBuf::from(format!("{}.data", - base64::engine::general_purpose::STANDARD.encode(&unique_id))); - - let handle = WriteSplitDataTaskHandle { - tx, - write_type: WriteSplitDataType::File { - path: file_path.clone(), - }, - }; - - let group = Self::ToFile { - unique_id, - file_path, - tasks: Vec::new(), - rx, - expected_size, - current_size: 0, - manager: manager.clone(), - }; - - (group, handle) - } - _ => { - let shared_mem = new_shared_mem(&splits).unwrap_or_default(); - - let handle = WriteSplitDataTaskHandle { - tx, - write_type: WriteSplitDataType::Mem { - shared_mem: shared_mem.clone(), - }, - }; - - let group = Self::ToMem { - unique_id, - shared_mem, - tasks: Vec::new(), - rx, - expected_size, - current_size: 0, - manager: manager.clone(), - }; - - (group, handle) - } - } - } - - // 处理任务完成 - async fn handle_completion(&self) { - match self { - Self::ToFile { unique_id, manager, .. } | - Self::ToMem { unique_id, manager, .. } => { - // 从管理器中移除句柄 - manager.remove_handle(unique_id); - } - } - } - - // 任务处理循环 - async fn process_tasks(&mut self) -> WSResult { - loop { - // 检查是否已完成所有写入 - if let Some(result) = self.try_complete() { - // 处理完成,清理资源 - self.handle_completion().await; - return Ok(result); - } - - // 等待新任务或已有任务完成 - tokio::select! { - Some(new_task) = match self { - Self::ToFile { rx, .. } | - Self::ToMem { rx, .. } => rx.recv() - } => { - match self { - Self::ToFile { tasks, .. } | - Self::ToMem { tasks, .. } => { - tasks.push(new_task); - } - } - } - else => { - // 通道关闭,清理资源 - self.handle_completion().await; - break; - } - } - } - - Err(WSError::WsDataError(WsDataErr::WriteDataFailed { - unique_id: match self { - Self::ToFile { unique_id, .. } | - Self::ToMem { unique_id, .. } => unique_id.clone(), - } - })) - } -} - -// WriteSplitDataManager 管理器 -pub struct WriteSplitDataManager { - // 只存储任务句柄 - handles: DashMap, -} - -impl WriteSplitDataManager { - pub fn new() -> Arc { - Arc::new(Self { - handles: DashMap::new(), - }) - } - - // 注册新的任务句柄 - pub fn register_handle( - &self, - request_id: proto::BatchRequestId, - handle: WriteSplitDataTaskHandle, - ) -> WSResult<()> { - // 检查是否已存在 - if self.handles.contains_key(&request_id) { - return Err(WSError::WsDataError(WsDataErr::WriteDataFailed { - request_id, - })); - } - - // 存储句柄 - self.handles.insert(request_id, handle); - Ok(()) - } - - // 获取已存在的任务句柄 - pub fn get_handle(&self, request_id: &proto::BatchRequestId) -> Option { - self.handles.get(request_id).map(|h| h.clone()) - } - - // 移除任务句柄 - pub fn remove_handle(&self, request_id: &proto::BatchRequestId) { - self.handles.remove(request_id); - } -} - -## 修改 使用情况以适配新接口 计划 - -### 1. 
修改 get_or_del_data 函数 - -```diff - pub async fn get_or_del_data(&self, GetOrDelDataArg { meta, unique_id, ty }: GetOrDelDataArg) - -> WSResult<(DataSetMetaV2, HashMap)> - { - let want_idxs: Vec = WantIdxIter::new(&ty, meta.data_item_cnt() as DataItemIdx).collect(); - - let mut groups = Vec::new(); - let mut idxs = Vec::new(); - let p2p = self.view.p2p(); - let mut ret = HashMap::new(); - - for idx in want_idxs { - // 为每个数据项创建独立的任务组 - let (tx, rx) = tokio::sync::mpsc::channel(1); - let splits = vec![0..1]; - let splits = vec![0..1]; - let (mut group, handle) = WriteSplitDataTaskGroup::new( - unique_id.clone(), - splits, - match ty { - GetOrDelDataArgType::Delete => proto::BatchDataBlockType::Delete, - _ => proto::BatchDataBlockType::Memory, - }, - Arc::clone(&self.manager), - ).await; - - let p2p = p2p.clone(); - let unique_id = unique_id.clone(); - let data_node = meta.get_data_node(idx); - let delete = matches!(ty, GetOrDelDataArgType::Delete); - let rpc_call = self.rpc_call_get_data.clone(); - - let handle_clone = handle.clone(); - let handle = tokio::spawn(async move { - let resp = rpc_call.call( - p2p, - data_node, - proto::GetOneDataRequest { - unique_id: unique_id.to_vec(), - idxs: vec![idx as u32], - delete, - return_data: true, - }, - Some(Duration::from_secs(60)), - ).await?; - - if !resp.success { - tracing::error!("Failed to get data for idx {}: {}", idx, resp.message); - return Err(WsDataError::GetDataFailed { - unique_id: unique_id.to_vec(), - msg: resp.message, - }.into()); - } - - handle_clone.submit_split(0, resp.data[0].clone()).await; - Ok::<_, WSError>(()) - }); - - groups.push(group); - idxs.push((idx, handle)); - } - - // 等待所有RPC任务完成 - for (group, (idx, handle)) in groups.into_iter().zip(idxs.into_iter()) { - if let Err(e) = handle.await.map_err(|e| WSError::from(e))?.map_err(|e| e) { - tracing::error!("RPC task failed for idx {}: {}", idx, e); - continue; - } - - match group.join().await { - Ok(data_item) => { - ret.insert(idx, data_item); - } - Err(e) => { - tracing::error!("Task group join failed for idx {}: {}", idx, e); - } - } - } - - Ok(ret) -} -``` - -### 2. Batch数据处理流程更新 - -#### 2.1 WriteSplitDataTaskHandle扩展 等待全部完成的函数 - -```rust -impl WriteSplitDataTaskHandle { - ... 
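    // Sketch of one possible implementation (an assumption -- the draft below leaves
    // the body empty): consume `self` and drop `self.tx` so the task group's
    // `rx.recv()` loop observes channel closure, then wait for a completion signal
    // from the group, e.g. via an assumed `oneshot::Receiver<WSResult<()>>` held
    // alongside `tx`. The later review.md draft keeps only the `drop(self.tx)` half.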
- - /// 等待所有已提交的写入任务完成 - pub async fn wait_all_tasks(self) -> WSResult<()> { - } -} -``` - -#### 2.2 BatchTransfer 实现 - -```rust -/// 数据源接口 -#[async_trait] -pub trait DataSource: Send + Sync + 'static { - /// 获取数据总大小 - async fn size(&self) -> WSResult; - /// 读取指定范围的数据 - async fn read_chunk(&self, offset: usize, size: usize) -> WSResult>; - /// 获取数据块类型 - fn block_type(&self) -> BatchDataBlockType; -} - -/// 批量传输数据 -pub async fn ( - unique_id: Vec, - version: u64, - target_node: NodeID, - data: Arc, - view: DataGeneralView, -) -> WSResult<()> { - let total_size = data.size().await?; - let total_blocks = (total_size + DEFAULT_BLOCK_SIZE - 1) / DEFAULT_BLOCK_SIZE; - let semaphore = Arc::new(Semaphore::new(32)); - let mut handles = Vec::new(); - - // 发送所有数据块 - for block_idx in 0..total_blocks { - // 获取信号量许可 - let permit = semaphore.clone().acquire_owned().await.unwrap(); - - let offset = block_idx as usize * DEFAULT_BLOCK_SIZE; - let size = DEFAULT_BLOCK_SIZE.min(total_size - offset); - - // 读取数据块 - let block_data = data.read_chunk(offset, size).await?; - - // 构造请求 - let request = proto::BatchDataRequest { - request_id: Some(proto::BatchRequestId { - node_id: target_node as u32, - sequence: block_idx as u32, - }), - block_type: data.block_type() as i32, - block_index: block_idx as u32, - data: block_data, - operation: proto::DataOpeType::Write as i32, - unique_id: unique_id.clone(), - version, - }; - - // 发送请求 - let view = view.clone(); - let handle = tokio::spawn(async move { - let _permit = permit; // 持有permit直到任务完成 - let resp = view.data_general().rpc_call_batch_data.call( - view.p2p(), - target_node, - request, - Some(Duration::from_secs(30)), - ).await?; - - if !resp.success { - return Err(WsDataError::BatchTransferFailed { - node: target_node, - batch: block_idx as u32, - reason: resp.error_message, - }.into()); - } - - Ok(()) - }); - - handles.push(handle); - } - - // 等待所有请求完成 - for handle in handles { - handle.await??; - } - - Ok(()) -} -``` - -#### 2.3 DataGeneral RPC处理实现 - -```rust -/// 默认数据块大小 (4MB) -const DEFAULT_BLOCK_SIZE: usize = 4 * 1024 * 1024; - -/// 批量数据传输状态 -struct BatchTransferState { - handle: WriteSplitDataTaskHandle, - shared: SharedWithBatchHandler, -} - -/// 共享状态,用于记录最新的请求响应器 -#[derive(Clone)] -struct SharedWithBatchHandler { - responsor: Arc>>>, -} - -impl SharedWithBatchHandler { - fn new() -> Self { - Self { - responsor: Arc::new(Mutex::new(None)), - } - } - - async fn update_responsor(&self, responsor: RPCResponsor) { - let mut guard = self.responsor.lock().await; - if let Some(old_responsor) = guard.take() { - // 旧的responsor直接返回成功 - if let Err(e) = old_responsor.response(Ok(())).await { - tracing::error!("Failed to respond to old request: {}", e); - } - } - *guard = Some(responsor); - } - - async fn get_final_responsor(&self) -> Option> { - self.responsor.lock().await.take() - } -} - -impl DataGeneral { - /// 创建新的DataGeneral实例 - pub fn new() -> Self { - Self { - batch_receive_states: DashMap::new(), - // ...其他字段 - } - } -} - -impl DataGeneral { - /// 处理批量数据写入请求 - /// - /// # 处理流程 - /// 1. 从batch_receive_states查询或创建传输状态 - /// 2. 使用WriteSplitDataTaskHandle提交写入任务 - /// 3. 等待写入完成并返回结果 - pub async fn rpc_handle_batch_data( - &self, - request: BatchDataRequest, - responsor: RPCResponsor, - ) -> WSResult<()> { - // 1. 
从batch_receive_states查询或创建传输状态 - let state = if let Some(state) = self.batch_receive_states.get(&request.unique_id) { - // 验证版本号 - if state.handle.version() != request.version { - tracing::error!( - "Version mismatch for transfer {}, expected {}, got {}", - hex::encode(&request.unique_id), - state.handle.version(), - request.version - ); - return Err(WSError::BatchError(WsBatchErr::VersionMismatch { - expected: state.handle.version(), - actual: request.version, - })); - } - state - } else { - // 创建新的写入任务组 - let (group, handle) = WriteSplitDataTaskGroup::new( - request.unique_id.clone(), - calculate_splits(request.total_blocks), - request.block_type, - ).await?; - - // 创建共享状态 - let shared = SharedWithBatchHandler::new(); - let state = BatchTransferState { handle: handle.clone(), shared: shared.clone() }; - - // 启动等待完成的任务 - let unique_id = request.unique_id.clone(); - let batch_receive_states = self.batch_receive_states.clone(); - tokio::spawn(async move { - // 等待所有任务完成 - if let Err(e) = handle.wait_all_tasks().await { - tracing::error!( - "Failed to complete transfer {}: {}", - hex::encode(&unique_id), - e - ); - // 获取最后的responsor并返回错误 - if let Some(final_responsor) = shared.get_final_responsor().await { - if let Err(e) = final_responsor.response(Err(e)).await { - tracing::error!("Failed to send error response: {}", e); - } - } - // 清理状态 - batch_receive_states.remove(&unique_id); - return; - } - - // 获取最后的responsor并返回成功 - if let Some(final_responsor) = shared.get_final_responsor().await { - if let Err(e) = final_responsor.response(Ok(())).await { - tracing::error!("Failed to send success response: {}", e); - } - } - // 清理状态 - batch_receive_states.remove(&unique_id); - }); - - // 插入新状态 - self.batch_receive_states.insert(request.unique_id.clone(), state); - self.batch_receive_states.get(&request.unique_id).unwrap() - }; - - // 2. 使用WriteSplitDataTaskHandle提交写入任务 - let offset = request.block_idx as usize * DEFAULT_BLOCK_SIZE; - - if let Err(e) = state.handle.submit_split(offset, request.data).await { - tracing::error!( - "Failed to submit split for transfer {}, block {}: {}", - hex::encode(&request.unique_id), - request.block_idx, - e - ); - return Err(e); - } - - // 3. 
更新共享状态中的responsor - state.shared.update_responsor(responsor).await; - - tracing::debug!( - "Successfully submitted block {} for transfer {}", - request.block_idx, - hex::encode(&request.unique_id) - ); - - Ok(()) - } -} - -/// 计算数据分片范围 -fn calculate_splits(total_blocks: u32) -> Vec> { - let mut splits = Vec::with_capacity(total_blocks as usize); - for i in 0..total_blocks { - let start = i as usize * DEFAULT_BLOCK_SIZE; - let end = start + DEFAULT_BLOCK_SIZE; - splits.push(start..end); - } - splits -} - -/// 数据源实现 -pub struct FileDataSource { - path: PathBuf, - file: Option, -} - -impl FileDataSource { - pub fn new(path: PathBuf) -> Self { - Self { - path, - file: None, - } - } -} - -#[async_trait] -impl DataSource for FileDataSource { - async fn size(&self) -> WSResult { - tokio::fs::metadata(&self.path) - .await - .map(|m| m.len() as usize) - .map_err(|e| WsDataError::ReadSourceFailed { - source: format!("{}", self.path.display()), - error: e.to_string(), - }.into()) - } - - async fn read_chunk(&self, offset: usize, size: usize) -> WSResult> { - let mut file = tokio::fs::File::open(&self.path).await - .map_err(|e| WsDataError::ReadSourceFailed { - source: format!("{}", self.path.display()), - error: e.to_string(), - })?; - - file.seek(SeekFrom::Start(offset as u64)).await - .map_err(|e| WsDataError::ReadSourceFailed { - source: format!("{}", self.path.display()), - error: e.to_string(), - })?; - - let mut buf = vec![0; size]; - file.read_exact(&mut buf).await - .map_err(|e| WsDataError::ReadSourceFailed { - source: format!("{}", self.path.display()), - error: e.to_string(), - })?; - - Ok(buf) - } - - fn block_type(&self) -> BatchDataBlockType { - BatchDataBlockType::File - } -} - -pub struct MemDataSource { - data: Arc<[u8]>, -} - -impl MemDataSource { - pub fn new(data: Vec) -> Self { - Self { - data: data.into() - } - } -} - -#[async_trait] -impl DataSource for MemDataSource { - async fn size(&self) -> WSResult { - Ok(self.data.len()) - } - - async fn read_chunk(&self, offset: usize, size: usize) -> WSResult> { - if offset + size > self.data.len() { - return Err(WsDataError::ReadSourceFailed { - source: "memory".into(), - error: "read beyond bounds".into(), - }.into()); - } - Ok(self.data[offset..offset + size].to_vec()) - } - - fn block_type(&self) -> BatchDataBlockType { - BatchDataBlockType::Memory - } -} diff --git a/entrypoint.sh b/entrypoint.sh deleted file mode 100755 index 0ad7f50..0000000 --- a/entrypoint.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/bash -set -eo pipefail - -echo "Starting OpenHands..." -if [[ $NO_SETUP == "true" ]]; then - echo "Skipping setup, running as $(whoami)" - "$@" - exit 0 -fi - -if [ "$(id -u)" -ne 0 ]; then - echo "The OpenHands entrypoint.sh must run as root" - exit 1 -fi - -echo "hosts file:" -cat /etc/hosts - -if [ -z "$SANDBOX_USER_ID" ]; then - echo "SANDBOX_USER_ID is not set" - exit 1 -fi - -if [ -z "$WORKSPACE_MOUNT_PATH" ]; then - # This is set to /opt/workspace in the Dockerfile. But if the user isn't mounting, we want to unset it so that OpenHands doesn't mount at all - unset WORKSPACE_BASE -fi - -if [[ "$SANDBOX_USER_ID" -eq 0 ]]; then - echo "Running OpenHands as root" - export RUN_AS_OPENHANDS=false - mkdir -p /root/.cache/ms-playwright/ - if [ -d "/home/openhands/.cache/ms-playwright/" ]; then - mv /home/openhands/.cache/ms-playwright/ /root/.cache/ - fi - "$@" -else - echo "Setting up enduser with id $SANDBOX_USER_ID" - if id "enduser" &>/dev/null; then - echo "User enduser already exists. Skipping creation." - else - if ! 
useradd -l -m -u $SANDBOX_USER_ID -s /bin/bash enduser; then - echo "Failed to create user enduser with id $SANDBOX_USER_ID. Moving openhands user." - incremented_id=$(($SANDBOX_USER_ID + 1)) - usermod -u $incremented_id openhands - if ! useradd -l -m -u $SANDBOX_USER_ID -s /bin/bash enduser; then - echo "Failed to create user enduser with id $SANDBOX_USER_ID for a second time. Exiting." - exit 1 - fi - fi - fi - usermod -aG app enduser - # get the user group of /var/run/docker.sock and set openhands to that group - DOCKER_SOCKET_GID=$(stat -c '%g' /var/run/docker.sock) - echo "Docker socket group id: $DOCKER_SOCKET_GID" - if getent group $DOCKER_SOCKET_GID; then - echo "Group with id $DOCKER_SOCKET_GID already exists" - else - echo "Creating group with id $DOCKER_SOCKET_GID" - groupadd -g $DOCKER_SOCKET_GID docker - fi - - mkdir -p /home/enduser/.cache/huggingface/hub/ - mkdir -p /home/enduser/.cache/ms-playwright/ - if [ -d "/home/openhands/.cache/ms-playwright/" ]; then - mv /home/openhands/.cache/ms-playwright/ /home/enduser/.cache/ - fi - - usermod -aG $DOCKER_SOCKET_GID enduser - echo "Running as enduser" - su enduser /bin/bash -c "${*@Q}" # This magically runs any arguments passed to the script as a command -fi \ No newline at end of file diff --git a/review.md b/review.md deleted file mode 100644 index 6c016c1..0000000 --- a/review.md +++ /dev/null @@ -1,498 +0,0 @@ -# 代码修改清单 - -## 1. 删除代码 -```rust -// 1. src/main/src/general/data/m_data_general/batch.rs 中删除 -// 1.1 删除 BatchManager -// pub(super) struct BatchManager { -// transfers: DashMap, -// sequence: AtomicU64, -// } - -// impl BatchManager { -// pub fn new() -> Self -// pub fn next_sequence(&self) -> u64 -// pub async fn create_transfer(...) -// pub async fn handle_block(...) -// } - -// 1.2 删除 BatchTransfer -// pub(super) struct BatchTransfer { -// pub unique_id: Vec, -// pub version: u64, -// pub block_type: proto::BatchDataBlockType, -// pub total_blocks: u32, -// data_sender: mpsc::Sender>, -// write_task: JoinHandle>, -// pub tx: Option>>, -// } - -// impl BatchTransfer { -// pub async fn new(...) -// pub async fn add_block(...) -// pub async fn complete(...) -// fn calculate_splits(...) -// } - -// 2. src/main/src/general/data/m_data_general/mod.rs 中删除 -// struct DataGeneral { -// batch_manager: Arc, // 删除此字段 -// } - -// DataGeneral::new() 中删除 -// batch_manager: Arc::new(BatchManager::new()), -``` - -## 2. 
新增代码 - -### src/main/src/result.rs -```rust -pub enum WsDataError { - // 修改错误类型 - BatchTransferFailed { - request_id: proto::BatchRequestId, // 改为 request_id - reason: String, - }, - BatchTransferNotFound { - request_id: proto::BatchRequestId, // 改为 request_id - }, - BatchTransferError { - request_id: proto::BatchRequestId, // 改为 request_id - msg: String, - }, - WriteDataFailed { - request_id: proto::BatchRequestId, - }, - SplitTaskFailed { - request_id: proto::BatchRequestId, - idx: DataSplitIdx, - }, - VersionMismatch { - expected: u64, - actual: u64, - }, -} -``` - -### src/main/src/general/data/m_data_general/task.rs -```rust -// 写入任务句柄,用于提交新的分片任务 -pub struct WriteSplitDataTaskHandle { - tx: mpsc::Sender>, - write_type: WriteSplitDataType, - version: u64, // 添加版本号字段 -} - -// 写入类型 -enum WriteSplitDataType { - File { - path: PathBuf, - }, - Mem { - shared_mem: SharedMemHolder, - }, -} - -impl WriteSplitDataTaskHandle { - // 获取版本号 - pub fn version(&self) -> u64 { - self.version - } - - // 提交新的分片任务 - pub async fn submit_split(&self, idx: DataSplitIdx, data: proto::DataItem) -> WSResult<()> { - let task = match &self.write_type { - WriteSplitDataType::File { path } => { - let path = path.clone(); - let offset = idx.offset; - let data = data.as_bytes().to_vec(); - tokio::spawn(async move { - if let Err(e) = tokio::fs::OpenOptions::new() - .create(true) - .write(true) - .open(&path) - .await - .and_then(|mut file| async move { - file.seek(SeekFrom::Start(offset)).await?; - file.write_all(&data).await - }) - .await - { - tracing::error!("Failed to write file data at offset {}: {}", offset, e); - } - }) - } - WriteSplitDataType::Mem { shared_mem } => { - let mem = shared_mem.clone(); - let offset = idx.offset as usize; - let data = data.as_bytes().to_vec(); - tokio::spawn(async move { - if let Err(e) = mem.write(offset, &data).await { - tracing::error!("Failed to write memory data at offset {}: {}", offset, e); - } - }) - } - }; - - self.tx.send(task).await.map_err(|e| { - tracing::error!("Failed to submit task: channel closed, idx: {:?}", idx); - WSError::WsDataError(WsDataError::BatchTransferFailed { - request_id: idx.into(), // 需要实现 From for BatchRequestId - reason: "Failed to submit task: channel closed".to_string() - }) - }) - } - - /// 等待所有已提交的写入任务完成 - pub async fn wait_all_tasks(self) -> WSResult<()> { - // 关闭发送端,不再接收新任务 - drop(self.tx); - - Ok(()) - } -} - -// 写入任务组 -enum WriteSplitDataTaskGroup { - // 文件写入模式 - ToFile { - unique_id: UniqueId, // 任务唯一标识 - file_path: PathBuf, // 文件路径 - tasks: Vec>, // 写入任务列表 - rx: mpsc::Receiver>, // 任务接收通道 - expected_size: usize, // 预期总大小 - current_size: usize, // 当前写入大小 - }, - // 内存写入模式 - ToMem { - unique_id: UniqueId, // 任务唯一标识 - shared_mem: SharedMemHolder, // 共享内存 - tasks: Vec>, // 写入任务列表 - rx: mpsc::Receiver>, // 任务接收通道 - expected_size: usize, // 预期总大小 - current_size: usize, // 当前写入大小 - } -} - -impl WriteSplitDataTaskGroup { - // 创建新任务组 - async fn new( - unique_id: UniqueId, - splits: Vec>, - block_type: proto::BatchDataBlockType, - version: u64, // 添加版本号参数 - ) -> (Self, WriteSplitDataTaskHandle) { - // 计算预期总大小 - let expected_size = splits.iter().map(|range| range.len()).sum(); - - // 创建通道 - let (tx, rx) = mpsc::channel(32); - - match block_type { - proto::BatchDataBlockType::File => { - let file_path = PathBuf::from(format!("{}.data", - base64::engine::general_purpose::STANDARD.encode(&unique_id))); - - let handle = WriteSplitDataTaskHandle { - tx, - write_type: WriteSplitDataType::File { - path: file_path.clone(), - }, - version, // 设置版本号 - }; - - 
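                // The handle above shares `tx` with this group's `rx`: each
                // submit_split() call spawns a write task and sends its JoinHandle
                // through the channel, and process_tasks() below collects and awaits
                // those handles until current_size reaches expected_size.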
let group = Self::ToFile { - unique_id, - file_path, - tasks: Vec::new(), - rx, - expected_size, - current_size: 0, - }; - - (group, handle) - } - _ => { - let shared_mem = new_shared_mem(&splits).unwrap_or_default(); - - let handle = WriteSplitDataTaskHandle { - tx, - write_type: WriteSplitDataType::Mem { - shared_mem: shared_mem.clone(), - }, - version, // 设置版本号 - }; - - let group = Self::ToMem { - unique_id, - shared_mem, - tasks: Vec::new(), - rx, - expected_size, - current_size: 0, - }; - - (group, handle) - } - } - } - - // 任务处理循环 - async fn process_tasks(&mut self) -> WSResult { - loop { - // 检查是否已完成所有写入 - if let Some(result) = self.try_complete() { - return Ok(result); - } - - // 等待新任务或已有任务完成 - tokio::select! { - Some(new_task) = match self { - Self::ToFile { rx, .. } | - Self::ToMem { rx, .. } => rx.recv() - } => { - match self { - Self::ToFile { tasks, .. } | - Self::ToMem { tasks, .. } => { - tasks.push(new_task); - // 不需要更新current_size,因为是在任务完成时更新 - } - } - } - Some(completed_task) = futures::future::select_all(match self { - Self::ToFile { tasks, .. } | - Self::ToMem { tasks, .. } => tasks - }) => { - // 检查任务是否成功完成 - if let Err(e) = completed_task.0 { - tracing::error!("Task failed: {}", e); - return Err(WSError::WsDataError(WsDataError::BatchTransferFailed { - request_id: match self { - Self::ToFile { unique_id, .. } | - Self::ToMem { unique_id, .. } => unique_id.clone() - }, - reason: format!("Task failed: {}", e) - })); - } - // 从任务列表中移除已完成的任务 - match self { - Self::ToFile { tasks, current_size, .. } | - Self::ToMem { tasks, current_size, .. } => { - tasks.remove(completed_task.1); - // 更新当前大小 - *current_size += DEFAULT_BLOCK_SIZE; // 每个任务写入一个块 - } - } - } - None = match self { - Self::ToFile { rx, .. } | - Self::ToMem { rx, .. } => rx.recv() - } => { - // 通道关闭,直接退出 - break; - } - } - } - - Err(WSError::WsDataError(WsDataError::BatchTransferFailed { - request_id: match self { - Self::ToFile { unique_id, .. } | - Self::ToMem { unique_id, .. } => unique_id.clone() - }, - reason: "Channel closed".to_string() - })) - } - - /// 检查是否已完成所有写入 - fn try_complete(&self) -> Option { - match self { - Self::ToFile { current_size, expected_size, file_path, .. } => { - if *current_size >= *expected_size { - // 所有数据已写入,返回文件数据项 - Some(proto::DataItem::new_file_data(file_path.clone())) - } else { - None - } - } - Self::ToMem { current_size, expected_size, shared_mem, .. } => { - if *current_size >= *expected_size { - // 所有数据已写入,返回内存数据项 - Some(proto::DataItem::new_mem_data(shared_mem.clone())) - } else { - None - } - } - } - } -} - -/// DataItem 数据源 -pub enum DataItemSource { - Memory { - data: Arc>, - }, - File { - path: String, - }, -} - -DataItemSource 采用枚举设计,优点: -1. 类型安全:使用枚举确保数据源类型的互斥性 -2. 内存效率:文件类型只存储路径,避免一次性加载 -3. 延迟读取:只在实际需要时才读取文件数据 -4. 
符合分层:配合 WriteSplitDataTaskGroup 的文件/内存写入流程 - -实现了 DataSource trait: -- size(): 获取数据总大小 -- read_chunk(): 读取指定范围的数据 -- block_type(): 返回对应的 BlockType -``` - -### src/main/src/general/data/m_data_general/mod.rs -```rust -/// 共享状态,用于记录最新的请求响应器 -#[derive(Clone)] -struct SharedWithBatchHandler { - responsor: Arc>>>, -} - -impl SharedWithBatchHandler { - fn new() -> Self { - Self { - responsor: Arc::new(Mutex::new(None)), - } - } - - async fn update_responsor(&self, responsor: RPCResponsor) { - let mut guard = self.responsor.lock().await; - if let Some(old_responsor) = guard.take() { - // 旧的responsor直接返回成功 - if let Err(e) = old_responsor.response(Ok(())).await { - tracing::error!("Failed to respond to old request: {}", e); - } - } - *guard = Some(responsor); - } - - async fn get_final_responsor(&self) -> Option> { - self.responsor.lock().await.take() - } -} - -/// 批量数据传输状态 -struct BatchReceiveState { - handle: WriteSplitDataTaskHandle, - shared: SharedWithBatchHandler, -} - -pub struct DataGeneral { - // 批量数据接收状态管理 - batch_receive_states: DashMap, - // ... 其他字段 -} - -impl DataGeneral { - pub fn new() -> Self { - Self { - batch_receive_states: DashMap::new(), - // ... 其他字段初始化 - } - } - - /// 处理批量数据写入请求 - pub async fn rpc_handle_batch_data( - &self, - request: BatchDataRequest, - responsor: RPCResponsor, - ) -> WSResult<()> { - let state = if let Some(state) = self.batch_receive_states.get(&request.request_id) { - // 验证版本号 - if state.handle.version() != request.version { - tracing::error!( - "Version mismatch for transfer {:?}, expected {}, got {}", - request.request_id, - state.handle.version(), - request.version - ); - return Err(WSError::WsDataError(WsDataError::BatchTransferError { - request_id: request.request_id, - msg: format!("Version mismatch, expected {}, got {}", - state.handle.version(), request.version) - })); - } - state - } else { - // 创建新的写入任务组 - let (group, handle) = WriteSplitDataTaskGroup::new( - request.unique_id.clone(), - calculate_splits(request.total_blocks), - request.block_type, - request.version, // 传递版本号 - ).await?; - - // 创建共享状态 - let shared = SharedWithBatchHandler::new(); - let state = BatchReceiveState { handle: handle.clone(), shared: shared.clone() }; - - // 启动等待完成的任务 - let request_id = request.request_id.clone(); // 使用 request_id - let batch_receive_states = self.batch_receive_states.clone(); - tokio::spawn(async move { - // 等待所有任务完成 - if let Err(e) = handle.wait_all_tasks().await { - tracing::error!( - "Failed to complete transfer {:?}: {}", - request_id, // 使用 request_id - e - ); - // 获取最后的responsor并返回错误 - if let Some(final_responsor) = shared.get_final_responsor().await { - if let Err(e) = final_responsor.response(Err(e)).await { - tracing::error!("Failed to send error response: {}", e); - } - } - // 清理状态 - batch_receive_states.remove(&request_id); // 使用 request_id - return; - } - - // 获取最后的responsor并返回成功 - if let Some(final_responsor) = shared.get_final_responsor().await { - if let Err(e) = final_responsor.response(Ok(())).await { - tracing::error!("Failed to send success response: {}", e); - } - } - // 清理状态 - batch_receive_states.remove(&request_id); // 使用 request_id - }); - - // 插入新状态 - self.batch_receive_states.insert(request.request_id.clone(), state); - self.batch_receive_states.get(&request.request_id).unwrap() - }; - - // 2. 
使用WriteSplitDataTaskHandle提交写入任务 - let offset = request.block_index as usize * DEFAULT_BLOCK_SIZE; // 使用 block_index - - if let Err(e) = state.handle.submit_split(offset, request.data).await { - tracing::error!( - "Failed to submit split for transfer {:?}, block {}: {}", - request.request_id, - request.block_index, // 使用 block_index - e - ); - return Err(e); - } - - // 3. 更新共享状态中的responsor - state.shared.update_responsor(responsor).await; - - tracing::debug!( - "Successfully submitted block {} for transfer {:?}", - request.block_index, - request.request_id - ); - - Ok(()) - } -} \ No newline at end of file diff --git a/scripts/build/1.1build_core.py b/scripts/build/1.1build_core.py index 7dbf134..7ff215d 100644 --- a/scripts/build/1.1build_core.py +++ b/scripts/build/1.1build_core.py @@ -37,6 +37,5 @@ def print_title(title): os_system_sure("mkdir -p pack/waverless_backend") BACKEND_PATH=os.path.abspath("pack/waverless_backend") os.chdir("../../") -os_system_sure("rustup default 1.79.0") os_system_sure("$HOME/.cargo/bin/cargo build --release") -os_system_sure(f"cp target/release/wasm_serverless {BACKEND_PATH}") +os_system_sure(f"cp target/release/wasm_serverless {BACKEND_PATH}") \ No newline at end of file diff --git a/scripts/build/1.2build_apps.py b/scripts/build/1.2build_apps.py index edcb878..360b024 100644 --- a/scripts/build/1.2build_apps.py +++ b/scripts/build/1.2build_apps.py @@ -98,7 +98,7 @@ def cp_app_program(prj_dir,app): def pack_app(prj_dir,app,prjyml): - print_title(f"packing {prj_dir} {app}") + print_title(f"pack {prj_dir} {app}") os_system_sure(f"mkdir -p ../../scripts/build/pack/apps/{app}") app_yml={"fns":prjyml[app]} # write to app.yml @@ -106,12 +106,10 @@ def pack_app(prj_dir,app,prjyml): f.write(yaml.dump(app_yml)) # cp program cp_app_program(prj_dir,app) - print_title(f"packed {prj_dir} {app}") def pack_demo(app): - print_title(f"packing demo {app}") prj_dir=os.path.abspath(f"../../demos/{app}") os.chdir(prj_dir) # check Cargo.toml in the current directory @@ -135,7 +133,6 @@ def open_app_conf(app): for app in conf: pack_app(prj_dir,app,conf) - print_title(f"packed demo {app}") # if os.path.exists("pom.xml"): # os_system_sure("mvn clean package") # os_system_sure("cp target/*.jar ../pack/apps") diff --git a/scripts/build/template/run_node.py b/scripts/build/template/run_node.py index 7f5422d..042abe3 100644 --- a/scripts/build/template/run_node.py +++ b/scripts/build/template/run_node.py @@ -1,4 +1,3 @@ -#!/usr/bin/python3 # NODE_ID=$1 # wasm_serverless $NODE_ID test_dir diff --git a/scripts/deploy_cluster/node_config.yaml b/scripts/deploy_cluster/node_config.yaml index c7d83ba..7f54c57 100644 --- a/scripts/deploy_cluster/node_config.yaml +++ b/scripts/deploy_cluster/node_config.yaml @@ -1,11 +1,11 @@ nodes: - 2: - addr: 192.168.31.87:2500 + 9: + addr: 192.168.31.9:2500 spec: - meta - master - 3: - addr: 192.168.31.96:2500 + 10: + addr: 192.168.31.240:2500 spec: - meta - worker diff --git a/scripts/deploy_single_node/node_config.yaml b/scripts/deploy_single_node/node_config.yaml index cd5aa06..78aef3a 100644 --- a/scripts/deploy_single_node/node_config.yaml +++ b/scripts/deploy_single_node/node_config.yaml @@ -3,5 +3,5 @@ nodes: addr: 127.0.0.1:2600 spec: [meta,master] 2: - addr: 192.168.31.240:2602 + addr: 127.0.0.1:2605 spec: [meta,worker] diff --git a/scripts/mount_s3fs.sh b/scripts/mount_s3fs.sh deleted file mode 100644 index 2e22278..0000000 --- a/scripts/mount_s3fs.sh +++ /dev/null @@ -1,3 +0,0 @@ -umount /mnt/s3fs -s3fs s3fs /mnt/s3fs -o 
passwd_file=/root/.passwd-s3fs -o url=http://127.0.0.1:9000 -o use_path_request_style -o umask=0022,uid=$(id -u),gid=$(id -g) -o use_cache=/var/cache/s3fs -echo "mount s3fs success" \ No newline at end of file diff --git a/scripts/sync_md_files.py b/scripts/sync_md_files.py deleted file mode 100644 index 97879e3..0000000 --- a/scripts/sync_md_files.py +++ /dev/null @@ -1,245 +0,0 @@ -#!/usr/bin/env python3 -import json -import os -import sys -from datetime import datetime -from typing import List, Dict, Optional - -class Node: - def __init__(self, data: dict): - self.data = data - self.children = [] - self.parent = None - - @property - def id(self) -> str: - return self.data.get('id', '') - - @property - def type(self) -> str: - return self.data.get('type', '') - - @property - def x(self) -> float: - return float(self.data.get('x', 0)) - - @property - def y(self) -> float: - return float(self.data.get('y', 0)) - - @property - def width(self) -> float: - return float(self.data.get('width', 0)) - - @property - def height(self) -> float: - return float(self.data.get('height', 0)) - - def contains(self, other: 'Node') -> bool: - """判断当前节点是否在空间上包含另一个节点""" - if self.type != 'group': - return False - - # 考虑边界重叠的情况 - return (other.x >= self.x - 1 and - other.y >= self.y - 1 and - other.x + other.width <= self.x + self.width + 1 and - other.y + other.height <= self.y + self.height + 1) - - def to_dict(self) -> dict: - """转换为字典格式""" - result = self.data.copy() - if self.children: - result['children'] = [child.to_dict() for child in self.children] - return result - - def to_flat_dict(self) -> List[dict]: - """转换为扁平的字典列表""" - result = [] - if self.type != 'root': # 不包含根节点 - node_data = self.data.copy() - if 'children' in node_data: - del node_data['children'] # 移除children字段 - result.append(node_data) - for child in self.children: - result.extend(child.to_flat_dict()) - return result - -def tree_to_flat_nodes(tree_data: dict) -> List[dict]: - """将树状结构转换为扁平的节点列表""" - result = [] - - # 处理当前节点 - if tree_data.get('type') != 'root': - node_data = tree_data.copy() - if 'children' in node_data: - del node_data['children'] - result.append(node_data) - - # 递归处理子节点 - for child in tree_data.get('children', []): - result.extend(tree_to_flat_nodes(child)) - - return result - -class CanvasData: - def __init__(self, data: dict): - self.nodes = [] - self.groups = [] - self.edges = [] - self.parse_data(data) - - def parse_data(self, data: dict): - """解析canvas数据""" - # 处理所有节点 - for item in data: - node = Node(item) - self.nodes.append(node) - if node.type == 'group': - self.groups.append(node) - - def find_best_parent(self, node: Node) -> Optional[Node]: - """为节点找到最佳的父节点""" - candidates = [] - for group in self.groups: - if group.contains(node) and group != node: - candidates.append(group) - - if not candidates: - return None - - # 选择面积最小的包含组作为父节点 - return min(candidates, - key=lambda g: g.width * g.height) - - def build_tree(self) -> Node: - """构建树状结构""" - # 创建虚拟根节点 - root = Node({ - 'id': 'root', - 'type': 'root', - }) - - # 按面积从大到小排序groups - self.groups.sort(key=lambda g: g.width * g.height, reverse=True) - - # 构建节点关系 - assigned_nodes = set() - - # 先处理groups之间的关系 - for group in self.groups: - parent = self.find_best_parent(group) - if parent: - parent.children.append(group) - group.parent = parent - assigned_nodes.add(group.id) - else: - root.children.append(group) - group.parent = root - assigned_nodes.add(group.id) - - # 处理剩余节点 - for node in self.nodes: - if node.id not in assigned_nodes: - parent = 
self.find_best_parent(node) - if parent: - parent.children.append(node) - node.parent = parent - else: - root.children.append(node) - node.parent = root - - return root - - def to_tree_json(self) -> dict: - """转换为树状JSON结构""" - root = self.build_tree() - return root.to_dict() - - def to_flat_json(self) -> List[dict]: - """转换为扁平JSON结构""" - root = self.build_tree() - return root.to_flat_dict() - -def backup_file(file_path: str): - """备份文件""" - if os.path.exists(file_path): - timestamp = datetime.now().strftime('%Y%m%d%H%M%S') - backup_path = f"{file_path}.{timestamp}.bak" - os.rename(file_path, backup_path) - print(f"Backup {file_path} to {backup_path}") - -def sync_from_s3fs(): - """从s3fs同步到本地,并生成树状结构""" - s3fs_dir = "/mnt/s3fs/waverless" - local_dir = "/root/prjs/waverless" - - print(f"Starting sync from {s3fs_dir} to {local_dir}") - - # 同步canvas文件 - canvas_path = os.path.join(local_dir, "design.canvas") - s3fs_canvas_path = os.path.join(s3fs_dir, "design.canvas") - - if os.path.exists(s3fs_canvas_path): - # 备份当前文件 - backup_file(canvas_path) - - # 读取s3fs中的canvas - with open(s3fs_canvas_path, 'r', encoding='utf-8') as f: - canvas_data = json.load(f) - - # 生成树状结构 - canvas = CanvasData(canvas_data.get('nodes', [])) - tree_data = canvas.to_tree_json() - - # 保存树状结构 - tree_path = os.path.join(local_dir, "design.json") - with open(tree_path, 'w', encoding='utf-8') as f: - json.dump(tree_data, f, ensure_ascii=False, indent=2) - - # 保存原始canvas - with open(canvas_path, 'w', encoding='utf-8') as f: - json.dump(canvas_data, f, ensure_ascii=False, indent=2) - -def sync_to_s3fs(): - """从本地同步到s3fs,将树状结构转换回扁平结构""" - s3fs_dir = "/mnt/s3fs/waverless" - local_dir = "/root/prjs/waverless" - - print(f"Starting sync from {local_dir} to {s3fs_dir}") - - # 读取树状结构 - tree_path = os.path.join(local_dir, "design.json") - if not os.path.exists(tree_path): - print(f"Tree file {tree_path} not found") - return - - with open(tree_path, 'r', encoding='utf-8') as f: - tree_data = json.load(f) - - # 直接将树状结构转换为扁平节点列表 - flat_nodes = tree_to_flat_nodes(tree_data) - - # 保存到s3fs - s3fs_canvas_path = os.path.join(s3fs_dir, "design.canvas") - backup_file(s3fs_canvas_path) - - with open(s3fs_canvas_path, 'w', encoding='utf-8') as f: - json.dump({'nodes': flat_nodes}, f, ensure_ascii=False, indent=2) - -def main(): - if len(sys.argv) != 2: - print("Usage: python3 sync_md_files.py [from_s3fs|to_s3fs]") - sys.exit(1) - - command = sys.argv[1] - if command == "from_s3fs": - sync_from_s3fs() - elif command == "to_s3fs": - sync_to_s3fs() - else: - print(f"Unknown command: {command}") - sys.exit(1) - -if __name__ == "__main__": - main() diff --git a/scripts/test_design_json_tool.py b/scripts/test_design_json_tool.py deleted file mode 100644 index b3b761e..0000000 --- a/scripts/test_design_json_tool.py +++ /dev/null @@ -1,240 +0,0 @@ -#!/usr/bin/env python3 -import os -import json -import shutil -import unittest -from scripts.design_json_tool import DesignJson, Node - -class TestDesignJsonTool(unittest.TestCase): - def setUp(self): - """测试前准备工作""" - # 创建测试用的JSON文件 - self.test_json_path = 'test_design.json' - self.test_data = { - "id": "root", - "type": "root", - "children": [ - { - "id": "group1", - "type": "group", - "label": "测试组1", - "children": [ - { - "id": "node1", - "type": "text", - "text": "测试节点1" - } - ] - } - ], - "edges": [] - } - with open(self.test_json_path, 'w', encoding='utf-8') as f: - json.dump(self.test_data, f, ensure_ascii=False, indent=2) - - self.design = DesignJson(self.test_json_path) - - def tearDown(self): - 
"""测试后清理工作""" - if os.path.exists(self.test_json_path): - os.remove(self.test_json_path) - - def test_read_all(self): - """测试读取整个JSON""" - root = self.design.root - self.assertEqual(root.id, "root") - self.assertEqual(root.type, "root") - self.assertEqual(len(root.children), 1) - - def test_read_node(self): - """测试读取单个节点""" - node = self.design.get_node("node1") - self.assertIsNotNone(node) - self.assertEqual(node.type, "text") - self.assertEqual(node.text, "测试节点1") - - def test_read_group(self): - """测试读取组内容""" - nodes = self.design.get_group_nodes("group1") - self.assertEqual(len(nodes), 1) - self.assertEqual(nodes[0].id, "node1") - - def test_create_node(self): - """测试创建新节点""" - node_data = { - "id": "new_node", - "type": "text", - "text": "新建节点" - } - node_id = self.design.create_node(node_data) - self.assertEqual(node_id, "new_node") - node = self.design.get_node(node_id) - self.assertIsNotNone(node) - self.assertEqual(node.text, "新建节点") - - def test_update_node(self): - """测试更新节点""" - updates = {"text": "更新后的文本"} - success = self.design.update_node("node1", updates) - self.assertTrue(success) - node = self.design.get_node("node1") - self.assertEqual(node.text, "更新后的文本") - - def test_move_to_group(self): - """测试移动节点到组""" - # 先创建新组 - group_data = { - "id": "group2", - "type": "group", - "label": "测试组2" - } - self.design.create_node(group_data) - - # 移动节点 - success = self.design.move_to_group("node1", "group2") - self.assertTrue(success) - - # 验证移动结果 - nodes = self.design.get_group_nodes("group2") - self.assertEqual(len(nodes), 1) - self.assertEqual(nodes[0].id, "node1") - - def test_edges(self): - """测试边操作""" - # 添加边 - success = self.design.add_edge("node1", "group1", "test_edge") - self.assertTrue(success) - - # 验证入度 - incoming = self.design.get_incoming_nodes("group1") - self.assertEqual(len(incoming), 1) - self.assertEqual(incoming[0], ("node1", "test_edge")) - - # 验证出度 - outgoing = self.design.get_outgoing_nodes("node1") - self.assertEqual(len(outgoing), 1) - self.assertEqual(outgoing[0], ("group1", "test_edge")) - - # 删除边 - success = self.design.remove_edge("node1", "group1", "test_edge") - self.assertTrue(success) - - # 验证边已删除 - incoming = self.design.get_incoming_nodes("group1") - self.assertEqual(len(incoming), 0) - - def test_nonexistent_node(self): - """测试操作不存在的节点""" - # 读取不存在的节点 - node = self.design.get_node("nonexistent") - self.assertIsNone(node) - - # 更新不存在的节点 - success = self.design.update_node("nonexistent", {"text": "新文本"}) - self.assertFalse(success) - - # 移动不存在的节点 - success = self.design.move_to_group("nonexistent", "group1") - self.assertFalse(success) - - # 添加包含不存在节点的边 - success = self.design.add_edge("nonexistent", "node1") - self.assertFalse(success) - - def test_duplicate_operations(self): - """测试重复操作""" - # 重复创建同ID节点 - node_data = { - "id": "node1", # 已存在的ID - "type": "text", - "text": "重复节点" - } - original_node = self.design.get_node("node1") - node_id = self.design.create_node(node_data) - self.assertEqual(node_id, "node1") - # 验证节点内容未被覆盖 - node = self.design.get_node("node1") - self.assertEqual(node.text, original_node.text) - - # 重复添加相同的边 - self.design.add_edge("node1", "group1", "test_edge") - success = self.design.add_edge("node1", "group1", "test_edge") - self.assertTrue(success) # 添加成功但不会重复 - incoming = self.design.get_incoming_nodes("group1") - self.assertEqual(len(incoming), 1) # 只有一条边 - - def test_nested_groups(self): - """测试嵌套组操作""" - # 创建嵌套的组结构 - group2_data = { - "id": "group2", - "type": "group", - "label": "测试组2" - } - group3_data = { - "id": 
"group3", - "type": "group", - "label": "测试组3" - } - self.design.create_node(group2_data) - self.design.create_node(group3_data) - - # 将group3移动到group2中 - success = self.design.move_to_group("group3", "group2") - self.assertTrue(success) - - # 验证嵌套结构 - nodes = self.design.get_group_nodes("group2") - self.assertEqual(len(nodes), 1) - self.assertEqual(nodes[0].id, "group3") - - # 将节点移动到最内层组 - success = self.design.move_to_group("node1", "group3") - self.assertTrue(success) - - # 验证节点位置 - nodes = self.design.get_group_nodes("group3") - self.assertEqual(len(nodes), 1) - self.assertEqual(nodes[0].id, "node1") - - def test_save_and_load(self): - """测试保存和加载功能""" - # 修改数据 - self.design.update_node("node1", {"text": "修改后的文本"}) - self.design.add_edge("node1", "group1", "test_edge") - - # 保存文件 - self.design.save() - - # 重新加载 - new_design = DesignJson(self.test_json_path) - - # 验证修改是否保持 - node = new_design.get_node("node1") - self.assertEqual(node.text, "修改后的文本") - - incoming = new_design.get_incoming_nodes("group1") - self.assertEqual(len(incoming), 1) - self.assertEqual(incoming[0], ("node1", "test_edge")) - - def test_invalid_operations(self): - """测试无效操作""" - # 测试移动到非组节点 - success = self.design.move_to_group("node1", "node1") # node1不是组 - self.assertFalse(success) - - # 测试更新不存在的属性 - success = self.design.update_node("node1", {"nonexistent_attr": "value"}) - self.assertTrue(success) # 更新成功但属性未添加 - node = self.design.get_node("node1") - self.assertFalse(hasattr(node, "nonexistent_attr")) - - # 测试创建缺少必要属性的节点 - invalid_node = { - "type": "text" # 缺少id - } - with self.assertRaises(KeyError): - self.design.create_node(invalid_node) - -if __name__ == '__main__': - unittest.main() \ No newline at end of file diff --git a/src/_back/storage/kv/raft_kv/tikvraft_kernel/mod.rs b/src/_back/storage/kv/raft_kv/tikvraft_kernel/mod.rs index fcd136c..2c8b640 100644 --- a/src/_back/storage/kv/raft_kv/tikvraft_kernel/mod.rs +++ b/src/_back/storage/kv/raft_kv/tikvraft_kernel/mod.rs @@ -138,7 +138,7 @@ impl RaftThreadState { tracing::info!("proprose join"); self.proprosed_join = true; let mut steps = vec![]; - for p in &self.view.p2p() { + for p in &self.view.p2p().peers { steps.push(ConfChangeSingle { change_type: ConfChangeType::AddNode.into(), node_id: p.1 as u64, diff --git a/src/main/src/apis.rs b/src/apis.rs similarity index 100% rename from src/main/src/apis.rs rename to src/apis.rs diff --git a/src/main/src/cmd_arg.rs b/src/cmd_arg.rs similarity index 100% rename from src/main/src/cmd_arg.rs rename to src/cmd_arg.rs diff --git a/src/main/src/config.rs b/src/config.rs similarity index 95% rename from src/main/src/config.rs rename to src/config.rs index 601b985..d5894a4 100644 --- a/src/main/src/config.rs +++ b/src/config.rs @@ -116,10 +116,8 @@ pub struct YamlConfig { } fn read_yaml_config(file_path: impl AsRef) -> YamlConfig { - tracing::info!("Running at dir: {:?}", std::env::current_dir()); - let path = file_path.as_ref().to_owned(); let file = std::fs::File::open(file_path).unwrap_or_else(|err| { - panic!("open config file {:?} failed, err: {:?}", path, err); + panic!("open config file failed, err: {:?}", err); }); serde_yaml::from_reader(file).unwrap_or_else(|e| { panic!("parse yaml config file failed, err: {:?}", e); diff --git a/src/data/dataitem.rs b/src/data/dataitem.rs deleted file mode 100644 index 3dc947e..0000000 --- a/src/data/dataitem.rs +++ /dev/null @@ -1,31 +0,0 @@ -/// A waiter for tracking completion of all write split data tasks -pub struct WriteSplitDataWaiter { - rx: 
broadcast::Receiver<()>, - total_tasks: usize, -} - -impl WriteSplitDataWaiter { - /// Wait for all tasks to complete - pub async fn wait(mut self) -> WSResult<()> { - let mut completed = 0; - while completed < self.total_tasks { - self.rx.recv().await.map_err(|e| { - WsDataError::WaitTaskError { - reason: format!("Failed to receive task completion: {}", e) - } - })?; - completed += 1; - } - Ok(()) - } -} - -impl Handle { - /// Gets a waiter that will complete when all tasks are finished - pub fn get_all_tasks_waiter(&self) -> WriteSplitDataWaiter { - WriteSplitDataWaiter { - rx: self.task_complete_tx.subscribe(), - total_tasks: self.tasks.lock().unwrap().len(), - } - } -} \ No newline at end of file diff --git a/src/data/write_split.rs b/src/data/write_split.rs deleted file mode 100644 index 22bf8b2..0000000 --- a/src/data/write_split.rs +++ /dev/null @@ -1,40 +0,0 @@ -/// A waiter for tracking completion of all write split data tasks -pub struct WriteSplitDataWaiter { - rx: broadcast::Receiver<()>, - total_tasks: usize, -} - -impl WriteSplitDataWaiter { - /// Wait for all tasks to complete - pub async fn wait(mut self) -> WSResult<()> { - let mut completed = 0; - while completed < self.total_tasks { - self.rx.recv().await.map_err(|e| { - WsDataError::WaitTaskError { - reason: format!("Failed to receive task completion: {}", e) - } - })?; - completed += 1; - } - Ok(()) - } -} - -impl Handle { - /// Gets a waiter that will complete when all tasks are finished - pub fn get_all_tasks_waiter(&self) -> WriteSplitDataWaiter { - WriteSplitDataWaiter { - rx: self.task_complete_tx.subscribe(), - total_tasks: self.tasks.lock().unwrap().len(), - } - } -} - -// 需要在 errors.rs 中添加新的错误类型 -#[derive(Debug)] -pub enum WsDataError { - // ... existing errors ... - WaitTaskError { - reason: String, - }, -} \ No newline at end of file diff --git a/src/errors.rs b/src/errors.rs deleted file mode 100644 index b4aca1a..0000000 --- a/src/errors.rs +++ /dev/null @@ -1,18 +0,0 @@ -#[derive(Debug)] -pub enum WsDataError { - // ... existing errors ... - WaitTaskError { - reason: String, - }, -} - -impl std::fmt::Display for WsDataError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - // ... existing error matches ... 
- WsDataError::WaitTaskError { reason } => { - write!(f, "Failed to wait for tasks: {}", reason) - } - } - } -} \ No newline at end of file diff --git a/src/main/src/general/data/kv_interface.rs b/src/general/kv_interface.rs similarity index 96% rename from src/main/src/general/data/kv_interface.rs rename to src/general/kv_interface.rs index 011a12e..2667dcd 100644 --- a/src/main/src/general/data/kv_interface.rs +++ b/src/general/kv_interface.rs @@ -1,10 +1,11 @@ use crate::{ - general::network::proto, result::WSResult, sys::{LogicalModule, NodeID}, }; use async_trait::async_trait; +use super::network::proto; + pub struct KvOptions { spec_node: Option, } diff --git a/src/general/m_appmeta_manager/fn_event.rs b/src/general/m_appmeta_manager/fn_event.rs new file mode 100644 index 0000000..5df5f80 --- /dev/null +++ b/src/general/m_appmeta_manager/fn_event.rs @@ -0,0 +1,76 @@ +use crate::general::{ + network::proto::sche::distribute_task_req::{Trigger, TriggerKvSet}, +}; + +use super::{ + super::network::proto::{self, kv::KvRequest}, +}; + +pub struct EventTriggerInfo { + pub trigger_appfns: Vec<(String, String)>, + pub kvreq: KvRequest, +} +impl EventTriggerInfo { + pub fn to_trigger(&self, opeid: u32) -> Trigger { + match self.kvreq.op.as_ref().unwrap() { + proto::kv::kv_request::Op::Set(set) => { + let kv = set.kv.as_ref().unwrap(); + Trigger::KvSet(TriggerKvSet { + key: kv.key.clone(), + opeid, + }) + } + _ => unimplemented!(), + } + } +} + +// impl Into for EventTriggerInfo { +// fn into(self) -> Trigger { +// match self.kvreq.op.unwrap() { +// proto::kv::kv_request::Op::Set(set) => Trigger::KvSet(TriggerKvSet{ +// key: +// }), +// _ => unimplemented!(), +// } +// } +// } + +// pub async fn try_match_kv_event( +// app_metas: &AppMetas, +// req: &KvRequest, +// source_app: &str, +// source_fn: &str, +// ) -> Option { +// // find source app +// let Some(appmeta) = app_metas.get_app_meta(source_app).await else { +// tracing::warn!("source app:{} not found", source_app); +// return None; +// }; +// // find source func +// let Some(fnmeta) = appmeta.get_fn_meta(source_fn) else { +// tracing::warn!("app {} source func:{} not found", source_app, source_fn); +// return None; +// }; + +// match req.op.as_ref().unwrap() { +// proto::kv::kv_request::Op::Set(set) => { +// let kv = set.kv.as_ref().unwrap(); +// // match kv pattern +// let Some(pattern) = fnmeta.match_key(&kv.key, KvOps::Set) else { +// return None; +// }; +// // find trigger func +// app_metas +// .pattern_2_app_fn +// .get(&pattern.0) +// .map(|triggers| EventTriggerInfo { +// trigger_appfns: triggers.clone(), +// kvreq: req.clone(), +// }) +// } +// proto::kv::kv_request::Op::Get(_) => None, +// proto::kv::kv_request::Op::Delete(_) => None, +// proto::kv::kv_request::Op::Lock(_) => None, +// } +// } diff --git a/src/main/src/general/app/http.rs b/src/general/m_appmeta_manager/http.rs similarity index 88% rename from src/main/src/general/app/http.rs rename to src/general/m_appmeta_manager/http.rs index aab77e3..76e2f01 100644 --- a/src/main/src/general/app/http.rs +++ b/src/general/m_appmeta_manager/http.rs @@ -13,21 +13,12 @@ lazy_static! 
{ static ref VIEW: Option = None; } fn view() -> &'static super::View { - #[cfg(feature = "unsafe-log")] - tracing::debug!("unsafe http view begin"); - let res = unsafe { util::non_null(&*VIEW).as_ref().as_ref().unwrap() }; - #[cfg(feature = "unsafe-log")] - tracing::debug!("unsafe http view end"); - res + unsafe { util::non_null(&*VIEW).as_ref().as_ref().unwrap() } } pub(super) fn binds(router: Router, view: super::View) -> Router { unsafe { - #[cfg(feature = "unsafe-log")] - tracing::debug!("unsafe http view bind"); let _ = util::non_null(&*VIEW).as_mut().replace(view); - #[cfg(feature = "unsafe-log")] - tracing::debug!("unsafe http view bind end"); } tracing::debug!("binds appmeta_manager http"); router @@ -40,9 +31,7 @@ pub(super) fn binds(router: Router, view: super::View) -> Router { } async fn call_app_fn(Path((app, func)): Path<(String, String)>, body: String) -> Response { - tracing::debug!("handle func request app: {}, func: {}", app, func); if view().p2p().nodes_config.this.1.is_master() { - tracing::debug!("app: {:?}, func: {:?}", app, func); view() .http_handler() .handle_request(&format!("{app}/{func}"), body) diff --git a/src/general/m_appmeta_manager/mod.rs b/src/general/m_appmeta_manager/mod.rs new file mode 100644 index 0000000..68eb25a --- /dev/null +++ b/src/general/m_appmeta_manager/mod.rs @@ -0,0 +1,1056 @@ +pub mod fn_event; +mod http; +mod v_os; + +use self::v_os::AppMetaVisitOs; +use super::{ + m_data_general::DataGeneral, + m_kv_store_engine::{KeyTypeServiceList, KvStoreEngine}, + m_os::OperatingSystem, + network::{ + http_handler::HttpHandler, + m_p2p::P2PModule, + proto::{ + write_one_data_request::{ + data_item::{self, Data}, + DataItem, FileData, + }, + DataMeta, DataModeCache, DataModeDistribute, + }, + }, +}; +use crate::worker::m_executor::Executor; +use crate::{ + general::kv_interface::KvOps, + logical_module_view_impl, + master::m_master::Master, + result::{ErrCvt, WSResult, WsFuncError}, + sys::{LogicalModule, LogicalModuleNewArgs, LogicalModulesRef}, + util::{self, JoinHandleWrapper}, + worker::func::m_instance_manager::InstanceManager, +}; +use async_trait::async_trait; +use axum::body::Bytes; +use enum_as_inner::EnumAsInner; +use serde::{Deserialize, Deserializer, Serialize}; +use std::{ + borrow::Borrow, + collections::{BTreeMap, HashMap}, + fs, + io::Cursor, + path::Path, +}; +use tokio::sync::RwLock; + +use ws_derive::LogicalModule; + +logical_module_view_impl!(View); +logical_module_view_impl!(View, os, OperatingSystem); +logical_module_view_impl!(View, kv_store_engine, KvStoreEngine); +logical_module_view_impl!(View, http_handler, Box); +logical_module_view_impl!(View, appmeta_manager, AppMetaManager); +logical_module_view_impl!(View, p2p, P2PModule); +logical_module_view_impl!(View, master, Option); +logical_module_view_impl!(View, instance_manager, Option); +logical_module_view_impl!(View, data_general, DataGeneral); +logical_module_view_impl!(View, executor, Option); + +#[derive(Debug, Serialize, Deserialize)] +#[serde(untagged)] +pub enum FnEventYaml { + HttpFn { http_fn: () }, + HttpApp { http_app: () }, + KvSet { kv_set: usize }, +} + +#[derive(PartialEq, Eq)] +pub enum FnEvent { + HttpFn, + HttpApp, + KvSet(usize), +} + +impl From for FnEvent { + fn from(yaml: FnEventYaml) -> Self { + match yaml { + FnEventYaml::HttpFn { http_fn: _ } => Self::HttpFn, + FnEventYaml::HttpApp { http_app: _ } => Self::HttpApp, + FnEventYaml::KvSet { kv_set } => Self::KvSet(kv_set), + } + } +} + +// #[derive(Debug, Serialize, Deserialize)] +// 
#[serde(untagged)] +// pub enum FnArgYaml { +// KvKey { kv_key: usize }, +// HttpText { http_text: () }, +// } + +// #[derive(Debug)] +// pub enum FnArg { +// KvKey(usize), +// HttpText, +// } + +// impl From for FnArg { +// fn from(yaml: FnArgYaml) -> Self { +// match yaml { +// FnArgYaml::KvKey { kv_key } => Self::KvKey(kv_key), +// FnArgYaml::HttpText { http_text: _ } => Self::HttpText, +// } +// } +// } + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum HttpMethod { + Get, + Post, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum HttpCall { + Direct, + Indirect, +} + +#[derive(Debug, EnumAsInner, Clone, Serialize, Deserialize)] +pub enum FnCallMeta { + Http { method: HttpMethod, call: HttpCall }, + Rpc, +} + +#[derive(Debug)] +pub struct FnMetaYaml { + /// key to operations + pub calls: Vec, + pub kvs: Option>>, +} + +impl<'de> Deserialize<'de> for FnMetaYaml { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let mut map = serde_yaml::Value::deserialize(deserializer)?; + let map = map + .as_mapping_mut() + .ok_or_else(|| serde::de::Error::custom("not a map"))?; + // let calls = map.remove("calls").ok_or_else(|| serde::de::Error::missing_field("calls"))?; + let mut calls = vec![]; + fn parse_http_call<'de, D: Deserializer<'de>>( + map: &serde_yaml::Value, + ) -> Result { + let map = map + .as_mapping() + .ok_or_else(|| serde::de::Error::custom("not a map"))?; + let call = map + .get("call") + .ok_or_else(|| serde::de::Error::missing_field("call"))?; + let call = call + .as_str() + .ok_or_else(|| serde::de::Error::custom("not a string"))?; + let call = if call == "direct" { + HttpCall::Direct + } else if call == "indirect" { + HttpCall::Indirect + } else { + return Err(serde::de::Error::custom("invalid call type")); + }; + Ok(call) + } + if let Some(v) = map.get("http.get") { + let call = parse_http_call::(v)?; + calls.push(FnCallMeta::Http { + method: HttpMethod::Get, + call, + }); + } + if let Some(v) = map.get("http.post") { + let call = parse_http_call::(v)?; + calls.push(FnCallMeta::Http { + method: HttpMethod::Post, + call, + }); + } + if let Some(_v) = map.get("rpc") { + calls.push(FnCallMeta::Rpc); + } + + let kvs = map.remove("kvs"); + let kvs = if let Some(kvs) = kvs { + serde_yaml::from_value(kvs).map_err(serde::de::Error::custom)? 
+ } else { + None + }; + + tracing::debug!("FnMetaYaml constructed, calls:{:?}", calls); + Ok(Self { calls, kvs }) + } +} + +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +pub struct KeyPattern(pub String); + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct KvMeta { + set: bool, + get: bool, + delete: bool, + pub pattern: KeyPattern, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct FnMeta { + pub calls: Vec, + // pub event: Vec, + // pub args: Vec, + pub kvs: Option>, +} + +#[derive(Debug, Deserialize)] +pub struct AppMetaYaml { + pub fns: HashMap, +} + +#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum AppType { + Jar, + Wasm, +} + +#[derive(Serialize, Deserialize, Clone)] +pub struct AppMeta { + pub app_type: AppType, + fns: HashMap, + cache_contains_http_fn: Option, +} + +impl AppMeta { + pub fn contains_http_fn(&self) -> bool { + if let Some(v) = self.cache_contains_http_fn { + return v; + } + let res = self + .fns + .iter() + .any(|(_, fnmeta)| fnmeta.allow_http_call().is_some()); + unsafe { + let _ = util::non_null(&self.cache_contains_http_fn) + .as_mut() + .replace(res); + } + res + } +} + +// #[derive(Debug, Serialize, Deserialize)] +// pub struct AppMetaService { +// actions: Vec, +// node: NodeID, +// app_dir: String, +// } + +pub struct AppMetas { + app_metas: HashMap, + pattern_2_app_fn: HashMap>, +} + +// impl FnEvent { +// pub fn match_kv_ope(&self, ope: KvOps) -> bool { +// match self { +// Self::KvSet(_) => ope == KvOps::Set, +// Self::HttpApp => false, +// } +// } +// } + +impl AppMetaYaml { + pub fn read(apps_dir: impl AsRef, appname: &str) -> AppMetaYaml { + let file_path = apps_dir.as_ref().join(format!("{}/app.yaml", appname)); + let file = std::fs::File::open(file_path).unwrap_or_else(|err| { + tracing::debug!("open config file failed, err: {:?}", err); + + let file_path = apps_dir.as_ref().join(format!("{}/app.yml", appname)); + std::fs::File::open(file_path).unwrap_or_else(|err| { + panic!("open config file failed, err: {:?}", err); + }) + }); + serde_yaml::from_reader(file).unwrap_or_else(|e| { + panic!("parse yaml config file failed, err: {:?}", e); + }) + } + // // return true if key set is valid + // pub fn check_key_set(&self, key: &str) -> bool { + // self.fns + // .iter() + // .any(|(_, fn_meta)| { + // if let Some(kvs)=&fn_meta.kvs{ + // kvs.iter().any(|(k, _)| key.contains(k)) + // }else{ + // false + // }) + // } +} + +impl FnMeta { + pub fn allow_rpc_call(&self) -> bool { + self.calls.iter().any(|v| match v { + FnCallMeta::Rpc => true, + _ => false, + }) + } + pub fn allow_http_call(&self) -> Option { + self.calls.iter().find_map(|v| match v { + FnCallMeta::Http { method, call: _ } => Some(method.clone()), + _ => None, + }) + } + + pub fn match_key(&self, key: &[u8], ope: KvOps) -> Option { + let key = if let Ok(key) = std::str::from_utf8(key) { + key + } else { + return None; + }; + if let Some(kvs) = &self.kvs { + for kv in kvs { + if kv.pattern.match_key(key) { + match ope { + KvOps::Get => { + if kv.get { + return Some(kv.pattern.clone()); + } + } + KvOps::Set => { + if kv.set { + return Some(kv.pattern.clone()); + } + } + KvOps::Delete => { + if kv.delete { + return Some(kv.pattern.clone()); + } + } + } + tracing::info!("allow ope {:?}, cur ope:{:?}", kv, ope); + } + } + // tracing::info!("no key pattern matched for key: {}", key); + } + + None + } + + pub fn try_get_kv_meta_by_index(&self, index: usize) -> Option<&KvMeta> { + if let Some(kvs) = &self.kvs { + return kvs.get(index); + } + 
None + } + + // / index should be valid + // fn get_kv_meta_by_index_unwrap(&self, index: usize) -> &KvMeta { + // self.try_get_kv_meta_by_index(index).unwrap() + // } + // /// get event related kvmeta matches operation + // pub fn get_event_kv(&self, ope: KvOps, event: &FnEvent) -> Option<&KvMeta> { + // match event { + // FnEvent::KvSet(kv_set) => { + // if ope == KvOps::Set { + // return Some(self.get_kv_meta_by_index_unwrap(*kv_set)); + // } + // } + // FnEvent::HttpApp => {} + // } + // None + // } + + // / find kv event trigger with match the `pattern` and `ope` + // pub fn find_will_trigger_kv_event(&self, _pattern: &KeyPattern, _ope: KvOps) -> Option<&KvMeta> { + // unimplemented!() + // // self.event.iter().find_map(|event| { + // // match event { + // // FnEvent::HttpApp => {} + // // FnEvent::KvSet(key_index) => { + // // if ope == KvOps::Set { + // // let res = self.get_kv_meta_by_index_unwrap(*key_index); + // // if res.pattern == *pattern { + // // return Some(res); + // // } + // // } + // // } + // // FnEvent::HttpFn => {} + // // } + // // None + // // }) + // } +} + +impl KeyPattern { + pub fn new(input: String) -> Self { + Self(input) + } + // match {} for any words + // "xxxx_{}_{}" matches "xxxx_abc_123" + // “xxxx{}{}" matches "xxxxabc123" + pub fn match_key(&self, key: &str) -> bool { + let re = self.0.replace("{}", "[a-zA-Z0-9]+"); + // let pattern_len = re.len(); + // tracing::info!("len:{}", re.len()); + let re = regex::Regex::new(&re).unwrap(); + if let Some(len) = re.find(key) { + tracing::info!( + "match key: {} with pattern: {} with len {} {} ", + key, + self.0, + len.len(), + key.len() + ); + len.len() == key.len() + } else { + tracing::info!("not match key: {} with pattern: {}", key, self.0); + false + } + } + // pub fn matcher(&self) -> String { + + // // let re = Regex::new(r"(.+)\{\}").unwrap(); + + // // if let Some(captured) = re.captures(&*self.0) { + // // if let Some(capture_group) = captured.get(1) { + // // let result = capture_group.as_str(); + // // // println!("Result: {}", result); + // // return result.to_owned(); + // // } + // // } + + // // self.0.clone() + // } +} + +impl From for FnMeta { + fn from(yaml: FnMetaYaml) -> Self { + let kvs = if let Some(kvs) = yaml.kvs { + Some( + kvs.into_iter() + .map(|(key, ops)| { + let mut set = false; + let mut get = false; + let mut delete = false; + for op in ops { + if op == "set" { + set = true; + } else if op == "get" { + get = true; + } else if op == "delete" { + delete = true; + } else { + panic!("invalid operation: {}", op); + } + } + // TODO: check key pattern + KvMeta { + delete, + set, + get, + pattern: KeyPattern::new(key), + } + }) + .collect(), + ) + } else { + None + }; + let res = Self { + calls: yaml.calls, + kvs, + }; + // assert!(res.check_kv_valid()); + res + } +} + +// impl From for AppMeta { +// fn from(yaml: AppMetaYaml) -> Self { +// let fns = yaml +// .fns +// .into_iter() +// .map(|(fnname, fnmeta)| (fnname, fnmeta.into())) +// .collect(); +// Self { fns } +// } +// } + +impl AppMeta { + pub async fn new( + metayaml: AppMetaYaml, + app_name: &str, + meta_fs: &AppMetaVisitOs, + ) -> WSResult { + let fns = metayaml + .fns + .into_iter() + .map(|(fnname, fnmeta)| { + let fnmeta = fnmeta.into(); + (fnname, fnmeta) + }) + .collect(); + let app_type = meta_fs.get_app_type(app_name).await?; + Ok(Self { + app_type, + fns, + cache_contains_http_fn: None, + }) + } + pub fn fns(&self) -> Vec { + self.fns.iter().map(|(fnname, _)| fnname.clone()).collect() + } + pub fn get_fn_meta(&self, 
fnname: &str) -> Option<&FnMeta> { + self.fns.get(fnname) + } + // pub fn http_trigger_fn(&self) -> Option<&str> { + // self.fns.iter().find_map(|(fnname, fnmeta)| { + // if fnmeta.event.iter().any(|e| e == &FnEvent::HttpApp) { + // Some(fnname.as_str()) + // } else { + // None + // } + // }) + // } +} + +lazy_static::lazy_static! { + static ref VIEW: Option = None; +} +fn view() -> &'static View { + unsafe { util::non_null(&*VIEW).as_ref().as_ref().unwrap() } +} + +#[derive(LogicalModule)] +pub struct AppMetaManager { + pub meta: RwLock, + pub fs_layer: AppMetaVisitOs, + view: View, + // app_meta_list_lock: Mutex<()>, +} + +#[async_trait] +impl LogicalModule for AppMetaManager { + fn inner_new(args: LogicalModuleNewArgs) -> Self + where + Self: Sized, + { + let view = View::new(args.logical_modules_ref.clone()); + unsafe { + let _ = util::non_null(&*VIEW).as_mut().replace(view.clone()); + } + let fs_layer = AppMetaVisitOs::new(view.clone()); + Self { + meta: RwLock::new(AppMetas { + app_metas: HashMap::new(), + pattern_2_app_fn: HashMap::new(), + }), + view, + fs_layer, + // app_meta_list_lock: Mutex::new(()), + } + } + async fn init(&self) -> WSResult<()> { + let mut router = self.view.http_handler().building_router(); + + let take = router.option_mut().take().unwrap(); + let take = http::binds(take, self.view.clone()); + let _ = router.option_mut().replace(take); + // .route("/appman/upload", post(handler2)) + + Ok(()) + } + async fn start(&self) -> WSResult> { + self.meta + .write() + .await + .load_all_app_meta(&self.view.os().file_path, &self.fs_layer) + .await?; + Ok(vec![]) + } +} + +impl AppMetas { + // pub fn new() -> Self { + // Self { + // app_metas: HashMap::new(), + // pattern_2_app_fn: HashMap::new(), + // } + // } + // pub async fn set_tmp_appmeta(&self, ) + fn get_tmp_app_meta(&self, app: &str) -> Option { + self.app_metas.get(app).cloned() + } + pub async fn get_app_meta(&self, app: &str) -> Option { + if let Some(res)=self.get_tmp_app_meta(app){ + return Some(res); + } + + // self.app_metas.get(app) + let meta = view() + .data_general() + .get_data_item(format!("app{}", app), 0) + .await; + let Some(DataItem { + data: Some(Data::RawBytes(metabytes)), + }) = meta + else { + return None; + }; + + let meta = bincode::deserialize_from::<_, AppMeta>(Cursor::new(metabytes)); + let meta = match meta { + Err(e) => { + tracing::warn!("meta decode failed {:?}", e); + return None; + } + Ok(meta) => meta, + }; + Some(meta) + } + pub fn get_pattern_triggers( + &self, + pattern: impl Borrow, + ) -> Option<&Vec<(String, String)>> { + self.pattern_2_app_fn.get(pattern.borrow()) + } + async fn load_all_app_meta( + &mut self, + file_dir: impl AsRef, + meta_fs: &AppMetaVisitOs, + ) -> WSResult<()> { + if !file_dir.as_ref().join("apps").exists() { + fs::create_dir_all(file_dir.as_ref().join("apps")).unwrap(); + return Ok(()); + } + let entries = + fs::read_dir(file_dir.as_ref().join("apps")).map_err(|e| ErrCvt(e).to_ws_io_err())?; + + // iterate over each entry in the apps directory + for entry in entries { + // unwrap the directory entry Result + let entry = entry.map_err(|e| ErrCvt(e).to_ws_io_err())?; + // get the entry's file name + let file_name = entry.file_name(); + // dir name is the app name + let app_name = file_name.to_str().unwrap().to_owned(); + + // allow spec files + if entry.file_type().unwrap().is_file() { + let allowed_files = vec!["crac_config"]; + assert!(allowed_files + .contains(&&*(*entry.file_name().as_os_str().to_string_lossy()).to_owned())); + continue; + } + + // allow only dir + assert!(entry.file_type().unwrap().is_dir()); + + // 
read app config yaml + let meta_yaml = { + let apps_dir = file_dir.as_ref().join("apps"); + let file_name_str = app_name.clone(); + tokio::task::spawn_blocking(move || AppMetaYaml::read(apps_dir, &*file_name_str)) + .await + .unwrap() + }; + + // transform + let meta = AppMeta::new(meta_yaml, &app_name, meta_fs).await.unwrap(); + + //TODO: build and checks + // - build up key pattern to app fn + + // for (fnname, fnmeta) in &meta.fns { + // for event in &fnmeta.event { + // match event { + // // not kv event, no key pattern + // FnEvent::HttpFn => {} + // FnEvent::HttpApp => {} + // FnEvent::KvSet(key_index) => { + // let kvmeta = fnmeta.try_get_kv_meta_by_index(*key_index).unwrap(); + // self.pattern_2_app_fn + // .entry(kvmeta.pattern.0.clone()) + // .or_insert_with(Vec::new) + // .push((app_name.clone(), fnname.clone())); + // } + // } + // } + // } + let _ = self.app_metas.insert(app_name, meta); + } + Ok(()) + } +} + +impl AppMetaManager { + async fn construct_tmp_app(&self, tmpapp: &str) -> WSResult { + // 1.meta + // let appdir = self.fs_layer.concat_app_dir(app); + let appmeta = self.fs_layer.read_app_meta(tmpapp).await?; + + // TODO: 2.check project dir + // 3. if java, take snapshot + if let AppType::Jar = appmeta.app_type { + let _ = self + .meta + .write() + .await + .app_metas + .insert(tmpapp.to_owned(), appmeta.clone()); + tracing::debug!("record app meta to make checkpoint {}", tmpapp); + self.view + .instance_manager() + .make_checkpoint_for_app(tmpapp) + .await?; + self.view + .instance_manager() + .drap_app_instances(tmpapp) + .await; + // remove app_meta + tracing::debug!("checkpoint made, remove app meta {}", tmpapp); + let _ = self + .meta + .write() + .await + .app_metas + .remove(tmpapp) + .unwrap_or_else(|| { + panic!("remove app meta failed, app: {}", tmpapp); + }); + } + + Ok(appmeta) + } + pub async fn app_available(&self, app: &str) -> WSResult { + Ok(self + .view + .data_general() + .get_data_item(format!("app{}", app), 0) + .await + .is_some()) + } + pub async fn app_uploaded(&self, appname: String, data: Bytes) -> WSResult<()> { + // 1. tmpapp name & dir + // TODO: fobidden tmpapp public access + // let tmpapp = format!("tmp{}", Uuid::new_v4()); //appname.clone(); + let tmpapp = format!("{}", appname); + let tmpappdir = self.fs_layer.concat_app_dir(&tmpapp); + let tmpapp = tmpapp.clone(); + + // 2. unzip app pack + let tmpappdir2 = tmpappdir.clone(); + // remove old dir&app + if let Some(_) = self.meta.write().await.app_metas.remove(&tmpapp) { + tracing::debug!("remove old app meta {}", tmpapp); + } + let ins = self.view.instance_manager().app_instances.remove(&tmpapp); + if let Some(ins) = ins { + ins.value().kill().await; + tracing::debug!("remove old app instance {}", tmpapp); + } + + if tmpappdir2.exists() { + // remove old app + fs::remove_dir_all(&tmpappdir2).unwrap(); + } + let res = tokio::task::spawn_blocking(move || { + let data = data.to_vec(); + zip_extract::extract(Cursor::new(data), &tmpappdir2, false) + }) + .await + .unwrap(); + + match res { + Ok(res) => res, + Err(err) => { + tracing::warn!("unzip failed, err: {:?}", err); + let _ = fs::remove_dir_all(&tmpappdir); + return Err(WsFuncError::AppPackFailedZip(err).into()); + } + }; + + // 3. check meta + let res = self.construct_tmp_app(&tmpapp).await; + let appmeta = match res { + Err(e) => { + let _ = fs::remove_dir_all(&tmpappdir); + tracing::warn!("construct app failed, err {:?}", e); + return Err(e); + } + Ok(appmeta) => appmeta, + }; + + // 4. 
zip tmp dir to memory + let zipfiledata = { + tracing::debug!("zip tmp dir to memory"); + // if let Ok(direntries) = fs::read_dir(tmpappdir.join("checkpoint-dir")) { + // for f in direntries { + // tracing::debug!( + // "file in checkpoint-dir: {:?}", + // f.map(|v| v.file_name().to_str().unwrap().to_owned()) + // ); + // } + // } + let view = self.view.clone(); + tokio::task::spawn_blocking(move || { + view.os() + .zip_dir_2_data(&tmpappdir, zip::CompressionMethod::Deflated) + }) + .await + .unwrap() + }?; + + // remove temp dir + // let _ = fs::remove_dir_all(&tmpappdir).map_err(|e| WSError::from(WsIoErr::Io(e)))?; + + // 3. broadcast meta and appfile + tracing::debug!("broadcast meta and appfile"); + self.view + .data_general() + .write_data( + format!("app{}", appname), + vec![ + DataMeta { + cache: DataModeCache::AlwaysInMem as i32, + distribute: DataModeDistribute::BroadcastRough as i32, + }, + DataMeta { + cache: DataModeCache::AlwaysInFs as i32, + distribute: DataModeDistribute::BroadcastRough as i32, + }, + ], + vec![ + DataItem { + data: Some(data_item::Data::RawBytes( + bincode::serialize(&appmeta).unwrap(), + )), + }, + DataItem { + data: Some(data_item::Data::File(FileData { + file_name: format!("apps/{}", appname), + is_dir: true, + file_content: zipfiledata, + })), + }, + ], + ) + .await; + tracing::debug!("app uploaded"); + Ok(()) + } + + pub fn set_app_meta_list(&self, list: Vec) { + self.view.kv_store_engine().set( + KeyTypeServiceList, + &serde_json::to_string(&list).unwrap().into(), + ); + } + pub fn get_app_meta_list(&self) -> Vec { + let res = self + .view + .kv_store_engine() + .get(KeyTypeServiceList) + .unwrap_or_else(|| { + return vec![]; + }); + serde_json::from_slice(&res).unwrap_or_else(|e| { + tracing::warn!("parse app meta list failed, err: {:?}", e); + vec![] + }) + } + + // pub fn get_app_meta_basicinfo_list(&self) -> Vec { + // let apps = self.get_app_meta_list(); + // apps.into_iter() + // .map(|app| { + // let service = self.get_app_meta_service(&app).unwrap(); + // ServiceBasic { + // name: app, + // node: format!("{}", service.node), + // dir: service.app_dir, + // actions: service.actions, + // } + // }) + // .collect() + // } + + // pub fn get_app_meta_service(&self, app_name: &str) -> Option { + // let Some(res) = self + // .view + // .kv_store_engine() + // .get(KeyTypeServiceMeta(app_name.as_bytes())) + // else { + // return None; + // }; + // serde_json::from_slice(&res).map_or_else( + // |e| { + // tracing::warn!("parse service meta failed, err: {:?}", e); + // None + // }, + // |v| Some(v), + // ) + // } + + // pub fn set_app_meta_service(&self, app_name: &str, service: AppMetaService) { + // self.view.kv_store_engine().set( + // KeyTypeServiceMeta(app_name.as_bytes()), + // &serde_json::to_string(&service).unwrap().into(), + // ); + // } + + // // node id is valid before call this function + // pub async fn add_service(&self, req: AddServiceReq) -> AddServiceResp { + // // // check conflict service + // // if self.get_app_meta_service(&req.service.name).is_some() { + // // return AddServiceResp::Fail { + // // msg: format!("service {} already exist", req.service.name), + // // }; + // // } + + // // get the target node + // let Ok(nodeid) = req.service.node.parse::() else { + // return AddServiceResp::Fail { + // msg: "node id should be number".to_owned(), + // }; + // }; + // if !self.view.p2p().nodes_config.node_exist(nodeid) { + // return AddServiceResp::Fail { + // msg: format!("node {nodeid} not exist"), + // }; + // } + + // // call and 
return if rpc failed + // let res = match self + // .view + // .os() + // .remote_get_dir_content_caller + // .call( + // self.view.p2p(), + // nodeid, + // GetDirContentReq { + // path: req.service.dir.clone(), + // }, + // None, + // ) + // .await + // { + // Ok(res) => res, + // Err(e) => { + // return AddServiceResp::Fail { + // msg: format!("call remote_get_dir_content_caller failed, err: {:?}", e), + // }; + // } + // }; + + // // return if remote failed + // let _res = match res.dispatch.unwrap() { + // super::network::proto::remote_sys::get_dir_content_resp::Dispatch::Fail(fail) => { + // return AddServiceResp::Fail { msg: fail.error }; + // } + // super::network::proto::remote_sys::get_dir_content_resp::Dispatch::Ok(res) => res, + // }; + + // // add to appmeta list + // { + // let _mu = self.app_meta_list_lock.lock(); + // let mut appmeta_list = self.get_app_meta_list(); + // appmeta_list.push(req.service.name.clone()); + // let mut dup = HashSet::new(); + // let appmeta_list = appmeta_list + // .into_iter() + // .filter(|v| dup.insert(v.clone())) + // .collect(); + // self.set_app_meta_list(appmeta_list); + // self.set_app_meta_service( + // &req.service.name, + // AppMetaService { + // actions: req.service.actions, + // node: nodeid, + // app_dir: req.service.dir, + // }, + // ); + // } + // AddServiceResp::Succ {} + // } + // pub async fn run_service_action(&self, req: RunServiceActionReq) -> RunServiceActionResp { + // if !req.sync { + // return RunServiceActionResp::Fail { + // msg: "unsuppot async mode".to_owned(), + // }; + // } + + // // sync logic + // // check service and action + // let service = match self.get_app_meta_service(&req.service) { + // Some(service) => service, + // None => { + // return RunServiceActionResp::Fail { + // msg: format!("service {} not exist", req.service), + // }; + // } + // }; + + // // check action valid + // let Some(action) = service.actions.iter().find(|v| v.cmd == req.action_cmd) else { + // return RunServiceActionResp::Fail { + // msg: format!("action {} not exist", req.action_cmd), + // }; + // }; + + // // handle rpc fail + // let res = match self + // .view + // .os() + // .remote_run_cmd_caller + // .call( + // self.view.p2p(), + // service.node, + // RunCmdReq { + // cmd: action.cmd.clone(), + // workdir: service.app_dir, + // }, + // Some(Duration::from_secs(10)), + // ) + // .await + // { + // Ok(res) => res, + // Err(err) => { + // return RunServiceActionResp::Fail { + // msg: format!("call remote_run_cmd_caller failed, err: {:?}", err), + // }; + // } + // }; + + // // handle cmd fail + // let res = match res.dispatch.unwrap() { + // super::network::proto::remote_sys::run_cmd_resp::Dispatch::Ok(res) => res, + // super::network::proto::remote_sys::run_cmd_resp::Dispatch::Err(err) => { + // return RunServiceActionResp::Fail { + // msg: format!("remote run cmd failed: {}", err.error), + // } + // } + // }; + + // RunServiceActionResp::Succ { output: res.output } + // } +} + +#[cfg(test)] +mod test { + use crate::util; + + use super::*; + #[test] + fn test_key_pattern() { + util::test_tracing_start(); + let pattern = KeyPattern::new("xxxx_{}_{}".to_owned()); + assert!(pattern.match_key("xxxx_abc_123")); + } +} diff --git a/src/main/src/general/app/v_os.rs b/src/general/m_appmeta_manager/v_os.rs similarity index 87% rename from src/main/src/general/app/v_os.rs rename to src/general/m_appmeta_manager/v_os.rs index e8c302b..bd43a82 100644 --- a/src/main/src/general/app/v_os.rs +++ b/src/general/m_appmeta_manager/v_os.rs @@ -13,21 
+13,16 @@ impl AppMetaVisitOs { Self { view } } - pub fn crac_file_path(&self) -> PathBuf { - self.view - .os() - .file_path - .clone() - .join("apps") - .join("crac_config") + pub fn crac_file_path(&self) -> String { + let sys_dir = &self.view.os().file_path; + let app_dir = Path::new(sys_dir).join("apps").join("crac_config"); + (*app_dir.as_os_str().to_string_lossy()).to_owned() } pub fn concat_app_dir(&self, app: &str) -> PathBuf { - self.view.os().file_path.clone().join("apps").join(app) - } - - pub fn app_dir(&self) -> PathBuf { - self.view.os().file_path.clone().join("apps") + let sys_dir = &self.view.os().file_path; + let app_dir = Path::new(sys_dir).join("apps").join(app); + app_dir } pub async fn read_app_meta(&self, app: &str) -> WSResult { @@ -49,7 +44,7 @@ impl AppMetaVisitOs { } Ok(ok) => ok, }; - AppMeta::new_from_yaml(yml, app, self).await + AppMeta::new(yml, app, self).await } pub async fn get_app_type_in_dir(&self, app_dir: impl AsRef) -> WSResult { diff --git a/src/general/m_data_general.rs b/src/general/m_data_general.rs new file mode 100644 index 0000000..1b3c987 --- /dev/null +++ b/src/general/m_data_general.rs @@ -0,0 +1,405 @@ +use super::{ + m_kv_store_engine::{KeyTypeDataSetItem, KeyTypeDataSetMeta, KvStoreEngine}, + m_os::OperatingSystem, + network::{ + m_p2p::{P2PModule, RPCCaller, RPCHandler, RPCResponsor}, + proto::{ + write_one_data_request::{data_item::Data, DataItem}, + DataMeta, DataModeDistribute, DataVersionRequest, WriteOneDataRequest, + WriteOneDataResponse, + }, + }, +}; +use crate::{ + general::network::proto::write_one_data_request, + logical_module_view_impl, + result::WSResult, + sys::{LogicalModule, LogicalModuleNewArgs, NodeID}, + util::JoinHandleWrapper, +}; +use crate::{ + result::{WsDataError}, + sys::LogicalModulesRef, +}; +use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use std::{collections::HashSet, time::Duration}; +use ws_derive::LogicalModule; + +// use super::m_appmeta_manager::AppMeta; + +logical_module_view_impl!(DataGeneralView); +logical_module_view_impl!(DataGeneralView, p2p, P2PModule); +logical_module_view_impl!(DataGeneralView, data_general, DataGeneral); +logical_module_view_impl!(DataGeneralView, kv_store_engine, KvStoreEngine); +logical_module_view_impl!(DataGeneralView, os, OperatingSystem); + +pub type DataVersion = u64; + +#[derive(LogicalModule)] +pub struct DataGeneral { + view: DataGeneralView, + pub rpc_call_data_version: RPCCaller, + + rpc_call_write_once_data: RPCCaller, + rpc_handler_write_once_data: RPCHandler, +} + +#[async_trait] +impl LogicalModule for DataGeneral { + fn inner_new(args: LogicalModuleNewArgs) -> Self + where + Self: Sized, + { + Self { + view: DataGeneralView::new(args.logical_modules_ref.clone()), + rpc_call_data_version: RPCCaller::new(), + rpc_call_write_once_data: RPCCaller::new(), + rpc_handler_write_once_data: RPCHandler::new(), + } + } + async fn start(&self) -> WSResult> { + tracing::info!("start as master"); + let p2p = self.view.p2p(); + self.rpc_call_data_version.regist(p2p); + self.rpc_call_write_once_data.regist(p2p); + let view = self.view.clone(); + self.rpc_handler_write_once_data + .regist(p2p, move |responsor, req| { + let view = view.clone(); + let _ = tokio::spawn(async move { + view.data_general().write_one_data(responsor, req).await; + }); + Ok(()) + }); + Ok(vec![]) + } +} + +// pub enum DataWrapper { +// Bytes(Vec), +// File(PathBuf), +// } + +impl DataGeneral { + async fn write_one_data( + &self, + responsor: RPCResponsor, + req: WriteOneDataRequest, 
+ ) { + // ## verify data meta + tracing::debug!("verify data meta bf write data"); + let Some(res) = self + .view + .kv_store_engine() + .get(KeyTypeDataSetMeta(req.unique_id.as_bytes())) + else { + responsor.send_resp(WriteOneDataResponse { + remote_version: 0, + success: false, + message: "Data meta not found".to_owned(), + }); + return; + }; + if res.version != req.version { + responsor.send_resp(WriteOneDataResponse { + remote_version: res.version, + success: false, + message: "Data meta version not match".to_owned(), + }); + return; + } + if req.data.is_empty() { + responsor.send_resp(WriteOneDataResponse { + remote_version: res.version, + success: false, + message: "Data is empty".to_owned(), + }); + return; + } + if req.data[0].data.is_none() { + responsor.send_resp(WriteOneDataResponse { + remote_version: res.version, + success: false, + message: "Data enum is none".to_owned(), + }); + return; + } + for (_idx, data) in req.data.iter().enumerate() { + match data.data.as_ref().unwrap() { + write_one_data_request::data_item::Data::File(f) => { + if f.file_name.starts_with("/") { + responsor.send_resp(WriteOneDataResponse { + remote_version: res.version, + success: false, + message: format!( + "File name {} starts with / is forbidden", + f.file_name + ), + }); + return; + } + } + _ => {} + } + } + // ## write data + tracing::debug!("start to write data"); + for (idx, data) in req.data.into_iter().enumerate() { + match data.data.unwrap() { + write_one_data_request::data_item::Data::File(f) => { + tracing::debug!("writing data part{} file {}", idx, f.file_name); + let p: std::path::PathBuf = self.view.os().file_path.join(f.file_name); + let view = self.view.clone(); + + let p2 = p.clone(); + let res = if f.is_dir { + tokio::task::spawn_blocking(move || { + view.os().unzip_data_2_path(p2, f.file_content); + }) + } else { + // flush to p + tokio::task::spawn_blocking(move || { + view.os().cover_data_2_path(p2, f.file_content); + }) + }; + let res = res.await; + if let Err(e) = res { + responsor.send_resp(WriteOneDataResponse { + remote_version: req.version, + success: false, + message: format!("Write file error: {:?}, path: {:?}", e, p), + }); + return; + } + } + write_one_data_request::data_item::Data::RawBytes(bytes) => { + tracing::debug!("writing data part{} bytes", idx); + self.view.kv_store_engine().set( + KeyTypeDataSetItem { + uid: req.unique_id.as_bytes(), + idx: idx as u8, + }, + &bytes, + ); + } + } + } + self.view.kv_store_engine().flush(); + tracing::debug!("data is written"); + responsor + .send_resp(WriteOneDataResponse { + remote_version: req.version, + success: true, + message: "".to_owned(), + }) + .await; + // ## response + } + pub async fn get_data_item(&self, unique_id: String, idx: u8) -> Option { + let Some(itembytes) = self.view.kv_store_engine().get(KeyTypeDataSetItem { + uid: unique_id.as_bytes(), + idx: idx as u8, + }) else { + return None; + }; + Some(DataItem { + data: Some(Data::RawBytes(itembytes)), + }) + } + + pub async fn set_dataversion(&self, req: DataVersionRequest) -> WSResult<()> { + // follower just update the version from master + let old = self + .view + .kv_store_engine() + .get(KeyTypeDataSetMeta(req.unique_id.as_bytes())); + if let Some(old) = old { + if old.version > req.version { + return Err(WsDataError::SetExpiredDataVersion { + target_version: req.version, + cur_version: old.version, + data_id: req.unique_id.clone(), + } + .into()); + // responsor + // .send_resp(DataVersionResponse { + // version: old.version, + // }) + // .await; + // 
tracing::warn!("has larger version {}", old.version); + // return Ok(()); + } + } + self.view.kv_store_engine().set( + KeyTypeDataSetMeta(req.unique_id.as_bytes()), + &DataSetMeta { + version: req.version, + data_metas: req.data_metas.into_iter().map(|v| v.into()).collect(), + synced_nodes: HashSet::new(), + }, + ); + self.view.kv_store_engine().flush(); + Ok(()) + } + + pub async fn write_data( + &self, + unique_id: String, + data_metas: Vec, + datas: Vec, + ) { + if data_metas.len() == 0 { + tracing::warn!("write_data must have >0 data metas"); + return; + } + if datas.len() != data_metas.len() { + tracing::warn!("write_data data metas and datas length not match"); + return; + } + if DataModeDistribute::BroadcastRough as i32 == data_metas[0].distribute { + self.write_data_broadcast_rough(unique_id, data_metas, datas) + .await; + } + } + async fn write_data_broadcast_rough( + &self, + unique_id: String, + data_metas: Vec, + datas: Vec, + ) { + let p2p = self.view.p2p(); + let resp = self + .rpc_call_data_version + .call( + self.view.p2p(), + p2p.nodes_config.get_master_node(), + DataVersionRequest { + unique_id: unique_id.clone(), + version: 0, + data_metas, + }, + Some(Duration::from_secs(60)), + ) + .await; + let resp = match resp { + Err(e) => { + tracing::warn!("write_data_broadcast_rough require version error: {:?}", e); + return; + } + Ok(ok) => ok, + }; + + tracing::debug!("start broadcast data with version"); + let version = resp.version; + // use the got version to send to global paralell + let mut tasks = vec![]; + + for (_idx, node) in p2p.nodes_config.all_nodes_iter().enumerate() { + let n = *node.0; + let view = self.view.clone(); + let datas = datas.clone(); + let unique_id = unique_id.clone(); + // let datas = unsafe { util::SendNonNull(util::non_null(&datas)) }; + + let t = tokio::spawn(async move { + view.data_general() + .rpc_call_write_once_data + .call( + view.p2p(), + n, + WriteOneDataRequest { + unique_id, + version, + data: datas, + }, + Some(Duration::from_secs(60)), + ) + .await + }); + + tasks.push(t); + } + for t in tasks { + let res = t.await.unwrap(); + match res { + Err(e) => { + tracing::warn!("write_data_broadcast_rough broadcast error: {:?}", e); + } + Ok(ok) => { + if !ok.success { + tracing::warn!( + "write_data_broadcast_rough broadcast error: {:?}", + ok.message + ); + } + } + } + } + } +} + +#[derive(Serialize, Deserialize)] +pub struct DataMetaSys { + pub cache: i32, + pub distribute: i32, +} +impl From for DataMetaSys { + fn from(d: DataMeta) -> Self { + Self { + cache: d.cache, + distribute: d.distribute, + } + } +} +impl Into for DataMetaSys { + fn into(self) -> DataMeta { + DataMeta { + cache: self.cache, + distribute: self.distribute, + } + } +} + +#[derive(Serialize, Deserialize)] +pub struct DataSetMeta { + // unique_id: Vec, + pub version: u64, + pub data_metas: Vec, + pub synced_nodes: HashSet, +} + +// pub struct DataDescriptionPart { +// mode_dist: DataModeDistribute, +// mode_cache: DataModeCache, +// } +// pub struct DataDescription { +// small: DataDescriptionPart, +// big: DataDescriptionPart, +// } + +// impl Default for DataDescription { +// fn default() -> Self { +// Self { +// small: DataDescriptionPart { +// mode_dist: DataModeDistribute::GlobalSyncRough, +// mode_cache: DataModeCache::AlwaysInMem, +// }, +// big: DataDescriptionPart { +// mode_dist: DataModeDistribute::GlobalSyncRough, +// mode_cache: DataModeCache::AlwaysInFs, +// }, +// } +// } +// } + +// // data binds +// pub trait Data { +// fn disc() -> DataDescription; +// 
} + +// impl Data for AppMeta { +// fn disc() -> DataDescription { +// DataDescription::default() +// } +// } diff --git a/src/general/m_kv_store_engine.rs b/src/general/m_kv_store_engine.rs new file mode 100644 index 0000000..8748cd2 --- /dev/null +++ b/src/general/m_kv_store_engine.rs @@ -0,0 +1,202 @@ +// pub struct KvStorage { +// // testmap: SkipMap, Vec>, +// pub view: KvStorageView, +// } + +use super::{m_data_general::DataSetMeta, m_os::OperatingSystem, network::m_p2p::P2PModule}; +use crate::{ + logical_module_view_impl, + result::WSResult, + sys::{LogicalModule, LogicalModuleNewArgs, LogicalModulesRef, NodeID}, + util::JoinHandleWrapper, +}; +use axum::async_trait; +use bincode::serialize; +use bincode::serialize_into; +use serde::Serialize; +use serde::{de::DeserializeOwned, ser::SerializeTuple}; +use std::sync::OnceLock; +use ws_derive::LogicalModule; + +logical_module_view_impl!(View); +logical_module_view_impl!(View, os, OperatingSystem); +logical_module_view_impl!(View, p2p, P2PModule); + +#[derive(LogicalModule)] +pub struct KvStoreEngine { + db: OnceLock, + view: View, +} + +#[async_trait] +impl LogicalModule for KvStoreEngine { + fn inner_new(args: LogicalModuleNewArgs) -> Self + where + Self: Sized, + { + Self { + db: OnceLock::new(), + view: View::new(args.logical_modules_ref.clone()), + } + } + async fn start(&self) -> WSResult> { + let db_path = self.view.os().file_path.join(format!( + "kv_store_engine_{}", + self.view.p2p().nodes_config.this_node() + )); + let _ = self.db.get_or_init(|| { + let db = sled::Config::default() + .path(&db_path) + .create_new(true) + .open() + .map_or_else( + |_e| sled::Config::default().path(db_path).open().unwrap(), + |v| v, + ); + db + }); + Ok(vec![]) + } +} + +impl KvStoreEngine { + pub fn set(&self, key: K, value: &K::Value) + where + K: KeyType, + { + let key = key.make_key(); + let _ = self + .db + .get() + .unwrap() + .insert(key, serialize(value).unwrap()) + .unwrap(); + } + pub fn get<'a, K>(&self, key: K) -> Option + where + K: KeyType, + { + let key = key.make_key(); + self.db.get().unwrap().get(key).map_or_else( + |e| { + tracing::error!("get kv error: {:?}", e); + None + }, + |v| v.map(|v| bincode::deserialize_from(v.as_ref()).unwrap()), + ) + } + pub fn del(&self, key: K) + where + K: KeyType, + { + let key = key.make_key(); + let _ = self.db.get().unwrap().remove(key).unwrap(); + } + pub fn flush(&self) { + let _ = self.db.get().unwrap().flush().unwrap(); + } +} + +pub trait KeyType: Serialize { + type Value: Serialize + DeserializeOwned; + fn id(&self) -> u8; + fn make_key(&self) -> Vec { + let mut key = Vec::with_capacity(1 + bincode::serialized_size(self).unwrap() as usize); + key.push(self.id()); + serialize_into(&mut key, self).unwrap(); + key + } +} + +pub struct KeyTypeKv<'a>(pub &'a [u8]); + +pub struct KeyTypeKvPosition<'a>(pub &'a [u8]); + +pub struct KeyTypeServiceMeta<'a>(pub &'a [u8]); + +pub struct KeyTypeServiceList; + +pub struct KeyTypeDataSetMeta<'a>(pub &'a [u8]); + +pub struct KeyTypeDataSetItem<'a> { + pub uid: &'a [u8], + pub idx: u8, +} + +impl KeyType for KeyTypeKvPosition<'_> { + type Value = NodeID; + fn id(&self) -> u8 { + 0 + } +} +impl KeyType for KeyTypeKv<'_> { + type Value = Vec; + fn id(&self) -> u8 { + 1 + } +} +impl KeyType for KeyTypeServiceMeta<'_> { + type Value = Vec; + fn id(&self) -> u8 { + 2 + } +} +impl KeyType for KeyTypeServiceList { + type Value = Vec; + fn id(&self) -> u8 { + 3 + } +} + +impl KeyType for KeyTypeDataSetMeta<'_> { + type Value = DataSetMeta; + fn id(&self) -> 
u8 { + 4 + } +} + +impl KeyType for KeyTypeDataSetItem<'_> { + type Value = Vec; + fn id(&self) -> u8 { + 5 + } +} + +impl Serialize for KeyTypeKvPosition<'_> { + fn serialize(&self, serializer: S) -> Result { + self.0.serialize(serializer) + } +} + +impl Serialize for KeyTypeKv<'_> { + fn serialize(&self, serializer: S) -> Result { + self.0.serialize(serializer) + } +} + +impl Serialize for KeyTypeServiceMeta<'_> { + fn serialize(&self, serializer: S) -> Result { + self.0.serialize(serializer) + } +} + +impl Serialize for KeyTypeServiceList { + fn serialize(&self, serializer: S) -> Result { + serializer.serialize_unit() + } +} + +impl Serialize for KeyTypeDataSetMeta<'_> { + fn serialize(&self, serializer: S) -> Result { + self.0.serialize(serializer) + } +} + +impl Serialize for KeyTypeDataSetItem<'_> { + fn serialize(&self, serializer: S) -> Result { + let mut tup = serializer.serialize_tuple(2)?; + tup.serialize_element(self.uid)?; + tup.serialize_element(&self.idx)?; + tup.end() + } +} diff --git a/src/main/src/general/m_metric_publisher.rs b/src/general/m_metric_publisher.rs similarity index 99% rename from src/main/src/general/m_metric_publisher.rs rename to src/general/m_metric_publisher.rs index 5125167..748344d 100644 --- a/src/main/src/general/m_metric_publisher.rs +++ b/src/general/m_metric_publisher.rs @@ -80,7 +80,6 @@ async fn report_metric_task(view: MetricPublisherView) { // view.metric_observor() // .insert_node_rsc_metric(view.p2p().nodes_config.this.0, metric); // } else { - let _res = view .metric_publisher() .msg_sender @@ -90,7 +89,6 @@ async fn report_metric_task(view: MetricPublisherView) { metric, ) .await; - // .send_resp(1, 0, metric).await; // } } diff --git a/src/main/src/general/m_os/mod.rs b/src/general/m_os/mod.rs similarity index 94% rename from src/main/src/general/m_os/mod.rs rename to src/general/m_os/mod.rs index e974f17..ee978b1 100644 --- a/src/main/src/general/m_os/mod.rs +++ b/src/general/m_os/mod.rs @@ -1,7 +1,7 @@ pub mod zip; -use crate::general::{ - app::AppMetaManager, +use super::{ + m_appmeta_manager::AppMetaManager, network::{ m_p2p::{P2PModule, RPCCaller, RPCHandler, RPCResponsor}, proto::remote_sys::{ @@ -13,7 +13,7 @@ use crate::general::{ use crate::{ general::network::proto, logical_module_view_impl, - result::{ErrCvt, WSError, WSResult, WSResultExt, WsIoErr}, + result::{ErrCvt, WSError, WSResult, WsIoErr}, sys::{LogicalModule, LogicalModuleNewArgs, LogicalModulesRef}, util::JoinHandleWrapper, }; @@ -37,9 +37,6 @@ logical_module_view_impl!(OperatingSystemView, p2p, P2PModule); logical_module_view_impl!(OperatingSystemView, os, OperatingSystem); logical_module_view_impl!(OperatingSystemView, appmeta_manager, AppMetaManager); - -pub const APPS_REL_DIR: &str = "apps"; - #[derive(LogicalModule)] pub struct OperatingSystem { view: OperatingSystemView, @@ -111,21 +108,9 @@ pub enum OsProcessType { } impl OperatingSystem { - pub fn abs_file_path(&self, p: PathBuf) -> PathBuf { - if p.is_absolute() { - p - } else { - self.file_path.join(p) - } - } pub fn app_path(&self, app: &str) -> PathBuf { self.view.appmeta_manager().fs_layer.concat_app_dir(app) } - - pub fn app_rootdir(&self) -> PathBuf { - self.view.appmeta_manager().fs_layer.app_dir() - } - pub fn start_process(&self, p: OsProcessType) -> process::Child { let (mut binding, log_file) = match p { OsProcessType::JavaApp(app) => { @@ -254,9 +239,7 @@ impl OperatingSystem { }) .await .unwrap(); - if let Err(e) = responser.send_resp(res).await { - tracing::error!("Failed to send run cmd 
response: {}", e); - } + responser.send_resp(res).await; } async fn remote_get_dir_content_handler( @@ -300,7 +283,9 @@ impl OperatingSystem { get_dir_content_resp::GetDirContentRespOk { files, dirs }, )), } + // 在这里使用 responser 将 dir_contents 发送回调用方 } else { + // 发生读取目录错误,可以选择使用 responser 发送错误消息 GetDirContentResp { dispatch: Some(get_dir_content_resp::Dispatch::Fail( GetDirContentRespFail { @@ -321,9 +306,7 @@ impl OperatingSystem { }) .await .unwrap(); - if let Err(e) = responser.send_resp(res).await { - tracing::error!("Failed to send get dir content response: {}", e); - } + responser.send_resp(res).await; } pub fn open_file(&self, fname: &str) -> WSResult { diff --git a/src/general/m_os/zip.rs b/src/general/m_os/zip.rs new file mode 100644 index 0000000..ee6591d --- /dev/null +++ b/src/general/m_os/zip.rs @@ -0,0 +1,127 @@ +use super::OperatingSystem; +use crate::result::{WSError, WSResult, WsIoErr}; +use std::{ + fs::{self, File}, + io::{self, Cursor, Read, Seek, Write}, + os::unix::fs::PermissionsExt, + path::Path, +}; +use walkdir::WalkDir; +use zip::{result::ZipError, write::FileOptions}; + +impl OperatingSystem { + pub fn unzip_data_2_path(&self, p: impl AsRef, data: Vec) -> WSResult<()> { + // remove old dir + let p = p.as_ref(); + if p.exists() { + fs::remove_dir_all(p).unwrap(); + } + // create new dir + fs::create_dir_all(p).unwrap(); + // unzip + match zip_extract::extract(Cursor::new(data), &p, false) { + Ok(_) => (), + Err(e) => { + return Err(WsIoErr::Zip(e).into()); + } + } + + Ok(()) + } + + // pub fn zip_dir_2_data(&self, p: impl AsRef) -> WSResult> { + // let p = p.as_ref(); + // let mut data = Vec::new(); + // let writer = Cursor::new(&mut data); + // let mut list = self.list_dir_with_prefix(p, p.to_str().unwrap())?; + // self.zip_dir( + // &mut list.iter_mut(), + // p.to_str().unwrap(), + // ZipWriter::new(&data), + // zip::CompressionMethod::Stored, + // ) + // .map_err(|e| WsIoErr::Zip2(e))?; + // Ok(data) + // } + + fn zip_dir( + it: &mut dyn Iterator, + prefix: &Path, + writer: T, + method: zip::CompressionMethod, + ) -> WSResult<()> + where + T: Write + Seek, + { + let mut zip = zip::ZipWriter::new(writer); + // let options = FileOptions::default() + // .compression_method(method) + // .unix_permissions(0o755); + + let prefix = Path::new(prefix); + let mut buffer = Vec::new(); + for entry in it { + let path = entry.path(); + let name = path.strip_prefix(prefix).unwrap(); + let path_as_string = name.to_str().unwrap().to_owned(); + + let options = FileOptions::default() + .compression_method(method) + .unix_permissions( + entry + .metadata() + .map_err(|e| WSError::from(e))? + .permissions() + .mode(), + ); + + // Write file or directory explicitly + // Some unzip tools unzip files with directory paths correctly, some do not! + if path.is_file() { + tracing::debug!("adding file {path:?} as {name:?} ..."); + zip.start_file(path_as_string, options) + .map_err(|e| WSError::from(WsIoErr::Zip2(e)))?; + let mut f = File::open(path).map_err(|e| WSError::from(WsIoErr::Io(e)))?; + + let _ = f + .read_to_end(&mut buffer) + .map_err(|e| WSError::from(WsIoErr::Io(e)))?; + zip.write_all(&buffer) + .map_err(|e| WSError::from(WsIoErr::Io(e)))?; + buffer.clear(); + } else if !name.as_os_str().is_empty() { + // Only if not root! 
Avoids path spec / warning + // and mapname conversion failed error on unzip + tracing::debug!("adding dir {path_as_string:?} as {name:?} ..."); + zip.add_directory(path_as_string, options) + .map_err(|e| WSError::from(WsIoErr::Zip2(e)))?; + } + } + let _ = zip.finish().map_err(|e| WSError::from(WsIoErr::Zip2(e)))?; + Ok(()) + } + + pub fn zip_dir_2_data( + &self, + src_dir: &Path, + method: zip::CompressionMethod, + ) -> WSResult> { + if !Path::new(src_dir).is_dir() { + return Err(WsIoErr::Zip2(ZipError::FileNotFound).into()); + } + + let mut data = Vec::new(); + + let walkdir = WalkDir::new(src_dir); + let it = walkdir.into_iter(); + + Self::zip_dir( + &mut it.filter_map(|e| e.ok()), + src_dir, + io::Cursor::new(&mut data), + method, + )?; + + Ok(data) + } +} diff --git a/src/general/mod.rs b/src/general/mod.rs new file mode 100644 index 0000000..879d634 --- /dev/null +++ b/src/general/mod.rs @@ -0,0 +1,7 @@ +pub mod kv_interface; +pub mod m_appmeta_manager; +pub mod m_data_general; +pub mod m_kv_store_engine; +pub mod m_metric_publisher; +pub mod m_os; +pub mod network; diff --git a/src/main/src/general/network/http_handler.rs b/src/general/network/http_handler.rs similarity index 100% rename from src/main/src/general/network/http_handler.rs rename to src/general/network/http_handler.rs diff --git a/src/main/src/general/network/m_p2p.rs b/src/general/network/m_p2p.rs similarity index 87% rename from src/main/src/general/network/m_p2p.rs rename to src/general/network/m_p2p.rs index 88675b8..4e7d85e 100644 --- a/src/main/src/general/network/m_p2p.rs +++ b/src/general/network/m_p2p.rs @@ -13,7 +13,7 @@ use super::{ use crate::{ config::NodesConfig, logical_module_view_impl, - result::{ErrCvt, WSResult, WSResultExt, WsNetworkConnErr, WsNetworkLogicErr}, + result::{ErrCvt, WSResult, WsNetworkConnErr, WsNetworkLogicErr}, sys::{LogicalModule, LogicalModuleNewArgs, LogicalModulesRef, NodeID}, util::JoinHandleWrapper, }; @@ -104,13 +104,6 @@ impl RPCCaller { req: R, dur: Option, ) -> WSResult { - #[cfg(feature = "rpc-log")] - tracing::debug!( - "call rpc {:?} from {} to {}", - req, - p2p.nodes_config.this_node(), - node_id - ); p2p.call_rpc::(node_id, req, dur).await } } @@ -222,13 +215,6 @@ impl Responser { where RESP: MsgPack + Default, { - #[cfg(feature = "rpc-log")] - tracing::debug!( - "resp rpc {:?} from {} to {}", - resp, - self.view.p2p().nodes_config.this_node(), - self.node_id - ); if self.view.p2p().nodes_config.this.0 == self.node_id { self.view.p2p().dispatch( self.node_id, @@ -294,17 +280,7 @@ impl P2PModule { *b.downcast::().unwrap() } }; - // if msg.msg_id() == 3 { - // tracing::info!("dispatch {:?} from: {}", msg, nid); - // } // tracing::debug!("dispatch from {} msg:{:?}", nid, msg); - #[cfg(feature = "rpc-log")] - tracing::debug!( - "handling rpc {:?} from {} to {}", - msg, - nid, - p2p.nodes_config.this_node(), - ); f( Responser { task_id, @@ -415,18 +391,12 @@ impl P2PModule { let _ = self .waiting_tasks .insert((taskid, node_id), Some(tx).into()); - if let Err(e) = self.dispatch( //返回结果未处理 曾俊 + self.dispatch( node_id, r.msg_id(), taskid, DispatchPayload::Local(Box::new(r)), - ){ - tracing::error!("Failed to dispatch rpc: {}", e); - } - //.todo_handle(); - //虞光勇修改,修改原因:在调用 todo_handle 方法时遇到了缺少参数的问题。需要确保在调用 todo_handle 方法时提供所需的字符串参数。 - //修改内容:加入字符串参数。 - // .todo_handle("This part of the code needs to be implemented."); + ); let resp = rx.await.unwrap(); let resp = resp.downcast::().unwrap(); @@ -468,12 +438,7 @@ impl P2PModule { Err(err) => { let _ = 
self.waiting_tasks.remove(&(taskid, node_id)).unwrap(); // tracing::info!("1stop holding lock msg:{} node:{}", r.msg_id(), node_id); - tracing::error!( - "rpc send failed: {:?}, request({:?}) from node({:?})", - err, - r, - self.nodes_config.this_node() - ); + tracing::error!("rpc send failed: {:?}", err); return Err(err); } } @@ -492,12 +457,7 @@ impl P2PModule { let _ = self.waiting_tasks.remove(&(taskid, node_id)); // let _ = self.p2p_kernel.close(node_id).await; - tracing::error!( - "rpc timeout: {:?} to node {} with req {:?}", - err, - node_id, - r - ); + tracing::error!("rpc timeout: {:?} to node {}", err, node_id); // tracing::warn!("rpc timeout: {:?} to node {}", err, node_id); // tracing::info!("2stop holding lock msg:{} node:{}", r.msg_id(), node_id); @@ -523,11 +483,7 @@ impl P2PModule { cb(nid, self, taskid, data)?; Ok(()) } else { - tracing::warn!( - "not match id: {}, this node: {}", - id, - self.nodes_config.this_node() - ); + tracing::warn!("not match id: {}", id); Err(WsNetworkLogicErr::MsgIdNotDispatchable(id).into()) } } diff --git a/src/main/src/general/network/m_p2p_quic.rs b/src/general/network/m_p2p_quic.rs similarity index 96% rename from src/main/src/general/network/m_p2p_quic.rs rename to src/general/network/m_p2p_quic.rs index f3c96c9..86eecd5 100644 --- a/src/main/src/general/network/m_p2p_quic.rs +++ b/src/general/network/m_p2p_quic.rs @@ -33,7 +33,7 @@ use ws_derive::LogicalModule; use crate::{ // module_view::P2PQuicNodeLMView, - logical_module_view_impl, result::{ErrCvt, WSResult, WSResultExt, WsNetworkConnErr, WsSerialErr}, sys::{BroadcastMsg, BroadcastSender, LogicalModule, LogicalModuleNewArgs, LogicalModulesRef, NodeID}, util::JoinHandleWrapper + logical_module_view_impl, result::{ErrCvt, WSResult, WsNetworkConnErr, WsSerialErr}, sys::{LogicalModulesRef,BroadcastMsg, BroadcastSender, LogicalModule, LogicalModuleNewArgs, NodeID}, util::JoinHandleWrapper }; use super::m_p2p::{MsgId, P2PKernel, P2PModule, TaskId}; @@ -360,11 +360,7 @@ async fn handle_connection( let head=bytes.split_to(headlen as usize); match deserialize_msg_id_task_id(&head) { Ok((msg_id, task_id)) => { - //返回结果未处理 曾俊 - if let Err(e) = view.p2p().dispatch(remote_id, msg_id, task_id, bytes.into()){ - tracing::error!("Failed to dispatch rpc: {}", e); - } - // .todo_handle("This part of the code needs to be implemented."); + view.p2p().dispatch(remote_id, msg_id, task_id, bytes.into()); } Err(err) => { tracing::warn!("incoming deserial head error: {:?}", err); @@ -399,7 +395,7 @@ async fn handle_connection( fn deserialize_msg_id_task_id(head: &[u8]) -> WSResult<(MsgId, TaskId)> { let (msg_id, task_id) = bincode::deserialize::<(MsgId, TaskId)>(head) - .map_err(|err| WsSerialErr::BincodeErr{err,context: "deserialize_msg_id_task_id".to_owned()})?; + .map_err(|err| WsSerialErr::BincodeErr(err))?; Ok((msg_id, task_id)) } fn serialize_msg_id_task_id(msg_id: MsgId, task_id: TaskId) -> Vec { diff --git a/src/main/src/general/network/mod.rs b/src/general/network/mod.rs similarity index 96% rename from src/main/src/general/network/mod.rs rename to src/general/network/mod.rs index 8f1ceff..f157fcc 100644 --- a/src/main/src/general/network/mod.rs +++ b/src/general/network/mod.rs @@ -2,7 +2,6 @@ pub mod http_handler; pub mod m_p2p; pub mod m_p2p_quic; pub mod msg_pack; -pub mod proto_ext; pub mod rpc_model; pub mod proto { diff --git a/src/general/network/msg_pack.rs b/src/general/network/msg_pack.rs new file mode 100644 index 0000000..dce2a3b --- /dev/null +++ b/src/general/network/msg_pack.rs @@ -0,0 
+1,149 @@ +use downcast_rs::{impl_downcast, Downcast}; + +use super::{ + m_p2p::MsgId, + proto::{self, kv::KvResponse}, +}; + +macro_rules! count_modules { + ($module:ty) => {1u32}; + ($module:ty,$($modules:ty),+) => {1u32 + count_modules!($($modules),+)}; +} + +// 定义宏,用于生成 MsgPack trait 的实现 +macro_rules! define_msg_ids { + ($module:ty) => { + impl MsgPack for $module { + fn msg_id(&self) -> MsgId { + 0 + } + } + }; + ($module:ty,$($modules:ty),+) => { + impl MsgPack for $module { + fn msg_id(&self) -> MsgId { + count_modules!($($modules),+) + } + } + define_msg_ids!($($modules),+); + }; + // ($($module:ty),+) => { + // $( + // impl MsgPack for $module { + // fn msg_id(&self) -> MsgId { + // count_modules!($module) + // } + // } + // )* + // }; +} + +// pub struct MsgCoder {} + +pub trait MsgPack: prost::Message + Downcast { + fn msg_id(&self) -> MsgId; + // fn construct_from_raw_mem(bytes: Bytes) {} +} + +impl_downcast!(MsgPack); + +define_msg_ids!( + proto::raft::VoteRequest, + proto::raft::VoteResponse, + proto::raft::AppendEntriesRequest, + proto::raft::AppendEntriesResponse, + proto::sche::DistributeTaskReq, + proto::sche::DistributeTaskResp, + proto::metric::RscMetric, + proto::kv::KvRequests, + proto::kv::KvResponses, + proto::remote_sys::GetDirContentReq, + proto::remote_sys::GetDirContentResp, + proto::remote_sys::RunCmdReq, + proto::remote_sys::RunCmdResp, + proto::DataVersionRequest, + proto::DataVersionResponse, + proto::WriteOneDataRequest, + proto::WriteOneDataResponse +); + +pub trait RPCReq: MsgPack + Default { + type Resp: MsgPack + Default; +} + +impl RPCReq for proto::raft::VoteRequest { + type Resp = proto::raft::VoteResponse; +} + +impl RPCReq for proto::raft::AppendEntriesRequest { + type Resp = proto::raft::AppendEntriesResponse; +} + +impl RPCReq for proto::sche::DistributeTaskReq { + type Resp = proto::sche::DistributeTaskResp; +} + +impl RPCReq for proto::kv::KvRequests { + type Resp = proto::kv::KvResponses; +} + +impl RPCReq for proto::remote_sys::GetDirContentReq { + type Resp = proto::remote_sys::GetDirContentResp; +} + +impl RPCReq for proto::remote_sys::RunCmdReq { + type Resp = proto::remote_sys::RunCmdResp; +} + +impl RPCReq for proto::DataVersionRequest { + type Resp = proto::DataVersionResponse; +} + +impl RPCReq for proto::WriteOneDataRequest { + type Resp = proto::WriteOneDataResponse; +} + +pub trait KvResponseExt { + fn new_lock(lock_id: u32) -> KvResponse; + fn new_common(kvs: Vec) -> KvResponse; + fn lock_id(&self) -> Option; + fn common_kvs(&self) -> Option<&Vec>; +} + +impl KvResponseExt for KvResponse { + fn new_common(kvs: Vec) -> KvResponse { + KvResponse { + resp: Some(proto::kv::kv_response::Resp::CommonResp( + proto::kv::kv_response::KvResponse { kvs }, + )), + } + } + fn new_lock(lock_id: u32) -> KvResponse { + KvResponse { + resp: Some(proto::kv::kv_response::Resp::LockId(lock_id)), + } + } + fn lock_id(&self) -> Option { + match self.resp.as_ref().unwrap() { + proto::kv::kv_response::Resp::CommonResp(_) => None, + proto::kv::kv_response::Resp::LockId(id) => Some(*id), + } + } + fn common_kvs(&self) -> Option<&Vec> { + match self.resp.as_ref().unwrap() { + proto::kv::kv_response::Resp::CommonResp(resp) => Some(&resp.kvs), + proto::kv::kv_response::Resp::LockId(_) => None, + } + } +} + +// impl MsgId for raft::prelude::Message { +// fn msg_id(&self) -> u32 { +// 0 +// } +// } +// impl MsgPack for raft::prelude::Message { +// fn msg_id() -> u32 { +// 0 +// } +// } diff --git a/src/general/network/proto_src/data.proto 
b/src/general/network/proto_src/data.proto new file mode 100644 index 0000000..396267c --- /dev/null +++ b/src/general/network/proto_src/data.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; +package data; + +enum DataModeCache { + AlwaysInMem = 0; + AlwaysInFs = 1; +} + +enum DataModeDistribute { + BroadcastRough = 0; +} + +message DataMeta{ + DataModeCache cache = 1; + DataModeDistribute distribute = 2; +} + +message DataVersionRequest { + string unique_id = 1; + + // 0 means require for version + // >0 means node is compatible with the version's data + uint64 version = 2; + + // with value only when first time to get the version + repeated DataMeta data_metas=3; +} + +message DataVersionResponse { + uint64 version = 1; +} + + +message WriteOneDataRequest { + string unique_id = 1; + uint64 version = 2; + + repeated DataItem data = 3; + + message FileData { + string file_name = 1; + bool is_dir =2; + bytes file_content = 3; + } + + message DataItem { + oneof data { + FileData file = 1; + bytes raw_bytes = 2; + } + } +} + +message WriteOneDataResponse { + uint64 remote_version = 1; + bool success = 2; + string message = 3; // Optional: To provide additional info in case of failure +} + diff --git a/src/main/src/general/network/proto_src/kv.proto b/src/general/network/proto_src/kv.proto similarity index 70% rename from src/main/src/general/network/proto_src/kv.proto rename to src/general/network/proto_src/kv.proto index ca19e91..4cc9bf0 100644 --- a/src/main/src/general/network/proto_src/kv.proto +++ b/src/general/network/proto_src/kv.proto @@ -14,21 +14,17 @@ message KvPair { message KvRequest { message KvPutRequest{ - // required KvPair kv=1; } message KvGetRequest{ - // required KeyRange range=1; } message KvDeleteRequest{ - // required KeyRange range=1; } message KvLockRequest{ bool read_or_write=1; repeated uint32 release_id=2; - // required KeyRange range=3; } oneof op { @@ -39,25 +35,6 @@ message KvRequest { } } -message KvLockRequest{ - bytes key=1; - uint32 read_0_write_1_unlock_2=2; - // use release_id to do the unlock - uint32 release_id=3; -} - -message KvLockResponse{ - bool success=1; - string context=2; - uint32 release_id=3; -} - -// message KvLockWaitAcquireNotifyRequest{ -// uint32 release_id=1; -// } - -// message KvLockWaitAcquireNotifyResponse{} - message KvPairs{ repeated KvPair kvs=1; } @@ -68,7 +45,6 @@ message KvResponse{ } oneof resp { KvResponse common_resp=1; - // 0 is invalid lock id uint32 lock_id=2; } } diff --git a/src/main/src/general/network/proto_src/metric.proto b/src/general/network/proto_src/metric.proto similarity index 100% rename from src/main/src/general/network/proto_src/metric.proto rename to src/general/network/proto_src/metric.proto diff --git a/src/main/src/general/network/proto_src/raft.proto b/src/general/network/proto_src/raft.proto similarity index 100% rename from src/main/src/general/network/proto_src/raft.proto rename to src/general/network/proto_src/raft.proto diff --git a/src/main/src/general/network/proto_src/remote_sys.proto b/src/general/network/proto_src/remote_sys.proto similarity index 100% rename from src/main/src/general/network/proto_src/remote_sys.proto rename to src/general/network/proto_src/remote_sys.proto diff --git a/src/general/network/proto_src/sche.proto b/src/general/network/proto_src/sche.proto new file mode 100644 index 0000000..723b804 --- /dev/null +++ b/src/general/network/proto_src/sche.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; +package sche; + +// import "network/proto_src/kv.proto"; + +// message MakeSchePlanReq{ 
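A note on msg_pack.rs above: define_msg_ids!/count_modules! assign MsgIds backwards through the type list, so with N registered message types the first one listed gets id N-1 and the last gets 0. A self-contained toy version of the same expansion (placeholder types Foo/Bar/Baz; the prost::Message and Downcast bounds are dropped for brevity):

type MsgId = u32;

trait MsgPack {
    fn msg_id(&self) -> MsgId;
}

macro_rules! count_modules {
    ($module:ty) => { 1u32 };
    ($module:ty,$($modules:ty),+) => { 1u32 + count_modules!($($modules),+) };
}

macro_rules! define_msg_ids {
    ($module:ty) => {
        impl MsgPack for $module {
            fn msg_id(&self) -> MsgId { 0 }
        }
    };
    ($module:ty,$($modules:ty),+) => {
        impl MsgPack for $module {
            fn msg_id(&self) -> MsgId { count_modules!($($modules),+) }
        }
        define_msg_ids!($($modules),+);
    };
}

struct Foo;
struct Bar;
struct Baz;
define_msg_ids!(Foo, Bar, Baz);

fn main() {
    assert_eq!(Foo.msg_id(), 2); // first listed type gets N-1
    assert_eq!(Bar.msg_id(), 1);
    assert_eq!(Baz.msg_id(), 0); // last listed type always gets 0
}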
+// message AppFn{ +// string app=1; +// string func=2; +// } +// enum TriggerType{ +// SetKv = 0; +// } +// repeated AppFn app_fns=1; +// TriggerType trigger_type=2; +// } + +// message MakeSchePlanResp{ +// // align to AppFn[] +// repeated uint32 sche_target_node=1; +// // the data target position +// uint32 data_target_node=2; +// } + +message DistributeTaskReq{ + message TriggerKvSet{ + bytes key=1; + uint32 opeid=2; + } + string app=1; + string func=2; + uint32 task_id=3; + oneof trigger{ + TriggerKvSet kv_set=4; + } +} + +message DistributeTaskResp{} + diff --git a/src/main/src/general/network/rpc_model.rs b/src/general/network/rpc_model.rs similarity index 86% rename from src/main/src/general/network/rpc_model.rs rename to src/general/network/rpc_model.rs index 2d9776d..f6249d2 100644 --- a/src/main/src/general/network/rpc_model.rs +++ b/src/general/network/rpc_model.rs @@ -14,7 +14,7 @@ use std::{ }; use tokio::{net::UnixListener, sync::oneshot}; -use crate::result::{WSResult, WsFuncError, WsRpcErr}; +use crate::result::{WSResult, WsRpcErr}; // start from the begining #[async_trait] @@ -71,7 +71,6 @@ pub async fn call( ) -> WSResult { // wait for connection if not connected - tracing::debug!("111111111111111111111111"); let tx = { let mut conn_map = CONN_MAP.write(); match conn_map.get_mut(&conn) { @@ -85,15 +84,11 @@ pub async fn call( } }; - tracing::debug!("22222222222222222222222222"); - // register the call back let (wait_tx, wait_rx) = oneshot::channel(); let next_task = NEXT_TASK_ID.fetch_add(1, Ordering::SeqCst); let _ = CALL_MAP.write().insert(next_task, wait_tx); - tracing::debug!("33333333333333333333333333"); - // send the request let mut buf = BytesMut::with_capacity(req.encoded_len() + 8); buf.put_i32(req.encoded_len() as i32); @@ -142,26 +137,23 @@ lazy_static! 
{ static ref NEXT_TASK_ID: AtomicU32 = AtomicU32::new(0); } -async fn listen_task(socket: tokio::net::UnixStream) -> WSResult<()> { +async fn listen_task(socket: tokio::net::UnixStream) { tracing::debug!("new connection: {:?}", socket.peer_addr().unwrap()); let (mut sockrx, socktx) = socket.into_split(); let mut buf = [0; 1024]; let mut len = 0; - let (conn, rx) = - match listen_task_ext::verify_remote::(&mut sockrx, &mut len, &mut buf).await { - Ok((conn, rx)) => (conn, rx), - Err(err) => { - tracing::debug!("verify failed {:?}", err); - return Err(WsFuncError::InsranceVerifyFailed("verify failed".to_string()).into()); - } - }; + + let Some((conn, rx)) = + listen_task_ext::verify_remote::(&mut sockrx, &mut len, &mut buf).await + else { + tracing::debug!("verify failed"); + return; + }; listen_task_ext::spawn_send_loop(rx, socktx); listen_task_ext::read_loop::(conn, &mut sockrx, &mut len, &mut buf).await; - - Ok(()) } pub(super) mod listen_task_ext { @@ -174,10 +166,7 @@ pub(super) mod listen_task_ext { sync::mpsc::Receiver, }; - use crate::{ - general::network::rpc_model::ConnState, - result::{WSResult, WsFuncError}, - }; + use crate::general::network::rpc_model::ConnState; use super::{HashValue, RpcCustom, CALL_MAP, CONN_MAP}; @@ -185,19 +174,16 @@ pub(super) mod listen_task_ext { sockrx: &mut OwnedReadHalf, len: &mut usize, buf: &mut [u8], - ) -> WSResult<(HashValue, Receiver>)> { + ) -> Option<(HashValue, Receiver>)> { async fn verify_remote_inner( sockrx: &mut OwnedReadHalf, len: &mut usize, buf: &mut [u8], - ) -> WSResult<(HashValue, Receiver>)> { + ) -> Option<(HashValue, Receiver>)> { // println!("waiting for verify head len"); if !wait_for_len(sockrx, len, 4, buf).await { tracing::warn!("failed to read verify head len"); - return Err(WsFuncError::InsranceVerifyFailed( - "failed to read verify head len".to_string(), - ) - .into()); + return None; } let verify_msg_len = consume_i32(0, buf, len); @@ -205,43 +191,34 @@ pub(super) mod listen_task_ext { // println!("waiting for verify msg {}", verify_msg_len); if !wait_for_len(sockrx, len, verify_msg_len, buf).await { tracing::warn!("failed to read verify msg"); - return Err(WsFuncError::InsranceVerifyFailed( - "failed to read verify msg".to_string(), - ) - .into()); + return None; } // println!("wait done"); let Some(id) = R::verify(&buf[4..4 + verify_msg_len]).await else { tracing::warn!("verify failed"); - return Err(WsFuncError::InsranceVerifyFailed("verify failed".to_string()).into()); + return None; }; let (tx, rx) = tokio::sync::mpsc::channel(10); let mut write_conn_map = CONN_MAP.write(); if write_conn_map.contains_key(&id) { tracing::warn!("conflict conn id: {:?}", id); - return Err( - WsFuncError::InsranceVerifyFailed("conflict conn id".to_string()).into(), - ); + return None; } let _ = write_conn_map.insert(id.clone(), ConnState { tx }); // println!("verify success"); - Ok((id, rx)) + Some((id, rx)) } - match tokio::time::timeout( + let res = tokio::time::timeout( Duration::from_secs(5), verify_remote_inner::(sockrx, len, buf), ) .await - { - Ok(ok) => ok, - Err(_) => { - tracing::warn!("verify timeout"); - Err(WsFuncError::InsranceVerifyFailed("verify timeout".to_string()).into()) - } - } + .unwrap_or_else(|_elapse| None); + // println!("verify return"); + res } pub(super) async fn read_loop( diff --git a/src/main/src/main.rs b/src/main.rs similarity index 65% rename from src/main/src/main.rs rename to src/main.rs index fefd8ca..bcd585d 100644 --- a/src/main/src/main.rs +++ b/src/main.rs @@ -8,16 +8,13 @@ 
clippy::unnecessary_mut_passed, unused_results, clippy::let_underscore_future, - clippy::let_underscore_future, - unused_must_use, - unconditional_recursion + clippy::let_underscore_future )] use clap::Parser; use cmd_arg::CmdArgs; -use sys::{LogicalModulesRef, Sys}; -use tracing::Level; +use sys::Sys; use tracing_subscriber::{ prelude::__tracing_subscriber_SubscriberExt, util::SubscriberInitExt, Layer, }; @@ -41,10 +38,7 @@ async fn main() { let config = config::read_config(args.this_id, args.files_dir); tracing::info!("config: {:?}", config); // dist_kv_raft::tikvraft_proxy::start(); - let mut sys=Sys::new(config); - let modules_ref=sys.new_logical_modules_ref(); - // modules_global_bridge::modules_ref_scope(modules_ref, async move{sys.wait_for_end().await;}) 由于modules_ref_scope改为了异步函数,所以这里加上.await 曾俊 - modules_global_bridge::modules_ref_scope(modules_ref, async move{sys.wait_for_end().await;}).await; + Sys::new(config).wait_for_end().await; } pub fn start_tracing() { @@ -67,21 +61,6 @@ pub fn start_tracing() { if mp.contains("hyper") { return false; } - if *v.level() == Level::DEBUG { - // if mp.contains("wasm_serverless::worker::m_kv_user_client") { - // return false; - // } - // if mp.contains("wasm_serverless::general::m_data_general") { - // return false; - // } - // if mp.contains("wasm_serverless::master::m_data_master") { - // return false; - // } - if mp.contains("sled::pagecache") { - return false; - } - // return false; - } } // if v.module_path().unwrap().contains("less::network::p2p") { @@ -96,9 +75,9 @@ pub fn start_tracing() { // true }); let my_layer = tracing_subscriber::fmt::layer(); - let _ = tracing_subscriber::registry() + tracing_subscriber::registry() .with(my_layer.with_filter(my_filter)) - .try_init(); + .init(); } pub fn new_test_systems() -> Vec { diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml deleted file mode 100644 index 12a8226..0000000 --- a/src/main/Cargo.toml +++ /dev/null @@ -1,96 +0,0 @@ -[package] -name = "wasm_serverless" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html -[features] -default = [] # 默认启用的特性 -unsafe-log = [] -rpc-log = [] - -[dependencies] -qp2p.workspace = true #{ path = "qp2p" } -tokio.workspace = true -thiserror.workspace = true -async-trait.workspace = true -prost.workspace = true -parking_lot.workspace = true -# raft = { version = "0.7.0", default-features = false, features = [ -# "prost-codec", -# ] } # tikv raft -async-raft.workspace = true #{ path = "async-raft/async-raft" } # -tracing.workspace = true -# openraft = "0.8" -serde.workspace = true -serde_json.workspace = true -serde_yaml.workspace = true -anyhow.workspace = true -slog.workspace = true -slog-async.workspace = true -slog-term.workspace = true -regex.workspace = true -camelpaste.workspace = true -tracing-subscriber.workspace = true -ws_derive.workspace = true -clap.workspace = true -downcast-rs.workspace = true -bincode.workspace = true -crossbeam-skiplist.workspace = true -lazy_static.workspace = true -axum.workspace = true -async-channel.workspace = true -sysinfo.workspace = true -ssh2.workspace = true -moka.workspace = true -rand.workspace = true -slotmap.workspace = true -prometheus-client.workspace = true -tower-http.workspace = true -tower.workspace = true -sled.workspace = true -enum-as-inner.workspace = true -reqwest.workspace = true -futures.workspace = true 
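The empty features declared in the Cargo.toml above (unsafe-log = [], rpc-log = []) are pure compile-time switches; the per-RPC tracing blocks removed from m_p2p.rs in this diff were gated on rpc-log in exactly this way. A hedged sketch of the pattern (function name and message are illustrative; the tracing dependency is only exercised when the feature is enabled):

fn dispatch_rpc(node_id: u32) {
    // Present only when built with `cargo build --features rpc-log`.
    #[cfg(feature = "rpc-log")]
    tracing::debug!("handling rpc from node {}", node_id);

    let _ = node_id; // ... real dispatch work follows here ...
}

fn main() {
    dispatch_rpc(7);
}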
-zip-extract.workspace = true -zip.workspace = true -walkdir.workspace = true -# s3_server = { path = "../s3_server" } -hyper.workspace = true -md-5.workspace = true -path-absolutize.workspace = true -dashmap.workspace = true -base64.workspace = true -hex = "0.4.3" -tempfile.workspace = true - -[dependencies.uuid] -version = "1.8.0" -features = [ - "v4", # Lets you generate random UUIDs - "fast-rng", # Use a faster (but still sufficiently random) RNG - "macro-diagnostics", # Enable better diagnostics for compile-time UUIDs -] - -# slog-envlogger = { version = "2.1.0", optional = true } - -[build-dependencies] -prost-build = { version = "0.12" } - - -#[target.'cfg( target_os = "macos" )'.dependencies] -#wasmer = "4.2.5" - -[target.'cfg(target_os = "linux")'.dependencies] -wasmedge-sdk = { version = "0.10.1", features = ["async"] } - -[profile.test] -# 0: no optimizations -# 1: basic optimizations -# 2: some optimizations -# 3: all optimizations -# "s": optimize for binary size -# "z": optimize for binary size, but also turn off loop vectorization. -opt-level = 3 # Use slightly better optimizations. -overflow-checks = false # Disable integer overflow checks. diff --git a/src/main/src/general/app/app_native/app_checkpoint.rs b/src/main/src/general/app/app_native/app_checkpoint.rs deleted file mode 100644 index e69de29..0000000 diff --git a/src/main/src/general/app/app_native/mod.rs b/src/main/src/general/app/app_native/mod.rs deleted file mode 100644 index 5bf2a7e..0000000 --- a/src/main/src/general/app/app_native/mod.rs +++ /dev/null @@ -1,108 +0,0 @@ -pub mod app_checkpoint; - -use std::collections::HashMap; - -use super::{ - AffinityPattern, AffinityRule, AppMeta, AppType, DataAccess, DataEventTrigger, FnMeta, - KeyPattern, NodeTag, -}; -use crate::general::app::instance::{Instance, InstanceTrait}; -use crate::general::app::m_executor::{FnExeCtxAsync, FnExeCtxSync}; -use crate::general::data::m_data_general::DATA_UID_PREFIX_APP_META; -use crate::new_map; -use crate::result::{WSResult, WsFuncError}; -use async_trait::async_trait; - -pub struct NativeAppInstance { - _dummy_private: (), // avoid empty struct -} - -impl NativeAppInstance { - pub fn new() -> Self { - Self { _dummy_private: () } - } -} - -#[async_trait] -impl InstanceTrait for NativeAppInstance { - // don't need instance name - fn instance_name(&self) -> String { - "native_app_dummy_instance".to_string() - } - async fn execute(&self, _fn_ctx: &mut FnExeCtxAsync) -> WSResult> { - // Native apps don't support async execution - Err(WsFuncError::UnsupportedAppType.into()) - } - - fn execute_sync(&self, _fn_ctx: &mut FnExeCtxSync) -> WSResult> { - // For now, just return None as native apps don't produce results - todo!() - // Ok(None) - } -} - -impl From for Instance { - fn from(v: NativeAppInstance) -> Self { - Self::Native(v) - } -} - -pub fn native_apps() -> HashMap { - let mut nativeapps = HashMap::new(); - // https://fvd360f8oos.feishu.cn/wiki/GGUnw0H1diVoHSkgm3vcMhtbnjI - // app_checkpoint: - // checkpointable: - // inner_dataset: - // app_{}: - // - get - // checkpoint: - // inner_dataset: - // app_{}: - // - trigger_by_write: - // condition: checkpointable - // - get - let _ = nativeapps.insert( - "app_checkpoint".to_string(), - AppMeta::new( - AppType::Native, - new_map!(HashMap { - "checkpointable".to_string() => FnMeta { - sync_async: super::FnSyncAsyncSupport::Sync, - calls: vec![], - data_accesses: Some(new_map!(HashMap { - KeyPattern(DATA_UID_PREFIX_APP_META.to_string()) => DataAccess { - get: true, - set: false, - 
delete: false, - event: None, - } - })), - affinity: Some(AffinityRule { - tags: vec![NodeTag::Worker], - nodes: AffinityPattern::All, - }), - }, - "checkpoint".to_string() => FnMeta { - sync_async: super::FnSyncAsyncSupport::Async, - calls: vec![], - data_accesses: Some(new_map!(HashMap { - KeyPattern(DATA_UID_PREFIX_APP_META.to_string()) => DataAccess { - get: true, - set: false, - delete: false, - event: Some(DataEventTrigger::WriteWithCondition { - condition: "checkpointable".to_string(), - }), - } - })), - affinity: Some(AffinityRule { - tags: vec![NodeTag::Worker], - nodes: AffinityPattern::All, - }), - }, - }), - ), - ); - - nativeapps -} diff --git a/src/main/src/general/app/app_owned/mod.rs b/src/main/src/general/app/app_owned/mod.rs deleted file mode 100644 index 615bf55..0000000 --- a/src/main/src/general/app/app_owned/mod.rs +++ /dev/null @@ -1,28 +0,0 @@ -pub mod wasm; -pub mod wasm_host_funcs; - -use crate::general::app::instance::InstanceTrait; -use crate::general::app::instance::OwnedInstance; -use crate::general::app::m_executor::{FnExeCtxAsync, FnExeCtxSync}; -use crate::result::{WSResult}; -use async_trait::async_trait; - -#[async_trait] -impl InstanceTrait for OwnedInstance { - fn instance_name(&self) -> String { - match self { - OwnedInstance::WasmInstance(v) => v.instance_name(), - } - } - async fn execute(&self, fn_ctx: &mut FnExeCtxAsync) -> WSResult> { - match self { - OwnedInstance::WasmInstance(v) => v.execute(fn_ctx).await, - } - } - - fn execute_sync(&self, fn_ctx: &mut FnExeCtxSync) -> WSResult> { - match self { - OwnedInstance::WasmInstance(v) => v.execute_sync(fn_ctx), - } - } -} diff --git a/src/main/src/general/app/app_shared/java.rs b/src/main/src/general/app/app_shared/java.rs deleted file mode 100644 index 432edf5..0000000 --- a/src/main/src/general/app/app_shared/java.rs +++ /dev/null @@ -1,106 +0,0 @@ -use std::{path::PathBuf, str::from_utf8, time::Duration}; - -use tokio::process::{self, Command}; - -use crate::{ - general::m_os::{OperatingSystem, OsProcessType}, - result::{WSError, WSResult, WsFuncError}, -}; - -use super::process::PID; - -pub(super) struct JavaColdStart { - _dummy_private: (), -} - -impl JavaColdStart { - pub(super) async fn mksure_checkpoint(appdir: PathBuf) -> Self { - let mut i = 0; - loop { - // if dir not exist, continue - if !appdir.join("checkpoint-dir").exists() { - continue; - } - - let checkpoint_dir = appdir.join("checkpoint-dir"); - - // let lsres = Command::new("ls") - // .arg("-l") - // .arg(checkpoint_dir.to_str().unwrap()) - // .output() - // .await - // .expect("ls failed"); - - // tracing::debug!("ls checkpoint-dir output: {:?}", lsres); - - let res = Command::new("lsof") - .arg("+D") // check all process with files in checkpoint-dir - .arg(checkpoint_dir.to_str().unwrap()) - .output() - .await - .expect("lsof failed"); - - tracing::debug!("lsof checkpoint-dir output: {:?}", res); - - let output = from_utf8(&res.stdout).expect("failed to parse output to string"); - if output == "" { - break; - } - - let sleep_time = match i { - 0 => 1000, - 1 => 500, - _ => 200, - }; - tokio::time::sleep(Duration::from_millis(sleep_time)).await; - i += 1; - } - - Self { _dummy_private: () } - } - - pub fn direct_start() -> Self { - Self { _dummy_private: () } - } - - pub(super) fn cold_start(self, app: &str, os: &OperatingSystem) -> WSResult { - tracing::debug!("java cold start {}", app); - let p = os.start_process(OsProcessType::JavaApp(app.to_owned())); - Ok(p) - } -} - -pub(super) async fn find_pid(app: &str) -> WSResult { - 
let res = Command::new("jcmd") - .arg("-l") - .output() - .await - .map_err(|e| WSError::from(WsFuncError::InstanceProcessStartFailed(e)))?; - let res = from_utf8(&res.stdout).expect("failed to parse output to string"); - let res = res.split(|x| x == '\n').collect::>(); - tracing::debug!("jcmd output: {:?}", res); - let err = || Err(WsFuncError::InstanceJavaPidNotFound(app.to_owned()).into()); - let Some(res) = res - .iter() - .filter(|x| x.contains(&format!("--appName={}", app))) - .next() - else { - return err(); - }; - let Some(res) = res.split(|x| x == ' ').next() else { - return err(); - }; - let Ok(pid) = res.parse::() else { - return err(); - }; - Ok(pid) -} - -pub(super) async fn take_snapshot(app: &str, os: &OperatingSystem) { - let res = os - .start_process(OsProcessType::JavaCheckpoints(app.to_owned())) - .wait() - .await - .unwrap(); - assert!(res.success()); -} diff --git a/src/main/src/general/app/instance/mod.rs b/src/main/src/general/app/instance/mod.rs deleted file mode 100644 index 4197bce..0000000 --- a/src/main/src/general/app/instance/mod.rs +++ /dev/null @@ -1,73 +0,0 @@ -pub mod m_instance_manager; - -use super::app_native::NativeAppInstance; -use super::app_shared::SharedInstance; -use super::m_executor::{FnExeCtxAsync, FnExeCtxSync}; -use crate::general::app::app_owned::wasm::WasmInstance; -use crate::general::app::app_shared::process::ProcessInstance; -use crate::result::WSResult; -use async_trait::async_trait; -use enum_as_inner::EnumAsInner; - -#[derive(EnumAsInner)] -pub enum OwnedInstance { - WasmInstance(WasmInstance), -} - -pub enum Instance { - Owned(OwnedInstance), - Shared(SharedInstance), - Native(NativeAppInstance), -} -impl From for Instance { - fn from(v: OwnedInstance) -> Self { - Self::Owned(v) - } -} - -impl From for Instance { - fn from(v: SharedInstance) -> Self { - Self::Shared(v) - } -} - -impl From for Instance { - fn from(v: ProcessInstance) -> Self { - Self::Shared(SharedInstance(v)) - } -} - -#[async_trait] -impl InstanceTrait for Instance { - fn instance_name(&self) -> String { - match self { - Instance::Owned(v) => v.instance_name(), - Instance::Shared(v) => v.instance_name(), - Instance::Native(v) => v.instance_name(), - } - } - async fn execute(&self, fn_ctx: &mut FnExeCtxAsync) -> WSResult> { - match self { - Instance::Owned(v) => v.execute(fn_ctx).await, - Instance::Shared(v) => v.execute(fn_ctx).await, - Instance::Native(v) => v.execute(fn_ctx).await, - } - } - - fn execute_sync(&self, fn_ctx: &mut FnExeCtxSync) -> WSResult> { - match self { - Instance::Owned(v) => v.execute_sync(fn_ctx), - Instance::Shared(v) => v.execute_sync(fn_ctx), - Instance::Native(v) => v.execute_sync(fn_ctx), - } - } -} - -pub enum NewJavaInstanceConfig {} - -#[async_trait] -pub trait InstanceTrait { - fn instance_name(&self) -> String; - async fn execute(&self, fn_ctx: &mut FnExeCtxAsync) -> WSResult>; - fn execute_sync(&self, fn_ctx: &mut FnExeCtxSync) -> WSResult>; -} diff --git a/src/main/src/general/app/m_executor.rs b/src/main/src/general/app/m_executor.rs deleted file mode 100644 index 7e90948..0000000 --- a/src/main/src/general/app/m_executor.rs +++ /dev/null @@ -1,712 +0,0 @@ -use crate::general::app::instance::m_instance_manager::InstanceManager; -use crate::general::app::instance::m_instance_manager::UnsafeFunctionCtx; -use crate::general::app::instance::InstanceTrait; -use crate::general::app::AppType; -use crate::general::app::FnMeta; -use crate::result::WSError; -use crate::{ - general::{ - app::AppMetaManager, - network::{ - 
http_handler::ReqId, - m_p2p::{P2PModule, RPCHandler, RPCResponsor}, - proto::{ - self, - sche::{distribute_task_req, DistributeTaskResp}, - }, - }, - }, - logical_module_view_impl, - result::{WSResult, WsFuncError}, - sys::{LogicalModule, LogicalModuleNewArgs, LogicalModulesRef}, - util::JoinHandleWrapper, -}; -use async_trait::async_trait; -use std::{ - ptr::NonNull, - sync::atomic::{AtomicU32, AtomicUsize}, - time::{SystemTime, UNIX_EPOCH}, -}; -use tokio::sync::oneshot; -use tokio::task::JoinHandle; -#[cfg(target_os = "linux")] -use ws_derive::LogicalModule; - -pub type SubTaskId = u32; - -pub type SubTaskNotifier = oneshot::Sender; - -pub type SubTaskWaiter = oneshot::Receiver; - -#[derive(Clone, Debug)] -pub enum EventCtx { - Http(String), - KvSet { key: Vec, opeid: Option }, -} - -impl EventCtx { - pub fn take_prev_kv_opeid(&mut self) -> Option { - match self { - EventCtx::KvSet { opeid, .. } => opeid.take(), - _ => None, - } - } -} - -struct FnExeCtx { - pub app: String, - pub app_type: AppType, - pub func: String, - pub _func_meta: FnMeta, - pub _req_id: ReqId, - pub event_ctx: EventCtx, - pub res: Option, - /// remote scheduling tasks - pub sub_waiters: Vec>, // pub trigger_node: NodeID, - _dummy_private: (), -} - -pub enum FnExeCtxAsyncAllowedType { - Jar, - Wasm, - Native, -} - -impl TryFrom for FnExeCtxAsyncAllowedType { - type Error = WSError; - fn try_from(v: AppType) -> Result { - match v { - AppType::Jar => Ok(FnExeCtxAsyncAllowedType::Jar), - AppType::Wasm => Ok(FnExeCtxAsyncAllowedType::Wasm), - AppType::Native => Ok(FnExeCtxAsyncAllowedType::Native), - } - } -} - -impl Into for FnExeCtxAsyncAllowedType { - fn into(self) -> AppType { - match self { - FnExeCtxAsyncAllowedType::Jar => AppType::Jar, - FnExeCtxAsyncAllowedType::Wasm => AppType::Wasm, - FnExeCtxAsyncAllowedType::Native => AppType::Native, - } - } -} - -pub struct FnExeCtxAsync { - inner: FnExeCtx, -} - -impl FnExeCtxAsync { - pub fn new( - apptype: FnExeCtxAsyncAllowedType, - app: String, - func: String, - func_meta: FnMeta, - req_id: ReqId, - event_ctx: EventCtx, - ) -> Self { - Self { - inner: FnExeCtx { - app, - func, - _req_id: req_id, - event_ctx, - res: None, - sub_waiters: vec![], - app_type: apptype.into(), - _func_meta: func_meta, - _dummy_private: (), - }, - } - } - - pub fn event_ctx(&self) -> &EventCtx { - &self.inner.event_ctx - } - - pub fn empty_http(&self) -> bool { - match &self.inner.event_ctx { - EventCtx::Http(text) => text.is_empty(), - _ => false, - } - } - - pub fn http_str_unwrap(&self) -> String { - match &self.inner.event_ctx { - EventCtx::Http(text) => text.clone(), - _ => panic!("not http event ctx"), - } - } - - pub fn set_result(&mut self, result: Option) { - self.inner.res = result; - } - - pub fn take_result(&mut self) -> Option { - self.inner.res.take() - } -} - -pub enum FnExeCtxSyncAllowedType { - Native, -} - -impl TryFrom for FnExeCtxSyncAllowedType { - type Error = WSError; - fn try_from(v: AppType) -> Result { - match v { - AppType::Native => Ok(FnExeCtxSyncAllowedType::Native), - AppType::Jar | AppType::Wasm => Err(WSError::from(WsFuncError::UnsupportedAppType)), - } - } -} - -impl Into for FnExeCtxSyncAllowedType { - fn into(self) -> AppType { - AppType::Native - } -} - -pub struct FnExeCtxSync { - inner: FnExeCtx, -} - -impl FnExeCtxSync { - pub fn new( - apptype: FnExeCtxAsyncAllowedType, - app: String, - func: String, - func_meta: FnMeta, - req_id: ReqId, - event_ctx: EventCtx, - ) -> Self { - Self { - inner: FnExeCtx { - app, - func, - _req_id: req_id, - event_ctx, 
- res: None, - sub_waiters: vec![], - app_type: apptype.into(), - _func_meta: func_meta, - _dummy_private: (), - }, - } - } -} - -// impl FnExeCtx { -// pub fn empty_http(&self) -> bool { -// match &self.event_ctx { -// EventCtx::Http(str) => str.len() == 0, -// _ => false, -// } -// } -// /// call this when you are sure it's a http event -// pub fn http_str_unwrap(&self) -> String { -// match &self.event_ctx { -// EventCtx::Http(str) => str.to_owned(), -// _ => panic!("not a http event"), -// } -// } -// } - -logical_module_view_impl!(ExecutorView); -logical_module_view_impl!(ExecutorView, p2p, P2PModule); -logical_module_view_impl!(ExecutorView, appmeta_manager, AppMetaManager); -logical_module_view_impl!(ExecutorView, instance_manager, InstanceManager); -logical_module_view_impl!(ExecutorView, executor, Executor); - -#[derive(LogicalModule)] -pub struct Executor { - sub_task_id: AtomicU32, - rpc_handler_distribute_task: RPCHandler, - next_req_id: AtomicUsize, - view: ExecutorView, -} - -/// Base trait for function execution contexts -pub trait FnExeCtxBase { - /// Get the application name - fn app(&self) -> &str; - /// Get the function name - fn func(&self) -> &str; - /// Get the event context - fn event_ctx(&self) -> &EventCtx; - /// Get mutable reference to event context - fn event_ctx_mut(&mut self) -> &mut EventCtx; -} - -impl FnExeCtxBase for FnExeCtxAsync { - fn app(&self) -> &str { - &self.inner.app - } - fn func(&self) -> &str { - &self.inner.func - } - fn event_ctx(&self) -> &EventCtx { - &self.inner.event_ctx - } - fn event_ctx_mut(&mut self) -> &mut EventCtx { - &mut self.inner.event_ctx - } -} - -impl FnExeCtxBase for FnExeCtxSync { - fn app(&self) -> &str { - &self.inner.app - } - fn func(&self) -> &str { - &self.inner.func - } - fn event_ctx(&self) -> &EventCtx { - &self.inner.event_ctx - } - fn event_ctx_mut(&mut self) -> &mut EventCtx { - &mut self.inner.event_ctx - } -} - -#[async_trait] -impl LogicalModule for Executor { - fn inner_new(args: LogicalModuleNewArgs) -> Self - where - Self: Sized, - { - Self { - rpc_handler_distribute_task: RPCHandler::default(), - view: ExecutorView::new(args.logical_modules_ref.clone()), - sub_task_id: AtomicU32::new(0), - next_req_id: AtomicUsize::new(0), - } - } - async fn start(&self) -> WSResult> { - let view = self.view.clone(); - self.view.executor().rpc_handler_distribute_task.regist( - self.view.p2p(), - move |responser, r| { - // tracing::info!("rpc recv: {:?}", r); - let view = view.clone(); - let _ = tokio::spawn(async move { - view.executor().handle_distribute_task(responser, r).await; - - // if let Err(err) = responser - // .send_resp(proto::sche::DistributeTaskResp {}) - // .await - // { - // tracing::error!("send sche resp failed with err: {}", err); - // } - }); - Ok(()) - }, - ); - // self.view - // .p2p() - // .regist_rpc::(); - Ok(vec![]) - } -} - -impl Executor { - pub fn register_sub_task(&self) -> SubTaskId { - let taskid = self - .sub_task_id - .fetch_add(1, std::sync::atomic::Ordering::Relaxed); - taskid - } - - pub async fn local_call_execute_async(&self, ctx: FnExeCtxAsync) -> WSResult> { - self.execute(ctx).await - } - - pub fn local_call_execute_sync(&self, ctx: FnExeCtxSync) -> WSResult> { - self.execute_sync(ctx) - } - - pub async fn handle_distribute_task( - &self, - resp: RPCResponsor, - req: proto::sche::DistributeTaskReq, - ) { - tracing::debug!("receive distribute task: {:?}", req); - let app = req.app.to_owned(); - let func = req.func.to_owned(); - let (appmeta, _) = match 
self.view.appmeta_manager().get_app_meta(&app).await { - Ok(Some(appmeta)) => appmeta, - Ok(None) => { - tracing::warn!("app {} not found in data meta", app); - if let Err(err) = resp - .send_resp(DistributeTaskResp { - success: false, - err_msg: format!("app {} not found in data meta", app), - }) - .await - { - tracing::error!("send distribute task resp failed with err: {}", err); - } - return; - } - Err(err) => { - tracing::error!("get appmeta failed with err: {}", err); - if let Err(err) = resp - .send_resp(DistributeTaskResp { - success: false, - err_msg: format!("get appmeta failed with err: {}", err), - }) - .await - { - tracing::error!("send distribute task resp failed with err: {}", err); - } - return; - } - }; - - let apptype = appmeta.app_type.clone(); - let Some(fnmeta) = appmeta.get_fn_meta(&func) else { - tracing::warn!("func {} not found, exist:{:?}", func, appmeta.fns()); - if let Err(err) = resp - .send_resp(DistributeTaskResp { - success: false, - err_msg: format!("func {} not found, exist:{:?}", func, appmeta.fns()), - }) - .await - { - tracing::error!("send distribute task resp failed with err: {}", err); - } - return; - }; - - // distribute task requires async support - if !fnmeta.sync_async.asyncable() { - let warn = format!( - "func {} not support async, meta:{:?}", - func, fnmeta.sync_async - ); - tracing::warn!("{}", warn); - if let Err(err) = resp - .send_resp(DistributeTaskResp { - success: false, - err_msg: warn, - }) - .await - { - tracing::error!("send distribute task resp failed with err: {}", err); - } - return; - } - - // construct async fn exe ctx - let ctx = FnExeCtxAsync::new( - match FnExeCtxAsyncAllowedType::try_from(apptype) { - Ok(v) => v, - Err(err) => { - let warn = format!("app type {:?} not supported, err: {}", apptype, err); - tracing::warn!("{}", warn); - if let Err(err) = resp - .send_resp(DistributeTaskResp { - success: false, - err_msg: warn, - }) - .await - { - tracing::error!("send distribute task resp failed with err: {}", err); - } - return; - } - }, - req.app, - req.func, - fnmeta.clone(), - req.task_id as usize, - match req.trigger.unwrap() { - distribute_task_req::Trigger::EventNew(new) => EventCtx::KvSet { - key: new.key, - opeid: Some(new.opeid), - }, - distribute_task_req::Trigger::EventWrite(write) => EventCtx::KvSet { - key: write.key, - opeid: Some(write.opeid), - }, - }, - ); - - if let Err(err) = resp - .send_resp(DistributeTaskResp { - success: true, - err_msg: "".to_owned(), - }) - .await - { - tracing::error!("send sche resp for app:{app} fn:{func} failed with err: {err}"); - } - let _ = self.execute(ctx).await; - } - - pub async fn handle_http_task(&self, route: &str, text: String) -> WSResult> { - let req_id: ReqId = self - .next_req_id - .fetch_add(1, std::sync::atomic::Ordering::SeqCst); - - //////////////////////////////////////////////////// - // route format //////////////////////////////////// - // format route, remove last / - let route = if route.ends_with('/') { - &route[..route.len() - 1] - } else { - route - }; - let split = route.split("/").into_iter().collect::>(); - // check path ok - if split.len() != 2 { - tracing::warn!( - "route {} not support, only support appname/funcname now", - route - ); - return Err(WsFuncError::InvalidHttpUrl(route.to_owned()).into()); - } - - ///////////////////////////////////////////////// - // existence //////////////////////////////////// - // trigger app - let appname = split[0]; - let funcname = split[1]; - - // check app exist - tracing::debug!("calling get_app_meta to 
check app exist, app: {}", appname); - let Some((appmeta, datameta_opt)) = - self.view.appmeta_manager().get_app_meta(appname).await? - else { - tracing::warn!("app {} not found", appname); - return Err(WsFuncError::AppNotFound { - app: appname.to_owned(), - } - .into()); - }; - // check func exist - let Some(func) = appmeta.get_fn_meta(funcname) else { - tracing::warn!("func {} not found, exist:{:?}", funcname, appmeta.fns()); - return Err(WsFuncError::FuncNotFound { - app: appname.to_owned(), - func: funcname.to_owned(), - } - .into()); - }; - - // get app file and extract to execute dir - if let Some(datameta) = datameta_opt { - self.view - .appmeta_manager() - .load_app_file(appname, datameta) - .await?; - } - - ///////////////////////////////////////////////// - // valid call /////////////////////////////////// - if func - .calls - .iter() - .filter(|call| call.as_http().is_some()) - .next() - .is_none() - { - tracing::warn!( - "func {} not support http trigger, meta:{:?}", - funcname, - func - ); - return Err(WsFuncError::FuncHttpNotSupported { - fname: funcname.to_owned(), - fmeta: func.clone(), - } - .into()); - } - - ///////////////////////////////////////////////// - // prepare ctx and run ////////////////////////// - - if func.sync_async.asyncable() { - let ctx = FnExeCtxAsync::new( - FnExeCtxAsyncAllowedType::try_from(appmeta.app_type.clone()).unwrap(), - appname.to_owned(), - funcname.to_owned(), - func.clone(), - req_id, - EventCtx::Http(text), - ); - self.execute(ctx).await - } else { - let ctx = FnExeCtxSync::new( - FnExeCtxAsyncAllowedType::try_from(appmeta.app_type.clone()).unwrap(), - appname.to_owned(), - funcname.to_owned(), - func.clone(), - req_id, - EventCtx::Http(text), - ); - - self.execute_sync(ctx) - } - } - // pub async fn execute_http_app(&self, fn_ctx_builder: FunctionCtxBuilder) { - // let app_meta_man = self.view.instance_manager().app_meta_manager.read().await; - // if let Some(app) = app_meta_man.get_app_meta(&fn_ctx_builder.app) { - // if let Some(func) = app.http_trigger_fn() { - // self.execute(fn_ctx_builder.build(func.to_owned())).await; - // } - // } - // } - // fn execute_sche_req(&self, sche_req: proto::sche::ScheReq) { - // // let vm = self - // // .view - // // .instance_manager() - // // .load_instance(&sche_req.app) - // // .await; - - // // let _ = self - // // .view - // // .instance_manager() - // // .instance_running_function - // // .write() - // // .insert( - // // vm.instance_names()[0].clone(), - // // Arc::new((sche_req.app.to_owned(), sche_req.func.to_owned())), - // // ); - - // // self.view - // // .instance_manager() - // // .finish_using(&sche_req.app, vm) - // // .await - // } - - fn execute_sync(&self, mut ctx: FnExeCtxSync) -> WSResult> { - let instance = self - .view - .instance_manager() - .load_instance_sync(&ctx.inner.app_type, &ctx.inner.app)?; - - let _ = self - .view - .instance_manager() - .instance_running_function - .insert( - instance.instance_name().to_owned(), - UnsafeFunctionCtx::Sync( - NonNull::new(&ctx as *const FnExeCtxSync as *mut FnExeCtxSync).unwrap(), - ), - ); - - tracing::debug!( - "start run sync instance {} app {} fn {}", - instance.instance_name(), - ctx.inner.app, - ctx.inner.func - ); - - let bf_exec_time = SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("Time went backwards") - .as_millis() as u64; - - tracing::debug!("start execute sync"); - let res = instance.execute_sync(&mut ctx)?; - - let res = res.map(|v| { - let mut res: serde_json::Value = serde_json::from_str(&*v).unwrap(); - 
let _ = res.as_object_mut().unwrap().insert( - "bf_exec_time".to_owned(), - serde_json::Value::from(bf_exec_time), - ); - serde_json::to_string(&res).unwrap() - }); - - let _ = self - .view - .instance_manager() - .instance_running_function - .remove(&instance.instance_name()); - - tracing::debug!( - "finish run sync instance {} fn {}, res:{:?}", - instance.instance_name(), - ctx.inner.func, - res - ); - - self.view - .instance_manager() - .finish_using(&ctx.inner.app, instance); - - Ok(res) - } - - /// prepare app and func before call execute - async fn execute(&self, mut fn_ctx: FnExeCtxAsync) -> WSResult> { - let instance = self - .view - .instance_manager() - .load_instance(&fn_ctx.inner.app_type, &fn_ctx.inner.app) - .await; - - let _ = self - .view - .instance_manager() - .instance_running_function - .insert( - instance.instance_name().to_owned(), - UnsafeFunctionCtx::Async( - NonNull::new(&fn_ctx as *const FnExeCtxAsync as *mut FnExeCtxAsync).unwrap(), - ), - ); - - tracing::debug!( - "start run instance {} app {} fn {}", - instance.instance_name(), - fn_ctx.inner.app, - fn_ctx.inner.func - ); - - let bf_exec_time = SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("Time went backwards") - .as_millis() as u64; - - tracing::debug!("start execute"); - let res = instance.execute(&mut fn_ctx).await; - - let res = res.map(|v| { - v.map(|v| { - let mut res: serde_json::Value = serde_json::from_str(&*v).unwrap(); - let _ = res.as_object_mut().unwrap().insert( - "bf_exec_time".to_owned(), - serde_json::Value::from(bf_exec_time), - ); - serde_json::to_string(&res).unwrap() - }) - }); - - let _ = self - .view - .instance_manager() - .instance_running_function - .remove(&instance.instance_name()); - - tracing::debug!( - "finish run instance {} fn {}, res:{:?}", - instance.instance_name(), - fn_ctx.inner.func, - res - ); - - while let Some(t) = fn_ctx.inner.sub_waiters.pop() { - let _ = t.await.unwrap(); - } - - self.view - .instance_manager() - .finish_using(&fn_ctx.inner.app, instance); - - res - } -} diff --git a/src/main/src/general/app/mod.rs b/src/main/src/general/app/mod.rs deleted file mode 100644 index be46a7e..0000000 --- a/src/main/src/general/app/mod.rs +++ /dev/null @@ -1,1336 +0,0 @@ -pub mod app_native; -pub mod app_owned; -pub mod app_shared; -mod http; -pub mod instance; -pub mod m_executor; -pub mod v_os; - -use std::path::PathBuf; -use super::data::m_data_general::{DataSetMetaV2, GetOrDelDataArg, GetOrDelDataArgType}; -use super::m_os::APPS_REL_DIR; -use crate::general::app::app_native::native_apps; -use crate::general::app::instance::m_instance_manager::InstanceManager; -use crate::general::app::m_executor::Executor; -use crate::general::app::m_executor::FnExeCtxAsyncAllowedType; -use crate::general::app::v_os::AppMetaVisitOs; -use crate::general::data::m_data_general::dataitem::DataItemArgWrapper; -use crate::general::network::proto_ext::ProtoExtDataItem; -use crate::util::VecExt; -use crate::{general::network::proto, result::WSResultExt}; -use crate::{ - general::{ - data::{ - m_data_general::{DataGeneral, DATA_UID_PREFIX_APP_META}, - m_kv_store_engine::{KeyTypeServiceList, KvAdditionalConf, KvStoreEngine}, - }, - m_os::OperatingSystem, - network::{ - http_handler::HttpHandler, - m_p2p::P2PModule, - proto::{data_schedule_context::OpeRole, DataOpeRoleUploadApp}, - }, - }, - result::{WSError, WsDataError}, -}; -use crate::{ - logical_module_view_impl, - master::m_master::Master, - result::{WSResult, WsFuncError}, - sys::{LogicalModule, LogicalModuleNewArgs, 
LogicalModulesRef, NodeID}, - util::{self, JoinHandleWrapper}, -}; -use async_trait::async_trait; -use axum::body::Bytes; -use enum_as_inner::EnumAsInner; -use m_executor::FnExeCtxSyncAllowedType; -use serde::{de::Error, Deserialize, Deserializer, Serialize}; -use std::{ - borrow::Borrow, - collections::{BTreeMap, HashMap}, - fs, - io::Cursor, - path::Path, -}; -use tokio::sync::RwLock; -use ws_derive::LogicalModule; - -logical_module_view_impl!(View); -logical_module_view_impl!(View, os, OperatingSystem); -logical_module_view_impl!(View, kv_store_engine, KvStoreEngine); -logical_module_view_impl!(View, http_handler, Box); -logical_module_view_impl!(View, appmeta_manager, AppMetaManager); -logical_module_view_impl!(View, p2p, P2PModule); -logical_module_view_impl!(View, master, Option); -logical_module_view_impl!(View, instance_manager, InstanceManager); -logical_module_view_impl!(View, data_general, DataGeneral); -logical_module_view_impl!(View, executor, Executor); - -#[derive(Debug, Serialize, Deserialize)] -#[serde(untagged)] -pub enum FnEventYaml { - HttpFn { http_fn: () }, - HttpApp { http_app: () }, - KvSet { kv_set: usize }, -} - -#[derive(PartialEq, Eq)] -pub enum FnEvent { - HttpFn, - HttpApp, - KvSet(usize), -} - -impl From for FnEvent { - fn from(yaml: FnEventYaml) -> Self { - match yaml { - FnEventYaml::HttpFn { http_fn: _ } => Self::HttpFn, - FnEventYaml::HttpApp { http_app: _ } => Self::HttpApp, - FnEventYaml::KvSet { kv_set } => Self::KvSet(kv_set), - } - } -} - -// #[derive(Debug, Serialize, Deserialize)] -// #[serde(untagged)] -// pub enum FnArgYaml { -// KvKey { kv_key: usize }, -// HttpText { http_text: () }, -// } - -// #[derive(Debug)] -// pub enum FnArg { -// KvKey(usize), -// HttpText, -// } - -// impl From for FnArg { -// fn from(yaml: FnArgYaml) -> Self { -// match yaml { -// FnArgYaml::KvKey { kv_key } => Self::KvKey(kv_key), -// FnArgYaml::HttpText { http_text: _ } => Self::HttpText, -// } -// } -// } - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum HttpMethod { - Get, - Post, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum HttpCall { - Direct, - Indirect, -} - -#[derive(Debug, EnumAsInner, Clone, Serialize, Deserialize)] -pub enum FnCallMeta { - Http { method: HttpMethod, call: HttpCall }, - Rpc, - Event, -} - -#[derive(Debug)] -pub struct FnMetaYaml { - /// "sync" or "async" - pub sync: Option, - pub calls: Vec, - pub kvs: Option>>, - pub affinity: Option, -} - -impl<'de> Deserialize<'de> for FnMetaYaml { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let mut map = serde_yaml::Value::deserialize(deserializer)?; - let map = map - .as_mapping_mut() - .ok_or_else(|| D::Error::custom("not a map"))?; - - let mut calls = vec![]; - - // Helper block for parsing HTTP call configuration from YAML - // This block encapsulates the logic for extracting and validating HTTP call parameters - let parse_http_call = |v: &serde_yaml::Value| -> Result { - let map = v - .as_mapping() - .ok_or_else(|| D::Error::custom("not a map"))?; - let call = map - .get("call") - .ok_or_else(|| D::Error::missing_field("call"))?; - let call = call - .as_str() - .ok_or_else(|| D::Error::custom("not a string"))?; - match call { - "direct" => Ok(HttpCall::Direct), - "indirect" => Ok(HttpCall::Indirect), - _ => Err(D::Error::custom("invalid call type")), - } - }; - - if let Some(v) = map.get("http.get") { - let call = parse_http_call(v)?; - calls.push(FnCallMeta::Http { - method: HttpMethod::Get, - call, - }); - } - if let Some(v) = 
map.get("http.post") { - let call = parse_http_call(v)?; - calls.push(FnCallMeta::Http { - method: HttpMethod::Post, - call, - }); - } - if let Some(_v) = map.get("rpc") { - calls.push(FnCallMeta::Rpc); - } - - let kvs = map.remove("kvs"); - let kvs = if let Some(kvs) = kvs { - serde_yaml::from_value(kvs).map_err(|e| D::Error::custom(e.to_string()))? - } else { - None - }; - - let sync = if let Some(sync) = map.get("sync") { - let sync = sync - .as_str() - .ok_or_else(|| D::Error::custom("sync value must be a string"))?; - match sync { - "sync" | "async" => Some(sync.to_string()), - _ => return Err(D::Error::custom("sync value must be 'sync' or 'async'")), - } - } else { - None - }; - - let affinity = map.remove("affinity"); - let affinity = if let Some(affinity) = affinity { - serde_yaml::from_value(affinity).map_err(|e| D::Error::custom(e.to_string()))? - } else { - None - }; - - tracing::debug!("FnMetaYaml constructed, calls:{:?}", calls); - Ok(Self { - calls, - kvs, - sync, - affinity, - }) - } -} - -#[derive(Hash, Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -pub struct KeyPattern(pub String); - -// #[derive(Debug, Clone, Serialize, Deserialize)] -// pub struct KvMeta { -// set: bool, -// get: bool, -// delete: bool, -// pub pattern: KeyPattern, -// } - -#[derive(Debug, Clone, Serialize, Deserialize, EnumAsInner)] -pub enum DataEventTrigger { - Write, - New, - WriteWithCondition { condition: String }, - NewWithCondition { condition: String }, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct DataAccess { - set: bool, - get: bool, - delete: bool, - pub event: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum FnSyncAsyncSupport { - Sync, - Async, - SyncAndAsync, -} - -impl FnSyncAsyncSupport { - pub fn syncable(&self) -> bool { - matches!( - self, - FnSyncAsyncSupport::Sync | FnSyncAsyncSupport::SyncAndAsync - ) - } - pub fn asyncable(&self) -> bool { - matches!( - self, - FnSyncAsyncSupport::Async | FnSyncAsyncSupport::SyncAndAsync - ) - } -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct FnMeta { - pub sync_async: FnSyncAsyncSupport, - pub calls: Vec, - // pub event: Vec, - // pub args: Vec, - pub data_accesses: Option>, - pub affinity: Option, -} - -#[derive(Debug, Deserialize)] -pub struct AppMetaYaml { - pub fns: HashMap, -} - -#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)] -pub enum AppType { - Jar, - Wasm, - Native, -} - -#[derive(Serialize, Deserialize, Clone)] -pub struct AppMeta { - pub app_type: AppType, - pub fns: HashMap, - cache_contains_http_fn: Option, -} - -impl AppMeta { - pub fn new(app_type: AppType, fns: HashMap) -> Self { - Self { - app_type, - fns, - cache_contains_http_fn: None, - } - } - - pub async fn new_from_yaml( - metayaml: AppMetaYaml, - app_name: &str, - meta_fs: &AppMetaVisitOs, - ) -> WSResult { - let app_type = meta_fs.get_app_type(app_name).await?; - let fns = metayaml - .fns - .into_iter() - .map(|(fnname, fnmeta)| { - let fnmeta = (app_type, fnmeta).into(); - (fnname, fnmeta) - }) - .collect(); - Ok(Self { - app_type, - fns, - cache_contains_http_fn: None, - }) - } - pub fn fns(&self) -> Vec { - self.fns.iter().map(|(fnname, _)| fnname.clone()).collect() - } - pub fn get_fn_meta(&self, fnname: &str) -> Option<&FnMeta> { - self.fns.get(fnname) - } - pub fn contains_http_fn(&self) -> bool { - if let Some(v) = self.cache_contains_http_fn { - return v; - } - let res = self - .fns - .iter() - .any(|(_, fnmeta)| fnmeta.allow_http_call().is_some()); - unsafe { - 
#[cfg(feature = "unsafe-log")] - tracing::debug!("http_handler begin"); - let _ = util::non_null(&self.cache_contains_http_fn) - .as_mut() - .replace(res); - #[cfg(feature = "unsafe-log")] - tracing::debug!("http_handler end"); - } - res - } -} - -// #[derive(Debug, Serialize, Deserialize)] -// pub struct AppMetaService { -// actions: Vec, -// node: NodeID, -// app_dir: String, -// } - -pub struct AppMetas { - tmp_app_metas: HashMap, - pattern_2_app_fn: HashMap>, -} - -// impl FnEvent { -// pub fn match_kv_ope(&self, ope: KvOps) -> bool { -// match self { -// Self::KvSet(_) => ope == KvOps::Set, -// Self::HttpApp => false, -// } -// } -// } - -impl AppMetaYaml { - pub fn read(apps_dir: impl AsRef, appname: &str) -> AppMetaYaml { - let file_path = apps_dir.as_ref().join(format!("{}/app.yaml", appname)); - let file = std::fs::File::open(file_path).unwrap_or_else(|err| { - tracing::debug!("open config file failed, err: {:?}", err); - - let file_path = apps_dir.as_ref().join(format!("{}/app.yml", appname)); - std::fs::File::open(file_path).unwrap_or_else(|err| { - panic!("open config file failed, err: {:?}", err); - }) - }); - serde_yaml::from_reader(file).unwrap_or_else(|e| { - panic!("parse yaml config file failed, err: {:?}", e); - }) - } - // // return true if key set is valid - // pub fn check_key_set(&self, key: &str) -> bool { - // self.fns - // .iter() - // .any(|(_, fn_meta)| { - // if let Some(kvs)=&fn_meta.kvs{ - // kvs.iter().any(|(k, _)| key.contains(k)) - // }else{ - // false - // }) - // } -} - -impl FnMeta { - pub fn allow_rpc_call(&self) -> bool { - self.calls.iter().any(|v| match v { - FnCallMeta::Rpc => true, - _ => false, - }) - } - pub fn allow_http_call(&self) -> Option { - self.calls.iter().find_map(|v| match v { - FnCallMeta::Http { method, call: _ } => Some(method.clone()), - _ => None, - }) - } -} - -impl KeyPattern { - pub fn new(input: String) -> Self { - Self(input) - } - // match {} for any words - // "xxxx_{}_{}" matches "xxxx_abc_123" - // "xxxx{}{}" matches "xxxxabc123" - pub fn match_key(&self, key: &str) -> bool { - let re = self.0.replace("{}", "[a-zA-Z0-9]+"); - // let pattern_len = re.len(); - // tracing::info!("len:{}", re.len()); - let re = regex::Regex::new(&re).unwrap(); - if let Some(len) = re.find(key) { - tracing::info!( - "match key: {} with pattern: {} with len {} {} ", - key, - self.0, - len.len(), - key.len() - ); - len.len() == key.len() - } else { - tracing::info!("not match key: {} with pattern: {}", key, self.0); - false - } - } - // pub fn matcher(&self) -> String { - - // // let re = Regex::new(r"(.+)\{\}").unwrap(); - - // // if let Some(captured) = re.captures(&*self.0) { - // // if let Some(capture_group) = captured.get(1) { - // // let result = capture_group.as_str(); - // // // println!("Result: {}", result); - // // return result.to_owned(); - // // } - // // } - - // // self.0.clone() - // } -} - -impl From<(AppType, FnMetaYaml)> for FnMeta { - fn from((app_type, yaml): (AppType, FnMetaYaml)) -> Self { - let sync_or_async = yaml.sync.as_deref().map(|s| s == "sync").unwrap_or(true); - - // if sync but not allowed, set sync_or_async to false - let sync_or_async = if sync_or_async && FnExeCtxSyncAllowedType::try_from(app_type).is_err() - { - false - } else { - sync_or_async - }; - - // if async but not allowed, set sync_or_async to true - let sync_or_async = - if !sync_or_async && FnExeCtxAsyncAllowedType::try_from(app_type).is_err() { - true - } else { - sync_or_async - }; - - let sync_async = if sync_or_async { - 
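-// Illustrative app.yaml sketch (hypothetical values, added only to make the YAML-to-meta
-// mapping above concrete; not from the original file): a function entry such as
-//
-//     fns:
-//       myfn:
-//         sync: "async"
-//         http.get:
-//           call: direct
-//         rpc: {}
-//         kvs:
-//           "user_{}": ["read", "write", "trigger_by_write"]
-//
-// parses into roughly `FnMetaYaml { sync: Some("async"), calls: [Http { Get, Direct }, Rpc], kvs: Some(..), affinity: None }`
-// and, through this `From<(AppType, FnMetaYaml)>` impl, yields an `FnMeta` whose
-// `data_accesses` maps the key pattern `user_{}` to
-// `DataAccess { get: true, set: true, delete: false, event: Some(DataEventTrigger::Write) }`.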
FnSyncAsyncSupport::Sync - } else { - FnSyncAsyncSupport::Async - }; - - // 处理亲和性规则 - let affinity = yaml.affinity.map(|affinity_yaml| { - let tags = affinity_yaml - .tags - .unwrap_or_else(|| vec!["worker".to_string()]) - .into_iter() - .map(|tag| match tag.as_str() { - "worker" => NodeTag::Worker, - "master" => NodeTag::Master, - custom => NodeTag::Custom(custom.to_string()), - }) - .collect(); - - let nodes = match affinity_yaml.nodes { - Some(nodes_str) => { - if nodes_str == "*" { - AffinityPattern::All - } else if let Ok(count) = nodes_str.parse::() { - AffinityPattern::NodeCount(count) - } else { - AffinityPattern::List( - nodes_str.split(',').map(|s| s.parse().unwrap()).collect(), - ) - } - } - None => AffinityPattern::All, - }; - - AffinityRule { tags, nodes } - }); - - Self { - sync_async, - calls: yaml.calls, - data_accesses: if let Some(kvs) = yaml.kvs { - Some( - kvs.into_iter() - .map(|(key, ops)| { - let mut set = false; - let mut get = false; - let mut delete = false; - let mut event = None; - for op in ops { - #[derive(Serialize, Deserialize)] - struct TriggerWithCondition { - condition: String, - } - if let Some(opstr) = op.as_str() { - match opstr { - "write" | "set" => set = true, - "read" | "get" => get = true, - "delete" => delete = true, - "trigger_by_write" => { - event = Some(DataEventTrigger::Write); - } - "trigger_by_new" => { - event = Some(DataEventTrigger::New); - } - _ => { - panic!("invalid op: {:?}", op); - } - } - } else if let Ok(trigger_with_condition) = - serde_yaml::from_value::>( - op.clone(), - ) - { - if trigger_with_condition.len() == 1 { - if let Some(t) = - trigger_with_condition.get("trigger_by_write") - { - event = Some(DataEventTrigger::WriteWithCondition { - condition: t.condition.clone(), - }); - } else if let Some(t) = - trigger_with_condition.get("trigger_by_new") - { - event = Some(DataEventTrigger::NewWithCondition { - condition: t.condition.clone(), - }); - } else { - panic!("invalid op: {:?}", op); - } - } else { - panic!("invalid op: {:?}", op); - } - } else { - panic!("invalid op: {:?}", op); - } - } - - ( - KeyPattern::new(key), - DataAccess { - delete, - set, - get, - event, - }, - ) - }) - .collect(), - ) - } else { - None - }, - affinity, - } - } -} - -// impl From for AppMeta { -// fn from(yaml: AppMetaYaml) -> Self { -// let fns = yaml -// .fns -// .into_iter() -// .map(|(fnname, fnmeta)| (fnname, fnmeta.into())) -// .collect(); -// Self { fns } -// } -// } - -lazy_static::lazy_static! 
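-// Illustrative affinity sketch (hypothetical values, not from the original file): given the
-// affinity parsing above, an `affinity` entry in a function config such as
-//
-//     affinity:
-//       tags: ["worker", "gpu"]
-//       nodes: "3"
-//
-// deserializes through `AffinityYaml` and maps to roughly
-//
-//     AffinityRule {
-//         tags: vec![NodeTag::Worker, NodeTag::Custom("gpu".to_string())],
-//         nodes: AffinityPattern::NodeCount(3),
-//     }
-//
-// while `nodes: "*"` maps to `AffinityPattern::All`, `nodes: "1,2,3"` to
-// `AffinityPattern::List(vec![1, 2, 3])`, and omitting `tags` defaults to `[NodeTag::Worker]`.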
{ - static ref VIEW: Option = None; -} -fn view() -> &'static View { - tracing::debug!("get view"); - let res = unsafe { util::non_null(&*VIEW).as_ref().as_ref().unwrap() }; - tracing::debug!("get view end"); - res -} - -#[derive(LogicalModule)] -pub struct AppMetaManager { - meta: RwLock, - pub fs_layer: AppMetaVisitOs, - view: View, - pub native_apps: HashMap, - // app_meta_list_lock: Mutex<()>, -} - -#[async_trait] -impl LogicalModule for AppMetaManager { - fn inner_new(args: LogicalModuleNewArgs) -> Self - where - Self: Sized, - { - let view = View::new(args.logical_modules_ref.clone()); - unsafe { - #[cfg(feature = "unsafe-log")] - tracing::debug!("app man view begin"); - let _ = util::non_null(&*VIEW).as_mut().replace(view.clone()); - #[cfg(feature = "unsafe-log")] - tracing::debug!("app man view end"); - } - let fs_layer = AppMetaVisitOs::new(view.clone()); - Self { - meta: RwLock::new(AppMetas { - tmp_app_metas: HashMap::new(), - pattern_2_app_fn: HashMap::new(), - }), - view, - fs_layer, - native_apps: native_apps(), - // app_meta_list_lock: Mutex::new(()), - } - } - async fn init(&self) -> WSResult<()> { - { - let mut router = self.view.http_handler().building_router(); - - let take = router.option_mut().take().unwrap(); - let take = http::binds(take, self.view.clone()); - let _ = router.option_mut().replace(take); - // .route("/appman/upload", post(handler2)) - } - self.load_apps().await?; - - Ok(()) - } - async fn start(&self) -> WSResult> { - // load apps - - // self.meta - // .write() - // .await - // .load_all_app_meta(&self.view.os().file_path, &self.fs_layer) - // .await?; - Ok(vec![]) - } -} - -impl AppMetas { - // pub fn new() -> Self { - // Self { - // app_metas: HashMap::new(), - // pattern_2_app_fn: HashMap::new(), - // } - // } - // pub async fn set_tmp_appmeta(&self, ) - fn get_tmp_app_meta(&self, app: &str) -> Option { - self.tmp_app_metas.get(app).cloned() - } - - pub fn get_pattern_triggers( - &self, - pattern: impl Borrow, - ) -> Option<&Vec<(String, String)>> { - self.pattern_2_app_fn.get(pattern.borrow()) - } - // async fn load_all_app_meta( - // &mut self, - // file_dir: impl AsRef, - // meta_fs: &AppMetaVisitOs, - // ) -> WSResult<()> { - // if !file_dir.as_ref().join("apps").exists() { - // fs::create_dir_all(file_dir.as_ref().join("apps")).unwrap(); - // return Ok(()); - // } - // let entries = - // fs::read_dir(file_dir.as_ref().join("apps")).map_err(|e| ErrCvt(e).to_ws_io_err())?; - - // // 遍历文件夹中的每个条目 - // for entry in entries { - // // 获取目录项的 Result - // let entry = entry.map_err(|e| ErrCvt(e).to_ws_io_err())?; - // // 获取目录项的文件名 - // let file_name = entry.file_name(); - // // dir name is the app name - // let app_name = file_name.to_str().unwrap().to_owned(); - - // // allow spec files - // if entry.file_type().unwrap().is_file() { - // let allowed_files = vec!["crac_config"]; - // assert!(allowed_files - // .contains(&&*(*entry.file_name().as_os_str().to_string_lossy()).to_owned())); - // continue; - // } - - // // allow only dir - // assert!(entry.file_type().unwrap().is_dir()); - - // // read app config yaml - // let meta_yaml = { - // let apps_dir = file_dir.as_ref().join("apps"); - // let file_name_str = app_name.clone(); - // tokio::task::spawn_blocking(move || AppMetaYaml::read(apps_dir, &*file_name_str)) - // .await - // .unwrap() - // }; - - // // transform - // let meta = AppMeta::new(meta_yaml, &app_name, meta_fs).await.unwrap(); - - // //TODO: build and checks - // // - build up key pattern to app fn - - // // for (fnname, fnmeta) in 
&meta.fns { - // // for event in &fnmeta.event { - // // match event { - // // // not kv event, no key pattern - // // FnEvent::HttpFn => {} - // // FnEvent::HttpApp => {} - // // FnEvent::KvSet(key_index) => { - // // let kvmeta = fnmeta.try_get_kv_meta_by_index(*key_index).unwrap(); - // // self.pattern_2_app_fn - // // .entry(kvmeta.pattern.0.clone()) - // // .or_insert_with(Vec::new) - // // .push((app_name.clone(), fnname.clone())); - // // } - // // } - // // } - // // } - // let _ = self.tmp_app_metas.insert(app_name, meta); - // } - // Ok(()) - // } -} - -impl AppMetaManager { - async fn load_apps(&self) -> WSResult<()> { - // TODO: Implement app loading logic - Ok(()) - } - async fn construct_tmp_app(&self, tmpapp: &str) -> WSResult { - // 1.meta - // let appdir = self.fs_layer.concat_app_dir(app); - let appmeta = self.fs_layer.read_app_meta(tmpapp).await?; - - // TODO: 2.check project dir - // 3. if java, take snapshot - if let AppType::Jar = appmeta.app_type { - let _ = self - .meta - .write() - .await - .tmp_app_metas - .insert(tmpapp.to_owned(), appmeta.clone()); - tracing::debug!("record app meta to make checkpoint {}", tmpapp); - self.view - .instance_manager() - .make_checkpoint_for_app(tmpapp) - .await?; - self.view - .instance_manager() - .drap_app_instances(tmpapp) - .await; - // remove app_meta - tracing::debug!("checkpoint made, remove app meta {}", tmpapp); - let _ = self - .meta - .write() - .await - .tmp_app_metas - .remove(tmpapp) - .unwrap_or_else(|| { - panic!("remove app meta failed, app: {}", tmpapp); - }); - } - - Ok(appmeta) - } - pub async fn app_available(&self, app: &str) -> WSResult { - match self - .view - .data_general() - .get_or_del_datameta_from_master( - format!("{}{}", DATA_UID_PREFIX_APP_META, app).as_bytes(), - false, - ) - .await - { - Err(err) => match err { - WSError::WsDataError(WsDataError::DataSetNotFound { uniqueid }) => { - tracing::debug!( - "app meta not found, app: {}", - std::str::from_utf8(&*uniqueid).unwrap() - ); - Ok(false) - } - _ => Err(err), - }, - Ok(_) => Ok(true), - } - } - - /// get app by idx 1 - pub async fn load_app_file(&self, app: &str, datameta: DataSetMetaV2) -> WSResult<()> { - tracing::debug!( - "calling get_or_del_data to load app file, app: {}, datameta: {:?}", - app, - datameta - ); - let mut data = match self - .view - .data_general() - .get_or_del_data(GetOrDelDataArg { - meta: Some(datameta), - unique_id: format!("{}{}", DATA_UID_PREFIX_APP_META, app).into(), - ty: GetOrDelDataArgType::PartialOne { idx: 1 }, - }) - .await - { - Err(err) => { - tracing::warn!("get app file failed, err: {:?}", err); - return Err(err); - } - Ok((_datameta, data)) => data, - }; - - let proto::DataItem { - data_item_dispatch: Some(proto::data_item::DataItemDispatch::File(appfiledata)), - } = data.remove(&1).unwrap() - else { - return Err(WsFuncError::InvalidAppMetaDataItem { - app: app.to_owned(), - } - .into()); - }; - - // extract app file - let zipfilepath = appfiledata.file_name_opt; - let appdir = self.fs_layer.concat_app_dir(app); - let res = tokio::task::spawn_blocking(move || { - // remove old app dir - if appdir.exists() { - fs::remove_dir_all(&appdir).unwrap(); - } - // open zip file - let zipfile = std::fs::File::open(zipfilepath)?; - zip_extract::extract(zipfile, &appdir, false) - }) - .await - .unwrap(); - - if let Err(err) = res { - tracing::warn!("extract app file failed, err: {:?}", err); - return Err(WsFuncError::AppPackFailedZip(err).into()); - } - - Ok(()) - } - /// get app meta by idx 0 - /// None DataSetMetaV2 
means temp app prepared - /// Some DataSetMetaV2 means app from inner storage - pub async fn get_app_meta( - &self, - app: &str, - ) -> WSResult)>> { - if let Some(res) = self.meta.read().await.get_tmp_app_meta(app) { - return Ok(Some((res, None))); - } - - // self.app_metas.get(app) - tracing::debug!("calling get_or_del_data to get app meta, app: {}", app); - let datameta = view() - .data_general() - .get_or_del_data(GetOrDelDataArg { - meta: None, - unique_id: format!("{}{}", DATA_UID_PREFIX_APP_META, app).into(), - ty: GetOrDelDataArgType::PartialOne { idx: 0 }, - }) - .await; - - // only one data item - let (datameta, meta): (DataSetMetaV2, proto::DataItem) = match datameta { - Err(err) => match err { - WSError::WsDataError(WsDataError::DataSetNotFound { uniqueid }) => { - tracing::debug!( - "get_app_meta not exist, uniqueid: {:?}", - std::str::from_utf8(&*uniqueid) - ); - return Ok(None); - } - _ => { - tracing::warn!("get_app_meta failed with err {:?}", err); - return Err(err); - } - }, - Ok((datameta, mut datas)) => (datameta, datas.remove(&0).unwrap()), - }; - - let proto::DataItem { - data_item_dispatch: Some(proto::data_item::DataItemDispatch::RawBytes(metabytes)), - } = meta - else { - return Err(WsFuncError::InvalidAppMetaDataItem { - app: app.to_owned(), - } - .into()); - }; - - let meta = bincode::deserialize_from::<_, AppMeta>(Cursor::new(&metabytes)); - let meta = match meta { - Err(e) => { - tracing::warn!( - "meta decode failed with data:{:?}, err:{:?}", - metabytes.limit_range_debug(0..100), - e - ); - return Err(WsFuncError::InvalidAppMetaDataItem { - app: app.to_owned(), - } - .into()); - } - Ok(meta) => meta, - }; - Ok(Some((meta, Some(datameta)))) - } - - pub async fn app_uploaded(&self, appname: String, data: Bytes) -> WSResult<()> { - // 1. tmpapp name & dir - // TODO: fobidden tmpapp public access - // let tmpapp = format!("tmp{}", Uuid::new_v4()); //appname.clone(); - let tmpapp = format!("{}", appname); - let tmpappdir = self.fs_layer.concat_app_dir(&tmpapp); - let tmpapp = tmpapp.clone(); - - // 2. unzip app pack - let tmpappdir2 = tmpappdir.clone(); - // remove old dir&app - if let Some(_) = self.meta.write().await.tmp_app_metas.remove(&tmpapp) { - tracing::debug!("remove old app meta {}", tmpapp); - } - let ins = self.view.instance_manager().app_instances.remove(&tmpapp); - if let Some(ins) = ins { - ins.value().kill().await; - tracing::debug!("remove old app instance {}", tmpapp); - } - - if tmpappdir2.exists() { - // remove old app - fs::remove_dir_all(&tmpappdir2).unwrap(); - } - let res = tokio::task::spawn_blocking(move || { - let data = data.to_vec(); - zip_extract::extract(Cursor::new(data), &tmpappdir2, false) - }) - .await - .unwrap(); - - match res { - Ok(res) => res, - Err(err) => { - tracing::warn!("unzip failed, err: {:?}", err); - let _ = fs::remove_dir_all(&tmpappdir); - return Err(WsFuncError::AppPackFailedZip(err).into()); - } - }; - - // 3. 
check meta - let res = self.construct_tmp_app(&tmpapp).await; - let appmeta = match res { - Err(e) => { - let _ = fs::remove_dir_all(&tmpappdir); - tracing::warn!("construct app failed, err {:?}", e); - return Err(e); - } - Ok(appmeta) => appmeta, - }; - - // remove temp dir - // let _ = fs::remove_dir_all(&tmpappdir).map_err(|e| WSError::from(WsIoErr::Io(e)))?; - - // mv temp app to formal app dir - let rel_app_dir = format!("{}/{}", APPS_REL_DIR, appname); - // 修改前: let formal_app_dir = self.view.os().file_path.join(rel_app_dir); rel_app_dir是字符串类型发生所有权转移,然而在下方还被使用了,选择修改为clone 曾俊 - let formal_app_dir = self.view.os().file_path.join(rel_app_dir.clone()); - //let _ = fs::rename(&tmpappdir, &formal_app_dir).map_err(|e| WSError::from(WsDataError::FileOpenErr { path: (), err: () })); - //虞光勇修改:因为在调用 fs::rename 并处理其结果时遇到了类型不匹配的问题。具体来说, - // 在构造WsDataError::FileOpenErr 时,path 字段的类型不匹配:期望的是 PathBuf 类型,但实际传入的是 ()(即单元类型)。 - //修改: - // let result = fs::rename(&tmpappdir, &formal_app_dir).map_err(|e| { - // 这里result变量下方没有再使用 加了一个标志 曾俊 - let _result = fs::rename(&tmpappdir, &formal_app_dir).map_err(|e| { - WSError::from(WsDataError::FileOpenErr { - path: PathBuf::from(formal_app_dir.clone()), - err: e, - }) - }); - - // 3. broadcast meta and appfile - let write_data_id = format!("{}{}", DATA_UID_PREFIX_APP_META, appname); - let write_datas = vec![ - DataItemArgWrapper::from_bytes(bincode::serialize(&appmeta).unwrap()), - //DataItemArgWrapper::from_file(rel_app_dir), - //虞光勇修改,因为编译器提示在调用 DataItemArgWrapper::from_file 方法时,传递的参数类型不匹配。 - // 具体来说,from_file 方法期望的是一个 PathBuf 类型的参数,但你传递的是一个 String 类型。 - //修改后: - //DataItemArgWrapper::from_file(rel_app_dir.into()), - //这里的 from_file 方法返回一个 Result, - // 但你直接将其赋值给一个期望 DataItemArgWrapper 类型的变量或参数,导致类型不匹配。使用 ? 操作符 - //DataItemArgWrapper::from_file(rel_app_dir.into())?, - DataItemArgWrapper::from_file(rel_app_dir.into())?, - ]; - tracing::debug!( - "app data size: {:?}", - write_datas - .iter() - // 修改前:.map(|v| v.to_string()) 去掉了这一行,为结构体派生了debug特征 曾俊 - .collect::>() - ); - self.view - .data_general() - .write_data( - write_data_id, - write_datas, - Some(( - self.view.p2p().nodes_config.this_node(), - proto::DataOpeType::Write, - OpeRole::UploadApp(DataOpeRoleUploadApp {}), - )), - ) - .await?; - tracing::debug!("app uploaded"); - Ok(()) - } - - pub fn set_app_meta_list(&self, list: Vec) { - //发送逻辑处理 曾俊 - // self.view - // .kv_store_engine() - // .set( - // KeyTypeServiceList, - // &serde_json::to_string(&list).unwrap().into(), - // false, - // ) - // .todo_handle("This part of the code needs to be implemented."); - - //修改后代码:对set函数的返回类型进行处理 曾俊 - match self.view - .kv_store_engine() - .set( - KeyTypeServiceList, - &serde_json::to_string(&list).unwrap().into(), - false, - ) { - Ok((version, _)) => { - tracing::debug!("App meta list updated successfully, version: {}, list: {:?}", version, list); - }, - Err(e) => { - tracing::error!("Failed to set app meta list: {:?}", e); - } - } -} - -pub fn get_app_meta_list(&self) -> Vec { - let res = self - .view - .kv_store_engine() - .get(&KeyTypeServiceList, false, KvAdditionalConf {}) - .map(|(_version, list)| list) - .unwrap_or_else(|| { - return vec![]; - }); - serde_json::from_slice(&res).unwrap_or_else(|e| { - tracing::warn!("parse app meta list failed, err: {:?}", e); - vec![] - }) -} - - - - // pub fn get_app_meta_basicinfo_list(&self) -> Vec { - // let apps = self.get_app_meta_list(); - // apps.into_iter() - // .map(|app| { - // let service = self.get_app_meta_service(&app).unwrap(); - // ServiceBasic { - // name: app, 
- // node: format!("{}", service.node), - // dir: service.app_dir, - // actions: service.actions, - // } - // }) - // .collect() - // } - - // pub fn get_app_meta_service(&self, app_name: &str) -> Option { - // let Some(res) = self - // .view - // .kv_store_engine() - // .get(KeyTypeServiceMeta(app_name.as_bytes())) - // else { - // return None; - // }; - // serde_json::from_slice(&res).map_or_else( - // |e| { - // tracing::warn!("parse service meta failed, err: {:?}", e); - // None - // }, - // |v| Some(v), - // ) - // } - - // pub fn set_app_meta_service(&self, app_name: &str, service: AppMetaService) { - // self.view.kv_store_engine().set( - // KeyTypeServiceMeta(app_name.as_bytes()), - // &serde_json::to_string(&service).unwrap().into(), - // ); - // } - - // // node id is valid before call this function - // pub async fn add_service(&self, req: AddServiceReq) -> AddServiceResp { - // // // check conflict service - // // if self.get_app_meta_service(&req.service.name).is_some() { - // // return AddServiceResp::Fail { - // // msg: format!("service {} already exist", req.service.name), - // // }; - // // } - - // // get the target node - // let Ok(nodeid) = req.service.node.parse::() else { - // return AddServiceResp::Fail { - // msg: "node id should be number".to_owned(), - // }; - // }; - // if !self.view.p2p().nodes_config.node_exist(nodeid) { - // return AddServiceResp::Fail { - // msg: format!("node {nodeid} not exist"), - // }; - // } - - // // call and return if rpc failed - // let res = match self - // .view - // .os() - // .remote_get_dir_content_caller - // .call( - // self.view.p2p(), - // nodeid, - // GetDirContentReq { - // path: req.service.dir.clone(), - // }, - // None, - // ) - // { - // Ok(res) => res, - // Err(e) => { - // return AddServiceResp::Fail { - // msg: format!("call remote_get_dir_content_caller failed, err: {:?}", e), - // }; - // } - // }; - - // // return if remote failed - // let _res = match res.dispatch.unwrap() { - // super::network::proto::remote_sys::get_dir_content_resp::Dispatch::Fail(fail) => { - // return AddServiceResp::Fail { msg: fail.error }; - // } - // super::network::proto::remote_sys::get_dir_content_resp::Dispatch::Ok(res) => res, - // }; - - // // add to appmeta list - // { - // let _mu = self.app_meta_list_lock.lock(); - // let mut appmeta_list = self.get_app_meta_list(); - // appmeta_list.push(req.service.name.clone()); - // let mut dup = HashSet::new(); - // let appmeta_list = appmeta_list - // .into_iter() - // .filter(|v| dup.insert(v.clone())) - // .collect(); - // self.set_app_meta_list(appmeta_list); - // self.set_app_meta_service( - // &req.service.name, - // AppMetaService { - // actions: req.service.actions, - // node: nodeid, - // app_dir: req.service.dir, - // }, - // ); - // } - // AddServiceResp::Succ {} - // } - // pub async fn run_service_action(&self, req: RunServiceActionReq) -> RunServiceActionResp { - // if !req.sync { - // return RunServiceActionResp::Fail { - // msg: "unsuppot async mode".to_owned(), - // }; - // } - - // // sync logic - // // check service and action - // let service = match self.get_app_meta_service(&req.service) { - // Some(service) => service, - // None => { - // return RunServiceActionResp::Fail { - // msg: format!("service {} not exist", req.service), - // }; - // } - // }; - - // // check action valid - // let Some(action) = service.actions.iter().find(|v| v.cmd == req.action_cmd) else { - // return RunServiceActionResp::Fail { - // msg: format!("action {} not exist", req.action_cmd), - // 
}; - // }; - - // // handle rpc fail - // let res = match self - // .view - // .os() - // .remote_run_cmd_caller - // .call( - // self.view.p2p(), - // service.node, - // RunCmdReq { - // cmd: action.cmd.clone(), - // workdir: service.app_dir, - // }, - // Some(Duration::from_secs(10)), - // ) - // { - // Ok(res) => res, - // Err(err) => { - // return RunServiceActionResp::Fail { - // msg: format!("call remote_run_cmd_caller failed, err: {:?}", err), - // }; - // } - // }; - - // // handle cmd fail - // let res = match res.dispatch.unwrap() { - // super::network::proto::remote_sys::run_cmd_resp::Dispatch::Ok(res) => res, - // super::network::proto::remote_sys::run_cmd_resp::Dispatch::Err(err) => { - // return RunServiceActionResp::Fail { - // msg: format!("remote run cmd failed: {}", err.error), - // } - // } - // }; - - // RunServiceActionResp::Succ { output: res.output } - // } -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum NodeTag { - Worker, - Master, - Custom(String), -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct AffinityRule { - // 节点必须具有的标签列表,默认包含 worker - pub tags: Vec, - // 节点 ID 匹配规则 - pub nodes: AffinityPattern, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum AffinityPattern { - // 匹配所有节点 - All, - // 匹配指定节点列表 - List(Vec), - // 限定节点数量 - NodeCount(usize), -} - -#[derive(Debug, Deserialize)] -pub struct AffinityYaml { - // 标签列表,使用字符串表示 - pub tags: Option>, - // 节点列表,使用 "*" 表示所有节点,数字表示节点数量,或节点 ID 列表 "1,2,3" - pub nodes: Option, -} - -#[cfg(test)] -mod test { - use crate::util; - - use super::*; - #[test] - fn test_key_pattern() { - util::test_tracing_start(); - let pattern = KeyPattern::new("xxxx_{}_{}".to_owned()); - assert!(pattern.match_key("xxxx_abc_123")); - } -} diff --git a/src/main/src/general/data/m_data_general/batch.rs b/src/main/src/general/data/m_data_general/batch.rs deleted file mode 100644 index 4600187..0000000 --- a/src/main/src/general/data/m_data_general/batch.rs +++ /dev/null @@ -1,163 +0,0 @@ -/// Batch Data Transfer Interface -/// -/// # Design Overview -/// The batch interface is designed for efficient large-scale data transfer from data holders (writers) -/// to the system. It differs from the regular data interface in several key aspects: -/// -/// ## Batch Interface -/// - Purpose: Optimized for data holders to push complete datasets -/// - Key Feature: Supports streaming transfer during data writing process -/// - Use Case: Allows transfer before local sharding is complete -/// - Operation: Uses fixed-size block transfer with real-time processing -/// -/// ## Data Interface (For Comparison) -/// - Purpose: General-purpose data read/write operations -/// - Write Flow: Data is sharded and distributed across nodes -/// - Read Flow: Shards are collected from nodes and reassembled -/// - Operation: Requires complete data and consistency checks -/// -/// # Implementation Details -/// The batch interface implements this through: -/// - Efficient block-based streaming transfer -/// - Concurrent processing of received blocks -/// - Support for both memory and file-based transfers -/// - Real-time block validation and assembly -/// -/// For detailed implementation of the regular data interface, see the data.rs module. 
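-// A minimal sketch (not part of the original module) of the fixed-size block arithmetic the
-// batch interface relies on: a payload of `total_size` bytes is cut into
-// `ceil(total_size / block_size)` blocks, each identified by its index and byte offset.
-// The transfer code below performs the same computation inline with `DEFAULT_BLOCK_SIZE`
-// before spawning one bounded-concurrency send task per block.
-fn example_block_ranges(total_size: usize, block_size: usize) -> Vec<(usize, std::ops::Range<usize>)> {
-    let total_blocks = (total_size + block_size - 1) / block_size;
-    (0..total_blocks)
-        .map(|block_idx| {
-            let offset = block_idx * block_size;
-            let size = block_size.min(total_size - offset);
-            (block_idx, offset..offset + size)
-        })
-        .collect()
-}
-// For example, `example_block_ranges(10_000, 4096)` yields the blocks
-// (0, 0..4096), (1, 4096..8192) and (2, 8192..10000).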
-use super::*; -use crate::general::network::proto; -use tokio::io::{AsyncReadExt, AsyncSeekExt}; -use tokio::sync::Semaphore; -use std::sync::Arc; -use std::time::Duration; -use crate::general::data::m_data_general::dataitem::DataItemSource; - -impl proto::DataItem { - pub fn size(&self) -> usize { - match &self.data_item_dispatch { - Some(proto::data_item::DataItemDispatch::RawBytes(bytes)) => bytes.len(), - Some(proto::data_item::DataItemDispatch::File(file_data)) => file_data.file_content.len(), - None => 0, - } - } -} - -impl DataGeneral { - /// 发起批量数据传输 - pub async fn call_batch_data( - &self, - node_id: NodeID, - unique_id: Vec, - version: u64, - data: proto::DataItem, - ) -> WSResult { - // 调用 batch_transfer 函数处理数据传输 - async fn batch_transfer( - unique_id: Vec, - version: u64, - target_node: NodeID, - data: Arc, - view: DataGeneralView, - ) -> WSResult<()> { - let total_size = match data.as_ref() { - DataItemSource::Memory { data } => data.len(), - DataItemSource::File { path } => { - tokio::fs::metadata(path).await?.len() as usize - } - }; - let total_blocks = (total_size + DEFAULT_BLOCK_SIZE - 1) / DEFAULT_BLOCK_SIZE; - let semaphore = Arc::new(Semaphore::new(32)); - let mut handles: Vec>> = Vec::new(); - - // 发送所有数据块 - for block_idx in 0..total_blocks { - // 获取信号量许可 - let permit = semaphore.clone().acquire_owned().await.unwrap(); - let offset = block_idx as usize * DEFAULT_BLOCK_SIZE; - let size = DEFAULT_BLOCK_SIZE.min(total_size - offset); - - // 读取数据块 - let block_data = match data.as_ref() { - DataItemSource::Memory { data } => data[offset..offset + size].to_vec(), - DataItemSource::File { path } => { - let mut file = tokio::fs::File::open(path).await?; - let mut buffer = vec![0; size]; - let _ = file.seek(std::io::SeekFrom::Start(offset as u64)).await?; - let _ = file.read_exact(&mut buffer).await?; - buffer - } - }; - - // 构造请求 - let request = proto::BatchDataRequest { - request_id: Some(proto::BatchRequestId { - node_id: target_node as u32, - sequence: block_idx as u64, // 修复:使用 u64 - }), - dataset_unique_id: unique_id.clone(), - data_item_idx: 0, // 因为是整体传输,所以使用0 - // block_type: match data.as_ref() { - // DataItemSource::Memory { .. } => proto::BatchDataBlockType::Memory as i32, - // DataItemSource::File { .. 
} => proto::BatchDataBlockType::File as i32, - // }, - //原代码block_type: data.as_ref(), 类型不匹配,使用自定义的to_data_item函数转化 曾俊 - block_type: Some(data.to_data_item()), - block_index: block_idx as u32, - data: block_data, - operation: proto::DataOpeType::Write as i32, - unique_id: unique_id.clone(), - version, - total_size: total_size as u64, - }; - - // 发送请求 - let view = view.clone(); - let handle = tokio::spawn(async move { - let _permit = permit; // 持有permit直到任务完成 - let resp = view.data_general() - .rpc_call_batch_data - .call( - view.p2p(), - target_node, - request, - Some(Duration::from_secs(30)), - ) - .await?; - - if !resp.success { - return Err(WsDataError::BatchTransferError { - request_id: proto::BatchRequestId { - node_id: target_node as u32, - sequence: block_idx as u64, // 修复:使用 u64 - }, - msg: resp.error_message, - }.into()); - } - Ok(()) - }); - - handles.push(handle); - } - - // 等待所有请求完成 - for handle in handles { - handle.await??; - } - - Ok(()) - } - - let data = Arc::new(DataItemSource::new(data)); - batch_transfer(unique_id.clone(), version, node_id, data, self.view.clone()).await?; - - Ok(proto::BatchDataResponse { - request_id: Some(proto::BatchRequestId { - node_id: node_id, - sequence: 0, - }), - success: true, - error_message: String::new(), - version, - }) - } -} diff --git a/src/main/src/general/data/m_data_general/batch_handler.rs b/src/main/src/general/data/m_data_general/batch_handler.rs deleted file mode 100644 index c5420ce..0000000 --- a/src/main/src/general/data/m_data_general/batch_handler.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::general::network::{ - proto::BatchDataRequest, - proto::BatchDataResponse, - m_p2p::RPCResponsor, -}; -use std::sync::Arc; -use tokio::sync::Mutex; -use tracing; - -/// 共享状态,用于记录最新的请求响应器 -/// 当收到新的请求时,会更新响应器并自动处理旧的请求 -#[derive(Clone)] -pub struct SharedWithBatchHandler { - /// 当前活跃的响应器 - /// 使用 Arc 保证线程安全 - responsor: Arc>>>, -} - -impl SharedWithBatchHandler { - /// 创建新的共享状态 - #[must_use] - pub fn new() -> Self { - Self { - responsor: Arc::new(Mutex::new(None)), - } - } - - /// 更新响应器 - /// 如果存在旧的响应器,会自动返回成功 - /// - /// # 参数 - /// * `responsor` - 新的响应器 - pub async fn update_responsor(&self, responsor: RPCResponsor) { - let mut guard = self.responsor.lock().await; - if let Some(old_responsor) = guard.take() { - // 旧的responsor直接返回成功 - if let Err(e) = old_responsor.send_resp(BatchDataResponse { - request_id: None, // 这里需要正确的 request_id - version: 0, // 这里需要正确的版本号 - success: true, - error_message: String::new(), - }).await { - tracing::error!("Failed to respond to old request: {}", e); - } - } - *guard = Some(responsor); - } - - /// 获取最终的响应器 - /// 用于在所有数据都写入完成后发送最终响应 - pub async fn get_final_responsor(&self) -> Option> { - self.responsor.lock().await.take() - } -} - -/// 批量数据传输状态 -/// 用于管理单个批量数据传输请求的生命周期 -pub struct BatchReceiveState { - /// 写入任务句柄 - pub handle: super::dataitem::WriteSplitDataTaskHandle, - /// 共享状态,用于处理请求响应 - pub shared: SharedWithBatchHandler, -} - -impl BatchReceiveState { - /// 创建新的批量数据传输状态 - /// - /// # 参数 - /// * `handle` - 写入任务句柄 - /// * `shared` - 共享状态 - pub fn new(handle: super::dataitem::WriteSplitDataTaskHandle, shared: SharedWithBatchHandler) -> Self { - Self { - handle, - shared, - } - } -} diff --git a/src/main/src/general/data/m_data_general/dataitem.rs b/src/main/src/general/data/m_data_general/dataitem.rs deleted file mode 100644 index fd2c083..0000000 --- a/src/main/src/general/data/m_data_general/dataitem.rs +++ /dev/null @@ -1,1022 +0,0 @@ -use crate::general::data::m_data_general::UniqueId; -use 
crate::LogicalModulesRef;//虞光勇修改,修改内容:增加use crate::LogicalModulesRef;来导入 LogicalModulesRef。 -use ::zip::CompressionMethod;//虞光勇修改,因为编译器无法找到 zip 模块中的 CompressionMethod,需加入头文件(860续) -use crate::general::m_os::OperatingSystem; -use crate::general::network::proto; -use crate::general::data::m_data_general::{DataItemIdx, DataSplitIdx, GetOrDelDataArgType}; -use crate::general::network::proto_ext::{NewPartialFileDataArg, ProtoExtDataItem}; -use crate::logical_module_view_impl; -use crate::modules_global_bridge::try_get_modules_ref; -use crate::result::{WSError, WSResult, WSResultExt, WsDataError}; -use crate::util::zip; -use futures::stream::{FuturesUnordered, StreamExt}; -use std::cell::RefCell; -use std::collections::btree_set; -use std::ops::Range; -use std::path::PathBuf; -use std::str::FromStr; -use std::sync::Arc; -use tokio::sync::mpsc; -use tokio::sync::broadcast; -use tracing; -use base64::{engine::general_purpose::STANDARD, Engine as _}; -use std::sync::RwLock; - -const DEFAULT_BLOCK_SIZE: usize = 4096; - -logical_module_view_impl!(DataItemView); -logical_module_view_impl!(DataItemView,os,OperatingSystem); - -/// 用于遍历数据项索引的迭代器 -#[derive(Debug)] -pub(super) enum WantIdxIter<'a> { - /// 遍历多个指定索引 - PartialMany { - iter: btree_set::Iter<'a, DataItemIdx>, - }, - /// 遍历单个索引 - PartialOne { - idx: DataItemIdx, - itercnt: u8, - }, - /// 遍历所有或删除操作的索引 - Other { - ty: GetOrDelDataArgType, - itercnt: u8, - len: u8, - }, -} - -impl<'a> WantIdxIter<'a> { - /// 创建新的索引迭代器 - /// - /// # 参数 - /// * `ty` - 迭代类型 - /// * `itemcnt` - 数据项总数 - #[must_use] - pub(super) fn new(ty: &'a GetOrDelDataArgType, itemcnt: DataItemIdx) -> Self { - match ty { - GetOrDelDataArgType::PartialMany { idxs } => Self::PartialMany { iter: idxs.iter() }, - GetOrDelDataArgType::Delete | GetOrDelDataArgType::All => Self::Other { - ty: ty.clone(), - itercnt: 0, - len: itemcnt, - }, - GetOrDelDataArgType::PartialOne { idx } => Self::PartialOne { - idx: *idx, - itercnt: 0, - }, - } - } -} - -impl<'a> Iterator for WantIdxIter<'a> { - type Item = DataItemIdx; - fn next(&mut self) -> Option { - match self { - WantIdxIter::PartialMany { iter, .. } => iter.next().map(|v| *v as DataItemIdx), - WantIdxIter::PartialOne { idx, itercnt } => { - if *itercnt == 0 { - *itercnt += 1; - Some(*idx) - } else { - None - } - } - WantIdxIter::Other { ty, itercnt, len } => match ty { - GetOrDelDataArgType::Delete | GetOrDelDataArgType::All => { - if itercnt == len { - None - } else { - let ret = *itercnt; - *itercnt += 1; - Some(ret) - } - } - GetOrDelDataArgType::PartialMany { .. } - | GetOrDelDataArgType::PartialOne { .. } => { - panic!("PartialMany should be handled by iter") - } - }, - } - } -} - -/// 共享内存区域的持有者 -/// 负责管理共享内存的所有权和生命周期 -#[derive(Debug, Clone)] -pub struct SharedMemHolder { - /// 共享内存数据 - data: Arc>, -} - -impl SharedMemHolder { - pub fn len(&self) -> usize { - self.data.len() - } - - pub fn try_take_data(self) -> Option> { - // SAFETY: - // 1. We're only replacing the Arc with an empty Vec - // 2. The original Arc will be dropped properly - // 3. 
This is safe as long as this is the only reference to the Arc - // unsafe { - // let ptr = &self.data as *const Arc> as *mut Arc>; - if Arc::strong_count(&self.data) == 1 { - Some(Arc::try_unwrap(self.data).unwrap()) - } else { - None - } - } - - pub fn as_raw_bytes(&self) -> Option<&[u8]> { - Some(self.data.as_ref()) - } -} - -impl From for Vec { - fn from(holder: SharedMemHolder) -> Self { - holder.as_raw_bytes().expect("Failed to get raw bytes").to_vec() - } -} - -/// 共享内存区域的访问者 -/// 提供对特定范围内存的安全访问 -pub struct SharedMemOwnedAccess { - /// 共享内存数据 - data: Arc>, - /// 访问范围 - range: Range, -} - -impl SharedMemOwnedAccess { - /// 获取可变字节切片 - /// - /// # Safety - /// 调用者必须确保: - /// 1. 没有其他线程同时访问这块内存 - /// 2. 访问范围不超过内存边界 - pub unsafe fn as_bytes_mut(&self) -> &mut [u8] { - // SAFETY: - // 1. We have &mut self, so we have exclusive access to this data - // 2. The underlying memory is valid for the entire Arc allocation - let full_slice = unsafe { - std::slice::from_raw_parts_mut(self.data.as_ptr() as *mut u8, self.data.len()) - }; - &mut full_slice[self.range.clone()] - } -} - -/// 创建新的共享内存和访问者 -/// -/// # 参数 -/// * `splits` - 内存分片范围列表 -#[must_use] -pub fn new_shared_mem(splits: &[Range]) -> (SharedMemHolder, Vec) { - let len = splits.iter().map(|range| range.len()).sum(); - let data = Arc::new(vec![0; len]); - let owned_accesses = splits - .iter() - .map(|range| SharedMemOwnedAccess { - data: Arc::clone(&data), - range: range.clone(), - }) - .collect(); - tracing::debug!("new_shared_mem, total_len: {}, splits: {:?}", len, splits); - (SharedMemHolder { data }, owned_accesses) -} - -/// 计算数据分片范围 -/// -/// # 参数 -/// * `total_size` - 总大小 -/// -/// # 返回 -/// * `Vec>` - 分片范围列表 -#[must_use] -pub fn calculate_splits(total_size: usize) -> Vec> { - let total_blocks = (total_size + DEFAULT_BLOCK_SIZE - 1) / DEFAULT_BLOCK_SIZE; - let mut splits = Vec::with_capacity(total_blocks); - for i in 0..total_blocks { - let start = i * DEFAULT_BLOCK_SIZE; - let end = (start + DEFAULT_BLOCK_SIZE).min(total_size); - splits.push(start..end); - } - splits -} - -/// 写入类型 -/// 支持写入文件或内存两种模式 -#[derive(Debug, Clone)] -pub enum WriteSplitDataType { - Dir{ - /// 接受的压缩文件形式 - zip_file: PathBuf, - /// 解压后的文件路径 - path: PathBuf, - }, - /// 文件写入模式 - File { - /// 目标文件路径 - path: PathBuf, - }, - /// 内存写入模式 - Mem { - /// 共享内存区域 - shared_mem: SharedMemHolder, - }, -} - -/// 写入分片任务的结果 -#[derive(Debug)] -pub struct WriteSplitTaskResult { - /// 写入的数据大小 - pub written_size: usize, -} - -/// 写入分片任务组 -/// 管理一组相关的写入任务 -#[derive(Debug)] -pub enum WriteSplitDataTaskGroup { - /// 文件写入模式 - ToFile { - is_dir: bool, - /// 任务唯一标识 - unique_id: UniqueId, - /// 临时文件路径,用作传输 - tmp_file_path: PathBuf, - /// 目标文件路径, 用作最终使用 - target_file_path: PathBuf, - /// 任务列表 - tasks: Vec>, - /// 接收新任务的通道 - rx: mpsc::Receiver>, - /// 预期总大小 - expected_size: usize, - /// 当前已写入大小 - current_size: usize, - /// 广播通道发送端,用于通知任务完成 - broadcast_tx: Arc>, - }, - /// 内存写入模式 - ToMem { - /// 任务唯一标识 - unique_id: UniqueId, - - - // /// 共享内存区域 - // shared_mem: RefCell>>, - /// 费新文修改,修改内容:shared_mem: RefCell>>, - /// 修改原因:shared_mem: RefCell>>, 需要修改为 RefCell>, - /// 修改后:shared_mem: RefCell>, - /// 共享内存区域 - /// - // shared_mem: RefCell>, 修改为RwLock>, 曾俊 - shared_mem: RwLock>, - - /// 任务列表 - tasks: Vec>, - /// 接收新任务的通道 - rx: mpsc::Receiver>, - /// 预期总大小 - expected_size: usize, - /// 当前已写入大小 - current_size: usize, - /// 广播通道发送端,用于通知任务完成 - broadcast_tx: Arc>, - }, -} - -impl WriteSplitDataTaskGroup { - /// 创建新的任务组 - pub async fn new( - unique_id: UniqueId, - total_size: usize, - block_type: 
proto::data_item::DataItemDispatch, - version: u64, - // file_name: Option<&str>, 函数体并没有用到这个参数 查看引用发现也没有使用到这个参数 这里直接删除 曾俊 - ) -> WSResult<(Self, WriteSplitDataTaskHandle)> { - let (tx, rx) = mpsc::channel(32); - let (broadcast_tx, _) = broadcast::channel::<()>(32); - let broadcast_tx = Arc::new(broadcast_tx); - // let pathbase=DataItemView::new(try_get_modules_ref().todo_handle("Failed to get modules ref when create WriteSplitDataTaskGroup")?).os().file_path; - //所有权发生变化 添加克隆方法 曾俊 - let pathbase=DataItemView::new(try_get_modules_ref().todo_handle("Failed to get modules ref when create WriteSplitDataTaskGroup")?).os().file_path.clone(); - - match block_type { - proto::data_item::DataItemDispatch::File(file_data) => { - let tmp_file_path = pathbase.join(format!("{}.data", - STANDARD.encode(&unique_id))); - - let handle = WriteSplitDataTaskHandle { - tx, - write_type: WriteSplitDataType::File { - path: tmp_file_path.clone(), - }, - version, - broadcast_tx: broadcast_tx.clone(), - }; - - let group = Self::ToFile { - is_dir: file_data.is_dir_opt, - unique_id, - tmp_file_path, - target_file_path: pathbase.join(file_data.file_name_opt.as_str()), - tasks: Vec::new(), - rx, - expected_size: total_size, - current_size: 0, - broadcast_tx: broadcast_tx.clone(), - }; - - Ok((group, handle)) - } - proto::data_item::DataItemDispatch::RawBytes(_) => { - let shared_mem = SharedMemHolder { - data: Arc::new(vec![0; total_size]), - }; - - let handle = WriteSplitDataTaskHandle { - tx, - write_type: WriteSplitDataType::Mem { - shared_mem: shared_mem.clone(), - }, - version, - broadcast_tx: broadcast_tx.clone(), - }; - - let group = Self::ToMem { - unique_id, - // 原代码:shared_mem, 类型不匹配 曾俊 - shared_mem:RwLock::new(Some(shared_mem)), - tasks: Vec::new(), - rx, - expected_size: total_size, - current_size: 0, - broadcast_tx: broadcast_tx.clone(), - }; - - Ok((group, handle)) - } - } - } - - /// 处理所有写入任务 - /// - /// # 返回 - /// * `Ok(item)` - 所有数据写入完成,返回数据项 - /// * `Err(e)` - 写入过程中出错 - pub async fn process_tasks(&mut self) -> WSResult { - let mut pending_tasks: FuturesUnordered> = FuturesUnordered::new(); - - match self { - Self::ToFile { tasks, .. } | - Self::ToMem { tasks, .. } => { - for task in tasks.drain(..) { - pending_tasks.push(task); - } - } - } - - loop { - // 1. 检查完成状态 - match self.try_complete().await.todo_handle("Failed to complete write split data tasks")? { - Some(item) => return Ok(item), - None => {} // 继续等待 - } - - // 2. 等待新任务或已有任务完成 - tokio::select! { - Some(new_task) = match self { - Self::ToFile { rx, .. } | - Self::ToMem { rx, .. } => rx.recv() - } => { - pending_tasks.push(new_task); - } - Some(completed_result) = pending_tasks.next() => { - match completed_result { - Ok(result) => { - match self { - Self::ToFile { current_size, .. } | - Self::ToMem { current_size, .. } => { - *current_size += result.written_size; - } - } - } - Err(e) => { - tracing::error!("Task failed: {}", e); - return Err(WSError::WsDataError(WsDataError::BatchTransferTaskFailed { - reason: format!("Task failed: {}", e) - })); - } - } - } - } - } - } - - /// 检查写入完成状态 - /// - /// 返回: - /// - Ok(Some(item)) - 写入完成,返回数据项 - /// - Ok(None) - 写入未完成 - /// - Err(e) - 写入出错 - async fn try_complete(&self) -> WSResult> { - match self { - Self::ToFile { current_size, expected_size, tmp_file_path, target_file_path, unique_id, is_dir, .. 
} => { - if *current_size > *expected_size { - Err(WSError::WsDataError(WsDataError::BatchTransferError { - request_id: proto::BatchRequestId { - node_id: 0, // 这里需要传入正确的node_id - sequence: 0, - }, - msg: format!("Written size {} exceeds expected size {} for unique_id {:?}", - current_size, expected_size, unique_id) - })) - } else if *current_size == *expected_size { - if *is_dir{ - // unzip to file_path - // - open received file with std api - let file=std::fs::File::open(tmp_file_path).map_err(|e|{ - tracing::error!("Failed to open file: {}", e); - WSError::from(WsDataError::FileOpenErr { - path: tmp_file_path.clone(), - err: e, - }) - })?; - let tmp_file_path=tmp_file_path.clone(); - let target_file_path=target_file_path.clone(); - tokio::task::spawn_blocking(move || - zip_extract::extract(file,target_file_path.as_path() , false).map_err(|e|{ - WSError::from(WsDataError::UnzipErr { - path: tmp_file_path, - err: e, - }) - }) - ).await.unwrap().todo_handle("Failed to unzip file")?; - }else{ - // rename tmp_file_path to target_file_path - std::fs::rename(tmp_file_path, target_file_path).map_err(|e|{ - tracing::error!("Failed to rename file: {}", e); - WSError::from(WsDataError::FileRenameErr { - from: tmp_file_path.clone(), - to: target_file_path.clone(), - err: e, - }) - })?; - } - Ok(Some(proto::DataItem{ - data_item_dispatch: Some(proto::data_item::DataItemDispatch::File(proto::FileData{ - file_name_opt: target_file_path.to_string_lossy().to_string(), - is_dir_opt: *is_dir, - file_content: vec![], - })), - })) - } else { - Ok(None) - } - } - Self::ToMem { current_size, expected_size, shared_mem, unique_id, .. } => { - if *current_size > *expected_size { - Err(WSError::WsDataError(WsDataError::BatchTransferError { - request_id: proto::BatchRequestId { - node_id: 0, // 这里需要传入正确的node_id - sequence: 0, - }, - msg: format!("Written size {} exceeds expected size {} for unique_id {:?}", - current_size, expected_size, unique_id) - })) - } else if *current_size == *expected_size { - Ok(Some(proto::DataItem{ - //曾俊 随RwLock数据类型改动 - // data_item_dispatch: Some(proto::data_item::DataItemDispatch::RawBytes(shared_mem.borrow_mut().take().unwrap().try_take_data().expect("only group can take data once"))), - data_item_dispatch: Some(proto::data_item::DataItemDispatch::RawBytes(shared_mem.write().expect("Failed to lock RwLock for writing").take().unwrap().try_take_data().expect("only group can take data once"))), - })) - } else { - Ok(None) - } - } - } - } -} - -/// 简化的任务完成等待器 -pub struct WriteSplitDataWaiter { - rx: broadcast::Receiver<()>, -} - -impl WriteSplitDataWaiter { - /// 等待所有任务完成 - pub async fn wait(mut self) -> WSResult<()> { - // 持续接收直到通道关闭 - while let Ok(_) = self.rx.recv().await { - // 不需要处理具体消息内容,只需要知道有消息到达 - } - - // 通道关闭表示所有发送端都已释放 - Ok(()) - } -} - - -/// 写入分片任务的句柄 -/// 用于提交新的分片任务和等待任务完成 -#[derive(Clone)] -pub struct WriteSplitDataTaskHandle { - /// 发送任务的通道 - tx: mpsc::Sender>, - /// 写入类型(文件或内存) - write_type: WriteSplitDataType, - /// 数据版本号 - /// 用于防止数据覆盖和保证数据一致性: - /// 1. 防止旧版本数据覆盖新版本数据 - /// 2. 
客户端可以通过比较版本号确认数据是否最新 - version: u64, - /// 广播通道发送端,用于通知任务完成 - broadcast_tx: Arc>, -} - -impl WriteSplitDataTaskHandle { - /// 获取当前数据版本号 - pub fn version(&self) -> u64 { - self.version - } - - pub fn get_all_tasks_waiter(&self) -> WriteSplitDataWaiter { - WriteSplitDataWaiter { - rx: self.broadcast_tx.subscribe(), - } - } - - /// 提交新的分片任务 - /// - /// # 参数 - /// * `idx` - 分片索引,表示数据在整体中的偏移位置 - /// * `data` - 分片数据 - /// - /// # 返回 - /// * `Ok(())` - 任务提交成功 - /// * `Err(e)` - 任务提交失败,可能是通道已关闭 - pub async fn submit_split(&self, idx: DataSplitIdx, data: proto::DataItem) -> WSResult<()> { - let task = match &self.write_type { - // WriteSplitDataType::File { path } | WriteSplitDataType::Dir { path } => { 原WriteSplitDataType::Dir忽视了zip_file字段 发现没有用到修改为直接忽视 曾俊 - WriteSplitDataType::File { path } | WriteSplitDataType::Dir { path ,..} => { - let path = path.clone(); - let offset = idx; - let data = data.as_raw_bytes().unwrap_or(&[]).to_vec(); - let written_size = data.len(); - tokio::spawn(async move { - let result = tokio::fs::OpenOptions::new() - .create(true) - .write(true) - .open(&path) - .await; - - match result { - Ok(mut file) => { - use tokio::io::{AsyncSeekExt, AsyncWriteExt}; - if let Err(e) = async move { - // 验证seek结果 - let seek_pos = file.seek(std::io::SeekFrom::Start(offset as u64)).await?; - if seek_pos != offset as u64 { - return Err(std::io::Error::new( - std::io::ErrorKind::Other, - format!("Seek position mismatch: expected {}, got {}", offset, seek_pos) - )); - } - // write_all保证写入所有数据或返回错误 - file.write_all(&data).await?; - Ok::<_, std::io::Error>(()) - }.await { - tracing::error!("Failed to write file data at offset {}: {}", offset, e); - panic!("Failed to write file: {}", e); - } - WriteSplitTaskResult { written_size } - } - Err(e) => { - tracing::error!("Failed to open file at offset {}: {}", offset, e); - panic!("Failed to open file: {}", e); - } - } - }) - } - WriteSplitDataType::Mem { shared_mem } => { - let mem = shared_mem.clone(); - let offset = idx; - let Some(data) = data.as_raw_bytes().map(|data| data.to_vec()) else { - return Err(WSError::WsDataError(WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: 0, - sequence: 0, - }, - reason: format!("mem data expected"), - })); - }; - let written_size = data.len(); - tracing::debug!("submit_split: Mem, len:{}, target len:{}", data.len(), shared_mem.len()); - - tokio::spawn(async move { - unsafe { - let slice = std::slice::from_raw_parts_mut( - mem.data.as_ptr() as *mut u8, - mem.data.len() - ); - slice[offset..offset + data.len()].copy_from_slice(&data); - } - WriteSplitTaskResult { written_size } - }) - } - }; - - // 发送到通道 - let _ = self.broadcast_tx.send(()); - self.tx.send(task).await.map_err(|e| { - tracing::error!("Failed to submit task: channel closed, idx: {:?}, error: {}", idx, e); - WSError::WsDataError(WsDataError::DataSplitTaskError { - msg: format!("Failed to submit task: channel closed, error: {}", e) - }) - }) - } - - /// 等待所有已提交的写入任务完成 - /// 关闭发送端,不再接收新任务 - pub async fn wait_all_tasks(&self) -> WSResult<()> { - // 等待广播通知 - let mut rx = self.broadcast_tx.subscribe(); - rx.recv().await.map_err(|e| { - tracing::error!("Failed to wait for tasks: {}", e); - WSError::WsDataError(WsDataError::BatchTransferTaskFailed { - reason: format!("Failed to wait for tasks: {}", e) - }) - })?; - - Ok(()) - } - - // 在任务处理逻辑中保持发送端的引用 - pub async fn process_tasks(&mut self) -> WSResult<()> { - let _tx_holder = self.broadcast_tx.clone(); // 保持发送端存活 - - // ...任务处理逻辑... 
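-    // Illustrative usage sketch (assumed caller-side code, not from the original file):
-    // the group and its handle are created together, `process_tasks()` is driven to
-    // assemble the final item, and each split is pushed through the handle:
-    //
-    //     let (mut group, handle) = WriteSplitDataTaskGroup::new(
-    //         unique_id, total_size, block_type, version).await?;
-    //     let assembled = tokio::spawn(async move { group.process_tasks().await });
-    //     for (offset, item) in splits {
-    //         handle.submit_split(offset, item).await?; // offset is the byte-offset DataSplitIdx
-    //     }
-    //     let data_item = assembled.await??;
-    //
-    // `wait_all_tasks()` / `get_all_tasks_waiter()` offer an alternative way to block until
-    // every submitted split has been flushed, and the `version` stored on the handle exists
-    // to keep an older writer from overwriting newer data, as the field documentation above
-    // describes.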
- - // 当所有任务完成,_tx_holder被释放,广播通道自动关闭 - Ok(()) -} -} - -#[derive(Debug)] -pub enum DataItemSource { - Memory { - data: Vec, - }, - File { - path: PathBuf, - }, -} - -impl DataItemSource { - pub fn to_debug_string(&self) -> String { - match self { - Self::Memory { data } => { - //limit range vec - format!("Memory({:?})", data[0..10.min(data.len())].to_vec()) - } - Self::File { path } => format!("File({})", path.to_string_lossy()), - } - } - - pub fn new(data: proto::DataItem) -> Self { - match &data.data_item_dispatch { - Some(proto::data_item::DataItemDispatch::RawBytes(bytes)) => Self::Memory { - data: bytes.clone(), - }, - Some(proto::data_item::DataItemDispatch::File(file_data)) => Self::File { - path: file_data.file_name_opt.clone().into(), - }, - _ => Self::Memory { - data: Vec::new(), - }, - } - } - - //添加一个DataItemSource转换到DataItem的函数 曾俊 - pub fn to_data_item(&self) -> proto::DataItem { - match self { - DataItemSource::Memory { data } => proto::DataItem { - data_item_dispatch: Some(proto::data_item::DataItemDispatch::RawBytes(data.clone())), - }, - DataItemSource::File { path } => proto::DataItem { - data_item_dispatch: Some(proto::data_item::DataItemDispatch::File(proto::FileData { - file_name_opt: path.to_str().map_or_else(|| String::from(""), |s| s.to_string()), // 这里需要根据实际情况调整类型转换 - ..Default::default() // 假设 FileData 有其他字段,这里使用默认值 - })), - }, - } - } - - pub async fn size(&self) -> WSResult { - match self { - DataItemSource::Memory { data } => Ok(data.len()), - DataItemSource::File { path } => { - let metadata = tokio::fs::metadata(path).await.map_err(|e| - WSError::WsDataError(WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: 0, // 这里需要传入正确的node_id - sequence: 0, - }, - reason: format!("Failed to get file size: {}", e), - }) - )?; - Ok(metadata.len() as usize) - } - } - } - - // pub fn block_type(&self) -> proto::BatchDataBlockType { - // match self { - // DataItemSource::Memory { .. } => proto::BatchDataBlockType::Memory, - // DataItemSource::File { .. 
} => proto::BatchDataBlockType::File, - // } - // } - - pub async fn get_block(&self, block_idx: usize) -> WSResult> { - match self { - DataItemSource::Memory { data } => { - if block_idx == 0 { - Ok(data.clone()) - } else { - Err(WSError::WsDataError(WsDataError::SizeMismatch { - expected: data.len(), - actual: 0, - })) - } - }, - DataItemSource::File { path } => { - let content = tokio::fs::read(path).await.map_err(|_e| { - WSError::WsDataError(WsDataError::ReadDataFailed { - path: path.clone(), - }) - })?; - if block_idx == 0 { - Ok(content) - } else { - Err(WSError::WsDataError(WsDataError::SizeMismatch { - expected: content.len(), - actual: 0, - })) - } - }, - } - } -} - -use crate::general::network::proto_ext::DataItemExt; - -impl DataItemExt for DataItemSource { - fn decode_persist(data: Vec) -> WSResult { - if data.is_empty() { - return Err(WSError::WsDataError(WsDataError::DataDecodeError { - reason: "Empty data".to_string(), - data_type: "DataItemSource".to_string(), - })); - } - match data[0] { - 0 => { - let path_str = String::from_utf8(data[1..].to_vec()).map_err(|e| { - WSError::WsDataError(WsDataError::DataDecodeError { - reason: format!("Failed to decode path string: {}", e), - data_type: "DataItemSource::File".to_string(), - }) - })?; - Ok(DataItemSource::File { - path: PathBuf::from(path_str), - }) - }, - 1 => Ok(DataItemSource::Memory { - data: data[1..].to_owned(), - }), - _ => Err(WSError::WsDataError(WsDataError::DataDecodeError { - reason: format!("Unknown data item type id: {}", data[0]), - data_type: "DataItemSource".to_string(), - })) - } - } - - fn encode_persist(&self) -> Vec { - match self { - DataItemSource::File { path } => { - let mut ret = vec![0]; - ret.extend_from_slice(path.to_string_lossy().as_bytes()); - ret - } - DataItemSource::Memory { data } => { - let mut ret = vec![1]; - ret.extend_from_slice(data); - ret - } - } - } -} - -#[derive(Debug, Clone)] -enum DataItemZip { - /// 未初始化状态 - Uninitialized, - /// 不需要压缩(非目录) - NoNeed, - /// 已压缩的目录 - Directory { - zipped_file: PathBuf, - } -} - -//派生显示特征 曾俊 -#[derive(Debug, Clone)] -pub struct DataItemArgWrapper { - pub dataitem: proto::DataItem, - /// 目录压缩状态 - tmpzipfile: DataItemZip, -} - -impl DataItemArgWrapper { - - // 根据传入的DataItem类型新建一个DataItemArgWrapper实例, tmpzipfile默认为Uninitialized。 曾俊 - pub fn new(value: Vec) -> Self { - DataItemArgWrapper { - dataitem:proto::DataItem {data_item_dispatch: Some(proto::data_item::DataItemDispatch::RawBytes(value))}, - tmpzipfile: DataItemZip::Uninitialized, - } - } - pub fn from_file(filepath: PathBuf) -> WSResult { - let view=DataItemView::new(try_get_modules_ref().map_err(|err|{ - tracing::error!("Failed to get modules ref: {}", err); - err - })?); - - //let abs_filepath=view.os().abs_file_path(filepath); - //虞光勇修改 添加.clone() - let abs_filepath=view.os().abs_file_path(filepath.clone()); - - Ok(Self { - dataitem: proto::DataItem{ - data_item_dispatch: Some(proto::data_item::DataItemDispatch::File(proto::FileData{ - is_dir_opt: abs_filepath.is_dir(), - file_name_opt: filepath.to_str().unwrap().to_string(), - file_content: vec![], - })), - }, - tmpzipfile: DataItemZip::Uninitialized, - }) - } - - pub fn from_bytes(bytes: Vec) -> Self { - Self { - dataitem: proto::DataItem{ - data_item_dispatch: Some(proto::data_item::DataItemDispatch::RawBytes(bytes)), - }, - tmpzipfile: DataItemZip::Uninitialized, - } - } - - pub async fn get_tmpzipfile(&mut self) -> WSResult> { - match &self.tmpzipfile { - DataItemZip::Uninitialized => { - self.init_tmpzipfile().await?; - } - _ => {} 
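-            // Persist-encoding sketch (derived from the `DataItemExt` impl above, shown for
-            // illustration only; the path value is hypothetical): `encode_persist` prefixes the
-            // payload with a one-byte tag, 0 for a file path and 1 for in-memory bytes, so
-            //
-            //     DataItemSource::Memory { data: b"abc".to_vec() }.encode_persist()
-            //         == vec![1, b'a', b'b', b'c']
-            //     DataItemSource::File { path: "/tmp/x".into() }.encode_persist()
-            //         == [&[0u8][..], "/tmp/x".as_bytes()].concat()
-            //
-            // and `decode_persist` reverses the mapping, rejecting empty input and unknown tags.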
- } - - match &self.tmpzipfile { - DataItemZip::Directory { zipped_file } => Ok(Some(zipped_file)), - DataItemZip::NoNeed => Ok(None), - DataItemZip::Uninitialized => unreachable!(), - } - } - - async fn init_tmpzipfile(&mut self) -> WSResult<()> { - // 确保只初始化一次 - if !matches!(self.tmpzipfile, DataItemZip::Uninitialized) { - return Ok(()); - } - - let filedata = match self.dataitem.data_item_dispatch.as_ref().unwrap() { - proto::data_item::DataItemDispatch::File(file_data) => file_data, - proto::data_item::DataItemDispatch::RawBytes(_) => { - self.tmpzipfile = DataItemZip::NoNeed; - return Ok(()); - }, - }; - - // 检查目录元数据 - let metadata = tokio::fs::metadata(&filedata.file_name_opt).await.map_err(|e| { - WSError::WsDataError(WsDataError::FileMetadataErr { - path: PathBuf::from(&filedata.file_name_opt), - err: e, - }) - })?; - - if metadata.is_dir() { - let tmp_file = tempfile::NamedTempFile::new().map_err(|e| { - WSError::WsDataError(WsDataError::FileMetadataErr { - path: PathBuf::from(&filedata.file_name_opt), - err: e, - }) - })?; - let tmp_path = tmp_file.path().to_path_buf(); - - // 压缩目录到临时文件 - crate::util::zip::zip_dir_2_file( - &filedata.file_name_opt, - //zip::CompressionMethod::Stored, - CompressionMethod::Stored,//(续)虞光勇修改,修改内容删除zip:: - tmp_file.into_file(), - ).await?; - - self.tmpzipfile = DataItemZip::Directory { - zipped_file: tmp_path, - }; - } else { - self.tmpzipfile = DataItemZip::NoNeed; - } - - Ok(()) - } - - pub async fn transfer_size(&mut self) -> WSResult { - match &self.dataitem.data_item_dispatch { - Some(proto::data_item::DataItemDispatch::RawBytes(bytes)) => return Ok(bytes.len()), - Some(proto::data_item::DataItemDispatch::File(_)) => { - // handle in following - } - None => return Ok(0), - } - - if let Some(tmp_path) = self.get_tmpzipfile().await? { - let metadata = tokio::fs::metadata(tmp_path).await?; - Ok(metadata.len() as usize) - } else { - let file_data=match &self.dataitem.data_item_dispatch { - Some(proto::data_item::DataItemDispatch::File(file_data)) => { - // handle in following - file_data - } - Some(proto::data_item::DataItemDispatch::RawBytes(_)) | None=>{panic!("these case should be handled in previous match")} - }; - let metadata = tokio::fs::metadata(&file_data.file_name_opt).await?; - Ok(metadata.len() as usize) - } - } - - pub async fn clone_split_range(&mut self, range: Range) -> WSResult { - match &self.dataitem.data_item_dispatch { - Some(proto::data_item::DataItemDispatch::RawBytes(bytes)) => { - return Ok(proto::DataItem::new_partial_raw_bytes(bytes.to_owned(), range).map_err(|err|{ - tracing::error!("Failed to clone split range: {}", err); - err - })?) 
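                // RawBytes items are sliced in memory via new_partial_raw_bytes above; File
                // items fall through to the logic below, which reads the requested range
                // either from the zipped temp file (for directories) or straight from the
                // original file path.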
- } - Some(proto::data_item::DataItemDispatch::File(_)) => { - - // handle in following - } - None => panic!("proto dataitem must be Some"), - } - - fn get_filedata(dataitem:&DataItemArgWrapper)->&proto::FileData{ - match &dataitem.dataitem.data_item_dispatch { - Some(proto::data_item::DataItemDispatch::File(file_data)) => file_data, - Some(proto::data_item::DataItemDispatch::RawBytes(_)) | None=>{panic!("these case should be handled in previous match")} - } - } - - // if zipped, use zipped file - // else use file_data.file_name_opt - if let Some(tmp_path) = self.get_tmpzipfile().await?.cloned() { - let file_data=get_filedata(self); - Ok(proto::DataItem::new_partial_file_data(NewPartialFileDataArg::FilePath { path: PathBuf::from_str(&file_data.file_name_opt).map_err(|err|{ - let err=WsDataError::FilePathParseErr { - path: file_data.file_name_opt.clone(), - err: err, - }; - tracing::error!("Failed to clone split range: {:?}", err); - err - })? , zip_path: Some(tmp_path.clone()) }, range).await.map_err(|err|{ - tracing::error!("Failed to clone split range: {}", err); - err - })?) - } else { - let file_data=get_filedata(self); - Ok(proto::DataItem::new_partial_file_data(NewPartialFileDataArg::FilePath { path: PathBuf::from_str(&file_data.file_name_opt).map_err(|err|{ - let err=WsDataError::FilePathParseErr { - path: file_data.file_name_opt.clone(), - err: err, - }; - tracing::error!("Failed to clone split range: {:?}", err); - err - })? , zip_path: None }, range).await.map_err(|err|{ - tracing::error!("Failed to clone split range: {}", err); - err - })?) - } - } -} diff --git a/src/main/src/general/data/m_data_general/mod.rs b/src/main/src/general/data/m_data_general/mod.rs deleted file mode 100644 index 19a372f..0000000 --- a/src/main/src/general/data/m_data_general/mod.rs +++ /dev/null @@ -1,1640 +0,0 @@ -/// 缓存模式类型 -pub type CacheMode = u16; - -pub mod dataitem; -pub mod batch; -pub mod batch_handler; - -use crate::general::data::m_data_general::dataitem::{calculate_splits, WantIdxIter, WriteSplitDataTaskGroup, DataItemSource}; -use crate::general::data::m_data_general::batch_handler::{BatchReceiveState, SharedWithBatchHandler}; -use crate::general::network::proto::DataItem; -use dataitem::{DataItemArgWrapper, WriteSplitTaskResult}; -use tokio::io::{AsyncSeekExt, AsyncReadExt}; - -use crate::general::{ - data::m_kv_store_engine::{ - KeyTypeDataSetItem, KeyTypeDataSetMeta, KvAdditionalConf, KvStoreEngine, KvVersion, - }, - m_os::OperatingSystem, - network::{ - m_p2p::{P2PModule, RPCCaller, RPCHandler, RPCResponsor}, - proto::{ - self, DataMeta, WriteOneDataResponse, - }, - proto_ext::ProtoExtDataItem, - }, -}; -use crate::{ - general::{ - data::m_kv_store_engine::{KeyLockGuard, KeyType}, - network::{proto_ext::DataItemExt}, - }, - logical_module_view_impl, - result::{WSError, WSResult, WSResultExt, WsSerialErr, WsNetworkLogicErr}, - sys::{LogicalModule, LogicalModuleNewArgs, NodeID}, - util::{JoinHandleWrapper, container::async_init_map::AsyncInitMap}, -}; -use crate::{result::WsDataError, sys::LogicalModulesRef}; -use async_trait::async_trait; -use camelpaste::paste; -use core::str; - -use serde::{Deserialize, Serialize}; -use std::{ - collections::{BTreeSet, HashMap, HashSet}, - sync::Arc, - time::Duration, - sync::atomic::{AtomicU32, Ordering}, -}; -use tokio::sync::Semaphore; -use tokio::task::JoinError; -use ws_derive::LogicalModule; - -logical_module_view_impl!(DataGeneralView); -logical_module_view_impl!(DataGeneralView, p2p, P2PModule); -logical_module_view_impl!(DataGeneralView, 
data_general, DataGeneral); -logical_module_view_impl!(DataGeneralView, kv_store_engine, KvStoreEngine); -logical_module_view_impl!(DataGeneralView, os, OperatingSystem); - -pub type DataVersion = u64; -pub type DataItemIdx = u8; - -pub const DATA_UID_PREFIX_APP_META: &str = "app"; -pub const DATA_UID_PREFIX_FN_KV: &str = "fkv"; - -/// 默认数据块大小 (4MB) -pub const DEFAULT_BLOCK_SIZE: usize = 4 * 1024 * 1024; - -pub const CACHE_MODE_TIME_MASK: u16 = 0xf000; -pub const CACHE_MODE_TIME_FOREVER_MASK: u16 = 0x0fff; -pub const CACHE_MODE_TIME_AUTO_MASK: u16 = 0x1fff; - -pub const CACHE_MODE_POS_MASK: u16 = 0x0f00; -pub const CACHE_MODE_POS_ALLNODE_MASK: u16 = 0xf0ff; -pub const CACHE_MODE_POS_SPECNODE_MASK: u16 = 0xf1ff; -pub const CACHE_MODE_POS_AUTO_MASK: u16 = 0xf2ff; - -pub const CACHE_MODE_MAP_MASK: u16 = 0x00f0; -pub const CACHE_MODE_MAP_COMMON_KV_MASK: u16 = 0xff0f; -pub const CACHE_MODE_MAP_FILE_MASK: u16 = 0xff1f; -// const DATA_UID_PREFIX_OBJ: &str = "obj"; - -pub fn new_data_unique_id_app(app_name: &str) -> String { - format!("{}{}", DATA_UID_PREFIX_APP_META, app_name) -} - -pub fn new_data_unique_id_fn_kv(key: &[u8]) -> Vec { - let mut temp = DATA_UID_PREFIX_FN_KV.as_bytes().to_owned(); - temp.extend(key); - temp - // let key_str = str::from_utf8(key).unwrap(); - // format!("{}{}", DATA_UID_PREFIX_FN_KV, key_str) -} - -/// 唯一标识符类型 -pub type UniqueId = Vec; - -#[derive(LogicalModule)] -pub struct DataGeneral { - view: DataGeneralView, - pub rpc_call_data_version_schedule: RPCCaller, - rpc_call_write_once_data: RPCCaller, - rpc_call_batch_data: RPCCaller, - rpc_call_get_data_meta: RPCCaller, - rpc_call_get_data: RPCCaller, - - rpc_handler_write_once_data: RPCHandler, - rpc_handler_batch_data: RPCHandler, - rpc_handler_data_meta_update: RPCHandler, - rpc_handler_get_data_meta: RPCHandler, - rpc_handler_get_data: RPCHandler, - - // 批量数据接收状态管理 - batch_receive_states: AsyncInitMap>, -} - -impl DataGeneral { - pub fn inner_new(args: LogicalModuleNewArgs) -> Self { - Self { - view: DataGeneralView::new(args.logical_modules_ref.clone()), - rpc_call_data_version_schedule: RPCCaller::new(), - rpc_call_write_once_data: RPCCaller::new(), - rpc_call_batch_data: RPCCaller::new(), - rpc_call_get_data_meta: RPCCaller::new(), - rpc_call_get_data: RPCCaller::new(), - rpc_handler_write_once_data: RPCHandler::new(), - rpc_handler_batch_data: RPCHandler::new(), - rpc_handler_data_meta_update: RPCHandler::new(), - rpc_handler_get_data_meta: RPCHandler::new(), - rpc_handler_get_data: RPCHandler::new(), - batch_receive_states: AsyncInitMap::new(), - } - } - - #[allow(dead_code)] - fn next_batch_id(&self) -> u32 { - static NEXT_BATCH_ID: AtomicU32 = AtomicU32::new(1); // 从1开始,保留0作为特殊值 - NEXT_BATCH_ID.fetch_add(1, Ordering::Relaxed) - } - - pub async fn write_data_batch( - &self, - unique_id: UniqueId, - version: u64, - data: proto::DataItem, - data_item_idx: DataItemIdx, - node_id: NodeID, - ) -> WSResult<()> { - // 调用 batch_transfer 函数处理数据传输 - async fn batch_transfer( - data_item_idx: DataItemIdx, - unique_id: UniqueId, - version: u64, - target_node: NodeID, - data: Arc, - view: DataGeneralView, - ) -> WSResult<()> { - let (tx, mut rx) = tokio::sync::mpsc::channel(32); - let mut handles = Vec::new(); - - let data_size = data.size().await?; - let splits = calculate_splits(data_size); - - tracing::debug!("batch_transfer total size({}), splits: {:?}, to node {}", data_size, splits, target_node); - - for (block_idx, split_range) in splits.iter().enumerate() { - let block_data = match data.as_ref() { - 
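                // Per-split block extraction: memory-backed items are sliced with the split
                // range, while file-backed items are opened, seeked to split_range.start and
                // read with read_exact so each block carries exactly split_range.len() bytes.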
DataItemSource::Memory { data } => data[split_range.clone()].to_vec(), - DataItemSource::File { path } => { - // 读取文件对应块的数据 - let mut file = tokio::fs::File::open(path).await.map_err(|e| WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: target_node as u32, - sequence: block_idx as u64, - }, - reason: format!("Failed to open file: {}", e), - })?; - let mut buffer = vec![0; split_range.len()]; - // 验证seek结果 - let seek_pos = file.seek(std::io::SeekFrom::Start(split_range.start as u64)).await.map_err(|e| WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: target_node as u32, - sequence: block_idx as u64, - }, - reason: format!("Failed to seek file: {}", e), - })?; - if seek_pos != split_range.start as u64 { - return Err(WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: target_node as u32, - sequence: block_idx as u64, - }, - reason: format!("Seek position mismatch: expected {}, got {}", split_range.start, seek_pos), - }.into()); - } - // read_exact保证读取指定长度的数据或返回错误 - let _ = file.read_exact(&mut buffer).await.map_err(|e| WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: target_node as u32, - sequence: block_idx as u64, - }, - reason: format!("Failed to read file: {}", e), - })?; - buffer - } - }; - - let request = proto::BatchDataRequest { - request_id: Some(proto::BatchRequestId { - node_id: target_node as u32, - sequence: block_idx as u64, - }), - dataset_unique_id: unique_id.clone(), - data_item_idx: data_item_idx as u32, - // 用空的 DataItem 代替 - block_type: match data.as_ref() { - DataItemSource::Memory { .. } => Some(proto::DataItem{ - data_item_dispatch: Some(proto::data_item::DataItemDispatch::RawBytes(Vec::new())), - }), - DataItemSource::File { .. 
} => Some(proto::DataItem{ - data_item_dispatch: Some(proto::data_item::DataItemDispatch::File(proto::FileData { file_name_opt: String::new(), is_dir_opt: true, file_content: Vec::new() })), - }), - }, - block_index: block_idx as u32, - data: block_data, - operation: proto::DataOpeType::Write as i32, - unique_id: unique_id.clone(), - version, - total_size: data_size as u64, - }; - - let tx = tx.clone(); - let view = view.clone(); - - let handle = tokio::spawn(async move { - let result = view.data_general() - .rpc_call_batch_data - .call( - view.p2p(), - target_node, - request, - Some(Duration::from_secs(30)), - ) - .await; - - if let Err(e) = tx.send(result).await { - tracing::error!("Failed to send batch transfer result: {}", e); - } - }); - - handles.push(handle); - } - - drop(tx); - - while let Some(result) = rx.recv().await { - match result { - Ok(resp) if !resp.success => { - return Err(WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: target_node as u32, - sequence: 0, // TODO: Add proper sequence number - }, - reason: resp.error_message, - }.into()); - } - Ok(_) => continue, - Err(e) => { - return Err(WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: target_node as u32, - sequence: 0, - }, - reason: format!("RPC call failed: {}", e), - }.into()); - } - } - } - - for handle in handles { - handle.await.map_err(|e| { - WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: target_node as u32, - sequence: 0, - }, - reason: format!("Task join failed: {}", e), - } - })?; - } - - Ok(()) - } - - let data = Arc::new(data.to_data_item_source()); - batch_transfer(data_item_idx,unique_id, version, node_id, data, self.view.clone()).await - } - - - pub async fn get_or_del_datameta_from_master( - &self, - unique_id: &[u8], - delete: bool, - ) -> WSResult { - tracing::debug!("get_or_del_datameta_from_master uid: {:?}, delete: {}, whoami: {}", unique_id, delete, self.view.p2p().nodes_config.this.0); - let p2p = self.view.p2p(); - // get meta from master - let meta = self - .rpc_call_get_data_meta - .call( - p2p, - p2p.nodes_config.get_master_node(), - proto::DataMetaGetRequest { - unique_id: unique_id.to_vec(), - delete, - }, - Some(Duration::from_secs(60)), - ) - .await?; - - if meta.serialized_meta.is_empty() { - return Err(WsDataError::DataSetNotFound { - uniqueid: unique_id.to_vec(), - } - .into()); - } - - bincode::deserialize(&meta.serialized_meta).map_err(|err| { - WsSerialErr::BincodeErr { - err, - context: "get_or_del_datameta_from_master".to_owned(), - } - .into() - }) - } - - pub async fn get_or_del_data( - &self, - GetOrDelDataArg { - meta, - unique_id, - ty, - }: GetOrDelDataArg, - ) -> WSResult<(DataSetMetaV2, HashMap)> { - tracing::debug!("get_or_del_data uid: {:?}, maybe with meta: {:?}", unique_id, meta); - let mut data_map = HashMap::new(); - - // get meta from master - let meta = if let Some(meta) = meta { - meta - } else { - self.get_or_del_datameta_from_master(&unique_id, false) - .await? 
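            // meta is now resolved: either supplied by the caller or just fetched from the
            // master node (delete = false). The per-item RPCs below use it to pick the node
            // holding each data item via get_data_node(idx).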
- }; - - tracing::debug!("start get_or_del_data uid: {:?},meta: {:?}", unique_id, meta); - - // basical verify - for idx in 0..meta.data_item_cnt() { - let idx = idx as DataItemIdx; - let check_cache_map = |meta: &DataSetMetaV2| -> WSResult<()> { - if !meta.cache_mode_visitor(idx).is_map_common_kv() - && !meta.cache_mode_visitor(idx).is_map_file() - { - return Err(WsDataError::UnknownCacheMapMode { - mode: meta.cache_mode_visitor(idx).0, - } - .into()); - } - Ok(()) - }; - check_cache_map(&meta)?; - } - - // get data - let p2p = self.view.p2p(); - - match ty { - GetOrDelDataArgType::All => { - for idx in 0..meta.data_item_cnt() { - let idx = idx as DataItemIdx; - let resp = self - .rpc_call_get_data - .call( - p2p, - meta.get_data_node(idx), - proto::GetOneDataRequest { - unique_id: unique_id.to_vec(), - idxs: vec![idx as u32], - delete: false, - return_data: true, - }, - Some(Duration::from_secs(60)), - ) - .await?; - - if !resp.success { - return Err(WsDataError::GetDataFailed { - unique_id: unique_id.to_vec(), - msg: resp.message, - } - .into()); - } - - let _ = data_map.insert(idx, resp.data[0].clone()); - } - } - GetOrDelDataArgType::Delete => { - for idx in 0..meta.data_item_cnt() { - let idx = idx as DataItemIdx; - let resp = self - .rpc_call_get_data - .call( - p2p, - meta.get_data_node(idx), - proto::GetOneDataRequest { - unique_id: unique_id.to_vec(), - idxs: vec![idx as u32], - delete: true, - return_data: true, - }, - Some(Duration::from_secs(60)), - ) - .await?; - - if !resp.success { - return Err(WsDataError::GetDataFailed { - unique_id: unique_id.to_vec(), - msg: resp.message, - } - .into()); - } - - let _ = data_map.insert(idx, resp.data[0].clone()); - } - } - GetOrDelDataArgType::PartialOne { idx } => { - let resp = self - .rpc_call_get_data - .call( - p2p, - meta.get_data_node(idx), - proto::GetOneDataRequest { - unique_id: unique_id.to_vec(), - idxs: vec![idx as u32], - delete: false, - return_data: true, - }, - Some(Duration::from_secs(60)), - ) - .await?; - - if !resp.success { - return Err(WsDataError::GetDataFailed { - unique_id: unique_id.to_vec(), - msg: resp.message, - } - .into()); - } - - let _ = data_map.insert(idx, resp.data[0].clone()); - } - GetOrDelDataArgType::PartialMany { idxs } => { - for idx in idxs { - let resp = self - .rpc_call_get_data - .call( - p2p, - meta.get_data_node(idx), - proto::GetOneDataRequest { - unique_id: unique_id.to_vec(), - idxs: vec![idx as u32], - delete: false, - return_data: true, - }, - Some(Duration::from_secs(60)), - ) - .await?; - - if !resp.success { - return Err(WsDataError::GetDataFailed { - unique_id: unique_id.to_vec(), - msg: resp.message, - } - .into()); - } - - let _ = data_map.insert(idx, resp.data[0].clone()); - } - } - } - - Ok((meta, data_map)) - } - - pub async fn write_data( - &self, - unique_id: impl Into>, - mut datas: Vec, - context_openode_opetype_operole: Option<( - NodeID, - proto::DataOpeType, - proto::data_schedule_context::OpeRole, - )>, - ) -> WSResult<()> { - let unique_id = unique_id.into(); - let log_tag = format!("[write_data({})]", String::from_utf8_lossy(&unique_id)); - tracing::debug!("{} start write data", log_tag); - - let mut data_transfer_sizes=Vec::new(); - data_transfer_sizes.reserve(datas.len()); - for d in datas.iter_mut(){ - data_transfer_sizes.push(d.transfer_size().await.map_err(|err|{ - tracing::error!("{} transfer size error: {}", log_tag, err); - err - })?); - } - // 获取数据调度计划 - let version_schedule_resp = self - .rpc_call_data_version_schedule - .call( - self.view.p2p(), - 
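                // Target is the master node, which assigns the new data version and returns
                // a per-item split plan (node id + offset + size for every shard).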
self.view.p2p().nodes_config.get_master_node(), - proto::DataVersionScheduleRequest { - unique_id: unique_id.clone(), - context: context_openode_opetype_operole.map(|(node, ope, role)| { - proto::DataScheduleContext { - // each_data_sz_bytes: data_transfer_sizes, 原代码类型不匹配 曾俊 - each_data_sz_bytes: data_transfer_sizes.iter().map(|&x| x as u32).collect(), - ope_node: node as i64, - ope_type: ope as i32, - ope_role: Some(role), - } - }), - version: 0, - }, - Some(Duration::from_secs(60)), - ) - .await?; - - // Clone the response to extend its lifetime - let version = version_schedule_resp.version; - let splits = version_schedule_resp.split.clone(); - - // 处理每个数据项 - let mut iter = WantIdxIter::new(&GetOrDelDataArgType::All, datas.len() as u8); - while let Some(data_item_idx) = iter.next() { - let data_item: &DataItemArgWrapper = &mut datas[data_item_idx as usize]; - let split = &splits[data_item_idx as usize]; - let mut primary_tasks = Vec::new(); - - // 1. 并行写入所有主数据分片 - let mut split_iter = WantIdxIter::new(&GetOrDelDataArgType::All, split.splits.len() as u8); - while let Some(split_idx) = split_iter.next() { - let split_info = &split.splits[split_idx as usize]; - tracing::debug!("{} creating split write task {}/{} for node {}, offset={}, size={}", - log_tag, split_idx + 1, split.splits.len(), split_info.node_id, split_info.data_offset, split_info.data_size); - let split_info = split_info.clone(); - let unique_id_clone = unique_id.clone(); - // let data_item_primary = data_item.clone_split_range(split_info.data_offset..split_info.data_offset+split_info.data_size); 类型不匹配 曾俊 - // 生成一个复制的可变数据项 - let mut data_item_clone = (*data_item).clone(); - let data_item_primary = data_item_clone.clone_split_range(split_info.data_offset as usize..(split_info.data_offset+split_info.data_size)as usize).await.todo_handle("clone_split_range for write data err")?; - // let data_item_primary = data_item.clone_split_range(split_info.data_offset as usize..(split_info.data_offset+split_info.data_size)as usize).await.todo_handle("clone_split_range for write data err")?; - let view = self.view.clone(); - let version_copy = version; - let task = tokio::spawn(async move { - view.data_general() - .rpc_call_write_once_data - .call( - view.p2p(), - split_info.node_id, - proto::WriteOneDataRequest { - unique_id: unique_id_clone.clone(), - version: version_copy, - data: vec![proto::DataItemWithIdx { - idx: data_item_idx as u32, - // data: Some(data_item_primary), 类型不匹配 曾俊 - data: Some(data_item_primary), - }], - }, - Some(Duration::from_secs(60)), - ) - .await - }); - primary_tasks.push(task); - } - - // 2. 
并行写入缓存数据(完整数据) - let visitor = CacheModeVisitor(version_schedule_resp.cache_mode[data_item_idx as usize] as u16); - let need_cache = visitor.is_map_common_kv() || visitor.is_map_file(); - let cache_nodes: Vec = if need_cache { - split.splits.iter().map(|s| s.node_id).collect() - } else { - vec![] - }; - - let mut cache_tasks = Vec::new(); - if !cache_nodes.is_empty() { - tracing::debug!("{} found {} cache nodes: {:?}", log_tag, cache_nodes.len(), cache_nodes); - const MAX_CONCURRENT_TRANSFERS: usize = 3; - let semaphore = Arc::new(Semaphore::new(MAX_CONCURRENT_TRANSFERS)); - - let mut cache_iter = WantIdxIter::new(&GetOrDelDataArgType::All, cache_nodes.len() as u8); - while let Some(cache_idx) = cache_iter.next() { - let node_id = cache_nodes[cache_idx as usize]; - let permit = semaphore.clone().acquire_owned().await.unwrap(); - tracing::debug!("{} creating cache write task {}/{} for node {}", log_tag, cache_idx + 1, cache_nodes.len(), node_id); - let unique_id_clone = unique_id.clone(); - let data_item_cache = data_item.clone(); - let view = self.view.clone(); - let task = tokio::spawn(async move { - let _permit = permit; // 持有permit直到任务完成 - view.data_general() - // .write_data_batch(unique_id_clone.clone(), version, data_item_cache, data_item_idx, node_id) //类型不匹配 曾俊 - .write_data_batch(unique_id_clone.clone(), version, data_item_cache.dataitem, data_item_idx, node_id) - .await?; - Ok::(proto::WriteOneDataResponse { - remote_version: version, - success: true, - message: String::new(), - }) - }); - cache_tasks.push(task); - } - } - - let primary_results = futures::future::join_all(primary_tasks).await; - tracing::debug!("{} primary_results: {:?}", log_tag, primary_results); - let cache_results = futures::future::join_all(cache_tasks).await; - tracing::debug!("{} cache_results: {:?}", log_tag, cache_results); - - if primary_results.iter().any(|res| res.is_err()) || cache_results.iter().any(|res| res.is_err()) { - let error_msg = format!("主节点或缓存节点数据写入失败"); - tracing::error!("{}", error_msg); - return Err(WSError::WsDataError(WsDataError::WriteDataFailed { - unique_id: unique_id.clone(), - message: error_msg, - })); - } - } - - Ok(()) - } - - async fn rpc_handle_write_one_data( - &self, - responsor: RPCResponsor, - req: proto::WriteOneDataRequest, - ) { - tracing::debug!("verify data meta bf write data"); - let kv_store_engine = self.view.kv_store_engine(); - - // Step1: verify version - // take old meta - #[allow(unused_assignments)] - let mut required_meta: Option<(usize, DataSetMetaV2)> = None; - { - let keybytes: Vec = KeyTypeDataSetMeta(&req.unique_id).make_key(); - let fail_by_overwrite = || async { - let message = "New data version overwrite".to_owned(); - tracing::warn!("{}", message); - - if let Err(e) = responsor //返回结果未处理 曾俊 - .send_resp(WriteOneDataResponse { - remote_version: 0, - success: false, - message, - }) - .await{ - tracing::error!("Failed to send write one data response 1: {}", e); - } - // .todo_handle("1 err_comment waitting to fill"); - - }; - let fail_with_msg = |message: String| async { - tracing::warn!("{}", message); - if let Err(e) = responsor //返回结果未处理 曾俊 - .send_resp(WriteOneDataResponse { - remote_version: 0, - success: false, - message, - }) - .await { - tracing::error!("Failed to send write one data response 2 : {}", e); - } - // .todo_handle("2 err_comment waitting to fill"); - }; - - loop { - // tracing::debug!("verify version loop"); - let lock = - kv_store_engine.with_rwlock(&KeyTypeDataSetMeta(&req.unique_id).make_key()); - let guard = 
KeyLockGuard::Read(lock.read()); - required_meta = kv_store_engine.get( - &KeyTypeDataSetMeta(&req.unique_id), - true, - KvAdditionalConf {}, - ); //tofix, master send maybe not synced - let old_dataset_version = if required_meta.is_none() { - 0 - } else { - required_meta.as_ref().unwrap().1.version - }; - // need to wait for new version - if required_meta.is_none() - || required_meta.as_ref().unwrap().1.version < req.version - { - if required_meta.is_none() { - tracing::debug!("no data version, waiting for notify"); - } else { - tracing::debug!( - "data version is old({}) at node({}), waiting for new notify({})", - required_meta.as_ref().unwrap().1.version, - self.view.p2p().nodes_config.this_node(), - req.version - ); - } - - let (kv_version, new_value) = kv_store_engine - .register_waiter_for_new(&keybytes, guard) - .await - .unwrap_or_else(|err| { - panic!("fail to wait for new data version: {:?}", err); - }); - - let Some(new_value) = new_value.as_raw_data() else { - fail_with_msg(format!( - "fatal error, kv value supposed to be DataSetMeta, rathe than {:?}", - new_value - )) - .await; - return; - }; - - // deserialize - let new_value = bincode::deserialize::(&new_value); - if let Err(err) = new_value { - fail_with_msg(format!( - "fatal error, kv value deserialization failed: {}", - err - )) - .await; - return; - } - let new_value = new_value.unwrap(); - - // version check - if new_value.version > req.version { - fail_by_overwrite().await; - return; - } else if new_value.version < req.version { - tracing::debug!("recv data version({}) is old than required({}), waiting for new notify",new_value.version, req.version); - // still need to wait for new version - continue; - } else { - required_meta = Some((kv_version, new_value)); - break; - } - } else if old_dataset_version > req.version { - drop(guard); - fail_by_overwrite().await; - return; - } else { - tracing::debug!( - "data version is matched cur({}) require({}) // 0 should be invalid", - old_dataset_version, - req.version - ); - break; - } - } - } - - // Step3: write data - tracing::debug!("start to write partial data"); - let lock = kv_store_engine.with_rwlock(&KeyTypeDataSetMeta(&req.unique_id).make_key()); - let guard = KeyLockGuard::Write(lock.write()); - let check_meta = kv_store_engine.get( - &KeyTypeDataSetMeta(&req.unique_id), - true, - KvAdditionalConf {}, - ); //tofix, master send maybe not synced - if check_meta.is_none() - || check_meta.as_ref().unwrap().0 != required_meta.as_ref().unwrap().0 - { - drop(guard); - if let Err(e) = responsor //返回结果未处理 曾俊 - .send_resp(WriteOneDataResponse { - remote_version: if check_meta.is_none() { - 0 - } else { - check_meta.as_ref().unwrap().1.version - }, - success: false, - message: "meta is updated again, cancel write".to_owned(), - }) - .await{ - tracing::error!("Failed to send write one data response 3: {}", e); - } - // .todo_handle("3 err_comment waitting to fill"); - return; - } - - for data_with_idx in req.data.into_iter() { - let proto::DataItemWithIdx { idx, data } = data_with_idx; - let data = data.unwrap(); - let data_source = data.to_data_item_source(); - let data = Arc::new(data_source); - let serialize = data.as_ref().encode_persist(); - tracing::debug!( - "writing data part uid({:?}) idx({}) item({})", - req.unique_id, - idx, - data.to_debug_string() - ); - if let Err(err) = kv_store_engine.set( - KeyTypeDataSetItem { - uid: req.unique_id.as_ref(), - idx: idx as u8, - }, - &serialize, - true, - ) { - tracing::warn!("flush error: {}", err) - } - } - 
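        // All requested items have been written under the meta write lock; flush the KV
        // engine before releasing the guard and acknowledging the caller with req.version.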
kv_store_engine.flush(); - drop(guard); - tracing::debug!("data partial is written"); - if let Err(e) = responsor //返回结果未使用 曾俊 - .send_resp(WriteOneDataResponse { - remote_version: req.version, - success: true, - message: "".to_owned(), - }) - .await{ - tracing::error!("Failed to send write one data response 4: {}", e); - } - // .todo_handle("4 err_comment waitting to fill"); - } - - async fn rpc_handle_data_meta_update( - &self, - responsor: RPCResponsor, - mut req: proto::DataMetaUpdateRequest, - ) { - struct Defer { - node: NodeID, - } - impl Drop for Defer { - fn drop(&mut self) { - tracing::debug!("rpc_handle_data_meta_update return at node({})", self.node); - } - } - let _defer = Defer { - node: self.view.p2p().nodes_config.this_node(), - }; - - let key = KeyTypeDataSetMeta(&req.unique_id); - let keybytes = key.make_key(); - - // test only log - #[cfg(test)] - tracing::debug!("rpc_handle_data_meta_update {:?}\n {:?}", req,bincode::deserialize::(&req.serialized_meta)); - // not test log - #[cfg(not(test))] - tracing::debug!("rpc_handle_data_meta_update {:?}", req); - - let kv_lock = self.view.kv_store_engine().with_rwlock(&keybytes); - let _kv_write_lock_guard = kv_lock.write(); - - if let Some((_old_version, mut old_meta)) = - self.view.kv_store_engine().get(&key, true, KvAdditionalConf {}) - { - if old_meta.version > req.version { - drop(_kv_write_lock_guard); - let err_msg = "New data version is smaller, failed update"; - tracing::warn!("{}", err_msg); - if let Err(e) = responsor //返回结果未处理 曾俊 - .send_resp(proto::DataMetaUpdateResponse { - version: old_meta.version, - message: err_msg.to_owned(), - }) - .await{ - tracing::error!("Failed to send data meta update response 5: {}", e); - } - // .todo_handle("5 err_comment waitting to fill"); - return; - } - old_meta.version = req.version; - if req.serialized_meta.len() > 0 { - if let Err(e) = self.view.kv_store_engine() //返回结果未处理 曾俊 - .set_raw(&keybytes, std::mem::take(&mut req.serialized_meta), true){ - tracing::error!("Failed to set raw data in kv store 6: {}", e); - } - // .todo_handle("6 err_comment waitting to fill"); - } else { - if let Err(e) = self.view.kv_store_engine() //返回结果未处理 曾俊 - .set(key, &old_meta, true){ - tracing::error!("Failed to set raw data in kv store 7: {}", e); - } - // .todo_handle("7 err_comment waitting to fill"); - } - } else { - if req.serialized_meta.len() > 0 { - tracing::debug!( - "set new meta data, {:?}", - bincode::deserialize::(&req.serialized_meta) - ); - if let Err(e) = self.view.kv_store_engine() //返回结果未处理 曾俊 - .set_raw(&keybytes, std::mem::take(&mut req.serialized_meta), true){ - tracing::error!("Failed to set raw data in kv store 8: {}", e); - } - // .todo_handle("8 err_comment waitting to fill"); - } else { - drop(_kv_write_lock_guard); - let err_msg = "Old meta data not found and missing new meta"; - tracing::warn!("{}", err_msg); - if let Err(e) = responsor //返回结果未处理 曾俊 - .send_resp(proto::DataMetaUpdateResponse { - version: 0, - message: err_msg.to_owned(), - }) - .await{ - tracing::error!("Failed to send data meta update response 9: {}", e); - } - // .todo_handle("9 err_comment waitting to fill"); - return; - } - } - drop(_kv_write_lock_guard); - tracing::debug!("rpc_handle_data_meta_update success"); - if let Err(e) = responsor //返回结果未处理 曾俊 - .send_resp(proto::DataMetaUpdateResponse { - version: req.version, - message: "Update success".to_owned(), - }) - .await{ - tracing::error!("Failed to send data meta update response 10: {}", e); - } - // .todo_handle("10 err_comment waitting to 
fill"); - } - - async fn rpc_handle_get_data_meta( - &self, - req: proto::DataMetaGetRequest, - responsor: RPCResponsor, - ) -> WSResult<()> { - tracing::debug!("rpc_handle_get_data_meta with req({:?})", req); - let meta = self.view.get_data_meta_local(&req.unique_id, req.delete)?; - if meta.is_none() { - tracing::debug!("rpc_handle_get_data_meta data meta not found"); - } else { - tracing::debug!("rpc_handle_get_data_meta data meta found"); - } - let serialized_meta = meta.map_or(vec![], |(_kvversion, meta)| { - bincode::serialize(&meta).unwrap() - }); - - responsor - .send_resp(proto::DataMetaGetResponse { serialized_meta }) - .await?; - - Ok(()) - } - - async fn rpc_handle_get_one_data( - &self, - responsor: RPCResponsor, - req: proto::GetOneDataRequest, - ) -> WSResult<()> { - tracing::debug!("starting rpc_handle_get_one_data {:?}", req); - - let kv_store_engine = self.view.kv_store_engine(); - let _ = self.view - .get_metadata(&req.unique_id, req.delete) - .await - .map_err(|err| { - tracing::warn!("rpc_handle_get_one_data get_metadata failed: {:?}", err); - err - })?; - - let mut got_or_deleted = vec![]; - let mut kv_ope_err = vec![]; - - for idx in req.idxs { - let value = if req.delete { - match kv_store_engine.del( - KeyTypeDataSetItem { - uid: req.unique_id.as_ref(), - idx: idx as u8, - }, - false, - ) { - Ok(value) => value, - Err(e) => { - kv_ope_err.push(e); - None - } - } - } else { - kv_store_engine.get( - &KeyTypeDataSetItem { - uid: req.unique_id.as_ref(), - idx: idx as u8, - }, - false, - KvAdditionalConf {}, - ) - }; - got_or_deleted.push(value); - } - - let (mut success, mut message): (bool, String) = if kv_ope_err.len() > 0 { - (false, { - let mut msg = String::from("KvEngine operation failed: "); - for e in kv_ope_err.iter() { - msg.push_str(&format!("{:?}", e)); - } - msg - }) - } else if got_or_deleted.iter().all(|v| v.is_some()) { - (true, "success".to_owned()) - } else { - tracing::warn!("some data not found"); - (false, "some data not found".to_owned()) - }; - - let mut got_or_deleted_checked: Vec = vec![]; - if success { - for v in got_or_deleted { - let decode_res = proto::DataItem::decode_persist(v.unwrap().1); - match decode_res { - Ok(item) => { - tracing::debug!("decoded data item: {:?}", item.to_string()); - got_or_deleted_checked.push(item); - } - Err(e) => { - tracing::error!("Failed to decode data item: {:?}", e); - success = false; - message = format!("Failed to decode data item: {:?}", e); - break; - } - } - } - } - - responsor - .send_resp(proto::GetOneDataResponse { - success, - data: got_or_deleted_checked, - message, - }) - .await?; - - Ok(()) - } - - // 处理批量数据写入请求 - pub async fn rpc_handle_batch_data( - &self, - responsor: RPCResponsor, - req: proto::BatchDataRequest, - ) -> WSResult<()> { - tracing::debug!("rpc_handle_batch_data with batchid({:?})", req.request_id.clone().unwrap()); - let batch_receive_states = self.batch_receive_states.clone(); - // 预先克隆闭包外需要的字段 - let block_index = req.block_index; - let data = req.data.clone(); - let request_id = req.request_id.clone().unwrap(); - - // 1. 
查找或创建状态 - let state = match self.batch_receive_states - .get_or_init(req.request_id.clone().unwrap(), async move { - // 创建任务组和句柄 - let (mut group, handle) = match WriteSplitDataTaskGroup::new( - req.unique_id.clone(), - req.total_size as usize, - // req.block_type(), 类型错误 曾俊 - req.block_type.unwrap().data_item_dispatch.unwrap(), - req.version, - ).await { - Ok((group, handle)) => (group, handle), - Err(e) => { - tracing::error!("Failed to create task group: {:?}", e); - return Err(e); - } - }; - - // 再process之前订阅,避免通知先于订阅 - let waiter = handle.get_all_tasks_waiter(); - - // 启动process_tasks - let _ = tokio::spawn(async move { - match group.process_tasks().await { - Ok(item) => Ok(item), - Err(e) => { - tracing::error!("Failed to process tasks: {}", e); - Err(e) - } - } - }); - - let state = Arc::new(BatchReceiveState::new(handle, SharedWithBatchHandler::new())); - let state_clone = state.clone(); - - // response task - let _=tokio::spawn(async move { - tracing::debug!("rpc_handle_batch_data response task started"); - // 等待所有任务完成 - if let Err(e) = waiter.wait().await { - tracing::error!("Failed to wait for tasks: {}", e); - todo!("use responsor to send error response"); - return; - } - - tracing::debug!("rpc_handle_batch_data response task wait all tasks done"); - - // 发送最终响应 - if let Some(final_responsor) = state_clone.shared.get_final_responsor().await { - if let Err(e) = final_responsor.send_resp(proto::BatchDataResponse { - request_id: Some(req.request_id.clone().unwrap()), - success: true, - error_message: String::new(), - version: state_clone.handle.version(), - }).await { - tracing::error!("Failed to send final response: {}", e); - } - } - - // 清理状态 - let _=batch_receive_states.remove(&req.request_id.unwrap()); - }); - - Ok(state) - }) - .await { - Err(e) => return Err(WSError::WsDataError(WsDataError::BatchTransferError { - request_id, - msg: format!("Failed to initialize batch state: {}", e) - })), - Ok(state) => state, - }; - - tracing::debug!("rpc_handle_batch_data ready with write_split_data_task_group"); - - // 2. 提交分片数据 - let data_item = proto::DataItem { - data_item_dispatch: Some(proto::data_item::DataItemDispatch::RawBytes(data)), - ..Default::default() - }; - - tracing::debug!("submit_split with data split idx: {}, at node: {}", block_index, self.view.p2p().nodes_config.this_node()); - state.handle.submit_split( - block_index as usize * DEFAULT_BLOCK_SIZE, - data_item, - ).await?; - - // 3. 
更新响应器 - state.shared.update_responsor(responsor).await; - - Ok(()) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct DataMetaSys { - pub cache: i32, - pub distribute: i32, -} -impl From for DataMetaSys { - fn from(d: DataMeta) -> Self { - Self { - cache: d.cache, - distribute: d.distribute, - } - } -} -impl Into for DataMetaSys { - fn into(self) -> DataMeta { - DataMeta { - cache: self.cache, - distribute: self.distribute, - } - } -} - -/// 数据集元信息 -#[derive(Serialize, Deserialize)] -pub struct DataSetMetaV1 { - // unique_id: Vec, - pub version: u64, - pub data_metas: Vec, - pub synced_nodes: HashSet, -} - -/// 数据集元信息 -/// -/// 注意:新建元信息请使用 `DataSetMetaBuilder` -/// -/// https://fvd360f8oos.feishu.cn/docx/XoFudWhAgox84MxKC3ccP1TcnUh#share-Tqqkdxubpokwi5xREincb1sFnLc -#[derive(Serialize, Deserialize, Debug,Clone)] -pub struct DataSetMetaV2 { - // unique_id: Vec, - api_version: u8, - pub version: u64, - pub datas_splits: Vec, - pub data_metas: Vec, - pub synced_nodes: HashSet, - pub cache_mode: Vec, -} - -impl DataSetMetaV2 { - pub fn cache_mode_visitor(&self, idx: DataItemIdx) -> CacheModeVisitor { - CacheModeVisitor(self.cache_mode[idx as usize]) - } - - pub fn data_item_cnt(&self) -> usize { - self.datas_splits.len() - } - - pub fn get_data_node(&self, idx: DataItemIdx) -> NodeID { - // 获取指定数据项的主节点 - self.datas_splits[idx as usize].splits[0].node_id - } -} - -pub type DataSetMeta = DataSetMetaV2; - -// message EachNodeSplit{ -// uint32 node_id=1; -// uint32 data_offset=2; -// uint32 data_size=3; -// } - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct EachNodeSplit { - pub node_id: NodeID, - pub data_offset: u32, - pub data_size: u32, - pub cache_mode: u32, // 添加 cache_mode 字段 -} - -impl EachNodeSplit { - pub fn cache_mode_visitor(&self) -> CacheModeVisitor { - CacheModeVisitor(self.cache_mode as u16) - } -} - -/// 数据项的分片信息 -/// 我们需要知道每个数据项的分片大小 -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct DataSplit { - pub splits: Vec, -} - -pub type DataSplitIdx = usize; - -// impl DataSplit { -// /// node_2_datas will be consumed partially -// pub fn recorver_data( -// &self, -// unique_id: &[u8], -// idx: DataItemIdx, -// node_2_datas: &mut HashMap<(NodeID, DataItemIdx), proto::DataItem>, -// ) -> WSResult> { -// let nodes = node_2_datas -// .iter() -// .filter(|v| v.0 .1 == idx) -// .map(|v| v.0 .0) -// .collect::>(); - -// let mut each_node_splits: HashMap)> = -// HashMap::new(); - -// for node in nodes { -// let data = node_2_datas.remove(&(node, idx)).unwrap(); -// let _ = each_node_splits.insert(node, (data, None)); -// } - -// let mut max_size = 0; -// let mut missing = vec![]; - -// // zip with split info -// // by the way, check if the split is missing -// for split in &self.splits { -// let Some(find) = each_node_splits.get_mut(&split.node_id) else { -// missing.push((*split).clone()); -// continue; -// }; -// find.1 = Some(split.clone()); -// if split.data_offset + split.data_size > max_size { -// max_size = split.data_offset + split.data_size; -// } -// } - -// if missing.len() > 0 { -// return Err(WsDataError::SplitRecoverMissing { -// unique_id: unique_id.to_owned(), -// idx, -// missing, -// } -// .into()); -// } - -// let mut recover = vec![0; max_size.try_into().unwrap()]; - -// for (_node, (data, splitmeta)) in each_node_splits { -// let splitmeta = splitmeta.unwrap(); -// let begin = splitmeta.data_offset as usize; -// let end = begin + splitmeta.data_size as usize; -// recover[begin..end].copy_from_slice(data.as_ref()); -// } - -// 
Ok(recover) -// } -// } - -impl Into for EachNodeSplit { - fn into(self) -> proto::EachNodeSplit { - proto::EachNodeSplit { - node_id: self.node_id, - data_offset: self.data_offset, - data_size: self.data_size, - } - } -} - -impl Into for DataSplit { - fn into(self) -> proto::DataSplit { - proto::DataSplit { - splits: self.splits.into_iter().map(|s| s.into()).collect(), - } - } -} -// uint32 split_size = 1; -// repeated uint32 node_ids = 2; - -macro_rules! generate_cache_mode_methods { - // The macro takes a list of pairs of the form [time, mask] and generates methods. - ($(($group:ident, $mode:ident)),*) => { - paste!{ - impl CacheModeVisitor { - $( - pub fn [](&self) -> bool { - (self.0 & []) == - ([] & []) - } - )* - } - impl DataSetMetaBuilder { - $( - pub fn [](&mut self, idx: DataItemIdx) -> &mut Self { - self.assert_cache_mode_len(); - self.building.as_mut().unwrap().cache_mode[idx as usize] = - (self.building.as_mut().unwrap().cache_mode[idx as usize] & ![]) | - ([] & []); - self - } - )* - } - } - }; -} -generate_cache_mode_methods!( - (time, forever), - (time, auto), - (pos, allnode), - (pos, specnode), - (pos, auto), - (map, common_kv), - (map, file) -); - -#[test] -fn test_cache_mode_visitor() { - let cache_mode_visitor = CacheModeVisitor(CACHE_MODE_TIME_FOREVER_MASK); - assert!(cache_mode_visitor.is_time_forever()); - assert!(!cache_mode_visitor.is_time_auto()); - - let cache_mode_visitor = CacheModeVisitor(CACHE_MODE_POS_ALLNODE_MASK); - assert!(cache_mode_visitor.is_pos_allnode()); - assert!(!cache_mode_visitor.is_pos_specnode()); - assert!(!cache_mode_visitor.is_pos_auto()); - - let cache_mode_visitor = CacheModeVisitor(CACHE_MODE_MAP_FILE_MASK); - assert!(cache_mode_visitor.is_map_file()); - assert!(!cache_mode_visitor.is_map_common_kv()); - - // test builder - - let meta = DataSetMetaBuilder::new() - .set_data_splits(vec![DataSplit { splits: vec![] }]) - .cache_mode_map_file(0) - .cache_mode_time_forever(0) - .build(); - assert!(meta.cache_mode_visitor(0).is_map_file()); - assert!(!meta.cache_mode_visitor(0).is_map_common_kv()); - assert!(meta.cache_mode_visitor(0).is_time_forever()); - assert!(!meta.cache_mode_visitor(0).is_time_auto()); - let meta = DataSetMetaBuilder::new() - .set_data_splits(vec![DataSplit { splits: vec![] }]) - .cache_mode_map_common_kv(0) - .cache_mode_time_forever(0) - .build(); - assert!(meta.cache_mode_visitor(0).is_map_common_kv()); - assert!(!meta.cache_mode_visitor(0).is_map_file()); - assert!(meta.cache_mode_visitor(0).is_time_forever()); - assert!(!meta.cache_mode_visitor(0).is_time_auto()); -} - -pub struct DataSetMetaBuilder { - building: Option, -} -impl From for DataSetMetaBuilder { - fn from(d: DataSetMetaV2) -> Self { - Self { building: Some(d) } - } -} -impl DataSetMetaBuilder { - pub fn new() -> Self { - Self { - building: Some(DataSetMetaV2 { - version: 0, - datas_splits: vec![], - data_metas: vec![], - api_version: 2, - synced_nodes: HashSet::new(), - cache_mode: vec![], - }), - } - } - fn assert_cache_mode_len(&self) { - if self.building.as_ref().unwrap().cache_mode.len() == 0 { - panic!("please set_data_splits before set_cache_mode"); - } - } - - pub fn version(&mut self, version: u64) -> &mut Self { - self.building.as_mut().unwrap().version = version; - self - } - - #[must_use] - pub fn set_data_splits(&mut self, splits: Vec) -> &mut Self { - let building = self.building.as_mut().unwrap(); - building.datas_splits = splits; - building.cache_mode = vec![0; building.datas_splits.len()]; - self - } - - pub fn set_cache_mode(&mut 
self, idx: DataItemIdx, mode: u16) -> &mut Self { - self.building.as_mut().unwrap().cache_mode[idx as usize] = mode; - self - } - - pub fn set_cache_mode_for_all(&mut self, mode: Vec) -> &mut Self { - self.building.as_mut().unwrap().cache_mode = mode; - assert_eq!( - self.building.as_mut().unwrap().cache_mode.len(), - self.building.as_mut().unwrap().datas_splits.len(), - "cache mode len must be equal to data splits len" - ); - self - } - - pub fn build(&mut self) -> DataSetMetaV2 { - self.building.take().unwrap() - } -} - -pub struct GetOrDelDataArg { - pub meta: Option, - pub unique_id: Vec, - pub ty: GetOrDelDataArgType, -} - -#[derive(Debug, Clone)] -pub enum GetOrDelDataArgType { - All, - Delete, - PartialOne { idx: DataItemIdx }, - PartialMany { idxs: BTreeSet }, -} - -impl DataGeneralView { - fn get_data_meta_local( - &self, - unique_id: &[u8], - delete: bool, - ) -> WSResult> { - let ope_name = if delete { "delete" } else { "get" }; - tracing::debug!("{} data meta for uid({:?})", ope_name, unique_id); - - let kv_store_engine = self.kv_store_engine(); - let key = KeyTypeDataSetMeta(&unique_id); - let keybytes = key.make_key(); - - let write_lock = kv_store_engine.with_rwlock(&keybytes); - let _guard = write_lock.write(); - - let meta_opt = if delete { - kv_store_engine.del(key, true)? - } else { - kv_store_engine.get(&key, true, KvAdditionalConf {}) - }; - Ok(meta_opt) - } - - pub async fn get_metadata( - &self, - unique_id: &[u8], - delete: bool, - ) -> WSResult { - // 先尝试从本地获取 - if let Some((_version, meta)) = self.get_data_meta_local(unique_id, delete)? { - return Ok(meta); - } - - // 本地不存在,从 master 获取 - self.data_general().get_or_del_datameta_from_master(unique_id, delete).await - } -} - -impl From for WSError { - fn from(err: JoinError) -> Self { - WsNetworkLogicErr::TaskJoinError { err }.into() - } -} - -#[async_trait] -impl LogicalModule for DataGeneral { - fn inner_new(args: LogicalModuleNewArgs) -> Self - where - Self: Sized, - { - Self { - view: DataGeneralView::new(args.logical_modules_ref.clone()), - rpc_call_data_version_schedule: RPCCaller::new(), - rpc_call_write_once_data: RPCCaller::new(), - rpc_call_batch_data: RPCCaller::new(), - rpc_call_get_data_meta: RPCCaller::new(), - rpc_call_get_data: RPCCaller::new(), - - rpc_handler_write_once_data: RPCHandler::new(), - rpc_handler_batch_data: RPCHandler::new(), - rpc_handler_data_meta_update: RPCHandler::new(), - rpc_handler_get_data_meta: RPCHandler::new(), - rpc_handler_get_data: RPCHandler::new(), - - // 批量数据接收状态管理 - batch_receive_states: AsyncInitMap::new(), - } - } - - async fn start(&self) -> WSResult> { - tracing::info!("start as master"); - - let p2p = self.view.p2p(); - // register rpc callers - { - self.rpc_call_data_version_schedule.regist(p2p); - self.rpc_call_write_once_data.regist(p2p); - self.rpc_call_batch_data.regist(p2p); - self.rpc_call_get_data_meta.regist(p2p); - self.rpc_call_get_data.regist(p2p); - } - - // register rpc handlers - { - let view = self.view.clone(); - self.rpc_handler_write_once_data - .regist(p2p, move |responsor, req| { - let view = view.clone(); - let _ = tokio::spawn(async move { - view.data_general().rpc_handle_write_one_data(responsor, req).await; - }); - Ok(()) - }); - - let view = self.view.clone(); - self.rpc_handler_batch_data.regist( - p2p, - move |responsor: RPCResponsor, - req: proto::BatchDataRequest| { - let view = view.clone(); - let _ = tokio::spawn(async move { - let _ = view.data_general().rpc_handle_batch_data(responsor, req).await; - }); - Ok(()) - }, - ); - - let 
view = self.view.clone(); - self.rpc_handler_data_meta_update.regist( - p2p, - move |responsor: RPCResponsor, - req: proto::DataMetaUpdateRequest| { - let view = view.clone(); - let _ = tokio::spawn(async move { - view.data_general().rpc_handle_data_meta_update(responsor, req).await - }); - Ok(()) - }, - ); - - let view = self.view.clone(); - self.rpc_handler_get_data_meta - .regist(p2p, move |responsor, req| { - let view = view.clone(); - let _ = tokio::spawn(async move { - if let Err(e) = view.data_general().rpc_handle_get_data_meta(req, responsor) //返回结果未处理 曾俊 - .await{ - tracing::error!("Failed to handle get data meta: {}", e); - } - // .todo_handle("rpc_handle_get_data_meta err"); - }); - Ok(()) - }); - - let view = self.view.clone(); - self.rpc_handler_get_data.regist( - p2p, - move |responsor: RPCResponsor, - req: proto::GetOneDataRequest| { - let view = view.clone(); - let _ = tokio::spawn(async move { - view.data_general().rpc_handle_get_one_data(responsor, req).await - }); - Ok(()) - }, - ); - } - - Ok(vec![]) - } -} - -#[derive(Debug, Clone, Copy)] -pub struct CacheModeVisitor(pub u16); \ No newline at end of file diff --git a/src/main/src/general/data/m_data_general/mod.rs.bak b/src/main/src/general/data/m_data_general/mod.rs.bak deleted file mode 100644 index 6831f09..0000000 --- a/src/main/src/general/data/m_data_general/mod.rs.bak +++ /dev/null @@ -1,1616 +0,0 @@ -/// 缓存模式类型 -pub type CacheMode = u16; - -pub mod dataitem; -pub mod batch; -pub mod batch_handler; - -use crate::general::data::m_data_general::dataitem::{calculate_splits, WantIdxIter, WriteSplitDataTaskGroup, DataItemSource}; -use crate::general::data::m_data_general::batch_handler::{BatchReceiveState, SharedWithBatchHandler}; -use crate::general::network::proto::DataItem; -use dataitem::DataItemArgWrapper; -use tokio::io::{AsyncSeekExt, AsyncReadExt}; - -use crate::general::{ - data::m_kv_store_engine::{ - KeyTypeDataSetItem, KeyTypeDataSetMeta, KvAdditionalConf, KvStoreEngine, KvVersion, - }, - m_os::OperatingSystem, - network::{ - m_p2p::{P2PModule, RPCCaller, RPCHandler, RPCResponsor}, - proto::{ - self, DataMeta, WriteOneDataResponse, - }, - proto_ext::ProtoExtDataItem, - }, -}; -use crate::{ - general::{ - data::m_kv_store_engine::{KeyLockGuard, KeyType}, - network::{proto_ext::DataItemExt}, - }, - logical_module_view_impl, - result::{WSError, WSResult, WSResultExt, WsSerialErr, WsNetworkLogicErr}, - sys::{LogicalModule, LogicalModuleNewArgs, NodeID}, - util::{JoinHandleWrapper, container::async_init_map::AsyncInitMap}, -}; -use crate::{result::WsDataError, sys::LogicalModulesRef}; -use async_trait::async_trait; -use camelpaste::paste; -use core::str; - -use serde::{Deserialize, Serialize}; -use std::{ - collections::{BTreeSet, HashMap, HashSet}, - sync::Arc, - time::Duration, - sync::atomic::{AtomicU32, Ordering}, -}; -use tokio::sync::Semaphore; -use tokio::task::JoinError; -use ws_derive::LogicalModule; - -logical_module_view_impl!(DataGeneralView); -logical_module_view_impl!(DataGeneralView, p2p, P2PModule); -logical_module_view_impl!(DataGeneralView, data_general, DataGeneral); -logical_module_view_impl!(DataGeneralView, kv_store_engine, KvStoreEngine); -logical_module_view_impl!(DataGeneralView, os, OperatingSystem); - -pub type DataVersion = u64; -pub type DataItemIdx = u8; - -pub const DATA_UID_PREFIX_APP_META: &str = "app"; -pub const DATA_UID_PREFIX_FN_KV: &str = "fkv"; - -/// 默认数据块大小 (4MB) -pub const DEFAULT_BLOCK_SIZE: usize = 4 * 1024 * 1024; - -pub const CACHE_MODE_TIME_MASK: u16 = 0xf000; 
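// Cache-mode layout (u16, per data item): the top nibble encodes the "time" policy,
// the second nibble the "pos" policy and the third nibble the "map" policy. Each
// CACHE_MODE_<GROUP>_<MODE>_MASK keeps the unrelated nibbles set to 0xf, so the
// generated CacheModeVisitor::is_* helpers test `value & GROUP_MASK == MODE_MASK & GROUP_MASK`.
// Illustrative only: CacheModeVisitor(0x00ff) would satisfy both is_time_forever()
// and is_pos_allnode().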
-pub const CACHE_MODE_TIME_FOREVER_MASK: u16 = 0x0fff; -pub const CACHE_MODE_TIME_AUTO_MASK: u16 = 0x1fff; - -pub const CACHE_MODE_POS_MASK: u16 = 0x0f00; -pub const CACHE_MODE_POS_ALLNODE_MASK: u16 = 0xf0ff; -pub const CACHE_MODE_POS_SPECNODE_MASK: u16 = 0xf1ff; -pub const CACHE_MODE_POS_AUTO_MASK: u16 = 0xf2ff; - -pub const CACHE_MODE_MAP_MASK: u16 = 0x00f0; -pub const CACHE_MODE_MAP_COMMON_KV_MASK: u16 = 0xff0f; -pub const CACHE_MODE_MAP_FILE_MASK: u16 = 0xff1f; -// const DATA_UID_PREFIX_OBJ: &str = "obj"; - -pub fn new_data_unique_id_app(app_name: &str) -> String { - format!("{}{}", DATA_UID_PREFIX_APP_META, app_name) -} - -pub fn new_data_unique_id_fn_kv(key: &[u8]) -> Vec { - let mut temp = DATA_UID_PREFIX_FN_KV.as_bytes().to_owned(); - temp.extend(key); - temp - // let key_str = str::from_utf8(key).unwrap(); - // format!("{}{}", DATA_UID_PREFIX_FN_KV, key_str) -} - -/// 唯一标识符类型 -pub type UniqueId = Vec; - -#[derive(LogicalModule)] -pub struct DataGeneral { - view: DataGeneralView, - pub rpc_call_data_version_schedule: RPCCaller, - rpc_call_write_once_data: RPCCaller, - rpc_call_batch_data: RPCCaller, - rpc_call_get_data_meta: RPCCaller, - rpc_call_get_data: RPCCaller, - - rpc_handler_write_once_data: RPCHandler, - rpc_handler_batch_data: RPCHandler, - rpc_handler_data_meta_update: RPCHandler, - rpc_handler_get_data_meta: RPCHandler, - rpc_handler_get_data: RPCHandler, - - // 批量数据接收状态管理 - batch_receive_states: AsyncInitMap>, -} - -impl DataGeneral { - pub fn inner_new(args: LogicalModuleNewArgs) -> Self { - Self { - view: DataGeneralView::new(args.logical_modules_ref.clone()), - rpc_call_data_version_schedule: RPCCaller::new(), - rpc_call_write_once_data: RPCCaller::new(), - rpc_call_batch_data: RPCCaller::new(), - rpc_call_get_data_meta: RPCCaller::new(), - rpc_call_get_data: RPCCaller::new(), - rpc_handler_write_once_data: RPCHandler::new(), - rpc_handler_batch_data: RPCHandler::new(), - rpc_handler_data_meta_update: RPCHandler::new(), - rpc_handler_get_data_meta: RPCHandler::new(), - rpc_handler_get_data: RPCHandler::new(), - batch_receive_states: AsyncInitMap::new(), - } - } - - #[allow(dead_code)] - fn next_batch_id(&self) -> u32 { - static NEXT_BATCH_ID: AtomicU32 = AtomicU32::new(1); // 从1开始,保留0作为特殊值 - NEXT_BATCH_ID.fetch_add(1, Ordering::Relaxed) - } - - pub async fn write_data_batch( - &self, - unique_id: UniqueId, - version: u64, - data: proto::DataItem, - data_item_idx: DataItemIdx, - node_id: NodeID, - ) -> WSResult<()> { - // 调用 batch_transfer 函数处理数据传输 - async fn batch_transfer( - data_item_idx: DataItemIdx, - unique_id: UniqueId, - version: u64, - target_node: NodeID, - data: Arc, - view: DataGeneralView, - ) -> WSResult<()> { - let (tx, mut rx) = tokio::sync::mpsc::channel(32); - let mut handles = Vec::new(); - - let data_size = data.size().await?; - let splits = calculate_splits(data_size); - - tracing::debug!("batch_transfer total size({}), splits: {:?}, to node {}", data_size, splits, target_node); - - for (block_idx, split_range) in splits.iter().enumerate() { - let block_data = match data.as_ref() { - DataItemSource::Memory { data } => data[split_range.clone()].to_vec(), - DataItemSource::File { path } => { - // 读取文件对应块的数据 - let mut file = tokio::fs::File::open(path).await.map_err(|e| WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: target_node as u32, - sequence: block_idx as u64, - }, - reason: format!("Failed to open file: {}", e), - })?; - let mut buffer = vec![0; split_range.len()]; - // 验证seek结果 - let seek_pos = 
file.seek(std::io::SeekFrom::Start(split_range.start as u64)).await.map_err(|e| WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: target_node as u32, - sequence: block_idx as u64, - }, - reason: format!("Failed to seek file: {}", e), - })?; - if seek_pos != split_range.start as u64 { - return Err(WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: target_node as u32, - sequence: block_idx as u64, - }, - reason: format!("Seek position mismatch: expected {}, got {}", split_range.start, seek_pos), - }.into()); - } - // read_exact保证读取指定长度的数据或返回错误 - let _ = file.read_exact(&mut buffer).await.map_err(|e| WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: target_node as u32, - sequence: block_idx as u64, - }, - reason: format!("Failed to read file: {}", e), - })?; - buffer - } - }; - - let request = proto::BatchDataRequest { - request_id: Some(proto::BatchRequestId { - node_id: target_node as u32, - sequence: block_idx as u64, - }), - dataset_unique_id: unique_id.clone(), - data_item_idx: data_item_idx as u32, - // 用空的 DataItem 代替 - block_type: match data.as_ref() { - DataItemSource::Memory { .. } => Some(proto::DataItem{ - data_item_dispatch: Some(proto::data_item::DataItemDispatch::RawBytes(Vec::new())), - }), - DataItemSource::File { .. } => Some(proto::DataItem{ - data_item_dispatch: Some(proto::data_item::DataItemDispatch::File(proto::FileData { file_name_opt: String::new(), is_dir_opt: true, file_content: Vec::new() })), - }), - }, - block_index: block_idx as u32, - data: block_data, - operation: proto::DataOpeType::Write as i32, - unique_id: unique_id.clone(), - version, - total_size: data_size as u64, - }; - - let tx = tx.clone(); - let view = view.clone(); - - let handle = tokio::spawn(async move { - let result = view.data_general() - .rpc_call_batch_data - .call( - view.p2p(), - target_node, - request, - Some(Duration::from_secs(30)), - ) - .await; - - if let Err(e) = tx.send(result).await { - tracing::error!("Failed to send batch transfer result: {}", e); - } - }); - - handles.push(handle); - } - - drop(tx); - - while let Some(result) = rx.recv().await { - match result { - Ok(resp) if !resp.success => { - return Err(WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: target_node as u32, - sequence: 0, // TODO: Add proper sequence number - }, - reason: resp.error_message, - }.into()); - } - Ok(_) => continue, - Err(e) => { - return Err(WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: target_node as u32, - sequence: 0, - }, - reason: format!("RPC call failed: {}", e), - }.into()); - } - } - } - - for handle in handles { - handle.await.map_err(|e| { - WsDataError::BatchTransferFailed { - request_id: proto::BatchRequestId { - node_id: target_node as u32, - sequence: 0, - }, - reason: format!("Task join failed: {}", e), - } - })?; - } - - Ok(()) - } - - let data = Arc::new(data.to_data_item_source()); - batch_transfer(data_item_idx,unique_id, version, node_id, data, self.view.clone()).await - } - - - pub async fn get_or_del_datameta_from_master( - &self, - unique_id: &[u8], - delete: bool, - ) -> WSResult { - tracing::debug!("get_or_del_datameta_from_master uid: {:?}, delete: {}, whoami: {}", unique_id, delete, self.view.p2p().nodes_config.this.0); - let p2p = self.view.p2p(); - // get meta from master - let meta = self - .rpc_call_get_data_meta - .call( - p2p, - p2p.nodes_config.get_master_node(), - proto::DataMetaGetRequest { 
- unique_id: unique_id.to_vec(), - delete, - }, - Some(Duration::from_secs(60)), - ) - .await?; - - if meta.serialized_meta.is_empty() { - return Err(WsDataError::DataSetNotFound { - uniqueid: unique_id.to_vec(), - } - .into()); - } - - bincode::deserialize(&meta.serialized_meta).map_err(|err| { - WsSerialErr::BincodeErr { - err, - context: "get_or_del_datameta_from_master".to_owned(), - } - .into() - }) - } - - pub async fn get_or_del_data( - &self, - GetOrDelDataArg { - meta, - unique_id, - ty, - }: GetOrDelDataArg, - ) -> WSResult<(DataSetMetaV2, HashMap)> { - tracing::debug!("get_or_del_data uid: {:?}, maybe with meta: {:?}", unique_id, meta); - let mut data_map = HashMap::new(); - - // get meta from master - let meta = if let Some(meta) = meta { - meta - } else { - self.get_or_del_datameta_from_master(&unique_id, false) - .await? - }; - - tracing::debug!("start get_or_del_data uid: {:?},meta: {:?}", unique_id, meta); - - // basical verify - for idx in 0..meta.data_item_cnt() { - let idx = idx as DataItemIdx; - let check_cache_map = |meta: &DataSetMetaV2| -> WSResult<()> { - if !meta.cache_mode_visitor(idx).is_map_common_kv() - && !meta.cache_mode_visitor(idx).is_map_file() - { - return Err(WsDataError::UnknownCacheMapMode { - mode: meta.cache_mode_visitor(idx).0, - } - .into()); - } - Ok(()) - }; - check_cache_map(&meta)?; - } - - // get data - let p2p = self.view.p2p(); - - match ty { - GetOrDelDataArgType::All => { - for idx in 0..meta.data_item_cnt() { - let idx = idx as DataItemIdx; - let resp = self - .rpc_call_get_data - .call( - p2p, - meta.get_data_node(idx), - proto::GetOneDataRequest { - unique_id: unique_id.to_vec(), - idxs: vec![idx as u32], - delete: false, - return_data: true, - }, - Some(Duration::from_secs(60)), - ) - .await?; - - if !resp.success { - return Err(WsDataError::GetDataFailed { - unique_id: unique_id.to_vec(), - msg: resp.message, - } - .into()); - } - - let _ = data_map.insert(idx, resp.data[0].clone()); - } - } - GetOrDelDataArgType::Delete => { - for idx in 0..meta.data_item_cnt() { - let idx = idx as DataItemIdx; - let resp = self - .rpc_call_get_data - .call( - p2p, - meta.get_data_node(idx), - proto::GetOneDataRequest { - unique_id: unique_id.to_vec(), - idxs: vec![idx as u32], - delete: true, - return_data: true, - }, - Some(Duration::from_secs(60)), - ) - .await?; - - if !resp.success { - return Err(WsDataError::GetDataFailed { - unique_id: unique_id.to_vec(), - msg: resp.message, - } - .into()); - } - - let _ = data_map.insert(idx, resp.data[0].clone()); - } - } - GetOrDelDataArgType::PartialOne { idx } => { - let resp = self - .rpc_call_get_data - .call( - p2p, - meta.get_data_node(idx), - proto::GetOneDataRequest { - unique_id: unique_id.to_vec(), - idxs: vec![idx as u32], - delete: false, - return_data: true, - }, - Some(Duration::from_secs(60)), - ) - .await?; - - if !resp.success { - return Err(WsDataError::GetDataFailed { - unique_id: unique_id.to_vec(), - msg: resp.message, - } - .into()); - } - - let _ = data_map.insert(idx, resp.data[0].clone()); - } - GetOrDelDataArgType::PartialMany { idxs } => { - for idx in idxs { - let resp = self - .rpc_call_get_data - .call( - p2p, - meta.get_data_node(idx), - proto::GetOneDataRequest { - unique_id: unique_id.to_vec(), - idxs: vec![idx as u32], - delete: false, - return_data: true, - }, - Some(Duration::from_secs(60)), - ) - .await?; - - if !resp.success { - return Err(WsDataError::GetDataFailed { - unique_id: unique_id.to_vec(), - msg: resp.message, - } - .into()); - } - - let _ = 
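The four request shapes handled above differ only in which item indices are fetched and whether delete is set; the real code walks them with WantIdxIter, but a hypothetical flattening helper shows the intent at a glance. The enum and DataItemIdx mirror the shapes in this module; plan_requests itself is illustrative only.

    use std::collections::BTreeSet;

    type DataItemIdx = u8;

    enum GetOrDelDataArgType {
        All,
        Delete,
        PartialOne { idx: DataItemIdx },
        PartialMany { idxs: BTreeSet<DataItemIdx> },
    }

    /// Returns (indices to request, whether the remote copy should be deleted).
    fn plan_requests(ty: &GetOrDelDataArgType, item_cnt: u8) -> (Vec<DataItemIdx>, bool) {
        match ty {
            GetOrDelDataArgType::All => ((0..item_cnt).collect(), false),
            GetOrDelDataArgType::Delete => ((0..item_cnt).collect(), true),
            GetOrDelDataArgType::PartialOne { idx } => (vec![*idx], false),
            GetOrDelDataArgType::PartialMany { idxs } => (idxs.iter().copied().collect(), false),
        }
    }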
data_map.insert(idx, resp.data[0].clone()); - } - } - } - - Ok((meta, data_map)) - } - - pub async fn write_data( - &self, - unique_id: impl Into>, - mut datas: Vec, - context_openode_opetype_operole: Option<( - NodeID, - proto::DataOpeType, - proto::data_schedule_context::OpeRole, - )>, - ) -> WSResult<()> { - let unique_id = unique_id.into(); - let log_tag = format!("[write_data({})]", String::from_utf8_lossy(&unique_id)); - tracing::debug!("{} start write data", log_tag); - - let mut data_transfer_sizes=Vec::new(); - data_transfer_sizes.reserve(datas.len()); - for d in datas.iter_mut(){ - data_transfer_sizes.push(d.transfer_size().await.map_err(|err|{ - tracing::error!("{} transfer size error: {}", log_tag, err); - err - })?); - } - // 获取数据调度计划 - let version_schedule_resp = self - .rpc_call_data_version_schedule - .call( - self.view.p2p(), - self.view.p2p().nodes_config.get_master_node(), - proto::DataVersionScheduleRequest { - unique_id: unique_id.clone(), - context: context_openode_opetype_operole.map(|(node, ope, role)| { - proto::DataScheduleContext { - // each_data_sz_bytes: data_transfer_sizes, 原代码类型不匹配 曾俊 - each_data_sz_bytes: data_transfer_sizes.iter().map(|&x| x as u32).collect(), - ope_node: node as i64, - ope_type: ope as i32, - ope_role: Some(role), - } - }), - version: 0, - }, - Some(Duration::from_secs(60)), - ) - .await?; - - // Clone the response to extend its lifetime - let version = version_schedule_resp.version; - let splits = version_schedule_resp.split.clone(); - - // 处理每个数据项 - let mut iter = WantIdxIter::new(&GetOrDelDataArgType::All, datas.len() as u8); - while let Some(data_item_idx) = iter.next() { - let data_item: &DataItemArgWrapper = &mut datas[data_item_idx as usize]; - let split = &splits[data_item_idx as usize]; - let mut primary_tasks = Vec::new(); - - // 1. 并行写入所有主数据分片 - let mut split_iter = WantIdxIter::new(&GetOrDelDataArgType::All, split.splits.len() as u8); - while let Some(split_idx) = split_iter.next() { - let split_info = &split.splits[split_idx as usize]; - tracing::debug!("{} creating split write task {}/{} for node {}, offset={}, size={}", - log_tag, split_idx + 1, split.splits.len(), split_info.node_id, split_info.data_offset, split_info.data_size); - let split_info = split_info.clone(); - let unique_id_clone = unique_id.clone(); - // let data_item_primary = data_item.clone_split_range(split_info.data_offset..split_info.data_offset+split_info.data_size); 类型不匹配 曾俊 - // 生成一个复制的可变数据项 - let mut data_item_clone = (*data_item).clone(); - let data_item_primary = data_item_clone.clone_split_range(split_info.data_offset as usize..(split_info.data_offset+split_info.data_size)as usize).await.todo_handle("clone_split_range for write data err")?; - // let data_item_primary = data_item.clone_split_range(split_info.data_offset as usize..(split_info.data_offset+split_info.data_size)as usize).await.todo_handle("clone_split_range for write data err")?; - let view = self.view.clone(); - let version_copy = version; - let task = tokio::spawn(async move { - view.data_general() - .rpc_call_write_once_data - .call( - view.p2p(), - split_info.node_id, - proto::WriteOneDataRequest { - unique_id: unique_id_clone.clone(), - version: version_copy, - data: vec![proto::DataItemWithIdx { - idx: data_item_idx as u32, - // data: Some(data_item_primary), 类型不匹配 曾俊 - data: Some(data_item_primary), - }], - }, - Some(Duration::from_secs(60)), - ) - .await - }); - primary_tasks.push(task); - } - - // 2. 
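Each DataSplit the master returns is consumed above as a list of (data_offset, data_size) ranges that must tile the data item; the write loop clones exactly data_offset..data_offset+data_size out of the item for each target node. A tiny stand-alone illustration of such a tiling (fixed-size chunks are an assumption here, the real scheduler may split differently):

    /// Produce contiguous (offset, size) ranges covering 0..total_size.
    fn partition(total_size: u32, chunk: u32) -> Vec<(u32, u32)> {
        assert!(chunk > 0);
        let mut splits = Vec::new();
        let mut offset = 0;
        while offset < total_size {
            let size = chunk.min(total_size - offset);
            splits.push((offset, size));
            offset += size;
        }
        splits
    }

    #[test]
    fn partition_tiles_the_item() {
        assert_eq!(partition(10, 4), vec![(0, 4), (4, 4), (8, 2)]);
        assert_eq!(partition(10, 4).iter().map(|s| s.1).sum::<u32>(), 10);
    }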
并行写入缓存数据(完整数据) - let visitor = CacheModeVisitor(version_schedule_resp.cache_mode[data_item_idx as usize] as u16); - let need_cache = visitor.is_map_common_kv() || visitor.is_map_file(); - let cache_nodes: Vec = if need_cache { - split.splits.iter().map(|s| s.node_id).collect() - } else { - vec![] - }; - - let mut cache_tasks = Vec::new(); - if !cache_nodes.is_empty() { - tracing::debug!("{} found {} cache nodes: {:?}", log_tag, cache_nodes.len(), cache_nodes); - const MAX_CONCURRENT_TRANSFERS: usize = 3; - let semaphore = Arc::new(Semaphore::new(MAX_CONCURRENT_TRANSFERS)); - - let mut cache_iter = WantIdxIter::new(&GetOrDelDataArgType::All, cache_nodes.len() as u8); - while let Some(cache_idx) = cache_iter.next() { - let node_id = cache_nodes[cache_idx as usize]; - let permit = semaphore.clone().acquire_owned().await.unwrap(); - tracing::debug!("{} creating cache write task {}/{} for node {}", log_tag, cache_idx + 1, cache_nodes.len(), node_id); - let unique_id_clone = unique_id.clone(); - let data_item_cache = data_item.clone(); - let view = self.view.clone(); - let task = tokio::spawn(async move { - let _permit = permit; // 持有permit直到任务完成 - view.data_general() - // .write_data_batch(unique_id_clone.clone(), version, data_item_cache, data_item_idx, node_id) //类型不匹配 曾俊 - .write_data_batch(unique_id_clone.clone(), version, data_item_cache.dataitem, data_item_idx, node_id) - .await?; - Ok::(proto::WriteOneDataResponse { - remote_version: version, - success: true, - message: String::new(), - }) - }); - cache_tasks.push(task); - } - } - - let primary_results = futures::future::join_all(primary_tasks).await; - tracing::debug!("{} primary_results: {:?}", log_tag, primary_results); - let cache_results = futures::future::join_all(cache_tasks).await; - tracing::debug!("{} cache_results: {:?}", log_tag, cache_results); - - if primary_results.iter().any(|res| res.is_err()) || cache_results.iter().any(|res| res.is_err()) { - let error_msg = format!("主节点或缓存节点数据写入失败"); - tracing::error!("{}", error_msg); - return Err(WSError::WsDataError(WsDataError::WriteDataFailed { - unique_id: unique_id.clone(), - message: error_msg, - })); - } - } - - Ok(()) - } - - async fn rpc_handle_write_one_data( - &self, - responsor: RPCResponsor, - req: proto::WriteOneDataRequest, - ) { - tracing::debug!("verify data meta bf write data"); - let kv_store_engine = self.view.kv_store_engine(); - - // Step1: verify version - // take old meta - #[allow(unused_assignments)] - let mut required_meta: Option<(usize, DataSetMetaV2)> = None; - { - let keybytes: Vec = KeyTypeDataSetMeta(&req.unique_id).make_key(); - let fail_by_overwrite = || async { - let message = "New data version overwrite".to_owned(); - tracing::warn!("{}", message); - responsor - .send_resp(WriteOneDataResponse { - remote_version: 0, - success: false, - message, - }) - .await - .todo_handle("1 err_comment waitting to fill"); - }; - let fail_with_msg = |message: String| async { - tracing::warn!("{}", message); - responsor - .send_resp(WriteOneDataResponse { - remote_version: 0, - success: false, - message, - }) - .await - .todo_handle("2 err_comment waitting to fill"); - }; - - loop { - // tracing::debug!("verify version loop"); - let lock = - kv_store_engine.with_rwlock(&KeyTypeDataSetMeta(&req.unique_id).make_key()); - let guard = KeyLockGuard::Read(lock.read()); - required_meta = kv_store_engine.get( - &KeyTypeDataSetMeta(&req.unique_id), - true, - KvAdditionalConf {}, - ); //tofix, master send maybe not synced - let old_dataset_version = if 
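Both the primary-split writes and the cache writes above end in the same aggregation step: join every spawned task and treat any failure as a failed write for the whole data item. A reduced sketch of that step; unlike the code above, it also inspects the inner RPC result rather than only the join result, so take it as the general shape, not a faithful copy:

    async fn all_succeeded(tasks: Vec<tokio::task::JoinHandle<Result<(), String>>>) -> bool {
        let results = futures::future::join_all(tasks).await;
        // Ok(Ok(())) = the task joined cleanly and the call inside it succeeded
        results.iter().all(|joined| matches!(joined, Ok(Ok(()))))
    }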
required_meta.is_none() { - 0 - } else { - required_meta.as_ref().unwrap().1.version - }; - // need to wait for new version - if required_meta.is_none() - || required_meta.as_ref().unwrap().1.version < req.version - { - if required_meta.is_none() { - tracing::debug!("no data version, waiting for notify"); - } else { - tracing::debug!( - "data version is old({}) at node({}), waiting for new notify({})", - required_meta.as_ref().unwrap().1.version, - self.view.p2p().nodes_config.this_node(), - req.version - ); - } - - let (kv_version, new_value) = kv_store_engine - .register_waiter_for_new(&keybytes, guard) - .await - .unwrap_or_else(|err| { - panic!("fail to wait for new data version: {:?}", err); - }); - - let Some(new_value) = new_value.as_raw_data() else { - fail_with_msg(format!( - "fatal error, kv value supposed to be DataSetMeta, rathe than {:?}", - new_value - )) - .await; - return; - }; - - // deserialize - let new_value = bincode::deserialize::(&new_value); - if let Err(err) = new_value { - fail_with_msg(format!( - "fatal error, kv value deserialization failed: {}", - err - )) - .await; - return; - } - let new_value = new_value.unwrap(); - - // version check - if new_value.version > req.version { - fail_by_overwrite().await; - return; - } else if new_value.version < req.version { - tracing::debug!("recv data version({}) is old than required({}), waiting for new notify",new_value.version, req.version); - // still need to wait for new version - continue; - } else { - required_meta = Some((kv_version, new_value)); - break; - } - } else if old_dataset_version > req.version { - drop(guard); - fail_by_overwrite().await; - return; - } else { - tracing::debug!( - "data version is matched cur({}) require({}) // 0 should be invalid", - old_dataset_version, - req.version - ); - break; - } - } - } - - // Step3: write data - tracing::debug!("start to write partial data"); - let lock = kv_store_engine.with_rwlock(&KeyTypeDataSetMeta(&req.unique_id).make_key()); - let guard = KeyLockGuard::Write(lock.write()); - let check_meta = kv_store_engine.get( - &KeyTypeDataSetMeta(&req.unique_id), - true, - KvAdditionalConf {}, - ); //tofix, master send maybe not synced - if check_meta.is_none() - || check_meta.as_ref().unwrap().0 != required_meta.as_ref().unwrap().0 - { - drop(guard); - responsor - .send_resp(WriteOneDataResponse { - remote_version: if check_meta.is_none() { - 0 - } else { - check_meta.as_ref().unwrap().1.version - }, - success: false, - message: "meta is updated again, cancel write".to_owned(), - }) - .await - .todo_handle("3 err_comment waitting to fill"); - return; - } - - for data_with_idx in req.data.into_iter() { - let proto::DataItemWithIdx { idx, data } = data_with_idx; - let data = data.unwrap(); - let data_source = data.to_data_item_source(); - let data = Arc::new(data_source); - let serialize = data.as_ref().encode_persist(); - tracing::debug!( - "writing data part uid({:?}) idx({}) item({})", - req.unique_id, - idx, - data.to_debug_string() - ); - if let Err(err) = kv_store_engine.set( - KeyTypeDataSetItem { - uid: req.unique_id.as_ref(), - idx: idx as u8, - }, - &serialize, - true, - ) { - tracing::warn!("flush error: {}", err) - } - } - kv_store_engine.flush(); - drop(guard); - tracing::debug!("data partial is written"); - responsor - .send_resp(WriteOneDataResponse { - remote_version: req.version, - success: true, - message: "".to_owned(), - }) - .await - .todo_handle("4 err_comment waitting to fill"); - } - - async fn rpc_handle_data_meta_update( - &self, - responsor: 
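The waiting loop above boils down to a three-way comparison between the locally stored dataset version and the version carried by the writer. The names below are illustrative, not the module's API:

    enum VersionCheck {
        WaitForNewer,    // local meta missing or older: block on the kv waiter, then re-check
        Proceed,         // versions match: safe to write this partial data
        RejectOverwrite, // local is already newer: a later version has landed
    }

    fn check_version(local: Option<u64>, requested: u64) -> VersionCheck {
        match local {
            None => VersionCheck::WaitForNewer,
            Some(v) if v < requested => VersionCheck::WaitForNewer,
            Some(v) if v == requested => VersionCheck::Proceed,
            Some(_) => VersionCheck::RejectOverwrite,
        }
    }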
RPCResponsor, - mut req: proto::DataMetaUpdateRequest, - ) { - struct Defer { - node: NodeID, - } - impl Drop for Defer { - fn drop(&mut self) { - tracing::debug!("rpc_handle_data_meta_update return at node({})", self.node); - } - } - let _defer = Defer { - node: self.view.p2p().nodes_config.this_node(), - }; - - let key = KeyTypeDataSetMeta(&req.unique_id); - let keybytes = key.make_key(); - - // test only log - #[cfg(test)] - tracing::debug!("rpc_handle_data_meta_update {:?}\n {:?}", req,bincode::deserialize::(&req.serialized_meta)); - // not test log - #[cfg(not(test))] - tracing::debug!("rpc_handle_data_meta_update {:?}", req); - - let kv_lock = self.view.kv_store_engine().with_rwlock(&keybytes); - let _kv_write_lock_guard = kv_lock.write(); - - if let Some((_old_version, mut old_meta)) = - self.view.kv_store_engine().get(&key, true, KvAdditionalConf {}) - { - if old_meta.version > req.version { - drop(_kv_write_lock_guard); - let err_msg = "New data version is smaller, failed update"; - tracing::warn!("{}", err_msg); - responsor - .send_resp(proto::DataMetaUpdateResponse { - version: old_meta.version, - message: err_msg.to_owned(), - }) - .await - .todo_handle("5 err_comment waitting to fill"); - return; - } - old_meta.version = req.version; - if req.serialized_meta.len() > 0 { - self.view.kv_store_engine() - .set_raw(&keybytes, std::mem::take(&mut req.serialized_meta), true) - .todo_handle("6 err_comment waitting to fill"); - } else { - self.view.kv_store_engine() - .set(key, &old_meta, true) - .todo_handle("7 err_comment waitting to fill"); - } - } else { - if req.serialized_meta.len() > 0 { - tracing::debug!( - "set new meta data, {:?}", - bincode::deserialize::(&req.serialized_meta) - ); - self.view.kv_store_engine() - .set_raw(&keybytes, std::mem::take(&mut req.serialized_meta), true) - .todo_handle("8 err_comment waitting to fill"); - } else { - drop(_kv_write_lock_guard); - let err_msg = "Old meta data not found and missing new meta"; - tracing::warn!("{}", err_msg); - responsor - .send_resp(proto::DataMetaUpdateResponse { - version: 0, - message: err_msg.to_owned(), - }) - .await - .todo_handle("9 err_comment waitting to fill"); - return; - } - } - drop(_kv_write_lock_guard); - tracing::debug!("rpc_handle_data_meta_update success"); - responsor - .send_resp(proto::DataMetaUpdateResponse { - version: req.version, - message: "Update success".to_owned(), - }) - .await - .todo_handle("10 err_comment waitting to fill"); - } - - async fn rpc_handle_get_data_meta( - &self, - req: proto::DataMetaGetRequest, - responsor: RPCResponsor, - ) -> WSResult<()> { - tracing::debug!("rpc_handle_get_data_meta with req({:?})", req); - let meta = self.view.get_data_meta_local(&req.unique_id, req.delete)?; - if meta.is_none() { - tracing::debug!("rpc_handle_get_data_meta data meta not found"); - } else { - tracing::debug!("rpc_handle_get_data_meta data meta found"); - } - let serialized_meta = meta.map_or(vec![], |(_kvversion, meta)| { - bincode::serialize(&meta).unwrap() - }); - - responsor - .send_resp(proto::DataMetaGetResponse { serialized_meta }) - .await?; - - Ok(()) - } - - async fn rpc_handle_get_one_data( - &self, - responsor: RPCResponsor, - req: proto::GetOneDataRequest, - ) -> WSResult<()> { - tracing::debug!("starting rpc_handle_get_one_data {:?}", req); - - let kv_store_engine = self.view.kv_store_engine(); - let _ = self.view - .get_metadata(&req.unique_id, req.delete) - .await - .map_err(|err| { - tracing::warn!("rpc_handle_get_one_data get_metadata failed: {:?}", err); - err - })?; 
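rpc_handle_get_data_meta above encodes "not found" as an empty serialized_meta, and the calling side maps the empty vector back to DataSetNotFound. A minimal round trip of that convention with a stand-in struct (the real payload is DataSetMetaV2):

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize, Debug, PartialEq)]
    struct MetaStub {
        version: u64,
    }

    fn encode(meta: Option<&MetaStub>) -> Vec<u8> {
        // empty bytes stand for "no such dataset"
        meta.map_or(Vec::new(), |m| bincode::serialize(m).expect("serialize meta"))
    }

    fn decode(bytes: &[u8]) -> Option<MetaStub> {
        if bytes.is_empty() {
            None
        } else {
            Some(bincode::deserialize(bytes).expect("deserialize meta"))
        }
    }

    #[test]
    fn empty_bytes_mean_not_found() {
        assert_eq!(decode(&encode(None)), None);
        assert_eq!(decode(&encode(Some(&MetaStub { version: 7 }))), Some(MetaStub { version: 7 }));
    }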
- - let mut got_or_deleted = vec![]; - let mut kv_ope_err = vec![]; - - for idx in req.idxs { - let value = if req.delete { - match kv_store_engine.del( - KeyTypeDataSetItem { - uid: req.unique_id.as_ref(), - idx: idx as u8, - }, - false, - ) { - Ok(value) => value, - Err(e) => { - kv_ope_err.push(e); - None - } - } - } else { - kv_store_engine.get( - &KeyTypeDataSetItem { - uid: req.unique_id.as_ref(), - idx: idx as u8, - }, - false, - KvAdditionalConf {}, - ) - }; - got_or_deleted.push(value); - } - - let (mut success, mut message): (bool, String) = if kv_ope_err.len() > 0 { - (false, { - let mut msg = String::from("KvEngine operation failed: "); - for e in kv_ope_err.iter() { - msg.push_str(&format!("{:?}", e)); - } - msg - }) - } else if got_or_deleted.iter().all(|v| v.is_some()) { - (true, "success".to_owned()) - } else { - tracing::warn!("some data not found"); - (false, "some data not found".to_owned()) - }; - - let mut got_or_deleted_checked: Vec = vec![]; - if success { - for v in got_or_deleted { - let decode_res = proto::DataItem::decode_persist(v.unwrap().1); - match decode_res { - Ok(item) => { - tracing::debug!("decoded data item: {:?}", item.to_string()); - got_or_deleted_checked.push(item); - } - Err(e) => { - tracing::error!("Failed to decode data item: {:?}", e); - success = false; - message = format!("Failed to decode data item: {:?}", e); - break; - } - } - } - } - - responsor - .send_resp(proto::GetOneDataResponse { - success, - data: got_or_deleted_checked, - message, - }) - .await?; - - Ok(()) - } - - /// 处理批量数据写入请求 - pub async fn rpc_handle_batch_data( - &self, - responsor: RPCResponsor, - req: proto::BatchDataRequest, - ) -> WSResult<()> { - tracing::debug!("rpc_handle_batch_data with batchid({:?})", req.request_id.clone().unwrap()); - let batch_receive_states = self.batch_receive_states.clone(); - // 预先克隆闭包外需要的字段 - let block_index = req.block_index; - let data = req.data.clone(); - let request_id = req.request_id.clone().unwrap(); - - // 1. 
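The response assembly above follows a "decode everything, first failure wins" rule: the first blob that fails decode_persist flips success and its error becomes the message. The same shape, reduced to a generic helper in which decode is a stand-in for proto::DataItem::decode_persist:

    fn decode_all<T, E: std::fmt::Debug>(
        blobs: &[Vec<u8>],
        decode: impl Fn(&[u8]) -> Result<T, E>,
    ) -> Result<Vec<T>, String> {
        let mut items = Vec::with_capacity(blobs.len());
        for blob in blobs {
            match decode(blob) {
                Ok(item) => items.push(item),
                Err(e) => return Err(format!("Failed to decode data item: {:?}", e)),
            }
        }
        Ok(items)
    }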
查找或创建状态 - let state = match self.batch_receive_states - .get_or_init(req.request_id.clone().unwrap(), async move { - // 创建任务组和句柄 - let (mut group, handle) = match WriteSplitDataTaskGroup::new( - req.unique_id.clone(), - req.total_size as usize, - // req.block_type(), 类型错误 曾俊 - req.block_type.unwrap().data_item_dispatch.unwrap(), - req.version, - ).await { - Ok((group, handle)) => (group, handle), - Err(e) => { - tracing::error!("Failed to create task group: {:?}", e); - return Err(e); - } - }; - - // 再process之前订阅,避免通知先于订阅 - let waiter = handle.get_all_tasks_waiter(); - - // 启动process_tasks - let _ = tokio::spawn(async move { - match group.process_tasks().await { - Ok(item) => Ok(item), - Err(e) => { - tracing::error!("Failed to process tasks: {}", e); - Err(e) - } - } - }); - - let state = Arc::new(BatchReceiveState::new(handle, SharedWithBatchHandler::new())); - let state_clone = state.clone(); - - // response task - let _=tokio::spawn(async move { - tracing::debug!("rpc_handle_batch_data response task started"); - // 等待所有任务完成 - if let Err(e) = waiter.wait().await { - tracing::error!("Failed to wait for tasks: {}", e); - todo!("use responsor to send error response"); - return; - } - - tracing::debug!("rpc_handle_batch_data response task wait all tasks done"); - - // 发送最终响应 - if let Some(final_responsor) = state_clone.shared.get_final_responsor().await { - if let Err(e) = final_responsor.send_resp(proto::BatchDataResponse { - request_id: Some(req.request_id.clone().unwrap()), - success: true, - error_message: String::new(), - version: state_clone.handle.version(), - }).await { - tracing::error!("Failed to send final response: {}", e); - } - } - - // 清理状态 - let _=batch_receive_states.remove(&req.request_id.unwrap()); - }); - - Ok(state) - }) - .await { - Err(e) => return Err(WSError::WsDataError(WsDataError::BatchTransferError { - request_id, - msg: format!("Failed to initialize batch state: {}", e) - })), - Ok(state) => state, - }; - - tracing::debug!("rpc_handle_batch_data ready with write_split_data_task_group"); - - // 2. 提交分片数据 - let data_item = proto::DataItem { - data_item_dispatch: Some(proto::data_item::DataItemDispatch::RawBytes(data)), - ..Default::default() - }; - - tracing::debug!("submit_split with data split idx: {}, at node: {}", block_index, self.view.p2p().nodes_config.this_node()); - state.handle.submit_split( - block_index as usize * DEFAULT_BLOCK_SIZE, - data_item, - ).await?; - - // 3. 
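Blocks of one transfer arrive as independent RPCs, so the handler above keys shared receive state by BatchRequestId: the first block to arrive builds the WriteSplitDataTaskGroup, later blocks only look it up, and the completion waiter is subscribed before process_tasks is spawned so a finish notification can never be missed. A much simplified, synchronous sketch of the "first caller initializes" map (the project's AsyncInitMap additionally supports async initializers and failure propagation):

    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};

    #[derive(Default)]
    struct ReceiveState {
        blocks_seen: Mutex<u32>, // stand-in for the real task-group handle
    }

    #[derive(Default)]
    struct BatchStates {
        // keyed by (node_id, sequence), mirroring proto::BatchRequestId
        states: Mutex<HashMap<(u32, u32), Arc<ReceiveState>>>,
    }

    impl BatchStates {
        fn get_or_init(&self, request_id: (u32, u32)) -> Arc<ReceiveState> {
            self.states
                .lock()
                .unwrap()
                .entry(request_id)
                .or_insert_with(|| Arc::new(ReceiveState::default()))
                .clone()
        }
    }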
更新响应器 - state.shared.update_responsor(responsor).await; - - Ok(()) - } -} - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct DataMetaSys { - pub cache: i32, - pub distribute: i32, -} -impl From for DataMetaSys { - fn from(d: DataMeta) -> Self { - Self { - cache: d.cache, - distribute: d.distribute, - } - } -} -impl Into for DataMetaSys { - fn into(self) -> DataMeta { - DataMeta { - cache: self.cache, - distribute: self.distribute, - } - } -} - -/// 数据集元信息 -#[derive(Serialize, Deserialize)] -pub struct DataSetMetaV1 { - // unique_id: Vec, - pub version: u64, - pub data_metas: Vec, - pub synced_nodes: HashSet, -} - -/// 数据集元信息 -/// -/// 注意:新建元信息请使用 `DataSetMetaBuilder` -/// -/// https://fvd360f8oos.feishu.cn/docx/XoFudWhAgox84MxKC3ccP1TcnUh#share-Tqqkdxubpokwi5xREincb1sFnLc -#[derive(Serialize, Deserialize, Debug,Clone)] -pub struct DataSetMetaV2 { - // unique_id: Vec, - api_version: u8, - pub version: u64, - pub datas_splits: Vec, - pub data_metas: Vec, - pub synced_nodes: HashSet, - pub cache_mode: Vec, -} - -impl DataSetMetaV2 { - pub fn cache_mode_visitor(&self, idx: DataItemIdx) -> CacheModeVisitor { - CacheModeVisitor(self.cache_mode[idx as usize]) - } - - pub fn data_item_cnt(&self) -> usize { - self.datas_splits.len() - } - - pub fn get_data_node(&self, idx: DataItemIdx) -> NodeID { - // 获取指定数据项的主节点 - self.datas_splits[idx as usize].splits[0].node_id - } -} - -pub type DataSetMeta = DataSetMetaV2; - -// message EachNodeSplit{ -// uint32 node_id=1; -// uint32 data_offset=2; -// uint32 data_size=3; -// } - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct EachNodeSplit { - pub node_id: NodeID, - pub data_offset: u32, - pub data_size: u32, - pub cache_mode: u32, // 添加 cache_mode 字段 -} - -impl EachNodeSplit { - pub fn cache_mode_visitor(&self) -> CacheModeVisitor { - CacheModeVisitor(self.cache_mode as u16) - } -} - -/// 数据项的分片信息 -/// 我们需要知道每个数据项的分片大小 -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct DataSplit { - pub splits: Vec, -} - -pub type DataSplitIdx = usize; - -// impl DataSplit { -// /// node_2_datas will be consumed partially -// pub fn recorver_data( -// &self, -// unique_id: &[u8], -// idx: DataItemIdx, -// node_2_datas: &mut HashMap<(NodeID, DataItemIdx), proto::DataItem>, -// ) -> WSResult> { -// let nodes = node_2_datas -// .iter() -// .filter(|v| v.0 .1 == idx) -// .map(|v| v.0 .0) -// .collect::>(); - -// let mut each_node_splits: HashMap)> = -// HashMap::new(); - -// for node in nodes { -// let data = node_2_datas.remove(&(node, idx)).unwrap(); -// let _ = each_node_splits.insert(node, (data, None)); -// } - -// let mut max_size = 0; -// let mut missing = vec![]; - -// // zip with split info -// // by the way, check if the split is missing -// for split in &self.splits { -// let Some(find) = each_node_splits.get_mut(&split.node_id) else { -// missing.push((*split).clone()); -// continue; -// }; -// find.1 = Some(split.clone()); -// if split.data_offset + split.data_size > max_size { -// max_size = split.data_offset + split.data_size; -// } -// } - -// if missing.len() > 0 { -// return Err(WsDataError::SplitRecoverMissing { -// unique_id: unique_id.to_owned(), -// idx, -// missing, -// } -// .into()); -// } - -// let mut recover = vec![0; max_size.try_into().unwrap()]; - -// for (_node, (data, splitmeta)) in each_node_splits { -// let splitmeta = splitmeta.unwrap(); -// let begin = splitmeta.data_offset as usize; -// let end = begin + splitmeta.data_size as usize; -// recover[begin..end].copy_from_slice(data.as_ref()); -// } - -// 
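The commented-out recovery path above amounts to "size the buffer from the largest offset+size, then copy every split back in at its offset". A stand-alone version of just that copy step:

    /// splits: (data_offset, bytes) for each node's piece of one data item
    fn recover(splits: &[(usize, Vec<u8>)]) -> Vec<u8> {
        let total = splits.iter().map(|(off, d)| off + d.len()).max().unwrap_or(0);
        let mut recovered = vec![0u8; total];
        for (off, data) in splits {
            recovered[*off..*off + data.len()].copy_from_slice(data);
        }
        recovered
    }

    #[test]
    fn recover_reassembles_out_of_order_splits() {
        let out = recover(&[(3, vec![4, 5]), (0, vec![1, 2, 3])]);
        assert_eq!(out, vec![1, 2, 3, 4, 5]);
    }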
Ok(recover) -// } -// } - -impl Into for EachNodeSplit { - fn into(self) -> proto::EachNodeSplit { - proto::EachNodeSplit { - node_id: self.node_id, - data_offset: self.data_offset, - data_size: self.data_size, - } - } -} - -impl Into for DataSplit { - fn into(self) -> proto::DataSplit { - proto::DataSplit { - splits: self.splits.into_iter().map(|s| s.into()).collect(), - } - } -} -// uint32 split_size = 1; -// repeated uint32 node_ids = 2; - -macro_rules! generate_cache_mode_methods { - // The macro takes a list of pairs of the form [time, mask] and generates methods. - ($(($group:ident, $mode:ident)),*) => { - paste!{ - impl CacheModeVisitor { - $( - pub fn [](&self) -> bool { - (self.0 & []) == - ([] & []) - } - )* - } - impl DataSetMetaBuilder { - $( - pub fn [](&mut self, idx: DataItemIdx) -> &mut Self { - self.assert_cache_mode_len(); - self.building.as_mut().unwrap().cache_mode[idx as usize] = - (self.building.as_mut().unwrap().cache_mode[idx as usize] & ![]) | - ([] & []); - self - } - )* - } - } - }; -} -generate_cache_mode_methods!( - (time, forever), - (time, auto), - (pos, allnode), - (pos, specnode), - (pos, auto), - (map, common_kv), - (map, file) -); - -#[test] -fn test_cache_mode_visitor() { - let cache_mode_visitor = CacheModeVisitor(CACHE_MODE_TIME_FOREVER_MASK); - assert!(cache_mode_visitor.is_time_forever()); - assert!(!cache_mode_visitor.is_time_auto()); - - let cache_mode_visitor = CacheModeVisitor(CACHE_MODE_POS_ALLNODE_MASK); - assert!(cache_mode_visitor.is_pos_allnode()); - assert!(!cache_mode_visitor.is_pos_specnode()); - assert!(!cache_mode_visitor.is_pos_auto()); - - let cache_mode_visitor = CacheModeVisitor(CACHE_MODE_MAP_FILE_MASK); - assert!(cache_mode_visitor.is_map_file()); - assert!(!cache_mode_visitor.is_map_common_kv()); - - // test builder - - let meta = DataSetMetaBuilder::new() - .set_data_splits(vec![DataSplit { splits: vec![] }]) - .cache_mode_map_file(0) - .cache_mode_time_forever(0) - .build(); - assert!(meta.cache_mode_visitor(0).is_map_file()); - assert!(!meta.cache_mode_visitor(0).is_map_common_kv()); - assert!(meta.cache_mode_visitor(0).is_time_forever()); - assert!(!meta.cache_mode_visitor(0).is_time_auto()); - let meta = DataSetMetaBuilder::new() - .set_data_splits(vec![DataSplit { splits: vec![] }]) - .cache_mode_map_common_kv(0) - .cache_mode_time_forever(0) - .build(); - assert!(meta.cache_mode_visitor(0).is_map_common_kv()); - assert!(!meta.cache_mode_visitor(0).is_map_file()); - assert!(meta.cache_mode_visitor(0).is_time_forever()); - assert!(!meta.cache_mode_visitor(0).is_time_auto()); -} - -pub struct DataSetMetaBuilder { - building: Option, -} -impl From for DataSetMetaBuilder { - fn from(d: DataSetMetaV2) -> Self { - Self { building: Some(d) } - } -} -impl DataSetMetaBuilder { - pub fn new() -> Self { - Self { - building: Some(DataSetMetaV2 { - version: 0, - datas_splits: vec![], - data_metas: vec![], - api_version: 2, - synced_nodes: HashSet::new(), - cache_mode: vec![], - }), - } - } - fn assert_cache_mode_len(&self) { - if self.building.as_ref().unwrap().cache_mode.len() == 0 { - panic!("please set_data_splits before set_cache_mode"); - } - } - - pub fn version(&mut self, version: u64) -> &mut Self { - self.building.as_mut().unwrap().version = version; - self - } - - #[must_use] - pub fn set_data_splits(&mut self, splits: Vec) -> &mut Self { - let building = self.building.as_mut().unwrap(); - building.datas_splits = splits; - building.cache_mode = vec![0; building.datas_splits.len()]; - self - } - - pub fn set_cache_mode(&mut 
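Every is_* method the macro above generates is the same two-mask test: zero out everything except the group's bits, then compare against the mode's bits. The mask values below are placeholders chosen only for readability; the real CACHE_MODE_* constants are defined elsewhere in this module:

    const TIME_GROUP_MASK: u16 = 0b1100_0000_0000_0000; // which bits encode the "time" group
    const TIME_FOREVER_MASK: u16 = 0b1000_0000_0000_0000; // the "forever" mode within that group

    fn is_time_forever(cache_mode: u16) -> bool {
        (cache_mode & TIME_GROUP_MASK) == (TIME_FOREVER_MASK & TIME_GROUP_MASK)
    }

    #[test]
    fn forever_bit_is_detected() {
        assert!(is_time_forever(TIME_FOREVER_MASK));
        assert!(is_time_forever(TIME_FOREVER_MASK | 0b0000_0000_0000_0001)); // other groups don't interfere
        assert!(!is_time_forever(0));
    }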
self, idx: DataItemIdx, mode: u16) -> &mut Self { - self.building.as_mut().unwrap().cache_mode[idx as usize] = mode; - self - } - - pub fn set_cache_mode_for_all(&mut self, mode: Vec) -> &mut Self { - self.building.as_mut().unwrap().cache_mode = mode; - assert_eq!( - self.building.as_mut().unwrap().cache_mode.len(), - self.building.as_mut().unwrap().datas_splits.len(), - "cache mode len must be equal to data splits len" - ); - self - } - - pub fn build(&mut self) -> DataSetMetaV2 { - self.building.take().unwrap() - } -} - -pub struct GetOrDelDataArg { - pub meta: Option, - pub unique_id: Vec, - pub ty: GetOrDelDataArgType, -} - -#[derive(Debug, Clone)] -pub enum GetOrDelDataArgType { - All, - Delete, - PartialOne { idx: DataItemIdx }, - PartialMany { idxs: BTreeSet }, -} - -impl DataGeneralView { - fn get_data_meta_local( - &self, - unique_id: &[u8], - delete: bool, - ) -> WSResult> { - let ope_name = if delete { "delete" } else { "get" }; - tracing::debug!("{} data meta for uid({:?})", ope_name, unique_id); - - let kv_store_engine = self.kv_store_engine(); - let key = KeyTypeDataSetMeta(&unique_id); - let keybytes = key.make_key(); - - let write_lock = kv_store_engine.with_rwlock(&keybytes); - let _guard = write_lock.write(); - - let meta_opt = if delete { - kv_store_engine.del(key, true)? - } else { - kv_store_engine.get(&key, true, KvAdditionalConf {}) - }; - Ok(meta_opt) - } - - pub async fn get_metadata( - &self, - unique_id: &[u8], - delete: bool, - ) -> WSResult { - // 先尝试从本地获取 - if let Some((_version, meta)) = self.get_data_meta_local(unique_id, delete)? { - return Ok(meta); - } - - // 本地不存在,从 master 获取 - self.data_general().get_or_del_datameta_from_master(unique_id, delete).await - } -} - -impl From for WSError { - fn from(err: JoinError) -> Self { - WsNetworkLogicErr::TaskJoinError { err }.into() - } -} - -#[async_trait] -impl LogicalModule for DataGeneral { - fn inner_new(args: LogicalModuleNewArgs) -> Self - where - Self: Sized, - { - Self { - view: DataGeneralView::new(args.logical_modules_ref.clone()), - rpc_call_data_version_schedule: RPCCaller::new(), - rpc_call_write_once_data: RPCCaller::new(), - rpc_call_batch_data: RPCCaller::new(), - rpc_call_get_data_meta: RPCCaller::new(), - rpc_call_get_data: RPCCaller::new(), - - rpc_handler_write_once_data: RPCHandler::new(), - rpc_handler_batch_data: RPCHandler::new(), - rpc_handler_data_meta_update: RPCHandler::new(), - rpc_handler_get_data_meta: RPCHandler::new(), - rpc_handler_get_data: RPCHandler::new(), - - // 批量数据接收状态管理 - batch_receive_states: AsyncInitMap::new(), - } - } - - async fn start(&self) -> WSResult> { - tracing::info!("start as master"); - - let p2p = self.view.p2p(); - // register rpc callers - { - self.rpc_call_data_version_schedule.regist(p2p); - self.rpc_call_write_once_data.regist(p2p); - self.rpc_call_batch_data.regist(p2p); - self.rpc_call_get_data_meta.regist(p2p); - self.rpc_call_get_data.regist(p2p); - } - - // register rpc handlers - { - let view = self.view.clone(); - self.rpc_handler_write_once_data - .regist(p2p, move |responsor, req| { - let view = view.clone(); - let _ = tokio::spawn(async move { - view.data_general().rpc_handle_write_one_data(responsor, req).await; - }); - Ok(()) - }); - - let view = self.view.clone(); - self.rpc_handler_batch_data.regist( - p2p, - move |responsor: RPCResponsor, - req: proto::BatchDataRequest| { - let view = view.clone(); - let _ = tokio::spawn(async move { - let _ = view.data_general().rpc_handle_batch_data(responsor, req).await; - }); - Ok(()) - }, - ); - - let 
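get_metadata above is a straightforward two-tier lookup: consult the local kv store first, and only fall back to the master RPC on a miss. The same shape with synchronous stand-ins; local_lookup and ask_master are hypothetical closures, not the module's API:

    fn get_metadata_sketch(
        local_lookup: impl Fn(&[u8]) -> Option<u64>,
        ask_master: impl Fn(&[u8]) -> Result<u64, String>,
        unique_id: &[u8],
    ) -> Result<u64, String> {
        if let Some(meta) = local_lookup(unique_id) {
            return Ok(meta); // local hit: no network round trip
        }
        ask_master(unique_id) // miss: the master is the source of truth
    }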
view = self.view.clone(); - self.rpc_handler_data_meta_update.regist( - p2p, - move |responsor: RPCResponsor, - req: proto::DataMetaUpdateRequest| { - let view = view.clone(); - let _ = tokio::spawn(async move { - view.data_general().rpc_handle_data_meta_update(responsor, req).await - }); - Ok(()) - }, - ); - - let view = self.view.clone(); - self.rpc_handler_get_data_meta - .regist(p2p, move |responsor, req| { - let view = view.clone(); - let _ = tokio::spawn(async move { - view.data_general().rpc_handle_get_data_meta(req, responsor) - .await - .todo_handle("rpc_handle_get_data_meta err"); - }); - Ok(()) - }); - - let view = self.view.clone(); - self.rpc_handler_get_data.regist( - p2p, - move |responsor: RPCResponsor, - req: proto::GetOneDataRequest| { - let view = view.clone(); - let _ = tokio::spawn(async move { - view.data_general().rpc_handle_get_one_data(responsor, req).await - }); - Ok(()) - }, - ); - } - - Ok(vec![]) - } -} - -#[derive(Debug, Clone, Copy)] -pub struct CacheModeVisitor(pub u16); \ No newline at end of file diff --git a/src/main/src/general/data/m_dist_lock.rs b/src/main/src/general/data/m_dist_lock.rs deleted file mode 100644 index 293b48e..0000000 --- a/src/main/src/general/data/m_dist_lock.rs +++ /dev/null @@ -1,560 +0,0 @@ -use std::collections::hash_map::DefaultHasher; -use std::collections::HashMap; -use std::collections::HashSet; -use std::hash::Hash; -use std::hash::Hasher; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use std::time::Duration; - -use crate::sys::LogicalModule; -use crate::sys::LogicalModulesRef; -use crate::util::DropDebug; -use crate::{ - logical_module_view_impl, result::WSResult, sys::LogicalModuleNewArgs, util::JoinHandleWrapper, -}; -use axum::async_trait; -use enum_as_inner::EnumAsInner; -use parking_lot::Mutex; -use rand::thread_rng; -use rand::Rng; -use tokio::sync::Notify; -use tokio::sync::RwLock; -use ws_derive::LogicalModule; - -use crate::general::network::{ - m_p2p::{P2PModule, RPCCaller, RPCHandler, RPCResponsor}, - proto, -}; - -logical_module_view_impl!(View); -logical_module_view_impl!(View, p2p, P2PModule); -logical_module_view_impl!(View, dist_lock, DistLock); - -type LockReleaseId = u32; - -#[derive(EnumAsInner)] -pub enum DistLockOpe { - Read, - Write, - Unlock(LockReleaseId), -} - -/// https://fvd360f8oos.feishu.cn/wiki/ZUPNwpKLEiRs6Ukzf3ncVa9FnHe -/// 这个是对于某个key的锁的状态记录,包括读写锁的引用计数,以及等待释放的notify -/// 对于写锁,只有第一个人竞争往map里插入能拿到锁,后续的都得等notify,然后竞争往map里插入 -/// 对于读锁,只要自增的初始值>=1即可,初始值为0意味着当前锁已经处于释放过程中(cnt减到0但可能还在map里)得等到map中的值被删掉 -/// 如何等map中的值被删掉:等到map中的值被删掉,再把wait_for_delete中的notify全部notify -/// 有没有在删掉后,全部notify过了,又有用户往notify队列里插入,(使用Mutex
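The (truncated) doc comment above describes the per-key lock record: writers race to insert the entry, readers bump a shared counter, and a counter that has dropped to zero marks an entry that is mid-teardown, so new readers must wait for its removal and the accompanying Notify before retrying. A deliberately simplified, single-threaded illustration of just the reader-counter rule; the real module wraps this in Mutex, Notify and RPC:

    use std::collections::HashMap;

    /// Returns true if the read lock was taken, false if the caller must wait
    /// for the draining entry to be removed (and notified) and then retry.
    fn try_read_lock(counters: &mut HashMap<String, u32>, key: &str) -> bool {
        if let Some(cnt) = counters.get_mut(key) {
            if *cnt == 0 {
                return false; // entry is being released: wait for deletion + notify
            }
            *cnt += 1; // join the existing readers
            return true;
        }
        counters.insert(key.to_owned(), 1); // first reader creates the entry
        true
    }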