16 changes: 15 additions & 1 deletion program-libs/batched-merkle-tree/src/batch.rs
@@ -1,4 +1,4 @@
use light_bloom_filter::BloomFilter;
use light_bloom_filter::{BloomFilter, BloomFilterRef};
use light_hasher::{Hasher, Poseidon};
use light_zero_copy::vec::ZeroCopyVecU64;
use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
@@ -396,6 +396,20 @@ impl Batch {
Ok(())
}

/// Immutable version of `check_non_inclusion` using `BloomFilterRef`.
pub fn check_non_inclusion_ref(
num_iters: usize,
bloom_filter_capacity: u64,
value: &[u8; 32],
store: &[u8],
) -> Result<(), BatchedMerkleTreeError> {
let bloom_filter = BloomFilterRef::new(num_iters, bloom_filter_capacity, store)?;
if bloom_filter.contains(value) {
return Err(BatchedMerkleTreeError::NonInclusionCheckFailed);
}
Ok(())
}

/// Marks the batch as inserted in the merkle tree.
/// 1. Checks that the batch is ready.
/// 2. increments the number of inserted zkps.
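
The new check_non_inclusion_ref composes per batch: callers pass each batch's num_iters and bloom_filter_capacity together with the matching bloom filter store. A minimal sketch of that calling pattern (the same loop check_input_queue_non_inclusion in merkle_tree_ref.rs uses), assuming the crate is imported as light_batched_merkle_tree and with a hypothetical helper name:

use light_batched_merkle_tree::{batch::Batch, errors::BatchedMerkleTreeError};

/// Hypothetical helper: run the non-inclusion check against every bloom filter
/// store, passing each batch's (num_iters, bloom_filter_capacity) explicitly.
fn check_all_stores(
    batch_params: &[(usize, u64)],
    stores: &[&[u8]],
    value: &[u8; 32],
) -> Result<(), BatchedMerkleTreeError> {
    for (&(num_iters, capacity), store) in batch_params.iter().zip(stores.iter().copied()) {
        // Fails with NonInclusionCheckFailed as soon as one filter reports `value`.
        Batch::check_non_inclusion_ref(num_iters, capacity, value, store)?;
    }
    Ok(())
}
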
2 changes: 2 additions & 0 deletions program-libs/batched-merkle-tree/src/lib.rs
@@ -174,10 +174,12 @@
pub mod constants;
pub mod errors;
pub mod initialize_address_tree;
pub mod initialize_state_tree;

Check warning on line 177 (GitHub Actions / lint): Diff in program-libs/batched-merkle-tree/src/lib.rs
pub mod merkle_tree;
pub mod merkle_tree_metadata;
pub mod queue;
pub mod merkle_tree_ref;
pub mod queue_ref;
Comment on lines +181 to +182
⚠️ Potential issue | 🟡 Minor

Module declarations should follow alphabetical ordering per lint.

The pipeline lint flags a module ordering issue. Currently merkle_tree_ref and queue_ref are grouped together after queue, but the alphabetical ordering the project enforces places merkle_tree_ref before queue and queue_ref after queue_batch_metadata.

🔧 Proposed fix for alphabetical ordering
 pub mod merkle_tree;
 pub mod merkle_tree_metadata;
+pub mod merkle_tree_ref;
 pub mod queue;
-pub mod merkle_tree_ref;
-pub mod queue_ref;
 pub mod queue_batch_metadata;
+pub mod queue_ref;
 pub mod rollover_address_tree;
🤖 Prompt for AI Agents
In `@program-libs/batched-merkle-tree/src/lib.rs` around lines 181 - 182, Reorder
the pub mod declarations in lib.rs to satisfy the project's alphabetical module
order: move pub mod merkle_tree_ref and pub mod queue_ref so they appear in
alphabetical sequence relative to the other module declarations (e.g., ensure
queue, merkle_tree_ref, queue_ref are ordered alphabetically or adjust
surrounding modules so merkle_tree_ref and queue_ref are placed correctly);
update the module block containing merkle_tree_ref and queue_ref to match the
lint's expected ordering.

pub mod queue_batch_metadata;
pub mod rollover_address_tree;
pub mod rollover_state_tree;
1 change: 1 addition & 0 deletions program-libs/batched-merkle-tree/src/merkle_tree.rs
@@ -174,6 +174,7 @@ impl<'a> BatchedMerkleTreeAccount<'a> {
account_data: &'a mut [u8],
pubkey: &Pubkey,
) -> Result<BatchedMerkleTreeAccount<'a>, BatchedMerkleTreeError> {
light_account_checks::checks::check_discriminator::<Self>(account_data)?;
Self::from_bytes::<ADDRESS_MERKLE_TREE_TYPE_V2>(account_data, pubkey)
}

172 changes: 172 additions & 0 deletions program-libs/batched-merkle-tree/src/merkle_tree_ref.rs
@@ -0,0 +1,172 @@
use std::ops::Deref;

use light_account_checks::{
checks::check_account_info,
discriminator::{Discriminator, DISCRIMINATOR_LEN},
AccountInfoTrait,
};
use light_compressed_account::{
pubkey::Pubkey, ADDRESS_MERKLE_TREE_TYPE_V2, STATE_MERKLE_TREE_TYPE_V2,

Check warning on line 9 (GitHub Actions / lint): Diff in program-libs/batched-merkle-tree/src/merkle_tree_ref.rs
};
use light_merkle_tree_metadata::errors::MerkleTreeMetadataError;
use light_zero_copy::{
cyclic_vec::ZeroCopyCyclicVecRefU64,
errors::ZeroCopyError,
};
use zerocopy::Ref;

use crate::{
batch::Batch, constants::ACCOUNT_COMPRESSION_PROGRAM_ID, errors::BatchedMerkleTreeError,
merkle_tree::BatchedMerkleTreeAccount, merkle_tree_metadata::BatchedMerkleTreeMetadata,
};

/// Immutable batched Merkle tree reference.
///
/// Uses `try_borrow_data()` + `&'a [u8]` instead of
/// `try_borrow_mut_data()` + `&'a mut [u8]`, avoiding UB from
/// dropping a `RefMut` guard while a raw-pointer-based mutable
/// reference continues to live.
///
/// Only contains the fields that external consumers actually read:
/// metadata, root history, and bloom filter stores.
/// Hash chain stores are not parsed (only needed inside account-compression).
#[derive(Debug)]
pub struct BatchedMerkleTreeRef<'a> {
pubkey: Pubkey,
metadata: Ref<&'a [u8], BatchedMerkleTreeMetadata>,
root_history: ZeroCopyCyclicVecRefU64<'a, [u8; 32]>,
pub bloom_filter_stores: [&'a [u8]; 2],
}

impl Discriminator for BatchedMerkleTreeRef<'_> {
const LIGHT_DISCRIMINATOR: [u8; 8] = *b"BatchMta";
const LIGHT_DISCRIMINATOR_SLICE: &'static [u8] = b"BatchMta";
}

impl<'a> BatchedMerkleTreeRef<'a> {
/// Deserialize a batched state Merkle tree (immutable) from account info.
pub fn state_from_account_info<A: AccountInfoTrait>(
account_info: &A,
) -> Result<BatchedMerkleTreeRef<'a>, BatchedMerkleTreeError> {
Self::from_account_info::<STATE_MERKLE_TREE_TYPE_V2, A>(
&ACCOUNT_COMPRESSION_PROGRAM_ID,
account_info,
)
}

/// Deserialize an address tree (immutable) from account info.
pub fn address_from_account_info<A: AccountInfoTrait>(
account_info: &A,
) -> Result<BatchedMerkleTreeRef<'a>, BatchedMerkleTreeError> {
Self::from_account_info::<ADDRESS_MERKLE_TREE_TYPE_V2, A>(
&ACCOUNT_COMPRESSION_PROGRAM_ID,
account_info,
)
}

pub(crate) fn from_account_info<const TREE_TYPE: u64, A: AccountInfoTrait>(
program_id: &[u8; 32],
account_info: &A,
) -> Result<BatchedMerkleTreeRef<'a>, BatchedMerkleTreeError> {
check_account_info::<BatchedMerkleTreeAccount, A>(program_id, account_info)?;
let data = account_info.try_borrow_data()?;
// SAFETY: We extend the lifetime of the borrowed data to 'a.
// The borrow is shared (immutable), so dropping the Ref guard
// restores pinocchio's borrow state correctly for shared borrows.
let data_slice: &'a [u8] = unsafe { std::slice::from_raw_parts(data.as_ptr(), data.len()) };
Self::from_bytes::<TREE_TYPE>(data_slice, &account_info.key().into())
}

/// Deserialize a state tree (immutable) from bytes.
#[cfg(not(target_os = "solana"))]
pub fn state_from_bytes(
account_data: &'a [u8],
pubkey: &Pubkey,
) -> Result<BatchedMerkleTreeRef<'a>, BatchedMerkleTreeError> {
light_account_checks::checks::check_discriminator::<BatchedMerkleTreeAccount>(
account_data,
)?;
Self::from_bytes::<STATE_MERKLE_TREE_TYPE_V2>(account_data, pubkey)
}

/// Deserialize an address tree (immutable) from bytes.
#[cfg(not(target_os = "solana"))]
pub fn address_from_bytes(
account_data: &'a [u8],
pubkey: &Pubkey,
) -> Result<BatchedMerkleTreeRef<'a>, BatchedMerkleTreeError> {
light_account_checks::checks::check_discriminator::<BatchedMerkleTreeAccount>(
account_data,
)?;
Self::from_bytes::<ADDRESS_MERKLE_TREE_TYPE_V2>(account_data, pubkey)
}

pub(crate) fn from_bytes<const TREE_TYPE: u64>(
account_data: &'a [u8],
pubkey: &Pubkey,
) -> Result<BatchedMerkleTreeRef<'a>, BatchedMerkleTreeError> {
// 1. Skip discriminator.
let (_discriminator, account_data) = account_data.split_at(DISCRIMINATOR_LEN);

// 2. Parse metadata.
let (metadata, account_data) =
Ref::<&'a [u8], BatchedMerkleTreeMetadata>::from_prefix(account_data)
.map_err(ZeroCopyError::from)?;
if metadata.tree_type != TREE_TYPE {
return Err(MerkleTreeMetadataError::InvalidTreeType.into());
}

// 3. Parse root history (cyclic vec).
let (root_history, account_data) =
ZeroCopyCyclicVecRefU64::<[u8; 32]>::from_bytes_at(account_data)?;

// 4. Parse bloom filter stores (immutable).
let bloom_filter_size = metadata.queue_batches.get_bloomfilter_size_bytes();
let (bf_store_0, account_data) = account_data.split_at(bloom_filter_size);
let (bf_store_1, _account_data) = account_data.split_at(bloom_filter_size);

Comment on lines +119 to +127
⚠️ Potential issue | 🟠 Major

Guard bloom filter parsing against short buffers.
Line 125 uses split_at without a length check; malformed or truncated input can panic instead of returning a structured error. Prefer an explicit size check and return a ZeroCopyError.

🛠️ Suggested fix
-        let bloom_filter_size = metadata.queue_batches.get_bloomfilter_size_bytes();
-        let (bf_store_0, account_data) = account_data.split_at(bloom_filter_size);
-        let (bf_store_1, _account_data) = account_data.split_at(bloom_filter_size);
+        let bloom_filter_size = metadata.queue_batches.get_bloomfilter_size_bytes();
+        let total_bf_bytes = bloom_filter_size
+            .checked_mul(2)
+            .ok_or(ZeroCopyError::InvalidConversion)?;
+        if account_data.len() < total_bf_bytes {
+            return Err(
+                ZeroCopyError::InsufficientMemoryAllocated(account_data.len(), total_bf_bytes)
+                    .into(),
+            );
+        }
+        let (bf_store_0, account_data) = account_data.split_at(bloom_filter_size);
+        let (bf_store_1, _account_data) = account_data.split_at(bloom_filter_size);
🤖 Prompt for AI Agents
In `@program-libs/batched-merkle-tree/src/merkle_tree_ref.rs` around lines 119 -
127, The bloom filter parsing currently calls
account_data.split_at(bloom_filter_size) without validating buffer length, which
can panic on truncated input; update the code around
ZeroCopyCyclicVecRefU64::from_bytes_at and the subsequent bf_store_0/bf_store_1
parsing to first check that account_data.len() >=
bloom_filter_size.checked_mul(2).ok_or(ZeroCopyError::ShortBuffer)? (or
equivalent) and if not return Err(ZeroCopyError::ShortBuffer), then safely
slice/split the two bloom filter stores (bf_store_0, bf_store_1); reference the
ZeroCopyCyclicVecRefU64::from_bytes_at call and the bf_store_0/bf_store_1
variables to locate the fix.

// 5. Stop here -- hash_chain_stores are not needed for read-only access.

Ok(BatchedMerkleTreeRef {
pubkey: *pubkey,
metadata,
root_history,
bloom_filter_stores: [bf_store_0, bf_store_1],
})
}

/// Check non-inclusion in all bloom filters which are not zeroed.
pub fn check_input_queue_non_inclusion(
&self,
value: &[u8; 32],
) -> Result<(), BatchedMerkleTreeError> {
for i in 0..self.queue_batches.num_batches as usize {
Batch::check_non_inclusion_ref(
self.queue_batches.batches[i].num_iters as usize,
self.queue_batches.batches[i].bloom_filter_capacity,
value,
self.bloom_filter_stores[i],
)?;
}
Ok(())
}
Comment on lines +138 to +152
⚠️ Potential issue | 🟡 Minor

Doc comment is slightly misleading — the code checks all bloom filters, not just "not zeroed" ones.

The comment says "Check non-inclusion in all bloom filters which are not zeroed," but the implementation unconditionally iterates all num_batches bloom filters without checking bloom_filter_is_zeroed(). This is functionally correct — a zeroed bloom filter has no bits set, so check_non_inclusion_ref will always pass on it. But the comment creates a false expectation that there's a conditional skip happening.

📝 Suggested doc fix
-    /// Check non-inclusion in all bloom filters which are not zeroed.
+    /// Check non-inclusion across all bloom filters.
+    /// Zeroed bloom filters pass trivially (no bits set).
🤖 Prompt for AI Agents
In `@program-libs/batched-merkle-tree/src/merkle_tree_ref.rs` around lines 145 -
159, The docstring for check_input_queue_non_inclusion is misleading: the
function iterates all batches unconditionally and calls
Batch::check_non_inclusion_ref for each bloom filter, it does not skip zeroed
filters; update the comment on check_input_queue_non_inclusion to state it
checks every bloom filter (including zeroed ones) or remove the "which are not
zeroed" phrase and optionally reference that zeroed filters trivially pass
check_non_inclusion_ref rather than implying a conditional skip via
bloom_filter_is_zeroed().

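As a small, hypothetical illustration of the reasoning above (zeroed filters pass trivially), under the assumption that num_iters and bloom_filter_capacity match the batch configuration:

use light_batched_merkle_tree::{batch::Batch, errors::BatchedMerkleTreeError};

/// Illustrative sketch: with an all-zero `store` no bloom filter bit is set, so
/// `contains()` cannot report `value` and the non-inclusion check returns Ok(()).
fn zeroed_store_passes(
    num_iters: usize,
    bloom_filter_capacity: u64,
    value: &[u8; 32],
    store: &[u8],
) -> Result<(), BatchedMerkleTreeError> {
    debug_assert!(store.iter().all(|&b| b == 0), "sketch assumes a zeroed store");
    Batch::check_non_inclusion_ref(num_iters, bloom_filter_capacity, value, store)
}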

pub fn pubkey(&self) -> &Pubkey {
&self.pubkey
}
}

impl Deref for BatchedMerkleTreeRef<'_> {
type Target = BatchedMerkleTreeMetadata;

fn deref(&self) -> &Self::Target {
&self.metadata
}
}

impl<'a> BatchedMerkleTreeRef<'a> {
/// Return root from the root history by index.
pub fn get_root_by_index(&self, index: usize) -> Option<&[u8; 32]> {
self.root_history.get(index)
}
}
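
Putting the new reader together, off-chain consumption would look roughly like the sketch below. state_from_bytes is only compiled off-chain (cfg(not(target_os = "solana"))); the crate import path light_batched_merkle_tree and the function name are assumptions for illustration:

use light_batched_merkle_tree::{
    errors::BatchedMerkleTreeError, merkle_tree_ref::BatchedMerkleTreeRef,
};
use light_compressed_account::pubkey::Pubkey;

/// Hypothetical off-chain reader: parse the account bytes immutably and exercise
/// the reads this type exposes (metadata via Deref, bloom filters, root history).
fn read_state_tree(
    account_data: &[u8],
    tree_pubkey: &Pubkey,
    value: &[u8; 32],
) -> Result<Option<[u8; 32]>, BatchedMerkleTreeError> {
    let tree = BatchedMerkleTreeRef::state_from_bytes(account_data, tree_pubkey)?;

    // Metadata fields are reachable through Deref<Target = BatchedMerkleTreeMetadata>.
    let _num_batches = tree.queue_batches.num_batches;

    // Errors with NonInclusionCheckFailed if `value` is present in any bloom filter.
    tree.check_input_queue_non_inclusion(value)?;

    // Roots are read by index from the cyclic root history.
    Ok(tree.get_root_by_index(0).copied())
}
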
173 changes: 173 additions & 0 deletions program-libs/batched-merkle-tree/src/queue_ref.rs
@@ -0,0 +1,173 @@
use std::ops::Deref;

use light_account_checks::{
checks::check_account_info,
discriminator::{Discriminator, DISCRIMINATOR_LEN},
AccountInfoTrait,
};
use light_compressed_account::{pubkey::Pubkey, OUTPUT_STATE_QUEUE_TYPE_V2};
use light_merkle_tree_metadata::errors::MerkleTreeMetadataError;
use light_zero_copy::{errors::ZeroCopyError, vec::ZeroCopyVecU64};
use zerocopy::Ref;

use crate::{
constants::ACCOUNT_COMPRESSION_PROGRAM_ID,
errors::BatchedMerkleTreeError,
queue::{BatchedQueueAccount, BatchedQueueMetadata},
};

/// Immutable batched queue reference.
///
/// Uses `try_borrow_data()` + `&'a [u8]` instead of
/// `try_borrow_mut_data()` + `&'a mut [u8]`.
///
/// Only contains the fields that external consumers actually read:
/// metadata and value vecs. Hash chain stores are not parsed.
#[derive(Debug)]
pub struct BatchedQueueRef<'a> {
pubkey: Pubkey,
metadata: Ref<&'a [u8], BatchedQueueMetadata>,
/// Value vec metadata: [length, capacity] per batch, parsed inline.
_value_vec_metas: [Ref<&'a [u8], [u64; 2]>; 2],
value_vec_data: [Ref<&'a [u8], [[u8; 32]]>; 2],
}

impl Discriminator for BatchedQueueRef<'_> {
const LIGHT_DISCRIMINATOR: [u8; 8] = *b"queueacc";
const LIGHT_DISCRIMINATOR_SLICE: &'static [u8] = b"queueacc";
}

impl<'a> BatchedQueueRef<'a> {
/// Deserialize an output queue (immutable) from account info.
pub fn output_from_account_info<A: AccountInfoTrait>(
account_info: &A,
) -> Result<BatchedQueueRef<'a>, BatchedMerkleTreeError> {
Self::from_account_info::<OUTPUT_STATE_QUEUE_TYPE_V2, A>(
&Pubkey::new_from_array(ACCOUNT_COMPRESSION_PROGRAM_ID),
account_info,
)
}

pub(crate) fn from_account_info<const QUEUE_TYPE: u64, A: AccountInfoTrait>(
program_id: &Pubkey,
account_info: &A,
) -> Result<BatchedQueueRef<'a>, BatchedMerkleTreeError> {
check_account_info::<BatchedQueueAccount, A>(&program_id.to_bytes(), account_info)?;
let data = account_info.try_borrow_data()?;
// SAFETY: We extend the lifetime of the borrowed data to 'a.
// The borrow is shared (immutable), so dropping the Ref guard
// restores pinocchio's borrow state correctly for shared borrows.
let data_slice: &'a [u8] = unsafe { std::slice::from_raw_parts(data.as_ptr(), data.len()) };
Self::from_bytes::<QUEUE_TYPE>(data_slice, account_info.key().into())
}
Comment on lines +40 to +62
🧹 Nitpick | 🔵 Trivial

Minor API inconsistency: program_id parameter type differs from merkle_tree_ref.rs.

In merkle_tree_ref.rs, from_account_info takes program_id: &[u8; 32], while here it takes program_id: &Pubkey. Both ultimately pass &[u8; 32] to check_account_info. This works, but the divergent signatures between sibling types can be surprising for consumers.

Consider aligning the parameter types across BatchedMerkleTreeRef::from_account_info and BatchedQueueRef::from_account_info — both are pub(crate), so the blast radius is small.

🤖 Prompt for AI Agents
In `@program-libs/batched-merkle-tree/src/queue_ref.rs` around lines 40 - 62,
Change BatchedQueueRef::from_account_info to accept program_id: &[u8; 32]
(matching BatchedMerkleTreeRef::from_account_info) instead of &Pubkey, update
its use of check_account_info to pass program_id directly (remove
program_id.to_bytes()), and update callers such as
BatchedQueueRef::output_from_account_info to pass
&ACCOUNT_COMPRESSION_PROGRAM_ID (or the &[u8;32] constant) instead of creating a
Pubkey; refer to the function name BatchedQueueRef::from_account_info, the
caller output_from_account_info, and the
check_account_info::<BatchedQueueAccount, A> call when making these edits.
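
For concreteness, the alignment described above would look roughly like the following sketch (illustrative only, mirroring the signature already used in merkle_tree_ref.rs; not a committed suggestion):

-    pub(crate) fn from_account_info<const QUEUE_TYPE: u64, A: AccountInfoTrait>(
-        program_id: &Pubkey,
-        account_info: &A,
-    ) -> Result<BatchedQueueRef<'a>, BatchedMerkleTreeError> {
-        check_account_info::<BatchedQueueAccount, A>(&program_id.to_bytes(), account_info)?;
+    pub(crate) fn from_account_info<const QUEUE_TYPE: u64, A: AccountInfoTrait>(
+        program_id: &[u8; 32],
+        account_info: &A,
+    ) -> Result<BatchedQueueRef<'a>, BatchedMerkleTreeError> {
+        check_account_info::<BatchedQueueAccount, A>(program_id, account_info)?;

The caller in output_from_account_info would then pass the byte-array constant directly:

-        Self::from_account_info::<OUTPUT_STATE_QUEUE_TYPE_V2, A>(
-            &Pubkey::new_from_array(ACCOUNT_COMPRESSION_PROGRAM_ID),
-            account_info,
-        )
+        Self::from_account_info::<OUTPUT_STATE_QUEUE_TYPE_V2, A>(
+            &ACCOUNT_COMPRESSION_PROGRAM_ID,
+            account_info,
+        )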


/// Deserialize an output queue (immutable) from bytes.
#[cfg(not(target_os = "solana"))]
pub fn output_from_bytes(
account_data: &'a [u8],
) -> Result<BatchedQueueRef<'a>, BatchedMerkleTreeError> {
light_account_checks::checks::check_discriminator::<BatchedQueueAccount>(account_data)?;
Self::from_bytes::<OUTPUT_STATE_QUEUE_TYPE_V2>(account_data, Pubkey::default())
}

pub(crate) fn from_bytes<const QUEUE_TYPE: u64>(
account_data: &'a [u8],
pubkey: Pubkey,
) -> Result<BatchedQueueRef<'a>, BatchedMerkleTreeError> {
// 1. Skip discriminator.
let (_discriminator, account_data) = account_data.split_at(DISCRIMINATOR_LEN);

// 2. Parse metadata.
let (metadata, account_data) =
Ref::<&'a [u8], BatchedQueueMetadata>::from_prefix(account_data)
.map_err(ZeroCopyError::from)?;

if metadata.metadata.queue_type != QUEUE_TYPE {
return Err(MerkleTreeMetadataError::InvalidQueueType.into());
}

// 3. Parse two value vecs inline.
// ZeroCopyVecU64 layout: [u64; 2] metadata (length, capacity), then [u8; 32] * capacity.
let metadata_size = ZeroCopyVecU64::<[u8; 32]>::metadata_size();

let (meta0_bytes, account_data) = account_data.split_at(metadata_size);
let (value_vec_meta0, _padding) =
Ref::<&'a [u8], [u64; 2]>::from_prefix(meta0_bytes).map_err(ZeroCopyError::from)?;
let capacity0 = value_vec_meta0[1] as usize; // CAPACITY_INDEX = 1
let (value_vec_data0, account_data) =
Ref::<&'a [u8], [[u8; 32]]>::from_prefix_with_elems(account_data, capacity0)
.map_err(ZeroCopyError::from)?;

let (meta1_bytes, account_data) = account_data.split_at(metadata_size);
let (value_vec_meta1, _padding) =
Ref::<&'a [u8], [u64; 2]>::from_prefix(meta1_bytes).map_err(ZeroCopyError::from)?;
let capacity1 = value_vec_meta1[1] as usize;
let (value_vec_data1, _account_data) =
Ref::<&'a [u8], [[u8; 32]]>::from_prefix_with_elems(account_data, capacity1)
.map_err(ZeroCopyError::from)?;

// 4. Stop here -- hash_chain_stores are not needed for read-only access.

Ok(BatchedQueueRef {
pubkey,
metadata,
_value_vec_metas: [value_vec_meta0, value_vec_meta1],
value_vec_data: [value_vec_data0, value_vec_data1],
})
}

/// Proves inclusion of leaf index if it exists in one of the batches.
/// Returns true if leaf index exists in one of the batches.
pub fn prove_inclusion_by_index(
&self,
leaf_index: u64,
hash_chain_value: &[u8; 32],
) -> Result<bool, BatchedMerkleTreeError> {
if leaf_index >= self.batch_metadata.next_index {
return Err(BatchedMerkleTreeError::InvalidIndex);
}
for (batch_index, batch) in self.batch_metadata.batches.iter().enumerate() {
if batch.leaf_index_exists(leaf_index) {
let index = batch.get_value_index_in_batch(leaf_index)?;
let element = self.value_vec_data[batch_index]
.get(index as usize)
.ok_or(BatchedMerkleTreeError::InclusionProofByIndexFailed)?;

if *element == *hash_chain_value {
return Ok(true);
} else {
#[cfg(target_os = "solana")]
{
solana_msg::msg!(
"Index found but value doesn't match leaf_index {} compressed account hash: {:?} expected compressed account hash {:?}. (If the expected element is [0u8;32] it was already spent. Other possibly causes, data hash, discriminator, leaf index, or Merkle tree mismatch.)",
leaf_index,
hash_chain_value, *element
);
}
return Err(BatchedMerkleTreeError::InclusionProofByIndexFailed);
}
}
}
Ok(false)
}

/// Check if the pubkey is the associated Merkle tree of the queue.
pub fn check_is_associated(&self, pubkey: &Pubkey) -> Result<(), BatchedMerkleTreeError> {
if self.metadata.metadata.associated_merkle_tree != *pubkey {
return Err(MerkleTreeMetadataError::MerkleTreeAndQueueNotAssociated.into());
}
Ok(())
}

pub fn pubkey(&self) -> &Pubkey {
&self.pubkey
}
}

impl Deref for BatchedQueueRef<'_> {
type Target = BatchedQueueMetadata;

fn deref(&self) -> &Self::Target {
&self.metadata
}
}
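
Analogous to the tree reader, the queue reader's off-chain consumption would look roughly like this sketch. output_from_bytes is only compiled off-chain (cfg(not(target_os = "solana"))); the crate import path and helper name are assumptions:

use light_batched_merkle_tree::{errors::BatchedMerkleTreeError, queue_ref::BatchedQueueRef};
use light_compressed_account::pubkey::Pubkey;

/// Hypothetical off-chain reader: parse the output queue immutably, verify it is
/// associated with the expected Merkle tree, and prove a leaf by index.
fn leaf_is_in_output_queue(
    queue_data: &[u8],
    merkle_tree_pubkey: &Pubkey,
    leaf_index: u64,
    account_hash: &[u8; 32],
) -> Result<bool, BatchedMerkleTreeError> {
    let queue = BatchedQueueRef::output_from_bytes(queue_data)?;
    queue.check_is_associated(merkle_tree_pubkey)?;
    // Ok(true) on a match, Ok(false) if no batch holds the index,
    // Err on an out-of-range index or a value mismatch.
    queue.prove_inclusion_by_index(leaf_index, account_hash)
}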