diff --git a/.github/workflows/pull-request-account-compression.yml b/.github/workflows/pull-request-account-compression.yml index 9504a1bcc46..a3e936f8c8a 100644 --- a/.github/workflows/pull-request-account-compression.yml +++ b/.github/workflows/pull-request-account-compression.yml @@ -79,7 +79,7 @@ jobs: js-test-account-compression: runs-on: ubuntu-latest env: - NODE_VERSION: 16.x + NODE_VERSION: 20.5 steps: - uses: actions/checkout@v4 - name: Use Node.js ${{ env.NODE_VERSION }} diff --git a/.github/workflows/pull-request-libraries.yml b/.github/workflows/pull-request-libraries.yml index 18ca39623a9..2b4eeef1e33 100644 --- a/.github/workflows/pull-request-libraries.yml +++ b/.github/workflows/pull-request-libraries.yml @@ -66,7 +66,7 @@ jobs: js-test: runs-on: ubuntu-latest env: - NODE_VERSION: 16.x + NODE_VERSION: 20.5 steps: - uses: actions/checkout@v4 - name: Use Node.js ${{ env.NODE_VERSION }} diff --git a/account-compression/Cargo.lock b/account-compression/Cargo.lock index c5764a640be..4ad012b4ab3 100644 --- a/account-compression/Cargo.lock +++ b/account-compression/Cargo.lock @@ -544,9 +544,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.16.1" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b236fc92302c97ed75b38da1f4917b5cdda4984745740f153a5d3059e48d725e" +checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" dependencies = [ "bytemuck_derive", ] @@ -1563,7 +1563,7 @@ dependencies = [ [[package]] name = "spl-account-compression" -version = "0.3.0" +version = "0.4.0" dependencies = [ "anchor-lang", "bytemuck", @@ -1574,7 +1574,7 @@ dependencies = [ [[package]] name = "spl-concurrent-merkle-tree" -version = "0.3.0" +version = "0.4.0" dependencies = [ "bytemuck", "solana-program", diff --git a/account-compression/programs/account-compression/Cargo.toml b/account-compression/programs/account-compression/Cargo.toml index a57df385abe..6aac624cf76 100644 --- 
a/account-compression/programs/account-compression/Cargo.toml +++ b/account-compression/programs/account-compression/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "spl-account-compression" -version = "0.3.1" +version = "0.4.0" description = "Solana Program Library Account Compression Program" authors = ["Solana Labs Maintainers "] repository = "https://github.com/solana-labs/solana-program-library" @@ -21,7 +21,7 @@ default = [] anchor-lang = "0.29.0" bytemuck = "1.13" solana-program = ">=1.18.11,<=2" -spl-concurrent-merkle-tree = { version = "0.3.0", path = "../../../libraries/concurrent-merkle-tree" } +spl-concurrent-merkle-tree = { version = "0.4.0", path = "../../../libraries/concurrent-merkle-tree" } spl-noop = { version = "0.2.0", path = "../noop", features = ["no-entrypoint"] } [profile.release] diff --git a/account-compression/programs/account-compression/src/canopy.rs b/account-compression/programs/account-compression/src/canopy.rs index bdf85d7d5d0..284b2fbbe73 100644 --- a/account-compression/programs/account-compression/src/canopy.rs +++ b/account-compression/programs/account-compression/src/canopy.rs @@ -20,9 +20,13 @@ use crate::error::AccountCompressionError; use crate::events::ChangeLogEvent; use anchor_lang::prelude::*; use bytemuck::{cast_slice, cast_slice_mut}; -use spl_concurrent_merkle_tree::node::{empty_node_cached, Node, EMPTY}; +use solana_program::keccak::hashv; +use spl_concurrent_merkle_tree::node::{empty_node_cached, empty_node_cached_mut, Node, EMPTY}; use std::mem::size_of; +/// Maximum depth of the tree, supported by the SPL Compression +const MAX_SUPPORTED_DEPTH: usize = 30; + #[inline(always)] pub fn check_canopy_bytes(canopy_bytes: &[u8]) -> Result<()> { if canopy_bytes.len() % size_of::() != 0 { @@ -94,8 +98,7 @@ pub fn fill_in_proof_from_canopy( index: u32, proof: &mut Vec, ) -> Result<()> { - // 30 is hard coded as it is the current max depth that SPL Compression supports - let mut empty_node_cache = Box::new([EMPTY; 30]); + let 
mut empty_node_cache = Box::new([EMPTY; MAX_SUPPORTED_DEPTH]); check_canopy_bytes(canopy_bytes)?; let canopy = cast_slice::(canopy_bytes); let path_len = get_cached_path_length(canopy, max_depth)?; @@ -114,7 +117,7 @@ pub fn fill_in_proof_from_canopy( }; if canopy[cached_idx] == EMPTY { let level = max_depth - (31 - node_idx.leading_zeros()); - let empty_node = empty_node_cached::<30>(level, &mut empty_node_cache); + let empty_node = empty_node_cached::(level, &mut empty_node_cache); inferred_nodes.push(empty_node); } else { inferred_nodes.push(canopy[cached_idx]); @@ -128,3 +131,431 @@ pub fn fill_in_proof_from_canopy( proof.extend(inferred_nodes.iter().skip(overlap)); Ok(()) } + +/// Sets the leaf nodes of the canopy. The leaf nodes are the lowest level of the canopy, +/// representing the leaves of the canopy-tree. The method will update the parent nodes of all the +/// modified subtrees up to the uppermost level of the canopy. The leaf nodes indexing for the +/// start_index is 0-based without regards to the full tree indexes or the node indexes. The +/// start_index is the index of the first leaf node to be updated. 
+pub fn set_canopy_leaf_nodes( + canopy_bytes: &mut [u8], + max_depth: u32, + start_index: u32, + nodes: &[Node], +) -> Result<()> { + check_canopy_bytes(canopy_bytes)?; + let canopy = cast_slice_mut::(canopy_bytes); + let path_len = get_cached_path_length(canopy, max_depth)?; + if path_len == 0 { + return err!(AccountCompressionError::CanopyNotAllocated); + } + let start_canopy_node = leaf_node_index_to_canopy_index(path_len, start_index)?; + let start_canopy_idx = start_canopy_node - 2; + // set the "leaf" nodes of the canopy first - that's the lowest level of the canopy + for (i, node) in nodes.iter().enumerate() { + canopy[start_canopy_idx + i] = *node; + } + let mut start_canopy_node = start_canopy_node; + let mut end_canopy_node = start_canopy_node + nodes.len() - 1; + let mut empty_node_cache = Box::new([EMPTY; MAX_SUPPORTED_DEPTH]); + let leaf_node_level = max_depth - path_len; + // traverse up the tree and update the parent nodes in the modified subtree + for level in leaf_node_level + 1..max_depth { + start_canopy_node >>= 1; + end_canopy_node >>= 1; + for node in start_canopy_node..end_canopy_node + 1 { + let left_child = get_value_for_node::( + node << 1, + level - 1, + canopy, + &mut empty_node_cache, + ); + let right_child = get_value_for_node::( + (node << 1) + 1, + level - 1, + canopy, + &mut empty_node_cache, + ); + canopy[node - 2].copy_from_slice(hashv(&[&left_child, &right_child]).as_ref()); + } + } + Ok(()) +} + +/// Checks the root of the canopy against the expected root. 
+pub fn check_canopy_root(canopy_bytes: &[u8], expected_root: &Node, max_depth: u32) -> Result<()> { + check_canopy_bytes(canopy_bytes)?; + let canopy = cast_slice::(canopy_bytes); + if canopy.is_empty() { + return Ok(()); // Canopy is empty + } + let mut empty_node_cache = Box::new([EMPTY; MAX_SUPPORTED_DEPTH]); + // first two nodes are the children of the root, they have index 2 and 3 respectively + let left_root_child = + get_value_for_node::(2, max_depth - 1, canopy, &mut empty_node_cache); + let right_root_child = + get_value_for_node::(3, max_depth - 1, canopy, &mut empty_node_cache); + let actual_root = hashv(&[&left_root_child, &right_root_child]).to_bytes(); + if actual_root != *expected_root { + msg!( + "Canopy root mismatch. Expected: {:?}, Actual: {:?}", + expected_root, + actual_root + ); + err!(AccountCompressionError::CanopyRootMismatch) + } else { + Ok(()) + } +} + +/// Checks the canopy doesn't have any nodes to the right of the provided index in the full tree. +/// This is done by iterating through the canopy nodes to the right of the provided index and +/// finding the top-most node that has the node as its left child. The node should be empty. The +/// iteration contains following the previous checked node on the same level until the last node on +/// the level is reached. 
+pub fn check_canopy_no_nodes_to_right_of_index( + canopy_bytes: &[u8], + max_depth: u32, + index: u32, +) -> Result<()> { + check_canopy_bytes(canopy_bytes)?; + check_index(index, max_depth)?; + let canopy = cast_slice::(canopy_bytes); + let path_len = get_cached_path_length(canopy, max_depth)?; + + let mut node_idx = ((1 << max_depth) + index) >> (max_depth - path_len); + // no need to check the node_idx as it's the leaf continaing the index underneath it + while node_idx & (node_idx + 1) != 0 { + // check the next node to the right + node_idx += 1; + // find the top-most node that has the node as its left-most child + node_idx >>= node_idx.trailing_zeros(); + + let shifted_index = node_idx as usize - 2; + if canopy[shifted_index] != EMPTY { + msg!("Canopy node at index {} is not empty", shifted_index); + return err!(AccountCompressionError::CanopyRightmostLeafMismatch); + } + } + Ok(()) +} + +#[inline(always)] +fn check_index(index: u32, at_depth: u32) -> Result<()> { + if at_depth > MAX_SUPPORTED_DEPTH as u32 { + return err!(AccountCompressionError::ConcurrentMerkleTreeConstantsError); + } + if at_depth == 0 { + return err!(AccountCompressionError::ConcurrentMerkleTreeConstantsError); + } + if index >= (1 << at_depth) { + return err!(AccountCompressionError::LeafIndexOutOfBounds); + } + Ok(()) +} + +#[inline(always)] +fn get_value_for_node( + node_idx: usize, + level: u32, + canopy: &[Node], + empty_node_cache: &mut [Node; N], +) -> Node { + if canopy[node_idx - 2] != EMPTY { + return canopy[node_idx - 2]; + } + empty_node_cached_mut::(level, empty_node_cache) +} + +#[inline(always)] +fn leaf_node_index_to_canopy_index(path_len: u32, index: u32) -> Result { + check_index(index, path_len)?; + Ok((1 << path_len) + index as usize) +} + +#[cfg(test)] +mod tests { + use {super::*, spl_concurrent_merkle_tree::node::empty_node}; + + fn success_leaf_node_index_to_canopy_index(path_len: u32, index: u32, expected: usize) { + assert_eq!( + 
leaf_node_index_to_canopy_index(path_len, index).unwrap(), + expected + ); + } + + #[test] + fn test_zero_length_tree() { + assert_eq!( + leaf_node_index_to_canopy_index(0, 0).unwrap_err(), + AccountCompressionError::ConcurrentMerkleTreeConstantsError.into() + ); + } + + #[test] + fn test_1_level_0_index() { + success_leaf_node_index_to_canopy_index(1, 0, 2); + } + + #[test] + fn test_1_level_1_index() { + success_leaf_node_index_to_canopy_index(1, 1, 3); + } + + #[test] + fn test_2_level_0_index() { + success_leaf_node_index_to_canopy_index(2, 0, 4); + } + #[test] + fn test_2_level_3_index() { + success_leaf_node_index_to_canopy_index(2, 3, 7); + } + + #[test] + fn test_10_level_0_index() { + success_leaf_node_index_to_canopy_index(10, 0, 1024); + } + + #[test] + fn test_10_level_1023_index() { + success_leaf_node_index_to_canopy_index(10, 1023, 2047); + } + + #[test] + fn test_simple_single_level_canopy_set_canopy_leaf_nodes_with_empty_nodes() { + let mut canopy_bytes = vec![0_u8; 2 * size_of::()]; + let nodes = vec![EMPTY; 2]; + set_canopy_leaf_nodes(&mut canopy_bytes, 1, 0, &nodes).unwrap(); + let canopy = cast_slice::(&canopy_bytes); + + assert_eq!(canopy[0], EMPTY); + assert_eq!(canopy[1], EMPTY); + } + + #[test] + fn test_simple_single_level_canopy_set_canopy_leaf_nodes_non_empty_nodes() { + let mut canopy_bytes = vec![0_u8; 2 * size_of::()]; + let nodes = vec![[1_u8; 32], [2_u8; 32]]; + set_canopy_leaf_nodes(&mut canopy_bytes, 1, 0, &nodes).unwrap(); + let canopy = cast_slice::(&canopy_bytes); + + assert_eq!(canopy[0], [1_u8; 32]); + assert_eq!(canopy[1], [2_u8; 32]); + } + + #[test] + fn test_2levels_canopy_set_canopy_leaf_nodes_first_2_elements_provided() { + let mut canopy_bytes = vec![0_u8; 6 * size_of::()]; + let nodes = vec![[1_u8; 32], [2_u8; 32]]; + set_canopy_leaf_nodes(&mut canopy_bytes, 2, 0, &nodes).unwrap(); + let canopy = cast_slice::(&canopy_bytes); + + assert_eq!(canopy[0], hashv(&[&[1_u8; 32], &[2_u8; 32]]).to_bytes()); + 
assert_eq!(canopy[1], EMPTY); // is not updated + assert_eq!(canopy[2], [1_u8; 32]); + assert_eq!(canopy[3], [2_u8; 32]); + assert_eq!(canopy[4], EMPTY); + assert_eq!(canopy[5], EMPTY); + } + + #[test] + fn test_2levels_canopy_set_canopy_leaf_nodes_last_2_elements_provided() { + let mut canopy_bytes = vec![0_u8; 6 * size_of::()]; + let nodes = vec![[1_u8; 32], [2_u8; 32]]; + set_canopy_leaf_nodes(&mut canopy_bytes, 2, 2, &nodes).unwrap(); + let canopy = cast_slice::(&canopy_bytes); + + assert_eq!(canopy[0], EMPTY); // is not updated + assert_eq!(canopy[1], hashv(&[&[1_u8; 32], &[2_u8; 32]]).to_bytes()); + assert_eq!(canopy[2], EMPTY); + assert_eq!(canopy[3], EMPTY); + assert_eq!(canopy[4], [1_u8; 32]); + assert_eq!(canopy[5], [2_u8; 32]); + } + + #[test] + fn test_2levels_canopy_set_canopy_leaf_nodes_middle_2_elements_provided() { + let mut canopy_bytes = vec![0_u8; 6 * size_of::()]; + let nodes = vec![[1_u8; 32], [2_u8; 32]]; + set_canopy_leaf_nodes(&mut canopy_bytes, 2, 1, &nodes).unwrap(); + let canopy = cast_slice::(&canopy_bytes); + + assert_eq!(canopy[2], EMPTY); + assert_eq!(canopy[3], [1_u8; 32]); + assert_eq!(canopy[4], [2_u8; 32]); + assert_eq!(canopy[5], EMPTY); + assert_eq!(canopy[0], hashv(&[&EMPTY, &[1_u8; 32]]).to_bytes()); + assert_eq!(canopy[1], hashv(&[&[2_u8; 32], &EMPTY]).to_bytes()); + } + + #[test] + fn test_3level_canopy_in_10_level_tree_set_canopy_leaf_nodes_first_2_elements_provided() { + let mut canopy_bytes = vec![0_u8; 14 * size_of::()]; + let nodes = vec![[1_u8; 32], [2_u8; 32]]; + set_canopy_leaf_nodes(&mut canopy_bytes, 10, 0, &nodes).unwrap(); + let canopy = cast_slice::(&canopy_bytes); + + let expected_hash12 = hashv(&[&[1_u8; 32], &[2_u8; 32]]).to_bytes(); + assert_eq!( + canopy[0], + hashv(&[&expected_hash12, &empty_node(8)]).to_bytes() + ); + assert_eq!(canopy[1], EMPTY); // is not updated + assert_eq!(canopy[2], expected_hash12); + assert_eq!(canopy[3], EMPTY); // is not updated + assert_eq!(canopy[4], EMPTY); // is not updated 
+ assert_eq!(canopy[5], EMPTY); // is not updated + assert_eq!(canopy[6], [1_u8; 32]); + assert_eq!(canopy[7], [2_u8; 32]); + } + + #[test] + fn test_3level_canopy_in_10_level_tree_set_canopy_leaf_nodes_middle_2_elements_provided() { + let mut canopy_bytes = vec![0_u8; 14 * size_of::()]; + let nodes = vec![[1_u8; 32], [2_u8; 32]]; + set_canopy_leaf_nodes(&mut canopy_bytes, 10, 3, &nodes).unwrap(); + let canopy = cast_slice::(&canopy_bytes); + + let expected_hash_empty_1 = hashv(&[&empty_node(7), &[1_u8; 32]]).to_bytes(); + let expected_hash_2_empty = hashv(&[&[2_u8; 32], &empty_node(7)]).to_bytes(); + + assert_eq!( + canopy[0], + hashv(&[&empty_node(8), &expected_hash_empty_1]).to_bytes() + ); + assert_eq!( + canopy[1], + hashv(&[&expected_hash_2_empty, &empty_node(8)]).to_bytes() + ); + assert_eq!(canopy[2], EMPTY); // is not updated + assert_eq!(canopy[3], expected_hash_empty_1); + assert_eq!(canopy[4], expected_hash_2_empty); + assert_eq!(canopy[5], EMPTY); // is not updated + assert_eq!(canopy[9], [1_u8; 32]); + assert_eq!(canopy[10], [2_u8; 32]); + } + + #[test] + fn test_3level_canopy_empty_set_canopy_leaf_nodes_no_impact() { + let mut canopy_bytes = vec![0_u8; 14 * size_of::()]; + let nodes = vec![]; + set_canopy_leaf_nodes(&mut canopy_bytes, 10, 0, &nodes).unwrap(); + assert_eq!(canopy_bytes, vec![0_u8; 14 * size_of::()]); + } + + #[test] + fn test_success_check_canopy_root() { + let mut canopy_bytes = vec![0_u8; 2 * size_of::()]; + let expected_root = hashv(&[&[1_u8; 32], &[2_u8; 32]]).to_bytes(); + let nodes = vec![[1_u8; 32], [2_u8; 32]]; + set_canopy_leaf_nodes(&mut canopy_bytes, 1, 0, &nodes).unwrap(); + check_canopy_root(&canopy_bytes, &expected_root, 30).unwrap(); + } + + #[test] + fn test_success_check_canopy_root_with_empty_right_branch() { + let mut canopy_bytes = vec![0_u8; 2 * size_of::()]; + let mut empty_node_cache = Box::new([EMPTY; MAX_SUPPORTED_DEPTH]); + let top_level = (MAX_SUPPORTED_DEPTH - 1) as u32; + let right_branch = + 
empty_node_cached_mut::(top_level, &mut empty_node_cache); + let expected_root = hashv(&[&[1_u8; 32], &right_branch]).to_bytes(); + let nodes = vec![[1_u8; 32], EMPTY]; + set_canopy_leaf_nodes(&mut canopy_bytes, MAX_SUPPORTED_DEPTH as u32, 0, &nodes).unwrap(); + check_canopy_root(&canopy_bytes, &expected_root, 30).unwrap(); + } + + #[test] + fn test_failure_check_canopy_root() { + let mut canopy_bytes = vec![0_u8; 2 * size_of::()]; + let expected_root = hashv(&[&[1_u8; 32], &[2_u8; 32]]).to_bytes(); + let nodes = vec![[1_u8; 32], [2_u8; 32]]; + set_canopy_leaf_nodes(&mut canopy_bytes, 1, 0, &nodes).unwrap(); + let mut expected_root = expected_root; + expected_root[0] = 0; + assert_eq!( + check_canopy_root(&canopy_bytes, &expected_root, 30).unwrap_err(), + AccountCompressionError::CanopyRootMismatch.into() + ); + } + + #[test] + fn test_success_check_canopy_no_nodes_to_right_of_index_empty_tree_first_index() { + let canopy_bytes = vec![0_u8; 6 * size_of::()]; + check_canopy_no_nodes_to_right_of_index(&canopy_bytes, 20, 0).unwrap(); + } + + #[test] + fn test_success_check_canopy_no_nodes_to_right_of_index_empty_tree_last_index() { + let canopy_bytes = vec![0_u8; 6 * size_of::()]; + check_canopy_no_nodes_to_right_of_index(&canopy_bytes, 20, (1 << 20) - 1).unwrap(); + } + + #[test] + fn test_success_check_canopy_no_nodes_to_right_of_index_empty_canopy_only_tree_first_index() { + let canopy_bytes = vec![0_u8; 6 * size_of::()]; + check_canopy_no_nodes_to_right_of_index(&canopy_bytes, 2, 0).unwrap(); + } + + #[test] + fn test_success_check_canopy_no_nodes_to_right_of_index_empty_canopy_only_tree_last_index() { + let canopy_bytes = vec![0_u8; 6 * size_of::()]; + check_canopy_no_nodes_to_right_of_index(&canopy_bytes, 2, (1 << 2) - 1).unwrap(); + } + + #[test] + fn test_failure_check_canopy_no_nodes_to_right_of_index_empty_tree_index_out_of_range() { + let canopy_bytes = vec![0_u8; 6 * size_of::()]; + assert_eq!( + check_canopy_no_nodes_to_right_of_index(&canopy_bytes, 2, 1 
<< 20).unwrap_err(), + AccountCompressionError::LeafIndexOutOfBounds.into() + ); + } + + #[test] + fn test_failure_check_canopy_no_nodes_to_right_of_index_full_tree_index_out_of_range() { + let canopy_bytes = vec![1_u8; 6 * size_of::()]; + assert_eq!( + check_canopy_no_nodes_to_right_of_index(&canopy_bytes, 2, 1 << 21).unwrap_err(), + AccountCompressionError::LeafIndexOutOfBounds.into() + ); + } + + #[test] + fn test_success_check_canopy_no_nodes_to_right_of_index_full_tree_last_index() { + let canopy_bytes = vec![1_u8; 6 * size_of::()]; + check_canopy_no_nodes_to_right_of_index(&canopy_bytes, 20, (1 << 20) - 1).unwrap(); + } + + #[test] + fn test_success_check_canopy_no_nodes_to_right_of_index_full_tree_first_child_of_last_canopy_node_leaf( + ) { + let canopy_bytes = vec![1_u8; 6 * size_of::()]; + check_canopy_no_nodes_to_right_of_index(&canopy_bytes, 20, 3 << (20 - 2)).unwrap(); + } + + #[test] + fn test_failure_check_canopy_no_nodes_to_right_of_index_full_tree_last_child_of_second_to_last_canopy_node_leaf( + ) { + let canopy_bytes = vec![1_u8; 6 * size_of::()]; + assert_eq!( + check_canopy_no_nodes_to_right_of_index(&canopy_bytes, 20, (3 << (20 - 2)) - 1) + .unwrap_err(), + AccountCompressionError::CanopyRightmostLeafMismatch.into() + ); + } + + #[test] + fn test_success_check_canopy_no_nodes_to_right_of_index_last_child_of_second_to_last_canopy_node_leaf( + ) { + let mut canopy_bytes = vec![1_u8; 6 * size_of::()]; + canopy_bytes[5 * size_of::()..].fill(0); + check_canopy_no_nodes_to_right_of_index(&canopy_bytes, 20, (3 << (20 - 2)) - 1).unwrap(); + } + + #[test] + fn test_succes_check_canopy_no_nodes_to_right_of_index_no_canopy() { + let canopy_bytes = vec![]; + check_canopy_no_nodes_to_right_of_index(&canopy_bytes, 20, 0).unwrap(); + } +} diff --git a/account-compression/programs/account-compression/src/concurrent_tree_wrapper.rs b/account-compression/programs/account-compression/src/concurrent_tree_wrapper.rs index 23c518c731f..e954fea5c57 100644 --- 
a/account-compression/programs/account-compression/src/concurrent_tree_wrapper.rs +++ b/account-compression/programs/account-compression/src/concurrent_tree_wrapper.rs @@ -86,3 +86,30 @@ pub fn merkle_tree_append_leaf( ) -> Result> { merkle_tree_apply_fn_mut!(header, tree_id, tree_bytes, append, *args) } + +/// Checks whether the tree in not initialized yet without doing the deserialization. A rought +/// equivalent to deserializing the tree and calling is_initialized() on it without the heavy +/// lifting with macros. An empty account is a zero'd account. The tree is considered empty if the +/// tree_bytes are all 0. A regular non-batch initialized tree is initialized early on when the +/// init_empty_merkle_tree is called. A batch initialized tree stays uninitialized until the +/// init_prepared_tree_with_root is called. +pub fn tree_bytes_uninitialized(tree_bytes: &[u8]) -> bool { + tree_bytes.iter().all(|&x| x == 0) +} + +#[inline(never)] +pub fn assert_tree_is_empty( + header: &ConcurrentMerkleTreeHeader, + tree_id: Pubkey, + tree_bytes: &mut [u8], +) -> Result<()> { + // If the tree is batch initialized and not finalized yet, we can treat it as empty. + // Before the tree is finalized, the tree_bytes will be all 0 as only the header will be + // initialized at that point, so we may skip the deserialization. 
+ if header.get_is_batch_initialized() && tree_bytes_uninitialized(tree_bytes) { + return Ok(()); + } + // check the tree is empty + merkle_tree_apply_fn_mut!(header, tree_id, tree_bytes, prove_tree_is_empty,)?; + Ok(()) +} diff --git a/account-compression/programs/account-compression/src/error.rs b/account-compression/programs/account-compression/src/error.rs index fc7efdb215a..ef682300ebc 100644 --- a/account-compression/programs/account-compression/src/error.rs +++ b/account-compression/programs/account-compression/src/error.rs @@ -47,6 +47,26 @@ pub enum AccountCompressionError { /// is out of bounds of tree's maximum leaf capacity #[msg("Leaf index of concurrent merkle tree is out of bounds")] LeafIndexOutOfBounds, + + /// When initializing a canopy of the tree, the underlying tree was allocated without space for the canopy + #[msg("Tree was initialized without allocating space for the canopy")] + CanopyNotAllocated, + + /// The tree was already initialized + #[msg("Tree was already initialized")] + TreeAlreadyInitialized, + + /// The tree header was not initialized for batch processing + #[msg("Tree header was not initialized for batch processing")] + BatchNotInitialized, + + /// The canopy root doesn't match the root of the tree + #[msg("Canopy root does not match the root of the tree")] + CanopyRootMismatch, + + /// The canopy contains nodes to the right of the rightmost leaf of the tree + #[msg("Canopy contains nodes to the right of the rightmost leaf of the tree")] + CanopyRightmostLeafMismatch, } impl From<&ConcurrentMerkleTreeError> for AccountCompressionError { diff --git a/account-compression/programs/account-compression/src/lib.rs b/account-compression/programs/account-compression/src/lib.rs index 152cd5f745b..93d8411b3c4 100644 --- a/account-compression/programs/account-compression/src/lib.rs +++ b/account-compression/programs/account-compression/src/lib.rs @@ -40,7 +40,10 @@ pub mod zero_copy; pub use crate::noop::{wrap_application_data_v1, Noop}; 
-use crate::canopy::{fill_in_proof_from_canopy, update_canopy}; +use crate::canopy::{ + check_canopy_bytes, check_canopy_no_nodes_to_right_of_index, check_canopy_root, + fill_in_proof_from_canopy, set_canopy_leaf_nodes, update_canopy, +}; use crate::concurrent_tree_wrapper::*; pub use crate::error::AccountCompressionError; pub use crate::events::{AccountCompressionEvent, ChangeLogEvent}; @@ -48,13 +51,13 @@ use crate::noop::wrap_event; use crate::state::{ merkle_tree_get_size, ConcurrentMerkleTreeHeader, CONCURRENT_MERKLE_TREE_HEADER_SIZE_V1, }; -use crate::zero_copy::ZeroCopy; /// Exported for Anchor / Solita pub use spl_concurrent_merkle_tree::{ concurrent_merkle_tree::{ConcurrentMerkleTree, FillEmptyOrAppendArgs}, error::ConcurrentMerkleTreeError, node::Node, + node::EMPTY, }; declare_id!("cmtDvXumGCrqC1Age74AVPhSRVXJMd8PJS91L8KbNCK"); @@ -74,7 +77,8 @@ pub struct Initialize<'info> { pub noop: Program<'info, Noop>, } -/// Context for inserting, appending, or replacing a leaf in the tree +/// Context for modifying a tree: inserting, appending, or replacing a leaf in +/// the existing tree and setting the canopy or finalizing a prepared tree. /// /// Modification instructions also require the proof to the leaf to be provided /// as 32-byte nodes via "remaining accounts". @@ -180,69 +184,166 @@ pub mod spl_account_compression { update_canopy(canopy_bytes, header.get_max_depth(), None) } - /// Note: - /// Supporting this instruction open a security vulnerability for indexers. - /// This instruction has been deemed unusable for publicly indexed compressed NFTs. - /// Indexing batched data in this way requires indexers to read in the `uri`s onto physical storage - /// and then into their database. This opens up a DOS attack vector, whereby this instruction is - /// repeatedly invoked, causing indexers to fail. - /// - /// Because this instruction was deemed insecure, this instruction has been removed - /// until secure usage is available on-chain. 
- // pub fn init_merkle_tree_with_root( - // ctx: Context, - // max_depth: u32, - // max_buffer_size: u32, - // root: [u8; 32], - // leaf: [u8; 32], - // index: u32, - // _changelog_db_uri: String, - // _metadata_db_uri: String, - // ) -> Result<()> { - // require_eq!( - // *ctx.accounts.merkle_tree.owner, - // crate::id(), - // AccountCompressionError::IncorrectAccountOwner - // ); - // let mut merkle_tree_bytes = ctx.accounts.merkle_tree.try_borrow_mut_data()?; - - // let (mut header_bytes, rest) = - // merkle_tree_bytes.split_at_mut(CONCURRENT_MERKLE_TREE_HEADER_SIZE_V1); - - // let mut header = ConcurrentMerkleTreeHeader::try_from_slice(&header_bytes)?; - // header.initialize( - // max_depth, - // max_buffer_size, - // &ctx.accounts.authority.key(), - // Clock::get()?.slot, - // ); - // header.serialize(&mut header_bytes)?; - // let merkle_tree_size = merkle_tree_get_size(&header)?; - // let (tree_bytes, canopy_bytes) = rest.split_at_mut(merkle_tree_size); - - // // Get rightmost proof from accounts - // let mut proof = vec![]; - // for node in ctx.remaining_accounts.iter() { - // proof.push(node.key().to_bytes()); - // } - // fill_in_proof_from_canopy(canopy_bytes, header.max_depth, index, &mut proof)?; - // assert_eq!(proof.len(), max_depth as usize); - - // let id = ctx.accounts.merkle_tree.key(); - // // A call is made to ConcurrentMerkleTree::initialize_with_root(root, leaf, proof, index) - // let change_log = merkle_tree_apply_fn!( - // header, - // id, - // tree_bytes, - // initialize_with_root, - // root, - // leaf, - // &proof, - // index - // )?; - // wrap_event(change_log.try_to_vec()?, &ctx.accounts.log_wrapper)?; - // update_canopy(canopy_bytes, header.max_depth, Some(change_log)) - // } + /// (Devnet only) In order to initialize a tree with a root, we need to create the tree on-chain first with + /// the proper authority. The tree might contain a canopy, which is a cache of the uppermost + /// nodes. 
The canopy is used to decrease the size of the proof required to update the tree. + /// If the tree is expected to have a canopy, it needs to be prefilled with the necessary nodes. + /// There are 2 ways to initialize a merkle tree: + /// 1. Initialize an empty tree + /// 2. Initialize a tree with a root and leaf + /// For the former case, the canopy will be empty which is expected for an empty tree. The + /// expected flow is `init_empty_merkle_tree`. For the latter case, the canopy should be + /// filled with the necessary nodes to render the tree usable. Thus we need to prefill the + /// canopy with the necessary nodes. The expected flow for a tree without canopy is + /// `prepare_batch_merkle_tree` -> `init_prepared_tree_with_root`. The expected flow for a tree + /// with canopy is `prepare_batch_merkle_tree` -> `append_canopy_nodes` (multiple times + /// until all of the canopy is filled) -> `init_prepared_tree_with_root`. This instruction + /// initializes the tree header while leaving the tree itself uninitialized. This allows + /// distinguishing between an empty tree and a tree prepare to be initialized with a root. 
+ pub fn prepare_batch_merkle_tree( + ctx: Context, + max_depth: u32, + max_buffer_size: u32, + ) -> Result<()> { + require_eq!( + *ctx.accounts.merkle_tree.owner, + crate::id(), + AccountCompressionError::IncorrectAccountOwner + ); + let mut merkle_tree_bytes = ctx.accounts.merkle_tree.try_borrow_mut_data()?; + + let (mut header_bytes, rest) = + merkle_tree_bytes.split_at_mut(CONCURRENT_MERKLE_TREE_HEADER_SIZE_V1); + + let mut header = ConcurrentMerkleTreeHeader::try_from_slice(header_bytes)?; + header.initialize_batched( + max_depth, + max_buffer_size, + &ctx.accounts.authority.key(), + Clock::get()?.slot, + ); + header.serialize(&mut header_bytes)?; + let merkle_tree_size = merkle_tree_get_size(&header)?; + let (_tree_bytes, canopy_bytes) = rest.split_at_mut(merkle_tree_size); + check_canopy_bytes(canopy_bytes) + } + + /// (Devnet only) This instruction pre-initializes the canopy with the specified leaf nodes of the canopy. + /// This is intended to be used after `prepare_batch_merkle_tree` and in conjunction with the + /// `init_prepared_tree_with_root` instruction that'll finalize the tree initialization. + /// The canopy is used to cache the uppermost nodes of the tree, which allows for a smaller + /// proof size when updating the tree. The canopy should be filled with the necessary nodes + /// before calling `init_prepared_tree_with_root`. You may call this instruction multiple + /// times to fill the canopy with the necessary nodes. The canopy may be filled with the + /// nodes in any order. The already filled nodes may be replaced with new nodes before calling + /// `init_prepared_tree_with_root` if the step was done in error. + /// The canopy should be filled with all the nodes that are to the left of the rightmost + /// leaf of the tree before calling `init_prepared_tree_with_root`. The canopy should not + /// contain any nodes to the right of the rightmost leaf of the tree. 
+ /// This instruction calculates and filles in all the canopy nodes "above" the provided ones. + /// The validation of the canopy is done in the `init_prepared_tree_with_root` instruction. + pub fn append_canopy_nodes( + ctx: Context, + start_index: u32, + canopy_nodes: Vec<[u8; 32]>, + ) -> Result<()> { + require_eq!( + *ctx.accounts.merkle_tree.owner, + crate::id(), + AccountCompressionError::IncorrectAccountOwner + ); + let mut merkle_tree_bytes = ctx.accounts.merkle_tree.try_borrow_mut_data()?; + + let (header_bytes, rest) = + merkle_tree_bytes.split_at_mut(CONCURRENT_MERKLE_TREE_HEADER_SIZE_V1); + + let header = ConcurrentMerkleTreeHeader::try_from_slice(header_bytes)?; + header.assert_valid_authority(&ctx.accounts.authority.key())?; + header.assert_is_batch_initialized()?; + // assert the tree is not initialized yet, we don't want to overwrite the canopy of an + // initialized tree + let merkle_tree_size = merkle_tree_get_size(&header)?; + let (tree_bytes, canopy_bytes) = rest.split_at_mut(merkle_tree_size); + // ensure the tree is not initialized, the hacky way + require!( + tree_bytes_uninitialized(tree_bytes), + AccountCompressionError::TreeAlreadyInitialized + ); + set_canopy_leaf_nodes( + canopy_bytes, + header.get_max_depth(), + start_index, + &canopy_nodes, + ) + } + + /// (Devnet only) Initializes a prepared tree with a root and a rightmost leaf. The rightmost leaf is used to + /// verify the canopy if the tree has it. Before calling this instruction, the tree should be + /// prepared with `prepare_batch_merkle_tree` and the canopy should be filled with the necessary + /// nodes with `append_canopy_nodes` (if the canopy is used). This method should be used for + /// batch creation of trees. The indexing of such batches should be done off-chain. The + /// programs calling this instruction should take care of ensuring the indexing is possible. 
+ /// For example, staking may be required to ensure the tree creator has some responsibility + /// for what is being indexed. If indexing is not possible, there should be a mechanism to + /// penalize the tree creator. + pub fn init_prepared_tree_with_root( + ctx: Context, + root: [u8; 32], + rightmost_leaf: [u8; 32], + rightmost_index: u32, + ) -> Result<()> { + require_eq!( + *ctx.accounts.merkle_tree.owner, + crate::id(), + AccountCompressionError::IncorrectAccountOwner + ); + let mut merkle_tree_bytes = ctx.accounts.merkle_tree.try_borrow_mut_data()?; + + let (header_bytes, rest) = + merkle_tree_bytes.split_at_mut(CONCURRENT_MERKLE_TREE_HEADER_SIZE_V1); + // the header should already be initialized with prepare_batch_merkle_tree + let header = ConcurrentMerkleTreeHeader::try_from_slice(header_bytes)?; + header.assert_valid_authority(&ctx.accounts.authority.key())?; + header.assert_is_batch_initialized()?; + let merkle_tree_size = merkle_tree_get_size(&header)?; + let (tree_bytes, canopy_bytes) = rest.split_at_mut(merkle_tree_size); + // check the canopy root matches the tree root + check_canopy_root(canopy_bytes, &root, header.get_max_depth())?; + // verify the canopy does not contain any nodes to the right of the rightmost leaf + check_canopy_no_nodes_to_right_of_index( + canopy_bytes, + header.get_max_depth(), + rightmost_index, + )?; + + // Get rightmost proof from accounts + let mut proof = vec![]; + for node in ctx.remaining_accounts.iter() { + proof.push(node.key().to_bytes()); + } + fill_in_proof_from_canopy( + canopy_bytes, + header.get_max_depth(), + rightmost_index, + &mut proof, + )?; + assert_eq!(proof.len(), header.get_max_depth() as usize); + + let id = ctx.accounts.merkle_tree.key(); + // A call is made to ConcurrentMerkleTree::initialize_with_root + let args = &InitializeWithRootArgs { + root, + rightmost_leaf, + proof_vec: proof, + index: rightmost_index, + }; + let change_log = merkle_tree_initialize_with_root(&header, id, tree_bytes, args)?; 
+ update_canopy(canopy_bytes, header.get_max_depth(), Some(&change_log))?; + wrap_event( + &AccountCompressionEvent::ChangeLog(*change_log), + &ctx.accounts.noop, + ) + } /// Executes an instruction that overwrites a leaf node. /// Composing programs should check that the data hashed into previous_leaf @@ -466,7 +567,7 @@ pub mod spl_account_compression { let (tree_bytes, canopy_bytes) = rest.split_at_mut(merkle_tree_size); let id = ctx.accounts.merkle_tree.key(); - merkle_tree_apply_fn_mut!(header, id, tree_bytes, prove_tree_is_empty,)?; + assert_tree_is_empty(&header, id, tree_bytes)?; // Close merkle tree account // 1. Move lamports diff --git a/account-compression/programs/account-compression/src/state/concurrent_merkle_tree_header.rs b/account-compression/programs/account-compression/src/state/concurrent_merkle_tree_header.rs index 6d326b76142..29c0a38f71c 100644 --- a/account-compression/programs/account-compression/src/state/concurrent_merkle_tree_header.rs +++ b/account-compression/programs/account-compression/src/state/concurrent_merkle_tree_header.rs @@ -65,9 +65,14 @@ pub struct ConcurrentMerkleTreeHeaderDataV1 { /// Provides a lower-bound on what slot to start (re-)building a tree from. creation_slot: u64, + /// A flag indicating whether the tree has been initialized with a root. + /// This field was added together with the `finalize_tree_with_root` instruction. + /// It takes 1 byte of space taken from the previous padding for existing accounts. 
+ is_batch_initialized: bool, + /// Needs padding for the account to be 8-byte aligned /// 8-byte alignment is necessary to zero-copy the SPL ConcurrentMerkleTree - _padding: [u8; 6], + _padding: [u8; 5], } #[repr(C)] @@ -95,6 +100,24 @@ impl ConcurrentMerkleTreeHeader { header.max_depth = max_depth; header.authority = *authority; header.creation_slot = creation_slot; + // is_batch_initialized is left false by default + } + } + } + + /// Initializes the header with the given parameters and sets the `is_batch_initialized` flag to + /// true. + pub fn initialize_batched( + &mut self, + max_depth: u32, + max_buffer_size: u32, + authority: &Pubkey, + creation_slot: u64, + ) { + self.initialize(max_depth, max_buffer_size, authority, creation_slot); + match self.header { + ConcurrentMerkleTreeHeaderData::V1(ref mut header) => { + header.is_batch_initialized = true; } } } @@ -117,6 +140,12 @@ impl ConcurrentMerkleTreeHeader { } } + pub fn get_is_batch_initialized(&self) -> bool { + match &self.header { + ConcurrentMerkleTreeHeaderData::V1(header) => header.is_batch_initialized, + } + } + pub fn set_new_authority(&mut self, new_authority: &Pubkey) { match self.header { ConcurrentMerkleTreeHeaderData::V1(ref mut header) => { @@ -155,6 +184,18 @@ impl ConcurrentMerkleTreeHeader { } Ok(()) } + + pub fn assert_is_batch_initialized(&self) -> Result<()> { + match &self.header { + ConcurrentMerkleTreeHeaderData::V1(header) => { + require!( + header.is_batch_initialized, + AccountCompressionError::BatchNotInitialized + ); + } + } + Ok(()) + } } pub fn merkle_tree_get_size(header: &ConcurrentMerkleTreeHeader) -> Result { @@ -166,10 +207,10 @@ pub fn merkle_tree_get_size(header: &ConcurrentMerkleTreeHeader) -> Result Ok(size_of::>()), (8, 16) => Ok(size_of::>()), (9, 16) => Ok(size_of::>()), - (10, 32)=> Ok(size_of::>()), - (11, 32)=> Ok(size_of::>()), - (12, 32)=> Ok(size_of::>()), - (13, 32)=> Ok(size_of::>()), + (10, 32) => Ok(size_of::>()), + (11, 32) => Ok(size_of::>()), + (12, 
32) => Ok(size_of::>()), + (13, 32) => Ok(size_of::>()), (14, 64) => Ok(size_of::>()), (14, 256) => Ok(size_of::>()), (14, 1024) => Ok(size_of::>()), diff --git a/account-compression/sdk/idl/spl_account_compression.json b/account-compression/sdk/idl/spl_account_compression.json index 1a5de6f83d9..a9477f25eca 100644 --- a/account-compression/sdk/idl/spl_account_compression.json +++ b/account-compression/sdk/idl/spl_account_compression.json @@ -1,5 +1,5 @@ { - "version": "0.2.0", + "version": "0.3.1", "name": "spl_account_compression", "instructions": [ { @@ -51,18 +51,171 @@ } ] }, + { + "name": "prepareBatchMerkleTree", + "docs": [ + "In order to initialize a tree with a root, we need to create the tree on-chain first with", + "the proper authority. The tree might contain a canopy, which is a cache of the uppermost", + "nodes. The canopy is used to decrease the size of the proof required to update the tree.", + "If the tree is expected to have a canopy, it needs to be prefilled with the necessary nodes.", + "There are 2 ways to initialize a merkle tree:", + "1. Initialize an empty tree", + "2. Initialize a tree with a root and leaf", + "For the former case, the canopy will be empty which is expected for an empty tree. The", + "expected flow is `init_empty_merkle_tree`. For the latter case, the canopy should be", + "filled with the necessary nodes to render the tree usable. Thus we need to prefill the", + "canopy with the necessary nodes. The expected flow for a tree without canopy is", + "`prepare_batch_merkle_tree` -> `init_prepared_tree_with_root`. The expected flow for a tree", + "with canopy is `prepare_batch_merkle_tree` -> `append_canopy_nodes` (multiple times", + "until all of the canopy is filled) -> `init_prepared_tree_with_root`. This instruction", + "initializes the tree header while leaving the tree itself uninitialized. This allows", + "distinguishing between an empty tree and a tree prepare to be initialized with a root." 
+ ], + "accounts": [ + { + "name": "merkleTree", + "isMut": true, + "isSigner": false + }, + { + "name": "authority", + "isMut": false, + "isSigner": true, + "docs": [ + "Authority that controls write-access to the tree", + "Typically a program, e.g., the Bubblegum contract validates that leaves are valid NFTs." + ] + }, + { + "name": "noop", + "isMut": false, + "isSigner": false, + "docs": ["Program used to emit changelogs as cpi instruction data."] + } + ], + "args": [ + { + "name": "maxDepth", + "type": "u32" + }, + { + "name": "maxBufferSize", + "type": "u32" + } + ] + }, + { + "name": "appendCanopyNodes", + "docs": [ + "This instruction pre-initializes the canopy with the specified leaf nodes of the canopy.", + "This is intended to be used after `prepare_batch_merkle_tree` and in conjunction with the", + "`init_prepared_tree_with_root` instruction that'll finalize the tree initialization.", + "The canopy is used to cache the uppermost nodes of the tree, which allows for a smaller", + "proof size when updating the tree. The canopy should be filled with the necessary nodes", + "before calling `init_prepared_tree_with_root`. You may call this instruction multiple", + "times to fill the canopy with the necessary nodes. The canopy may be filled with the", + "nodes in any order. The already filled nodes may be replaced with new nodes before calling", + "`init_prepared_tree_with_root` if the step was done in error.", + "The canopy should be filled with all the nodes that are to the left of the rightmost", + "leaf of the tree before calling `init_prepared_tree_with_root`. The canopy should not", + "contain any nodes to the right of the rightmost leaf of the tree.", + "This instruction calculates and filles in all the canopy nodes \"above\" the provided ones.", + "The validation of the canopy is done in the `init_prepared_tree_with_root` instruction." 
+ ], + "accounts": [ + { + "name": "merkleTree", + "isMut": true, + "isSigner": false + }, + { + "name": "authority", + "isMut": false, + "isSigner": true, + "docs": [ + "Authority that controls write-access to the tree", + "Typically a program, e.g., the Bubblegum contract validates that leaves are valid NFTs." + ] + }, + { + "name": "noop", + "isMut": false, + "isSigner": false, + "docs": ["Program used to emit changelogs as cpi instruction data."] + } + ], + "args": [ + { + "name": "startIndex", + "type": "u32" + }, + { + "name": "canopyNodes", + "type": { + "vec": { + "array": ["u8", 32] + } + } + } + ] + }, + { + "name": "initPreparedTreeWithRoot", + "docs": [ + "Initializes a prepared tree with a root and a rightmost leaf. The rightmost leaf is used to", + "verify the canopy if the tree has it. Before calling this instruction, the tree should be", + "prepared with `prepare_batch_merkle_tree` and the canopy should be filled with the necessary", + "nodes with `append_canopy_nodes` (if the canopy is used). This method should be used for", + "batch creation of trees. The indexing of such batches should be done off-chain. The", + "programs calling this instruction should take care of ensuring the indexing is possible.", + "For example, staking may be required to ensure the tree creator has some responsibility", + "for what is being indexed. If indexing is not possible, there should be a mechanism to", + "penalize the tree creator." + ], + "accounts": [ + { + "name": "merkleTree", + "isMut": true, + "isSigner": false + }, + { + "name": "authority", + "isMut": false, + "isSigner": true, + "docs": [ + "Authority that controls write-access to the tree", + "Typically a program, e.g., the Bubblegum contract validates that leaves are valid NFTs." 
+ ] + }, + { + "name": "noop", + "isMut": false, + "isSigner": false, + "docs": ["Program used to emit changelogs as cpi instruction data."] + } + ], + "args": [ + { + "name": "root", + "type": { + "array": ["u8", 32] + } + }, + { + "name": "rightmostLeaf", + "type": { + "array": ["u8", 32] + } + }, + { + "name": "rightmostIndex", + "type": "u32" + } + ] + }, { "name": "replaceLeaf", "docs": [ - "Note:", - "Supporting this instruction open a security vulnerability for indexers.", - "This instruction has been deemed unusable for publicly indexed compressed NFTs.", - "Indexing batched data in this way requires indexers to read in the `uri`s onto physical storage", - "and then into their database. This opens up a DOS attack vector, whereby this instruction is", - "repeatedly invoked, causing indexers to fail.", - "", - "Because this instruction was deemed insecure, this instruction has been removed", - "until secure usage is available on-chain.", "Executes an instruction that overwrites a leaf node.", "Composing programs should check that the data hashed into previous_leaf", "matches the authority information necessary to execute this instruction." @@ -407,6 +560,15 @@ ], "type": "u64" }, + { + "name": "isBatchInitialized", + "docs": [ + "A flag indicating whether the tree has been initialized with a root.", + "This field was added together with the `finalize_tree_with_root` instruction.", + "It takes 1 byte of space taken from the previous padding for existing accounts." 
+ ], + "type": "bool" + }, { "name": "padding", "docs": [ @@ -414,7 +576,7 @@ "8-byte alignment is necessary to zero-copy the SPL ConcurrentMerkleTree" ], "type": { - "array": ["u8", 6] + "array": ["u8", 5] } } ] @@ -570,12 +732,37 @@ "code": 6008, "name": "LeafIndexOutOfBounds", "msg": "Leaf index of concurrent merkle tree is out of bounds" + }, + { + "code": 6009, + "name": "CanopyNotAllocated", + "msg": "Tree was initialized without allocating space for the canopy" + }, + { + "code": 6010, + "name": "TreeAlreadyInitialized", + "msg": "Tree was already initialized" + }, + { + "code": 6011, + "name": "BatchNotInitialized", + "msg": "Tree header was not initialized for batch processing" + }, + { + "code": 6012, + "name": "CanopyRootMismatch", + "msg": "Canopy root does not match the root of the tree" + }, + { + "code": 6013, + "name": "CanopyRightmostLeafMismatch", + "msg": "Canopy contains nodes to the right of the rightmost leaf of the tree" } ], "metadata": { "address": "cmtDvXumGCrqC1Age74AVPhSRVXJMd8PJS91L8KbNCK", "origin": "anchor", - "binaryVersion": "0.25.0", - "libVersion": "0.25.0" + "binaryVersion": "0.29.0", + "libVersion": "0.29.0" } } diff --git a/account-compression/sdk/package.json b/account-compression/sdk/package.json index 3b44d78d9c2..9d2e1d9c165 100644 --- a/account-compression/sdk/package.json +++ b/account-compression/sdk/package.json @@ -39,7 +39,7 @@ "lint:fix": "eslint . 
--fix --ext .js,.ts", "docs": "rm -rf docs/ && typedoc --out docs", "deploy:docs": "npm run docs && gh-pages --dest account-compression/sdk --dist docs --dotfiles", - "start-validator": "solana-test-validator --reset --quiet --bpf-program cmtDvXumGCrqC1Age74AVPhSRVXJMd8PJS91L8KbNCK ../target/deploy/spl_account_compression.so --bpf-program noopb9bkMVfRPU8AsbpTUg8AQkHtKwMYZiFUjNRtMmV ../target/deploy/spl_noop.so", + "start-validator": "solana-test-validator --reset --quiet --bpf-program cmtDvXumGCrqC1Age74AVPhSRVXJMd8PJS91L8KbNCK ../target/deploy/spl_account_compression.so --bpf-program noopb9bkMVfRPU8AsbpTUg8AQkHtKwMYZiFUjNRtMmV ../target/deploy/spl_noop.so --account 27QMkDMpBoAhmWj6xxQNYdqXZL5nnC8tkZcEtkNxCqeX pre-batch-init-tree-account.json", "run-tests": "jest tests --detectOpenHandles", "run-tests:events": "jest tests/events --detectOpenHandles", "run-tests:accounts": "jest tests/accounts --detectOpenHandles", diff --git a/account-compression/sdk/src/accounts/ConcurrentMerkleTreeAccount.ts b/account-compression/sdk/src/accounts/ConcurrentMerkleTreeAccount.ts index 2e602599b18..bef2f4954cb 100644 --- a/account-compression/sdk/src/accounts/ConcurrentMerkleTreeAccount.ts +++ b/account-compression/sdk/src/accounts/ConcurrentMerkleTreeAccount.ts @@ -121,6 +121,14 @@ export class ConcurrentMerkleTreeAccount { getCanopyDepth(): number { return getCanopyDepth(this.canopy.canopyBytes.length); } + + /** + * Returns the flag that indicates if the tree has been batch initialized + * @returns the flag + */ + getIsBatchInitialized(): boolean { + return this.getHeaderV1().isBatchInitialized; + } } /** diff --git a/account-compression/sdk/src/generated/errors/index.ts b/account-compression/sdk/src/generated/errors/index.ts index 4600510da7f..03066ebcf49 100644 --- a/account-compression/sdk/src/generated/errors/index.ts +++ b/account-compression/sdk/src/generated/errors/index.ts @@ -194,6 +194,106 @@ export class LeafIndexOutOfBoundsError extends Error { 
createErrorFromCodeLookup.set(0x1778, () => new LeafIndexOutOfBoundsError()); createErrorFromNameLookup.set('LeafIndexOutOfBounds', () => new LeafIndexOutOfBoundsError()); +/** + * CanopyNotAllocated: 'Tree was initialized without allocating space for the canopy' + * + * @category Errors + * @category generated + */ +export class CanopyNotAllocatedError extends Error { + readonly code: number = 0x1779; + readonly name: string = 'CanopyNotAllocated'; + constructor() { + super('Tree was initialized without allocating space for the canopy'); + if (typeof Error.captureStackTrace === 'function') { + Error.captureStackTrace(this, CanopyNotAllocatedError); + } + } +} + +createErrorFromCodeLookup.set(0x1779, () => new CanopyNotAllocatedError()); +createErrorFromNameLookup.set('CanopyNotAllocated', () => new CanopyNotAllocatedError()); + +/** + * TreeAlreadyInitialized: 'Tree was already initialized' + * + * @category Errors + * @category generated + */ +export class TreeAlreadyInitializedError extends Error { + readonly code: number = 0x177a; + readonly name: string = 'TreeAlreadyInitialized'; + constructor() { + super('Tree was already initialized'); + if (typeof Error.captureStackTrace === 'function') { + Error.captureStackTrace(this, TreeAlreadyInitializedError); + } + } +} + +createErrorFromCodeLookup.set(0x177a, () => new TreeAlreadyInitializedError()); +createErrorFromNameLookup.set('TreeAlreadyInitialized', () => new TreeAlreadyInitializedError()); + +/** + * BatchNotInitialized: 'Tree header was not initialized for batch processing' + * + * @category Errors + * @category generated + */ +export class BatchNotInitializedError extends Error { + readonly code: number = 0x177b; + readonly name: string = 'BatchNotInitialized'; + constructor() { + super('Tree header was not initialized for batch processing'); + if (typeof Error.captureStackTrace === 'function') { + Error.captureStackTrace(this, BatchNotInitializedError); + } + } +} + +createErrorFromCodeLookup.set(0x177b, 
() => new BatchNotInitializedError()); +createErrorFromNameLookup.set('BatchNotInitialized', () => new BatchNotInitializedError()); + +/** + * CanopyRootMismatch: 'Canopy root does not match the root of the tree' + * + * @category Errors + * @category generated + */ +export class CanopyRootMismatchError extends Error { + readonly code: number = 0x177c; + readonly name: string = 'CanopyRootMismatch'; + constructor() { + super('Canopy root does not match the root of the tree'); + if (typeof Error.captureStackTrace === 'function') { + Error.captureStackTrace(this, CanopyRootMismatchError); + } + } +} + +createErrorFromCodeLookup.set(0x177c, () => new CanopyRootMismatchError()); +createErrorFromNameLookup.set('CanopyRootMismatch', () => new CanopyRootMismatchError()); + +/** + * CanopyRightmostLeafMismatch: 'Canopy contains nodes to the right of the rightmost leaf of the tree' + * + * @category Errors + * @category generated + */ +export class CanopyRightmostLeafMismatchError extends Error { + readonly code: number = 0x177d; + readonly name: string = 'CanopyRightmostLeafMismatch'; + constructor() { + super('Canopy contains nodes to the right of the rightmost leaf of the tree'); + if (typeof Error.captureStackTrace === 'function') { + Error.captureStackTrace(this, CanopyRightmostLeafMismatchError); + } + } +} + +createErrorFromCodeLookup.set(0x177d, () => new CanopyRightmostLeafMismatchError()); +createErrorFromNameLookup.set('CanopyRightmostLeafMismatch', () => new CanopyRightmostLeafMismatchError()); + /** * Attempts to resolve a custom program error from the provided error code. 
* @category Errors diff --git a/account-compression/sdk/src/generated/instructions/appendCanopyNodes.ts b/account-compression/sdk/src/generated/instructions/appendCanopyNodes.ts new file mode 100644 index 00000000000..5cf4ddff765 --- /dev/null +++ b/account-compression/sdk/src/generated/instructions/appendCanopyNodes.ts @@ -0,0 +1,105 @@ +/** + * This code was GENERATED using the solita package. + * Please DO NOT EDIT THIS FILE, instead rerun solita to update it or write a wrapper to add functionality. + * + * See: https://github.com/metaplex-foundation/solita + */ + +import * as beet from '@metaplex-foundation/beet'; +import * as web3 from '@solana/web3.js'; + +/** + * @category Instructions + * @category AppendCanopyNodes + * @category generated + */ +export type AppendCanopyNodesInstructionArgs = { + canopyNodes: number[] /* size: 32 */[]; + startIndex: number; +}; +/** + * @category Instructions + * @category AppendCanopyNodes + * @category generated + */ +export const appendCanopyNodesStruct = new beet.FixableBeetArgsStruct< + AppendCanopyNodesInstructionArgs & { + instructionDiscriminator: number[] /* size: 8 */; + } +>( + [ + ['instructionDiscriminator', beet.uniformFixedSizeArray(beet.u8, 8)], + ['startIndex', beet.u32], + ['canopyNodes', beet.array(beet.uniformFixedSizeArray(beet.u8, 32))], + ], + 'AppendCanopyNodesInstructionArgs', +); +/** + * Accounts required by the _appendCanopyNodes_ instruction + * + * @property [_writable_] merkleTree + * @property [**signer**] authority + * @property [] noop + * @category Instructions + * @category AppendCanopyNodes + * @category generated + */ +export type AppendCanopyNodesInstructionAccounts = { + anchorRemainingAccounts?: web3.AccountMeta[]; + authority: web3.PublicKey; + merkleTree: web3.PublicKey; + noop: web3.PublicKey; +}; + +export const appendCanopyNodesInstructionDiscriminator = [139, 155, 238, 167, 11, 243, 132, 205]; + +/** + * Creates a _AppendCanopyNodes_ instruction. 
+ * + * @param accounts that will be accessed while the instruction is processed + * @param args to provide as instruction data to the program + * + * @category Instructions + * @category AppendCanopyNodes + * @category generated + */ +export function createAppendCanopyNodesInstruction( + accounts: AppendCanopyNodesInstructionAccounts, + args: AppendCanopyNodesInstructionArgs, + programId = new web3.PublicKey('cmtDvXumGCrqC1Age74AVPhSRVXJMd8PJS91L8KbNCK'), +) { + const [data] = appendCanopyNodesStruct.serialize({ + instructionDiscriminator: appendCanopyNodesInstructionDiscriminator, + ...args, + }); + const keys: web3.AccountMeta[] = [ + { + isSigner: false, + isWritable: true, + pubkey: accounts.merkleTree, + }, + { + isSigner: true, + isWritable: false, + pubkey: accounts.authority, + }, + { + isSigner: false, + isWritable: false, + pubkey: accounts.noop, + }, + ]; + + if (accounts.anchorRemainingAccounts != null) { + for (const acc of accounts.anchorRemainingAccounts) { + keys.push(acc); + } + } + + const ix = new web3.TransactionInstruction({ + data, + keys, + programId, + }); + return ix; +} diff --git a/account-compression/sdk/src/generated/instructions/index.ts b/account-compression/sdk/src/generated/instructions/index.ts index 7194bbb5b9e..10605ab4704 100644 --- a/account-compression/sdk/src/generated/instructions/index.ts +++ b/account-compression/sdk/src/generated/instructions/index.ts @@ -1,7 +1,10 @@ export * from './append'; +export * from './appendCanopyNodes'; export * from './closeEmptyTree'; export * from './initEmptyMerkleTree'; +export * from './initPreparedTreeWithRoot'; export * from './insertOrAppend'; +export * from './prepareBatchMerkleTree'; export * from './replaceLeaf'; export * from './transferAuthority'; export * from './verifyLeaf'; diff --git a/account-compression/sdk/src/generated/instructions/initPreparedTreeWithRoot.ts b/account-compression/sdk/src/generated/instructions/initPreparedTreeWithRoot.ts new file mode 100644 index 
00000000000..b729a877a95 --- /dev/null +++ b/account-compression/sdk/src/generated/instructions/initPreparedTreeWithRoot.ts @@ -0,0 +1,107 @@ +/** + * This code was GENERATED using the solita package. + * Please DO NOT EDIT THIS FILE, instead rerun solita to update it or write a wrapper to add functionality. + * + * See: https://github.com/metaplex-foundation/solita + */ + +import * as beet from '@metaplex-foundation/beet'; +import * as web3 from '@solana/web3.js'; + +/** + * @category Instructions + * @category InitPreparedTreeWithRoot + * @category generated + */ +export type InitPreparedTreeWithRootInstructionArgs = { + rightmostIndex: number; + rightmostLeaf: number[] /* size: 32 */; + root: number[] /* size: 32 */; +}; +/** + * @category Instructions + * @category InitPreparedTreeWithRoot + * @category generated + */ +export const initPreparedTreeWithRootStruct = new beet.BeetArgsStruct< + InitPreparedTreeWithRootInstructionArgs & { + instructionDiscriminator: number[] /* size: 8 */; + } +>( + [ + ['instructionDiscriminator', beet.uniformFixedSizeArray(beet.u8, 8)], + ['root', beet.uniformFixedSizeArray(beet.u8, 32)], + ['rightmostLeaf', beet.uniformFixedSizeArray(beet.u8, 32)], + ['rightmostIndex', beet.u32], + ], + 'InitPreparedTreeWithRootInstructionArgs', +); +/** + * Accounts required by the _initPreparedTreeWithRoot_ instruction + * + * @property [_writable_] merkleTree + * @property [**signer**] authority + * @property [] noop + * @category Instructions + * @category InitPreparedTreeWithRoot + * @category generated + */ +export type InitPreparedTreeWithRootInstructionAccounts = { + anchorRemainingAccounts?: web3.AccountMeta[]; + authority: web3.PublicKey; + merkleTree: web3.PublicKey; + noop: web3.PublicKey; +}; + +export const initPreparedTreeWithRootInstructionDiscriminator = [218, 248, 192, 55, 91, 205, 122, 10]; + +/** + * Creates a _InitPreparedTreeWithRoot_ instruction. 
+ * + * @param accounts that will be accessed while the instruction is processed + * @param args to provide as instruction data to the program + * + * @category Instructions + * @category InitPreparedTreeWithRoot + * @category generated + */ +export function createInitPreparedTreeWithRootInstruction( + accounts: InitPreparedTreeWithRootInstructionAccounts, + args: InitPreparedTreeWithRootInstructionArgs, + programId = new web3.PublicKey('cmtDvXumGCrqC1Age74AVPhSRVXJMd8PJS91L8KbNCK'), +) { + const [data] = initPreparedTreeWithRootStruct.serialize({ + instructionDiscriminator: initPreparedTreeWithRootInstructionDiscriminator, + ...args, + }); + const keys: web3.AccountMeta[] = [ + { + isSigner: false, + isWritable: true, + pubkey: accounts.merkleTree, + }, + { + isSigner: true, + isWritable: false, + pubkey: accounts.authority, + }, + { + isSigner: false, + isWritable: false, + pubkey: accounts.noop, + }, + ]; + + if (accounts.anchorRemainingAccounts != null) { + for (const acc of accounts.anchorRemainingAccounts) { + keys.push(acc); + } + } + + const ix = new web3.TransactionInstruction({ + data, + keys, + programId, + }); + return ix; +} diff --git a/account-compression/sdk/src/generated/instructions/prepareBatchMerkleTree.ts b/account-compression/sdk/src/generated/instructions/prepareBatchMerkleTree.ts new file mode 100644 index 00000000000..0d5aa007db4 --- /dev/null +++ b/account-compression/sdk/src/generated/instructions/prepareBatchMerkleTree.ts @@ -0,0 +1,105 @@ +/** + * This code was GENERATED using the solita package. + * Please DO NOT EDIT THIS FILE, instead rerun solita to update it or write a wrapper to add functionality. 
+ * + * See: https://github.com/metaplex-foundation/solita + */ + +import * as beet from '@metaplex-foundation/beet'; +import * as web3 from '@solana/web3.js'; + +/** + * @category Instructions + * @category PrepareBatchMerkleTree + * @category generated + */ +export type PrepareBatchMerkleTreeInstructionArgs = { + maxBufferSize: number; + maxDepth: number; +}; +/** + * @category Instructions + * @category PrepareBatchMerkleTree + * @category generated + */ +export const prepareBatchMerkleTreeStruct = new beet.BeetArgsStruct< + PrepareBatchMerkleTreeInstructionArgs & { + instructionDiscriminator: number[] /* size: 8 */; + } +>( + [ + ['instructionDiscriminator', beet.uniformFixedSizeArray(beet.u8, 8)], + ['maxDepth', beet.u32], + ['maxBufferSize', beet.u32], + ], + 'PrepareBatchMerkleTreeInstructionArgs', +); +/** + * Accounts required by the _prepareBatchMerkleTree_ instruction + * + * @property [_writable_] merkleTree + * @property [**signer**] authority + * @property [] noop + * @category Instructions + * @category PrepareBatchMerkleTree + * @category generated + */ +export type PrepareBatchMerkleTreeInstructionAccounts = { + anchorRemainingAccounts?: web3.AccountMeta[]; + authority: web3.PublicKey; + merkleTree: web3.PublicKey; + noop: web3.PublicKey; +}; + +export const prepareBatchMerkleTreeInstructionDiscriminator = [230, 124, 120, 196, 249, 134, 199, 128]; + +/** + * Creates a _PrepareBatchMerkleTree_ instruction. 
+ * + * @param accounts that will be accessed while the instruction is processed + * @param args to provide as instruction data to the program + * + * @category Instructions + * @category PrepareBatchMerkleTree + * @category generated + */ +export function createPrepareBatchMerkleTreeInstruction( + accounts: PrepareBatchMerkleTreeInstructionAccounts, + args: PrepareBatchMerkleTreeInstructionArgs, + programId = new web3.PublicKey('cmtDvXumGCrqC1Age74AVPhSRVXJMd8PJS91L8KbNCK'), +) { + const [data] = prepareBatchMerkleTreeStruct.serialize({ + instructionDiscriminator: prepareBatchMerkleTreeInstructionDiscriminator, + ...args, + }); + const keys: web3.AccountMeta[] = [ + { + isSigner: false, + isWritable: true, + pubkey: accounts.merkleTree, + }, + { + isSigner: true, + isWritable: false, + pubkey: accounts.authority, + }, + { + isSigner: false, + isWritable: false, + pubkey: accounts.noop, + }, + ]; + + if (accounts.anchorRemainingAccounts != null) { + for (const acc of accounts.anchorRemainingAccounts) { + keys.push(acc); + } + } + + const ix = new web3.TransactionInstruction({ + data, + keys, + programId, + }); + return ix; +} diff --git a/account-compression/sdk/src/generated/types/AccountCompressionEvent.ts b/account-compression/sdk/src/generated/types/AccountCompressionEvent.ts index 4bce6560620..ee25ced07a1 100644 --- a/account-compression/sdk/src/generated/types/AccountCompressionEvent.ts +++ b/account-compression/sdk/src/generated/types/AccountCompressionEvent.ts @@ -62,4 +62,4 @@ export const accountCompressionEventBeet = beet.dataEnum; +]) as beet.FixableBeet; diff --git a/account-compression/sdk/src/generated/types/ApplicationDataEvent.ts b/account-compression/sdk/src/generated/types/ApplicationDataEvent.ts index eef6c13b391..d6fd2aff4bf 100644 --- a/account-compression/sdk/src/generated/types/ApplicationDataEvent.ts +++ b/account-compression/sdk/src/generated/types/ApplicationDataEvent.ts @@ -49,4 +49,4 @@ export const applicationDataEventBeet = 
beet.dataEnum; +]) as beet.FixableBeet; diff --git a/account-compression/sdk/src/generated/types/ChangeLogEvent.ts b/account-compression/sdk/src/generated/types/ChangeLogEvent.ts index ffd34de7791..9418cff3bcf 100644 --- a/account-compression/sdk/src/generated/types/ChangeLogEvent.ts +++ b/account-compression/sdk/src/generated/types/ChangeLogEvent.ts @@ -48,4 +48,4 @@ export const changeLogEventBeet = beet.dataEnum([ 'ChangeLogEventRecord["V1"]', ), ], -]) as beet.FixableBeet; +]) as beet.FixableBeet; diff --git a/account-compression/sdk/src/generated/types/ConcurrentMerkleTreeHeaderData.ts b/account-compression/sdk/src/generated/types/ConcurrentMerkleTreeHeaderData.ts index f49f8f38996..3c1c8011f10 100644 --- a/account-compression/sdk/src/generated/types/ConcurrentMerkleTreeHeaderData.ts +++ b/account-compression/sdk/src/generated/types/ConcurrentMerkleTreeHeaderData.ts @@ -53,4 +53,4 @@ export const concurrentMerkleTreeHeaderDataBeet = beet.dataEnum; +]) as beet.FixableBeet; diff --git a/account-compression/sdk/src/generated/types/ConcurrentMerkleTreeHeaderDataV1.ts b/account-compression/sdk/src/generated/types/ConcurrentMerkleTreeHeaderDataV1.ts index 19a48b0d366..7e3b0e54e78 100644 --- a/account-compression/sdk/src/generated/types/ConcurrentMerkleTreeHeaderDataV1.ts +++ b/account-compression/sdk/src/generated/types/ConcurrentMerkleTreeHeaderDataV1.ts @@ -11,9 +11,10 @@ import * as web3 from '@solana/web3.js'; export type ConcurrentMerkleTreeHeaderDataV1 = { authority: web3.PublicKey; creationSlot: beet.bignum; + isBatchInitialized: boolean; maxBufferSize: number; maxDepth: number; - padding: number[] /* size: 6 */; + padding: number[] /* size: 5 */; }; /** @@ -26,7 +27,8 @@ export const concurrentMerkleTreeHeaderDataV1Beet = new beet.BeetArgsStruct[] | Buffer[], + startIndex: number, +): TransactionInstruction { + return createAppendCanopyNodesInstruction( + { + authority, + merkleTree, + noop: SPL_NOOP_PROGRAM_ID, + }, + { + canopyNodes: canopyNodes.map(node 
=> Array.from(node)), + startIndex, + }, + ); +} + +/** + * Helper function for {@link createInitPreparedTreeWithRootInstruction} + * @param merkleTree + * @param authority + * @param root + * @param rightmostLeaf + * @param rightmostIndex + * @param proof + * @returns + */ +export function createInitPreparedTreeWithRootIx( + merkleTree: PublicKey, + authority: PublicKey, + root: ArrayLike | Buffer, + rightmostLeaf: ArrayLike | Buffer, + rightmostIndex: number, + proof: Buffer[], +): TransactionInstruction { + return createInitPreparedTreeWithRootInstruction( + { + anchorRemainingAccounts: proof.map(node => { + return { + isSigner: false, + isWritable: false, + pubkey: new PublicKey(node), + }; + }), + authority, + merkleTree, + noop: SPL_NOOP_PROGRAM_ID, + }, + { + rightmostIndex, + rightmostLeaf: Array.from(rightmostLeaf), + root: Array.from(root), + }, + ); +} + /** * Helper function for {@link createReplaceLeafInstruction} * @param merkleTree diff --git a/account-compression/sdk/tests/accountCompression.test.ts b/account-compression/sdk/tests/accountCompression.test.ts index d7a3338b7eb..deb7a2e3173 100644 --- a/account-compression/sdk/tests/accountCompression.test.ts +++ b/account-compression/sdk/tests/accountCompression.test.ts @@ -8,15 +8,21 @@ import * as crypto from 'crypto'; import { ConcurrentMerkleTreeAccount, + createAppendCanopyNodesIx, createAppendIx, createCloseEmptyTreeInstruction, + createCloseEmptyTreeIx, + createInitEmptyMerkleTreeIx, + createInitPreparedTreeWithRootIx, createReplaceIx, createTransferAuthorityIx, createVerifyLeafIx, + prepareTreeIx, ValidDepthSizePair, } from '../src'; import { hash, MerkleTree } from '../src/merkle-tree'; -import { createTreeOnChain, execute } from './utils'; +import { assertCMTProperties } from './accounts/concurrentMerkleTreeAccount.test'; +import { createTreeOnChain, execute, prepareTree } from './utils'; // eslint-disable-next-line no-empty describe('Account Compression', () => { @@ -54,6 +60,547 @@ 
describe('Account Compression', () => { ); }); + describe('Having prepared a tree without canopy', () => { + const depth = 3; + const size = 8; + const canopyDepth = 0; + const leaves = [ + crypto.randomBytes(32), + crypto.randomBytes(32), + crypto.randomBytes(32), + crypto.randomBytes(32), + crypto.randomBytes(32), + crypto.randomBytes(32), + crypto.randomBytes(32), + crypto.randomBytes(32), + ]; + let anotherKeyPair: Keypair; + let another: PublicKey; + + beforeEach(async () => { + const cmtKeypair = await prepareTree({ + canopyDepth, + depthSizePair: { + maxBufferSize: size, + maxDepth: depth, + }, + payer: payerKeypair, + provider, + }); + cmt = cmtKeypair.publicKey; + anotherKeyPair = Keypair.generate(); + another = anotherKeyPair.publicKey; + await provider.connection.confirmTransaction( + await provider.connection.requestAirdrop(another, 1e10), + 'confirmed', + ); + }); + it('Should be able to finalize the tree', async () => { + const merkleTreeRaw = new MerkleTree(leaves); + const root = merkleTreeRaw.root; + const leaf = leaves[leaves.length - 1]; + const canopyDepth = 0; + const finalize = createInitPreparedTreeWithRootIx( + cmt, + payer, + root, + leaf, + leaves.length - 1, + merkleTreeRaw.getProof(leaves.length - 1).proof, + ); + + await execute(provider, [finalize], [payerKeypair]); + + const splCMT = await ConcurrentMerkleTreeAccount.fromAccountAddress(connection, cmt); + assertCMTProperties(splCMT, depth, size, payer, root, canopyDepth, true); + assert(splCMT.getBufferSize() == 1, 'Buffer size does not match'); + }); + it('Should fail to append canopy node for a tree without canopy', async () => { + const appendIx = createAppendCanopyNodesIx(cmt, payer, [crypto.randomBytes(32)], 0); + try { + await execute(provider, [appendIx], [payerKeypair]); + assert(false, 'Canopy appending should have failed to execute for a tree without canopy'); + } catch {} + }); + it('Should fail to finalize the tree with another payer authority', async () => { + const 
merkleTreeRaw = new MerkleTree(leaves); + const root = merkleTreeRaw.root; + const leaf = leaves[leaves.length - 1]; + + const finalize = createInitPreparedTreeWithRootIx( + cmt, + another, + root, + leaf, + leaves.length - 1, + merkleTreeRaw.getProof(leaves.length - 1).proof, + ); + + try { + await execute(provider, [finalize], [anotherKeyPair]); + assert(false, 'Finalizing with another payer should have failed'); + } catch {} + }); + it('Should fail to finalize the tree with a wrong proof', async () => { + const merkleTreeRaw = new MerkleTree(leaves); + const root = merkleTreeRaw.root; + const leaf = leaves[leaves.length - 1]; + // Replace valid proof with random bytes so it is wrong + const proof = merkleTreeRaw.getProof(leaves.length - 1); + proof.proof = proof.proof.map(_ => { + return crypto.randomBytes(32); + }); + + const finalize = createInitPreparedTreeWithRootIx(cmt, payer, root, leaf, leaves.length - 1, proof.proof); + + try { + await execute(provider, [finalize], [payerKeypair]); + assert(false, 'Finalizing with a wrong proof should have failed'); + } catch {} + }); + it('Should fail to double finalize the tree', async () => { + const merkleTreeRaw = new MerkleTree(leaves); + const root = merkleTreeRaw.root; + const leaf = leaves[leaves.length - 1]; + + const finalize = createInitPreparedTreeWithRootIx( + cmt, + payer, + root, + leaf, + leaves.length - 1, + merkleTreeRaw.getProof(leaves.length - 1).proof, + ); + + await execute(provider, [finalize], [payerKeypair]); + + try { + await execute(provider, [finalize], [payerKeypair]); + assert(false, 'Double finalizing should have failed'); + } catch {} + }); + + it('Should be able to close a prepared tree', async () => { + let payerInfo = await provider.connection.getAccountInfo(payer, 'confirmed')!; + let treeInfo = await provider.connection.getAccountInfo(cmt, 'confirmed')!; + + const payerLamports = payerInfo!.lamports; + const treeLamports = treeInfo!.lamports; + + const closeIx = 
createCloseEmptyTreeIx(cmt, payer, payer); + await execute(provider, [closeIx], [payerKeypair]); + + payerInfo = await provider.connection.getAccountInfo(payer, 'confirmed')!; + const finalLamports = payerInfo!.lamports; + assert( + finalLamports === payerLamports + treeLamports - 5000, + 'Expected payer to have received the lamports from the closed tree account', + ); + + treeInfo = await provider.connection.getAccountInfo(cmt, 'confirmed'); + assert(treeInfo === null, 'Expected the merkle tree account info to be null'); + }); + }); + describe('Having prepared a tree with canopy', () => { + const depth = 3; + const size = 8; + const canopyDepth = 2; + const leaves = [ + crypto.randomBytes(32), + crypto.randomBytes(32), + crypto.randomBytes(32), + crypto.randomBytes(32), + crypto.randomBytes(32), + crypto.randomBytes(32), + crypto.randomBytes(32), + crypto.randomBytes(32), + ]; + let anotherKeyPair: Keypair; + let another: PublicKey; + beforeEach(async () => { + const cmtKeypair = await prepareTree({ + canopyDepth, + depthSizePair: { + maxBufferSize: size, + maxDepth: depth, + }, + payer: payerKeypair, + provider, + }); + cmt = cmtKeypair.publicKey; + anotherKeyPair = Keypair.generate(); + another = anotherKeyPair.publicKey; + await provider.connection.confirmTransaction( + await provider.connection.requestAirdrop(another, 1e10), + 'confirmed', + ); + }); + it('Should be able to append a single canopy node', async () => { + const appendIx = createAppendCanopyNodesIx(cmt, payer, [crypto.randomBytes(32)], 0); + await execute(provider, [appendIx], [payerKeypair]); + }); + it('Should be able to append a single canopy node at the index more then 0', async () => { + const appendIx = createAppendCanopyNodesIx(cmt, payer, [crypto.randomBytes(32)], 1); + await execute(provider, [appendIx], [payerKeypair]); + }); + it('Should be able to append several canopy nodes at the start of the node leaves', async () => { + const appendIx = createAppendCanopyNodesIx(cmt, payer, 
[crypto.randomBytes(32), crypto.randomBytes(32)], 0); + await execute(provider, [appendIx], [payerKeypair]); + }); + it('Should fail to append canopy node with another payer authority', async () => { + const appendIx = createAppendCanopyNodesIx(cmt, another, [crypto.randomBytes(32)], 0); + try { + await execute(provider, [appendIx], [anotherKeyPair]); + assert(false, 'Appending with another payer should have failed'); + } catch {} + }); + it('Should fail to append canopy nodes over the limit', async () => { + const appendIx = createAppendCanopyNodesIx( + cmt, + payer, + Array.from({ length: 3 }, () => crypto.randomBytes(32)), + 0, + ); + try { + await execute(provider, [appendIx], [payerKeypair]); + assert(false, 'Appending over the limit should have failed'); + } catch {} + }); + it('Should fail to append canopy nodes over the limit starting from the last index', async () => { + const appendIx = createAppendCanopyNodesIx( + cmt, + payer, + Array.from({ length: 2 }, () => crypto.randomBytes(32)), + 1, + ); + try { + await execute(provider, [appendIx], [payerKeypair]); + assert(false, 'Appending over the limit should have failed'); + } catch {} + }); + it('Should fail to append 0 canopy nodes', async () => { + const appendIx = createAppendCanopyNodesIx(cmt, payer, [], 0); + try { + await execute(provider, [appendIx], [payerKeypair]); + assert(false, 'Appending 0 nodes should have failed'); + } catch {} + }); + it('Should fail to finalize the tree without canopy', async () => { + const merkleTreeRaw = new MerkleTree(leaves); + const root = merkleTreeRaw.root; + const leaf = leaves[leaves.length - 1]; + + const finalize = createInitPreparedTreeWithRootIx( + cmt, + payer, + root, + leaf, + leaves.length - 1, + merkleTreeRaw.getProof(leaves.length - 1).proof, + ); + + try { + await execute(provider, [finalize], [payerKeypair]); + assert(false, 'Finalizing without canopy should have failed'); + } catch {} + }); + it('Should fail to finalize the tree with an incomplete 
canopy', async () => { + const merkleTreeRaw = new MerkleTree(leaves); + const root = merkleTreeRaw.root; + const leaf = leaves[leaves.length - 1]; + + const appendIx = createAppendCanopyNodesIx(cmt, payer, [merkleTreeRaw.leaves[0].parent!.node!], 0); + await execute(provider, [appendIx], [payerKeypair]); + const finalize = createInitPreparedTreeWithRootIx( + cmt, + payer, + root, + leaf, + leaves.length - 1, + merkleTreeRaw.getProof(leaves.length - 1).proof, + ); + + try { + await execute(provider, [finalize], [payerKeypair]); + assert(false, 'Finalization for an incomplete canopy should have failed'); + } catch {} + }); + it('Should finalize the tree with a complete canopy', async () => { + const merkleTreeRaw = new MerkleTree(leaves); + const root = merkleTreeRaw.root; + const leaf = leaves[leaves.length - 1]; + + // take every second leaf and append it's parent node to the canopy + const appendIx = createAppendCanopyNodesIx( + cmt, + payer, + merkleTreeRaw.leaves.filter((_, i) => i % 2 === 0).map(leaf => leaf.parent!.node!), + 0, + ); + await execute(provider, [appendIx], [payerKeypair]); + const finalize = createInitPreparedTreeWithRootIx( + cmt, + payer, + root, + leaf, + leaves.length - 1, + merkleTreeRaw.getProof(leaves.length - 1).proof, + ); + await execute(provider, [finalize], [payerKeypair]); + const splCMT = await ConcurrentMerkleTreeAccount.fromAccountAddress(connection, cmt); + assertCMTProperties(splCMT, depth, size, payer, root, canopyDepth, true); + }); + it('Should be able to setup canopy with several transactions', async () => { + const merkleTreeRaw = new MerkleTree(leaves); + const root = merkleTreeRaw.root; + const leaf = leaves[leaves.length - 1]; + // take every second leaf of the first half of a tree and append it's parent node to the canopy + const appendIx = createAppendCanopyNodesIx( + cmt, + payer, + merkleTreeRaw.leaves + .slice(0, leaves.length / 2) + .filter((_, i) => i % 2 === 0) + .map(leaf => leaf.parent!.node!), + 0, + ); + 
await execute(provider, [appendIx], [payerKeypair]); + // take every second leaf of the second half of a tree and append it's parent node to the canopy + const appendIx2 = createAppendCanopyNodesIx( + cmt, + payer, + merkleTreeRaw.leaves + .slice(leaves.length / 2) + .filter((_, i) => i % 2 === 0) + .map(leaf => leaf.parent!.node!), + 2, + ); + await execute(provider, [appendIx2], [payerKeypair]); + const finalize = createInitPreparedTreeWithRootIx( + cmt, + payer, + root, + leaf, + leaves.length - 1, + merkleTreeRaw.getProof(leaves.length - 1).proof, + ); + await execute(provider, [finalize], [payerKeypair]); + }); + it('Should be able to setup canopy with several transactions in reverse order', async () => { + const merkleTreeRaw = new MerkleTree(leaves); + const root = merkleTreeRaw.root; + const leaf = leaves[leaves.length - 1]; + + const appendIx = createAppendCanopyNodesIx( + cmt, + payer, + merkleTreeRaw.leaves + .slice(leaves.length / 2) + .filter((_, i) => i % 2 === 0) + .map(leaf => leaf.parent!.node!), + 2, + ); + await execute(provider, [appendIx], [payerKeypair]); + const appendIx2 = createAppendCanopyNodesIx( + cmt, + payer, + merkleTreeRaw.leaves + .slice(0, leaves.length / 2) + .filter((_, i) => i % 2 === 0) + .map(leaf => leaf.parent!.node!), + 0, + ); + await execute(provider, [appendIx2], [payerKeypair]); + const finalize = createInitPreparedTreeWithRootIx( + cmt, + payer, + root, + leaf, + leaves.length - 1, + merkleTreeRaw.getProof(leaves.length - 1).proof, + ); + await execute(provider, [finalize], [payerKeypair]); + }); + it('Should be able to replace a canopy node', async () => { + const merkleTreeRaw = new MerkleTree(leaves); + const root = merkleTreeRaw.root; + const leaf = leaves[leaves.length - 1]; + + const appendIx = createAppendCanopyNodesIx( + cmt, + payer, + merkleTreeRaw.leaves + .slice(0, leaves.length / 2) + .filter((_, i) => i % 2 === 0) + .map(leaf => leaf.parent!.node!), + 0, + ); + await execute(provider, [appendIx], 
[payerKeypair]); + const appendIx2 = createAppendCanopyNodesIx(cmt, payer, [crypto.randomBytes(32)], 2); + await execute(provider, [appendIx2], [payerKeypair]); + const replaceIx = createAppendCanopyNodesIx( + cmt, + payer, + merkleTreeRaw.leaves + .slice(leaves.length / 2) + .filter((_, i) => i % 2 === 0) + .map(leaf => leaf.parent!.node!), + 2, + ); + await execute(provider, [replaceIx], [payerKeypair]); + const finalize = createInitPreparedTreeWithRootIx( + cmt, + payer, + root, + leaf, + leaves.length - 1, + merkleTreeRaw.getProof(leaves.length - 1).proof, + ); + await execute(provider, [finalize], [payerKeypair]); + }); + it('Should fail to replace a canopy node for a finalised tree', async () => { + const merkleTreeRaw = new MerkleTree(leaves); + const root = merkleTreeRaw.root; + const leaf = leaves[leaves.length - 1]; + + const appendIx = createAppendCanopyNodesIx( + cmt, + payer, + merkleTreeRaw.leaves.filter((_, i) => i % 2 === 0).map(leaf => leaf.parent!.node!), + 0, + ); + await execute(provider, [appendIx], [payerKeypair]); + const finalize = createInitPreparedTreeWithRootIx( + cmt, + payer, + root, + leaf, + leaves.length - 1, + merkleTreeRaw.getProof(leaves.length - 1).proof, + ); + await execute(provider, [finalize], [payerKeypair]); + const replaceIx = createAppendCanopyNodesIx(cmt, payer, [crypto.randomBytes(32)], 0); + try { + await execute(provider, [replaceIx], [payerKeypair]); + assert(false, 'Replacing a canopy node for a finalised tree should have failed'); + } catch {} + }); + it('Should fail to initialize an empty tree after preparing a tree', async () => { + const ixs = [ + createInitEmptyMerkleTreeIx(cmt, payer, { + maxBufferSize: size, + maxDepth: depth, + }), + ]; + try { + await execute(provider, ixs, [payerKeypair]); + assert(false, 'Initializing an empty tree after preparing a tree should have failed'); + } catch {} + }); + it('Should be able to close a prepared tree after setting the canopy', async () => { + const merkleTreeRaw = 
new MerkleTree(leaves); + + const appendIx = createAppendCanopyNodesIx( + cmt, + payer, + merkleTreeRaw.leaves + .slice(0, leaves.length / 2) + .filter((_, i) => i % 2 === 0) + .map(leaf => leaf.parent!.node!), + 0, + ); + await execute(provider, [appendIx], [payerKeypair]); + let payerInfo = await provider.connection.getAccountInfo(payer, 'confirmed')!; + let treeInfo = await provider.connection.getAccountInfo(cmt, 'confirmed')!; + + const payerLamports = payerInfo!.lamports; + const treeLamports = treeInfo!.lamports; + + const closeIx = createCloseEmptyTreeIx(cmt, payer, payer); + await execute(provider, [closeIx], [payerKeypair]); + + payerInfo = await provider.connection.getAccountInfo(payer, 'confirmed')!; + const finalLamports = payerInfo!.lamports; + assert( + finalLamports === payerLamports + treeLamports - 5000, + 'Expected payer to have received the lamports from the closed tree account', + ); + + treeInfo = await provider.connection.getAccountInfo(cmt, 'confirmed'); + assert(treeInfo === null, 'Expected the merkle tree account info to be null'); + }); + }); + describe('Having prepared an empty tree with canopy', () => { + const depth = 3; + const size = 8; + const canopyDepth = 2; + // empty leaves represent the empty tree + const leaves = [ + Buffer.alloc(32), + Buffer.alloc(32), + Buffer.alloc(32), + Buffer.alloc(32), + Buffer.alloc(32), + Buffer.alloc(32), + Buffer.alloc(32), + Buffer.alloc(32), + ]; + let anotherKeyPair: Keypair; + let another: PublicKey; + beforeEach(async () => { + const cmtKeypair = await prepareTree({ + canopyDepth, + depthSizePair: { + maxBufferSize: size, + maxDepth: depth, + }, + payer: payerKeypair, + provider, + }); + cmt = cmtKeypair.publicKey; + anotherKeyPair = Keypair.generate(); + another = anotherKeyPair.publicKey; + await provider.connection.confirmTransaction( + await provider.connection.requestAirdrop(another, 1e10), + 'confirmed', + ); + }); + + it('Should be able to finalize an empty tree with empty canopy and 
close it afterwards', async () => { + const merkleTreeRaw = new MerkleTree(leaves); + const root = merkleTreeRaw.root; + const leaf = leaves[leaves.length - 1]; + + const finalize = createInitPreparedTreeWithRootIx( + cmt, + payer, + root, + leaf, + leaves.length - 1, + merkleTreeRaw.getProof(leaves.length - 1).proof, + ); + await execute(provider, [finalize], [payerKeypair]); + let payerInfo = await provider.connection.getAccountInfo(payer, 'confirmed')!; + let treeInfo = await provider.connection.getAccountInfo(cmt, 'confirmed')!; + + const payerLamports = payerInfo!.lamports; + const treeLamports = treeInfo!.lamports; + + const closeIx = createCloseEmptyTreeIx(cmt, payer, payer); + await execute(provider, [closeIx], [payerKeypair]); + + payerInfo = await provider.connection.getAccountInfo(payer, 'confirmed')!; + const finalLamports = payerInfo!.lamports; + assert( + finalLamports === payerLamports + treeLamports - 5000, + 'Expected payer to have received the lamports from the closed tree account', + ); + + treeInfo = await provider.connection.getAccountInfo(cmt, 'confirmed'); + assert(treeInfo === null, 'Expected the merkle tree account info to be null'); + }); + }); + describe('Having created a tree with a single leaf', () => { beforeEach(async () => { [cmtKeypair, offChainTree] = await createTreeOnChain(provider, payerKeypair, 1, DEPTH_SIZE_PAIR); @@ -162,6 +709,30 @@ describe('Account Compression', () => { 'Updated on chain root matches root of updated off chain tree', ); }); + + it('Should fail to prepare a batch ready tree for an existing tree', async () => { + const prepareIx = prepareTreeIx(cmt, payer, DEPTH_SIZE_PAIR); + try { + await execute(provider, [prepareIx], [payerKeypair]); + assert(false, 'Prepare a batch tree should have failed for the existing tree'); + } catch {} + }); + + it('Should fail to finalize an existing tree', async () => { + const index = offChainTree.leaves.length - 1; + const finalizeIx = createInitPreparedTreeWithRootIx( + cmt, + 
payer, + offChainTree.root, + offChainTree.leaves[index].node, + index, + offChainTree.getProof(index).proof, + ); + try { + await execute(provider, [finalizeIx], [payerKeypair]); + assert(false, 'Finalize an existing tree should have failed'); + } catch {} + }); }); describe('Examples transferring authority', () => { @@ -476,6 +1047,22 @@ describe('Account Compression', () => { await execute(provider, [replaceIx, replaceBackIx], [payerKeypair], true, true); } }); + + it('Should fail to append a canopy node for an existing tree', async () => { + [cmtKeypair, offChainTree] = await createTreeOnChain( + provider, + payerKeypair, + 0, + { maxBufferSize: 8, maxDepth: DEPTH }, + DEPTH, // Store full tree on chain + ); + cmt = cmtKeypair.publicKey; + const appendIx = createAppendCanopyNodesIx(cmt, payer, [crypto.randomBytes(32)], 0); + try { + await execute(provider, [appendIx], [payerKeypair]); + assert(false, 'Appending a canopy node for an existing tree should have failed'); + } catch {} + }); }); describe(`Having created a tree with 8 leaves`, () => { beforeEach(async () => { diff --git a/account-compression/sdk/tests/accounts/concurrentMerkleTreeAccount.test.ts b/account-compression/sdk/tests/accounts/concurrentMerkleTreeAccount.test.ts index 0b6d4a25e00..5e85aaca0c9 100644 --- a/account-compression/sdk/tests/accounts/concurrentMerkleTreeAccount.test.ts +++ b/account-compression/sdk/tests/accounts/concurrentMerkleTreeAccount.test.ts @@ -8,13 +8,14 @@ import { ALL_DEPTH_SIZE_PAIRS, ConcurrentMerkleTreeAccount, getConcurrentMerkleT import { emptyNode, MerkleTree } from '../../src/merkle-tree'; import { createEmptyTreeOnChain, createTreeOnChain } from '../utils'; -function assertCMTProperties( +export function assertCMTProperties( onChainCMT: ConcurrentMerkleTreeAccount, expectedMaxDepth: number, expectedMaxBufferSize: number, expectedAuthority: PublicKey, expectedRoot: Buffer, expectedCanopyDepth?: number, + expectedIsBatchInitialized = false, ) { assert( 
onChainCMT.getMaxDepth() === expectedMaxDepth, @@ -32,6 +33,10 @@ function assertCMTProperties( 'On chain canopy depth does not match expected canopy depth', ); } + assert( + onChainCMT.getIsBatchInitialized() === expectedIsBatchInitialized, + 'On chain isBatchInitialized does not match expected value', + ); } describe('ConcurrentMerkleTreeAccount tests', () => { @@ -142,4 +147,30 @@ describe('ConcurrentMerkleTreeAccount tests', () => { } }); }); + + describe('Can deserialize an existing CMTAccount from a real on-chain CMT created before the is_batch_initialized field was introduced inplace of the first byte of _padding', () => { + it('Interpreted on-chain fields correctly', async () => { + // The account data was generated by running: + // $ solana account 27QMkDMpBoAhmWj6xxQNYdqXZL5nnC8tkZcEtkNxCqeX \ + // --output-file tests/fixtures/pre-batch-init-tree-account.json \ + // --output json + const deployedAccount = new PublicKey('27QMkDMpBoAhmWj6xxQNYdqXZL5nnC8tkZcEtkNxCqeX'); + const cmt = await ConcurrentMerkleTreeAccount.fromAccountAddress(connection, deployedAccount, 'confirmed'); + const expectedMaxDepth = 10; + const expectedMaxBufferSize = 32; + const expectedCanopyDepth = 0; + const expectedAuthority = new PublicKey('BFNT941iRwYPe2Js64dTJSoksGCptWAwrkKMaSN73XK2'); + const expectedRoot = new PublicKey('83UjseEuEgxyVyDTmrJCQ9QbeksdRZ7KPDZGQYc5cAgF').toBuffer(); + const expectedIsBatchInitialized = false; + await assertCMTProperties( + cmt, + expectedMaxDepth, + expectedMaxBufferSize, + expectedAuthority, + expectedRoot, + expectedCanopyDepth, + expectedIsBatchInitialized, + ); + }); + }); }); diff --git a/account-compression/sdk/tests/fixtures/pre-batch-init-tree-account.json b/account-compression/sdk/tests/fixtures/pre-batch-init-tree-account.json new file mode 100644 index 00000000000..98adaa774dc --- /dev/null +++ b/account-compression/sdk/tests/fixtures/pre-batch-init-tree-account.json @@ -0,0 +1,14 @@ +{ + "pubkey": 
"27QMkDMpBoAhmWj6xxQNYdqXZL5nnC8tkZcEtkNxCqeX", + "account": { + "lamports": 84132480, + "data": [ + "AQAgAAAACgAAAJhDQUO8VHxyuU2+8Gbazk9rGe2MW3Xu3mPjn5qN+Mnd8qEyDgAAAAAAAAAAAAAJAAAAAAAAAAkAAAAAAAAACgAAAAAAAAD53D5/4BbgUO/yYDNPGKXU/jkdggkjGfWWTy4ut8HDpQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAArTIotnb3081ChKVEPxfxlis25JGzCkCyQFhJ5Ze6X7W0wRlRlXxvj2QsSvYc1rJGQP7G3H/GB+6CBqmekkENMCHduaNWgVw/rBAmtt7F3zEkr7rbSFybpaPjOYoEt7qF5Ydpsyob6vHqJzdaRAlaDR+2ZM4t01jn/L+3jCahk0QOsB6/ye0nUAzU38l5Jy0fCRPMn2ZUDX6ABYERCeHPLYh8Ir2HUNNAFqw8ZrX/EC2s3XP2sBTnELUegCKvmhlo/9cBV+SAY/wzyXoFD39kAjO/ZGzJjZUkxrkrzzq1b4OYZ8xffxlrk7rh4n5jIHQkRdKQ8iY4J0mLVP7FOfdWr8761OUIwJi5p+HY/rGZVfsCupZ1WFB4cQlp00QPUFTgAAAAAAAAAABCQcymud9cvMqkqaz7rb8B4fJGipunRCEkcpIwwf1bqgG0jc8ty2NKzqQ11mfpEgaWHx71Zkd+KhVW6g9RF++oo50UCdctGN9/Yj9M997BjhZjf9P0egTPElcLkXHmGEjtm+CkQBf+3Sbkkq2UmWgP+WnCKKzej7Rk25CTxmlTwR0oez8apwcYlgkfdQNxr3jMbmyJlZungLUL0qdF54IhXXYkra5BQiu8imQXVs5vHpaJmrjUhQCiQT0GdoO+zap3nFWBCJyTSPURgM3qEQhr6WeAPbarDAZnu2Hn6KOJBJiGW8/Mdyb2UHzslHH5v92pUD262jHxv9Dxka11yDv8R21qTggJggnyd9Y9xfFQJmMOlsHxK/OXAbJx15vj+kmxMg/C5+70VPL35eDhb5k8H351PyxQoZH0RK2eXBcIbwO6kdDpcOt1lYsFGxVJOtPzJ+Eq2c5sWwdFSlaopv7WAAAAAAAAAAC4lUsocxFm8dZSnJRk0dztzMPjp1Tw++Vr9kJl2nwJK9rUpl3vmHv+1y5vONxkqse+I11hYnCbfPjlMwi1H3TGbXub1fM1JIxVi6J8yee8q1pLyvHD6l8RgltY7GeLcH/U4Ya37FWODitELZNDut7d8YRI66XliIh458WBFM9ZkWXQAtLrm0fso9F+CE4K2hmZHGSd1tnCGMe1CN5PYnAd32gpAsQAHgFgjiYdxQ+Ezfk2PTE2KL6ltcvyeQ2fVk/xBR50NHfWV4D8JytRVz0YXRQ3fYf4EniFtQtPmQ2CntMT1AroOs/TbheIWHf427xgdUB6Nbd0xZqaRnp3PgDnBy4iDWhVohnVlke8mes2HV+4PTvKtS8QvSek76DrEV0oeh2SM/j5OOPFLfsK2gHRGX20vcKOYa3Rcs9g7X+F3uMazFbBC/vFwbCK/Pfjhmj6ZWod2MSVhsq2Xn9ezssqAQAAAAAAAAARmQ/IUesWNRXIOks0o34cvGsvVMbFCIZqluF+zkUg3hdpJ3DYGBOMaX3Jtz5u3/HqSrb4Uio0kx5VrkfTuCStLPB7V+PqjzBlN326Cg9WoxfTC6nEgo++DbKYbMIW+kYXnWqnD0lliL2PQlq/MxvtOYbZSWYNIg687iKjYnZpTKfNIilBb7JXh6KfMm2xDEpsmNy+djJBXPNfcB9y90yP7xXZA4/4HL5hPpQTNnestf54Wvoo2ZFUKHF/b1hmH7EzF4X5SJlrI/RlinkvdEjGtPzaIL2jYEjQ2S0rSYMD9impk0XbfWrsrHNs8UwnVVrAzmYtnvI0q4YFRoHl0hO45BUJhpj4wE
BrSEt11HdPrSj9ENeYuMHmDeN8j7I3oZ0YQ8BhI/FSTMKss2tGL8oa0Z0QPCvQHmL+6bAj38xXGmE/mHkgRdP2Cfy9f++ql/p2oA0Fh6zlzDHT5GjR1V4aAgAAAAAAAAB8dN1Ir+WmUJgJmzlUI8WF6f4d7ht9QkK/wnbsSBjBRpJkz7UEmWcwil8CTYmNwaV2Du1lW32sVcgVEJujtpMImqpKqpjfJAV7T+VhZOwOrz4Cxbhy0Uk6AP5Dzt03oE2am+sbNkvl+u4EBJ7nI9+OF5a2fd+T8QZNfFKuK+sbcJ5MDyIH+szPX7DliqrMLVHTov3qzMMT1BnoZIM7oxf94ksHIuS7WwOlsyXxkLuD/r+QiLUacD4bdxSxxR8+5YlhEhEUKbOZvMJCq4o+0LWkNvYGY7Dy4J3MbXY4yzp/VEu7WuifydqNvwfmiHT9Z/o5T4z1ApBoFyXyblXRAJH9FwVy0dLmQP9M13f6Akv1vxEzPg4SaSZcwK1onN8GLnvz2b4OH2bpevfc8pZId4ukEjaMURY9n7GtYyuvIK8rZS9OoNUDYlgULbDKfheL6sYW/j5plzTZOxRDjZIMyX4pAwAAAAAAAADNp9MXV4O0q8K4yJ4rPS4VXwuq4t6hgk27MVZJKGlunsiS1BPzCUsYrjY3HC+M6PP9IYX6/C1lnCIJ0o+cGJbYD19oYWHldkOUktrvzv1YeIT02RPpXgD7krjwyw6GMkhnmRHM08ykLrx+su31lZ/7InhGHjMGkXE2RSSpISmXIKyTFqBiz4VpTggxrfX9+GjlvMNkhfOFIfePNnEkf+68dCkpR1dgZNsp8j7B2L6NTDXn0xlzzOFCPdYIW+khPYsvornwd8zpbJ5MrdNJ83iC52PruW+e81UFB2tsjX/V8hfGsXTW9Himu9kyZl20vhO5ySw0Z75NoEX2FF5w7z1I9BgV2QhCFgzt0zrTY6OZbibtCjzVuhbEcXSt78iQY6P4u2kfGGabhJ8jRzmrTl7fXpjX/NASK+oOTkveW5gvCh+4k9wetZPvkoCJOSj7rPsxrFs4ac13sWE6OK/Vu+yABAAAAAAAAACMUJhFEGPVsytUOFCvzFCerXY67FhmJvjd7P8/Ab3J9VSkSuNkq51kaKlKK1xhWXrj1UgdBRbh1Esks6jgfWyPNinZuBr83cBaOA09aMy5pE4aD3NM9VnTXi/qfIRcTBhYE15K4fr+Qq+1stfqGG79wM9CwprrCpltIbuAoTjySt4mWHDzbROyEl4QNmaCm/GMyIT5joPvINeBDngrSmzV7J1lAC0hj27wLpADHGbGmL21SgKBIka32HJvVbRnN6GCL+Bk7OmUUNnUoKK0c/mlFguIN7DoiyOWdhCHN3YbM23aZIHefz1HkjUMnW4sIP3jF5I9Za+lJLOR+Y80vL/X8P2Ti5wEFqYOTNjg+y6Yz20KF7Z2GCwOTSgIEshFms939zWviwqjq1KT5KeHoyAVUuHXe6ZaI2l9FIhLOlFDZFz/39I24yvRY3ZCp6XhC5X9EbwbXJ7+dwCwQgbzES1ABQAAAAAAAABjqS9H5fcSRNDGyBaFKz79o8xeMIRKzzlT7sFa/bMX3kkyS5iXq/g0JVqbjB2dQajXxf9E3+IOICDQd1ZPSxHDFJSUjQD9vX44uD9ITmVSGxgS7j59qA7RCYXZadRg+TMmfB9Ji2xLITEiD31ZnNzCAjTvf2nYowheYXqzIK2OpXKvcAwtvYZ6jQUXYQN8MbRpugjApKVETYk3uXwnZE3VK8i5NLxjGokDVRdbGjlKV4Whugr31MZOvQrOhpW0vO3C97zgdgI/HyNGJeILnH3Xjn0b47Lzq1r/0FJx/ECc5D2kXvbpgCQLc1b69+hVg1N4H9/qvMCtpYwILqNbk0My1FzSfUupEqsFEcfhiVXz2uM5JaUm5ei0h8S2zmH/eBAz4smlHxG/xtAT3NnLmEUkI1jtSnIquo2kuKrOoU7Dyv2AE0
zHGJRLQGwDCzfDasneG68qjdK7WioqemFHYB5GBgAAAAAAAAC67j8goNDnD6JGvBqXvwGS77IyYa56EExRm96UvuqOjz6G8UCwmDpNdjVbd0qr1ViFXTtoozYLJPsM1Q3xmxPxAKRZU70n/FaS1N4p4SPQ/TN7i+a/rph3DHMrpd3RsI7OyYZIRPCCqLdQrpA7xz4lXRaEt9hqN96n4UJvafy3d1CQJss98bv5Y+8je4Wb+bqavGOBnT7XQlrcTPnMTYIsJjE5Wjv8iP1IUuo0C4QsTF6CFGT7dfIf3sQx8O7Coz1dficqvARL6riF3SlIZ9V0K7YWDinCWUSqt9s2jul37QjE/wYvGg7E8lz2yeVpXXjDxl4gh1GzOEn0OWDTc+XzJ/yNCzDeI0Z/52C1rj9hMf2D5/Cl5DJAR4HJsk7nVMV9QT88uOxYi5TWVR2qChpSJ0pULmoPTLAiyW47/g7yfqLPJ+8vhMP2TXv7rcnviSHPo2CGxSBc4ZmhpojK3pY1BwAAAAAAAABopFW8eCE4hGyMUCyvKkQ+GSMhBfrBc6HcBAOIRfpeAPWxOuEEHeBOEdf46B5t7j+RhQ6FEju+knqr9ZyHLaiTGWyc6JkD47GcNMQmldLaVovSYKtB+BESBYjL0jLzGCp58k+YbjuCUpp6iImugZizQSMOYtYVUjbc1DyM3IdHVsohegHZOROewEmAImpFV56O8G+tCcYlnRWQI2cfduj9GjJOGcVqB3xYTcV9WEc1Yrr1JtHSWI4W3r3zo2UbnSpDmpI3fmG2VuvHQLh5h8sFt/1fgrRkI5yZ5eFPHsp1E7LHzoD5y/rVHBYsqKhxKPOgWrUJnXjUrwNJfB1RNWQPzXMz/WJ1OdGvxQOsnVfqLrh7VS9pDPqS+FuJPoX66qT3pdemBFWruc87OG9JPadnG6whSDrCnPiAswOYQL0ArJGPPuHqjtPw/OTL2DDIvOy08OYVQfJiJ2cHEuJaSNrICAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAK0yKLZ299PNQoSlRD8X8ZYrNuSRswpAskBYSeWXul+1tMEZUZV8b49kLEr2HNayRkD+xtx/xgfuggapnpJBDTBQkCbLPfG7+WPvI3uFm/m6mrxjgZ0+10Ja3Ez5zE2CLOWHabMqG+rx6ic3WkQJWg0ftmTOLdNY5/y/t4wmoZNEDrAev8ntJ1AM1N/JeSctHwkTzJ9mVA1+gAWBEQnhzy2IfCK9h1DTQBasPGa1/xAtrN1z9rAU5xC1HoAir5oZaP/XAVfkgGP8M8l6BQ9/ZAIzv2RsyY2VJMa5K886tW+DmGfMX38Za5O64eJ+YyB0JEXSkPImOCdJi1T+xTn3Vq/O+tTlCMCYuafh2P6xmVX7ArqWdVhQeHEJadNED1BU4PWxOu
EEHeBOEdf46B5t7j+RhQ6FEju+knqr9ZyHLaiTCQAAAAAAAAA=", + "base64" + ], + "owner": "cmtDvXumGCrqC1Age74AVPhSRVXJMd8PJS91L8KbNCK", + "executable": false, + "rentEpoch": 18446744073709551615, + "space": 11960 + } +} \ No newline at end of file diff --git a/account-compression/sdk/tests/utils.ts b/account-compression/sdk/tests/utils.ts index c7737120dd9..4e49c305c71 100644 --- a/account-compression/sdk/tests/utils.ts +++ b/account-compression/sdk/tests/utils.ts @@ -2,7 +2,13 @@ import { AnchorProvider } from '@coral-xyz/anchor'; import { Keypair, SendTransactionError, Signer, Transaction, TransactionInstruction } from '@solana/web3.js'; import * as crypto from 'crypto'; -import { createAllocTreeIx, createAppendIx, createInitEmptyMerkleTreeIx, ValidDepthSizePair } from '../src'; +import { + createAllocTreeIx, + createAppendIx, + createInitEmptyMerkleTreeIx, + prepareTreeIx, + ValidDepthSizePair, +} from '../src'; import { MerkleTree } from '../src/merkle-tree'; /// Wait for a transaction of a certain id to confirm and optionally log its messages @@ -115,3 +121,28 @@ export async function createEmptyTreeOnChain( return cmtKeypair; } + +export type PrepareTreeArgs = { + canopyDepth: number; + depthSizePair: ValidDepthSizePair; + payer: Keypair; + provider: AnchorProvider; +}; + +export async function prepareTree(args: PrepareTreeArgs): Promise { + const { provider, payer, depthSizePair, canopyDepth } = args; + const cmtKeypair = Keypair.generate(); + const allocAccountIx = await createAllocTreeIx( + provider.connection, + cmtKeypair.publicKey, + payer.publicKey, + depthSizePair, + canopyDepth, + ); + + const ixs = [allocAccountIx, prepareTreeIx(cmtKeypair.publicKey, payer.publicKey, depthSizePair)]; + + const txId = await execute(provider, ixs, [payer, cmtKeypair]); + await confirmAndLogTx(provider, txId as string); + return cmtKeypair; +} diff --git a/libraries/concurrent-merkle-tree/Cargo.toml b/libraries/concurrent-merkle-tree/Cargo.toml index 0d0c7490ce5..0c3c91ec723 
100644 --- a/libraries/concurrent-merkle-tree/Cargo.toml +++ b/libraries/concurrent-merkle-tree/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "spl-concurrent-merkle-tree" -version = "0.3.0" +version = "0.4.0" description = "Solana Program Library Concurrent Merkle Tree" authors = ["Solana Labs Maintainers "] repository = "https://github.com/solana-labs/solana-program-library" diff --git a/libraries/concurrent-merkle-tree/src/node.rs b/libraries/concurrent-merkle-tree/src/node.rs index 876ce61f095..e04613a4f6b 100644 --- a/libraries/concurrent-merkle-tree/src/node.rs +++ b/libraries/concurrent-merkle-tree/src/node.rs @@ -11,7 +11,7 @@ pub fn empty_node(level: u32) -> Node { empty_node_cached::<0>(level, &[]) } -/// Calculates and caches the hash of empty nodes up to level i +/// Calculates the hash of empty nodes up to level i using an existing cache pub fn empty_node_cached(level: u32, cache: &[Node; N]) -> Node { let mut data = EMPTY; if level != 0 { @@ -26,3 +26,20 @@ pub fn empty_node_cached(level: u32, cache: &[Node; N]) -> Node } data } + +/// Calculates and caches the hash of empty nodes up to level i +pub fn empty_node_cached_mut(level: u32, cache: &mut [Node; N]) -> Node { + let mut data = EMPTY; + if level != 0 { + let target = (level - 1) as usize; + let lower_empty = if target < cache.len() && cache[target] != EMPTY { + cache[target] + } else { + empty_node(target as u32) + }; + let hash = hashv(&[lower_empty.as_ref(), lower_empty.as_ref()]); + data.copy_from_slice(hash.as_ref()); + } + cache[level as usize] = data; + data +}