|
| 1 | +use std::collections::HashMap; |
| 2 | + |
| 3 | +use async_trait::async_trait; |
| 4 | +use starknet_api::core::ContractAddress; |
| 5 | +use starknet_patricia::patricia_merkle_tree::filled_tree::tree::FilledTree; |
| 6 | +use starknet_patricia::patricia_merkle_tree::node_data::leaf::{Leaf, LeafModifications}; |
| 7 | +use starknet_patricia::patricia_merkle_tree::types::NodeIndex; |
| 8 | +use starknet_patricia::patricia_merkle_tree::updated_skeleton_tree::hash_function::TreeHashFunction; |
| 9 | +use starknet_patricia_storage::db_object::{EmptyKeyContext, HasStaticPrefix}; |
| 10 | +use starknet_patricia_storage::errors::SerializationResult; |
| 11 | +use starknet_patricia_storage::storage_trait::{DbHashMap, Storage}; |
| 12 | + |
| 13 | +use crate::block_committer::input::{ReaderConfig, StarknetStorageValue}; |
| 14 | +use crate::db::db_layout::NodeLayout; |
| 15 | +use crate::db::facts_db::types::FactsDbInitialRead; |
| 16 | +use crate::db::forest_trait::{ForestReader, ForestWriter}; |
| 17 | +use crate::db::index_db::leaves::{ |
| 18 | + IndexLayoutCompiledClassHash, |
| 19 | + IndexLayoutContractState, |
| 20 | + IndexLayoutStarknetStorageValue, |
| 21 | + TrieType, |
| 22 | +}; |
| 23 | +use crate::db::index_db::types::{ |
| 24 | + EmptyNodeData, |
| 25 | + IndexFilledNode, |
| 26 | + IndexLayoutSubTree, |
| 27 | + IndexNodeContext, |
| 28 | +}; |
| 29 | +use crate::db::trie_traversal::{create_classes_trie, create_contracts_trie, create_storage_tries}; |
| 30 | +use crate::forest::filled_forest::FilledForest; |
| 31 | +use crate::forest::forest_errors::ForestResult; |
| 32 | +use crate::forest::original_skeleton_forest::{ForestSortedIndices, OriginalSkeletonForest}; |
| 33 | +use crate::hash_function::hash::TreeHashFunctionImpl; |
| 34 | +use crate::patricia_merkle_tree::leaf::leaf_impl::ContractState; |
| 35 | +use crate::patricia_merkle_tree::types::CompiledClassHash; |
| 36 | + |
/// Patricia-forest database using the index-based node layout.
///
/// Thin wrapper around a generic key-value [`Storage`] backend; the
/// `ForestReader` / `ForestWriter` impls in this module define how tries
/// are read from and written to it.
pub struct IndexDb<S: Storage> {
    // Exposed publicly so callers can reach the raw backend directly.
    pub storage: S,
}
| 40 | + |
| 41 | +impl<S: Storage> IndexDb<S> { |
| 42 | + pub fn new(storage: S) -> Self { |
| 43 | + Self { storage } |
| 44 | + } |
| 45 | +} |
| 46 | + |
| 47 | +pub struct IndexNodeLayout {} |
| 48 | + |
// Wires the index layout into the generic trie machinery: which node DB
// object, deserialization context and subtree representation to use for any
// leaf type whose DB key context is the trie type.
impl<'a, L: Leaf> NodeLayout<'a, L> for IndexNodeLayout
where
    L: HasStaticPrefix<KeyContext = TrieType>,
    TreeHashFunctionImpl: TreeHashFunction<L>,
{
    // Index-layout nodes carry no auxiliary payload alongside the node itself.
    type NodeData = EmptyNodeData;
    type NodeDbObject = IndexFilledNode<L>;
    type DeserializationContext = IndexNodeContext;
    type SubTree = IndexLayoutSubTree<'a>;

    // In the index layout the trie type itself serves as the DB key context.
    fn generate_key_context(trie_type: TrieType) -> <L as HasStaticPrefix>::KeyContext {
        trie_type
    }
}
| 63 | + |
// TODO(Ariel): define an IndexDbInitialRead empty type, and check whether each tree is empty inside
// create_xxx_trie.
#[async_trait]
impl<S: Storage> ForestReader<FactsDbInitialRead> for IndexDb<S> {
    /// Creates an original skeleton forest that includes the storage tries of the modified
    /// contracts, the classes trie and the contracts trie. Additionally, returns the original
    /// contract states that are needed to compute the contract state tree.
    ///
    /// Reads are performed sequentially against `self.storage`; the contracts
    /// trie is read first because its leaves (the original contract states)
    /// are needed to build the per-contract storage tries.
    async fn read<'a>(
        &mut self,
        context: FactsDbInitialRead,
        storage_updates: &'a HashMap<ContractAddress, LeafModifications<StarknetStorageValue>>,
        classes_updates: &'a LeafModifications<CompiledClassHash>,
        forest_sorted_indices: &'a ForestSortedIndices<'a>,
        config: ReaderConfig,
    ) -> ForestResult<(OriginalSkeletonForest<'a>, HashMap<NodeIndex, ContractState>)> {
        // Contracts trie + the original leaves keyed by node index.
        let (contracts_trie, original_contracts_trie_leaves) =
            create_contracts_trie::<IndexLayoutContractState, IndexNodeLayout>(
                &mut self.storage,
                context.0.contracts_trie_root_hash,
                forest_sorted_indices.contracts_trie_sorted_indices,
            )
            .await?;
        // Storage tries of the modified contracts only; roots come from the
        // original contract states read above.
        let storage_tries =
            create_storage_tries::<IndexLayoutStarknetStorageValue, IndexNodeLayout>(
                &mut self.storage,
                storage_updates,
                &original_contracts_trie_leaves,
                &config,
                &forest_sorted_indices.storage_tries_sorted_indices,
            )
            .await?;
        // Classes trie, rooted at the hash supplied by the initial read.
        let classes_trie = create_classes_trie::<IndexLayoutCompiledClassHash, IndexNodeLayout>(
            &mut self.storage,
            classes_updates,
            context.0.classes_trie_root_hash,
            &config,
            forest_sorted_indices.classes_trie_sorted_indices,
        )
        .await?;

        Ok((
            OriginalSkeletonForest { classes_trie, contracts_trie, storage_tries },
            original_contracts_trie_leaves,
        ))
    }
}
| 110 | + |
| 111 | +#[async_trait] |
| 112 | +impl<S: Storage> ForestWriter for IndexDb<S> { |
| 113 | + fn serialize_forest(filled_forest: &FilledForest) -> SerializationResult<DbHashMap> { |
| 114 | + let mut serialized_forest = DbHashMap::new(); |
| 115 | + |
| 116 | + // TODO(Ariel): use a different key context when FilledForest is generic over leaf types. |
| 117 | + for tree in filled_forest.storage_tries.values() { |
| 118 | + serialized_forest.extend(tree.serialize(&EmptyKeyContext)?); |
| 119 | + } |
| 120 | + |
| 121 | + // Contracts and classes tries. |
| 122 | + serialized_forest.extend(filled_forest.contracts_trie.serialize(&EmptyKeyContext)?); |
| 123 | + serialized_forest.extend(filled_forest.classes_trie.serialize(&EmptyKeyContext)?); |
| 124 | + |
| 125 | + Ok(serialized_forest) |
| 126 | + } |
| 127 | + |
| 128 | + async fn write_updates(&mut self, updates: DbHashMap) -> usize { |
| 129 | + let n_updates = updates.len(); |
| 130 | + self.storage |
| 131 | + .mset(updates) |
| 132 | + .await |
| 133 | + .unwrap_or_else(|_| panic!("Write of {n_updates} new updates to storage failed")); |
| 134 | + n_updates |
| 135 | + } |
| 136 | +} |
0 commit comments