@@ -15,6 +15,7 @@ use octez_riscv_durable_storage::random::generate_random_bytes;
 use octez_riscv_durable_storage::repo::DirectoryManager;
 use rand::prelude::*;
 use rand::rng;
+use serde::Deserialize;
 use tokio::runtime::Handle;
 
 #[cfg_attr(
@@ -28,7 +29,7 @@ struct BenchmarkState<'a> {
     database: Database,
     operations: Vec<Operation>,
     prepopulated_node_keys: Vec<Key>,
-    prepopulated_values: Vec<Bytes>,
+    random_data: Vec<u8>,
     read_buffer: Vec<u8>,
     handle: &'a Handle,
     repo: &'a DirectoryManager,
@@ -44,42 +45,95 @@ impl<'a> BenchmarkState<'a> {
             operations: self.operations.clone(),
             read_buffer: vec![0u8; LARGE_READ_MAX_SIZE],
             prepopulated_node_keys: self.prepopulated_node_keys.clone(),
-            prepopulated_values: self.prepopulated_values.clone(),
+            random_data: self.random_data.clone(),
             handle,
             repo,
         }
     }
 }
 
 #[derive(Clone)]
-#[expect(dead_code, reason = "Some operations aren't benchmarked yet")]
 enum Operation {
     Clone,
-    Delete { key_index: usize },
-    Exists { key_index: usize },
+    Delete { key: Key },
+    Exists { key: Key },
     Hash,
-    Read { key_index: usize, size: usize },
-    ValueLength { key_index: usize },
-    Write { key_index: usize, data_index: usize },
+    Read { key: Key, size: usize },
+    ValueLength { key: Key },
+    Write { key: Key, size: usize },
+}
+
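+// Map each recorded store access onto the database operation it exercises; the
+// three read variants and the two write variants each collapse into a single
+// benchmark operation.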
+impl From<SerialisedOperation> for Operation {
+    fn from(serialised_operation: SerialisedOperation) -> Self {
+        match serialised_operation {
+            SerialisedOperation::Copy { .. } => Operation::Clone,
+            SerialisedOperation::Delete { path } => Operation::Delete {
+                key: Key::new(path.as_bytes()).expect("The path should be a valid key"),
+            },
+            SerialisedOperation::Has { path } => Operation::Exists {
+                key: Key::new(path.as_bytes()).expect("The path should be a valid key"),
+            },
+            SerialisedOperation::Read { path, size }
+            | SerialisedOperation::ReadAll { path, size }
+            | SerialisedOperation::ReadSlice { path, size } => Operation::Read {
+                key: Key::new(path.as_bytes()).expect("The path should be a valid key"),
+                size,
+            },
+            SerialisedOperation::ValueSize { path } => Operation::ValueLength {
+                key: Key::new(path.as_bytes()).expect("The path should be a valid key"),
+            },
+            SerialisedOperation::Write { path, size }
+            | SerialisedOperation::WriteAll { path, size } => Operation::Write {
+                key: Key::new(path.as_bytes()).expect("The path should be a valid key"),
+                size,
+            },
+        }
+    }
+}
+
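+// A single recorded store access; serde selects the variant from the JSON
+// "operation" tag, e.g. a hypothetical entry
+// {"operation": "store_read", "path": "/some/key", "size": 32}.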
+#[derive(Deserialize)]
+#[serde(tag = "operation", rename_all = "snake_case")]
+enum SerialisedOperation {
+    #[serde(rename = "store_copy")]
+    #[expect(dead_code, reason = "The result of the copy is ignored")]
+    Copy { from_path: String, to_path: String },
+    #[serde(rename = "store_delete")]
+    Delete { path: String },
+    #[serde(rename = "store_has")]
+    Has { path: String },
+    #[serde(rename = "store_read")]
+    Read { path: String, size: usize },
+    #[serde(rename = "store_read_all")]
+    ReadAll { path: String, size: usize },
+    #[serde(rename = "store_read_slice")]
+    ReadSlice { path: String, size: usize },
+    #[serde(rename = "store_value_size")]
+    ValueSize { path: String },
+    #[serde(rename = "store_write")]
+    Write { path: String, size: usize },
+    #[serde(rename = "store_write_all")]
+    WriteAll { path: String, size: usize },
+}
+
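+// Top-level layout of store_accesses.json: the accesses recorded during setup,
+// during a single transaction, and during block creation.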
+#[derive(Deserialize)]
+struct StoreAccesses {
+    setup: Vec<SerialisedOperation>,
+    transaction: Vec<SerialisedOperation>,
+    block_creation: Vec<SerialisedOperation>,
 }
 
 const BLOCK_FREQUENCY: usize = 5_000;
 const ERC_20_TRANSACTIONS: usize = 10_000;
 const LARGE_READ_MIN_SIZE: usize = 96000;
 const LARGE_READ_MAX_SIZE: usize = LARGE_READ_MIN_SIZE * 2;
-const PREPOPULATED_LARGE_NODE_KEYS_COUNT: usize = 100;
-const PREPOPULATED_NODE_KEYS_COUNT: usize = 10_000_000 - PREPOPULATED_LARGE_NODE_KEYS_COUNT;
-const SMALL_READS: usize = 13;
-const SMALL_READ_SIZE: usize = 32;
-const SMALL_WRITES: usize = 7;
+const PREPOPULATED_NODE_KEYS_COUNT: usize = 10_000_000;
 
 fn setup_benchmark_state<'a>(handle: &'a Handle, repo: &'a DirectoryManager) -> BenchmarkState<'a> {
     let mut database = Database::try_new(handle, repo).expect("Creating a database should succeed");
     let mut rng = rng();
 
     const VALUES_COUNT: usize = 100;
     let mut prepopulated_node_keys = Vec::with_capacity(PREPOPULATED_NODE_KEYS_COUNT);
-    let mut prepopulated_large_node_keys = Vec::with_capacity(PREPOPULATED_LARGE_NODE_KEYS_COUNT);
     let mut prepopulated_values = Vec::with_capacity(VALUES_COUNT);
 
     // Pre-populate a small number of values of random small sizes
@@ -98,73 +152,57 @@ fn setup_benchmark_state<'a>(handle: &'a Handle, repo: &'a DirectoryManager) ->
         prepopulated_node_keys.push(key);
     }
 
-    // Pre-populate the database with a small number of random keys with large values
-    let keys = generate_keys(&mut rng, PREPOPULATED_LARGE_NODE_KEYS_COUNT);
-    for key in keys {
-        let value = Bytes::from(generate_random_bytes(
-            &mut rng,
-            LARGE_READ_MIN_SIZE..LARGE_READ_MAX_SIZE,
-        ));
-        database.write(key.clone(), 0, value).ok();
-        prepopulated_large_node_keys.push(key);
-    }
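+    // Load the recorded store accesses that make up one ERC-20 transaction from
+    // the JSON bundled next to the benchmark source.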
+    let store_accesses_data = include_str!("store_accesses.json");
+    let store_accesses: StoreAccesses = serde_json::from_str(store_accesses_data).expect(
+        "The benchmark data should be a valid serialisation of the store accesses representation",
+    );
 
-    let mut operations =
-        Vec::<Operation>::with_capacity(ERC_20_TRANSACTIONS * (1 + SMALL_READS + SMALL_WRITES));
+    // Convert the serialised form into the sequence of operations for a single transaction
+    let erc_20_transaction: Vec<Operation> = store_accesses
+        .setup
+        .into_iter()
+        .chain(
+            store_accesses
+                .transaction
+                .into_iter()
+                .chain(store_accesses.block_creation),
+        )
+        .map(Operation::from)
+        .collect();
 
-    for i in 0..ERC_20_TRANSACTIONS {
-        let key_index = rng.random_range(0..prepopulated_large_node_keys.len());
-        let size = rng.random_range(LARGE_READ_MIN_SIZE..LARGE_READ_MAX_SIZE);
-        let operation = Operation::Read { key_index, size };
-
-        let mut vec = vec![0u8; size];
-        rng.fill(vec.as_mut_slice());
-
-        // Write to the node so there's something meaningful and large to read
-        database
-            .write(
-                prepopulated_large_node_keys[key_index].clone(),
-                0,
-                Bytes::from(vec),
-            )
-            .expect("The write should succeed");
-        operations.push(operation);
-
-        for _ in 0..SMALL_READS {
-            let key_index = rng.random_range(0..prepopulated_large_node_keys.len());
-            let size = SMALL_READ_SIZE;
-            operations.push(Operation::Read { key_index, size });
-
-            let mut vec = vec![0u8; size];
-            rng.fill(vec.as_mut_slice());
-
-            // Write to the node so there's something meaningful to read
+    // Populate nodes for read operations where keys are expected to exist
+    for operation in &erc_20_transaction {
+        if let Operation::Read { key, size: _ } = operation {
             database
                 .write(
-                    prepopulated_node_keys[key_index].clone(),
+                    key.clone(),
                     0,
-                    Bytes::from(vec),
+                    Bytes::from(generate_random_bytes(&mut rng, 1..32)),
                 )
                 .expect("The write should succeed");
         }
+    }
 
-        for _ in 0..SMALL_WRITES {
-            operations.push(Operation::Write {
-                key_index: rng.random_range(0..prepopulated_node_keys.len()),
-                data_index: rng.random_range(0..prepopulated_values.len()),
-            });
-        }
-
+    // Repeat the operations in a transaction for the number of transactions that are being
+    // benchmarked
+    let mut operations = Vec::with_capacity(
+        erc_20_transaction.len() * ERC_20_TRANSACTIONS + ERC_20_TRANSACTIONS / BLOCK_FREQUENCY,
+    );
+    for i in 0..ERC_20_TRANSACTIONS {
+        operations.extend_from_slice(&erc_20_transaction);
         if i % BLOCK_FREQUENCY == 0 {
             operations.push(Operation::Hash);
         }
     }
 
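+    // One shared pool of random bytes; each write later slices `size` bytes from
+    // it instead of allocating a fresh payload per operation.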
+    let mut random_data = vec![0; LARGE_READ_MAX_SIZE];
+    rng.fill(random_data.as_mut_slice());
+
     BenchmarkState {
         database,
         operations,
         prepopulated_node_keys,
-        prepopulated_values,
+        random_data,
         read_buffer: vec![0u8; LARGE_READ_MAX_SIZE],
         handle,
         repo,
@@ -173,8 +211,8 @@ fn setup_benchmark_state<'a>(handle: &'a Handle, repo: &'a DirectoryManager) ->
 
 #[inline(never)]
 fn bench_run(mut state: BenchmarkState) {
-    // `KeyNotFound` errors are allowed, since the (conceptual, not std::hint) black box that is the transaction inputs may delete keys
-    // which are later used.
+    // `KeyNotFound` errors are allowed, since arbitrary operations may delete keys which are later
+    // used.
     for operation in state.operations {
         match operation {
             Operation::Clone => {
@@ -188,50 +226,40 @@ fn bench_run(mut state: BenchmarkState) {
                     .expect("The clone should succeed"),
                 );
             }
-            Operation::Delete { key_index } => match state
-                .database
-                .delete(state.prepopulated_node_keys[key_index].clone())
-            {
+            Operation::Delete { key } => match state.database.delete(key) {
                 Ok(_)
                 | Err(DatabaseError::PersistenceLayer(PersistenceLayerError::KeyNotFound)) => {}
                 Err(e) => panic!("The deletion should succeed: {e:?}"),
             },
-            Operation::Exists { key_index } => {
+            Operation::Exists { key } => {
                 state
                     .database
-                    .exists(&state.prepopulated_node_keys[key_index])
+                    .exists(&key)
                     .expect("The existence check should succeed");
             }
             Operation::Hash => {
                 black_box(state.database.hash());
             }
-            Operation::Read { key_index, size } => {
-                match state.database.read(
-                    &state.prepopulated_node_keys[key_index],
-                    0,
-                    &mut state.read_buffer[0..size],
-                ) {
+            Operation::Read { key, size } => {
+                match state
+                    .database
+                    .read(&key, 0, &mut state.read_buffer[0..size])
+                {
                     Ok(_)
                     | Err(DatabaseError::PersistenceLayer(PersistenceLayerError::KeyNotFound)) => {}
                     Err(e) => panic!("The read should succeed: {e:?}"),
                 }
             }
-            Operation::ValueLength { key_index } => match state
-                .database
-                .value_length(&state.prepopulated_node_keys[key_index])
-            {
+            Operation::ValueLength { key } => match state.database.value_length(&key) {
                 Ok(_)
                 | Err(DatabaseError::PersistenceLayer(PersistenceLayerError::KeyNotFound)) => {}
                 Err(e) => panic!("The value length calculation should succeed: {e:?}"),
             },
-            Operation::Write {
-                key_index,
-                data_index,
-            } => {
+            Operation::Write { key, size } => {
                 match state.database.write(
-                    state.prepopulated_node_keys[key_index].clone(),
+                    key,
                     0,
-                    state.prepopulated_values[data_index].clone(),
+                    Bytes::copy_from_slice(&state.random_data[0..size]),
                 ) {
                     Ok(_)
                     | Err(DatabaseError::PersistenceLayer(PersistenceLayerError::KeyNotFound)) => {}