11// Copyright (c) Zefchain Labs, Inc.
22// SPDX-License-Identifier: Apache-2.0
33
4- use std:: { fmt:: Debug , sync:: Arc } ;
4+ use std:: { fmt:: Debug , ops :: Deref , sync:: Arc } ;
55
66use async_trait:: async_trait;
77#[ cfg( with_metrics) ]
@@ -337,16 +337,28 @@ enum RootKey {
337337 NetworkDescription ,
338338}
339339
// First byte of a serialized `RootKey`, one constant per variant we scan for.
// BCS encodes an enum as its variant index followed by the payload, so these
// values must stay in sync with the declaration order of `RootKey`'s variants
// (the gap at 1 presumably belongs to a variant not listed here — confirm
// against the `RootKey` definition before reordering).
const CHAIN_ID_TAG: u8 = 0;
const BLOB_ID_TAG: u8 = 2;
const EVENT_ID_TAG: u8 = 3;
343+
340344impl RootKey {
341345 fn bytes ( & self ) -> Vec < u8 > {
342346 bcs:: to_bytes ( self ) . unwrap ( )
343347 }
344348}
345349
/// The chain-local part of an `EventId`: everything except the `chain_id`,
/// which is already encoded in the per-chain root key. Its BCS serialization
/// is the store key produced by `event_key` and parsed back by
/// `list_event_ids`.
#[derive(Debug, Serialize, Deserialize)]
struct RestrictedEventId {
    pub stream_id: StreamId,
    pub index: u32,
}
355+
346356fn event_key ( event_id : & EventId ) -> Vec < u8 > {
347- let mut key = bcs:: to_bytes ( & event_id. stream_id ) . unwrap ( ) ;
348- key. extend ( bcs:: to_bytes ( & event_id. index ) . unwrap ( ) ) ;
349- key
357+ let restricted_event_id = RestrictedEventId {
358+ stream_id : event_id. stream_id . clone ( ) ,
359+ index : event_id. index ,
360+ } ;
361+ bcs:: to_bytes ( & restricted_event_id) . unwrap ( )
350362}
351363
352364fn is_chain_state ( root_key : & [ u8 ] ) -> bool {
@@ -356,11 +368,6 @@ fn is_chain_state(root_key: &[u8]) -> bool {
356368 root_key[ 0 ] == CHAIN_ID_TAG
357369}
358370
359- const CHAIN_ID_TAG : u8 = 0 ;
360- const BLOB_ID_TAG : u8 = 2 ;
361- const CHAIN_ID_LENGTH : usize = std:: mem:: size_of :: < ChainId > ( ) ;
362- const BLOB_ID_LENGTH : usize = std:: mem:: size_of :: < BlobId > ( ) ;
363-
364371#[ cfg( test) ]
365372mod tests {
366373 use linera_base:: {
@@ -371,9 +378,7 @@ mod tests {
371378 } ,
372379 } ;
373380
374- use crate :: db_storage:: {
375- event_key, RootKey , BLOB_ID_LENGTH , BLOB_ID_TAG , CHAIN_ID_LENGTH , CHAIN_ID_TAG ,
376- } ;
381+ use crate :: db_storage:: { event_key, RootKey , BLOB_ID_TAG , CHAIN_ID_TAG , EVENT_ID_TAG } ;
377382
378383 // Several functionalities of the storage rely on the way that the serialization
379384 // is done. Thus we need to check that the serialization works in the way that
@@ -388,7 +393,7 @@ mod tests {
388393 let blob_id = BlobId :: new ( hash, blob_type) ;
389394 let root_key = RootKey :: Blob ( blob_id) . bytes ( ) ;
390395 assert_eq ! ( root_key[ 0 ] , BLOB_ID_TAG ) ;
391- assert_eq ! ( root_key. len ( ) , 1 + BLOB_ID_LENGTH ) ;
396+ assert_eq ! ( bcs :: from_bytes :: < BlobId > ( & root_key[ 1 .. ] ) . unwrap ( ) , blob_id ) ;
392397 }
393398
394399 // The listing of the chains in `list_chain_ids` depends on the serialization
@@ -399,7 +404,10 @@ mod tests {
399404 let chain_id = ChainId ( hash) ;
400405 let root_key = RootKey :: ChainState ( chain_id) . bytes ( ) ;
401406 assert_eq ! ( root_key[ 0 ] , CHAIN_ID_TAG ) ;
402- assert_eq ! ( root_key. len( ) , 1 + CHAIN_ID_LENGTH ) ;
407+ assert_eq ! (
408+ bcs:: from_bytes:: <ChainId >( & root_key[ 1 ..] ) . unwrap( ) ,
409+ chain_id
410+ ) ;
403411 }
404412
405413 // The listing of the events in `read_events_from_index` depends on the
@@ -426,6 +434,12 @@ mod tests {
426434 } ;
427435 let key = event_key ( & event_id) ;
428436 assert ! ( key. starts_with( & prefix) ) ;
437+ let root_key = RootKey :: Event ( chain_id) . bytes ( ) ;
438+ assert_eq ! ( root_key[ 0 ] , EVENT_ID_TAG ) ;
439+ assert_eq ! (
440+ bcs:: from_bytes:: <ChainId >( & root_key[ 1 ..] ) . unwrap( ) ,
441+ chain_id
442+ ) ;
429443 }
430444}
431445
@@ -985,6 +999,56 @@ where
985999 let store = self . database . open_exclusive ( & root_key) ?;
9861000 Ok ( ViewContext :: create_root_context ( store, block_exporter_id) . await ?)
9871001 }
1002+
1003+ async fn list_blob_ids ( & self ) -> Result < Vec < BlobId > , ViewError > {
1004+ let root_keys = self . database . list_root_keys ( ) . await ?;
1005+ let mut blob_ids = Vec :: new ( ) ;
1006+ for root_key in root_keys {
1007+ if !root_key. is_empty ( ) && root_key[ 0 ] == BLOB_ID_TAG {
1008+ let root_key_red = & root_key[ 1 ..] ;
1009+ let blob_id = bcs:: from_bytes ( root_key_red) ?;
1010+ blob_ids. push ( blob_id) ;
1011+ }
1012+ }
1013+ Ok ( blob_ids)
1014+ }
1015+
1016+ async fn list_chain_ids ( & self ) -> Result < Vec < ChainId > , ViewError > {
1017+ let root_keys = self . database . list_root_keys ( ) . await ?;
1018+ let mut chain_ids = Vec :: new ( ) ;
1019+ for root_key in root_keys {
1020+ if !root_key. is_empty ( ) && root_key[ 0 ] == CHAIN_ID_TAG {
1021+ let root_key_red = & root_key[ 1 ..] ;
1022+ let chain_id = bcs:: from_bytes ( root_key_red) ?;
1023+ chain_ids. push ( chain_id) ;
1024+ }
1025+ }
1026+ Ok ( chain_ids)
1027+ }
1028+
1029+ async fn list_event_ids ( & self ) -> Result < Vec < EventId > , ViewError > {
1030+ let database = self . database . deref ( ) ;
1031+ let root_keys = database. list_root_keys ( ) . await ?;
1032+ let mut event_ids = Vec :: new ( ) ;
1033+ for root_key in root_keys {
1034+ if !root_key. is_empty ( ) && root_key[ 0 ] == EVENT_ID_TAG {
1035+ let root_key_red = & root_key[ 1 ..] ;
1036+ let chain_id = bcs:: from_bytes ( root_key_red) ?;
1037+ let store = database. open_shared ( & root_key) ?;
1038+ let keys = store. find_keys_by_prefix ( & [ ] ) . await ?;
1039+ for key in keys {
1040+ let restricted_event_id = bcs:: from_bytes :: < RestrictedEventId > ( & key) ?;
1041+ let event_id = EventId {
1042+ chain_id,
1043+ stream_id : restricted_event_id. stream_id ,
1044+ index : restricted_event_id. index ,
1045+ } ;
1046+ event_ids. push ( event_id) ;
1047+ }
1048+ }
1049+ }
1050+ Ok ( event_ids)
1051+ }
9881052}
9891053
9901054impl < Database , C > DbStorage < Database , C >
@@ -1085,48 +1149,6 @@ where
10851149 let database = Database :: connect ( config, namespace) . await ?;
10861150 Ok ( Self :: new ( database, wasm_runtime, WallClock ) )
10871151 }
1088-
1089- /// Lists the blob IDs of the storage.
1090- pub async fn list_blob_ids (
1091- config : & Database :: Config ,
1092- namespace : & str ,
1093- ) -> Result < Vec < BlobId > , ViewError > {
1094- let database = Database :: connect ( config, namespace) . await ?;
1095- let root_keys = database. list_root_keys ( ) . await ?;
1096- let mut blob_ids = Vec :: new ( ) ;
1097- for root_key in root_keys {
1098- if root_key. len ( ) == 1 + BLOB_ID_LENGTH && root_key[ 0 ] == BLOB_ID_TAG {
1099- let root_key_red = & root_key[ 1 ..=BLOB_ID_LENGTH ] ;
1100- let blob_id = bcs:: from_bytes ( root_key_red) ?;
1101- blob_ids. push ( blob_id) ;
1102- }
1103- }
1104- Ok ( blob_ids)
1105- }
1106- }
1107-
1108- impl < Database > DbStorage < Database , WallClock >
1109- where
1110- Database : KeyValueDatabase + Clone + Send + Sync + ' static ,
1111- Database :: Error : Send + Sync ,
1112- {
1113- /// Lists the chain IDs of the storage.
1114- pub async fn list_chain_ids (
1115- config : & Database :: Config ,
1116- namespace : & str ,
1117- ) -> Result < Vec < ChainId > , ViewError > {
1118- let database = Database :: connect ( config, namespace) . await ?;
1119- let root_keys = database. list_root_keys ( ) . await ?;
1120- let mut chain_ids = Vec :: new ( ) ;
1121- for root_key in root_keys {
1122- if root_key. len ( ) == 1 + CHAIN_ID_LENGTH && root_key[ 0 ] == CHAIN_ID_TAG {
1123- let root_key_red = & root_key[ 1 ..=CHAIN_ID_LENGTH ] ;
1124- let chain_id = bcs:: from_bytes ( root_key_red) ?;
1125- chain_ids. push ( chain_id) ;
1126- }
1127- }
1128- Ok ( chain_ids)
1129- }
11301152}
11311153
11321154#[ cfg( with_testing) ]
0 commit comments