@@ -520,6 +520,15 @@ pub enum WriteBatchEntry {
     Delete { key: Box<[u8]> },
 }
 
+impl WriteBatchEntry {
+    pub fn size(&self) -> usize {
+        match self {
+            Self::Put { key, value } => key.len() + value.len(),
+            Self::Delete { key } => key.len(),
+        }
+    }
+}
+
 #[derive(Clone, Serialize, Deserialize, Debug)]
 pub struct WriteBatchContainer {
     entries: Vec<WriteBatchEntry>,
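
A minimal sketch of what the new `size` accounting returns, assuming the variants above: it counts key/value payload bytes only, with no serde or `WriteBatch` framing overhead.

```rust
let put = WriteBatchEntry::Put {
    key: Box::from(&b"k1"[..]),      // 2 bytes
    value: Box::from(&b"hello"[..]), // 5 bytes
};
let del = WriteBatchEntry::Delete {
    key: Box::from(&b"k2"[..]), // 2 bytes
};
assert_eq!(put.size(), 7); // key bytes + value bytes
assert_eq!(del.size(), 2); // key bytes only
```
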
@@ -532,6 +541,10 @@ impl WriteBatchContainer {
         }
     }
 
+    pub fn size(&self) -> usize {
+        self.entries.iter().fold(0, |acc, i| acc + i.size())
+    }
+
     pub fn write_batch(&self) -> WriteBatch {
         let mut batch = WriteBatch::default();
         for entry in self.entries.iter() {
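
The fold above is equivalent to a map/sum over the entries; a sketch of the intended semantics (the free function and its name are hypothetical):

```rust
fn container_size(entries: &[WriteBatchEntry]) -> usize {
    // Sum of payload sizes across all entries; what WriteBatchContainer::size computes.
    entries.iter().map(|e| e.size()).sum()
}
```
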
@@ -767,6 +780,8 @@ impl RocksStoreConfig {
 pub trait RocksStoreDetails: Send + Sync {
     fn open_db(&self, path: &Path, config: &Arc<dyn ConfigObj>) -> Result<DB, CubeError>;
 
+    fn open_readonly_db(&self, path: &Path, config: &Arc<dyn ConfigObj>) -> Result<DB, CubeError>;
+
     fn migrate(&self, table_ref: DbTableRef) -> Result<(), CubeError>;
 
     fn get_name(&self) -> &'static str;
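
Implementers would presumably back the new method with RocksDB's read-only open mode. A sketch, assuming the rust-rocksdb binding's `DB::open_for_read_only` and a `From<rocksdb::Error>` conversion for `CubeError`; a real implementation should derive its `Options` the same way `open_db` does:

```rust
fn open_readonly_db(&self, path: &Path, config: &Arc<dyn ConfigObj>) -> Result<DB, CubeError> {
    let _ = config; // assumption: real impls build Options from config, mirroring open_db
    let opts = Options::default();
    // `false`: don't fail if a log file is present in the checkpoint directory
    Ok(DB::open_for_read_only(&opts, path, false)?)
}
```
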
@@ -1044,10 +1059,14 @@ impl RocksStore {
             let mut serializer = WriteBatchContainer::new();
 
             let mut seq_numbers = Vec::new();
+            let size_limit = self.config.meta_store_log_upload_size_limit() as usize;
             for update in updates.into_iter() {
                 let (n, write_batch) = update?;
                 seq_numbers.push(n);
                 write_batch.iterate(&mut serializer);
+                if serializer.size() > size_limit {
+                    break;
+                }
             }
 
             (
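
Note that the batch that crosses the limit is still serialized, so a single upload can overshoot `meta_store_log_upload_size_limit` by at most one write batch; the remaining updates are picked up by the next `run_upload` cycle. A toy model of that capping behavior (sizes made up):

```rust
let size_limit = 300usize;
let batch_sizes = [120usize, 150, 150, 40];
let (mut total, mut taken) = (0, 0);
for s in batch_sizes {
    total += s;
    taken += 1;
    if total > size_limit {
        break; // the crossing batch is kept; later ones wait for the next cycle
    }
}
assert_eq!(taken, 3); // 120 + 150 = 270 fits; the third batch crosses the limit
```
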
@@ -1075,10 +1094,7 @@ impl RocksStore {
             + time::Duration::from_secs(self.config.meta_store_snapshot_interval())
             < SystemTime::now()
         {
-            info!("Uploading {} check point", self.details.get_name());
             self.upload_check_point().await?;
-            let mut check_seq = self.last_check_seq.write().await;
-            *check_seq = last_db_seq;
         }
 
         info!(
@@ -1175,6 +1191,7 @@ impl RocksStore {
     }
 
     pub async fn upload_check_point(&self) -> Result<(), CubeError> {
+        info!("Uploading {} check point", self.details.get_name());
         let upload_stopped = self.snapshots_upload_stopped.lock().await;
         if !*upload_stopped {
             let mut check_point_time = self.last_checkpoint_time.write().await;
@@ -1185,11 +1202,25 @@ impl RocksStore {
                 self.prepare_checkpoint(&check_point_time).await?
             };
 
+            let details = self.details.clone();
+            let config = self.config.clone();
+            let path_to_move = checkpoint_path.clone();
+            let checkpoint_last_seq =
+                cube_ext::spawn_blocking(move || -> Result<u64, CubeError> {
+                    let snap_db = details.open_readonly_db(&path_to_move, &config)?;
+                    Ok(snap_db.latest_sequence_number())
+                })
+                .await??;
+
             self.metastore_fs
                 .upload_checkpoint(remote_path, checkpoint_path)
                 .await?;
             let mut snapshot_uploaded = self.snapshot_uploaded.write().await;
             *snapshot_uploaded = true;
+            let mut last_uploaded_check_seq = self.last_check_seq.write().await;
+            *last_uploaded_check_seq = checkpoint_last_seq;
+            let mut last_uploaded_seq = self.last_upload_seq.write().await;
+            *last_uploaded_seq = checkpoint_last_seq;
             self.write_completed_notify.notify_waiters();
         }
         Ok(())
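
The sequence number is read from the checkpoint itself (via the new `open_readonly_db`) rather than from the live DB, whose sequence may already have advanced past the snapshot. A sketch of the invariant this block establishes, using the accessors that already exist on `RocksStore` (the helper name is hypothetical):

```rust
// Holds after upload_check_point() completes, with checkpoint_last_seq as captured above.
async fn check_watermarks(store: &RocksStore, checkpoint_last_seq: u64) {
    assert_eq!(store.last_upload_seq().await, checkpoint_last_seq);
    assert_eq!(store.last_check_seq().await, checkpoint_last_seq);
    // The live DB may be ahead of the snapshot:
    // store.db.latest_sequence_number() >= checkpoint_last_seq
}
```
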
@@ -1203,6 +1234,11 @@ impl RocksStore {
         *self.last_check_seq.read().await
     }
 
+    #[cfg(test)]
+    pub fn last_seq(&self) -> u64 {
+        self.db.latest_sequence_number()
+    }
+
     fn get_store_path(&self, checkpoint_time: &SystemTime) -> String {
         format!(
             "{}-{}",
@@ -1380,6 +1416,9 @@ impl RocksStore {
 mod tests {
     use super::*;
     use crate::config::Config;
+    use crate::metastore::rocks_table::RocksTable;
+    use crate::metastore::schema::SchemaRocksTable;
+    use crate::metastore::Schema;
     use crate::metastore::{BaseRocksStoreFs, RocksMetaStoreDetails};
     use crate::remotefs::LocalDirRemoteFs;
     use chrono::Timelike;
@@ -1527,4 +1566,135 @@ mod tests {
 
         Ok(())
     }
+
+    async fn write_test_data(rocks_store: &Arc<RocksStore>, name: String) {
+        rocks_store
+            .write_operation(move |db_ref, batch_pipe| {
+                let table = SchemaRocksTable::new(db_ref.clone());
+                let schema = Schema { name };
+                Ok(table.insert(schema, batch_pipe)?)
+            })
+            .await
+            .unwrap();
+    }
+    #[tokio::test]
+    async fn test_snapshot_uploads() -> Result<(), CubeError> {
+        let config = Config::test("test_snapshots_uploads").update_config(|mut c| {
+            c.meta_store_log_upload_size_limit = 300;
+            c
+        });
+        let store_path = env::current_dir()
+            .unwrap()
+            .join("test_snapshots_uploads-local");
+        let remote_store_path = env::current_dir()
+            .unwrap()
+            .join("test_snapshots_uploads-remote");
+        let _ = fs::remove_dir_all(store_path.clone());
+        let _ = fs::remove_dir_all(remote_store_path.clone());
+        let remote_fs = LocalDirRemoteFs::new(Some(remote_store_path.clone()), store_path.clone());
+
+        let details = Arc::new(RocksMetaStoreDetails {});
+
+        let rocks_store = RocksStore::new(
+            store_path.join("metastore").as_path(),
+            BaseRocksStoreFs::new_for_metastore(remote_fs.clone(), config.config_obj()),
+            config.config_obj(),
+            details.clone(),
+        )?;
+
+        assert_eq!(rocks_store.last_upload_seq().await, 0);
+        assert_eq!(rocks_store.last_check_seq().await, 0);
+
+        write_test_data(&rocks_store, "test".to_string()).await;
+        write_test_data(&rocks_store, "test2".to_string()).await;
+
+        rocks_store.upload_check_point().await.unwrap();
+
+        let last_seq = rocks_store.last_seq();
+
+        assert_eq!(rocks_store.last_upload_seq().await, last_seq);
+        assert_eq!(rocks_store.last_check_seq().await, last_seq);
+
+        write_test_data(&rocks_store, "test3".to_string()).await;
+
+        rocks_store.run_upload().await.unwrap();
+
+        assert_eq!(
+            rocks_store.last_upload_seq().await,
+            rocks_store.last_seq() - 1
+        );
+        assert_eq!(rocks_store.last_check_seq().await, last_seq);
+
+        write_test_data(&rocks_store, "test4".to_string()).await;
+
+        rocks_store.run_upload().await.unwrap();
+
+        assert_eq!(
+            rocks_store.last_upload_seq().await,
+            rocks_store.last_seq() - 1
+        );
+        assert_eq!(rocks_store.last_check_seq().await, last_seq);
+
+        let last_upl = rocks_store.last_seq();
+
+        write_test_data(&rocks_store, "a".repeat(150)).await;
+        write_test_data(&rocks_store, "b".repeat(150)).await;
+
+        rocks_store.run_upload().await.unwrap();
+
+        assert_eq!(rocks_store.last_upload_seq().await, last_upl + 2); // +1 for the seq-number write and +1 for the first insert batch
+        assert!(rocks_store.last_upload_seq().await < rocks_store.last_seq() - 1);
+        assert_eq!(rocks_store.last_check_seq().await, last_seq);
+
+        rocks_store.run_upload().await.unwrap();
+        assert_eq!(
+            rocks_store.last_upload_seq().await,
+            rocks_store.last_seq() - 1
+        );
+        assert_eq!(rocks_store.last_check_seq().await, last_seq);
+
+        write_test_data(&rocks_store, "c".repeat(150)).await;
+        write_test_data(&rocks_store, "e".repeat(150)).await;
+
+        rocks_store.run_upload().await.unwrap();
+        assert_eq!(
+            rocks_store.last_upload_seq().await,
+            rocks_store.last_seq() - 4
+        );
+        assert_eq!(rocks_store.last_check_seq().await, last_seq);
+
+        let _ = fs::remove_dir_all(store_path.clone());
+        drop(rocks_store);
+
+        let rocks_fs = BaseRocksStoreFs::new_for_metastore(remote_fs.clone(), config.config_obj());
+        let path = store_path.join("metastore").to_string_lossy().to_string();
+        let rocks_store = rocks_fs
+            .load_from_remote(&path, config.config_obj(), details)
+            .await
+            .unwrap();
+        let all_schemas = rocks_store
+            .read_operation_out_of_queue(move |db_ref| SchemaRocksTable::new(db_ref).all_rows())
+            .await
+            .unwrap();
+        let expected = vec![
+            "test".to_string(),
+            "test2".to_string(),
+            "test3".to_string(),
+            "test4".to_string(),
+            "a".repeat(150),
+            "b".repeat(150),
+            "c".repeat(150),
+        ];
+
+        assert_eq!(expected.len(), all_schemas.len());
+
+        for (exp, row) in expected.into_iter().zip(all_schemas.into_iter()) {
+            assert_eq!(&exp, row.get_row().get_name());
+        }
+
+        let _ = fs::remove_dir_all(store_path.clone());
+        let _ = fs::remove_dir_all(remote_store_path.clone());
+
+        Ok(())
+    }
 }