@@ -320,7 +320,7 @@ impl SqlServiceImpl {
320320
321321 let max_disk_space = self . config_obj . max_disk_space ( ) ;
322322 if max_disk_space > 0 {
323- let used_space = self . db . get_used_disk_space_out_of_queue ( ) . await ?;
323+ let used_space = self . db . get_used_disk_space_out_of_queue ( None ) . await ?;
324324 if max_disk_space < used_space {
325325 return Err ( CubeError :: user ( format ! (
326326 "Exceeded available storage space: {:.3} GB out of {} GB allowed. Please consider changing pre-aggregations build range, reducing index count or pre-aggregations granularity." ,
@@ -3602,18 +3602,105 @@ mod tests {
36023602 c. partition_split_threshold = 1000000 ;
36033603 c. compaction_chunks_count_threshold = 100 ;
36043604 c. max_disk_space = 300_000 ;
3605- c. select_workers = vec ! [ "127.0.0.1:24306 " . to_string( ) ] ;
3606- c. metastore_bind_address = Some ( "127.0.0.1:25312 " . to_string ( ) ) ;
3605+ c. select_workers = vec ! [ "127.0.0.1:24308 " . to_string( ) ] ;
3606+ c. metastore_bind_address = Some ( "127.0.0.1:25314 " . to_string ( ) ) ;
36073607 c
36083608 } )
36093609 . start_test ( async move |services| {
36103610 let service = services. sql_service ;
36113611
36123612 Config :: test ( "disk_space_limit_worker_1" )
36133613 . update_config ( |mut c| {
3614- c. worker_bind_address = Some ( "127.0.0.1:24306" . to_string ( ) ) ;
3615- c. server_name = "127.0.0.1:24306" . to_string ( ) ;
3616- c. metastore_remote_address = Some ( "127.0.0.1:25312" . to_string ( ) ) ;
3614+ c. worker_bind_address = Some ( "127.0.0.1:24308" . to_string ( ) ) ;
3615+ c. server_name = "127.0.0.1:24308" . to_string ( ) ;
3616+ c. max_disk_space = 300_000 ;
3617+ c. metastore_remote_address = Some ( "127.0.0.1:25314" . to_string ( ) ) ;
3618+ c. store_provider = FileStoreProvider :: Filesystem {
3619+ remote_dir : Some ( env:: current_dir ( )
3620+ . unwrap ( )
3621+ . join ( "disk_space_limit-upstream" ) ) ,
3622+ } ;
3623+ c
3624+ } )
3625+ . start_test_worker ( async move |_| {
3626+ let paths = {
3627+ let dir = env:: temp_dir ( ) ;
3628+
3629+ let path_1 = dir. clone ( ) . join ( "foo-cluster-1.csv" ) ;
3630+ let path_2 = dir. clone ( ) . join ( "foo-cluster-2.csv.gz" ) ;
3631+ let mut file = File :: create ( path_1. clone ( ) ) . unwrap ( ) ;
3632+
3633+ file. write_all ( "id,city,arr,t\n " . as_bytes ( ) ) . unwrap ( ) ;
3634+ for i in 0 ..100000
3635+ {
3636+ file. write_all ( format ! ( "{},\" New York\" ,\" [\" \" \" \" ]\" ,2021-01-24 19:12:23.123 UTC\n " , i) . as_bytes ( ) ) . unwrap ( ) ;
3637+ }
3638+
3639+
3640+ let mut file = GzipEncoder :: new ( BufWriter :: new ( tokio:: fs:: File :: create ( path_2. clone ( ) ) . await . unwrap ( ) ) ) ;
3641+
3642+ file. write_all ( "id,city,arr,t\n " . as_bytes ( ) ) . await . unwrap ( ) ;
3643+ for i in 0 ..100000
3644+ {
3645+ file. write_all ( format ! ( "{},San Francisco,\" [\" \" Foo\" \" ,\" \" Bar\" \" ,\" \" FooBar\" \" ]\" ,\" 2021-01-24 12:12:23 UTC\" \n " , i) . as_bytes ( ) ) . await . unwrap ( ) ;
3646+ }
3647+
3648+ file. shutdown ( ) . await . unwrap ( ) ;
3649+
3650+ vec ! [ path_1, path_2]
3651+ } ;
3652+
3653+ let _ = service. exec_query ( "CREATE SCHEMA IF NOT EXISTS Foo" ) . await . unwrap ( ) ;
3654+ let _ = service. exec_query (
3655+ & format ! (
3656+ "CREATE TABLE Foo.Persons (id int, city text, t timestamp, arr text) INDEX persons_city (`city`, `id`) LOCATION {}" ,
3657+ paths. iter( ) . map( |p| format!( "'{}'" , p. to_string_lossy( ) ) ) . join( "," )
3658+ )
3659+ ) . await . unwrap ( ) ;
3660+
3661+ let res = service. exec_query (
3662+ & format ! (
3663+ "CREATE TABLE Foo.Persons2 (id int, city text, t timestamp, arr text) INDEX persons_city (`city`, `id`) LOCATION {}" ,
3664+ paths. iter( ) . map( |p| format!( "'{}'" , p. to_string_lossy( ) ) ) . join( "," )
3665+ )
3666+ ) . await ;
3667+ if let Err ( err) = res {
3668+ assert ! ( err. message. starts_with( "Exceeded available storage space:" ) ) ;
3669+ } else {
3670+ assert ! ( false ) ;
3671+ }
3672+
3673+ } )
3674+ . await ;
3675+ } )
3676+ . await ;
3677+ }
3678+
3679+ #[ tokio:: test]
3680+ async fn disk_space_limit_per_worker ( ) {
3681+ Config :: test ( "disk_space_limit_per_worker" )
3682+ . update_config ( |mut c| {
3683+ c. partition_split_threshold = 1000000 ;
3684+ c. compaction_chunks_count_threshold = 100 ;
3685+ c. max_disk_space_per_worker = 6_000_000 ;
3686+ c. select_workers = vec ! [ "127.0.0.1:24309" . to_string( ) ] ;
3687+ c. metastore_bind_address = Some ( "127.0.0.1:25315" . to_string ( ) ) ;
3688+ c
3689+ } )
3690+ . start_test ( async move |services| {
3691+ let service = services. sql_service ;
3692+
3693+ Config :: test ( "disk_space_limit_per_worker_worker_1" )
3694+ . update_config ( |mut c| {
3695+ c. worker_bind_address = Some ( "127.0.0.1:24309" . to_string ( ) ) ;
3696+ c. server_name = "127.0.0.1:24309" . to_string ( ) ;
3697+ c. max_disk_space_per_worker = 6_000_000 ;
3698+ c. metastore_remote_address = Some ( "127.0.0.1:25315" . to_string ( ) ) ;
3699+ c. store_provider = FileStoreProvider :: Filesystem {
3700+ remote_dir : Some ( env:: current_dir ( )
3701+ . unwrap ( )
3702+ . join ( "disk_space_limit_per_worker-upstream" ) ) ,
3703+ } ;
36173704 c
36183705 } )
36193706 . start_test_worker ( async move |_| {
@@ -3659,7 +3746,7 @@ mod tests {
36593746 )
36603747 ) . await ;
36613748 if let Err ( err) = res {
3662- assert ! ( err. message. starts_with ( "Exceeded available storage " ) ) ;
3749+ assert ! ( err. message. contains ( "Exceeded available storage space on worker " ) ) ;
36633750 } else {
36643751 assert ! ( false ) ;
36653752 }