@@ -9,12 +9,15 @@ use uuid::Uuid;
 use crate::{
     cache::{
         Cache, CacheTrait,
-        keys::{PROJECT_CACHE_KEY, WORKSPACE_LIMITS_CACHE_KEY},
+        keys::{PROJECT_CACHE_KEY, WORKSPACE_LIMITS_CACHE_KEY, WORKSPACE_PARTIAL_USAGE_CACHE_KEY},
     },
     ch,
     db::{self, DB, projects::ProjectWithWorkspaceBillingInfo, stats::WorkspaceLimitsExceeded},
 };

+// Threshold in bytes (16MB) - only recompute workspace limits after this much data is written
+const RECOMPUTE_THRESHOLD_BYTES: usize = 16 * 1024 * 1024; // 16MB
+
 pub async fn get_workspace_limit_exceeded_by_project_id(
     db: Arc<DB>,
     clickhouse: clickhouse::Client,
@@ -56,6 +59,7 @@ pub async fn update_workspace_limit_exceeded_by_project_id(
     clickhouse: clickhouse::Client,
     cache: Arc<Cache>,
     project_id: Uuid,
+    written_bytes: usize,
 ) -> Result<()> {
     tokio::spawn(async move {
         let project_info = get_workspace_info_for_project_id(db.clone(), cache.clone(), project_id)
@@ -74,26 +78,57 @@ pub async fn update_workspace_limit_exceeded_by_project_id(
             return;
         }

-        let cache_key = format!("{WORKSPACE_LIMITS_CACHE_KEY}:{workspace_id}");
-        let workspace_limits_exceeded = is_workspace_over_limit(
-            clickhouse,
-            project_info.workspace_project_ids,
-            project_info.bytes_limit,
-            project_info.reset_time,
-        )
-        .await
-        .map_err(|e| {
-            log::error!(
-                "Failed to update workspace limit exceeded for project [{}]: {:?}",
-                project_id,
-                e
-            );
-        })
-        .unwrap();
-        cache
-            .insert::<WorkspaceLimitsExceeded>(&cache_key, workspace_limits_exceeded.clone())
+        let partial_usage_cache_key = format!("{WORKSPACE_PARTIAL_USAGE_CACHE_KEY}:{workspace_id}");
+        let limits_cache_key = format!("{WORKSPACE_LIMITS_CACHE_KEY}:{workspace_id}");
+
+        // Get current partial usage from cache
+        let cache_result = cache.get::<usize>(&partial_usage_cache_key).await;
+
+        // If cache is missing or errored, we should recompute
+        let (current_partial_usage, cache_available) = match cache_result {
+            Ok(Some(value)) => (value, true),
+            Ok(None) | Err(_) => (0, false),
+        };
+
+        let new_partial_usage = current_partial_usage + written_bytes;
+
+        // Recompute if: cache was unavailable, or we've accumulated at least RECOMPUTE_THRESHOLD_BYTES
+        let should_recompute = !cache_available || new_partial_usage >= RECOMPUTE_THRESHOLD_BYTES;
+
+        if should_recompute {
+            // Perform the heavy computation
+            let workspace_limits_exceeded = is_workspace_over_limit(
+                clickhouse,
+                project_info.workspace_project_ids,
+                project_info.bytes_limit,
+                project_info.reset_time,
+            )
             .await
+            .map_err(|e| {
+                log::error!(
+                    "Failed to update workspace limit exceeded for project [{}]: {:?}",
+                    project_id,
+                    e
+                );
+            })
             .unwrap();
+
+            // Update the limits cache
+            let _ = cache
+                .insert::<WorkspaceLimitsExceeded>(
+                    &limits_cache_key,
+                    workspace_limits_exceeded.clone(),
+                )
+                .await;
+
+            // Reset the partial usage counter
+            let _ = cache.insert::<usize>(&partial_usage_cache_key, 0).await;
+        } else {
+            // Just update the partial usage counter
+            let _ = cache
+                .insert::<usize>(&partial_usage_cache_key, new_partial_usage)
+                .await;
+        }
     });

     Ok(())
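Usage note: callers of update_workspace_limit_exceeded_by_project_id now pass the number of bytes just written, so the ClickHouse query in is_workspace_over_limit runs only when the partial-usage counter is missing from the cache or has accumulated at least RECOMPUTE_THRESHOLD_BYTES (16 MB) since the last recompute. A minimal call-site sketch, assuming the same handles (db, clickhouse, cache) and the size of the batch just ingested are in scope; the surrounding names are illustrative, not taken from this diff:

    // Report the bytes written by this ingestion batch (hypothetical call site).
    update_workspace_limit_exceeded_by_project_id(
        db.clone(),
        clickhouse.clone(),
        cache.clone(),
        project_id,
        written_bytes, // e.g. the serialized size of the batch just inserted
    )
    .await?;

Because the function spawns the check on a background task and returns immediately, the ingestion path only pays for a cache read and, most of the time, a single cache write.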