Skip to content

Commit d810695

Browse files
committed
chore(cubesql): Do not call async Node functions while planning
1 parent 4b85612 commit d810695

File tree

5 files changed

+137
-121
lines changed

5 files changed

+137
-121
lines changed

rust/cubesql/cubesql/src/compile/query_engine.rs

Lines changed: 69 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,8 @@
11
use crate::compile::engine::df::planner::CubeQueryPlanner;
2-
use std::{backtrace::Backtrace, collections::HashMap, future::Future, pin::Pin, sync::Arc};
2+
use std::{
3+
backtrace::Backtrace, collections::HashMap, future::Future, pin::Pin, sync::Arc,
4+
time::SystemTime,
5+
};
36

47
use crate::{
58
compile::{
@@ -21,8 +24,9 @@ use crate::{
2124
},
2225
config::ConfigObj,
2326
sql::{
24-
compiler_cache::CompilerCache, statement::SensitiveDataSanitizer, SessionManager,
25-
SessionState,
27+
compiler_cache::{CompilerCache, CompilerCacheEntry},
28+
statement::SensitiveDataSanitizer,
29+
SessionManager, SessionState,
2630
},
2731
transport::{LoadRequestMeta, MetaContext, SpanId, TransportService},
2832
CubeErrorCauseType,
@@ -78,6 +82,11 @@ pub trait QueryEngine {
7882

7983
fn sanitize_statement(&self, stmt: &Self::AstStatementType) -> Self::AstStatementType;
8084

85+
async fn get_cache_entry(
86+
&self,
87+
state: Arc<SessionState>,
88+
) -> Result<Arc<CompilerCacheEntry>, CompilationError>;
89+
8190
async fn plan(
8291
&self,
8392
stmt: Self::AstStatementType,
@@ -86,6 +95,26 @@ pub trait QueryEngine {
8695
meta: Arc<MetaContext>,
8796
state: Arc<SessionState>,
8897
) -> CompilationResult<(QueryPlan, Self::PlanMetadataType)> {
98+
let cache_entry = self.get_cache_entry(state.clone()).await?;
99+
100+
let planning_start = SystemTime::now();
101+
if let Some(span_id) = span_id.as_ref() {
102+
if let Some(auth_context) = state.auth_context() {
103+
self.transport_ref()
104+
.log_load_state(
105+
Some(span_id.clone()),
106+
auth_context,
107+
state.get_load_request_meta(),
108+
"SQL API Query Planning".to_string(),
109+
serde_json::json!({
110+
"query": span_id.query_key.clone(),
111+
}),
112+
)
113+
.await
114+
.map_err(|e| CompilationError::internal(e.to_string()))?;
115+
}
116+
}
117+
89118
let ctx = self.create_session_ctx(state.clone())?;
90119
let cube_ctx = self.create_cube_ctx(state.clone(), meta.clone(), ctx.clone())?;
91120

@@ -144,7 +173,7 @@ pub trait QueryEngine {
144173
let mut finalized_graph = self
145174
.compiler_cache_ref()
146175
.rewrite(
147-
state.auth_context().unwrap(),
176+
Arc::clone(&cache_entry),
148177
cube_ctx.clone(),
149178
converter.take_egraph(),
150179
&query_params.unwrap(),
@@ -192,6 +221,7 @@ pub trait QueryEngine {
192221
let result = rewriter
193222
.find_best_plan(
194223
root,
224+
cache_entry,
195225
state.auth_context().unwrap(),
196226
qtrace,
197227
span_id.clone(),
@@ -243,12 +273,31 @@ pub trait QueryEngine {
243273
// TODO: We should find what optimizers will be safety to use for OLAP queries
244274
guard.optimizer.rules = vec![];
245275
}
246-
if let Some(span_id) = span_id {
276+
if let Some(span_id) = &span_id {
247277
span_id.set_is_data_query(true).await;
248278
}
249279
};
250280

251281
log::debug!("Rewrite: {:#?}", rewrite_plan);
282+
283+
if let Some(span_id) = span_id.as_ref() {
284+
if let Some(auth_context) = state.auth_context() {
285+
self.transport_ref()
286+
.log_load_state(
287+
Some(span_id.clone()),
288+
auth_context,
289+
state.get_load_request_meta(),
290+
"SQL API Query Planning Success".to_string(),
291+
serde_json::json!({
292+
"query": span_id.query_key.clone(),
293+
"duration": planning_start.elapsed().unwrap().as_millis() as u64,
294+
}),
295+
)
296+
.await
297+
.map_err(|e| CompilationError::internal(e.to_string()))?;
298+
}
299+
}
300+
252301
let rewrite_plan = Self::evaluate_wrapped_sql(
253302
self.transport_ref().clone(),
254303
Arc::new(state.get_load_request_meta()),
@@ -501,6 +550,21 @@ impl QueryEngine for SqlQueryEngine {
501550
fn sanitize_statement(&self, stmt: &Self::AstStatementType) -> Self::AstStatementType {
502551
SensitiveDataSanitizer::new().replace(stmt.clone())
503552
}
553+
554+
async fn get_cache_entry(
555+
&self,
556+
state: Arc<SessionState>,
557+
) -> Result<Arc<CompilerCacheEntry>, CompilationError> {
558+
self.compiler_cache_ref()
559+
.get_cache_entry(
560+
state.auth_context().ok_or_else(|| {
561+
CompilationError::internal("Unable to get auth context".to_string())
562+
})?,
563+
state.protocol.clone(),
564+
)
565+
.await
566+
.map_err(|e| CompilationError::internal(e.to_string()))
567+
}
504568
}
505569

506570
fn is_olap_query(parent: &LogicalPlan) -> Result<bool, CompilationError> {

rust/cubesql/cubesql/src/compile/rewrite/rewriter.rs

Lines changed: 5 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ use crate::{
1515
CubeContext,
1616
},
1717
config::ConfigObj,
18-
sql::AuthContextRef,
18+
sql::{compiler_cache::CompilerCacheEntry, AuthContextRef},
1919
transport::{MetaContext, SpanId},
2020
CubeError,
2121
};
@@ -310,7 +310,7 @@ impl Rewriter {
310310

311311
pub async fn run_rewrite_to_completion(
312312
&mut self,
313-
auth_context: AuthContextRef,
313+
cache_entry: Arc<CompilerCacheEntry>,
314314
qtrace: &mut Option<Qtrace>,
315315
) -> Result<CubeEGraph, CubeError> {
316316
let cube_context = self.cube_context.clone();
@@ -323,11 +323,7 @@ impl Rewriter {
323323
.sessions
324324
.server
325325
.compiler_cache
326-
.rewrite_rules(
327-
auth_context.clone(),
328-
cube_context.session_state.protocol.clone(),
329-
false,
330-
)
326+
.rewrite_rules(cache_entry, false)
331327
.await?;
332328

333329
let (plan, qtrace_egraph_iterations) = tokio::task::spawn_blocking(move || {
@@ -392,6 +388,7 @@ impl Rewriter {
392388
pub async fn find_best_plan(
393389
&mut self,
394390
root: Id,
391+
cache_entry: Arc<CompilerCacheEntry>,
395392
auth_context: AuthContextRef,
396393
qtrace: &mut Option<Qtrace>,
397394
span_id: Option<Arc<SpanId>>,
@@ -407,11 +404,7 @@ impl Rewriter {
407404
.sessions
408405
.server
409406
.compiler_cache
410-
.rewrite_rules(
411-
auth_context.clone(),
412-
cube_context.session_state.protocol.clone(),
413-
true,
414-
)
407+
.rewrite_rules(cache_entry, true)
415408
.await?;
416409

417410
let (plan, qtrace_egraph_iterations, qtrace_best_graph) =

rust/cubesql/cubesql/src/compile/router.rs

Lines changed: 3 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ use crate::compile::{
33
StatusFlags,
44
};
55
use sqlparser::ast;
6-
use std::{collections::HashMap, sync::Arc, time::SystemTime};
6+
use std::{collections::HashMap, sync::Arc};
77

88
use crate::{
99
compile::{
@@ -61,50 +61,8 @@ impl QueryRouter {
6161
qtrace: &mut Option<Qtrace>,
6262
span_id: Option<Arc<SpanId>>,
6363
) -> CompilationResult<QueryPlan> {
64-
let planning_start = SystemTime::now();
65-
if let Some(span_id) = span_id.as_ref() {
66-
if let Some(auth_context) = self.state.auth_context() {
67-
self.session_manager
68-
.server
69-
.transport
70-
.log_load_state(
71-
Some(span_id.clone()),
72-
auth_context,
73-
self.state.get_load_request_meta(),
74-
"SQL API Query Planning".to_string(),
75-
serde_json::json!({
76-
"query": span_id.query_key.clone(),
77-
}),
78-
)
79-
.await
80-
.map_err(|e| CompilationError::internal(e.to_string()))?;
81-
}
82-
}
83-
let result = self
84-
.create_df_logical_plan(stmt.clone(), qtrace, span_id.clone())
85-
.await?;
86-
87-
if let Some(span_id) = span_id.as_ref() {
88-
if let Some(auth_context) = self.state.auth_context() {
89-
self.session_manager
90-
.server
91-
.transport
92-
.log_load_state(
93-
Some(span_id.clone()),
94-
auth_context,
95-
self.state.get_load_request_meta(),
96-
"SQL API Query Planning Success".to_string(),
97-
serde_json::json!({
98-
"query": span_id.query_key.clone(),
99-
"duration": planning_start.elapsed().unwrap().as_millis() as u64,
100-
}),
101-
)
102-
.await
103-
.map_err(|e| CompilationError::internal(e.to_string()))?;
104-
}
105-
}
106-
107-
return Ok(result);
64+
self.create_df_logical_plan(stmt.clone(), qtrace, span_id.clone())
65+
.await
10866
}
10967

11068
pub async fn plan(

0 commit comments

Comments (0)