diff --git a/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js b/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js index a90ecc14df3ea..b77536d90d277 100644 --- a/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js +++ b/packages/cubejs-schema-compiler/src/adapter/BaseQuery.js @@ -6,34 +6,34 @@ * @license Apache-2.0 */ -import R from 'ramda'; import cronParser from 'cron-parser'; -import moment from 'moment-timezone'; import inflection from 'inflection'; +import moment from 'moment-timezone'; +import R from 'ramda'; +import { + buildSqlAndParams as nativeBuildSqlAndParams, +} from '@cubejs-backend/native'; import { FROM_PARTITION_RANGE, MAX_SOURCE_ROW_LIMIT, - localTimestampToUtc, QueryAlias, getEnv, + localTimestampToUtc, timeSeries as timeSeriesBase } from '@cubejs-backend/shared'; -import { - buildSqlAndParams as nativeBuildSqlAndParams, -} from '@cubejs-backend/native'; import { UserError } from '../compiler/UserError'; -import { BaseMeasure } from './BaseMeasure'; +import { SqlParser } from '../parser/SqlParser'; import { BaseDimension } from './BaseDimension'; -import { BaseSegment } from './BaseSegment'; import { BaseFilter } from './BaseFilter'; import { BaseGroupFilter } from './BaseGroupFilter'; +import { BaseMeasure } from './BaseMeasure'; +import { BaseSegment } from './BaseSegment'; import { BaseTimeDimension } from './BaseTimeDimension'; +import { Granularity } from './Granularity'; import { ParamAllocator } from './ParamAllocator'; import { PreAggregations } from './PreAggregations'; -import { SqlParser } from '../parser/SqlParser'; -import { Granularity } from './Granularity'; const DEFAULT_PREAGGREGATIONS_SCHEMA = 'stb_pre_aggregations'; @@ -571,12 +571,9 @@ export class BaseQuery { * @returns {string} */ countAllQuery(sql) { - return `select count(*) ${ - this.escapeColumnName(QueryAlias.TOTAL_COUNT) - } from (\n${ - sql - }\n) ${ - this.escapeColumnName(QueryAlias.ORIGINAL_QUERY) + return `select count(*) ${this.escapeColumnName(QueryAlias.TOTAL_COUNT) + } from (\n${sql + }\n) ${this.escapeColumnName(QueryAlias.ORIGINAL_QUERY) }`; } @@ -967,8 +964,7 @@ export class BaseQuery { ).concat( R.map( ([multiplied, measure]) => this.withCubeAliasPrefix( - `${ - this.aliasName(measure.measure.replace('.', '_')) + `${this.aliasName(measure.measure.replace('.', '_')) }_cumulative`, () => this.overTimeSeriesQuery( multiplied @@ -983,7 +979,7 @@ export class BaseQuery { ), ) )(cumulativeMeasures) - // TODO SELECT * + // TODO SELECT * ).concat(multiStageMembers.map(m => `SELECT * FROM ${m.alias}`)); } @@ -1891,10 +1887,9 @@ export class BaseQuery { ) ), inlineWhereConditions); - return `SELECT ${this.selectAllDimensionsAndMeasures(measures)} FROM ${ - query + return `SELECT ${this.selectAllDimensionsAndMeasures(measures)} FROM ${query } ${this.baseWhere(filters.concat(inlineWhereConditions))}` + - (!this.safeEvaluateSymbolContext().ungrouped && this.groupByClause() || ''); + (!this.safeEvaluateSymbolContext().ungrouped && this.groupByClause() || ''); } /** @@ -1946,14 +1941,11 @@ export class BaseQuery { .join(', '); const primaryKeyJoinConditions = primaryKeyDimensions.map((pkd) => ( - `${ - this.escapeColumnName(QueryAlias.AGG_SUB_QUERY_KEYS) - }.${ - pkd.aliasName() - } = ${ - shouldBuildJoinForMeasureSelect - ? `${this.cubeAlias(keyCubeName)}.${pkd.aliasName()}` - : this.dimensionSql(pkd) + `${this.escapeColumnName(QueryAlias.AGG_SUB_QUERY_KEYS) + }.${pkd.aliasName() + } = ${shouldBuildJoinForMeasureSelect + ? 
`${this.cubeAlias(keyCubeName)}.${pkd.aliasName()}` + : this.dimensionSql(pkd) }` )).join(' AND '); @@ -2026,8 +2018,7 @@ export class BaseQuery { 'collectSubQueryDimensionsFor' ) ), inlineWhereConditions); - return `SELECT DISTINCT ${this.keysSelect(primaryKeyDimensions)} FROM ${ - query + return `SELECT DISTINCT ${this.keysSelect(primaryKeyDimensions)} FROM ${query } ${this.baseWhere(filters.concat(inlineWhereConditions))}`; } @@ -3337,7 +3328,7 @@ export class BaseQuery { } throw new UserError('Output schema is only supported for rollup pre-aggregations'); }, - { inputProps: { }, cache: this.queryCache } + { inputProps: {}, cache: this.queryCache } ); } @@ -3457,14 +3448,18 @@ export class BaseQuery { join: '{{ join_type }} JOIN {{ source }} ON {{ condition }}', cte: '{{ alias }} AS ({{ query | indent(2, true) }})', time_series_select: 'SELECT date_from::timestamp AS "date_from",\n' + - 'date_to::timestamp AS "date_to" \n' + - 'FROM(\n' + - ' VALUES ' + - '{% for time_item in seria %}' + - '(\'{{ time_item | join(\'\\\', \\\'\') }}\')' + - '{% if not loop.last %}, {% endif %}' + - '{% endfor %}' + - ') AS dates (date_from, date_to)' + 'date_to::timestamp AS "date_to" \n' + + 'FROM(\n' + + ' VALUES ' + + '{% for time_item in seria %}' + + '(\'{{ time_item | join(\'\\\', \\\'\') }}\')' + + '{% if not loop.last %}, {% endif %}' + + '{% endfor %}' + + ') AS dates (date_from, date_to)', + time_series_get_range: 'SELECT {{ max_expr }} as {{ quoted_max_name }},\n' + + '{{ min_expr }} as {{ quoted_min_name }}\n' + + 'FROM {{ from_prepared }}\n' + + '{% if filter %}WHERE {{ filter }}{% endif %}' }, expressions: { column_reference: '{% if table_name %}{{ table_name }}.{% endif %}{{ name }}', @@ -4078,11 +4073,11 @@ export class BaseQuery { const filterParamArg = filterParamArgs.filter(p => { const member = p.__member(); return member === filter.measure || - member === filter.dimension || - (aliases[member] && ( - aliases[member] === filter.measure || - aliases[member] === filter.dimension - )); + member === filter.dimension || + (aliases[member] && ( + aliases[member] === filter.measure || + aliases[member] === filter.dimension + )); })[0]; if (!filterParamArg) { diff --git a/packages/cubejs-schema-compiler/src/adapter/PostgresQuery.ts b/packages/cubejs-schema-compiler/src/adapter/PostgresQuery.ts index 818a2f3c92d09..747b23fd9d19c 100644 --- a/packages/cubejs-schema-compiler/src/adapter/PostgresQuery.ts +++ b/packages/cubejs-schema-compiler/src/adapter/PostgresQuery.ts @@ -82,6 +82,9 @@ export class PostgresQuery extends BaseQuery { templates.types.double = 'DOUBLE PRECISION'; templates.types.binary = 'BYTEA'; templates.operators.is_not_distinct_from = 'IS NOT DISTINCT FROM'; + templates.statements.generated_time_series_select = 'SELECT date_from AS "date_from",\n' + + 'date_from + interval \'{{ granularity }}\' - interval \'1 millisecond\' AS "date_to" \n' + + 'FROM generate_series({{ start }}::timestamp, {{ end }}:: timestamp, \'{{ granularity }}\'::interval) "date_from" '; return templates; } diff --git a/packages/cubejs-schema-compiler/test/integration/postgres/sql-generation.test.ts b/packages/cubejs-schema-compiler/test/integration/postgres/sql-generation.test.ts index c2bc3d7d93b17..b9d0088fe7252 100644 --- a/packages/cubejs-schema-compiler/test/integration/postgres/sql-generation.test.ts +++ b/packages/cubejs-schema-compiler/test/integration/postgres/sql-generation.test.ts @@ -90,6 +90,13 @@ describe('SQL Generation', () => { offset: 'start' } }, + countRollingThreeMonth: { + type: 
'count', + rollingWindow: { + trailing: '3 month', + offset: 'end' + } + }, countRollingUnbounded: { type: 'count', rollingWindow: { @@ -1088,6 +1095,34 @@ SELECT 1 AS revenue, cast('2024-01-01' AS timestamp) as time UNION ALL { visitors__created_at_day: '2017-01-10T00:00:00.000Z', visitors__count_rolling: null } ])); + if (getEnv('nativeSqlPlanner')) { + it('rolling count without date range', async () => { + await runQueryTest({ + measures: [ + 'visitors.countRollingThreeMonth' + ], + timeDimensions: [{ + dimension: 'visitors.created_at', + granularity: 'month', + }], + order: [{ + id: 'visitors.created_at' + }], + timezone: 'America/Los_Angeles' + }, [ + { visitors__created_at_month: '2016-09-01T00:00:00.000Z', visitors__count_rolling_three_month: '1' }, + { visitors__created_at_month: '2016-10-01T00:00:00.000Z', visitors__count_rolling_three_month: '1' }, + { visitors__created_at_month: '2016-11-01T00:00:00.000Z', visitors__count_rolling_three_month: '1' }, + { visitors__created_at_month: '2016-12-01T00:00:00.000Z', visitors__count_rolling_three_month: null }, + { visitors__created_at_month: '2017-01-01T00:00:00.000Z', visitors__count_rolling_three_month: '5' }, + ]); + }); + } else { + it.skip('rolling count without date range', () => { + // Skipping because it works only in Tesseract + }); + } + it('rolling qtd', async () => runQueryTest({ measures: [ 'visitors.revenue_qtd' diff --git a/rust/cubesqlplanner/cubesqlplanner/src/plan/builder/select.rs b/rust/cubesqlplanner/cubesqlplanner/src/plan/builder/select.rs index 33b22bb41f35f..ce1518295d22b 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/plan/builder/select.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/plan/builder/select.rs @@ -75,6 +75,28 @@ impl SelectBuilder { .add_column(SchemaColumn::new(alias.clone(), Some(member.full_name()))); } + pub fn add_projection_function_expression( + &mut self, + function: &str, + args: Vec>, + alias: String, + ) { + let expr = Expr::Function(FunctionExpression { + function: function.to_string(), + arguments: args + .into_iter() + .map(|r| Expr::Member(MemberExpression::new(r.clone()))) + .collect(), + }); + let aliased_expr = AliasedExpr { + expr, + alias: alias.clone(), + }; + + self.projection_columns.push(aliased_expr); + self.result_schema + .add_column(SchemaColumn::new(alias.clone(), None)); + } pub fn add_projection_coalesce_member( &mut self, member: &Rc, @@ -147,16 +169,16 @@ impl SelectBuilder { self.ctes = ctes; } - fn make_cube_references(&self) -> HashMap { + pub fn make_cube_references(from: Rc) -> HashMap { let mut refs = HashMap::new(); - match &self.from.source { + match &from.source { crate::plan::FromSource::Single(source) => { - self.add_cube_reference_if_needed(source, &mut refs) + Self::add_cube_reference_if_needed(source, &mut refs) } crate::plan::FromSource::Join(join) => { - self.add_cube_reference_if_needed(&join.root, &mut refs); + Self::add_cube_reference_if_needed(&join.root, &mut refs); for join_item in join.joins.iter() { - self.add_cube_reference_if_needed(&join_item.from, &mut refs); + Self::add_cube_reference_if_needed(&join_item.from, &mut refs); } } crate::plan::FromSource::Empty => {} @@ -165,7 +187,6 @@ impl SelectBuilder { } fn add_cube_reference_if_needed( - &self, source: &SingleAliasedSource, refs: &mut HashMap, ) { @@ -194,7 +215,7 @@ impl SelectBuilder { } pub fn build(self, mut nodes_factory: SqlNodesFactory) -> Select { - let cube_references = self.make_cube_references(); + let cube_references = Self::make_cube_references(self.from.clone()); 
nodes_factory.set_cube_name_references(cube_references); let schema = if self.projection_columns.is_empty() { self.make_asteriks_schema() diff --git a/rust/cubesqlplanner/cubesqlplanner/src/plan/mod.rs b/rust/cubesqlplanner/cubesqlplanner/src/plan/mod.rs index 2117bf42d5611..97d6d46cd27f7 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/plan/mod.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/plan/mod.rs @@ -21,5 +21,5 @@ pub use order::OrderBy; pub use query_plan::QueryPlan; pub use schema::{QualifiedColumnName, Schema, SchemaColumn}; pub use select::{AliasedExpr, Select}; -pub use time_series::TimeSeries; +pub use time_series::{TimeSeries, TimeSeriesDateRange}; pub use union::Union; diff --git a/rust/cubesqlplanner/cubesqlplanner/src/plan/time_series.rs b/rust/cubesqlplanner/cubesqlplanner/src/plan/time_series.rs index 43de9351a5405..32672739de1ec 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/plan/time_series.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/plan/time_series.rs @@ -1,30 +1,39 @@ use super::{Schema, SchemaColumn}; -use crate::planner::sql_templates::PlanSqlTemplates; +use crate::planner::{ + query_tools::QueryTools, + sql_templates::{PlanSqlTemplates, TemplateProjectionColumn}, +}; use cubenativeutils::CubeError; use std::rc::Rc; pub struct TimeSeries { - pub time_dimension_name: String, - pub from_date: Option, - pub to_date: Option, - pub seria: Vec>, - pub schema: Rc, + query_tools: Rc, + #[allow(dead_code)] + time_dimension_name: String, + date_range: TimeSeriesDateRange, + granularity: String, + schema: Rc, +} + +pub enum TimeSeriesDateRange { + Filter(String, String), + Generated(String), // Name of cte with min/max dates } impl TimeSeries { pub fn new( + query_tools: Rc, time_dimension_name: String, - from_date: Option, - to_date: Option, - seria: Vec>, + date_range: TimeSeriesDateRange, + granularity: String, ) -> Self { let column = SchemaColumn::new(format!("date_from"), Some(time_dimension_name.clone())); let schema = Rc::new(Schema::new(vec![column])); Self { + query_tools, time_dimension_name, - from_date, - to_date, - seria, + granularity, + date_range, schema, } } @@ -34,10 +43,68 @@ impl TimeSeries { } pub fn to_sql(&self, templates: &PlanSqlTemplates) -> Result { - templates.time_series_select( - self.from_date.clone(), - self.to_date.clone(), - self.seria.clone(), - ) + if templates.supports_generated_time_series() { + let (from_date, to_date) = match &self.date_range { + TimeSeriesDateRange::Filter(from_date, to_date) => { + (format!("'{}'", from_date), format!("'{}'", to_date)) + } + TimeSeriesDateRange::Generated(cte_name) => { + let date_from_name = format!("date_from"); + let date_to_name = format!("date_to"); + let from_column = TemplateProjectionColumn { + expr: date_from_name.clone(), + alias: date_from_name.clone(), + aliased: templates.column_aliased(&date_from_name, &date_from_name)?, + }; + let to_column = TemplateProjectionColumn { + expr: date_to_name.clone(), + alias: date_to_name.clone(), + aliased: templates.column_aliased(&date_to_name, &date_to_name)?, + }; + let from = templates.select( + vec![], + &cte_name, + vec![from_column], + None, + vec![], + None, + vec![], + None, + None, + false, + )?; + let to = templates.select( + vec![], + &cte_name, + vec![to_column], + None, + vec![], + None, + vec![], + None, + None, + false, + )?; + (format!("({})", from), format!("({})", to)) + } + }; + templates.generated_time_series_select(&from_date, &to_date, &self.granularity) + } else { + let (from_date, to_date) = match &self.date_range 
{ + TimeSeriesDateRange::Filter(from_date, to_date) => { + (format!("'{}'", from_date), format!("'{}'", to_date)) + } + TimeSeriesDateRange::Generated(_) => { + return Err(CubeError::user( + "Date range is required for time series in drivers where generated time series is not supported".to_string(), + )); + } + }; + let series = self.query_tools.base_tools().generate_time_series( + self.granularity.clone(), + vec![from_date.clone(), to_date.clone()], + )?; + templates.time_series_select(from_date.clone(), to_date.clone(), series) + } } } diff --git a/rust/cubesqlplanner/cubesqlplanner/src/planner/filter/base_filter.rs b/rust/cubesqlplanner/cubesqlplanner/src/planner/filter/base_filter.rs index c6895eec4b81d..4ee4af6536d67 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/planner/filter/base_filter.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/planner/filter/base_filter.rs @@ -26,6 +26,7 @@ pub struct BaseFilter { filter_type: FilterType, filter_operator: FilterOperator, values: Vec>, + use_raw_values: bool, templates: FilterTemplates, } @@ -66,6 +67,7 @@ impl BaseFilter { filter_operator, values, templates, + use_raw_values: false, })) } @@ -73,6 +75,7 @@ impl BaseFilter { &self, filter_operator: FilterOperator, values: Vec>, + use_raw_values: bool, ) -> Rc { Rc::new(Self { query_tools: self.query_tools.clone(), @@ -81,6 +84,7 @@ impl BaseFilter { filter_operator, values, templates: self.templates.clone(), + use_raw_values, }) } @@ -96,6 +100,10 @@ impl BaseFilter { &self.filter_operator } + pub fn use_raw_values(&self) -> bool { + self.use_raw_values + } + pub fn member_name(&self) -> String { self.member_evaluator.full_name() } @@ -432,6 +440,9 @@ impl BaseFilter { value: &String, use_db_time_zone: bool, ) -> Result { + if self.use_raw_values { + return Ok(value.clone()); + } let from = self.format_from_date(value)?; let res = if use_db_time_zone && &from != FROM_PARTITION_RANGE { @@ -447,6 +458,9 @@ impl BaseFilter { value: &String, use_db_time_zone: bool, ) -> Result { + if self.use_raw_values { + return Ok(value.clone()); + } let from = self.format_to_date(value)?; let res = if use_db_time_zone && &from != TO_PARTITION_RANGE { @@ -552,6 +566,9 @@ impl BaseFilter { } fn allocate_timestamp_param(&self, param: &str) -> String { + if self.use_raw_values { + return param.to_string(); + } let placeholder = self.query_tools.allocate_param(param); format!("{}::timestamptz", placeholder) } diff --git a/rust/cubesqlplanner/cubesqlplanner/src/planner/filter/base_segment.rs b/rust/cubesqlplanner/cubesqlplanner/src/planner/filter/base_segment.rs index e82c46bde2a5f..865f7ddcf676d 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/planner/filter/base_segment.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/planner/filter/base_segment.rs @@ -1,8 +1,7 @@ -use crate::cube_bridge::dimension_definition::DimensionDefinition; use crate::planner::query_tools::QueryTools; use crate::planner::sql_evaluator::{MemberExpressionSymbol, MemberSymbol, SqlCall}; use crate::planner::sql_templates::PlanSqlTemplates; -use crate::planner::{evaluate_with_context, BaseMember, BaseMemberHelper, VisitorContext}; +use crate::planner::{evaluate_with_context, VisitorContext}; use cubenativeutils::CubeError; use std::rc::Rc; diff --git a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/mod.rs b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/mod.rs index 72f0fba465109..b3cad91e92b19 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/mod.rs +++ 
b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/mod.rs @@ -3,7 +3,6 @@ pub mod dimension_subquery_planner; pub mod full_key_query_aggregate_planner; pub mod join_planner; pub mod multi_stage; -pub mod multi_stage_query_planner; pub mod multiplied_measures_query_planner; pub mod order_planner; pub mod query_planner; @@ -13,7 +12,7 @@ pub use common_utils::CommonUtils; pub use dimension_subquery_planner::DimensionSubqueryPlanner; pub use full_key_query_aggregate_planner::FullKeyAggregateQueryPlanner; pub use join_planner::JoinPlanner; -pub use multi_stage_query_planner::MultiStageQueryPlanner; +pub use multi_stage::MultiStageQueryPlanner; pub use multiplied_measures_query_planner::MultipliedMeasuresQueryPlanner; pub use order_planner::OrderPlanner; pub use query_planner::QueryPlanner; diff --git a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/applied_state.rs b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/applied_state.rs index db86d06d00b43..0846d4c6bdbf0 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/applied_state.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/applied_state.rs @@ -174,6 +174,7 @@ impl MultiStageAppliedState { member_name, &self.time_dimensions_filters, &operator, + None, &values, &None, ); @@ -190,6 +191,7 @@ impl MultiStageAppliedState { member_name, &self.time_dimensions_filters, &operator, + None, &values, &None, ); @@ -207,6 +209,25 @@ impl MultiStageAppliedState { member_name, &self.time_dimensions_filters, &operator, + None, + &vec![], + &Some(replacement_values), + ); + } + + pub fn replace_range_to_subquery_in_date_filter( + &mut self, + member_name: &String, + new_from: String, + new_to: String, + ) { + let operator = FilterOperator::InDateRange; + let replacement_values = vec![Some(new_from), Some(new_to)]; + self.time_dimensions_filters = self.change_date_range_filter_impl( + member_name, + &self.time_dimensions_filters, + &operator, + Some(true), &vec![], &Some(replacement_values), ); @@ -217,6 +238,7 @@ impl MultiStageAppliedState { member_name: &String, filters: &Vec, operator: &FilterOperator, + use_raw_values: Option, additional_values: &Vec>, replacement_values: &Option>>, ) -> Vec { @@ -230,6 +252,7 @@ impl MultiStageAppliedState { member_name, filters, operator, + use_raw_values, additional_values, replacement_values, ), @@ -246,7 +269,8 @@ impl MultiStageAppliedState { itm.values().clone() }; values.extend(additional_values.iter().cloned()); - itm.change_operator(operator.clone(), values) + let use_raw_values = use_raw_values.unwrap_or(itm.use_raw_values()); + itm.change_operator(operator.clone(), values, use_raw_values) } else { itm.clone() }; diff --git a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/member.rs b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/member.rs index de7b566469b72..982318a1cf5d2 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/member.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/member.rs @@ -59,10 +59,17 @@ impl MultiStageTimeShift { } } +#[derive(Clone)] +pub struct TimeSeriesDescription { + pub time_dimension: Rc, + pub date_range_cte: Option, +} + #[derive(Clone)] pub enum MultiStageLeafMemberType { Measure, - TimeSeries(Rc), + TimeSeries(Rc), + TimeSeriesGetRange(Rc), } #[derive(Clone)] diff --git a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/member_query_planner.rs 
b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/member_query_planner.rs index f50a29733f513..0a7752bd3a21a 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/member_query_planner.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/member_query_planner.rs @@ -1,12 +1,14 @@ use super::{ MultiStageInodeMember, MultiStageInodeMemberType, MultiStageMemberType, - MultiStageQueryDescription, RollingWindowDescription, + MultiStageQueryDescription, RollingWindowDescription, TimeSeriesDescription, }; use crate::plan::{ Cte, Expr, From, JoinBuilder, JoinCondition, MemberExpression, OrderBy, QualifiedColumnName, - QueryPlan, Schema, SelectBuilder, TimeSeries, + QueryPlan, Schema, SelectBuilder, TimeSeries, TimeSeriesDateRange, +}; +use crate::planner::planners::{ + multi_stage::RollingWindowType, OrderPlanner, QueryPlanner, SimpleQueryPlanner, }; -use crate::planner::planners::{multi_stage::RollingWindowType, OrderPlanner, QueryPlanner}; use crate::planner::query_tools::QueryTools; use crate::planner::sql_evaluator::sql_nodes::SqlNodesFactory; use crate::planner::sql_evaluator::ReferencesBuilder; @@ -52,27 +54,89 @@ impl MultiStageMemberQueryPlanner { super::MultiStageLeafMemberType::TimeSeries(time_dimension) => { self.plan_time_series_query(time_dimension.clone()) } + super::MultiStageLeafMemberType::TimeSeriesGetRange(time_dimension) => { + self.plan_time_series_get_range_query(time_dimension.clone()) + } }, } } - fn plan_time_series_query( + fn plan_time_series_get_range_query( &self, time_dimension: Rc, ) -> Result, CubeError> { - let granularity = time_dimension.get_granularity().unwrap(); //FIXME remove this unwrap - let date_range = time_dimension.get_date_range().unwrap(); //FIXME remove this unwrap - let from_date = date_range[0].clone(); - let to_date = date_range[1].clone(); - let seria = self - .query_tools - .base_tools() - .generate_time_series(granularity, date_range.clone())?; + let cte_query_properties = QueryProperties::try_new_from_precompiled( + self.query_tools.clone(), + vec![], + vec![], + vec![time_dimension.clone()], + vec![], + vec![], + vec![], + vec![], + vec![], + None, + None, + true, + true, + )?; + let mut context_factory = SqlNodesFactory::new(); + let simple_query_planer = SimpleQueryPlanner::new( + self.query_tools.clone(), + cte_query_properties, + SqlNodesFactory::new(), + ); + let (mut select_builder, render_references) = simple_query_planer.make_select_builder()?; + let args = vec![time_dimension.clone().as_base_member()]; + select_builder.add_projection_function_expression( + "MAX", + args.clone(), + "date_to".to_string(), + ); + + select_builder.add_projection_function_expression( + "MIN", + args.clone(), + "date_from".to_string(), + ); + context_factory.set_render_references(render_references); + let select = Rc::new(select_builder.build(context_factory)); + let query_plan = Rc::new(QueryPlan::Select(select)); + Ok(Rc::new(Cte::new( + query_plan, + format!("time_series_get_range"), + ))) + } + + fn plan_time_series_query( + &self, + time_series_description: Rc, + ) -> Result, CubeError> { + let time_dimension = time_series_description.time_dimension.clone(); + let granularity = time_dimension.get_granularity().map_or_else( + || { + Err(CubeError::user( + "Time dimension granularity is required for rolling window".to_string(), + )) + }, + |g| Ok(g.clone()), + )?; + let ts_date_range = if let Some(date_range) = time_dimension.get_date_range() { + 
TimeSeriesDateRange::Filter(date_range[0].clone(), date_range[1].clone()) + } else { + if let Some(date_range_cte) = &time_series_description.date_range_cte { + TimeSeriesDateRange::Generated(date_range_cte.clone()) + } else { + return Err(CubeError::internal( + "Date range cte is required for time series without date range".to_string(), + )); + } + }; let time_seira = TimeSeries::new( + self.query_tools.clone(), time_dimension.full_name(), - Some(from_date), - Some(to_date), - seria, + ts_date_range, + granularity, ); let query_plan = Rc::new(QueryPlan::TimeSeries(Rc::new(time_seira))); Ok(Rc::new(Cte::new(query_plan, format!("time_series")))) diff --git a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/mod.rs b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/mod.rs index 4a70b00f7c009..7911ee00124b6 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/mod.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/mod.rs @@ -1,9 +1,13 @@ mod applied_state; mod member; mod member_query_planner; +mod multi_stage_query_planner; mod query_description; +mod rolling_window_planner; pub use applied_state::MultiStageAppliedState; pub use member::*; pub use member_query_planner::MultiStageMemberQueryPlanner; +pub use multi_stage_query_planner::MultiStageQueryPlanner; pub use query_description::MultiStageQueryDescription; +pub use rolling_window_planner::RollingWindowPlanner; diff --git a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage_query_planner.rs b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/multi_stage_query_planner.rs similarity index 50% rename from rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage_query_planner.rs rename to rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/multi_stage_query_planner.rs index 771f61cf0de54..f3c2c2279a247 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage_query_planner.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/multi_stage_query_planner.rs @@ -1,17 +1,17 @@ -use super::multi_stage::{ +use super::{ MultiStageAppliedState, MultiStageInodeMember, MultiStageInodeMemberType, MultiStageLeafMemberType, MultiStageMember, MultiStageMemberQueryPlanner, MultiStageMemberType, - MultiStageQueryDescription, MultiStageTimeShift, RollingWindowDescription, + MultiStageQueryDescription, MultiStageTimeShift, RollingWindowPlanner, }; -use crate::cube_bridge::measure_definition::RollingWindow; use crate::plan::{Cte, From, Schema, Select, SelectBuilder}; use crate::planner::query_tools::QueryTools; use crate::planner::sql_evaluator::collectors::has_multi_stage_members; use crate::planner::sql_evaluator::collectors::member_childs; use crate::planner::sql_evaluator::sql_nodes::SqlNodesFactory; use crate::planner::sql_evaluator::MemberSymbol; +use crate::planner::BaseMember; +use crate::planner::QueryProperties; use crate::planner::{BaseDimension, BaseMeasure}; -use crate::planner::{BaseTimeDimension, GranularityHelper, QueryProperties}; use cubenativeutils::CubeError; use itertools::Itertools; use std::collections::HashMap; @@ -20,11 +20,16 @@ use std::rc::Rc; pub struct MultiStageQueryPlanner { query_tools: Rc, query_properties: Rc, + rolling_window_planner: RollingWindowPlanner, } impl MultiStageQueryPlanner { pub fn new(query_tools: Rc, query_properties: Rc) -> Self { Self { + rolling_window_planner: RollingWindowPlanner::new( + query_tools.clone(), + 
query_properties.clone(), + ), query_tools, query_properties, } @@ -34,7 +39,7 @@ impl MultiStageQueryPlanner { .query_properties .all_members(false) .into_iter() - .filter_map(|memb| -> Option> { + .filter_map(|memb: Rc| -> Option> { match has_multi_stage_members(&memb.member_evaluator(), false) { Ok(true) => Some(Ok(memb)), Ok(false) => None, @@ -156,235 +161,6 @@ impl MultiStageQueryPlanner { Ok(inode) } - fn add_time_series( - &self, - time_dimension: Rc, - state: Rc, - descriptions: &mut Vec>, - ) -> Result, CubeError> { - let description = - if let Some(description) = descriptions.iter().find(|d| d.alias() == "time_series") { - description.clone() - } else { - let time_series_node = MultiStageQueryDescription::new( - MultiStageMember::new( - MultiStageMemberType::Leaf(MultiStageLeafMemberType::TimeSeries( - time_dimension.clone(), - )), - time_dimension.member_evaluator(), - true, - false, - ), - state.clone(), - vec![], - "time_series".to_string(), - ); - descriptions.push(time_series_node.clone()); - time_series_node - }; - Ok(description) - } - - fn add_rolling_window_base( - &self, - member: Rc, - state: Rc, - ungrouped: bool, - descriptions: &mut Vec>, - ) -> Result, CubeError> { - let alias = format!("cte_{}", descriptions.len()); - let description = MultiStageQueryDescription::new( - MultiStageMember::new( - MultiStageMemberType::Leaf(MultiStageLeafMemberType::Measure), - member, - self.query_properties.ungrouped() || ungrouped, - true, - ), - state, - vec![], - alias.clone(), - ); - descriptions.push(description.clone()); - Ok(description) - } - - fn get_to_date_rolling_granularity( - &self, - rolling_window: &RollingWindow, - ) -> Result, CubeError> { - let is_to_date = rolling_window - .rolling_type - .as_ref() - .map_or(false, |tp| tp == "to_date"); - - if is_to_date { - if let Some(granularity) = &rolling_window.granularity { - Ok(Some(granularity.clone())) - } else { - Err(CubeError::user(format!( - "Granularity required for to_date rolling window" - ))) - } - } else { - Ok(None) - } - } - - fn make_rolling_base_state( - &self, - time_dimension: Rc, - rolling_window: &RollingWindow, - state: Rc, - ) -> Result, CubeError> { - let time_dimension_name = time_dimension.member_evaluator().full_name(); - let mut new_state = state.clone_state(); - let trailing_granularity = - GranularityHelper::granularity_from_interval(&rolling_window.trailing); - let leading_granularity = - GranularityHelper::granularity_from_interval(&rolling_window.leading); - let window_granularity = - GranularityHelper::min_granularity(&trailing_granularity, &leading_granularity)?; - let result_granularity = GranularityHelper::min_granularity( - &window_granularity, - &time_dimension.get_granularity(), - )?; - - if time_dimension.get_date_range().is_some() && result_granularity.is_some() { - let granularity = time_dimension.get_granularity().unwrap(); //FIXME remove this unwrap - let date_range = time_dimension.get_date_range().unwrap(); //FIXME remove this unwrap - let series = self - .query_tools - .base_tools() - .generate_time_series(granularity, date_range.clone())?; - if !series.is_empty() { - let new_from_date = series.first().unwrap()[0].clone(); - let new_to_date = series.last().unwrap()[1].clone(); - new_state.replace_range_in_date_filter( - &time_dimension_name, - new_from_date, - new_to_date, - ); - } - } - - new_state - .change_time_dimension_granularity(&time_dimension_name, result_granularity.clone()); - - if let Some(granularity) = self.get_to_date_rolling_granularity(rolling_window)? 
{ - new_state.replace_to_date_date_range_filter(&time_dimension_name, &granularity); - } else { - new_state.replace_regular_date_range_filter( - &time_dimension_name, - rolling_window.trailing.clone(), - rolling_window.leading.clone(), - ); - } - - Ok(Rc::new(new_state)) - } - - fn try_make_rolling_window( - &self, - member: Rc, - state: Rc, - descriptions: &mut Vec>, - ) -> Result>, CubeError> { - if let Some(measure) = BaseMeasure::try_new(member.clone(), self.query_tools.clone())? { - if measure.is_cumulative() { - let rolling_window = if let Some(rolling_window) = measure.rolling_window() { - rolling_window.clone() - } else { - RollingWindow { - trailing: Some("unbounded".to_string()), - leading: None, - offset: None, - rolling_type: None, - granularity: None, - } - }; - let ungrouped = match member.as_ref() { - MemberSymbol::Measure(measure_symbol) => { - measure_symbol.is_rolling_window() && !measure_symbol.is_addictive() - } - _ => false, - }; - let time_dimensions = self.query_properties.time_dimensions(); - if time_dimensions.len() == 0 { - let rolling_base = self.add_rolling_window_base( - member.clone(), - state.clone(), - ungrouped, - descriptions, - )?; - return Ok(Some(rolling_base)); - } - if time_dimensions.len() != 1 { - return Err(CubeError::internal( - "Rolling window requires one time dimension".to_string(), - )); - } - let time_dimension = time_dimensions[0].clone(); - - let input = vec![ - self.add_time_series(time_dimension.clone(), state.clone(), descriptions)?, - self.add_rolling_window_base( - member.clone(), - self.make_rolling_base_state( - time_dimension.clone(), - &rolling_window, - state.clone(), - )?, - ungrouped, - descriptions, - )?, - ]; - - let time_dimension = time_dimensions[0].clone(); - - let alias = format!("cte_{}", descriptions.len()); - - let rolling_window_descr = if let Some(granularity) = - self.get_to_date_rolling_granularity(&rolling_window)? - { - RollingWindowDescription::new_to_date(time_dimension, granularity) - } else { - RollingWindowDescription::new_regular( - time_dimension, - rolling_window.trailing.clone(), - rolling_window.leading.clone(), - rolling_window.offset.clone().unwrap_or("end".to_string()), - ) - }; - - let inode_member = MultiStageInodeMember::new( - MultiStageInodeMemberType::RollingWindow(rolling_window_descr), - vec![], - vec![], - None, - vec![], - ); - - let description = MultiStageQueryDescription::new( - MultiStageMember::new( - MultiStageMemberType::Inode(inode_member), - member, - self.query_properties.ungrouped(), - false, - ), - state.clone(), - input, - alias.clone(), - ); - descriptions.push(description.clone()); - Ok(Some(description)) - } else { - Ok(None) - } - } else { - Ok(None) - } - } - fn make_queries_descriptions( &self, member: Rc, @@ -399,9 +175,11 @@ impl MultiStageQueryPlanner { return Ok(exists.clone()); }; - if let Some(rolling_window_query) = - self.try_make_rolling_window(member.clone(), state.clone(), descriptions)? - { + if let Some(rolling_window_query) = self.rolling_window_planner.try_plan_rolling_window( + member.clone(), + state.clone(), + descriptions, + )? 
{ return Ok(rolling_window_query); } diff --git a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/rolling_window_planner.rs b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/rolling_window_planner.rs new file mode 100644 index 0000000000000..418648c4541c0 --- /dev/null +++ b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/multi_stage/rolling_window_planner.rs @@ -0,0 +1,352 @@ +use super::{ + MultiStageAppliedState, MultiStageInodeMember, MultiStageInodeMemberType, + MultiStageLeafMemberType, MultiStageMember, MultiStageMemberType, MultiStageQueryDescription, + RollingWindowDescription, TimeSeriesDescription, +}; +use crate::cube_bridge::measure_definition::RollingWindow; +use crate::planner::query_tools::QueryTools; +use crate::planner::sql_evaluator::MemberSymbol; +use crate::planner::sql_templates::{PlanSqlTemplates, TemplateProjectionColumn}; +use crate::planner::BaseMeasure; +use crate::planner::{BaseTimeDimension, GranularityHelper, QueryProperties}; +use cubenativeutils::CubeError; +use std::rc::Rc; + +pub struct RollingWindowPlanner { + query_tools: Rc, + query_properties: Rc, +} + +impl RollingWindowPlanner { + pub fn new(query_tools: Rc, query_properties: Rc) -> Self { + Self { + query_tools, + query_properties, + } + } + + pub fn try_plan_rolling_window( + &self, + member: Rc, + state: Rc, + descriptions: &mut Vec>, + ) -> Result>, CubeError> { + if let Some(measure) = BaseMeasure::try_new(member.clone(), self.query_tools.clone())? { + if measure.is_cumulative() { + let rolling_window = if let Some(rolling_window) = measure.rolling_window() { + rolling_window.clone() + } else { + RollingWindow { + trailing: Some("unbounded".to_string()), + leading: None, + offset: None, + rolling_type: None, + granularity: None, + } + }; + let ungrouped = match member.as_ref() { + MemberSymbol::Measure(measure_symbol) => { + measure_symbol.is_rolling_window() && !measure_symbol.is_addictive() + } + _ => false, + }; + let time_dimensions = self.query_properties.time_dimensions(); + if time_dimensions.len() == 0 { + let rolling_base = self.add_rolling_window_base( + member.clone(), + state.clone(), + ungrouped, + descriptions, + )?; + return Ok(Some(rolling_base)); + } + if time_dimensions.len() != 1 { + return Err(CubeError::internal( + "Rolling window requires one time dimension".to_string(), + )); + } + let time_dimension = time_dimensions[0].clone(); + + let input = vec![ + self.add_time_series(time_dimension.clone(), state.clone(), descriptions)?, + self.add_rolling_window_base( + member.clone(), + self.make_rolling_base_state( + time_dimension.clone(), + &rolling_window, + state.clone(), + )?, + ungrouped, + descriptions, + )?, + ]; + + let time_dimension = time_dimensions[0].clone(); + + let alias = format!("cte_{}", descriptions.len()); + + let rolling_window_descr = if let Some(granularity) = + self.get_to_date_rolling_granularity(&rolling_window)? 
+ { + RollingWindowDescription::new_to_date(time_dimension, granularity) + } else { + RollingWindowDescription::new_regular( + time_dimension, + rolling_window.trailing.clone(), + rolling_window.leading.clone(), + rolling_window.offset.clone().unwrap_or("end".to_string()), + ) + }; + + let inode_member = MultiStageInodeMember::new( + MultiStageInodeMemberType::RollingWindow(rolling_window_descr), + vec![], + vec![], + None, + vec![], + ); + + let description = MultiStageQueryDescription::new( + MultiStageMember::new( + MultiStageMemberType::Inode(inode_member), + member, + self.query_properties.ungrouped(), + false, + ), + state.clone(), + input, + alias.clone(), + ); + descriptions.push(description.clone()); + Ok(Some(description)) + } else { + Ok(None) + } + } else { + Ok(None) + } + } + fn add_time_series_get_range_query( + &self, + time_dimension: Rc, + state: Rc, + descriptions: &mut Vec>, + ) -> Result, CubeError> { + let description = if let Some(description) = descriptions + .iter() + .find(|d| d.alias() == "time_series_get_range") + { + description.clone() + } else { + let time_series_get_range_node = MultiStageQueryDescription::new( + MultiStageMember::new( + MultiStageMemberType::Leaf(MultiStageLeafMemberType::TimeSeriesGetRange( + time_dimension.clone(), + )), + time_dimension.member_evaluator(), + true, + false, + ), + state.clone(), + vec![], + "time_series_get_range".to_string(), + ); + descriptions.push(time_series_get_range_node.clone()); + time_series_get_range_node + }; + Ok(description) + } + + fn add_time_series( + &self, + time_dimension: Rc, + state: Rc, + descriptions: &mut Vec>, + ) -> Result, CubeError> { + let description = if let Some(description) = + descriptions.iter().find(|d| d.alias() == "time_series") + { + description.clone() + } else { + let get_range_query_description = if time_dimension.get_date_range().is_some() { + None + } else { + Some(self.add_time_series_get_range_query( + time_dimension.clone(), + state.clone(), + descriptions, + )?) 
+ }; + let time_series_node = MultiStageQueryDescription::new( + MultiStageMember::new( + MultiStageMemberType::Leaf(MultiStageLeafMemberType::TimeSeries(Rc::new( + TimeSeriesDescription { + time_dimension: time_dimension.clone(), + date_range_cte: get_range_query_description.map(|d| d.alias().clone()), + }, + ))), + time_dimension.member_evaluator(), + true, + false, + ), + state.clone(), + vec![], + "time_series".to_string(), + ); + descriptions.push(time_series_node.clone()); + time_series_node + }; + Ok(description) + } + + fn add_rolling_window_base( + &self, + member: Rc, + state: Rc, + ungrouped: bool, + descriptions: &mut Vec>, + ) -> Result, CubeError> { + let alias = format!("cte_{}", descriptions.len()); + let description = MultiStageQueryDescription::new( + MultiStageMember::new( + MultiStageMemberType::Leaf(MultiStageLeafMemberType::Measure), + member, + self.query_properties.ungrouped() || ungrouped, + true, + ), + state, + vec![], + alias.clone(), + ); + descriptions.push(description.clone()); + Ok(description) + } + + fn get_to_date_rolling_granularity( + &self, + rolling_window: &RollingWindow, + ) -> Result, CubeError> { + let is_to_date = rolling_window + .rolling_type + .as_ref() + .map_or(false, |tp| tp == "to_date"); + + if is_to_date { + if let Some(granularity) = &rolling_window.granularity { + Ok(Some(granularity.clone())) + } else { + Err(CubeError::user(format!( + "Granularity required for to_date rolling window" + ))) + } + } else { + Ok(None) + } + } + + fn make_time_seires_from_to_dates_suqueries_conditions( + &self, + time_series_cte_name: &str, + ) -> Result<(String, String), CubeError> { + let templates = PlanSqlTemplates::new(self.query_tools.templates_render()); + let from_expr = format!("min(date_from)"); + let to_expr = format!("max(date_to)"); + let alias = format!("value"); + + let from_column = TemplateProjectionColumn { + expr: from_expr.clone(), + alias: alias.clone(), + aliased: templates.column_aliased(&from_expr, &alias)?, + }; + + let to_column = TemplateProjectionColumn { + expr: to_expr.clone(), + alias: alias.clone(), + aliased: templates.column_aliased(&to_expr, &alias)?, + }; + let from = templates.select( + vec![], + &time_series_cte_name, + vec![from_column], + None, + vec![], + None, + vec![], + None, + None, + false, + )?; + let to = templates.select( + vec![], + &time_series_cte_name, + vec![to_column], + None, + vec![], + None, + vec![], + None, + None, + false, + )?; + Ok((format!("({})", from), format!("({})", to))) + } + + fn make_rolling_base_state( + &self, + time_dimension: Rc, + rolling_window: &RollingWindow, + state: Rc, + ) -> Result, CubeError> { + let time_dimension_name = time_dimension.member_evaluator().full_name(); + let mut new_state = state.clone_state(); + let trailing_granularity = + GranularityHelper::granularity_from_interval(&rolling_window.trailing); + let leading_granularity = + GranularityHelper::granularity_from_interval(&rolling_window.leading); + let window_granularity = + GranularityHelper::min_granularity(&trailing_granularity, &leading_granularity)?; + let result_granularity = GranularityHelper::min_granularity( + &window_granularity, + &time_dimension.get_granularity(), + )?; + + let templates = PlanSqlTemplates::new(self.query_tools.templates_render()); + + if templates.supports_generated_time_series() { + let (from, to) = + self.make_time_seires_from_to_dates_suqueries_conditions("time_series")?; + new_state.replace_range_to_subquery_in_date_filter(&time_dimension_name, from, to); + } else if 
time_dimension.get_date_range().is_some() && result_granularity.is_some() { + let granularity = time_dimension.get_granularity().unwrap(); //FIXME remove this unwrap + let date_range = time_dimension.get_date_range().unwrap(); //FIXME remove this unwrap + let series = self + .query_tools + .base_tools() + .generate_time_series(granularity, date_range.clone())?; + if !series.is_empty() { + let new_from_date = series.first().unwrap()[0].clone(); + let new_to_date = series.last().unwrap()[1].clone(); + new_state.replace_range_in_date_filter( + &time_dimension_name, + new_from_date, + new_to_date, + ); + } + } + + new_state + .change_time_dimension_granularity(&time_dimension_name, result_granularity.clone()); + + if let Some(granularity) = self.get_to_date_rolling_granularity(rolling_window)? { + new_state.replace_to_date_date_range_filter(&time_dimension_name, &granularity); + } else { + new_state.replace_regular_date_range_filter( + &time_dimension_name, + rolling_window.trailing.clone(), + rolling_window.leading.clone(), + ); + } + + Ok(Rc::new(new_state)) + } +} diff --git a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/simple_query_planer.rs b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/simple_query_planer.rs index 55ea723ecb739..85de141ea0c9e 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/simple_query_planer.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/planner/planners/simple_query_planer.rs @@ -1,10 +1,11 @@ use super::{DimensionSubqueryPlanner, JoinPlanner, OrderPlanner}; -use crate::plan::{Filter, Select, SelectBuilder}; +use crate::plan::{Filter, QualifiedColumnName, Select, SelectBuilder}; use crate::planner::query_tools::QueryTools; use crate::planner::sql_evaluator::collectors::collect_sub_query_dimensions_from_symbols; use crate::planner::sql_evaluator::sql_nodes::SqlNodesFactory; use crate::planner::QueryProperties; use cubenativeutils::CubeError; +use std::collections::HashMap; use std::rc::Rc; pub struct SimpleQueryPlanner { @@ -30,18 +31,7 @@ impl SimpleQueryPlanner { } pub fn plan(&self) -> Result, CubeError> { - let join = self.query_properties.simple_query_join()?; - let subquery_dimensions = collect_sub_query_dimensions_from_symbols( - &self.query_properties.all_member_symbols(false), - &self.join_planner, - &join, - self.query_tools.clone(), - )?; - let dimension_subquery_planner = DimensionSubqueryPlanner::try_new( - &subquery_dimensions, - self.query_tools.clone(), - self.query_properties.clone(), - )?; + let (mut select_builder, render_references) = self.make_select_builder()?; let filter = self.query_properties.all_filters(); let having = if self.query_properties.measures_filters().is_empty() { @@ -52,10 +42,6 @@ impl SimpleQueryPlanner { }) }; let mut context_factory = self.context_factory.clone(); - let from = - self.join_planner - .make_join_node_impl(&None, join, &dimension_subquery_planner)?; - let mut select_builder = SelectBuilder::new(from.clone()); for member in self .query_properties @@ -64,7 +50,6 @@ impl SimpleQueryPlanner { { select_builder.add_projection_member(member, None); } - let render_references = dimension_subquery_planner.dimensions_refs().clone(); context_factory.set_render_references(render_references); context_factory.set_rendered_as_multiplied_measures( self.query_properties @@ -81,4 +66,27 @@ impl SimpleQueryPlanner { let res = Rc::new(select_builder.build(context_factory)); Ok(res) } + + pub fn make_select_builder( + &self, + ) -> Result<(SelectBuilder, HashMap), CubeError> { + let join = 
self.query_properties.simple_query_join().unwrap(); + let subquery_dimensions = collect_sub_query_dimensions_from_symbols( + &self.query_properties.all_member_symbols(false), + &self.join_planner, + &join, + self.query_tools.clone(), + )?; + let dimension_subquery_planner = DimensionSubqueryPlanner::try_new( + &subquery_dimensions, + self.query_tools.clone(), + self.query_properties.clone(), + )?; + let from = + self.join_planner + .make_join_node_impl(&None, join, &dimension_subquery_planner)?; + let render_references = dimension_subquery_planner.dimensions_refs().clone(); + let select_builder = SelectBuilder::new(from); + Ok((select_builder, render_references)) + } } diff --git a/rust/cubesqlplanner/cubesqlplanner/src/planner/sql_evaluator/sql_call.rs b/rust/cubesqlplanner/cubesqlplanner/src/planner/sql_evaluator/sql_call.rs index 3436c9ce4b041..46ae9fb850c9d 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/planner/sql_evaluator/sql_call.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/planner/sql_evaluator/sql_call.rs @@ -285,14 +285,20 @@ impl SqlCall { values: None, }) } - FilterItem::Item(filter) => Some(NativeFilterItem { - or: None, - and: None, - member: Some(filter.member_name()), - dimension: None, - operator: Some(filter.filter_operator().to_string()), - values: Some(filter.values().clone()), - }), + FilterItem::Item(filter) => { + if filter.use_raw_values() { + None + } else { + Some(NativeFilterItem { + or: None, + and: None, + member: Some(filter.member_name()), + dimension: None, + operator: Some(filter.filter_operator().to_string()), + values: Some(filter.values().clone()), + }) + } + } FilterItem::Segment(_) => None, } } diff --git a/rust/cubesqlplanner/cubesqlplanner/src/planner/sql_templates/plan.rs b/rust/cubesqlplanner/cubesqlplanner/src/planner/sql_templates/plan.rs index 7ef0e6e1f6001..3ee5f623478a3 100644 --- a/rust/cubesqlplanner/cubesqlplanner/src/planner/sql_templates/plan.rs +++ b/rust/cubesqlplanner/cubesqlplanner/src/planner/sql_templates/plan.rs @@ -158,6 +158,16 @@ impl PlanSqlTemplates { ) } + pub fn max(&self, expr: &str) -> Result { + self.render + .render_template("functions/MAX", context! { args_concat => expr }) + } + + pub fn min(&self, expr: &str) -> Result { + self.render + .render_template("functions/MIN", context! { args_concat => expr }) + } + pub fn concat_strings(&self, strings: &Vec) -> Result { self.render.render_template( "expressions/concat_strings", @@ -188,8 +198,8 @@ impl PlanSqlTemplates { pub fn time_series_select( &self, - from_date: Option, - to_date: Option, + from_date: String, + to_date: String, seria: Vec>, ) -> Result { self.render.render_template( @@ -202,6 +212,28 @@ impl PlanSqlTemplates { ) } + pub fn time_series_get_range( + &self, + max_expr: &str, + min_expr: &str, + max_name: &str, + min_name: &str, + from: &str, + ) -> Result { + let quoted_min_name = self.quote_identifier(min_name)?; + let quoted_max_name = self.quote_identifier(max_name)?; + self.render.render_template( + "expressions/time_series_get_range", + context! 
{ + max_expr => max_expr, + min_expr => min_expr, + from_prepared => from, + quoted_min_name => quoted_min_name, + quoted_max_name => quoted_max_name + }, + ) + } + pub fn select( &self, ctes: Vec, @@ -295,6 +327,24 @@ impl PlanSqlTemplates { .contains_template("operators/is_not_distinct_from") } + pub fn supports_generated_time_series(&self) -> bool { + self.render + .contains_template("statements/generated_time_series_select") + } + + pub fn generated_time_series_select( + &self, + start: &str, + end: &str, + granularity: &str, + ) -> Result { + let granularity = format!("1 {}", granularity); + self.render.render_template( + "statements/generated_time_series_select", + context! { start => start, end => end, granularity => granularity }, + ) + } + pub fn param(&self, param_index: usize) -> Result { self.render .render_template("params/param", context! { param_index => param_index }) diff --git a/yarn.lock b/yarn.lock index da0ef281c598e..15531f0346097 100644 --- a/yarn.lock +++ b/yarn.lock @@ -17235,7 +17235,7 @@ generate-object-property@^1.0.0: dependencies: is-property "^1.0.0" -generic-pool@*, generic-pool@^3.8.2: +generic-pool@*, generic-pool@^3.6.0, generic-pool@^3.8.2: version "3.9.0" resolved "https://registry.yarnpkg.com/generic-pool/-/generic-pool-3.9.0.tgz#36f4a678e963f4fdb8707eab050823abc4e8f5e4" integrity sha512-hymDOu5B53XvN4QT9dBmZxPX4CWhBPPLguTZ9MMFeFa/Kg0xWVfylOVNlJji/E7yTZWFd/q9GO5TxDLq156D7g==
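A minimal sketch of the SQL that the new Postgres statements.generated_time_series_select template renders to, assuming a month granularity and an illustrative date range (the dates below are examples, not values from the tests; generated_time_series_select() in plan.rs prefixes the granularity with "1 " before rendering):

    SELECT date_from AS "date_from",
    date_from + interval '1 month' - interval '1 millisecond' AS "date_to"
    FROM generate_series('2017-01-01'::timestamp, '2017-03-31'::timestamp, '1 month'::interval) "date_from"

When the query carries no date range filter (the TimeSeriesDateRange::Generated case), the start and end arguments are instead rendered as subselects over the time_series_get_range CTE that plan_time_series_get_range_query() builds with the MIN/MAX projections added via add_projection_function_expression.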