@@ -15,6 +15,7 @@
use std::any::Any;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::Arc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
@@ -84,9 +85,47 @@ impl TransformRecursiveCteSource {
union_plan: UnionAll,
) -> Result<ProcessorPtr> {
let mut union_plan = union_plan;

// Recursive CTEs use internal MEMORY tables addressed by name in the current database.
// If we keep using the stable scan name (cte name/alias), concurrent queries can interfere
// by creating/dropping/recreating the same table name, leading to wrong or flaky results.
//
// Make the internal table names query-unique by prefixing them with the query id.
// This is purely internal and does not change user-visible semantics.
let rcte_prefix = make_rcte_prefix(&ctx.get_id());
let local_cte_scan_names = {
let names = collect_local_recursive_scan_names(&union_plan.right);
if names.is_empty() {
union_plan.cte_scan_names.clone()
} else {
names
}
};
if union_plan.cte_scan_names != local_cte_scan_names {
union_plan.cte_scan_names = local_cte_scan_names;
}
let local_cte_scan_name_set: HashSet<&str> = union_plan
.cte_scan_names
.iter()
.map(String::as_str)
.collect();

let mut exec_ids: HashMap<String, Vec<u64>> = HashMap::new();
assign_exec_ids(&mut union_plan.left, &mut exec_ids);
assign_exec_ids(&mut union_plan.right, &mut exec_ids);
rewrite_assign_and_strip_recursive_cte(
&mut union_plan.left,
&local_cte_scan_name_set,
&rcte_prefix,
&mut exec_ids,
);
rewrite_assign_and_strip_recursive_cte(
&mut union_plan.right,
&local_cte_scan_name_set,
&rcte_prefix,
&mut exec_ids,
);
for name in union_plan.cte_scan_names.iter_mut() {
*name = format!("{rcte_prefix}{name}");
}

let left_outputs = union_plan
.left_outputs
@@ -134,6 +173,8 @@ impl TransformRecursiveCteSource {
if ctx.get_settings().get_max_cte_recursive_depth()? < recursive_step {
return Err(ErrorCode::Internal("Recursive depth is reached"));
}
#[cfg(debug_assertions)]
crate::test_kits::rcte_hooks::maybe_pause_before_step(&ctx.get_id(), recursive_step).await;
let mut cte_scan_tables = vec![];
let plan = if recursive_step == 0 {
// Find all cte scan in the union right child plan, then create memory table for them.
@@ -172,6 +213,88 @@
}
}

fn make_rcte_prefix(query_id: &str) -> String {
// Keep it readable and safe as an identifier.
// Use enough entropy to be effectively unique for concurrent queries.
let mut short = String::with_capacity(32);
for ch in query_id.chars() {
if ch.is_ascii_alphanumeric() {
short.push(ch);
}
if short.len() >= 32 {
break;
Review comment on lines +224 to +225 (P1): Preserve full query-id entropy in the RCTE table prefix.

make_rcte_prefix stops after collecting 32 alphanumeric characters, so two concurrent queries whose IDs share the same first 32 alnum chars will still map to the same internal __rcte_* table names. That reintroduces the very cross-query interference this patch sets out to eliminate (wrong or flaky recursive CTE results) for clients that supply custom or long query IDs. Generate the prefix from the full query ID (e.g., the full sanitized ID or a hash of it) instead of truncating here.

}
}
if short.is_empty() {
short.push_str("unknown");
}
format!("__rcte_{short}_")
}
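
// A minimal sketch of the fix the review comment above suggests
// (hypothetical, not part of this patch): derive the prefix from a hash
// of the full query id, assuming std's DefaultHasher is acceptable, so
// prefixes stay unique even when two ids share their first 32
// alphanumeric characters.
fn make_rcte_prefix_hashed(query_id: &str) -> String {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Hash the entire query id instead of truncating it, so distinct ids
    // cannot collide on a shared alphanumeric prefix.
    let mut hasher = DefaultHasher::new();
    query_id.hash(&mut hasher);
    // 16 hex digits of the 64-bit hash; identifier-safe by construction.
    format!("__rcte_{:016x}_", hasher.finish())
}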

fn rewrite_assign_and_strip_recursive_cte(
plan: &mut PhysicalPlan,
local_cte_scan_name_set: &HashSet<&str>,
prefix: &str,
exec_ids: &mut HashMap<String, Vec<u64>>,
) {
// Only nested recursive UNION nodes that reference the current recursive CTE should be
// downgraded to normal unions to avoid nested recursive sources for the same table.
if let Some(union_all) = UnionAll::from_mut_physical_plan(plan) {
if !union_all.cte_scan_names.is_empty()
&& union_all
.cte_scan_names
.iter()
.all(|name| local_cte_scan_name_set.contains(name.as_str()))
{
union_all.cte_scan_names.clear();
}
}

if let Some(recursive_cte_scan) = RecursiveCteScan::from_mut_physical_plan(plan) {
if local_cte_scan_name_set.contains(recursive_cte_scan.table_name.as_str()) {
recursive_cte_scan.table_name = format!("{prefix}{}", recursive_cte_scan.table_name);
let id = NEXT_R_CTE_ID.fetch_add(1, Ordering::Relaxed);
recursive_cte_scan.exec_id = Some(id);
exec_ids
.entry(recursive_cte_scan.table_name.clone())
.or_default()
.push(id);
}
}

for child in plan.children_mut() {
rewrite_assign_and_strip_recursive_cte(child, local_cte_scan_name_set, prefix, exec_ids);
}
}

fn collect_local_recursive_scan_names(plan: &PhysicalPlan) -> Vec<String> {
fn walk(plan: &PhysicalPlan, names: &mut Vec<String>, seen: &mut HashSet<String>) {
// Nested recursive unions belong to other recursive CTEs. Leave them to their own
// TransformRecursiveCteSource instance.
if let Some(union_all) = UnionAll::from_physical_plan(plan) {
if !union_all.cte_scan_names.is_empty() {
return;
}
}

if let Some(recursive_cte_scan) = RecursiveCteScan::from_physical_plan(plan) {
if seen.insert(recursive_cte_scan.table_name.clone()) {
names.push(recursive_cte_scan.table_name.clone());
}
return;
}

for child in plan.children() {
walk(child, names, seen);
}
}

let mut names = Vec::new();
let mut seen = HashSet::new();
walk(plan, &mut names, &mut seen);
names
}
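
// For intuition (illustrative SQL, not from this patch): with nested
// recursive CTEs, the inner recursive UnionAll carries its own
// cte_scan_names, so the walk above returns early and leaves its scans
// to the inner CTE's own TransformRecursiveCteSource:
//
//   WITH RECURSIVE outer_t AS (
//       SELECT 1 AS n
//       UNION ALL
//       SELECT o.n + 1
//       FROM outer_t o, (
//           WITH RECURSIVE inner_t AS (...) SELECT ... FROM inner_t
//       ) i
//       WHERE o.n < 10
//   )
//   SELECT * FROM outer_t;
//
// collect_local_recursive_scan_names on outer_t's right child returns
// only ["outer_t"]; any RecursiveCteScan under inner_t's union is skipped.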

#[async_trait::async_trait]
impl AsyncSource for TransformRecursiveCteSource {
const NAME: &'static str = "TransformRecursiveCteSource";
@@ -236,21 +359,6 @@ impl AsyncSource for TransformRecursiveCteSource {
}
}

fn assign_exec_ids(plan: &mut PhysicalPlan, mapping: &mut HashMap<String, Vec<u64>>) {
if let Some(recursive_cte_scan) = RecursiveCteScan::from_mut_physical_plan(plan) {
let id = NEXT_R_CTE_ID.fetch_add(1, Ordering::Relaxed);
recursive_cte_scan.exec_id = Some(id);
mapping
.entry(recursive_cte_scan.table_name.clone())
.or_default()
.push(id);
}

for child in plan.children_mut() {
assign_exec_ids(child, mapping);
}
}

async fn drop_tables(ctx: Arc<QueryContext>, table_names: Vec<String>) -> Result<()> {
for table_name in table_names {
let drop_table_plan = DropTablePlan {
@@ -311,7 +419,6 @@ async fn create_memory_table_for_cte_scan(

let mut options = BTreeMap::new();
options.insert(OPT_KEY_RECURSIVE_CTE.to_string(), "1".to_string());

self.plans.push(CreateTablePlan {
schema,
create_option: CreateOption::CreateIfNotExists,
1 change: 1 addition & 0 deletions src/query/service/src/test_kits/mod.rs
@@ -31,3 +31,4 @@ pub use config::config_with_spill;
pub use context::*;
pub use fixture::*;
pub use fuse::*;
pub mod rcte_hooks;
180 changes: 180 additions & 0 deletions src/query/service/src/test_kits/rcte_hooks.rs
@@ -0,0 +1,180 @@
// Copyright 2021 Datafuse Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Test-only hooks for recursive CTE execution.
//!
//! This module is intended to make race conditions reproducible by providing
//! deterministic pause/resume points in the recursive CTE executor.
//!
//! By default no hooks are installed and the hook checks are no-ops.

use std::collections::HashMap;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::OnceLock;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;

use tokio::sync::Notify;

static HOOKS: OnceLock<Arc<RcteHookRegistry>> = OnceLock::new();

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct GateKey {
query_id: String,
step: usize,
}

impl GateKey {
fn new(query_id: &str, step: usize) -> Self {
Self {
query_id: query_id.to_string(),
step,
}
}
}

#[derive(Default)]
pub struct RcteHookRegistry {
gates: Mutex<HashMap<GateKey, Arc<PauseGate>>>,
}

impl RcteHookRegistry {
pub fn global() -> Arc<RcteHookRegistry> {
HOOKS
.get_or_init(|| Arc::new(RcteHookRegistry::default()))
.clone()
}

pub fn install_pause_before_step(&self, query_id: &str, step: usize) -> Arc<PauseGate> {
let mut gates = self.gates.lock().unwrap();
let key = GateKey::new(query_id, step);
gates
.entry(key)
.or_insert_with(|| Arc::new(PauseGate::new(step)))
.clone()
}

fn get_gate(&self, query_id: &str, step: usize) -> Option<Arc<PauseGate>> {
let key = GateKey::new(query_id, step);
self.gates.lock().unwrap().get(&key).cloned()
}
}

/// A reusable pause gate for a single step number.
///
/// When the code hits the hook point, it increments `arrived` and blocks until
/// the test releases the same hit index via `release(hit_no)`.
pub struct PauseGate {
step: usize,
arrived: AtomicUsize,
released: AtomicUsize,
arrived_notify: Notify,
released_notify: Notify,
}

impl PauseGate {
fn new(step: usize) -> Self {
Self {
step,
arrived: AtomicUsize::new(0),
released: AtomicUsize::new(0),
arrived_notify: Notify::new(),
released_notify: Notify::new(),
}
}

pub fn step(&self) -> usize {
self.step
}

pub fn arrived(&self) -> usize {
self.arrived.load(Ordering::Acquire)
}

pub async fn wait_arrived_at_least(&self, n: usize) {
loop {
if self.arrived() >= n {
return;
}

let notified = self.arrived_notify.notified();
tokio::pin!(notified);
notified.as_mut().enable();

// Re-check after registration to avoid missing a notify between
// the condition check and awaiting.
if self.arrived() >= n {
return;
}

notified.await;
}
}

/// Release the `hit_no`-th arrival (1-based).
pub fn release(&self, hit_no: usize) {
// Monotonic release.
let mut cur = self.released.load(Ordering::Acquire);
while cur < hit_no {
match self
.released
.compare_exchange(cur, hit_no, Ordering::AcqRel, Ordering::Acquire)
{
Ok(_) => break,
Err(v) => cur = v,
}
}
self.released_notify.notify_waiters();
}

async fn hit(&self) {
let hit_no = self.arrived.fetch_add(1, Ordering::AcqRel) + 1;
self.arrived_notify.notify_waiters();

loop {
if self.released.load(Ordering::Acquire) >= hit_no {
return;
}

let notified = self.released_notify.notified();
tokio::pin!(notified);
notified.as_mut().enable();

// Re-check after registration to avoid missing a notify between
// the condition check and awaiting.
if self.released.load(Ordering::Acquire) >= hit_no {
return;
}

notified.await;
}
}
}

/// Called from the recursive CTE executor.
///
/// If a pause gate is installed for `step`, this call will block until released.
#[async_backtrace::framed]
pub async fn maybe_pause_before_step(query_id: &str, step: usize) {
let Some(registry) = HOOKS.get() else {
return;
};
let Some(gate) = registry.get_gate(query_id, step) else {
return;
};
gate.hit().await;
}
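
// A sketch of how a test might drive these hooks (hypothetical test
// body; the query id would come from the session under test). All calls
// below are the public API defined above.
async fn pause_and_release_example(query_id: &str) {
    // Install a gate before the executor reaches recursive step 1.
    let gate = RcteHookRegistry::global().install_pause_before_step(query_id, 1);

    // ... spawn the recursive query on another task; its executor calls
    // maybe_pause_before_step(query_id, 1) and blocks on this gate ...

    // Wait until the executor has actually arrived at the hook point.
    gate.wait_arrived_at_least(1).await;

    // ... perform the concurrent interference being tested here ...

    // Release the first (1-based) arrival so the query can proceed.
    gate.release(1);
}
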
1 change: 1 addition & 0 deletions src/query/service/tests/it/sql/mod.rs
@@ -15,3 +15,4 @@
mod exec;
mod expr;
mod planner;
mod recursive_cte;