Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -11089,7 +11089,7 @@ ORDER BY 1, 2, 3, 4 DESC LIMIT 25;
35NAME | 7 | 5NAME | 1
(25 rows)

-- Test that SharedInputScan within the same slice is always executed
-- Test that ShareInputScan within the same slice is always executed
set gp_cte_sharing=on;
-- start_ignore
CREATE TABLE car (a int, b int);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ select gp_inject_fault('execsort_sort_bounded_heap', 'status', 2);

(1 row)

-- test if shared input scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- test if Share Input Scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- where the child is a Sort node
drop table if exists testsisc;
NOTICE: table "testsisc" does not exist, skipping
Expand All @@ -104,7 +104,7 @@ insert into testsisc select i, i % 1000, i % 100000, i % 75 from
(select count(*) as nsegments from gp_segment_configuration where role='p' and content >= 0) foo) bar;
set gp_resqueue_print_operator_memory_limits=on;
set statement_mem='2MB';
-- ORCA does not generate SharedInputScan with a Sort node underneath it. For
-- ORCA does not generate ShareInputScan with a Sort node underneath it. For
-- the following query, ORCA disregards the order by inside the cte definition;
-- planner on the other hand does not.
set optimizer=off;
Expand All @@ -114,8 +114,8 @@ select gp_inject_fault('execshare_input_next', 'reset', 2);
Success:
(1 row)

-- Set QueryFinishPending to true after SharedInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of shared input scan's child node.
-- Set QueryFinishPending to true after ShareInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of Share Input Scan's child node.
select gp_inject_fault('execshare_input_next', 'finish_pending', 2);
gp_inject_fault
-----------------
Expand All @@ -137,16 +137,16 @@ select gp_inject_fault('execshare_input_next', 'status', 2);

(1 row)

-- test if shared input scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- test if Share Input Scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- where the child is a Sort node and sort_mk algorithm is used
select gp_inject_fault('execshare_input_next', 'reset', 2);
gp_inject_fault
-----------------
Success:
(1 row)

-- Set QueryFinishPending to true after SharedInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of shared input scan's child node.
-- Set QueryFinishPending to true after ShareInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of Share Input Scan's child node.
select gp_inject_fault('execshare_input_next', 'finish_pending', 2);
gp_inject_fault
-----------------
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ CREATE TABLE tts_foo (i int, j int) distributed by(i);
insert into tts_foo select i, i from generate_series(1,80000)i;
ANALYZE tts_foo;
set gp_cte_sharing=on;
-- CBDB_PARALLEL_FIXME: since we disabled shared input scan in parallel mode, sisc_xslice_temp_files
-- CBDB_PARALLEL_FIXME: since we disabled Share Input Scan in parallel mode, sisc_xslice_temp_files
-- will never be triggered. We need to set max_parallel_workers_per_gather to 0 in this case.
set max_parallel_workers_per_gather = 0;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ CREATE TABLE tts_foo (i int, j int) distributed by(i);
insert into tts_foo select i, i from generate_series(1,80000)i;
ANALYZE tts_foo;
set gp_cte_sharing=on;
-- CBDB_PARALLEL_FIXME: since we disabled shared input scan in parallel mode, sisc_xslice_temp_files
-- CBDB_PARALLEL_FIXME: since we disabled Share Input Scan in parallel mode, sisc_xslice_temp_files
-- will never be triggered. We need to set max_parallel_workers_per_gather to 0 in this case.
set max_parallel_workers_per_gather = 0;
-- CASE 1: when temp_tablespaces is set, hashagg and share-input-scan
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10329,7 +10329,7 @@ WHERE e.deptno = dc1.deptno AND
m.deptno = dmc1.dept_mgr_no
ORDER BY 1, 2, 3, 4 DESC LIMIT 25;

-- Test that SharedInputScan within the same slice is always executed
-- Test that ShareInputScan within the same slice is always executed
set gp_cte_sharing=on;

-- start_ignore
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ reset enable_parallel;

select gp_inject_fault('execsort_sort_bounded_heap', 'status', 2);

-- test if shared input scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- test if Share Input Scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- where the child is a Sort node
drop table if exists testsisc;
create table testsisc (i1 int, i2 int, i3 int, i4 int);
Expand All @@ -48,13 +48,13 @@ insert into testsisc select i, i % 1000, i % 100000, i % 75 from

set gp_resqueue_print_operator_memory_limits=on;
set statement_mem='2MB';
-- ORCA does not generate SharedInputScan with a Sort node underneath it. For
-- ORCA does not generate ShareInputScan with a Sort node underneath it. For
-- the following query, ORCA disregards the order by inside the cte definition;
-- planner on the other hand does not.
set optimizer=off;
select gp_inject_fault('execshare_input_next', 'reset', 2);
-- Set QueryFinishPending to true after SharedInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of shared input scan's child node.
-- Set QueryFinishPending to true after ShareInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of Share Input Scan's child node.
select gp_inject_fault('execshare_input_next', 'finish_pending', 2);

set enable_parallel = off;
Expand All @@ -64,13 +64,13 @@ select * from cte c1, cte c2 limit 2;

select gp_inject_fault('execshare_input_next', 'status', 2);

-- test if shared input scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- test if Share Input Scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- where the child is a Sort node and sort_mk algorithm is used


select gp_inject_fault('execshare_input_next', 'reset', 2);
-- Set QueryFinishPending to true after SharedInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of shared input scan's child node.
-- Set QueryFinishPending to true after ShareInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of Share Input Scan's child node.
select gp_inject_fault('execshare_input_next', 'finish_pending', 2);

with cte as (select i2 from testsisc order by i2)
Expand Down
8 changes: 4 additions & 4 deletions src/backend/cdb/cdbmutate.c
Original file line number Diff line number Diff line change
Expand Up @@ -825,8 +825,8 @@ shareinput_peekmot(ApplyShareInputContext *ctxt)
* plan.
*
* To work around that issue, create a CTE for each shared input node, with
* columns that match the target list of the SharedInputScan's subplan,
* and replace the target list entries of the SharedInputScan with
* columns that match the target list of the ShareInputScan's subplan,
* and replace the target list entries of the ShareInputScan with
* Vars that point to the CTE instead of the child plan.
*/
Plan *
Expand Down Expand Up @@ -893,8 +893,8 @@ replace_shareinput_targetlists_walker(Node *node, PlannerInfo *root, bool fPop)
/*
* Replace all the target list entries.
*
* SharedInputScan nodes are not projection-capable, so the target
* list of the SharedInputScan matches the subplan's target list.
* ShareInputScan nodes are not projection-capable, so the target
* list of the ShareInputScan matches the subplan's target list.
*/
newtargetlist = NIL;
attno = 1;
Expand Down
6 changes: 3 additions & 3 deletions src/backend/executor/nodeShareInputScan.c
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@
/*
* In a cross-slice ShareinputScan, the producer and consumer processes
* communicate using shared memory. There's a hash table containing one
* 'shareinput_share_state' for each in-progress shared input scan.
* 'shareinput_share_state' for each in-progress Share Input Scan.
*
 * The hash table itself, and the fields within every entry, are protected
* by ShareInputScanLock. (Although some operations get away without the
Expand Down Expand Up @@ -631,11 +631,11 @@ ExecSquelchShareInputScan(ShareInputScanState *node, bool force)
ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);

/*
* If this SharedInputScan is shared within the same slice then its
* If this ShareInputScan is shared within the same slice then its
* subtree may still need to be executed and the motions in the subtree
* cannot yet be stopped. Thus, don't recurse in this case.
*
* In squelching a cross-slice SharedInputScan writer, we need to ensure
* In squelching a cross-slice ShareInputScan writer, we need to ensure
* we don't block any reader on other slices as a result of not
* materializing the shared plan.
*
Expand Down
2 changes: 1 addition & 1 deletion src/backend/gpopt/translate/CTranslatorDXLToPlStmt.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4445,7 +4445,7 @@ CTranslatorDXLToPlStmt::TranslateDXLCTEProducerToSharedScan(
CDXLPhysicalCTEProducer::Cast(cte_producer_dxlnode->GetOperator());
ULONG cte_id = cte_prod_dxlop->Id();

// create the shared input scan representing the CTE Producer
// create the Share Input Scan representing the CTE Producer
ShareInputScan *shared_input_scan = MakeNode(ShareInputScan);
shared_input_scan->share_id = cte_id;
shared_input_scan->discard_output = true;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -367,7 +367,7 @@ void CTranslatorDXLToExpr::PruneCTEs()
poper->RecalOutputColumns(umask, pprgpcrsz);

// Align consumer and producer output columns.
// In fact, we can support the column projection in consumer(SharedInputScan).
// In fact, we can support the column projection in consumer(ShareInputScan).
// However, non-requested columns in consumer(which from producer) may be added to the consumer,
// which will make the plan for consumer(support projection) very complicated.
//
Expand Down
2 changes: 1 addition & 1 deletion src/include/gpopt/translate/CContextDXLToPlStmt.h
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ class CContextDXLToPlStmt
// register a newly created CTE producer
void RegisterCTEProducerInfo(ULONG cte_id, ULongPtrArray *producer_output_colidx_map, ShareInputScan *siscan);

// return the shared input scan plans representing the CTE producer
// return the Share Input Scan plans representing the CTE producer
std::pair<ULongPtrArray *, ShareInputScan *> GetCTEProducerInfo(ULONG cte_id) const;

// return list of range table entries
Expand Down
2 changes: 1 addition & 1 deletion src/include/optimizer/cost.h
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,7 @@ extern void cost_group(Path *path, PlannerInfo *root,
List *quals,
Cost input_startup_cost, Cost input_total_cost,
double input_tuples);
/* GPDB_92_MERGE_FIXME: parameterized path for shared input scan? */
/* GPDB_92_MERGE_FIXME: parameterized path for Share Input Scan? */
extern void cost_shareinputscan(Path *path, PlannerInfo *root, Cost sharecost, double ntuples, int width);
extern void initial_cost_nestloop(PlannerInfo *root,
JoinCostWorkspace *workspace,
Expand Down
2 changes: 1 addition & 1 deletion src/test/regress/expected/qp_with_clause.out
Original file line number Diff line number Diff line change
Expand Up @@ -11090,7 +11090,7 @@ ORDER BY 1, 2, 3, 4 DESC LIMIT 25;
35NAME | 7 | 5NAME | 1
(25 rows)

-- Test that SharedInputScan within the same slice is always executed
-- Test that ShareInputScan within the same slice is always executed
set gp_cte_sharing=on;
-- start_ignore
CREATE TABLE car (a int, b int);
Expand Down
2 changes: 1 addition & 1 deletion src/test/regress/expected/qp_with_clause_optimizer.out
Original file line number Diff line number Diff line change
Expand Up @@ -11139,7 +11139,7 @@ ORDER BY 1, 2, 3, 4 DESC LIMIT 25;
35NAME | 7 | 5NAME | 1
(25 rows)

-- Test that SharedInputScan within the same slice is always executed
-- Test that ShareInputScan within the same slice is always executed
set gp_cte_sharing=on;
-- start_ignore
CREATE TABLE car (a int, b int);
Expand Down
14 changes: 7 additions & 7 deletions src/test/regress/expected/query_finish_pending.out
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ select gp_inject_fault('execsort_sort_bounded_heap', 'status', 2);

(1 row)

-- test if shared input scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- test if Share Input Scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- where the child is a Sort node
drop table if exists testsisc;
NOTICE: table "testsisc" does not exist, skipping
Expand All @@ -104,7 +104,7 @@ insert into testsisc select i, i % 1000, i % 100000, i % 75 from
(select count(*) as nsegments from gp_segment_configuration where role='p' and content >= 0) foo) bar;
set gp_resqueue_print_operator_memory_limits=on;
set statement_mem='2MB';
-- ORCA does not generate SharedInputScan with a Sort node underneath it. For
-- ORCA does not generate ShareInputScan with a Sort node underneath it. For
-- the following query, ORCA disregards the order by inside the cte definition;
-- planner on the other hand does not.
set optimizer=off;
Expand All @@ -114,8 +114,8 @@ select gp_inject_fault('execshare_input_next', 'reset', 2);
Success:
(1 row)

-- Set QueryFinishPending to true after SharedInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of shared input scan's child node.
-- Set QueryFinishPending to true after ShareInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of Share Input Scan's child node.
select gp_inject_fault('execshare_input_next', 'finish_pending', 2);
gp_inject_fault
-----------------
Expand All @@ -137,16 +137,16 @@ select gp_inject_fault('execshare_input_next', 'status', 2);

(1 row)

-- test if shared input scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- test if Share Input Scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- where the child is a Sort node and sort_mk algorithm is used
select gp_inject_fault('execshare_input_next', 'reset', 2);
gp_inject_fault
-----------------
Success:
(1 row)

-- Set QueryFinishPending to true after SharedInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of shared input scan's child node.
-- Set QueryFinishPending to true after ShareInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of Share Input Scan's child node.
select gp_inject_fault('execshare_input_next', 'finish_pending', 2);
gp_inject_fault
-----------------
Expand Down
2 changes: 1 addition & 1 deletion src/test/regress/input/temp_tablespaces.source
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ CREATE TABLE tts_foo (i int, j int) distributed by(i);
insert into tts_foo select i, i from generate_series(1,80000)i;
ANALYZE tts_foo;
set gp_cte_sharing=on;
-- CBDB_PARALLEL_FIXME: since we disabled shared input scan in parallel mode, sisc_xslice_temp_files
-- CBDB_PARALLEL_FIXME: since we disabled Share Input Scan in parallel mode, sisc_xslice_temp_files
-- will never be triggered. We need to set max_parallel_workers_per_gather to 0 in this case.
set max_parallel_workers_per_gather = 0;

Expand Down
2 changes: 1 addition & 1 deletion src/test/regress/output/temp_tablespaces.source
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ CREATE TABLE tts_foo (i int, j int) distributed by(i);
insert into tts_foo select i, i from generate_series(1,80000)i;
ANALYZE tts_foo;
set gp_cte_sharing=on;
-- CBDB_PARALLEL_FIXME: since we disabled shared input scan in parallel mode, sisc_xslice_temp_files
-- CBDB_PARALLEL_FIXME: since we disabled Share Input Scan in parallel mode, sisc_xslice_temp_files
-- will never be triggered. We need to set max_parallel_workers_per_gather to 0 in this case.
set max_parallel_workers_per_gather = 0;
-- CASE 1: when temp_tablespaces is set, hashagg and share-input-scan
Expand Down
2 changes: 1 addition & 1 deletion src/test/regress/sql/qp_with_clause.sql
Original file line number Diff line number Diff line change
Expand Up @@ -10331,7 +10331,7 @@ WHERE e.deptno = dc1.deptno AND
m.deptno = dmc1.dept_mgr_no
ORDER BY 1, 2, 3, 4 DESC LIMIT 25;

-- Test that SharedInputScan within the same slice is always executed
-- Test that ShareInputScan within the same slice is always executed
set gp_cte_sharing=on;

-- start_ignore
Expand Down
14 changes: 7 additions & 7 deletions src/test/regress/sql/query_finish_pending.sql
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ reset enable_parallel;

select gp_inject_fault('execsort_sort_bounded_heap', 'status', 2);

-- test if shared input scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- test if Share Input Scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- where the child is a Sort node
drop table if exists testsisc;
create table testsisc (i1 int, i2 int, i3 int, i4 int);
Expand All @@ -48,13 +48,13 @@ insert into testsisc select i, i % 1000, i % 100000, i % 75 from

set gp_resqueue_print_operator_memory_limits=on;
set statement_mem='2MB';
-- ORCA does not generate SharedInputScan with a Sort node underneath it. For
-- ORCA does not generate ShareInputScan with a Sort node underneath it. For
-- the following query, ORCA disregards the order by inside the cte definition;
-- planner on the other hand does not.
set optimizer=off;
select gp_inject_fault('execshare_input_next', 'reset', 2);
-- Set QueryFinishPending to true after SharedInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of shared input scan's child node.
-- Set QueryFinishPending to true after ShareInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of Share Input Scan's child node.
select gp_inject_fault('execshare_input_next', 'finish_pending', 2);

set enable_parallel = off;
Expand All @@ -64,13 +64,13 @@ select * from cte c1, cte c2 limit 2;

select gp_inject_fault('execshare_input_next', 'status', 2);

-- test if shared input scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- test if Share Input Scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- where the child is a Sort node and sort_mk algorithm is used


select gp_inject_fault('execshare_input_next', 'reset', 2);
-- Set QueryFinishPending to true after SharedInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of shared input scan's child node.
-- Set QueryFinishPending to true after ShareInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of Share Input Scan's child node.
select gp_inject_fault('execshare_input_next', 'finish_pending', 2);

with cte as (select i2 from testsisc order by i2)
Expand Down
2 changes: 1 addition & 1 deletion src/test/singlenode_regress/expected/qp_with_clause.out
Original file line number Diff line number Diff line change
Expand Up @@ -11078,7 +11078,7 @@ ORDER BY 1, 2, 3, 4 DESC LIMIT 25;
35NAME | 7 | 5NAME | 1
(25 rows)

-- Test that SharedInputScan within the same slice is always executed
-- Test that ShareInputScan within the same slice is always executed
set gp_cte_sharing=on;
-- start_ignore
CREATE TABLE car (a int, b int);
Expand Down
Loading
Loading