diff --git a/src/common/backend/nodes/tidbitmap.cpp b/src/common/backend/nodes/tidbitmap.cpp
index 7a8c453b2..fd11638b1 100755
--- a/src/common/backend/nodes/tidbitmap.cpp
+++ b/src/common/backend/nodes/tidbitmap.cpp
@@ -45,6 +45,7 @@
 #include "nodes/bitmapset.h"
 #include "nodes/tidbitmap.h"
 #include "utils/hsearch.h"
+#include "utils/memutils.h"
 #include "storage/lwlock.h"
 
 /*
@@ -161,6 +162,9 @@ struct TIDBitmap {
     bool isGlobalPart;     /* represent global partition index tbm */
     bool isShared;         /* is shared pagetable? */
     PagetableEntry entry1; /* used when status == TBM_ONE_PAGE */
+    pg_atomic_uint32 pagetableRefcount; /* ref count for pagetable */
+    pg_atomic_uint32 pagesRefcount;     /* ref count for spages */
+    pg_atomic_uint32 chunksRefcount;    /* ref count for schunks */
     /* these are valid when iterating is true: */
     PagetableEntry** spages;  /* sorted exact-page list, or NULL */
     PagetableEntry** schunks; /* sorted lossy-chunk list, or NULL */
@@ -194,9 +198,6 @@ struct TBMSharedIteratorState {
     HTAB* pagetable;          /* hash table of PagetableEntry's */
     PagetableEntry** spages;  /* sorted exact-page list, or NULL */
     PagetableEntry** schunks; /* sorted lossy-chunk list, or NULL */
-    pg_atomic_uint32 pagetableRefcount; /* ref count for pagetable */
-    pg_atomic_uint32 pagesRefcount;     /* ref count for spages */
-    pg_atomic_uint32 chunksRefcount;    /* ref count for schunks */
     LWLock lock;              /* lock to protect below members */
     int spageptr;             /* next spages index */
     int schunkptr;            /* next schunks index */
@@ -244,7 +245,12 @@ TIDBitmap* tbm_create(long maxbytes, MemoryContext dsa)
         tbm->mcxt = CurrentMemoryContext;
         tbm->isShared = false;
     } else {
-        tbm->mcxt = dsa;
+        /*
+         * Create a new memory context here: destroying the hash table also
+         * deletes the memory context it was created in, so we must not hand
+         * the caller's context to it. See tbm_free_shared_area -> hash_destroy.
+         */
+        tbm->mcxt = AllocSetContextCreate(dsa, "shared tbm hash table", ALLOCSET_DEFAULT_MINSIZE,
+            ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE, SHARED_CONTEXT);
         tbm->isShared = true;
     }
     tbm->status = TBM_EMPTY;
@@ -327,15 +333,15 @@ void tbm_free(TIDBitmap* tbm)
 * memory if they are not referred by any of the shared iterator i.e recount
 * is becomes 0.
 */
-void tbm_free_shared_area(TBMSharedIteratorState *istate)
+void tbm_free_shared_area(TIDBitmap* tbm, TBMSharedIteratorState *istate)
 {
-    if (pg_atomic_sub_fetch_u32(&istate->pagetableRefcount, 1) == 0) {
+    if (pg_atomic_sub_fetch_u32(&tbm->pagetableRefcount, 1) == 0) {
         hash_destroy(istate->pagetable);
     }
-    if (pg_atomic_sub_fetch_u32(&istate->pagesRefcount, 1) == 0) {
+    if (pg_atomic_sub_fetch_u32(&tbm->pagesRefcount, 1) == 0) {
         pfree_ext(istate->spages);
     }
-    if (pg_atomic_sub_fetch_u32(&istate->chunksRefcount, 1) == 0) {
+    if (pg_atomic_sub_fetch_u32(&tbm->chunksRefcount, 1) == 0) {
         pfree_ext(istate->schunks);
     }
     pfree_ext(istate);
@@ -717,9 +723,9 @@ TBMSharedIteratorState* tbm_prepare_shared_iterate(TIDBitmap *tbm)
     if (tbm->status == TBM_HASH) {
         tbm_sort_pages(tbm);
     }
-    pg_atomic_init_u32(&istate->pagetableRefcount, 0);
-    pg_atomic_init_u32(&istate->pagesRefcount, 0);
-    pg_atomic_init_u32(&istate->chunksRefcount, 0);
+    pg_atomic_init_u32(&tbm->pagetableRefcount, 0);
+    pg_atomic_init_u32(&tbm->pagesRefcount, 0);
+    pg_atomic_init_u32(&tbm->chunksRefcount, 0);
 }
 
 /*
@@ -741,9 +747,9 @@ TBMSharedIteratorState* tbm_prepare_shared_iterate(TIDBitmap *tbm)
 * increase the refcount by 1 so that while freeing the shared iterator we
 * don't free pagetable and iterator array until its refcount becomes 0.
 */
-    (void)pg_atomic_add_fetch_u32(&istate->pagetableRefcount, 1);
-    (void)pg_atomic_add_fetch_u32(&istate->pagesRefcount, 1);
-    (void)pg_atomic_add_fetch_u32(&istate->chunksRefcount, 1);
+    (void)pg_atomic_add_fetch_u32(&tbm->pagetableRefcount, 1);
+    (void)pg_atomic_add_fetch_u32(&tbm->pagesRefcount, 1);
+    (void)pg_atomic_add_fetch_u32(&tbm->chunksRefcount, 1);
 
     /* Initialize the iterator lock */
     LWLockInitialize(&istate->lock, LWTRANCHE_TBM);
diff --git a/src/gausskernel/optimizer/path/costsize.cpp b/src/gausskernel/optimizer/path/costsize.cpp
index edba68e1a..f4be88a57 100644
--- a/src/gausskernel/optimizer/path/costsize.cpp
+++ b/src/gausskernel/optimizer/path/costsize.cpp
@@ -1260,6 +1260,8 @@ void cost_index(IndexPath* path, PlannerInfo* root, double loop_count, bool part
 
     cpu_run_cost += cpu_per_tuple * tuples_fetched;
 
+    run_cost += cpu_run_cost;
+
     /* Adjust costing for parallelism, if used. */
     if (path->path.parallel_workers > 0) {
         double parallel_divisor = get_parallel_divisor(&path->path);
@@ -1267,11 +1269,9 @@ void cost_index(IndexPath* path, PlannerInfo* root, double loop_count, bool part
         path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);
 
         /* The CPU cost is divided among all the workers. */
-        cpu_run_cost /= parallel_divisor;
+        run_cost /= parallel_divisor;
     }
 
-    run_cost += cpu_run_cost;
-
     path->path.startup_cost = startup_cost;
     path->path.total_cost = startup_cost + run_cost;
     path->path.stream_cost = 0;
@@ -1531,18 +1531,18 @@ void cost_bitmap_heap_scan(
     cpu_per_tuple = u_sess->attr.attr_sql.cpu_tuple_cost + qpqual_cost.per_tuple;
     cpu_run_cost = cpu_per_tuple * tuples_fetched;
 
+    run_cost += cpu_run_cost;
+
     /* Adjust costing for parallelism, if used. */
     if (path->parallel_workers > 0) {
         double parallel_divisor = get_parallel_divisor(path);
 
         /* The CPU cost is divided among all the workers. */
-        cpu_run_cost /= parallel_divisor;
+        run_cost /= parallel_divisor;
 
         path->rows = clamp_row_est(path->rows / parallel_divisor);
     }
 
-    run_cost += cpu_run_cost;
-
     path->startup_cost = startup_cost;
     path->total_cost = startup_cost + run_cost;
     path->stream_cost = 0;
diff --git a/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp b/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp
index 944d75e1c..e781ea5c3 100755
--- a/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp
+++ b/src/gausskernel/runtime/executor/nodeBitmapHeapscan.cpp
@@ -1242,10 +1242,10 @@ void ExecBitmapHeapReInitializeDSM(BitmapHeapScanState *node, ParallelContext *p
     pstate->state = BM_INITIAL;
 
     if (pstate->tbmiterator != NULL) {
-        tbm_free_shared_area(pstate->tbmiterator);
+        tbm_free_shared_area(node->tbm, pstate->tbmiterator);
     }
 
     if (pstate->prefetch_iterator != NULL) {
-        tbm_free_shared_area(pstate->prefetch_iterator);
+        tbm_free_shared_area(node->tbm, pstate->prefetch_iterator);
     }
 
     pstate->tbmiterator = NULL;
     pstate->prefetch_iterator = NULL;
diff --git a/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp b/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp
index 33f965cc2..fe16b3da1 100755
--- a/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp
+++ b/src/gausskernel/runtime/executor/nodeIndexonlyscan.cpp
@@ -78,6 +78,30 @@ static TupleTableSlot* IndexOnlyNext(IndexOnlyScanState* node)
     econtext = node->ss.ps.ps_ExprContext;
     slot = node->ss.ss_ScanTupleSlot;
 
+    if (scandesc == NULL) {
+        /*
+         * We reach here if the index-only scan is not parallel, or if we're
+         * serially executing an index-only scan that was intended to be parallel.
+ */ + scandesc = abs_idx_beginscan(node->ss.ss_currentRelation, node->ioss_RelationDesc, estate->es_snapshot, + node->ioss_NumScanKeys, node->ioss_NumOrderByKeys, (ScanState*)node); + node->ioss_ScanDesc = scandesc; + + /* Set it up for index-only scan */ + GetIndexScanDesc(node->ioss_ScanDesc)->xs_want_itup = true; + node->ioss_VMBuffer = InvalidBuffer; + + /* + * If no run-time keys to calculate or they are ready, go ahead and + * pass the scankeys to the index AM. + */ + if (node->ioss_NumRuntimeKeys == 0 || node->ioss_RuntimeKeysReady) { + abs_idx_rescan_local(scandesc, node->ioss_ScanKeys, node->ioss_NumScanKeys, + node->ioss_OrderByKeys, node->ioss_NumOrderByKeys); + } + } + /* * OK, now that we have what we need, fetch the next tuple. */ @@ -612,42 +636,14 @@ IndexOnlyScanState* ExecInitIndexOnlyScan(IndexOnlyScan* node, EState* estate, i indexstate->ioss_NumOrderByKeys, (ScanState*)indexstate); } - } else { - /* - * Initialize scan descriptor. - */ - if (!node->scan.plan.parallel_aware) { - indexstate->ioss_ScanDesc = abs_idx_beginscan(currentRelation, - indexstate->ioss_RelationDesc, - estate->es_snapshot, - indexstate->ioss_NumScanKeys, - indexstate->ioss_NumOrderByKeys, - (ScanState*)indexstate); - } } - if (!node->scan.plan.parallel_aware) { + if (indexstate->ioss_ScanDesc == NULL) { /* - * If is Partition table, if ( 0 == node->scan.itrs), scan_desc is NULL. - */ - if (PointerIsValid(indexstate->ioss_ScanDesc)) { - /* Set it up for index-only scan */ - GetIndexScanDesc(indexstate->ioss_ScanDesc)->xs_want_itup = true; - indexstate->ioss_VMBuffer = InvalidBuffer; - - /* - * If no run-time keys to calculate, go ahead and pass the scankeys to the - * index AM. - */ - if (indexstate->ioss_NumRuntimeKeys == 0) - abs_idx_rescan_local(indexstate->ioss_ScanDesc, - indexstate->ioss_ScanKeys, - indexstate->ioss_NumScanKeys, - indexstate->ioss_OrderByKeys, - indexstate->ioss_NumOrderByKeys); - } else { - indexstate->ss.ps.stubType = PST_Scan; - } + * For non-partition table, ioss_ScanDesc always none, so set stubType to PST_None. + * For partition table, if ( 0 == node->scan.itrs), scan_desc is NULL, so set stubType to PST_Scan. + */ + indexstate->ss.ps.stubType = node->scan.isPartTbl ? PST_Scan : PST_None; } /* @@ -841,10 +837,10 @@ void ExecIndexOnlyScanInitializeDSM(IndexOnlyScanState *node, ParallelContext *p node->ioss_VMBuffer = InvalidBuffer; /* - * If no run-time keys to calculate, go ahead and pass the scankeys to + * If no run-time keys to calculate or they are ready, go ahead and pass the scankeys to * the index AM. */ - if (node->ioss_NumRuntimeKeys == 0) { + if (node->ioss_NumRuntimeKeys == 0 || node->ioss_RuntimeKeysReady) { abs_idx_rescan(node->ioss_ScanDesc, node->ioss_ScanKeys, node->ioss_NumScanKeys, node->ioss_OrderByKeys, node->ioss_NumOrderByKeys); } @@ -888,10 +884,10 @@ void ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node, void *context) GetIndexScanDesc(node->ioss_ScanDesc)->xs_want_itup = true; /* - * If no run-time keys to calculate, go ahead and pass the scankeys to the + * If no run-time keys to calculate or they are ready, go ahead and pass the scankeys to the * index AM. 
     */
-    if (node->ioss_NumRuntimeKeys == 0) {
+    if (node->ioss_NumRuntimeKeys == 0 || node->ioss_RuntimeKeysReady) {
         abs_idx_rescan(node->ioss_ScanDesc, node->ioss_ScanKeys, node->ioss_NumScanKeys,
             node->ioss_OrderByKeys, node->ioss_NumOrderByKeys);
     }
diff --git a/src/gausskernel/runtime/executor/nodeIndexscan.cpp b/src/gausskernel/runtime/executor/nodeIndexscan.cpp
index 79a1e534d..b78161866 100755
--- a/src/gausskernel/runtime/executor/nodeIndexscan.cpp
+++ b/src/gausskernel/runtime/executor/nodeIndexscan.cpp
@@ -80,6 +80,25 @@ static TupleTableSlot* IndexNext(IndexScanState* node)
     econtext = node->ss.ps.ps_ExprContext;
     slot = node->ss.ss_ScanTupleSlot;
 
+    if (scandesc == NULL) {
+        /*
+         * We reach here if the index scan is not parallel, or if we're
+         * serially executing an index scan that was planned to be parallel.
+         */
+        scandesc = abs_idx_beginscan(node->ss.ss_currentRelation, node->iss_RelationDesc, estate->es_snapshot,
+            node->iss_NumScanKeys, node->iss_NumOrderByKeys, (ScanState*)node);
+        node->iss_ScanDesc = scandesc;
+
+        /*
+         * If no run-time keys to calculate or they are ready, go ahead and
+         * pass the scankeys to the index AM.
+         */
+        if (node->iss_NumRuntimeKeys == 0 || node->iss_RuntimeKeysReady) {
+            abs_idx_rescan_local(scandesc, node->iss_ScanKeys, node->iss_NumScanKeys,
+                node->iss_OrderByKeys, node->iss_NumOrderByKeys);
+        }
+    }
+
     /*
      * ok, now that we have what we need, fetch the next tuple.
      */
@@ -687,38 +706,14 @@ IndexScanState* ExecInitIndexScan(IndexScan* node, EState* estate, int eflags)
                 (ScanState*)index_state);
             Assert(PointerIsValid(index_state->iss_ScanDesc));
         }
-    } else {
-        /*
-         * for parallel-aware node, we initialize the scan descriptor after
-         * initializing the shared memory for parallel execution.
-         */
-        if (!node->scan.plan.parallel_aware) {
-            /*
-             * Initialize scan descriptor.
-             */
-            index_state->iss_ScanDesc = abs_idx_beginscan(current_relation,
-                index_state->iss_RelationDesc,
-                estate->es_snapshot,
-                index_state->iss_NumScanKeys,
-                index_state->iss_NumOrderByKeys,
-                (ScanState*)index_state);
-        }
     }
 
-    if (!node->scan.plan.parallel_aware) {
+    if (index_state->iss_ScanDesc == NULL) {
         /*
-         * If no run-time keys to calculate, go ahead and pass the scankeys to the
-         * index AM.
-         */
-        if (index_state->iss_ScanDesc == NULL) {
-            index_state->ss.ps.stubType = PST_Scan;
-        } else if (index_state->iss_NumRuntimeKeys == 0) {
-            abs_idx_rescan_local(index_state->iss_ScanDesc,
-                index_state->iss_ScanKeys,
-                index_state->iss_NumScanKeys,
-                index_state->iss_OrderByKeys,
-                index_state->iss_NumOrderByKeys);
-        }
+         * For a non-partitioned table, iss_ScanDesc is always NULL at this point
+         * (it is created later, in IndexNext or the parallel-init callbacks), so
+         * set stubType to PST_None. For a partitioned table, scan_desc is NULL
+         * when (0 == node->scan.itrs), so set stubType to PST_Scan.
+         */
+        index_state->ss.ps.stubType = node->scan.isPartTbl ? PST_Scan : PST_None;
     }
 
     /*
@@ -1395,10 +1390,10 @@ void ExecIndexScanInitializeDSM(IndexScanState *node, ParallelContext *pcxt, int
         node->iss_NumScanKeys, node->iss_NumOrderByKeys, cxt->pwCtx->queryInfo.piscan[nodeid]);
 
     /*
-     * If no run-time keys to calculate, go ahead and pass the scankeys to the
+     * If no run-time keys to calculate or they are ready, go ahead and pass the scankeys to the
      * index AM.
     */
-    if (node->iss_NumRuntimeKeys == 0) {
+    if (node->iss_NumRuntimeKeys == 0 || node->iss_RuntimeKeysReady) {
        abs_idx_rescan(node->iss_ScanDesc, node->iss_ScanKeys, node->iss_NumScanKeys,
             node->iss_OrderByKeys, node->iss_NumOrderByKeys);
     }
@@ -1441,10 +1436,10 @@ void ExecIndexScanInitializeWorker(IndexScanState *node, void *context)
         node->iss_NumScanKeys, node->iss_NumOrderByKeys, piscan);
 
     /*
-     * If no run-time keys to calculate, go ahead and pass the scankeys to the
+     * If no run-time keys to calculate or they are ready, go ahead and pass the scankeys to the
      * index AM.
      */
-    if (node->iss_NumRuntimeKeys == 0) {
+    if (node->iss_NumRuntimeKeys == 0 || node->iss_RuntimeKeysReady) {
         abs_idx_rescan(node->iss_ScanDesc, node->iss_ScanKeys, node->iss_NumScanKeys,
             node->iss_OrderByKeys, node->iss_NumOrderByKeys);
     }
diff --git a/src/gausskernel/storage/file/fd.cpp b/src/gausskernel/storage/file/fd.cpp
index ce87709b9..c0c6cc655 100755
--- a/src/gausskernel/storage/file/fd.cpp
+++ b/src/gausskernel/storage/file/fd.cpp
@@ -1376,7 +1376,7 @@ void PathNameCreateTemporaryDir(const char *basedir, const char *directory)
      * EEXIST to close a race against another process following the same
      * algorithm.
      */
-    if (mkdir(directory, S_IRWXU) < 0 && errno != EEXIST) {
+    if (mkdir(basedir, S_IRWXU) < 0 && errno != EEXIST) {
         ereport(ERROR,
             (errcode_for_file_access(), errmsg("cannot create temporary directory \"%s\": %m", basedir)));
     }
diff --git a/src/include/nodes/tidbitmap.h b/src/include/nodes/tidbitmap.h
index 81df2e12b..b746b6919 100755
--- a/src/include/nodes/tidbitmap.h
+++ b/src/include/nodes/tidbitmap.h
@@ -48,7 +48,7 @@ typedef struct {
 /* function prototypes in nodes/tidbitmap.c */
 extern TIDBitmap* tbm_create(long maxbytes, MemoryContext dsa);
 extern void tbm_free(TIDBitmap* tbm);
-extern void tbm_free_shared_area(TBMSharedIteratorState *istate);
+extern void tbm_free_shared_area(TIDBitmap* tbm, TBMSharedIteratorState *istate);
 extern void tbm_add_tuples(
     TIDBitmap* tbm, const ItemPointer tids, int ntids, bool recheck, Oid partitionOid = InvalidOid);
diff --git a/src/include/optimizer/cost.h b/src/include/optimizer/cost.h
index 78ba3291b..c8709a18d 100755
--- a/src/include/optimizer/cost.h
+++ b/src/include/optimizer/cost.h
@@ -51,7 +51,7 @@
 #define LOCAL_RECEIVE_KDATA_COST 1.3 /* The receive cost for local stream */
 #define DEFAULT_SMP_THREAD_COST 1000 /* The cost for add a new thread */
 #define DEFAULT_STREAM_MULTIPLE 1.0
-#define DEFAULT_PARALLEL_TUPLE_COST 0.1
+#define DEFAULT_PARALLEL_TUPLE_COST 0.05
 #define DEFAULT_PARALLEL_SETUP_COST 1000.0
 #define DEFAULT_EFFECTIVE_CACHE_SIZE 16384 /* measured in pages */
diff --git a/src/test/regress/expected/parallel_query.out b/src/test/regress/expected/parallel_query.out
index 99a424578..340d31b24 100644
--- a/src/test/regress/expected/parallel_query.out
+++ b/src/test/regress/expected/parallel_query.out
@@ -711,14 +711,14 @@ CREATE TABLE onepage2 as select * from onepage1;
 explain select * from onepage2 natural join onepage1 where onepage2.val > 2 and onepage1.val < 4;
                                            QUERY PLAN
 -----------------------------------------------------------------------------------------------
- Gather  (cost=15.04..32.69 rows=1 width=8)
+ Gather  (cost=8.90..26.54 rows=1 width=8)
    Number of Workers: 2
-   ->  Parallel Hash Join  (cost=15.04..32.69 rows=1 width=8)
+   ->  Parallel Hash Join  (cost=8.90..26.54 rows=1 width=8)
          Hash Cond: ((onepage2.val = onepage1.val) AND (onepage2.val2 = onepage1.val2))
         ->  Parallel Seq Scan on onepage2  (cost=0.00..17.60 rows=5 width=8)
               Filter: ((val > 2) AND (val < 4))
-        ->  Parallel Hash  (cost=14.97..14.97 rows=5 width=8)
-              ->  Parallel Bitmap Heap Scan on onepage1  (cost=4.36..14.97 rows=5 width=8)
+        ->  Parallel Hash  (cost=8.82..8.82 rows=5 width=8)
+              ->  Parallel Bitmap Heap Scan on onepage1  (cost=4.36..8.82 rows=5 width=8)
                     Recheck Cond: ((val < 4) AND (val > 2))
                     ->  Bitmap Index Scan on onepage1_pkey  (cost=0.00..4.36 rows=11 width=0)
                           Index Cond: ((val < 4) AND (val > 2))
@@ -1127,6 +1127,7 @@ reset min_parallel_index_scan_size;
 -- nestloop
 set enable_hashjoin=off;
 set enable_mergejoin=off;
+set enable_bitmapscan=off;
 explain (costs off, analyse on) select schemaname, tablename from pg_tables where tablename like 'sql%' order by tablename;
--?.*
--?.*
@@ -1209,5 +1210,6 @@ reset parallel_tuple_cost;
 reset max_parallel_workers_per_gather;
 reset min_parallel_table_scan_size;
 reset parallel_leader_participation;
+reset enable_bitmapscan;
 reset enable_hashjoin;
 reset enable_mergejoin;
\ No newline at end of file
diff --git a/src/test/regress/expected/single_node_sqlbypass.out b/src/test/regress/expected/single_node_sqlbypass.out
index fca25e3ea..64c962c16 100644
--- a/src/test/regress/expected/single_node_sqlbypass.out
+++ b/src/test/regress/expected/single_node_sqlbypass.out
@@ -3,6 +3,7 @@ set enable_bitmapscan=off;
 set enable_material=off;
 set enable_beta_opfusion=on;
 set enable_beta_nestloop_fusion=on;
+set max_parallel_workers_per_gather=0;
 drop table if exists t1;
 NOTICE: table "t1" does not exist, skipping
 create table t1 (c1 int, c2 numeric, c3 numeric, c4 int, colreal real);
@@ -351,3 +352,4 @@ reset enable_bitmapscan;
 reset enable_material;
 reset enable_beta_opfusion;
 reset enable_beta_nestloop_fusion;
+reset max_parallel_workers_per_gather;
diff --git a/src/test/regress/sql/parallel_create_index.sql b/src/test/regress/input/parallel_create_index.source
similarity index 89%
rename from src/test/regress/sql/parallel_create_index.sql
rename to src/test/regress/input/parallel_create_index.source
index 12bee8201..e972b629c 100644
--- a/src/test/regress/sql/parallel_create_index.sql
+++ b/src/test/regress/input/parallel_create_index.source
@@ -9,6 +9,8 @@ set max_parallel_maintenance_workers=2;
 set min_parallel_index_scan_size=0;
 set min_parallel_table_scan_size=0;
 set maintenance_work_mem=262144;
+--test rm tmp dir first
+\! rm -rf '@abs_srcdir@/tmp_check/datanode1/base/pgsql_tmp';
 CREATE INDEX parallel_index ON parallel_sort_test (randint);
 
 --clean up
diff --git a/src/test/regress/expected/parallel_create_index.out b/src/test/regress/output/parallel_create_index.source
similarity index 96%
rename from src/test/regress/expected/parallel_create_index.out
rename to src/test/regress/output/parallel_create_index.source
index afe092e5b..5b7a9f1a8 100644
--- a/src/test/regress/expected/parallel_create_index.out
+++ b/src/test/regress/output/parallel_create_index.source
@@ -12,6 +12,8 @@ set min_parallel_table_scan_size=0;
 LOG: statement: set min_parallel_table_scan_size=0;
 set maintenance_work_mem=262144;
 LOG: statement: set maintenance_work_mem=262144;
+--test rm tmp dir first
+\! rm -rf '@abs_srcdir@/tmp_check/datanode1/base/pgsql_tmp';
 CREATE INDEX parallel_index ON parallel_sort_test (randint);
 LOG: statement: CREATE INDEX parallel_index ON parallel_sort_test (randint);
--?LOG: begin index sort: unique = f, workMem = .*, randomAccess = f, maxMem = .*
diff --git a/src/test/regress/sql/parallel_query.sql b/src/test/regress/sql/parallel_query.sql
index 382296f37..d865734bb 100644
--- a/src/test/regress/sql/parallel_query.sql
+++ b/src/test/regress/sql/parallel_query.sql
@@ -308,6 +308,7 @@ reset min_parallel_index_scan_size;
 -- nestloop
 set enable_hashjoin=off;
 set enable_mergejoin=off;
+set enable_bitmapscan=off;
 explain (costs off, analyse on) select schemaname, tablename from pg_tables where tablename like 'sql%' order by tablename;
 --set parallel parameter
 set force_parallel_mode=on;
@@ -339,5 +340,6 @@ reset parallel_tuple_cost;
 reset max_parallel_workers_per_gather;
 reset min_parallel_table_scan_size;
 reset parallel_leader_participation;
+reset enable_bitmapscan;
 reset enable_hashjoin;
 reset enable_mergejoin;
\ No newline at end of file
diff --git a/src/test/regress/sql/single_node_sqlbypass.sql b/src/test/regress/sql/single_node_sqlbypass.sql
index ca80232d8..137157ea6 100644
--- a/src/test/regress/sql/single_node_sqlbypass.sql
+++ b/src/test/regress/sql/single_node_sqlbypass.sql
@@ -3,6 +3,7 @@ set enable_bitmapscan=off;
 set enable_material=off;
 set enable_beta_opfusion=on;
 set enable_beta_nestloop_fusion=on;
+set max_parallel_workers_per_gather=0;
 drop table if exists t1;
 create table t1 (c1 int, c2 numeric, c3 numeric, c4 int, colreal real);
 create table t2 (c1 int, c2 numeric, c3 numeric, c4 int, colreal real);
@@ -78,3 +79,4 @@ reset enable_bitmapscan;
 reset enable_material;
 reset enable_beta_opfusion;
 reset enable_beta_nestloop_fusion;
+reset max_parallel_workers_per_gather;
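
Note on the tidbitmap hunks above: the three reference counters move out of TBMSharedIteratorState and into the TIDBitmap that owns the shared pagetable, which is why tbm_free_shared_area now takes the bitmap as an extra argument. ExecBitmapHeapReInitializeDSM may free one iterator state during a rescan while another (e.g. the prefetch iterator) still references the same arrays, so the counters must outlive any single iterator state. Below is a minimal sketch of that ownership pattern, using std::atomic in place of pg_atomic_uint32 and illustrative stand-in types rather than the real executor structs:

    #include <atomic>
    #include <cstdlib>
    #include <cstdio>

    /* Stand-in for TIDBitmap: the counters live with the owner of the shared data. */
    struct Bitmap {
        std::atomic<unsigned> pagetableRefcount{0};
        int* pagetable = nullptr; /* stands in for the HTAB */
    };

    /* Stand-in for TBMSharedIteratorState: it only borrows the shared data now. */
    struct SharedIterState {
        int* pagetable;
    };

    /* Like tbm_prepare_shared_iterate: each prepared iterator takes one reference. */
    SharedIterState* prepare_iterate(Bitmap* tbm)
    {
        tbm->pagetableRefcount.fetch_add(1);
        return new SharedIterState{tbm->pagetable};
    }

    /* Like tbm_free_shared_area: only the last reference frees the shared data. */
    void free_shared_area(Bitmap* tbm, SharedIterState* istate)
    {
        if (tbm->pagetableRefcount.fetch_sub(1) == 1) { /* new value is 0 */
            free(tbm->pagetable);
            tbm->pagetable = nullptr;
        }
        delete istate; /* the per-iterator wrapper itself always goes away */
    }

    int main()
    {
        Bitmap tbm;
        tbm.pagetable = static_cast<int*>(malloc(sizeof(int)));

        /* Two iterator states over one bitmap, e.g. scan and prefetch iterators. */
        SharedIterState* a = prepare_iterate(&tbm);
        SharedIterState* b = prepare_iterate(&tbm);

        free_shared_area(&tbm, a); /* refcount 2 -> 1: pagetable survives */
        free_shared_area(&tbm, b); /* refcount 1 -> 0: pagetable freed */
        printf("done\n");
        return 0;
    }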
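
On the costsize.cpp hunks: previously only cpu_run_cost was divided by the parallel divisor, so the I/O component of run_cost was charged in full to a parallel path; the patch folds the CPU cost into run_cost first and divides the whole run cost. Together with DEFAULT_PARALLEL_TUPLE_COST dropping from 0.1 to 0.05, this is what lowers the estimates in parallel_query.out (e.g. the Parallel Bitmap Heap Scan going from 14.97 to 8.82). A back-of-the-envelope comparison, assuming get_parallel_divisor follows the usual PostgreSQL formula (workers plus a leader contribution of 1 - 0.3 * workers while positive); the cost numbers here are made up for illustration:

    #include <algorithm>
    #include <cstdio>

    /* Assumed shape of get_parallel_divisor (not copied from the source tree). */
    static double parallel_divisor(int workers, bool leader_participates)
    {
        double divisor = workers;
        if (leader_participates)
            divisor += std::max(0.0, 1.0 - 0.3 * workers);
        return divisor;
    }

    int main()
    {
        const double io_run_cost = 20.0;  /* illustrative heap/index I/O cost */
        const double cpu_run_cost = 10.0; /* illustrative per-tuple CPU cost  */
        const double divisor = parallel_divisor(2, true); /* 2 + 0.4 = 2.4 */

        /* Before the patch: only the CPU part shrinks with more workers. */
        double old_run_cost = io_run_cost + cpu_run_cost / divisor;

        /* After the patch: the whole run cost is divided among the workers. */
        double new_run_cost = (io_run_cost + cpu_run_cost) / divisor;

        printf("divisor=%.2f old=%.2f new=%.2f\n", divisor, old_run_cost, new_run_cost);
        return 0; /* prints divisor=2.40 old=24.17 new=12.50 */
    }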
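
On the executor hunks: ExecInitIndexScan / ExecInitIndexOnlyScan no longer create the scan descriptor for non-partitioned tables; IndexNext / IndexOnlyNext create it lazily on the first fetch, which also covers a plan that was intended to be parallel but ends up running serially. The added RuntimeKeysReady check matters because, at that late point, a rescan may already have evaluated the runtime keys, and they must still be pushed to the index AM. A condensed sketch of the new control flow, with hypothetical stand-in types rather than the real executor API:

    #include <cstdio>

    struct ScanDesc { bool keys_pushed = false; };

    struct IndexScanStateSketch {
        ScanDesc* scan_desc = nullptr; /* left NULL by ExecInit for parallel plans */
        int num_runtime_keys = 0;
        bool runtime_keys_ready = false;
    };

    static ScanDesc* begin_scan() { return new ScanDesc(); }
    static void rescan_with_keys(ScanDesc* sd) { sd->keys_pushed = true; }

    /* Mirrors the new IndexNext prologue: create the descriptor on first use. */
    static ScanDesc* next_tuple(IndexScanStateSketch* node)
    {
        if (node->scan_desc == nullptr) {
            /* Not parallel, or a parallel plan being executed serially. */
            node->scan_desc = begin_scan();

            /* Push scankeys now if there are no runtime keys, or if a rescan
             * already evaluated them (runtime_keys_ready). */
            if (node->num_runtime_keys == 0 || node->runtime_keys_ready)
                rescan_with_keys(node->scan_desc);
        }
        return node->scan_desc; /* ...then fetch the next tuple as before */
    }

    int main()
    {
        IndexScanStateSketch node;
        node.num_runtime_keys = 1;
        node.runtime_keys_ready = true; /* e.g. set during a rescan */
        ScanDesc* sd = next_tuple(&node);
        printf("keys pushed: %s\n", sd->keys_pushed ? "yes" : "no");
        return 0;
    }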