From f8d2bf261e4464ba5cd3137651a09aa4d2008661 Mon Sep 17 00:00:00 2001
From: "arcoalien@qq.com"
Date: Thu, 18 Jul 2024 09:05:05 +0800
Subject: [PATCH] =?UTF-8?q?=E4=BF=AE=E5=A4=8Dbtvacuumscan=E4=B8=AD?=
 =?UTF-8?q?=E6=9C=89=E9=81=8D=E5=8E=86=E6=89=AB=E6=8F=8FlastBlockVacuumed?=
 =?UTF-8?q?=E5=88=B0lastBlockLocked=E7=94=9F=E6=88=90btree=20vacuum=20xlog?=
 =?UTF-8?q?=E7=9A=84=E9=97=AE=E9=A2=98?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../storage/access/nbtree/nbtree.cpp          | 49 +------------------
 1 file changed, 1 insertion(+), 48 deletions(-)

diff --git a/src/gausskernel/storage/access/nbtree/nbtree.cpp b/src/gausskernel/storage/access/nbtree/nbtree.cpp
index 9c120610b..22524e575 100644
--- a/src/gausskernel/storage/access/nbtree/nbtree.cpp
+++ b/src/gausskernel/storage/access/nbtree/nbtree.cpp
@@ -44,8 +44,6 @@ typedef struct {
     IndexBulkDeleteCallback callback;
     void *callback_state;
     BTCycleId cycleid;
-    BlockNumber lastBlockVacuumed; /* highest blkno actually vacuumed */
-    BlockNumber lastBlockLocked;   /* highest blkno we've cleanup-locked */
     BlockNumber totFreePages;      /* true total # of free pages */
     MemoryContext pagedelcontext;
 } BTVacState;
@@ -828,8 +826,6 @@ static void btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, In
     vstate.callback = callback;
     vstate.callback_state = callback_state;
     vstate.cycleid = cycleid;
-    vstate.lastBlockVacuumed = BTREE_METAPAGE; /* Initialise at first block */
-    vstate.lastBlockLocked = BTREE_METAPAGE;
     vstate.totFreePages = 0;
 
     /* Create a temporary memory context to run _bt_pagedel in */
@@ -882,33 +878,6 @@ static void btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, In
         }
     }
 
-    /*
-     * If the WAL is replayed in hot standby, the replay process needs to get
-     * cleanup locks on all index leaf pages, just as we've been doing here.
-     * However, we won't issue any WAL records about pages that have no items
-     * to be deleted. For pages between pages we've vacuumed, the replay code
-     * will take locks under the direction of the lastBlockVacuumed fields in
-     * the XLOG_BTREE_VACUUM WAL records. To cover pages after the last one
-     * we vacuum, we need to issue a dummy XLOG_BTREE_VACUUM WAL record
-     * against the last leaf page in the index, if that one wasn't vacuumed.
-     */
-    if (XLogStandbyInfoActive() && vstate.lastBlockVacuumed < vstate.lastBlockLocked && !SS_SINGLE_CLUSTER) {
-        Buffer buf;
-
-        /*
-         * The page should be valid, but we can't use _bt_getbuf() because we
-         * want to use a nondefault buffer access strategy. Since we aren't
-         * going to delete any items, getting cleanup lock again is probably
-         * overkill, but for consistency do that anyway.
-         */
-        buf = ReadBufferExtended(rel, MAIN_FORKNUM, vstate.lastBlockLocked, RBM_NORMAL, info->strategy);
-        _bt_checkbuffer_valid(rel, buf);
-        LockBufferForCleanup(buf);
-        _bt_checkpage(rel, buf);
-        _bt_delitems_vacuum(rel, buf, NULL, 0, NULL, 0,vstate.lastBlockVacuumed);
-        _bt_relbuf(rel, buf);
-    }
-
     MemoryContextDelete(vstate.pagedelcontext);
 
     /* update statistics */
@@ -1005,14 +974,6 @@ restart:
         LockBuffer(buf, BUFFER_LOCK_UNLOCK);
         LockBufferForCleanup(buf);
 
-        /*
-         * Remember highest leaf page number we've taken cleanup lock on; see
-         * notes in btvacuumscan
-         */
-        if (blkno > vstate->lastBlockLocked) {
-            vstate->lastBlockLocked = blkno;
-        }
-
         /*
          * Check whether we need to recurse back to earlier pages. What we
          * are concerned about is a page split that happened since we started
@@ -1110,15 +1071,7 @@ restart:
             Assert(num_dead_heap_tids >= Max(num_deletable, 1));
             Assert(num_deletable > 0 || updatable > 0);
             _bt_delitems_vacuum(rel, buf, deletable, num_deletable, updatable, num_updatable,
-                vstate->lastBlockVacuumed);
-
-            /*
-             * Remember highest leaf page number we've issued a
-             * XLOG_BTREE_VACUUM WAL record for.
-             */
-            if (blkno > vstate->lastBlockVacuumed) {
-                vstate->lastBlockVacuumed = blkno;
-            }
+                0);
 
             stats->tuples_removed += num_dead_heap_tids;
 
             /* must recompute maxoff */