fix core restartpoint

This commit is contained in:
MuJinqiang
2021-02-06 21:58:19 +08:00
parent 27ef270368
commit 2b1f587e9d
6 changed files with 16 additions and 1 deletion

View File

@ -1275,7 +1275,7 @@ static bool ckpt_found_valid_and_invalid_buffer_loc(
dirty_page_num = get_dirty_page_num();
if (dirty_page_num < g_instance.ckpt_cxt_ctl->dirty_page_queue_size * NEED_PRUNE_DIRTY_QUEUE_SLOT) {
if (dirty_page_num < g_instance.ckpt_cxt_ctl->dirty_page_queue_size * NEED_PRUNE_DIRTY_QUEUE_SLOT || FULL_CKPT) {
return false;
}
@ -1342,6 +1342,7 @@ static void ckpt_try_prune_dirty_page_queue()
pages are moved to a new position after slot 100 due to this prune queue, then
the redo point will be wrong, because some pages have not been flushed to disk.
*/
(void)LWLockAcquire(g_instance.ckpt_cxt_ctl->prune_queue_lock, LW_EXCLUSIVE);
if (last_invalid_slot > pg_atomic_read_u64(&g_instance.ckpt_cxt_ctl->full_ckpt_expected_flush_loc)) {
pg_atomic_write_u64(&g_instance.ckpt_cxt_ctl->full_ckpt_expected_flush_loc, (last_invalid_slot + 1));
}
@ -1381,6 +1382,7 @@ static void ckpt_try_prune_dirty_page_queue()
last_invalid_slot--;
}
LWLockRelease(g_instance.ckpt_cxt_ctl->prune_queue_lock);
if (u_sess->attr.attr_storage.log_pagewriter) {
print_dirty_page_queue_info(true);

View File

@ -10256,6 +10256,7 @@ void ShutdownXLOG(int code, Datum arg)
ckpt_shutdown_pagewriter();
free(g_instance.ckpt_cxt_ctl->dirty_page_queue);
g_instance.ckpt_cxt_ctl->dirty_page_queue = NULL;
g_instance.ckpt_cxt_ctl->prune_queue_lock = NULL;
g_instance.ckpt_cxt_ctl->ckpt_redo_state.recovery_queue_lock = NULL;
ShutdownCLOG();
@ -11416,7 +11417,9 @@ bool CreateRestartPoint(int flags)
ereport(LOG, (errmsg("CreateRestartPoint, need flush %ld pages.", get_dirty_page_num())));
} else if (ENABLE_INCRE_CKPT) {
g_instance.ckpt_cxt_ctl->full_ckpt_redo_ptr = lastCheckPoint.redo;
(void)LWLockAcquire(g_instance.ckpt_cxt_ctl->prune_queue_lock, LW_EXCLUSIVE);
g_instance.ckpt_cxt_ctl->full_ckpt_expected_flush_loc = get_loc_for_lsn(lastCheckPoint.redo);
LWLockRelease(g_instance.ckpt_cxt_ctl->prune_queue_lock);
pg_write_barrier();
uint64 head = pg_atomic_read_u64(&g_instance.ckpt_cxt_ctl->dirty_page_queue_head);

View File

@ -363,6 +363,10 @@ void CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
LsnXlogFlushChkShmInit();
if (g_instance.ckpt_cxt_ctl->prune_queue_lock == NULL) {
g_instance.ckpt_cxt_ctl->prune_queue_lock = LWLockAssign(LWTRANCHE_PRUNE_DIRTY_QUEUE);
}
if (g_instance.pid_cxt.PageWriterPID == NULL) {
MemoryContext oldcontext = MemoryContextSwitchTo(g_instance.increCheckPoint_context);
g_instance.pid_cxt.PageWriterPID =

View File

@ -158,6 +158,7 @@ static const char *BuiltinTrancheNames[] = {
"DWSingleFlushPosLock",
"DWSingleFlushWriteLock",
"RestartPointQueueLock",
"PruneDirtyQueueLock",
"LWTRANCHE_ACCOUNT_TABLE",
"GeneralExtendedLock",
"MPFLLOCK",
@ -378,6 +379,9 @@ int NumLWLocks(void)
/* for recovery state queue */
numLocks += 1;
/* for prune dirty queue */
numLocks += 1;
/*
* Add any requested by loadable modules; for backwards-compatibility
* reasons, allocate at least NUM_USER_DEFINED_LWLOCKS of them even if

View File

@ -399,6 +399,7 @@ typedef struct knl_g_ckpt_context {
pg_atomic_uint64 dirty_page_queue_head;
pg_atomic_uint32 actual_dirty_page_num;
slock_t queue_lock;
struct LWLock* prune_queue_lock;
/* pagewriter thread */
PageWriterProcs page_writer_procs;

View File

@ -178,6 +178,7 @@ enum BuiltinTrancheIds
LWTRANCHE_DW_SINGLE_POS,
LWTRANCHE_DW_SINGLE_WRITE,
LWTRANCHE_REDO_POINT_QUEUE,
LWTRANCHE_PRUNE_DIRTY_QUEUE,
LWTRANCHE_ACCOUNT_TABLE,
LWTRANCHE_EXTEND, // For general 3rd plugin
LWTRANCHE_MPFL,