[CP] wash chunk_cache when physical memory is exhausted

This commit is contained in:
obdev
2024-02-10 11:21:09 +00:00
committed by ob-robot
parent 6637b5ea7d
commit 3abb03d768
5 changed files with 105 additions and 11 deletions

View File

@ -53,12 +53,17 @@ public:
int64_t server_limit_;
};
};
bool need_wash() const
// Whether the failed allocation can be retried after washing cached
// blocks: true when the failure reason is a hold limit being reached
// at the ctx, tenant or server level.
bool need_wash_block() const
{
  const bool ctx_limit = (reason_ == lib::CTX_HOLD_REACH_LIMIT);
  const bool tenant_limit = (reason_ == lib::TENANT_HOLD_REACH_LIMIT);
  const bool server_limit = (reason_ == lib::SERVER_HOLD_REACH_LIMIT);
  return ctx_limit || tenant_limit || server_limit;
}
// Whether the failed allocation can be retried after washing the chunk
// cache: true only when the failure reason is physical-memory exhaustion.
bool need_wash_chunk() const
{
  const bool phy_mem_exhausted = (reason_ == lib::PHYSICAL_MEMORY_EXHAUST);
  return phy_mem_exhausted;
}
};
char *alloc_failed_msg();

View File

@ -430,12 +430,20 @@ void* ObTenantCtxAllocator::common_alloc(const int64_t size, const ObMemAttr &at
sample_allowed = ObMallocSampleLimiter::malloc_sample_allowed(size, attr);
alloc_size = sample_allowed ? (size + AOBJECT_BACKTRACE_SIZE) : size;
obj = allocator.alloc_object(alloc_size, attr);
if (OB_ISNULL(obj) && g_alloc_failed_ctx().need_wash()) {
int64_t total_size = ta.sync_wash();
ObMallocTimeMonitor::click("SYNC_WASH_END");
if (OB_ISNULL(obj)) {
int64_t total_size = 0;
if (g_alloc_failed_ctx().need_wash_block()) {
total_size += ta.sync_wash();
ObMallocTimeMonitor::click("WASH_BLOCK_END");
} else if (g_alloc_failed_ctx().need_wash_chunk()) {
total_size += CHUNK_MGR.sync_wash();
ObMallocTimeMonitor::click("WASH_CHUNK_END");
}
if (total_size > 0) {
obj = allocator.alloc_object(alloc_size, attr);
}
}
}
if (NULL != obj) {
obj->on_malloc_sample_ = sample_allowed;
@ -504,12 +512,20 @@ void* ObTenantCtxAllocator::common_realloc(const void *ptr, const int64_t size,
sample_allowed = ObMallocSampleLimiter::malloc_sample_allowed(size, attr);
alloc_size = sample_allowed ? (size + AOBJECT_BACKTRACE_SIZE) : size;
obj = allocator.realloc_object(obj, alloc_size, attr);
if(OB_ISNULL(obj) && g_alloc_failed_ctx().need_wash()) {
int64_t total_size = ta.sync_wash();
ObMallocTimeMonitor::click("SYNC_WASH_END");
if(OB_ISNULL(obj)) {
int64_t total_size = 0;
if (g_alloc_failed_ctx().need_wash_block()) {
total_size += ta.sync_wash();
ObMallocTimeMonitor::click("WASH_BLOCK_END");
} else if (g_alloc_failed_ctx().need_wash_chunk()) {
total_size += CHUNK_MGR.sync_wash();
ObMallocTimeMonitor::click("WASH_CHUNK_END");
}
if (total_size > 0) {
obj = allocator.realloc_object(obj, alloc_size, attr);
}
}
}
if (obj != NULL) {
obj->on_malloc_sample_ = sample_allowed;

View File

@ -437,3 +437,27 @@ int AChunkMgr::madvise(void *addr, size_t length, int advice)
{
return ::madvise(addr, length, advice);
}
int64_t AChunkMgr::sync_wash()
{
int64_t washed_size = 0;
AChunk *free_lists[2] = {};
free_lists[0] = free_list_.popall();
free_lists[1] = large_free_list_.popall();
for (int i = 0; i < ARRAYSIZEOF(free_lists); ++i) {
AChunk *head = free_lists[i];
if (OB_NOT_NULL(head)) {
AChunk *chunk = head;
do {
const int64_t hold_size = chunk->hold();
const int64_t all_size = chunk->aligned();
AChunk *next_chunk = chunk->next_;
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false);
washed_size += hold_size;
chunk = next_chunk;
} while (chunk != head);
}
}
return washed_size;
}

View File

@ -117,6 +117,24 @@ public:
}
return chunk;
}
// Detach the entire free list in one shot and hand it to the caller.
// Returns the list head (chunks linked via next_), or NULL when the
// list is empty. Accounting (hold_/pops_) is reset to reflect an empty
// cache.
inline AChunk *popall()
{
AChunk *chunk = NULL;
// Unlocked fast-path check: skip taking the mutex when the list looks
// empty; header_ is re-checked under the lock below before use.
if (!OB_ISNULL(header_)) {
ObDisableDiagnoseGuard disable_diagnose_guard;
if (with_mutex_) {
mutex_.lock();
}
// DEFER releases the mutex on every exit path when it was taken.
DEFER(if (with_mutex_) {mutex_.unlock();});
if (!OB_ISNULL(header_)) {
chunk = header_;
// With zero chunks cached, hold drops to 0 and pops catches up to
// pushes so count() (presumably pushes_ - pops_) reads as empty.
hold_ = 0;
pops_ = pushes_;
header_ = NULL;
}
}
return chunk;
}
inline int64_t count() const
{
@ -222,6 +240,8 @@ public:
inline int64_t get_huge_unmaps() { return huge_unmaps_; }
inline int64_t get_shadow_hold() const { return ATOMIC_LOAD(&shadow_hold_); }
int64_t sync_wash();
private:
typedef ABitSet ChunkBitMap;

View File

@ -341,3 +341,32 @@ TEST_F(TestChunkMgr, FreeListManyChunk)
EXPECT_EQ(2* AChunkList::DEFAULT_MAX_CHUNK_CACHE_SIZE/INTACT_ACHUNK_SIZE, free_list_.get_pushes());
EXPECT_EQ(AChunkList::DEFAULT_MAX_CHUNK_CACHE_SIZE/INTACT_ACHUNK_SIZE, free_list_.get_pops());
}
// Cache 16 normal and 16 large chunks, then verify that sync_wash()
// empties both free lists and reports exactly the cached hold size.
TEST_F(TestChunkMgr, sync_wash)
{
  set_limit(1LL<<30);
  const int NORMAL_SIZE = OB_MALLOC_BIG_BLOCK_SIZE;
  const int LARGE_SIZE = INTACT_ACHUNK_SIZE + 100;
  // Make both caches large enough that every freed chunk stays cached.
  free_list_.set_max_chunk_cache_size(1LL<<30);
  large_free_list_.set_max_chunk_cache_size(1LL<<30);
  const int CHUNK_COUNT = 16;
  AChunk *normal_chunks[CHUNK_COUNT] = {};
  AChunk *large_chunks[CHUNK_COUNT] = {};
  for (int i = 0; i < CHUNK_COUNT; ++i) {
    normal_chunks[i] = alloc_chunk(NORMAL_SIZE);
    large_chunks[i] = alloc_chunk(LARGE_SIZE);
  }
  for (int i = 0; i < CHUNK_COUNT; ++i) {
    free_chunk(normal_chunks[i]);
    normal_chunks[i] = NULL;
    free_chunk(large_chunks[i]);
    large_chunks[i] = NULL;
  }
  const int64_t cached_hold = free_list_.hold() + large_free_list_.hold();
  EXPECT_EQ(cached_hold, hold_);
  EXPECT_EQ(CHUNK_COUNT, free_list_.count());
  EXPECT_EQ(CHUNK_COUNT, large_free_list_.count());
  const int64_t washed_size = sync_wash();
  EXPECT_EQ(cached_hold, washed_size);
  EXPECT_EQ(0, hold_);
  EXPECT_EQ(0, free_list_.count());
  EXPECT_EQ(0, large_free_list_.count());
}