[CP] wash chunk_cache when physical memory is exhausted
This commit is contained in:
11
deps/oblib/src/lib/alloc/alloc_failed_reason.h
vendored
11
deps/oblib/src/lib/alloc/alloc_failed_reason.h
vendored
@ -53,11 +53,16 @@ public:
|
|||||||
int64_t server_limit_;
|
int64_t server_limit_;
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
bool need_wash() const
|
bool need_wash_block() const
|
||||||
{
|
{
|
||||||
return reason_ == lib::CTX_HOLD_REACH_LIMIT ||
|
return reason_ == lib::CTX_HOLD_REACH_LIMIT ||
|
||||||
reason_ == lib::TENANT_HOLD_REACH_LIMIT ||
|
reason_ == lib::TENANT_HOLD_REACH_LIMIT ||
|
||||||
reason_ == lib::SERVER_HOLD_REACH_LIMIT;
|
reason_ == lib::SERVER_HOLD_REACH_LIMIT;
|
||||||
|
}
|
||||||
|
bool need_wash_chunk() const
|
||||||
|
{
|
||||||
|
return reason_ == lib::PHYSICAL_MEMORY_EXHAUST;
|
||||||
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@ -430,10 +430,18 @@ void* ObTenantCtxAllocator::common_alloc(const int64_t size, const ObMemAttr &at
|
|||||||
sample_allowed = ObMallocSampleLimiter::malloc_sample_allowed(size, attr);
|
sample_allowed = ObMallocSampleLimiter::malloc_sample_allowed(size, attr);
|
||||||
alloc_size = sample_allowed ? (size + AOBJECT_BACKTRACE_SIZE) : size;
|
alloc_size = sample_allowed ? (size + AOBJECT_BACKTRACE_SIZE) : size;
|
||||||
obj = allocator.alloc_object(alloc_size, attr);
|
obj = allocator.alloc_object(alloc_size, attr);
|
||||||
if (OB_ISNULL(obj) && g_alloc_failed_ctx().need_wash()) {
|
if (OB_ISNULL(obj)) {
|
||||||
int64_t total_size = ta.sync_wash();
|
int64_t total_size = 0;
|
||||||
ObMallocTimeMonitor::click("SYNC_WASH_END");
|
if (g_alloc_failed_ctx().need_wash_block()) {
|
||||||
obj = allocator.alloc_object(alloc_size, attr);
|
total_size += ta.sync_wash();
|
||||||
|
ObMallocTimeMonitor::click("WASH_BLOCK_END");
|
||||||
|
} else if (g_alloc_failed_ctx().need_wash_chunk()) {
|
||||||
|
total_size += CHUNK_MGR.sync_wash();
|
||||||
|
ObMallocTimeMonitor::click("WASH_CHUNK_END");
|
||||||
|
}
|
||||||
|
if (total_size > 0) {
|
||||||
|
obj = allocator.alloc_object(alloc_size, attr);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -504,10 +512,18 @@ void* ObTenantCtxAllocator::common_realloc(const void *ptr, const int64_t size,
|
|||||||
sample_allowed = ObMallocSampleLimiter::malloc_sample_allowed(size, attr);
|
sample_allowed = ObMallocSampleLimiter::malloc_sample_allowed(size, attr);
|
||||||
alloc_size = sample_allowed ? (size + AOBJECT_BACKTRACE_SIZE) : size;
|
alloc_size = sample_allowed ? (size + AOBJECT_BACKTRACE_SIZE) : size;
|
||||||
obj = allocator.realloc_object(obj, alloc_size, attr);
|
obj = allocator.realloc_object(obj, alloc_size, attr);
|
||||||
if(OB_ISNULL(obj) && g_alloc_failed_ctx().need_wash()) {
|
if(OB_ISNULL(obj)) {
|
||||||
int64_t total_size = ta.sync_wash();
|
int64_t total_size = 0;
|
||||||
ObMallocTimeMonitor::click("SYNC_WASH_END");
|
if (g_alloc_failed_ctx().need_wash_block()) {
|
||||||
obj = allocator.realloc_object(obj, alloc_size, attr);
|
total_size += ta.sync_wash();
|
||||||
|
ObMallocTimeMonitor::click("WASH_BLOCK_END");
|
||||||
|
} else if (g_alloc_failed_ctx().need_wash_chunk()) {
|
||||||
|
total_size += CHUNK_MGR.sync_wash();
|
||||||
|
ObMallocTimeMonitor::click("WASH_CHUNK_END");
|
||||||
|
}
|
||||||
|
if (total_size > 0) {
|
||||||
|
obj = allocator.realloc_object(obj, alloc_size, attr);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
24
deps/oblib/src/lib/resource/achunk_mgr.cpp
vendored
24
deps/oblib/src/lib/resource/achunk_mgr.cpp
vendored
// Thin wrapper over the madvise(2) system call; returns its raw result
// (0 on success, -1 with errno set on failure).
int AChunkMgr::madvise(void *addr, size_t length, int advice)
{
  return ::madvise(addr, length, advice);
}
|
||||||
|
|
||||||
|
// Drains both chunk free-list caches and frees every cached chunk back to
// the OS. Intended for the allocation-failure path where physical memory is
// exhausted (g_alloc_failed_ctx().need_wash_chunk()).
// Returns the total hold size (bytes) that was washed.
int64_t AChunkMgr::sync_wash()
{
  int64_t washed_size = 0;
  AChunk *free_lists[2] = {};
  // Detach all cached chunks from both the normal and the large free list
  // in one shot; popall() leaves each list empty.
  free_lists[0] = free_list_.popall();
  free_lists[1] = large_free_list_.popall();
  for (int i = 0; i < ARRAYSIZEOF(free_lists); ++i) {
    AChunk *head = free_lists[i];
    if (OB_NOT_NULL(head)) {
      // NOTE(review): traversal stops when it wraps back to `head`, so the
      // detached list appears to be circular -- confirm against AChunkList.
      AChunk *chunk = head;
      do {
        const int64_t hold_size = chunk->hold();
        const int64_t all_size = chunk->aligned();
        // Save the successor before the node's memory is released.
        AChunk *next_chunk = chunk->next_;
        direct_free(chunk, all_size);
        // Adjust the manager's global hold counter; the `false` flag
        // presumably skips limit checking -- TODO confirm its semantics.
        IGNORE_RETURN update_hold(-hold_size, false);
        washed_size += hold_size;
        chunk = next_chunk;
      } while (chunk != head);
    }
  }
  return washed_size;
}
|
||||||
20
deps/oblib/src/lib/resource/achunk_mgr.h
vendored
20
deps/oblib/src/lib/resource/achunk_mgr.h
vendored
@ -117,6 +117,24 @@ public:
|
|||||||
}
|
}
|
||||||
return chunk;
|
return chunk;
|
||||||
}
|
}
|
||||||
|
// Detaches the entire cached chunk list in one operation and returns its
// head (NULL when the cache is empty). Resets the list's bookkeeping
// (hold_, pops_) so the cache reads as empty afterwards.
inline AChunk *popall()
{
  AChunk *chunk = NULL;
  // Cheap unlocked pre-check; re-checked below once the mutex is held.
  if (!OB_ISNULL(header_)) {
    ObDisableDiagnoseGuard disable_diagnose_guard;
    if (with_mutex_) {
      mutex_.lock();
    }
    DEFER(if (with_mutex_) {mutex_.unlock();});
    if (!OB_ISNULL(header_)) {
      chunk = header_;
      hold_ = 0;
      // Count every pushed chunk as popped so pushes_/pops_ stay balanced.
      pops_ = pushes_;
      header_ = NULL;
    }
  }
  return chunk;
}
|
||||||
|
|
||||||
inline int64_t count() const
|
inline int64_t count() const
|
||||||
{
|
{
|
||||||
@ -222,6 +240,8 @@ public:
|
|||||||
inline int64_t get_huge_unmaps() { return huge_unmaps_; }
|
inline int64_t get_huge_unmaps() { return huge_unmaps_; }
|
||||||
inline int64_t get_shadow_hold() const { return ATOMIC_LOAD(&shadow_hold_); }
|
inline int64_t get_shadow_hold() const { return ATOMIC_LOAD(&shadow_hold_); }
|
||||||
|
|
||||||
|
int64_t sync_wash();
|
||||||
|
|
||||||
private:
|
private:
|
||||||
typedef ABitSet ChunkBitMap;
|
typedef ABitSet ChunkBitMap;
|
||||||
|
|
||||||
|
|||||||
29
deps/oblib/unittest/lib/alloc/test_chunk_mgr.cpp
vendored
29
deps/oblib/unittest/lib/alloc/test_chunk_mgr.cpp
vendored
@ -341,3 +341,32 @@ TEST_F(TestChunkMgr, FreeListManyChunk)
|
|||||||
EXPECT_EQ(2* AChunkList::DEFAULT_MAX_CHUNK_CACHE_SIZE/INTACT_ACHUNK_SIZE, free_list_.get_pushes());
|
EXPECT_EQ(2* AChunkList::DEFAULT_MAX_CHUNK_CACHE_SIZE/INTACT_ACHUNK_SIZE, free_list_.get_pushes());
|
||||||
EXPECT_EQ(AChunkList::DEFAULT_MAX_CHUNK_CACHE_SIZE/INTACT_ACHUNK_SIZE, free_list_.get_pops());
|
EXPECT_EQ(AChunkList::DEFAULT_MAX_CHUNK_CACHE_SIZE/INTACT_ACHUNK_SIZE, free_list_.get_pops());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Verifies AChunkMgr::sync_wash(): after caching 16 normal + 16 large
// chunks in the two free lists, sync_wash() must release every cached
// chunk, return exactly the cached hold size, and leave both lists empty.
TEST_F(TestChunkMgr, sync_wash)
{
  set_limit(1LL<<30);
  int NORMAL_SIZE = OB_MALLOC_BIG_BLOCK_SIZE;
  // Slightly larger than one intact chunk so it lands in large_free_list_.
  int LARGE_SIZE = INTACT_ACHUNK_SIZE + 100;
  // Raise both cache ceilings so every freed chunk is cached, not unmapped.
  free_list_.set_max_chunk_cache_size(1LL<<30);
  large_free_list_.set_max_chunk_cache_size(1LL<<30);
  AChunk *chunks[16][2] = {};
  for (int i = 0; i < 16; ++i) {
    chunks[i][0] = alloc_chunk(NORMAL_SIZE);
    chunks[i][1] = alloc_chunk(LARGE_SIZE);
  }
  // Free everything; the chunks should be parked in the two free lists.
  for (int i = 0; i < 16; ++i) {
    for (int j = 0; j < 2; ++j) {
      free_chunk(chunks[i][j]);
      chunks[i][j] = NULL;
    }
  }
  int64_t hold = free_list_.hold() + large_free_list_.hold();
  EXPECT_EQ(hold, hold_);
  EXPECT_EQ(16, free_list_.count());
  EXPECT_EQ(16, large_free_list_.count());
  // Washing must release exactly the cached hold and empty both lists.
  int64_t washed_size = sync_wash();
  EXPECT_EQ(hold, washed_size);
  EXPECT_EQ(0, hold_);
  EXPECT_EQ(0, free_list_.count());
  EXPECT_EQ(0, large_free_list_.count());
}
|
||||||
Reference in New Issue
Block a user