fix CHUNK_MGR with bad used_

This commit is contained in:
tushicheng
2024-06-17 22:04:50 +00:00
committed by ob-robot
parent 16a442b5da
commit 60629a37aa
2 changed files with 94 additions and 43 deletions

View File

@ -60,7 +60,7 @@ AChunkMgr &AChunkMgr::instance()
AChunkMgr::AChunkMgr()
: limit_(DEFAULT_LIMIT), urgent_(0), hold_(0),
total_hold_(0), used_(0), shadow_hold_(0),
total_hold_(0), cache_hold_(0), shadow_hold_(0),
max_chunk_cache_size_(limit_)
{
// only cache normal_chunk or large_chunk
@ -99,7 +99,7 @@ void *AChunkMgr::direct_alloc(const uint64_t size, const bool can_use_huge_page,
}
}
if (ptr != nullptr) {
ATOMIC_FAA(&get_slot(size).maps_, 1);
inc_maps(size);
IGNORE_RETURN ATOMIC_FAA(&total_hold_, size);
} else {
LOG_ERROR_RET(OB_ALLOCATE_MEMORY_FAILED, "low alloc fail", K(size), K(orig_errno), K(errno));
@ -116,7 +116,7 @@ void AChunkMgr::direct_free(const void *ptr, const uint64_t size)
{
common::ObTimeGuard time_guard(__func__, 1000 * 1000);
ATOMIC_FAA(&get_slot(size).unmaps_, 1);
inc_unmaps(size);
IGNORE_RETURN ATOMIC_FAA(&total_hold_, -size);
low_free(ptr, size);
}
@ -207,7 +207,7 @@ AChunk *AChunkMgr::alloc_chunk(const uint64_t size, bool high_prio)
AChunk *chunk = nullptr;
// Reuse chunk from self-cache
if (OB_NOT_NULL(chunk = get_slot(all_size)->pop())) {
if (OB_NOT_NULL(chunk = pop_chunk_with_size(all_size))) {
int64_t orig_hold_size = chunk->hold();
if (hold_size == orig_hold_size) {
// do-nothing
@ -233,7 +233,7 @@ AChunk *AChunkMgr::alloc_chunk(const uint64_t size, bool high_prio)
bool updated = false;
for (int i = MAX_LARGE_ACHUNK_INDEX; !updated && i >= 0; --i) {
while (!(updated = update_hold(hold_size, high_prio)) &&
OB_NOT_NULL(chunk = slots_[i]->pop())) {
OB_NOT_NULL(chunk = pop_chunk_with_index(i))) {
// Wash chunk from all-cache when observer's hold reaches limit
int64_t orig_all_size = chunk->aligned();
int64_t orig_hold_size = chunk->hold();
@ -254,7 +254,6 @@ AChunk *AChunkMgr::alloc_chunk(const uint64_t size, bool high_prio)
}
}
if (OB_NOT_NULL(chunk)) {
IGNORE_RETURN ATOMIC_AAF(&used_, hold_size);
chunk->alloc_bytes_ = size;
SANITY_UNPOISON(chunk, all_size); // maybe no need?
} else if (REACH_TIME_INTERVAL(1 * 1000 * 1000)) {
@ -270,16 +269,14 @@ void AChunkMgr::free_chunk(AChunk *chunk)
if (OB_NOT_NULL(chunk)) {
const int64_t hold_size = chunk->hold();
const uint64_t all_size = chunk->aligned();
IGNORE_RETURN ATOMIC_AAF(&used_, -hold_size);
const double max_large_cache_ratio = 0.5;
int64_t max_large_cache_size = min(limit_ - used_, max_chunk_cache_size_) * max_large_cache_ratio;
const int64_t cache_hold = hold_ - used_;
const int64_t large_cache_hold = cache_hold - slots_[NORMAL_ACHUNK_INDEX]->hold();
int64_t max_large_cache_size = min(limit_ - get_used(), max_chunk_cache_size_) * max_large_cache_ratio;
const int64_t large_cache_hold = cache_hold_ - get_freelist(NORMAL_ACHUNK_INDEX).hold();
bool freed = true;
if (cache_hold + hold_size <= max_chunk_cache_size_
if (cache_hold_ + hold_size <= max_chunk_cache_size_
&& (NORMAL_ACHUNK_SIZE == all_size || large_cache_hold <= max_large_cache_size)
&& 0 == chunk->washed_size_) {
freed = !get_slot(all_size)->push(chunk);
freed = !push_chunk(chunk);
}
if (freed) {
direct_free(chunk, all_size);
@ -297,7 +294,7 @@ AChunk *AChunkMgr::alloc_co_chunk(const uint64_t size)
bool updated = false;
for (int i = MAX_LARGE_ACHUNK_INDEX; !updated && i >= 0; --i) {
while (!(updated = update_hold(hold_size, true)) &&
OB_NOT_NULL(chunk = slots_[i]->pop())) {
OB_NOT_NULL(chunk = pop_chunk_with_index(i))) {
int64_t all_size = chunk->aligned();
int64_t hold_size = chunk->hold();
direct_free(chunk, all_size);
@ -318,7 +315,6 @@ AChunk *AChunkMgr::alloc_co_chunk(const uint64_t size)
}
if (OB_NOT_NULL(chunk)) {
IGNORE_RETURN ATOMIC_AAF(&used_, hold_size);
chunk->alloc_bytes_ = size;
//SANITY_UNPOISON(chunk, all_size); // maybe no need?
} else if (REACH_TIME_INTERVAL(1 * 1000 * 1000)) {
@ -334,7 +330,6 @@ void AChunkMgr::free_co_chunk(AChunk *chunk)
if (OB_NOT_NULL(chunk)) {
const int64_t hold_size = chunk->hold();
const uint64_t all_size = chunk->aligned();
IGNORE_RETURN ATOMIC_AAF(&used_, -hold_size);
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false);
}
@ -385,11 +380,15 @@ int64_t AChunkMgr::to_string(char *buf, const int64_t buf_len) const
int64_t large_maps = 0;
int64_t large_unmaps = 0;
for (int i = MIN_LARGE_ACHUNK_INDEX; i <= MAX_LARGE_ACHUNK_INDEX; ++i) {
large_maps += slots_[i].maps_;
large_unmaps += slots_[i].unmaps_;
large_maps += get_maps(i);
large_unmaps += get_unmaps(i);
}
int64_t total_maps = 0;
int64_t total_unmaps = 0;
for (int i = 0; i <= MAX_ACHUNK_INDEX; ++i) {
total_maps += get_maps(i);
total_unmaps += get_unmaps(i);
}
int64_t total_maps = slots_[NORMAL_ACHUNK_INDEX].maps_ + large_maps + slots_[HUGE_ACHUNK_INDEX].maps_;
int64_t total_unmaps = slots_[NORMAL_ACHUNK_INDEX].unmaps_ + large_unmaps + slots_[HUGE_ACHUNK_INDEX].unmaps_;
ret = databuff_printf(buf, buf_len, pos,
"[CHUNK_MGR] limit=%'15ld hold=%'15ld total_hold=%'15ld used=%'15ld freelists_hold=%'15ld"
" total_maps=%'15ld total_unmaps=%'15ld large_maps=%'15ld large_unmaps=%'15ld huge_maps=%'15ld huge_unmaps=%'15ld"
@ -399,8 +398,8 @@ int64_t AChunkMgr::to_string(char *buf, const int64_t buf_len) const
#else
" virtual_memory_used=%'15ld actual_virtual_memory_used=%'15ld\n",
#endif
limit_, hold_, total_hold_, used_, hold_ - used_,
total_maps, total_unmaps, large_maps, large_unmaps, slots_[HUGE_ACHUNK_INDEX].maps_, slots_[HUGE_ACHUNK_INDEX].unmaps_,
limit_, hold_, total_hold_, get_used(), cache_hold_,
total_maps, total_unmaps, large_maps, large_unmaps, get_maps(HUGE_ACHUNK_INDEX), get_unmaps(HUGE_ACHUNK_INDEX),
0, resident_size,
#ifndef ENABLE_SANITY
memory_used
@ -409,14 +408,12 @@ int64_t AChunkMgr::to_string(char *buf, const int64_t buf_len) const
#endif
);
for (int i = 0; OB_SUCC(ret) && i <= MAX_LARGE_ACHUNK_INDEX; ++i) {
const AChunkList &free_list = slots_[i].free_list_;
const int64_t maps = slots_[i].maps_;
const int64_t unmaps = slots_[i].unmaps_;
const AChunkList &free_list = get_freelist(i);
ret = databuff_printf(buf, buf_len, pos,
"[CHUNK_MGR] %'2d MB_CACHE: hold=%'15ld free=%'15ld pushes=%'15ld pops=%'15ld maps=%'15ld unmaps=%'15ld\n",
(i + 1) * 2, free_list.hold(), free_list.count(),
free_list.get_pushes(), free_list.get_pops(),
maps, unmaps);
get_maps(i), get_unmaps(i));
}
return pos;
}
@ -425,18 +422,19 @@ int64_t AChunkMgr::sync_wash()
{
int64_t washed_size = 0;
for (int i = 0; i <= MAX_LARGE_ACHUNK_INDEX; ++i) {
AChunk *head = slots_[i]->popall();
int64_t cache_hold = 0;
AChunk *head = popall_with_index(i, cache_hold);
if (OB_NOT_NULL(head)) {
AChunk *chunk = head;
do {
const int64_t hold_size = chunk->hold();
const int64_t all_size = chunk->aligned();
AChunk *next_chunk = chunk->next_;
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false);
washed_size += hold_size;
chunk = next_chunk;
} while (chunk != head);
ATOMIC_FAA(&cache_hold_, -cache_hold);
IGNORE_RETURN update_hold(-cache_hold, false);
washed_size += cache_hold;
}
}
return washed_size;

View File

@ -117,9 +117,10 @@ public:
}
return chunk;
}
inline AChunk *popall()
inline AChunk *popall(int64_t &hold)
{
AChunk *chunk = NULL;
hold = 0;
if (!OB_ISNULL(header_)) {
ObDisableDiagnoseGuard disable_diagnose_guard;
if (with_mutex_) {
@ -128,6 +129,7 @@ public:
DEFER(if (with_mutex_) {mutex_.unlock();});
if (!OB_ISNULL(header_)) {
chunk = header_;
hold = hold_;
hold_ = 0;
pops_ = pushes_;
header_ = NULL;
@ -206,11 +208,11 @@ private:
static constexpr int64_t DEFAULT_LIMIT = 4L << 30; // 4GB
static constexpr int64_t ACHUNK_ALIGN_SIZE = INTACT_ACHUNK_SIZE;
static constexpr int64_t NORMAL_ACHUNK_SIZE = INTACT_ACHUNK_SIZE;
static constexpr int32_t N = 10;
static constexpr int32_t MAX_ACHUNK_INDEX = 10;
static constexpr int32_t NORMAL_ACHUNK_INDEX = 0;
static constexpr int32_t MIN_LARGE_ACHUNK_INDEX = NORMAL_ACHUNK_INDEX + 1;
static constexpr int32_t MAX_LARGE_ACHUNK_INDEX = N - 1;
static constexpr int32_t HUGE_ACHUNK_INDEX = MAX_LARGE_ACHUNK_INDEX + 1;
static constexpr int32_t MAX_LARGE_ACHUNK_INDEX = MAX_ACHUNK_INDEX - 1;
static constexpr int32_t HUGE_ACHUNK_INDEX = MAX_ACHUNK_INDEX;
public:
static AChunkMgr &instance();
@ -259,23 +261,74 @@ private:
// wrap for mmap
void *low_alloc(const uint64_t size, const bool can_use_huge_page, bool &huge_page_used, const bool alloc_shadow);
void low_free(const void *ptr, const uint64_t size);
Slot &get_slot(const uint64_t size)
int32_t get_chunk_index(const uint64_t size)
{
int64_t index = (size - 1) / INTACT_ACHUNK_SIZE;
if (index > HUGE_ACHUNK_INDEX) {
index = HUGE_ACHUNK_INDEX;
return MIN(HUGE_ACHUNK_INDEX, (size - 1) / INTACT_ACHUNK_SIZE);
}
return slots_[index];
void inc_maps(const uint64_t size)
{
int32_t chunk_index = get_chunk_index(size);
ATOMIC_FAA(&slots_[chunk_index].maps_, 1);
}
void inc_unmaps(const uint64_t size)
{
int32_t chunk_index = get_chunk_index(size);
ATOMIC_FAA(&slots_[chunk_index].unmaps_, 1);
}
// Try to cache `chunk` on the freelist matching its aligned size.
// Returns true when the freelist accepted the chunk (ownership is
// transferred to the cache and cache_hold_ is credited); returns false
// when the caller must free the chunk itself. A null chunk is a no-op
// that reports success.
bool push_chunk(AChunk* chunk)
{
  bool bret = true;
  if (OB_NOT_NULL(chunk)) {
    // Read hold()/aligned() BEFORE publishing the chunk: once push()
    // succeeds, another thread may pop and direct_free the chunk, so a
    // later chunk->hold() would be a use-after-free and could feed a
    // garbage value into cache_hold_.
    const int64_t hold_size = chunk->hold();
    const int32_t chunk_index = get_chunk_index(chunk->aligned());
    bret = slots_[chunk_index]->push(chunk);
    if (bret) {
      ATOMIC_FAA(&cache_hold_, hold_size);
    }
  }
  return bret;
}
// Pop one cached chunk from the freelist at `chunk_index` and deduct its
// footprint from cache_hold_. Returns nullptr when that freelist is empty.
// Reading hold() after pop() is safe here: the popper owns the chunk.
AChunk* pop_chunk_with_index(int32_t chunk_index)
{
  AChunk *popped = slots_[chunk_index]->pop();
  if (OB_NOT_NULL(popped)) {
    ATOMIC_FAA(&cache_hold_, -popped->hold());
  }
  return popped;
}
// Pop a cached chunk from the slot that serves allocations of `size` bytes.
AChunk* pop_chunk_with_size(const uint64_t size)
{
  return pop_chunk_with_index(get_chunk_index(size));
}
// Detach the whole freelist at `chunk_index`; `hold` receives the total
// bytes that were cached there. Unlike pop_chunk_with_index(), this does
// NOT touch cache_hold_ — the caller is expected to subtract `hold` itself.
AChunk* popall_with_index(int32_t chunk_index, int64_t &hold)
{
  AChunk *head = slots_[chunk_index]->popall(hold);
  return head;
}
// Snapshot of the mmap count recorded for slot `chunk_index`.
int64_t get_maps(int32_t chunk_index) const
{
  const Slot &slot = slots_[chunk_index];
  return slot.maps_;
}
// Snapshot of the munmap count recorded for slot `chunk_index`.
int64_t get_unmaps(int32_t chunk_index) const
{
  const Slot &slot = slots_[chunk_index];
  return slot.unmaps_;
}
// Read-only view of the chunk freelist kept in slot `chunk_index`.
const AChunkList& get_freelist(int32_t chunk_index) const
{
  const Slot &slot = slots_[chunk_index];
  return slot.free_list_;
}
protected:
int64_t limit_;
int64_t urgent_;
int64_t hold_; // Including the memory occupied by free_list, limited by memory_limit
int64_t total_hold_; // Including virtual memory, just for statifics.
int64_t used_;
int64_t cache_hold_;
int64_t shadow_hold_;
int64_t max_chunk_cache_size_;
Slot slots_[N + 1];
Slot slots_[MAX_ACHUNK_INDEX + 1];
}; // end of class AChunkMgr
OB_INLINE AChunk *AChunkMgr::ptr2chunk(const void *ptr)
@ -320,12 +373,12 @@ inline int64_t AChunkMgr::get_hold() const
inline int64_t AChunkMgr::get_used() const
{
return used_;
return hold_ - cache_hold_;
}
inline int64_t AChunkMgr::get_freelist_hold() const
{
return hold_ - used_;
return cache_hold_;
}
} // end of namespace lib
} // end of namespace oceanbase