[CP] Set up caching for large chunks of memory

obdev
2023-06-02 02:49:03 +00:00
committed by ob-robot
parent 1c4a29ea94
commit a84b7a3e75
15 changed files with 472 additions and 104 deletions
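
Overview (inferred from the diffs below): the chunk caches are now bounded by bytes rather than by chunk count, so set_max_chunk_cache_cnt() becomes set_max_chunk_cache_size() throughout AChunkList, AChunkMgr, BlockSet and ObPageManager. AChunkMgr gains a second free list, large_free_list_, which caches chunks whose aligned size is LARGE_ACHUNK_SIZE (two intact chunks); a cached large chunk reused for a smaller request is trimmed with madvise(MADV_DONTNEED) instead of being unmapped. Mappings beyond LARGE_ACHUNK_SIZE are never cached and are tracked by the new huge_maps_/huge_unmaps_ counters, total_hold_ accounting moves into direct_alloc()/direct_free(), and the large cache is sized by the new hidden cluster parameter _memory_large_chunk_cache_size (0 means adaptive, falling back to DEFAULT_LARGE_CHUNK_CACHE_SIZE, 128MB).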

View File

@@ -35,7 +35,7 @@ BlockSet::BlockSet()
total_hold_(0), total_payload_(0), total_used_(0), tallocator_(NULL), total_hold_(0), total_payload_(0), total_used_(0), tallocator_(NULL),
chunk_free_list_(false/*with_mutex*/), locker_(nullptr) chunk_free_list_(false/*with_mutex*/), locker_(nullptr)
{ {
chunk_free_list_.set_max_chunk_cache_cnt(0); chunk_free_list_.set_max_chunk_cache_size(0);
} }
BlockSet::~BlockSet() BlockSet::~BlockSet()

View File

@@ -68,8 +68,8 @@ public:
inline uint64_t get_total_used() const; inline uint64_t get_total_used() const;
void set_tenant_ctx_allocator(ObTenantCtxAllocator &allocator); void set_tenant_ctx_allocator(ObTenantCtxAllocator &allocator);
void set_max_chunk_cache_cnt(const int cnt) void set_max_chunk_cache_size(const int64_t max_cache_size)
{ chunk_free_list_.set_max_chunk_cache_cnt(cnt); } { chunk_free_list_.set_max_chunk_cache_size(max_cache_size); }
void reset(); void reset();
void set_locker(ISetLocker *locker) { locker_ = locker; } void set_locker(ISetLocker *locker) { locker_ = locker; }
int64_t sync_wash(int64_t wash_size=INT64_MAX); int64_t sync_wash(int64_t wash_size=INT64_MAX);

View File

@@ -35,8 +35,8 @@ using lib::ObTenantCtxAllocator;
class ObPageManager : public lib::IBlockMgr class ObPageManager : public lib::IBlockMgr
{ {
public: public:
constexpr static int DEFAULT_CHUNK_CACHE_CNT = 2; constexpr static int DEFAULT_CHUNK_CACHE_SIZE = lib::INTACT_ACHUNK_SIZE * 2;
constexpr static int MINI_MODE_CHUNK_CACHE_CNT = 0; constexpr static int MINI_MODE_CHUNK_CACHE_SIZE = 0;
RBNODE(ObPageManager, rblink); RBNODE(ObPageManager, rblink);
int compare(const ObPageManager *node) const int compare(const ObPageManager *node) const
{ {
@@ -64,8 +64,8 @@ public:
(tenant_id_ == tenant_id && id_ < id); (tenant_id_ == tenant_id && id_ < id);
} }
int set_tenant_ctx(const int64_t tenant_id, const int64_t ctx_id); int set_tenant_ctx(const int64_t tenant_id, const int64_t ctx_id);
void set_max_chunk_cache_cnt(const int cnt) void set_max_chunk_cache_size(const int64_t max_cache_size)
{ bs_.set_max_chunk_cache_cnt(cnt); } { bs_.set_max_chunk_cache_size(max_cache_size); }
void reset(); void reset();
int64_t get_hold() const; int64_t get_hold() const;
int64_t get_tid() const { return tid_; } int64_t get_tid() const { return tid_; }

View File

@@ -58,9 +58,13 @@ AChunkMgr &AChunkMgr::instance()
} }
AChunkMgr::AChunkMgr() AChunkMgr::AChunkMgr()
: free_list_(), chunk_bitmap_(nullptr), limit_(DEFAULT_LIMIT), urgent_(0), hold_(0), : free_list_(), large_free_list_(),
total_hold_(0), maps_(0), unmaps_(0), large_maps_(0), large_unmaps_(0), shadow_hold_(0) chunk_bitmap_(nullptr), limit_(DEFAULT_LIMIT), urgent_(0), hold_(0),
total_hold_(0), maps_(0), unmaps_(0), large_maps_(0), large_unmaps_(0),
huge_maps_(0), huge_unmaps_(0),
shadow_hold_(0)
{ {
large_free_list_.set_max_chunk_cache_size(0);
} }
void *AChunkMgr::direct_alloc(const uint64_t size, const bool can_use_huge_page, bool &huge_page_used, const bool alloc_shadow) void *AChunkMgr::direct_alloc(const uint64_t size, const bool can_use_huge_page, bool &huge_page_used, const bool alloc_shadow)
@@ -71,19 +75,19 @@ void *AChunkMgr::direct_alloc(const uint64_t size, const bool can_use_huge_page,
void *ptr = nullptr; void *ptr = nullptr;
ptr = low_alloc(size, can_use_huge_page, huge_page_used, alloc_shadow); ptr = low_alloc(size, can_use_huge_page, huge_page_used, alloc_shadow);
if (nullptr != ptr) { if (nullptr != ptr) {
if (((uint64_t)ptr & (INTACT_ACHUNK_SIZE - 1)) != 0) { if (((uint64_t)ptr & (ACHUNK_ALIGN_SIZE - 1)) != 0) {
// not aligned // not aligned
low_free(ptr, size); low_free(ptr, size);
uint64_t new_size = size + INTACT_ACHUNK_SIZE; uint64_t new_size = size + ACHUNK_ALIGN_SIZE;
ptr = low_alloc(new_size, can_use_huge_page, huge_page_used, alloc_shadow); ptr = low_alloc(new_size, can_use_huge_page, huge_page_used, alloc_shadow);
if (nullptr != ptr) { if (nullptr != ptr) {
const uint64_t addr = align_up2((uint64_t)ptr, INTACT_ACHUNK_SIZE); const uint64_t addr = align_up2((uint64_t)ptr, ACHUNK_ALIGN_SIZE);
if (addr - (uint64_t)ptr > 0) { if (addr - (uint64_t)ptr > 0) {
low_free(ptr, addr - (uint64_t)ptr); low_free(ptr, addr - (uint64_t)ptr);
} }
if (INTACT_ACHUNK_SIZE - (addr - (uint64_t)ptr) > 0) { if (ACHUNK_ALIGN_SIZE - (addr - (uint64_t)ptr) > 0) {
low_free((void*)(addr + size), INTACT_ACHUNK_SIZE - (addr - (uint64_t)ptr)); low_free((void*)(addr + size), ACHUNK_ALIGN_SIZE - (addr - (uint64_t)ptr));
} }
ptr = (void*)addr; ptr = (void*)addr;
} }
@@ -94,9 +98,12 @@ void *AChunkMgr::direct_alloc(const uint64_t size, const bool can_use_huge_page,
if (ptr != nullptr) { if (ptr != nullptr) {
ATOMIC_FAA(&maps_, 1); ATOMIC_FAA(&maps_, 1);
if (size > INTACT_ACHUNK_SIZE) { if (size > LARGE_ACHUNK_SIZE) {
ATOMIC_FAA(&huge_maps_, 1);
} else if (size > NORMAL_ACHUNK_SIZE) {
ATOMIC_FAA(&large_maps_, 1); ATOMIC_FAA(&large_maps_, 1);
} }
IGNORE_RETURN ATOMIC_FAA(&total_hold_, size);
} else { } else {
LOG_ERROR_RET(OB_ALLOCATE_MEMORY_FAILED, "low alloc fail", K(size), K(orig_errno), K(errno)); LOG_ERROR_RET(OB_ALLOCATE_MEMORY_FAILED, "low alloc fail", K(size), K(orig_errno), K(errno));
auto &afc = g_alloc_failed_ctx(); auto &afc = g_alloc_failed_ctx();
@@ -113,9 +120,12 @@ void AChunkMgr::direct_free(const void *ptr, const uint64_t size)
common::ObTimeGuard time_guard(__func__, 1000 * 1000); common::ObTimeGuard time_guard(__func__, 1000 * 1000);
ATOMIC_FAA(&unmaps_, 1); ATOMIC_FAA(&unmaps_, 1);
if (size > INTACT_ACHUNK_SIZE) { if (size > LARGE_ACHUNK_SIZE) {
ATOMIC_FAA(&huge_unmaps_, 1);
} else if (size > NORMAL_ACHUNK_SIZE) {
ATOMIC_FAA(&large_unmaps_, 1); ATOMIC_FAA(&large_unmaps_, 1);
} }
IGNORE_RETURN ATOMIC_FAA(&total_hold_, -size);
low_free(ptr, size); low_free(ptr, size);
} }
@@ -196,37 +206,96 @@ AChunk *AChunkMgr::alloc_chunk(const uint64_t size, bool high_prio)
{ {
const int64_t hold_size = hold(size); const int64_t hold_size = hold(size);
const int64_t all_size = aligned(size); const int64_t all_size = aligned(size);
const int64_t achunk_size = INTACT_ACHUNK_SIZE;
bool is_allocated = true;
AChunk *chunk = nullptr; AChunk *chunk = nullptr;
if (achunk_size == hold_size) { if (NORMAL_ACHUNK_SIZE == all_size) {
// TODO by fengshuo.fs: chunk cached by freelist may not use all memory in it, // TODO by fengshuo.fs: chunk cached by freelist may not use all memory in it,
// so update_hold can use hold_size too. // so update_hold can use hold_size too.
if (free_list_.count() > 0) { if (free_list_.count() > 0) {
chunk = free_list_.pop(); chunk = free_list_.pop();
} }
if (OB_ISNULL(chunk)) { if (OB_ISNULL(chunk)) {
if (update_hold(hold_size, high_prio)) { bool updated = false;
while (!(updated = update_hold(hold_size, high_prio)) && large_free_list_.count() > 0) {
if (OB_NOT_NULL(chunk = large_free_list_.pop())) {
int64_t all_size = chunk->aligned();
int64_t hold_size = chunk->hold();
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false);
chunk = nullptr;
}
}
if (updated) {
bool hugetlb_used = false; bool hugetlb_used = false;
void *ptr = direct_alloc(all_size, true, hugetlb_used, SANITY_BOOL_EXPR(true)); void *ptr = direct_alloc(all_size, true, hugetlb_used, SANITY_BOOL_EXPR(true));
if (ptr != nullptr) { if (ptr != nullptr) {
chunk = new (ptr) AChunk(); chunk = new (ptr) AChunk();
chunk->is_hugetlb_ = hugetlb_used; chunk->is_hugetlb_ = hugetlb_used;
} else { } else {
IGNORE_RETURN update_hold(-hold_size, high_prio); IGNORE_RETURN update_hold(-hold_size, false);
}
}
}
} else if (LARGE_ACHUNK_SIZE == all_size) {
if (large_free_list_.count() > 0) {
chunk = large_free_list_.pop();
}
if (chunk != NULL) {
int64_t orig_hold_size = chunk->hold();
if (hold_size == orig_hold_size) {
// do-nothing
} else if (hold_size > orig_hold_size) {
if (!update_hold(hold_size - orig_hold_size, high_prio)) {
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-orig_hold_size, false);
chunk = nullptr;
}
} else {
int result = 0;
do {
result = this->madvise((char*)chunk + hold_size, orig_hold_size - hold_size, MADV_DONTNEED);
} while (result == -1 && errno == EAGAIN);
if (-1 == result) {
LOG_WARN_RET(OB_ERR_SYS, "madvise failed", K(errno));
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-orig_hold_size, false);
chunk = nullptr;
} else {
IGNORE_RETURN update_hold(hold_size - orig_hold_size, false);
}
}
}
if (OB_ISNULL(chunk)) {
bool updated = false;
while (!(updated = update_hold(hold_size, high_prio)) && free_list_.count() > 0) {
if (OB_NOT_NULL(chunk = free_list_.pop())) {
int64_t all_size = chunk->aligned();
int64_t hold_size = chunk->hold();
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false);
chunk = nullptr;
}
}
if (updated) {
bool hugetlb_used = false;
void *ptr = direct_alloc(all_size, true, hugetlb_used, SANITY_BOOL_EXPR(true));
if (ptr != nullptr) {
chunk = new (ptr) AChunk();
chunk->is_hugetlb_ = hugetlb_used;
} else {
IGNORE_RETURN update_hold(-hold_size, false);
} }
} }
} else {
is_allocated = false;
} }
} else { } else {
bool updated = false; bool updated = false;
while (!(updated = update_hold(hold_size, high_prio)) && free_list_.count() > 0) { while (!(updated = update_hold(hold_size, high_prio)) &&
if (OB_NOT_NULL(chunk = free_list_.pop())) { (free_list_.count() > 0 || large_free_list_.count() > 0)) {
direct_free(chunk, achunk_size); if (OB_NOT_NULL(chunk = free_list_.pop()) || OB_NOT_NULL(chunk = large_free_list_.pop())) {
IGNORE_RETURN update_hold(-achunk_size, high_prio); int64_t all_size = chunk->aligned();
IGNORE_RETURN ATOMIC_FAA(&total_hold_, -achunk_size); int64_t hold_size = chunk->hold();
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false);
chunk = nullptr; chunk = nullptr;
} }
} }
@ -237,16 +306,13 @@ AChunk *AChunkMgr::alloc_chunk(const uint64_t size, bool high_prio)
chunk = new (ptr) AChunk(); chunk = new (ptr) AChunk();
chunk->is_hugetlb_ = hugetlb_used; chunk->is_hugetlb_ = hugetlb_used;
} else { } else {
IGNORE_RETURN update_hold(-hold_size, high_prio); IGNORE_RETURN update_hold(-hold_size, false);
} }
} }
} }
if (OB_NOT_NULL(chunk)) { if (OB_NOT_NULL(chunk)) {
chunk->alloc_bytes_ = size; chunk->alloc_bytes_ = size;
if (is_allocated) {
IGNORE_RETURN ATOMIC_FAA(&total_hold_, all_size);
}
SANITY_UNPOISON(chunk, all_size); // maybe no need? SANITY_UNPOISON(chunk, all_size); // maybe no need?
} else if (REACH_TIME_INTERVAL(1 * 1000 * 1000)) { } else if (REACH_TIME_INTERVAL(1 * 1000 * 1000)) {
LOG_DBA_WARN(OB_ALLOCATE_MEMORY_FAILED, "msg", "oops, over total memory limit" , LOG_DBA_WARN(OB_ALLOCATE_MEMORY_FAILED, "msg", "oops, over total memory limit" ,
@@ -261,9 +327,8 @@ void AChunkMgr::free_chunk(AChunk *chunk)
if (OB_NOT_NULL(chunk)) { if (OB_NOT_NULL(chunk)) {
const int64_t hold_size = chunk->hold(); const int64_t hold_size = chunk->hold();
const uint64_t all_size = chunk->aligned(); const uint64_t all_size = chunk->aligned();
const int64_t achunk_size = INTACT_ACHUNK_SIZE;
bool freed = true; bool freed = true;
if (achunk_size == hold_size) { if (NORMAL_ACHUNK_SIZE == hold_size) {
if (hold_ + hold_size <= limit_) { if (hold_ + hold_size <= limit_) {
freed = !free_list_.push(chunk); freed = !free_list_.push(chunk);
} }
@@ -271,13 +336,18 @@ void AChunkMgr::free_chunk(AChunk *chunk)
direct_free(chunk, all_size); direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false); IGNORE_RETURN update_hold(-hold_size, false);
} }
} else if (LARGE_ACHUNK_SIZE == all_size) {
if (hold_ + hold_size <= limit_) {
freed = !large_free_list_.push(chunk);
}
if (freed) {
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false);
}
} else { } else {
direct_free(chunk, all_size); direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false); IGNORE_RETURN update_hold(-hold_size, false);
} }
if (freed) {
IGNORE_RETURN ATOMIC_FAA(&total_hold_, -all_size);
}
} }
} }
@@ -285,16 +355,16 @@ AChunk *AChunkMgr::alloc_co_chunk(const uint64_t size)
{ {
const int64_t hold_size = hold(size); const int64_t hold_size = hold(size);
const int64_t all_size = aligned(size); const int64_t all_size = aligned(size);
const int64_t achunk_size = INTACT_ACHUNK_SIZE;
bool is_allocated = true;
AChunk *chunk = nullptr; AChunk *chunk = nullptr;
bool updated = false; bool updated = false;
while (!(updated = update_hold(hold_size, true)) && free_list_.count() > 0) { while (!(updated = update_hold(hold_size, true)) &&
if (OB_NOT_NULL(chunk = free_list_.pop())) { (free_list_.count() > 0 || large_free_list_.count() > 0)) {
direct_free(chunk, achunk_size); if (OB_NOT_NULL(chunk = free_list_.pop()) || OB_NOT_NULL(chunk = large_free_list_.pop())) {
IGNORE_RETURN update_hold(-achunk_size, true); int64_t all_size = chunk->aligned();
IGNORE_RETURN ATOMIC_FAA(&total_hold_, -achunk_size); int64_t hold_size = chunk->hold();
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false);
chunk = nullptr; chunk = nullptr;
} }
} }
@@ -312,9 +382,6 @@ AChunk *AChunkMgr::alloc_co_chunk(const uint64_t size)
if (OB_NOT_NULL(chunk)) { if (OB_NOT_NULL(chunk)) {
chunk->alloc_bytes_ = size; chunk->alloc_bytes_ = size;
if (is_allocated) {
IGNORE_RETURN ATOMIC_FAA(&total_hold_, all_size);
}
//SANITY_UNPOISON(chunk, all_size); // maybe no need? //SANITY_UNPOISON(chunk, all_size); // maybe no need?
} else if (REACH_TIME_INTERVAL(1 * 1000 * 1000)) { } else if (REACH_TIME_INTERVAL(1 * 1000 * 1000)) {
LOG_DBA_WARN(OB_ALLOCATE_MEMORY_FAILED, "msg", "oops, over total memory limit" , LOG_DBA_WARN(OB_ALLOCATE_MEMORY_FAILED, "msg", "oops, over total memory limit" ,
@@ -329,10 +396,8 @@ void AChunkMgr::free_co_chunk(AChunk *chunk)
if (OB_NOT_NULL(chunk)) { if (OB_NOT_NULL(chunk)) {
const int64_t hold_size = chunk->hold(); const int64_t hold_size = chunk->hold();
const uint64_t all_size = chunk->aligned(); const uint64_t all_size = chunk->aligned();
const int64_t achunk_size = INTACT_ACHUNK_SIZE;
direct_free(chunk, all_size); direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false); IGNORE_RETURN update_hold(-hold_size, false);
IGNORE_RETURN ATOMIC_FAA(&total_hold_, -all_size);
} }
} }
@@ -360,3 +425,8 @@ bool AChunkMgr::update_hold(int64_t bytes, bool high_prio)
} }
return bret; return bret;
} }
int AChunkMgr::madvise(void *addr, size_t length, int advice)
{
return ::madvise(addr, length, advice);
}
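
Reading aid for the alloc_chunk() hunk above: when a cached large chunk is reused for a smaller request, the tail of the mapping is released with madvise(MADV_DONTNEED) and only the hold delta is returned to the limiter, rather than unmapping and remapping the whole chunk. Below is a minimal standalone sketch of that shrink-on-reuse step; the helper name is illustrative and not part of the patch, and hold sizes are assumed to be page-aligned as they are in the allocator.

#include <sys/mman.h>
#include <cerrno>
#include <cstdint>

// Hedged sketch of the shrink path in AChunkMgr::alloc_chunk() above: give back
// [new_hold, old_hold) of a cached chunk to the kernel instead of unmapping it.
// Both hold sizes are assumed page-aligned, matching chunk holds in the patch.
static bool trim_cached_chunk(void *chunk, int64_t old_hold, int64_t new_hold)
{
  bool ok = true;
  if (new_hold < old_hold) {
    int result = 0;
    do {  // the patch retries on EAGAIN in the same way
      result = ::madvise((char *)chunk + new_hold, old_hold - new_hold, MADV_DONTNEED);
    } while (-1 == result && EAGAIN == errno);
    ok = (-1 != result);  // on failure the patch falls back to freeing the whole mapping
  }
  return ok;
}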

View File

@@ -43,55 +43,53 @@ class AChunkList
DISALLOW_COPY_AND_ASSIGN(AChunkList); DISALLOW_COPY_AND_ASSIGN(AChunkList);
public: public:
static const int DEFAULT_MAX_CHUNK_CACHE_CNT = 500; static const int64_t DEFAULT_MAX_CHUNK_CACHE_SIZE = 1L<<30;
AChunkList(const bool with_mutex = true) AChunkList(const bool with_mutex = true)
: max_chunk_cache_cnt_(DEFAULT_MAX_CHUNK_CACHE_CNT), : max_chunk_cache_size_(DEFAULT_MAX_CHUNK_CACHE_SIZE),
mutex_(common::ObLatchIds::ALLOC_CHUNK_LOCK), mutex_(common::ObLatchIds::ALLOC_CHUNK_LOCK),
header_(NULL), pushes_(0), pops_(0), with_mutex_(with_mutex) header_(NULL), hold_(0), pushes_(0), pops_(0), with_mutex_(with_mutex)
{ {
mutex_.enable_record_stat(false); mutex_.enable_record_stat(false);
#ifdef OB_USE_ASAN #ifdef OB_USE_ASAN
max_chunk_cache_cnt_ = 0; max_chunk_cache_size_ = 0;
#endif #endif
} }
virtual ~AChunkList() virtual ~AChunkList()
{} {}
void set_max_chunk_cache_cnt(const int cnt) void set_max_chunk_cache_size(const int64_t max_cache_size)
{ {
#ifdef OB_USE_ASAN #ifdef OB_USE_ASAN
UNUSED(cnt); UNUSED(max_cache_size);
max_chunk_cache_cnt_ = 0; max_chunk_cache_size_ = 0;
#else #else
max_chunk_cache_cnt_ = cnt; max_chunk_cache_size_ = max_cache_size;
#endif #endif
} }
inline bool push(AChunk *chunk) inline bool push(AChunk *chunk)
{ {
bool bret = false; bool bret = false;
if (count() < max_chunk_cache_cnt_) { ObDisableDiagnoseGuard disable_diagnose_guard;
ObDisableDiagnoseGuard disable_diagnose_guard; if (with_mutex_) {
if (with_mutex_) { mutex_.lock();
mutex_.lock(); }
} DEFER(if (with_mutex_) {mutex_.unlock();});
if (count() < max_chunk_cache_cnt_) { int64_t hold = chunk->hold();
pushes_++; if (hold_ + hold <= max_chunk_cache_size_) {
if (NULL == header_) { hold_ += hold;
chunk->prev_ = chunk; pushes_++;
chunk->next_ = chunk; if (NULL == header_) {
header_ = chunk; chunk->prev_ = chunk;
} else { chunk->next_ = chunk;
chunk->prev_ = header_->prev_; header_ = chunk;
chunk->next_ = header_; } else {
chunk->prev_->next_ = chunk; chunk->prev_ = header_->prev_;
chunk->next_->prev_ = chunk; chunk->next_ = header_;
} chunk->prev_->next_ = chunk;
bret = true; chunk->next_->prev_ = chunk;
}
if (with_mutex_) {
mutex_.unlock();
} }
bret = true;
} }
return bret; return bret;
} }
@@ -103,8 +101,10 @@ public:
if (with_mutex_) { if (with_mutex_) {
mutex_.lock(); mutex_.lock();
} }
DEFER(if (with_mutex_) {mutex_.unlock();});
if (!OB_ISNULL(header_)) { if (!OB_ISNULL(header_)) {
chunk = header_; chunk = header_;
hold_ -= chunk->hold();
pops_++; pops_++;
if (header_->next_ != header_) { if (header_->next_ != header_) {
header_->prev_->next_ = header_->next_; header_->prev_->next_ = header_->next_;
@@ -114,9 +114,6 @@ public:
header_ = NULL; header_ = NULL;
} }
} }
if (with_mutex_) {
mutex_.unlock();
}
} }
return chunk; return chunk;
} }
@@ -126,6 +123,11 @@ public:
return pushes_ - pops_; return pushes_ - pops_;
} }
inline int64_t hold() const
{
return hold_;
}
inline int64_t get_pushes() const inline int64_t get_pushes() const
{ {
return pushes_; return pushes_;
@@ -137,9 +139,10 @@ public:
} }
private: private:
int32_t max_chunk_cache_cnt_; int64_t max_chunk_cache_size_;
ObMutex mutex_; ObMutex mutex_;
AChunk *header_; AChunk *header_;
int64_t hold_;
int64_t pushes_; int64_t pushes_;
int64_t pops_; int64_t pops_;
const bool with_mutex_; const bool with_mutex_;
@@ -171,8 +174,12 @@ class AChunkMgr
friend class ProtectedStackAllocator; friend class ProtectedStackAllocator;
friend class ObMemoryCutter; friend class ObMemoryCutter;
private: private:
static const int64_t DEFAULT_LIMIT = 4L << 30; // 4GB static constexpr int64_t DEFAULT_LIMIT = 4L << 30; // 4GB
static constexpr int64_t ACHUNK_ALIGN_SIZE = INTACT_ACHUNK_SIZE;
static constexpr int64_t NORMAL_ACHUNK_SIZE = INTACT_ACHUNK_SIZE;
static constexpr int64_t LARGE_ACHUNK_SIZE = INTACT_ACHUNK_SIZE << 1;
public: public:
static constexpr int64_t DEFAULT_LARGE_CHUNK_CACHE_SIZE = 128L << 20;
static AChunkMgr &instance(); static AChunkMgr &instance();
public: public:
@@ -186,11 +193,14 @@ public:
void free_co_chunk(AChunk *chunk); void free_co_chunk(AChunk *chunk);
static OB_INLINE uint64_t aligned(const uint64_t size); static OB_INLINE uint64_t aligned(const uint64_t size);
static OB_INLINE uint64_t hold(const uint64_t size); static OB_INLINE uint64_t hold(const uint64_t size);
void set_max_chunk_cache_cnt(const int cnt) void set_max_chunk_cache_size(const int64_t max_cache_size)
{ free_list_.set_max_chunk_cache_cnt(cnt); } { free_list_.set_max_chunk_cache_size(max_cache_size); }
void set_max_large_chunk_cache_size(const int64_t max_cache_size)
{ large_free_list_.set_max_chunk_cache_size(max_cache_size); }
inline static AChunk *ptr2chunk(const void *ptr); inline static AChunk *ptr2chunk(const void *ptr);
bool update_hold(int64_t bytes, bool high_prio); bool update_hold(int64_t bytes, bool high_prio);
virtual int madvise(void *addr, size_t length, int advice);
inline void set_limit(int64_t limit); inline void set_limit(int64_t limit);
inline int64_t get_limit() const; inline int64_t get_limit() const;
@@ -203,10 +213,13 @@ public:
inline int64_t get_free_chunk_pushes() const; inline int64_t get_free_chunk_pushes() const;
inline int64_t get_free_chunk_pops() const; inline int64_t get_free_chunk_pops() const;
inline int64_t get_freelist_hold() const; inline int64_t get_freelist_hold() const;
inline int64_t get_large_freelist_hold() const;
inline int64_t get_maps() { return maps_; } inline int64_t get_maps() { return maps_; }
inline int64_t get_unmaps() { return unmaps_; } inline int64_t get_unmaps() { return unmaps_; }
inline int64_t get_large_maps() { return large_maps_; } inline int64_t get_large_maps() { return large_maps_; }
inline int64_t get_large_unmaps() { return large_unmaps_; } inline int64_t get_large_unmaps() { return large_unmaps_; }
inline int64_t get_huge_maps() { return huge_maps_; }
inline int64_t get_huge_unmaps() { return huge_unmaps_; }
inline int64_t get_shadow_hold() const { return ATOMIC_LOAD(&shadow_hold_); } inline int64_t get_shadow_hold() const { return ATOMIC_LOAD(&shadow_hold_); }
private: private:
@@ -221,6 +234,7 @@ private:
protected: protected:
AChunkList free_list_; AChunkList free_list_;
AChunkList large_free_list_;
ChunkBitMap *chunk_bitmap_; ChunkBitMap *chunk_bitmap_;
int64_t limit_; int64_t limit_;
@@ -232,6 +246,8 @@ protected:
int64_t unmaps_; int64_t unmaps_;
int64_t large_maps_; int64_t large_maps_;
int64_t large_unmaps_; int64_t large_unmaps_;
int64_t huge_maps_;
int64_t huge_unmaps_;
int64_t shadow_hold_; int64_t shadow_hold_;
}; // end of class AChunkMgr }; // end of class AChunkMgr
@@ -277,7 +293,7 @@ inline int64_t AChunkMgr::get_hold() const
inline int64_t AChunkMgr::get_used() const inline int64_t AChunkMgr::get_used() const
{ {
return hold_ - get_freelist_hold(); return hold_ - get_freelist_hold() - get_large_freelist_hold();
} }
inline int64_t AChunkMgr::get_free_chunk_count() const inline int64_t AChunkMgr::get_free_chunk_count() const
@@ -297,7 +313,12 @@ inline int64_t AChunkMgr::get_free_chunk_pops() const
inline int64_t AChunkMgr::get_freelist_hold() const inline int64_t AChunkMgr::get_freelist_hold() const
{ {
return free_list_.count() * INTACT_ACHUNK_SIZE; return free_list_.hold();
}
inline int64_t AChunkMgr::get_large_freelist_hold() const
{
return large_free_list_.hold();
} }
} // end of namespace lib } // end of namespace lib
@@ -305,4 +326,6 @@ inline int64_t AChunkMgr::get_freelist_hold() const
#define CHUNK_MGR (oceanbase::lib::AChunkMgr::instance()) #define CHUNK_MGR (oceanbase::lib::AChunkMgr::instance())
#endif /* _OCEABASE_LIB_ALLOC_ACHUNK_MGR_H_ */ #endif /* _OCEABASE_LIB_ALLOC_ACHUNK_MGR_H_ */
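
The constants added in the header above split mappings into three classes, each with its own free list and counters. The snippet below is a hedged reading aid, not code from the patch; the 2MB value of INTACT_ACHUNK_SIZE is an assumption made only so the snippet is self-contained, and the constant and function names here are illustrative.

#include <cstdint>
#include <cstdio>

// Mirrors NORMAL_ACHUNK_SIZE / LARGE_ACHUNK_SIZE from the header above; the 2MB
// intact chunk size is assumed here for self-containment.
constexpr int64_t kIntactAchunkSize = 2L << 20;
constexpr int64_t kNormalAchunkSize = kIntactAchunkSize;       // cached in free_list_
constexpr int64_t kLargeAchunkSize  = kIntactAchunkSize << 1;  // cached in large_free_list_

// Classification used by direct_alloc()/direct_free() for the new counters.
const char *classify(int64_t mapped_size)
{
  if (mapped_size > kLargeAchunkSize) {
    return "huge: never cached, counted in huge_maps_/huge_unmaps_";
  } else if (mapped_size > kNormalAchunkSize) {
    return "large: cached in large_free_list_, counted in large_maps_/large_unmaps_";
  } else {
    return "normal: cached in free_list_, counted only in maps_/unmaps_";
  }
}

int main()
{
  std::printf("%s\n", classify(3L << 20));  // an allocation between 2MB and 4MB
  return 0;
}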

View File

@@ -284,9 +284,9 @@ void* Thread::__th_start(void *arg)
if (OB_FAIL(ret)) { if (OB_FAIL(ret)) {
LOG_ERROR("set tenant ctx failed", K(ret)); LOG_ERROR("set tenant ctx failed", K(ret));
} else { } else {
const int cache_cnt = !lib::is_mini_mode() ? ObPageManager::DEFAULT_CHUNK_CACHE_CNT : const int cache_size = !lib::is_mini_mode() ? ObPageManager::DEFAULT_CHUNK_CACHE_SIZE :
ObPageManager::MINI_MODE_CHUNK_CACHE_CNT; ObPageManager::MINI_MODE_CHUNK_CACHE_SIZE;
pm.set_max_chunk_cache_cnt(cache_cnt); pm.set_max_chunk_cache_size(cache_size);
ObPageManager::set_thread_local_instance(pm); ObPageManager::set_thread_local_instance(pm);
MemoryContext *mem_context = GET_TSI0(MemoryContext); MemoryContext *mem_context = GET_TSI0(MemoryContext);
if (OB_ISNULL(mem_context)) { if (OB_ISNULL(mem_context)) {

View File

@@ -30,6 +30,10 @@ class TestChunkMgr
: public ::testing::Test, public AChunkMgr : public ::testing::Test, public AChunkMgr
{ {
public: public:
TestChunkMgr()
{
large_free_list_.set_max_chunk_cache_size(AChunkList::DEFAULT_MAX_CHUNK_CACHE_SIZE);
}
virtual void SetUp() virtual void SetUp()
{ {
} }
@@ -37,8 +41,263 @@ public:
virtual void TearDown() virtual void TearDown()
{ {
} }
virtual int madvise(void *addr, size_t length, int advice) override
{
if (need_fail_) return -1;
madvise_len_ = length;
return AChunkMgr::madvise(addr, length, advice);
}
bool need_fail_ = false;
int madvise_len_ = 0;
}; };
TEST_F(TestChunkMgr, NormalChunk)
{
int NORMAL_SIZE = OB_MALLOC_BIG_BLOCK_SIZE;
int LARGE_SIZE = INTACT_ACHUNK_SIZE + 100;
int normal_hold = 0;
{
int64_t hold = 0;
AChunk *chunks[1024] = {};
for (int i = 0; i < 1024; i++) {
chunks[i] = alloc_chunk(NORMAL_SIZE);
normal_hold = chunks[i]->hold();
hold += normal_hold;
}
set_max_chunk_cache_size(hold - normal_hold);
for (int i = 0; i < 1024; i++) {
free_chunk(chunks[i]);
}
EXPECT_EQ(1023, free_list_.get_pushes());
EXPECT_EQ(0, free_list_.get_pops());
EXPECT_EQ(0, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
hold -= normal_hold;
EXPECT_EQ(hold, hold_);
// direct alloc 4M
{
auto *chunk = alloc_chunk(LARGE_SIZE);
EXPECT_TRUE(NULL != chunk);
EXPECT_EQ(0, free_list_.get_pops());
EXPECT_EQ(0, large_free_list_.get_pops());
hold += chunk->hold();
EXPECT_EQ(hold, hold_);
}
// wash alloc
{
set_limit(hold);
auto *chunk = alloc_chunk(LARGE_SIZE);
EXPECT_TRUE(NULL != chunk);
EXPECT_EQ(2, free_list_.get_pops());
hold = hold - normal_hold * 2 + chunk->hold();
EXPECT_EQ(hold, hold_);
}
}
}
TEST_F(TestChunkMgr, LargeChunk)
{
int NORMAL_SIZE = OB_MALLOC_BIG_BLOCK_SIZE;
int LARGE_SIZE = INTACT_ACHUNK_SIZE + 100;
int large_hold = 0;
{
int64_t hold = 0;
AChunk *chunks[1024] = {};
for (int i = 0; i < 1024; i++) {
chunks[i] = alloc_chunk(LARGE_SIZE);
large_hold = chunks[i]->hold();
hold += large_hold;
}
set_max_large_chunk_cache_size(hold - large_hold);
for (int i = 0; i < 1024; i++) {
free_chunk(chunks[i]);
}
EXPECT_EQ(1023, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
EXPECT_EQ(0, free_list_.get_pushes());
EXPECT_EQ(0, free_list_.get_pops());
hold -= large_hold;
EXPECT_EQ(hold, hold_);
// direct alloc 2M
{
auto *chunk = alloc_chunk(NORMAL_SIZE);
EXPECT_TRUE(NULL != chunk);
EXPECT_EQ(0, free_list_.get_pops());
EXPECT_EQ(0, large_free_list_.get_pops());
hold += chunk->hold();
EXPECT_EQ(hold, hold_);
}
// wash alloc
{
set_limit(hold);
auto *chunk = alloc_chunk(NORMAL_SIZE);
EXPECT_TRUE(NULL != chunk);
EXPECT_EQ(1, large_free_list_.get_pops());
hold = hold - large_hold + chunk->hold();
EXPECT_EQ(hold, hold_);
}
}
}
TEST_F(TestChunkMgr, HugeChunk)
{
int NORMAL_SIZE = OB_MALLOC_BIG_BLOCK_SIZE;
int LARGE_SIZE = INTACT_ACHUNK_SIZE + 100;
int HUGE_SIZE = INTACT_ACHUNK_SIZE * 3;
set_limit(20L<<30);
int huge_hold = 0;
{
int64_t hold = 0;
{
int64_t temp_hold = 0;
AChunk *chunks[1024] = {};
for (int i = 0; i < 1024; i++) {
chunks[i] = alloc_chunk(LARGE_SIZE);
hold += chunks[i]->hold();
temp_hold += chunks[i]->hold();
}
set_max_large_chunk_cache_size(temp_hold);
for (int i = 0; i < 1024; i++) {
free_chunk(chunks[i]);
}
EXPECT_EQ(1024, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
EXPECT_EQ(0, free_list_.get_pushes());
EXPECT_EQ(0, free_list_.get_pops());
EXPECT_EQ(hold, hold_);
}
{
int64_t temp_hold = 0;
AChunk *chunks[1024] = {};
for (int i = 0; i < 1024; i++) {
chunks[i] = alloc_chunk(NORMAL_SIZE);
hold += chunks[i]->hold();
temp_hold += chunks[i]->hold();
}
set_max_chunk_cache_size(temp_hold);
for (int i = 0; i < 1024; i++) {
free_chunk(chunks[i]);
}
EXPECT_EQ(1024, free_list_.get_pushes());
EXPECT_EQ(0, free_list_.get_pops());
EXPECT_EQ(1024, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
EXPECT_EQ(hold, hold_);
}
// direct alloc huge
{
auto *chunk = alloc_chunk(HUGE_SIZE);
EXPECT_TRUE(NULL != chunk);
EXPECT_EQ(0, free_list_.get_pops());
EXPECT_EQ(0, large_free_list_.get_pops());
hold += chunk->hold();
EXPECT_EQ(hold, hold_);
}
// wash alloc
{
set_limit(hold);
auto *chunk = alloc_chunk(free_list_.hold() - 100);
EXPECT_TRUE(NULL != chunk);
EXPECT_EQ(0, free_list_.hold());
EXPECT_NE(0, large_free_list_.hold());
chunk = alloc_chunk(large_free_list_.hold() - 100);
EXPECT_TRUE(NULL != chunk);
EXPECT_EQ(0, large_free_list_.hold());
}
}
}
TEST_F(TestChunkMgr, BorderCase_advise_shrink)
{
int ps = get_page_size();
int LARGE_SIZE = INTACT_ACHUNK_SIZE + 100 + ps * 3;
auto *chunk = alloc_chunk(LARGE_SIZE);
// pollute chunk
memset(chunk->data_, 0xaa, chunk->hold());
EXPECT_EQ(0, large_free_list_.get_pushes());
int64_t orig_chunk_hold = chunk->hold();
int64_t orig_hold = hold_;
free_chunk(chunk);
EXPECT_EQ(1, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
chunk = alloc_chunk(LARGE_SIZE - ps * 3);
EXPECT_EQ(1, large_free_list_.get_pops());
EXPECT_EQ(madvise_len_, ps * 3);
EXPECT_FALSE(0 == chunk->data_[0] && 0 == memcmp(chunk->data_, chunk->data_ + 1, chunk->hold() - 1));
EXPECT_EQ(orig_chunk_hold - chunk->hold(), orig_hold - hold_);
}
TEST_F(TestChunkMgr, BorderCase_advise_expand)
{
int ps = get_page_size();
int LARGE_SIZE = INTACT_ACHUNK_SIZE + 100;
auto *chunk = alloc_chunk(LARGE_SIZE);
// pollute chunk
memset(chunk->data_, 0xaa, chunk->hold());
EXPECT_EQ(0, large_free_list_.get_pushes());
int64_t orig_chunk_hold = chunk->hold();
int64_t orig_hold = hold_;
free_chunk(chunk);
EXPECT_EQ(1, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
chunk = alloc_chunk(LARGE_SIZE + ps * 3);
EXPECT_EQ(1, large_free_list_.get_pops());
EXPECT_FALSE(0 == chunk->data_[0] && 0 == memcmp(chunk->data_, chunk->data_ + 1, chunk->hold() - 1));
EXPECT_EQ(orig_chunk_hold - (int64_t)chunk->hold(), orig_hold - hold_);
}
TEST_F(TestChunkMgr, BorderCase_advise_fail)
{
int ps = get_page_size();
int LARGE_SIZE = INTACT_ACHUNK_SIZE + 100 + ps * 3;
auto *chunk = alloc_chunk(LARGE_SIZE);
// pollute chunk
memset(chunk->data_, 0xaa, chunk->hold());
EXPECT_EQ(0, large_free_list_.get_pushes());
int64_t orig_chunk_hold = chunk->hold();
int64_t orig_hold = hold_;
free_chunk(chunk);
EXPECT_EQ(1, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
need_fail_ = true;
chunk = alloc_chunk(LARGE_SIZE - ps * 3);
EXPECT_EQ(1, large_free_list_.get_pushes());
EXPECT_EQ(1, large_free_list_.get_pops());
// check remap happened
EXPECT_TRUE(0 == chunk->data_[0] && 0 == memcmp(chunk->data_, chunk->data_ + 1, chunk->hold() - 1));
EXPECT_EQ(orig_chunk_hold - (int64_t)chunk->hold(), orig_hold - hold_);
}
TEST_F(TestChunkMgr, alloc_co_chunk)
{
int LARGE_SIZE = INTACT_ACHUNK_SIZE + 100;
int NORMAL_SIZE = OB_MALLOC_BIG_BLOCK_SIZE;
{
AChunk *chunk = alloc_chunk(NORMAL_SIZE);
free_chunk(chunk);
chunk = alloc_chunk(LARGE_SIZE);
free_chunk(chunk);
}
EXPECT_EQ(1, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
EXPECT_EQ(1, free_list_.get_pushes());
EXPECT_EQ(0, free_list_.get_pops());
set_limit(hold_);
auto *chunk = alloc_co_chunk(NORMAL_SIZE);
EXPECT_TRUE(chunk != NULL);
EXPECT_EQ(1, free_list_.get_pops());
chunk = alloc_co_chunk(NORMAL_SIZE);
EXPECT_TRUE(chunk != NULL);
EXPECT_EQ(1, large_free_list_.get_pops());
}
TEST_F(TestChunkMgr, FreeListBasic) TEST_F(TestChunkMgr, FreeListBasic)
{ {
@@ -70,7 +329,7 @@ TEST_F(TestChunkMgr, FreeListManyChunk)
for (int i = 0; i < 1024; i++) { for (int i = 0; i < 1024; i++) {
free_chunk(chunks[i]); free_chunk(chunks[i]);
} }
EXPECT_EQ(500, free_list_.get_pushes()); EXPECT_EQ(AChunkList::DEFAULT_MAX_CHUNK_CACHE_SIZE/INTACT_ACHUNK_SIZE, free_list_.get_pushes());
EXPECT_EQ(0, free_list_.get_pops()); EXPECT_EQ(0, free_list_.get_pops());
for (int i = 0; i < 1024; i++) { for (int i = 0; i < 1024; i++) {
@@ -79,6 +338,6 @@
for (int i = 0; i < 1024; i++) { for (int i = 0; i < 1024; i++) {
free_chunk(chunks[i]); free_chunk(chunks[i]);
} }
EXPECT_EQ(500*2, free_list_.get_pushes()); EXPECT_EQ(2* AChunkList::DEFAULT_MAX_CHUNK_CACHE_SIZE/INTACT_ACHUNK_SIZE, free_list_.get_pushes());
EXPECT_EQ(500, free_list_.get_pops()); EXPECT_EQ(AChunkList::DEFAULT_MAX_CHUNK_CACHE_SIZE/INTACT_ACHUNK_SIZE, free_list_.get_pops());
} }

View File

@@ -225,7 +225,7 @@ TEST_F(TestObjectMgr, TestFragmentWash)
TEST_F(TestObjectMgr, TestSubObjectMgr) TEST_F(TestObjectMgr, TestSubObjectMgr)
{ {
AChunkMgr::instance().set_max_chunk_cache_cnt(0); AChunkMgr::instance().set_max_chunk_cache_size(0);
oceanbase::lib::set_memory_limit(20LL<<30); oceanbase::lib::set_memory_limit(20LL<<30);
int fd = open("alloc_flow_records", O_RDONLY, S_IRWXU | S_IRGRP); int fd = open("alloc_flow_records", O_RDONLY, S_IRWXU | S_IRGRP);
abort_unless(fd > 0); abort_unless(fd > 0);

View File

@@ -180,13 +180,13 @@ TEST_F(TestAllocator, pm_basic)
// freelist // freelist
int large_size = INTACT_ACHUNK_SIZE - 200; int large_size = INTACT_ACHUNK_SIZE - 200;
pm.set_max_chunk_cache_cnt(1); pm.set_max_chunk_cache_size(INTACT_ACHUNK_SIZE);
ptr = pm.alloc_page(large_size); ptr = pm.alloc_page(large_size);
hold = pm.get_hold(); hold = pm.get_hold();
ASSERT_GT(hold, 0); ASSERT_GT(hold, 0);
pm.free_page(ptr); pm.free_page(ptr);
ASSERT_EQ(pm.get_hold(), hold); ASSERT_EQ(pm.get_hold(), hold);
pm.set_max_chunk_cache_cnt(0); pm.set_max_chunk_cache_size(0);
ptr = pm.alloc_page(large_size); ptr = pm.alloc_page(large_size);
ASSERT_EQ(pm.get_hold(), hold); ASSERT_EQ(pm.get_hold(), hold);
pm.free_page(ptr); pm.free_page(ptr);
@@ -197,7 +197,7 @@ TEST_F(TestAllocator, pm_basic)
pm.free_page(ptr); pm.free_page(ptr);
ASSERT_EQ(pm.get_hold(), hold); ASSERT_EQ(pm.get_hold(), hold);
pm.set_max_chunk_cache_cnt(2); pm.set_max_chunk_cache_size(INTACT_ACHUNK_SIZE * 2);
pm.alloc_page(large_size); pm.alloc_page(large_size);
pm.alloc_page(large_size); pm.alloc_page(large_size);
pm.alloc_page(large_size); pm.alloc_page(large_size);

View File

@@ -58,7 +58,7 @@ TEST_F(TestContext, Basic)
ObPageManager g_pm; ObPageManager g_pm;
ObPageManager::set_thread_local_instance(g_pm); ObPageManager::set_thread_local_instance(g_pm);
g_pm.set_tenant_ctx(tenant_id, ctx_id); g_pm.set_tenant_ctx(tenant_id, ctx_id);
g_pm.set_max_chunk_cache_cnt(0); g_pm.set_max_chunk_cache_size(0);
MemoryContext &root = MemoryContext::root(); MemoryContext &root = MemoryContext::root();
ContextParam param; ContextParam param;
param.set_mem_attr(tenant_id, "Context", ctx_id); param.set_mem_attr(tenant_id, "Context", ctx_id);

View File

@@ -196,9 +196,16 @@ int ObServerReloadConfig::operator()()
const int64_t cache_size = GCONF.memory_chunk_cache_size; int64_t cache_size = GCONF.memory_chunk_cache_size;
const int cache_cnt = (cache_size > 0 ? cache_size : GMEMCONF.get_server_memory_limit()) / INTACT_ACHUNK_SIZE; if (0 == cache_size) {
lib::AChunkMgr::instance().set_max_chunk_cache_cnt(cache_cnt); cache_size = GMEMCONF.get_server_memory_limit();
}
int64_t large_cache_size = GCONF._memory_large_chunk_cache_size;
if (0 == large_cache_size) {
large_cache_size = lib::AChunkMgr::DEFAULT_LARGE_CHUNK_CACHE_SIZE;
}
lib::AChunkMgr::instance().set_max_chunk_cache_size(cache_size);
lib::AChunkMgr::instance().set_max_large_chunk_cache_size(large_cache_size);
if (!is_arbitration_mode) { if (!is_arbitration_mode) {
// Refresh cluster_id, cluster_name_hash for non arbitration mode // Refresh cluster_id, cluster_name_hash for non arbitration mode

View File

@@ -225,6 +225,8 @@ DEF_CAP(cache_wash_threshold, OB_CLUSTER_PARAMETER, "4GB", "[0B,]",
ObParameterAttr(Section::OBSERVER, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE)); ObParameterAttr(Section::OBSERVER, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));
DEF_CAP(memory_chunk_cache_size, OB_CLUSTER_PARAMETER, "0M", "[0M,]", "the maximum size of memory cached by memory chunk cache. Range: [0M,], 0 stands for adaptive", DEF_CAP(memory_chunk_cache_size, OB_CLUSTER_PARAMETER, "0M", "[0M,]", "the maximum size of memory cached by memory chunk cache. Range: [0M,], 0 stands for adaptive",
ObParameterAttr(Section::OBSERVER, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE)); ObParameterAttr(Section::OBSERVER, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));
DEF_CAP(_memory_large_chunk_cache_size, OB_CLUSTER_PARAMETER, "0M", "[0M,]", "the maximum size of large memory cached by memory chunk cache. Range: [0M,], 0 stands for adaptive",
ObParameterAttr(Section::OBSERVER, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));
DEF_TIME(autoinc_cache_refresh_interval, OB_CLUSTER_PARAMETER, "3600s", "[100ms,]", DEF_TIME(autoinc_cache_refresh_interval, OB_CLUSTER_PARAMETER, "3600s", "[100ms,]",
"auto-increment service cache refresh sync_value in this interval, " "auto-increment service cache refresh sync_value in this interval, "
"with default 3600s. Range: [100ms, +∞)", "with default 3600s. Range: [100ms, +∞)",

View File

@@ -127,7 +127,10 @@ int ObTenantMemoryPrinter::print_tenant_usage()
int64_t memory_used = get_virtual_memory_used(&resident_size); int64_t memory_used = get_virtual_memory_used(&resident_size);
_STORAGE_LOG(INFO, _STORAGE_LOG(INFO,
"[CHUNK_MGR] free=%ld pushes=%ld pops=%ld limit=%'15ld hold=%'15ld total_hold=%'15ld used=%'15ld" \ "[CHUNK_MGR] free=%ld pushes=%ld pops=%ld limit=%'15ld hold=%'15ld total_hold=%'15ld used=%'15ld" \
" freelist_hold=%'15ld maps=%'15ld unmaps=%'15ld large_maps=%'15ld large_unmaps=%'15ld" \ " freelist_hold=%'15ld large_freelist_hold=%'15ld" \
" maps=%'15ld unmaps=%'15ld" \
" large_maps=%'15ld large_unmaps=%'15ld" \
" huge_maps=%'15ld huge_unmaps=%'15ld" \
" memalign=%d resident_size=%'15ld" " memalign=%d resident_size=%'15ld"
#ifndef ENABLE_SANITY #ifndef ENABLE_SANITY
" virtual_memory_used=%'15ld\n", " virtual_memory_used=%'15ld\n",
@@ -141,11 +144,14 @@ int ObTenantMemoryPrinter::print_tenant_usage()
CHUNK_MGR.get_hold(), CHUNK_MGR.get_hold(),
CHUNK_MGR.get_total_hold(), CHUNK_MGR.get_total_hold(),
CHUNK_MGR.get_used(), CHUNK_MGR.get_used(),
CHUNK_MGR.get_freelist_hold(), CHUNK_MGR.get_freelist_hold() + CHUNK_MGR.get_large_freelist_hold(),
CHUNK_MGR.get_large_freelist_hold(),
CHUNK_MGR.get_maps(), CHUNK_MGR.get_maps(),
CHUNK_MGR.get_unmaps(), CHUNK_MGR.get_unmaps(),
CHUNK_MGR.get_large_maps(), CHUNK_MGR.get_large_maps(),
CHUNK_MGR.get_large_unmaps(), CHUNK_MGR.get_large_unmaps(),
CHUNK_MGR.get_huge_maps(),
CHUNK_MGR.get_huge_unmaps(),
0, 0,
resident_size, resident_size,
#ifndef ENABLE_SANITY #ifndef ENABLE_SANITY

View File

@@ -297,6 +297,7 @@ _max_elr_dependent_trx_count
_max_malloc_sample_interval _max_malloc_sample_interval
_max_schema_slot_num _max_schema_slot_num
_max_tablet_cnt_per_gb _max_tablet_cnt_per_gb
_memory_large_chunk_cache_size
_migrate_block_verify_level _migrate_block_verify_level
_minor_compaction_amplification_factor _minor_compaction_amplification_factor
_minor_compaction_interval _minor_compaction_interval

View File

@@ -32,7 +32,7 @@ public:
virtual void SetUp() override virtual void SetUp() override
{ {
GCONF.enable_sql_operator_dump.set_value("True"); GCONF.enable_sql_operator_dump.set_value("True");
lib::AChunkMgr::instance().set_max_chunk_cache_cnt(0); lib::AChunkMgr::instance().set_max_chunk_cache_size(0);
int ret = OB_SUCCESS; int ret = OB_SUCCESS;
lib::ObMallocAllocator *malloc_allocator = lib::ObMallocAllocator::get_instance(); lib::ObMallocAllocator *malloc_allocator = lib::ObMallocAllocator::get_instance();
ret = malloc_allocator->create_and_add_tenant_allocator(OB_SYS_TENANT_ID); ret = malloc_allocator->create_and_add_tenant_allocator(OB_SYS_TENANT_ID);