[CP] [FEAT MERGE] chunk_mgr_opt
deps/oblib/src/lib/resource/achunk_mgr.cpp (vendored, 196 lines changed)
@@ -59,13 +59,15 @@ AChunkMgr &AChunkMgr::instance()
}

AChunkMgr::AChunkMgr()
: free_list_(), large_free_list_(),
chunk_bitmap_(nullptr), limit_(DEFAULT_LIMIT), urgent_(0), hold_(0),
total_hold_(0), maps_(0), unmaps_(0), large_maps_(0), large_unmaps_(0),
huge_maps_(0), huge_unmaps_(0),
shadow_hold_(0)
: limit_(DEFAULT_LIMIT), urgent_(0), hold_(0),
total_hold_(0), used_(0), shadow_hold_(0),
max_chunk_cache_size_(limit_)
{
large_free_list_.set_max_chunk_cache_size(0);
// only cache normal_chunk or large_chunk
for (int i = 0; i < ARRAYSIZEOF(slots_); ++i) {
new (slots_ + i) Slot();
}
slots_[HUGE_ACHUNK_INDEX]->set_max_chunk_cache_size(0);
}

void *AChunkMgr::direct_alloc(const uint64_t size, const bool can_use_huge_page, bool &huge_page_used, const bool alloc_shadow)
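The rewritten constructor above drops the dedicated free_list_/large_free_list_ initializers and instead placement-constructs one Slot cache per chunk-size class, pinning the huge slot's cache budget to zero so huge chunks are never cached. A reduced sketch of that setup, not taken from the patch (Slot, N and the names below are stand-ins for the real members):

#include <cstdint>
#include <new>

struct Slot {
  int64_t max_cache_size_ = INT64_MAX;   // per-slot cache budget
};

struct MgrSketch {
  static constexpr int N = 10;           // slot 0: 2 MB chunks, slots 1..N-1: large, slot N: huge
  Slot slots_[N + 1];
  MgrSketch()
  {
    for (int i = 0; i < N + 1; ++i) {
      new (slots_ + i) Slot();           // construct each cache slot in place
    }
    slots_[N].max_cache_size_ = 0;       // huge chunks always go straight back to the OS
  }
};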
@@ -96,14 +98,8 @@ void *AChunkMgr::direct_alloc(const uint64_t size, const bool can_use_huge_page,
// aligned address returned
}
}

if (ptr != nullptr) {
ATOMIC_FAA(&maps_, 1);
if (size > LARGE_ACHUNK_SIZE) {
ATOMIC_FAA(&huge_maps_, 1);
} else if (size > NORMAL_ACHUNK_SIZE) {
ATOMIC_FAA(&large_maps_, 1);
}
ATOMIC_FAA(&get_slot(size).maps_, 1);
IGNORE_RETURN ATOMIC_FAA(&total_hold_, size);
} else {
LOG_ERROR_RET(OB_ALLOCATE_MEMORY_FAILED, "low alloc fail", K(size), K(orig_errno), K(errno));
@@ -120,12 +116,7 @@ void AChunkMgr::direct_free(const void *ptr, const uint64_t size)
{
common::ObTimeGuard time_guard(__func__, 1000 * 1000);

ATOMIC_FAA(&unmaps_, 1);
if (size > LARGE_ACHUNK_SIZE) {
ATOMIC_FAA(&huge_unmaps_, 1);
} else if (size > NORMAL_ACHUNK_SIZE) {
ATOMIC_FAA(&large_unmaps_, 1);
}
ATOMIC_FAA(&get_slot(size).unmaps_, 1);
IGNORE_RETURN ATOMIC_FAA(&total_hold_, -size);
low_free(ptr, size);
}
@@ -215,39 +206,8 @@ AChunk *AChunkMgr::alloc_chunk(const uint64_t size, bool high_prio)
const int64_t all_size = aligned(size);

AChunk *chunk = nullptr;
if (NORMAL_ACHUNK_SIZE == all_size) {
// TODO by fengshuo.fs: chunk cached by freelist may not use all memory in it,
// so update_hold can use hold_size too.
if (free_list_.count() > 0) {
chunk = free_list_.pop();
}
if (OB_ISNULL(chunk)) {
bool updated = false;
while (!(updated = update_hold(hold_size, high_prio)) && large_free_list_.count() > 0) {
if (OB_NOT_NULL(chunk = large_free_list_.pop())) {
int64_t all_size = chunk->aligned();
int64_t hold_size = chunk->hold();
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false);
chunk = nullptr;
}
}
if (updated) {
bool hugetlb_used = false;
void *ptr = direct_alloc(all_size, true, hugetlb_used, SANITY_BOOL_EXPR(true));
if (ptr != nullptr) {
chunk = new (ptr) AChunk();
chunk->is_hugetlb_ = hugetlb_used;
} else {
IGNORE_RETURN update_hold(-hold_size, false);
}
}
}
} else if (LARGE_ACHUNK_SIZE == all_size) {
if (large_free_list_.count() > 0) {
chunk = large_free_list_.pop();
}
if (chunk != NULL) {
// Reuse chunk from self-cache
if (OB_NOT_NULL(chunk = get_slot(all_size)->pop())) {
int64_t orig_hold_size = chunk->hold();
if (hold_size == orig_hold_size) {
// do-nothing
@@ -258,10 +218,7 @@ AChunk *AChunkMgr::alloc_chunk(const uint64_t size, bool high_prio)
chunk = nullptr;
}
} else {
int result = 0;
do {
result = this->madvise((char*)chunk + hold_size, orig_hold_size - hold_size, MADV_DONTNEED);
} while (result == -1 && errno == EAGAIN);
int result = this->madvise((char*)chunk + hold_size, orig_hold_size - hold_size, MADV_DONTNEED);
if (-1 == result) {
LOG_WARN_RET(OB_ERR_SYS, "madvise failed", K(errno));
direct_free(chunk, all_size);
@@ -274,35 +231,14 @@ AChunk *AChunkMgr::alloc_chunk(const uint64_t size, bool high_prio)
}
if (OB_ISNULL(chunk)) {
bool updated = false;
while (!(updated = update_hold(hold_size, high_prio)) && free_list_.count() > 0) {
if (OB_NOT_NULL(chunk = free_list_.pop())) {
int64_t all_size = chunk->aligned();
int64_t hold_size = chunk->hold();
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false);
chunk = nullptr;
}
}
if (updated) {
bool hugetlb_used = false;
void *ptr = direct_alloc(all_size, true, hugetlb_used, SANITY_BOOL_EXPR(true));
if (ptr != nullptr) {
chunk = new (ptr) AChunk();
chunk->is_hugetlb_ = hugetlb_used;
} else {
IGNORE_RETURN update_hold(-hold_size, false);
}
}
}
} else {
bool updated = false;
for (int i = MAX_LARGE_ACHUNK_INDEX; !updated && i >= 0; --i) {
while (!(updated = update_hold(hold_size, high_prio)) &&
(free_list_.count() > 0 || large_free_list_.count() > 0)) {
if (OB_NOT_NULL(chunk = free_list_.pop()) || OB_NOT_NULL(chunk = large_free_list_.pop())) {
int64_t all_size = chunk->aligned();
int64_t hold_size = chunk->hold();
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false);
OB_NOT_NULL(chunk = slots_[i]->pop())) {
// Wash chunk from all-cache when observer's hold reaches limit
int64_t orig_all_size = chunk->aligned();
int64_t orig_hold_size = chunk->hold();
direct_free(chunk, orig_all_size);
IGNORE_RETURN update_hold(-orig_hold_size, false);
chunk = nullptr;
}
}
@@ -317,8 +253,8 @@ AChunk *AChunkMgr::alloc_chunk(const uint64_t size, bool high_prio)
}
}
}

if (OB_NOT_NULL(chunk)) {
IGNORE_RETURN ATOMIC_AAF(&used_, hold_size);
chunk->alloc_bytes_ = size;
SANITY_UNPOISON(chunk, all_size); // maybe no need?
} else if (REACH_TIME_INTERVAL(1 * 1000 * 1000)) {
@@ -334,27 +270,21 @@ void AChunkMgr::free_chunk(AChunk *chunk)
if (OB_NOT_NULL(chunk)) {
const int64_t hold_size = chunk->hold();
const uint64_t all_size = chunk->aligned();
IGNORE_RETURN ATOMIC_AAF(&used_, -hold_size);
const double max_large_cache_ratio = 0.5;
int64_t max_large_cache_size = min(limit_ - used_, max_chunk_cache_size_) * max_large_cache_ratio;
const int64_t cache_hold = hold_ - used_;
const int64_t large_cache_hold = cache_hold - slots_[NORMAL_ACHUNK_INDEX]->hold();
bool freed = true;
if (NORMAL_ACHUNK_SIZE == hold_size) {
if (hold_ + hold_size <= limit_) {
freed = !free_list_.push(chunk);
if (cache_hold + hold_size <= max_chunk_cache_size_
&& (NORMAL_ACHUNK_SIZE == all_size || large_cache_hold <= max_large_cache_size)
&& 0 == chunk->washed_size_) {
freed = !get_slot(all_size)->push(chunk);
}
if (freed) {
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false);
}
} else if (LARGE_ACHUNK_SIZE == all_size) {
if (hold_ + hold_size <= limit_) {
freed = !large_free_list_.push(chunk);
}
if (freed) {
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false);
}
} else {
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false);
}
}
}
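In the reworked free_chunk() above, a chunk is admitted to its per-size slot cache only when the whole cache stays within max_chunk_cache_size_, large chunks additionally stay within half of the remaining headroom, and the chunk has not been partially washed; otherwise it is freed back to the OS. A minimal restatement of that admission check, not from the patch (the state that free_chunk() reads from member fields is passed in as parameters here):

#include <algorithm>
#include <cstdint>

static bool may_cache(int64_t hold_size, int64_t all_size, int64_t washed_size,
                      int64_t limit, int64_t used, int64_t hold,
                      int64_t max_chunk_cache_size, int64_t normal_slot_hold,
                      int64_t normal_achunk_size)
{
  const double max_large_cache_ratio = 0.5;                     // large chunks may take at most half
  const int64_t max_large_cache_size = static_cast<int64_t>(
      std::min(limit - used, max_chunk_cache_size) * max_large_cache_ratio);
  const int64_t cache_hold = hold - used;                       // memory currently parked in the caches
  const int64_t large_cache_hold = cache_hold - normal_slot_hold;
  return cache_hold + hold_size <= max_chunk_cache_size
      && (normal_achunk_size == all_size || large_cache_hold <= max_large_cache_size)
      && 0 == washed_size;                                      // partially washed chunks are not cached
}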
@@ -365,9 +295,9 @@ AChunk *AChunkMgr::alloc_co_chunk(const uint64_t size)

AChunk *chunk = nullptr;
bool updated = false;
for (int i = MAX_LARGE_ACHUNK_INDEX; !updated && i >= 0; --i) {
while (!(updated = update_hold(hold_size, true)) &&
(free_list_.count() > 0 || large_free_list_.count() > 0)) {
if (OB_NOT_NULL(chunk = free_list_.pop()) || OB_NOT_NULL(chunk = large_free_list_.pop())) {
OB_NOT_NULL(chunk = slots_[i]->pop())) {
int64_t all_size = chunk->aligned();
int64_t hold_size = chunk->hold();
direct_free(chunk, all_size);
@@ -388,6 +318,7 @@ AChunk *AChunkMgr::alloc_co_chunk(const uint64_t size)
}

if (OB_NOT_NULL(chunk)) {
IGNORE_RETURN ATOMIC_AAF(&used_, hold_size);
chunk->alloc_bytes_ = size;
//SANITY_UNPOISON(chunk, all_size); // maybe no need?
} else if (REACH_TIME_INTERVAL(1 * 1000 * 1000)) {
@@ -403,6 +334,7 @@ void AChunkMgr::free_co_chunk(AChunk *chunk)
if (OB_NOT_NULL(chunk)) {
const int64_t hold_size = chunk->hold();
const uint64_t all_size = chunk->aligned();
IGNORE_RETURN ATOMIC_AAF(&used_, -hold_size);
direct_free(chunk, all_size);
IGNORE_RETURN update_hold(-hold_size, false);
}
@@ -435,17 +367,65 @@ bool AChunkMgr::update_hold(int64_t bytes, bool high_prio)

int AChunkMgr::madvise(void *addr, size_t length, int advice)
{
return ::madvise(addr, length, advice);
int result = 0;
if (length > 0) {
do {
result = ::madvise(addr, length, advice);
} while (result == -1 && errno == EAGAIN);
}
return result;
}
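The madvise() wrapper now skips zero-length requests and retries on EAGAIN instead of surfacing the transient failure to callers. A minimal sketch of the same retry idiom, not from the patch (the function name and the hard-coded MADV_DONTNEED are illustrative):

#include <cerrno>
#include <cstddef>
#include <sys/mman.h>

static int advise_dontneed_with_retry(void *addr, size_t length)
{
  int rc = 0;
  if (length > 0) {                            // madvise with length 0 does nothing useful, skip it
    do {
      rc = ::madvise(addr, length, MADV_DONTNEED);
    } while (-1 == rc && EAGAIN == errno);     // EAGAIN is transient, retry until it clears
  }
  return rc;                                   // 0 on success, -1 with errno set otherwise
}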
int64_t AChunkMgr::to_string(char *buf, const int64_t buf_len) const
{
int ret = OB_SUCCESS;
int64_t pos = 0;
int64_t resident_size = 0;
int64_t memory_used = get_virtual_memory_used(&resident_size);
int64_t large_maps = 0;
int64_t large_unmaps = 0;
for (int i = MIN_LARGE_ACHUNK_INDEX; i <= MAX_LARGE_ACHUNK_INDEX; ++i) {
large_maps += slots_[i].maps_;
large_unmaps += slots_[i].unmaps_;
}
int64_t total_maps = slots_[NORMAL_ACHUNK_INDEX].maps_ + large_maps + slots_[HUGE_ACHUNK_INDEX].maps_;
int64_t total_unmaps = slots_[NORMAL_ACHUNK_INDEX].unmaps_ + large_unmaps + slots_[HUGE_ACHUNK_INDEX].unmaps_;
ret = databuff_printf(buf, buf_len, pos,
"[CHUNK_MGR] limit=%'15ld hold=%'15ld total_hold=%'15ld used=%'15ld freelists_hold=%'15ld"
" total_maps=%'15ld total_unmaps=%'15ld large_maps=%'15ld large_unmaps=%'15ld huge_maps=%'15ld huge_unmaps=%'15ld"
" memalign=%d resident_size=%'15ld"
#ifndef ENABLE_SANITY
" virtual_memory_used=%'15ld\n",
#else
" virtual_memory_used=%'15ld actual_virtual_memory_used=%'15ld\n",
#endif
limit_, hold_, total_hold_, used_, hold_ - used_,
total_maps, total_unmaps, large_maps, large_unmaps, slots_[HUGE_ACHUNK_INDEX].maps_, slots_[HUGE_ACHUNK_INDEX].unmaps_,
0, resident_size,
#ifndef ENABLE_SANITY
memory_used
#else
memory_used - shadow_hold_, memory_used
#endif
);
for (int i = 0; OB_SUCC(ret) && i <= MAX_LARGE_ACHUNK_INDEX; ++i) {
const AChunkList &free_list = slots_[i].free_list_;
const int64_t maps = slots_[i].maps_;
const int64_t unmaps = slots_[i].unmaps_;
ret = databuff_printf(buf, buf_len, pos,
"[CHUNK_MGR] %'2d MB_CACHE: hold=%'15ld free=%'15ld pushes=%'15ld pops=%'15ld maps=%'15ld unmaps=%'15ld\n",
(i + 1) * 2, free_list.hold(), free_list.count(),
free_list.get_pushes(), free_list.get_pops(),
maps, unmaps);
}
return pos;
}

int64_t AChunkMgr::sync_wash()
{
int64_t washed_size = 0;
AChunk *free_lists[2] = {};
free_lists[0] = free_list_.popall();
free_lists[1] = large_free_list_.popall();
for (int i = 0; i < ARRAYSIZEOF(free_lists); ++i) {
AChunk *head = free_lists[i];
for (int i = 0; i <= MAX_LARGE_ACHUNK_INDEX; ++i) {
AChunk *head = slots_[i]->popall();
if (OB_NOT_NULL(head)) {
AChunk *chunk = head;
do {
deps/oblib/src/lib/resource/achunk_mgr.h (vendored, 90 lines changed)
@@ -192,12 +192,26 @@ class AChunkMgr
friend class ProtectedStackAllocator;
friend class ObMemoryCutter;
private:
struct Slot
{
Slot(int64_t max_cache_size = INT64_MAX) : maps_(0), unmaps_(0), free_list_()
{
free_list_.set_max_chunk_cache_size(max_cache_size);
}
AChunkList* operator->() { return &free_list_; }
int64_t maps_;
int64_t unmaps_;
AChunkList free_list_;
};
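Slot bundles the per-size free list with its own map/unmap counters, and its operator-> forwards to the embedded AChunkList, which is why call sites in this diff can write slots_[i]->push(chunk), slots_[i]->pop() or slots_[i]->hold() while still reaching maps_ and unmaps_ with the dot operator. A reduced illustration of that forwarding, not from the patch (List stands in for AChunkList):

#include <cstdint>

struct List {
  int64_t count_ = 0;
  void push() { ++count_; }
  int64_t count() const { return count_; }
};

struct SlotSketch {
  List *operator->() { return &list_; }   // arrow expressions reach the embedded list
  int64_t maps_ = 0;                      // dot expressions reach the slot's own counters
  List list_;
};

// Usage: SlotSketch s; s->push(); s.maps_ += 1; int64_t n = s->count();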
static constexpr int64_t DEFAULT_LIMIT = 4L << 30; // 4GB
static constexpr int64_t ACHUNK_ALIGN_SIZE = INTACT_ACHUNK_SIZE;
static constexpr int64_t NORMAL_ACHUNK_SIZE = INTACT_ACHUNK_SIZE;
static constexpr int64_t LARGE_ACHUNK_SIZE = INTACT_ACHUNK_SIZE << 1;
static constexpr int32_t N = 10;
static constexpr int32_t NORMAL_ACHUNK_INDEX = 0;
static constexpr int32_t MIN_LARGE_ACHUNK_INDEX = NORMAL_ACHUNK_INDEX + 1;
static constexpr int32_t MAX_LARGE_ACHUNK_INDEX = N - 1;
static constexpr int32_t HUGE_ACHUNK_INDEX = MAX_LARGE_ACHUNK_INDEX + 1;
public:
static constexpr int64_t DEFAULT_LARGE_CHUNK_CACHE_SIZE = 128L << 20;
static AChunkMgr &instance();

public:
@@ -211,14 +225,18 @@ public:
void free_co_chunk(AChunk *chunk);
static OB_INLINE uint64_t aligned(const uint64_t size);
static OB_INLINE uint64_t hold(const uint64_t size);
void set_max_chunk_cache_size(const int64_t max_cache_size)
{ free_list_.set_max_chunk_cache_size(max_cache_size); }
void set_max_large_chunk_cache_size(const int64_t max_cache_size)
{ large_free_list_.set_max_chunk_cache_size(max_cache_size); }

void set_max_chunk_cache_size(const int64_t max_cache_size, const bool use_large_chunk_cache = false)
{
max_chunk_cache_size_ = max_cache_size;
int64_t large_chunk_cache_size = use_large_chunk_cache ? INT64_MAX : 0;
for (int i = MIN_LARGE_ACHUNK_INDEX; i <= MAX_LARGE_ACHUNK_INDEX; ++i) {
slots_[i]->set_max_chunk_cache_size(large_chunk_cache_size);
}
}
inline static AChunk *ptr2chunk(const void *ptr);
bool update_hold(int64_t bytes, bool high_prio);
virtual int madvise(void *addr, size_t length, int advice);
int64_t to_string(char *buf, const int64_t buf_len) const;

inline void set_limit(int64_t limit);
inline int64_t get_limit() const;
@@ -227,17 +245,7 @@ public:
inline int64_t get_hold() const;
inline int64_t get_total_hold() const { return ATOMIC_LOAD(&total_hold_); }
inline int64_t get_used() const;
inline int64_t get_free_chunk_count() const;
inline int64_t get_free_chunk_pushes() const;
inline int64_t get_free_chunk_pops() const;
inline int64_t get_freelist_hold() const;
inline int64_t get_large_freelist_hold() const;
inline int64_t get_maps() { return maps_; }
inline int64_t get_unmaps() { return unmaps_; }
inline int64_t get_large_maps() { return large_maps_; }
inline int64_t get_large_unmaps() { return large_unmaps_; }
inline int64_t get_huge_maps() { return huge_maps_; }
inline int64_t get_huge_unmaps() { return huge_unmaps_; }
inline int64_t get_shadow_hold() const { return ATOMIC_LOAD(&shadow_hold_); }

int64_t sync_wash();
@@ -251,24 +259,23 @@ private:
// wrap for mmap
void *low_alloc(const uint64_t size, const bool can_use_huge_page, bool &huge_page_used, const bool alloc_shadow);
void low_free(const void *ptr, const uint64_t size);

Slot &get_slot(const uint64_t size)
{
int64_t index = (size - 1) / INTACT_ACHUNK_SIZE;
if (index > HUGE_ACHUNK_INDEX) {
index = HUGE_ACHUNK_INDEX;
}
return slots_[index];
}
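get_slot() above maps an aligned chunk size to its cache slot: with INTACT_ACHUNK_SIZE of 2 MB (consistent with the "(i + 1) * 2 MB_CACHE" lines printed by to_string), sizes up to 2 MB land in slot 0, sizes in (2 MB, 4 MB] in slot 1, and anything beyond the last large slot is clamped to HUGE_ACHUNK_INDEX, whose cache budget is zero. A standalone sketch of the same mapping, not from the patch (the constant values are assumptions for illustration):

#include <cstdint>

static constexpr int64_t kAchunkSize = 2L << 20;  // stand-in for INTACT_ACHUNK_SIZE (2 MB)
static constexpr int64_t kHugeIndex = 10;         // stand-in for HUGE_ACHUNK_INDEX (N)

static int64_t slot_index(uint64_t size)
{
  int64_t index = (size - 1) / kAchunkSize;       // 2 MB -> 0, 4 MB -> 1, 6 MB -> 2, ...
  return index > kHugeIndex ? kHugeIndex : index; // everything larger shares the huge slot
}

// slot_index(2 << 20) == 0, slot_index(4 << 20) == 1, slot_index(64 << 20) == kHugeIndex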
protected:
AChunkList free_list_;
AChunkList large_free_list_;
ChunkBitMap *chunk_bitmap_;

int64_t limit_;
int64_t urgent_;
int64_t hold_; // Including the memory occupied by free_list, limited by memory_limit
int64_t total_hold_; // Including virtual memory, just for statifics.

int64_t maps_;
int64_t unmaps_;
int64_t large_maps_;
int64_t large_unmaps_;
int64_t huge_maps_;
int64_t huge_unmaps_;
int64_t used_;
int64_t shadow_hold_;
int64_t max_chunk_cache_size_;
Slot slots_[N + 1];
}; // end of class AChunkMgr

OB_INLINE AChunk *AChunkMgr::ptr2chunk(const void *ptr)
@@ -313,34 +320,13 @@ inline int64_t AChunkMgr::get_hold() const

inline int64_t AChunkMgr::get_used() const
{
return hold_ - get_freelist_hold() - get_large_freelist_hold();
}

inline int64_t AChunkMgr::get_free_chunk_count() const
{
return free_list_.count();
}

inline int64_t AChunkMgr::get_free_chunk_pushes() const
{
return free_list_.get_pushes();
}

inline int64_t AChunkMgr::get_free_chunk_pops() const
{
return free_list_.get_pops();
return used_;
}

inline int64_t AChunkMgr::get_freelist_hold() const
{
return free_list_.hold();
return hold_ - used_;
}

inline int64_t AChunkMgr::get_large_freelist_hold() const
{
return large_free_list_.hold();
}

} // end of namespace lib
} // end of namespace oceanbase
deps/oblib/unittest/lib/alloc/test_chunk_mgr.cpp (vendored, 268 lines changed)
@@ -31,9 +31,7 @@ class TestChunkMgr
{
public:
TestChunkMgr()
{
large_free_list_.set_max_chunk_cache_size(AChunkList::DEFAULT_MAX_CHUNK_CACHE_SIZE);
}
{}
virtual void SetUp()
{
}
@@ -55,43 +53,45 @@ TEST_F(TestChunkMgr, NormalChunk)
{
int NORMAL_SIZE = OB_MALLOC_BIG_BLOCK_SIZE;
int LARGE_SIZE = INTACT_ACHUNK_SIZE + 100;
int normal_hold = 0;
int normal_hold = AChunkMgr::hold(NORMAL_SIZE);
int large_hold = AChunkMgr::hold(LARGE_SIZE);
{
int64_t hold = 0;
AChunk *chunks[1024] = {};
for (int i = 0; i < 1024; i++) {
AChunk *chunks[8] = {};
// direct alloc 2M
for (int i = 0; i < 8; ++i) {
chunks[i] = alloc_chunk(NORMAL_SIZE);
normal_hold = chunks[i]->hold();
hold += normal_hold;
}
set_max_chunk_cache_size(hold - normal_hold);
for (int i = 0; i < 1024; i++) {
// create 2M-cache
for (int i = 0; i < 8; ++i) {
free_chunk(chunks[i]);
chunks[i] = NULL;
}
EXPECT_EQ(1023, free_list_.get_pushes());
EXPECT_EQ(0, free_list_.get_pops());
EXPECT_EQ(0, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
hold -= normal_hold;
// alloc chunk from 2M-cache
for (int i = 0; i < 8; ++i) {
chunks[i] = alloc_chunk(NORMAL_SIZE);
}
EXPECT_EQ(8, slots_[0]->get_pushes());
EXPECT_EQ(8, slots_[0]->get_pops());
EXPECT_EQ(0, slots_[1]->get_pushes());
EXPECT_EQ(0, slots_[1]->get_pops());
EXPECT_EQ(hold, hold_);

// direct alloc 4M
// alloc chunk by wash 4M-cache
{
auto *chunk = alloc_chunk(LARGE_SIZE);
AChunk *chunk = alloc_chunk(LARGE_SIZE);
EXPECT_TRUE(NULL != chunk);
EXPECT_EQ(0, free_list_.get_pops());
EXPECT_EQ(0, large_free_list_.get_pops());
hold += chunk->hold();
hold += large_hold;
free_chunk(chunk);
EXPECT_EQ(hold, hold_);
}

// wash alloc
{
set_limit(hold);
auto *chunk = alloc_chunk(LARGE_SIZE);
chunk = alloc_chunk(NORMAL_SIZE);
EXPECT_TRUE(NULL != chunk);
EXPECT_EQ(2, free_list_.get_pops());
hold = hold - normal_hold * 2 + chunk->hold();
hold -= (large_hold - normal_hold);
EXPECT_EQ(8, slots_[0]->get_pushes());
EXPECT_EQ(8, slots_[0]->get_pops());
EXPECT_EQ(1, slots_[1]->get_pushes());
EXPECT_EQ(1, slots_[1]->get_pops());
EXPECT_EQ(hold, hold_);
}
}
@@ -101,43 +101,56 @@ TEST_F(TestChunkMgr, LargeChunk)
{
int NORMAL_SIZE = OB_MALLOC_BIG_BLOCK_SIZE;
int LARGE_SIZE = INTACT_ACHUNK_SIZE + 100;
int large_hold = 0;
int BIG_SIZE = INTACT_ACHUNK_SIZE * 2 + 100;
int normal_hold = AChunkMgr::hold(NORMAL_SIZE);
int large_hold = AChunkMgr::hold(LARGE_SIZE);
int big_hold = AChunkMgr::hold(BIG_SIZE);
{
int64_t hold = 0;
AChunk *chunks[1024] = {};
for (int i = 0; i < 1024; i++) {
AChunk *chunks[8] = {};
// direct alloc 4M
for (int i = 0; i < 8; ++i) {
chunks[i] = alloc_chunk(LARGE_SIZE);
large_hold = chunks[i]->hold();
hold += large_hold;
}
set_max_large_chunk_cache_size(hold - large_hold);
for (int i = 0; i < 1024; i++) {
// create 4M-cache
for (int i = 0; i < 8; ++i) {
free_chunk(chunks[i]);
}
EXPECT_EQ(1023, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
EXPECT_EQ(0, free_list_.get_pushes());
EXPECT_EQ(0, free_list_.get_pops());
hold -= large_hold;
EXPECT_EQ(hold, hold_);

// direct alloc 2M
{
auto *chunk = alloc_chunk(NORMAL_SIZE);
EXPECT_TRUE(NULL != chunk);
EXPECT_EQ(0, free_list_.get_pops());
EXPECT_EQ(0, large_free_list_.get_pops());
hold += chunk->hold();
EXPECT_EQ(hold, hold_);
// alloc chunk from self-cache(4M-cache)
for (int i = 0; i < 8; ++i) {
chunks[i] = alloc_chunk(LARGE_SIZE);
}
EXPECT_EQ(8, slots_[1]->get_pushes());
EXPECT_EQ(8, slots_[1]->get_pops());
EXPECT_EQ(hold, hold_);

// wash alloc
// alloc chunk by wash 6M-cache and 2M-cache
{
set_limit(hold);
auto *chunk = alloc_chunk(NORMAL_SIZE);
AChunk *chunk = alloc_chunk(BIG_SIZE);
EXPECT_TRUE(NULL != chunk);
EXPECT_EQ(1, large_free_list_.get_pops());
hold = hold - large_hold + chunk->hold();
hold += big_hold;
free_chunk(chunk);
chunk = alloc_chunk(NORMAL_SIZE);
EXPECT_TRUE(NULL != chunk);
hold += normal_hold;
free_chunk(chunk);
EXPECT_EQ(1, slots_[0]->get_pushes());
EXPECT_EQ(0, slots_[0]->get_pops());
EXPECT_EQ(1, slots_[2]->get_pushes());
EXPECT_EQ(0, slots_[2]->get_pops());
EXPECT_EQ(hold, hold_);
set_limit(hold);
chunk = alloc_chunk(LARGE_SIZE);
hold += (large_hold - big_hold);
EXPECT_TRUE(NULL != chunk);
chunk = alloc_chunk(LARGE_SIZE);
hold += (large_hold - normal_hold);
EXPECT_TRUE(NULL != chunk);
EXPECT_EQ(1, slots_[0]->get_pushes());
EXPECT_EQ(1, slots_[0]->get_pops());
EXPECT_EQ(1, slots_[2]->get_pushes());
EXPECT_EQ(1, slots_[2]->get_pops());
EXPECT_EQ(hold, hold_);
}
}
@@ -147,55 +160,34 @@ TEST_F(TestChunkMgr, HugeChunk)
{
int NORMAL_SIZE = OB_MALLOC_BIG_BLOCK_SIZE;
int LARGE_SIZE = INTACT_ACHUNK_SIZE + 100;
int HUGE_SIZE = INTACT_ACHUNK_SIZE * 3;
set_limit(20L<<30);
int huge_hold = 0;
int HUGE_SIZE = INTACT_ACHUNK_SIZE * 10 + 100;
{
int64_t hold = 0;
{
int64_t temp_hold = 0;
AChunk *chunks[1024] = {};
for (int i = 0; i < 1024; i++) {
chunks[i] = alloc_chunk(LARGE_SIZE);
hold += chunks[i]->hold();
temp_hold += chunks[i]->hold();
AChunk *chunks[8][2] = {};
for (int i = 0; i < 8; ++i) {
chunks[i][0] = alloc_chunk(NORMAL_SIZE);
chunks[i][1] = alloc_chunk(LARGE_SIZE);
hold += chunks[i][0]->hold();
hold += chunks[i][1]->hold();
}
set_max_large_chunk_cache_size(temp_hold);
for (int i = 0; i < 1024; i++) {
free_chunk(chunks[i]);
for (int i = 0; i < 8; ++i) {
free_chunk(chunks[i][0]);
free_chunk(chunks[i][1]);
}
EXPECT_EQ(1024, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
EXPECT_EQ(0, free_list_.get_pushes());
EXPECT_EQ(0, free_list_.get_pops());
EXPECT_EQ(hold, hold_);
}

{
int64_t temp_hold = 0;
AChunk *chunks[1024] = {};
for (int i = 0; i < 1024; i++) {
chunks[i] = alloc_chunk(NORMAL_SIZE);
hold += chunks[i]->hold();
temp_hold += chunks[i]->hold();
}
set_max_chunk_cache_size(temp_hold);
for (int i = 0; i < 1024; i++) {
free_chunk(chunks[i]);
}
EXPECT_EQ(1024, free_list_.get_pushes());
EXPECT_EQ(0, free_list_.get_pops());
EXPECT_EQ(1024, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
EXPECT_EQ(8, slots_[0]->get_pushes());
EXPECT_EQ(0, slots_[0]->get_pops());
EXPECT_EQ(8, slots_[1]->get_pushes());
EXPECT_EQ(0, slots_[1]->get_pops());
EXPECT_EQ(hold, hold_);
}

// direct alloc huge
{
auto *chunk = alloc_chunk(HUGE_SIZE);
AChunk *chunk = alloc_chunk(HUGE_SIZE);
EXPECT_TRUE(NULL != chunk);
EXPECT_EQ(0, free_list_.get_pops());
EXPECT_EQ(0, large_free_list_.get_pops());
EXPECT_EQ(0, slots_[0]->get_pops());
EXPECT_EQ(0, slots_[1]->get_pops());
hold += chunk->hold();
EXPECT_EQ(hold, hold_);
}
@@ -203,14 +195,14 @@ TEST_F(TestChunkMgr, HugeChunk)
// wash alloc
{
set_limit(hold);
auto *chunk = alloc_chunk(free_list_.hold() - 100);
AChunk *chunk = alloc_chunk(HUGE_SIZE);
EXPECT_TRUE(NULL != chunk);
EXPECT_EQ(0, free_list_.hold());
EXPECT_NE(0, large_free_list_.hold());
EXPECT_NE(0, slots_[0]->hold());
EXPECT_EQ(0, slots_[1]->hold());

chunk = alloc_chunk(large_free_list_.hold() - 100);
chunk = alloc_chunk(slots_[0]->hold());
EXPECT_TRUE(NULL != chunk);
EXPECT_EQ(0, large_free_list_.hold());
EXPECT_EQ(0, slots_[0]->hold());
}
}
}
@@ -222,14 +214,14 @@ TEST_F(TestChunkMgr, BorderCase_advise_shrink)
auto *chunk = alloc_chunk(LARGE_SIZE);
// pollute chunk
memset(chunk->data_, 0xaa, chunk->hold());
EXPECT_EQ(0, large_free_list_.get_pushes());
EXPECT_EQ(0, slots_[1]->get_pushes());
int64_t orig_chunk_hold = chunk->hold();
int64_t orig_hold = hold_;
free_chunk(chunk);
EXPECT_EQ(1, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
EXPECT_EQ(1, slots_[1]->get_pushes());
EXPECT_EQ(0, slots_[1]->get_pops());
chunk = alloc_chunk(LARGE_SIZE - ps * 3);
EXPECT_EQ(1, large_free_list_.get_pops());
EXPECT_EQ(1, slots_[1]->get_pops());
EXPECT_EQ(madvise_len_, ps * 3);
EXPECT_FALSE(0 == chunk->data_[0] && 0 == memcmp(chunk->data_, chunk->data_ + 1, chunk->hold() - 1));
EXPECT_EQ(orig_chunk_hold - chunk->hold(), orig_hold - hold_);
@@ -242,14 +234,14 @@ TEST_F(TestChunkMgr, BorderCase_advise_expand)
auto *chunk = alloc_chunk(LARGE_SIZE);
// pollute chunk
memset(chunk->data_, 0xaa, chunk->hold());
EXPECT_EQ(0, large_free_list_.get_pushes());
EXPECT_EQ(0, slots_[1]->get_pushes());
int64_t orig_chunk_hold = chunk->hold();
int64_t orig_hold = hold_;
free_chunk(chunk);
EXPECT_EQ(1, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
EXPECT_EQ(1, slots_[1]->get_pushes());
EXPECT_EQ(0, slots_[1]->get_pops());
chunk = alloc_chunk(LARGE_SIZE + ps * 3);
EXPECT_EQ(1, large_free_list_.get_pops());
EXPECT_EQ(1, slots_[1]->get_pops());
EXPECT_FALSE(0 == chunk->data_[0] && 0 == memcmp(chunk->data_, chunk->data_ + 1, chunk->hold() - 1));
EXPECT_EQ(orig_chunk_hold - (int64_t)chunk->hold(), orig_hold - hold_);
}
@@ -261,16 +253,16 @@ TEST_F(TestChunkMgr, BorderCase_advise_fail)
auto *chunk = alloc_chunk(LARGE_SIZE);
// pollute chunk
memset(chunk->data_, 0xaa, chunk->hold());
EXPECT_EQ(0, large_free_list_.get_pushes());
EXPECT_EQ(0, slots_[1]->get_pushes());
int64_t orig_chunk_hold = chunk->hold();
int64_t orig_hold = hold_;
free_chunk(chunk);
EXPECT_EQ(1, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
EXPECT_EQ(1, slots_[1]->get_pushes());
EXPECT_EQ(0, slots_[1]->get_pops());
need_fail_ = true;
chunk = alloc_chunk(LARGE_SIZE - ps * 3);
EXPECT_EQ(1, large_free_list_.get_pushes());
EXPECT_EQ(1, large_free_list_.get_pops());
EXPECT_EQ(1, slots_[1]->get_pushes());
EXPECT_EQ(1, slots_[1]->get_pops());
// check remap happened
EXPECT_TRUE(0 == chunk->data_[0] && 0 == memcmp(chunk->data_, chunk->data_ + 1, chunk->hold() - 1));
EXPECT_EQ(orig_chunk_hold - (int64_t)chunk->hold(), orig_hold - hold_);
@@ -286,17 +278,17 @@ TEST_F(TestChunkMgr, alloc_co_chunk)
chunk = alloc_chunk(LARGE_SIZE);
free_chunk(chunk);
}
EXPECT_EQ(1, large_free_list_.get_pushes());
EXPECT_EQ(0, large_free_list_.get_pops());
EXPECT_EQ(1, free_list_.get_pushes());
EXPECT_EQ(0, free_list_.get_pops());
EXPECT_EQ(1, slots_[1]->get_pushes());
EXPECT_EQ(0, slots_[1]->get_pops());
EXPECT_EQ(1, slots_[0]->get_pushes());
EXPECT_EQ(0, slots_[0]->get_pops());
set_limit(hold_);
auto *chunk = alloc_co_chunk(NORMAL_SIZE);
EXPECT_TRUE(chunk != NULL);
EXPECT_EQ(1, free_list_.get_pops());
EXPECT_EQ(1, slots_[1]->get_pops());
chunk = alloc_co_chunk(NORMAL_SIZE);
EXPECT_TRUE(chunk != NULL);
EXPECT_EQ(1, large_free_list_.get_pops());
EXPECT_EQ(1, slots_[0]->get_pops());
}

TEST_F(TestChunkMgr, FreeListBasic)
@@ -304,51 +296,29 @@ TEST_F(TestChunkMgr, FreeListBasic)
{
AChunk *chunk = alloc_chunk(0);
free_chunk(chunk);
EXPECT_EQ(1, free_list_.get_pushes());
EXPECT_EQ(1, slots_[0]->get_pushes());
}
{
AChunk *chunk = alloc_chunk(0);
free_chunk(chunk);
EXPECT_EQ(2, free_list_.get_pushes());
EXPECT_EQ(1, free_list_.get_pops());
EXPECT_EQ(2, slots_[0]->get_pushes());
EXPECT_EQ(1, slots_[0]->get_pops());
}
{
AChunk *chunk = alloc_chunk(OB_MALLOC_BIG_BLOCK_SIZE);
free_chunk(chunk);
EXPECT_EQ(3, free_list_.get_pushes());
EXPECT_EQ(2, free_list_.get_pops());
EXPECT_EQ(3, slots_[0]->get_pushes());
EXPECT_EQ(2, slots_[0]->get_pops());
}
}

TEST_F(TestChunkMgr, FreeListManyChunk)
{
AChunk *chunks[1024] = {};
for (int i = 0; i < 1024; i++) {
chunks[i] = alloc_chunk(OB_MALLOC_BIG_BLOCK_SIZE);
}
for (int i = 0; i < 1024; i++) {
free_chunk(chunks[i]);
}
EXPECT_EQ(AChunkList::DEFAULT_MAX_CHUNK_CACHE_SIZE/INTACT_ACHUNK_SIZE, free_list_.get_pushes());
EXPECT_EQ(0, free_list_.get_pops());

for (int i = 0; i < 1024; i++) {
chunks[i] = alloc_chunk(OB_MALLOC_BIG_BLOCK_SIZE);
}
for (int i = 0; i < 1024; i++) {
free_chunk(chunks[i]);
}
EXPECT_EQ(2* AChunkList::DEFAULT_MAX_CHUNK_CACHE_SIZE/INTACT_ACHUNK_SIZE, free_list_.get_pushes());
EXPECT_EQ(AChunkList::DEFAULT_MAX_CHUNK_CACHE_SIZE/INTACT_ACHUNK_SIZE, free_list_.get_pops());
}

TEST_F(TestChunkMgr, sync_wash)
{
set_limit(1LL<<30);
int NORMAL_SIZE = OB_MALLOC_BIG_BLOCK_SIZE;
int LARGE_SIZE = INTACT_ACHUNK_SIZE + 100;
free_list_.set_max_chunk_cache_size(1LL<<30);
large_free_list_.set_max_chunk_cache_size(1LL<<30);
slots_[0]->set_max_chunk_cache_size(1LL<<30);
slots_[1]->set_max_chunk_cache_size(1LL<<30);
AChunk *chunks[16][2] = {};
for (int i = 0; i < 16; ++i) {
chunks[i][0] = alloc_chunk(NORMAL_SIZE);
@@ -360,13 +330,13 @@ TEST_F(TestChunkMgr, sync_wash)
chunks[i][j] = NULL;
}
}
int64_t hold = free_list_.hold() + large_free_list_.hold();
int64_t hold = get_freelist_hold();
EXPECT_EQ(hold, hold_);
EXPECT_EQ(16, free_list_.count());
EXPECT_EQ(16, large_free_list_.count());
EXPECT_EQ(16, slots_[0]->count());
EXPECT_EQ(16, slots_[1]->count());
int64_t washed_size = sync_wash();
EXPECT_EQ(hold, washed_size);
EXPECT_EQ(0, hold_);
EXPECT_EQ(0, free_list_.count());
EXPECT_EQ(0, large_free_list_.count());
EXPECT_EQ(0, slots_[0]->count());
EXPECT_EQ(0, slots_[1]->count());
}
@@ -28,6 +28,7 @@ using namespace oceanbase::common;

TEST(TestTenantAllocator, CtxAlloc)
{
CHUNK_MGR.set_max_chunk_cache_size(1<<20);
ObTenantCtxAllocator ta(123, 1);
ta.set_tenant_memory_mgr();
ta.set_limit(INT64_MAX);

@@ -183,21 +183,15 @@ int ObServerReloadConfig::operator()()
reserve);
}

int64_t cache_size = GCONF.memory_chunk_cache_size;
if (0 == cache_size) {
bool use_large_chunk_cache = 1 != cache_size;
if (0 == cache_size || 1 == cache_size) {
cache_size = GMEMCONF.get_server_memory_limit();
if (cache_size >= (32L<<30)) {
cache_size -= (4L<<30);
}
}
int64_t large_cache_size = GCONF._memory_large_chunk_cache_size;
if (0 == large_cache_size) {
large_cache_size = lib::AChunkMgr::DEFAULT_LARGE_CHUNK_CACHE_SIZE;
}
lib::AChunkMgr::instance().set_max_chunk_cache_size(cache_size);
lib::AChunkMgr::instance().set_max_large_chunk_cache_size(large_cache_size);
lib::AChunkMgr::instance().set_max_chunk_cache_size(cache_size, use_large_chunk_cache);

if (!is_arbitration_mode) {
// Refresh cluster_id, cluster_name_hash for non arbitration mode
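The reload path above now treats memory_chunk_cache_size values 0 and 1 as "adaptive" (derived from the server memory limit) and uses the value 1 to additionally disable the large-chunk caches; the separate _memory_large_chunk_cache_size knob is dropped. A compact restatement of that derivation, not from the patch (the GCONF/GMEMCONF reads are replaced by plain parameters here):

#include <cstdint>

// Returns the chunk-cache budget and whether large-chunk caching stays enabled.
static void derive_chunk_cache_config(int64_t configured, int64_t server_memory_limit,
                                      int64_t &cache_size, bool &use_large_chunk_cache)
{
  use_large_chunk_cache = (1 != configured);        // a configured value of 1 keeps only the 2 MB cache
  cache_size = configured;
  if (0 == configured || 1 == configured) {         // 0 and 1 both mean: size the cache adaptively
    cache_size = server_memory_limit;
    if (cache_size >= (32L << 30)) {
      cache_size -= (4L << 30);                     // leave headroom on large hosts
    }
  }
}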
@@ -220,8 +220,6 @@ DEF_CAP(cache_wash_threshold, OB_CLUSTER_PARAMETER, "4GB", "[0B,]",
ObParameterAttr(Section::OBSERVER, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));
DEF_CAP(memory_chunk_cache_size, OB_CLUSTER_PARAMETER, "0M", "[0M,]", "the maximum size of memory cached by memory chunk cache. Range: [0M,], 0 stands for adaptive",
ObParameterAttr(Section::OBSERVER, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));
DEF_CAP(_memory_large_chunk_cache_size, OB_CLUSTER_PARAMETER, "0M", "[0M,]", "the maximum size of large memory cached by memory chunk cache. Range: [0M,], 0 stands for adaptive",
ObParameterAttr(Section::OBSERVER, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));
DEF_TIME(autoinc_cache_refresh_interval, OB_CLUSTER_PARAMETER, "3600s", "[100ms,]",
"auto-increment service cache refresh sync_value in this interval, "
"with default 3600s. Range: [100ms, +∞)",
@@ -123,43 +123,15 @@ int ObTenantMemoryPrinter::print_tenant_usage()
}

// print global chunk freelist
const int64_t max_unmanaged_memory_size = 10LL<<30;
int64_t resident_size = 0;
int64_t memory_used = get_virtual_memory_used(&resident_size);
_STORAGE_LOG(INFO,
"[CHUNK_MGR] free=%ld pushes=%ld pops=%ld limit=%'15ld hold=%'15ld total_hold=%'15ld used=%'15ld" \
" freelist_hold=%'15ld large_freelist_hold=%'15ld" \
" maps=%'15ld unmaps=%'15ld" \
" large_maps=%'15ld large_unmaps=%'15ld" \
" huge_maps=%'15ld huge_unmaps=%'15ld" \
" memalign=%d resident_size=%'15ld"
#ifndef ENABLE_SANITY
" virtual_memory_used=%'15ld\n",
#else
" virtual_memory_used=%'15ld actual_virtual_memory_used=%'15ld\n",
#endif
CHUNK_MGR.get_free_chunk_count(),
CHUNK_MGR.get_free_chunk_pushes(),
CHUNK_MGR.get_free_chunk_pops(),
CHUNK_MGR.get_limit(),
CHUNK_MGR.get_hold(),
CHUNK_MGR.get_total_hold(),
CHUNK_MGR.get_used(),
CHUNK_MGR.get_freelist_hold() + CHUNK_MGR.get_large_freelist_hold(),
CHUNK_MGR.get_large_freelist_hold(),
CHUNK_MGR.get_maps(),
CHUNK_MGR.get_unmaps(),
CHUNK_MGR.get_large_maps(),
CHUNK_MGR.get_large_unmaps(),
CHUNK_MGR.get_huge_maps(),
CHUNK_MGR.get_huge_unmaps(),
0,
resident_size,
#ifndef ENABLE_SANITY
memory_used
#else
memory_used - CHUNK_MGR.get_shadow_hold(), memory_used
#endif
);
int64_t limit = CHUNK_MGR.get_limit();
if (resident_size > limit + max_unmanaged_memory_size) {
LOG_ERROR("RESIDENT_SIZE OVER MEMORY_LIMIT", K(resident_size), K(limit));
}
int64_t pos = CHUNK_MGR.to_string(print_buf, BUF_LEN);
_STORAGE_LOG(INFO, "%.*s", static_cast<int>(pos), print_buf);
ObMallocTimeMonitor::get_instance().print();
print_mutex_.unlock();
}

@@ -349,7 +349,6 @@ _max_rpc_packet_size
_max_schema_slot_num
_max_tablet_cnt_per_gb
_mds_memory_limit_percentage
_memory_large_chunk_cache_size
_memstore_limit_percentage
_migrate_block_verify_level
_minor_compaction_amplification_factor