[Revert](mem) revert the mem config change causing performance degradation (#13526)

* Revert "[fix](mem) failure of allocating memory (#13414)"

This reverts commit 971eb9172f3e925c0b46ec1ffd1a9037a1b49801.

* Revert "[improvement](memory) disable page cache and chunk allocator, optimize memory allocate size (#13285)"

This reverts commit a5f3880649b094b58061f25c15dccdb50a4a2973.
Author: HappenLee
Committed: 2022-10-21 08:32:16 +08:00 (via GitHub)
Parent: 736d113700
Commit: d2be5096d6
8 changed files with 74 additions and 83 deletions


@@ -154,36 +154,35 @@ ChunkAllocator::ChunkAllocator(size_t reserve_limit)
 Status ChunkAllocator::allocate(size_t size, Chunk* chunk) {
     CHECK((size > 0 && (size & (size - 1)) == 0));
+    // fast path: allocate from current core arena
     int core_id = CpuInfo::get_current_core();
-    chunk->core_id = core_id;
     chunk->size = size;
-    if (!config::disable_chunk_allocator) {
-        // fast path: allocate from current core arena
-        if (_arenas[core_id]->pop_free_chunk(size, &chunk->data)) {
-            DCHECK_GE(_reserved_bytes, 0);
-            _reserved_bytes.fetch_sub(size);
-            chunk_pool_local_core_alloc_count->increment(1);
-            // transfer the memory ownership of allocate from ChunkAllocator::tracker to the tls tracker.
-            THREAD_MEM_TRACKER_TRANSFER_FROM(size, _mem_tracker.get());
-            return Status::OK();
-        }
-        // Second path: try to allocate from other core's arena
-        // When the reserved bytes is greater than the limit, the chunk is stolen from other arena.
-        // Otherwise, it is allocated from the system first, which can reserve enough memory as soon as possible.
-        // After that, allocate from current core arena as much as possible.
-        if (_reserved_bytes > _steal_arena_limit) {
-            ++core_id;
-            for (int i = 1; i < _arenas.size(); ++i, ++core_id) {
-                if (_arenas[core_id % _arenas.size()]->pop_free_chunk(size, &chunk->data)) {
-                    DCHECK_GE(_reserved_bytes, 0);
-                    _reserved_bytes.fetch_sub(size);
-                    chunk_pool_other_core_alloc_count->increment(1);
-                    // reset chunk's core_id to other
-                    chunk->core_id = core_id % _arenas.size();
-                    // transfer the memory ownership of allocate from ChunkAllocator::tracker to the tls tracker.
-                    THREAD_MEM_TRACKER_TRANSFER_FROM(size, _mem_tracker.get());
-                    return Status::OK();
-                }
-            }
+    chunk->core_id = core_id;
+    if (_arenas[core_id]->pop_free_chunk(size, &chunk->data)) {
+        DCHECK_GE(_reserved_bytes, 0);
+        _reserved_bytes.fetch_sub(size);
+        chunk_pool_local_core_alloc_count->increment(1);
+        // transfer the memory ownership of allocate from ChunkAllocator::tracker to the tls tracker.
+        THREAD_MEM_TRACKER_TRANSFER_FROM(size, _mem_tracker.get());
+        return Status::OK();
+    }
+    // Second path: try to allocate from other core's arena
+    // When the reserved bytes is greater than the limit, the chunk is stolen from other arena.
+    // Otherwise, it is allocated from the system first, which can reserve enough memory as soon as possible.
+    // After that, allocate from current core arena as much as possible.
+    if (_reserved_bytes > _steal_arena_limit) {
+        ++core_id;
+        for (int i = 1; i < _arenas.size(); ++i, ++core_id) {
+            if (_arenas[core_id % _arenas.size()]->pop_free_chunk(size, &chunk->data)) {
+                DCHECK_GE(_reserved_bytes, 0);
+                _reserved_bytes.fetch_sub(size);
+                chunk_pool_other_core_alloc_count->increment(1);
+                // reset chunk's core_id to other
+                chunk->core_id = core_id % _arenas.size();
+                // transfer the memory ownership of allocate from ChunkAllocator::tracker to the tls tracker.
+                THREAD_MEM_TRACKER_TRANSFER_FROM(size, _mem_tracker.get());
+                return Status::OK();
+            }
         }
     }
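
Note on what this hunk restores: allocate() first pops a cached chunk from the current core's arena, and only scans the other cores' arenas once the pool already holds more than _steal_arena_limit bytes; otherwise it falls through to the system allocator so the reserve can grow. Below is a minimal, self-contained sketch of that two-path scheme. Chunk, Arena, and PooledAllocator are illustrative stand-ins, assuming a plain malloc fallback in place of Doris's SystemAllocator and omitting the mem-tracker transfers.

#include <atomic>
#include <cstdint>
#include <cstdlib>
#include <mutex>
#include <unordered_map>
#include <vector>

// Illustrative stand-ins for Doris's Chunk, ChunkArena, and ChunkAllocator.
struct Chunk {
    void* data = nullptr;
    size_t size = 0;
    int core_id = -1;  // arena the chunk should be returned to
};

class Arena {
public:
    bool pop_free_chunk(size_t size, void** data) {
        std::lock_guard<std::mutex> lock(_mtx);
        auto& bucket = _free[size];
        if (bucket.empty()) return false;
        *data = bucket.back();
        bucket.pop_back();
        return true;
    }
    void push_free_chunk(void* data, size_t size) {
        std::lock_guard<std::mutex> lock(_mtx);
        _free[size].push_back(data);
    }

private:
    std::mutex _mtx;
    std::unordered_map<size_t, std::vector<void*>> _free;  // size class -> cached blocks
};

class PooledAllocator {
public:
    PooledAllocator(size_t cores, int64_t steal_limit)
            : _arenas(cores), _steal_arena_limit(steal_limit) {}

    bool allocate(size_t size, size_t core_id, Chunk* chunk) {
        chunk->size = size;
        chunk->core_id = static_cast<int>(core_id);
        // Fast path: pop a chunk cached by the current core's arena.
        if (_arenas[core_id].pop_free_chunk(size, &chunk->data)) {
            _reserved_bytes.fetch_sub(static_cast<int64_t>(size));
            return true;
        }
        // Second path: steal from other arenas only once enough bytes are pooled;
        // below the limit, prefer growing the reserve from the system instead.
        if (_reserved_bytes.load() > _steal_arena_limit) {
            for (size_t i = 1; i < _arenas.size(); ++i) {
                size_t other = (core_id + i) % _arenas.size();
                if (_arenas[other].pop_free_chunk(size, &chunk->data)) {
                    _reserved_bytes.fetch_sub(static_cast<int64_t>(size));
                    chunk->core_id = static_cast<int>(other);  // freed back to its source arena
                    return true;
                }
            }
        }
        // Slow path: fall back to the system allocator.
        chunk->data = std::malloc(size);
        return chunk->data != nullptr;
    }

    void free(const Chunk& chunk) {
        // Cache the chunk in its owning arena instead of returning it to the system.
        _arenas[static_cast<size_t>(chunk.core_id)].push_free_chunk(chunk.data, chunk.size);
        _reserved_bytes.fetch_add(static_cast<int64_t>(chunk.size));
    }

private:
    std::vector<Arena> _arenas;  // one free-chunk cache per core
    std::atomic<int64_t> _reserved_bytes{0};
    const int64_t _steal_arena_limit;
};

Pinning each free list to a core keeps the fast path mostly uncontended; the steal threshold trades that locality against reclaiming bytes that are already reserved elsewhere.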
@@ -205,7 +204,7 @@ Status ChunkAllocator::allocate(size_t size, Chunk* chunk) {
 void ChunkAllocator::free(const Chunk& chunk) {
     DCHECK(chunk.core_id != -1);
     CHECK((chunk.size & (chunk.size - 1)) == 0);
-    if (config::disable_chunk_allocator) {
+    if (config::disable_mem_pools) {
         SystemAllocator::free(chunk.data, chunk.size);
         return;
     }
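
Both hunks CHECK `(size & (size - 1)) == 0`, the standard bit trick for "nonzero power of two"; the arenas bucket cached chunks by exact size class, so only power-of-two sizes are pooled. A standalone illustration:

#include <cassert>
#include <cstddef>

// A nonzero value is a power of two exactly when clearing its lowest set bit
// yields zero: 4096 is 0b1000000000000 and 4095 is 0b0111111111111, so
// 4096 & 4095 == 0, while 4097 & 4096 != 0.
bool is_power_of_two(size_t size) {
    return size > 0 && (size & (size - 1)) == 0;
}

int main() {
    assert(is_power_of_two(1));
    assert(is_power_of_two(4096));
    assert(!is_power_of_two(0));
    assert(!is_power_of_two(4097));
    return 0;
}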