fix core at BlockSet::free_block
@@ -30,7 +30,7 @@ public:
   };
 
   virtual int sync_wash_mbs(const uint64_t tenant_id, const int64_t wash_size,
-                            bool wash_single_mb, ObCacheMemBlock *&wash_blocks) = 0;
+                            ObCacheMemBlock *&wash_blocks) = 0;
   virtual int erase_cache(const uint64_t tenant_id) = 0;
 };
 
@@ -40,11 +40,10 @@ class ObDefaultCacheWasher : public ObICacheWasher
   virtual ~ObDefaultCacheWasher() {};
 
   virtual int sync_wash_mbs(const uint64_t tenant_id, const int64_t wash_size,
-                            bool wash_single_mb, ObCacheMemBlock *&wash_blocks) override
+                            ObCacheMemBlock *&wash_blocks) override
   {
     UNUSED(tenant_id);
     UNUSED(wash_size);
-    UNUSED(wash_single_mb);
     UNUSED(wash_blocks);
     return common::OB_CACHE_FREE_BLOCK_NOT_ENOUGH;
   }
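The hunks above drop the `wash_single_mb` fast path from the washer interface: a wash now always returns a singly linked chain of memory blocks through `wash_blocks`, and callers walk `next_` themselves. Below is a minimal, self-contained sketch of that contract; the types and the `consume_all` helper are illustrative stand-ins, not the real OceanBase definitions.

```cpp
#include <cstdint>
#include <cstdio>

struct ObCacheMemBlock {
  ObCacheMemBlock *next_ = nullptr;  // washed blocks chain through next_
};

struct ObICacheWasher {
  virtual ~ObICacheWasher() {}
  // Post-change shape: no wash_single_mb flag; the washer may hand back one
  // block or a whole chain, and every caller walks the list.
  virtual int sync_wash_mbs(std::uint64_t tenant_id, std::int64_t wash_size,
                            ObCacheMemBlock *&wash_blocks) = 0;
};

// Caller-side pattern used throughout this diff: save next_ before the
// block is recycled, then advance.
void consume_all(ObCacheMemBlock *washed_blocks) {
  while (washed_blocks != nullptr) {
    ObCacheMemBlock *next = washed_blocks->next_;
    // ... return the block's chunk to the allocator / OS here ...
    washed_blocks = next;
  }
}

int main() {
  ObCacheMemBlock a, b;
  a.next_ = &b;  // a two-block chain, as a multi-MB wash would return
  consume_all(&a);
  std::puts("chain consumed");
  return 0;
}
```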
deps/oblib/src/lib/resource/ob_resource_mgr.cpp
@@ -80,76 +80,37 @@ AChunk *ObTenantMemoryMgr::alloc_chunk(const int64_t size, const ObMemAttr &attr
       && attr.label_ != ObNewModIds::OB_KVSTORE_CACHE_MB) {
     // try wash memory from cache
     ObICacheWasher::ObCacheMemBlock *washed_blocks = NULL;
-    bool wash_single_mb = true;
-    int64_t wash_size = hold_size;
-    if (attr.ctx_id_ == ObCtxIds::CO_STACK) {
-      wash_single_mb = false;
-    } else if (wash_size > INTACT_ACHUNK_SIZE) {
-      wash_size = wash_size + LARGE_REQUEST_EXTRA_MB_COUNT * INTACT_ACHUNK_SIZE;
-      wash_single_mb = false;
+    int64_t wash_size = hold_size + LARGE_REQUEST_EXTRA_MB_COUNT * INTACT_ACHUNK_SIZE;
+    while (!reach_ctx_limit && OB_SUCC(ret) && NULL == chunk && wash_size < cache_hold_) {
+      if (OB_FAIL(cache_washer_->sync_wash_mbs(tenant_id_, wash_size, washed_blocks))) {
+        LOG_WARN("sync_wash_mbs failed", K(ret), K_(tenant_id), K(wash_size));
+      } else {
+        // should return back to os, then realloc again
+        ObMemAttr cache_attr;
+        cache_attr.tenant_id_ = tenant_id_;
+        cache_attr.label_ = ObNewModIds::OB_KVSTORE_CACHE_MB;
+        ObICacheWasher::ObCacheMemBlock *next = NULL;
+        while (NULL != washed_blocks) {
+          AChunk *chunk = ptr2chunk(washed_blocks);
+          next = washed_blocks->next_;
+          free_chunk(chunk, cache_attr);
+          chunk = NULL;
+          washed_blocks = next;
+        }
+
+        if (update_hold(static_cast<int64_t>(hold_size), attr.ctx_id_, attr.label_,
+                        reach_ctx_limit)) {
+          chunk = alloc_chunk_(size, attr);
+          if (NULL == chunk) {
+            update_hold(-hold_size, attr.ctx_id_, attr.label_, reach_ctx_limit);
+          }
+        }
+      }
     }
 
-    if (wash_single_mb) {
-      if (OB_FAIL(cache_washer_->sync_wash_mbs(tenant_id_, wash_size,
-                                               wash_single_mb, washed_blocks))) {
-        LOG_WARN("sync_wash_mbs failed", K(ret), K_(tenant_id), K(wash_size), K(wash_single_mb));
-      } else if (NULL != washed_blocks->next_) {
-        ret = OB_ERR_UNEXPECTED;
-        LOG_WARN("not single memory block washed", K(ret), K(wash_single_mb));
-      } else {
-        chunk = ptr2chunk(washed_blocks);
-        const int64_t chunk_hold = static_cast<int64_t>(chunk->hold());
-        update_cache_hold(-chunk_hold);
-        if (!update_ctx_hold(attr.ctx_id_, chunk_hold)) {
-          // reach ctx limit
-          // The ctx_id here can be given freely, because ctx_id is meaningless when the label is OB_KVSTORE_CACHE_MB
-          update_hold(-chunk_hold, attr.ctx_id_,
-                      ObNewModIds::OB_KVSTORE_CACHE_MB, reach_ctx_limit);
-          free_chunk_(chunk, attr);
-          chunk = NULL;
-        }
-      }
-    } else {
-      const int64_t max_retry_count = 3;
-      int64_t retry_count = 0;
-      while (!reach_ctx_limit && OB_SUCC(ret) && NULL == chunk && wash_size < cache_hold_
-             && retry_count < max_retry_count) {
-        if (OB_FAIL(cache_washer_->sync_wash_mbs(tenant_id_, wash_size,
-                                                 wash_single_mb, washed_blocks))) {
-          LOG_WARN("sync_wash_mbs failed", K(ret), K_(tenant_id), K(wash_size), K(wash_single_mb));
-        } else {
-          // should return back to os, then realloc again
-          ObMemAttr cache_attr;
-          cache_attr.tenant_id_ = tenant_id_;
-          cache_attr.label_ = ObNewModIds::OB_KVSTORE_CACHE_MB;
-          ObICacheWasher::ObCacheMemBlock *next = NULL;
-          while (NULL != washed_blocks) {
-            AChunk *chunk = ptr2chunk(washed_blocks);
-            next = washed_blocks->next_;
-            free_chunk(chunk, cache_attr);
-            washed_blocks = next;
-          }
-
-          if (update_hold(static_cast<int64_t>(hold_size), attr.ctx_id_, attr.label_,
-                          reach_ctx_limit)) {
-            chunk = alloc_chunk_(size, attr);
-            if (NULL == chunk) {
-              update_hold(-hold_size, attr.ctx_id_, attr.label_, reach_ctx_limit);
-            }
-          }
-        }
-        ++retry_count;
-        if (OB_SUCC(ret) && NULL == chunk) {
-          if (retry_count < max_retry_count) {
-            LOG_WARN("after wash from cache, still can't alloc large chunk from chunk_mgr, "
-                     "maybe alloc by other thread, need retry",
-                     K(retry_count), K(max_retry_count), K(size));
-          } else {
-            LOG_WARN("after wash from cache, still can't alloc large chunk from chunk_mgr, "
-                     "maybe alloc by other thread", K(max_retry_count), K(size));
-          }
-        }
-      }
-    }
+    if (OB_FAIL(ret)) {
+      LOG_WARN("after wash from cache, still can't alloc chunk from chunk_mgr, "
+               "maybe alloc by other thread", K(size), K(wash_size), K(ret));
     }
     BASIC_TIME_GUARD_CLICK("WASH_KVCACHE_END");
   }
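The rewritten wash path above collapses the old single-MB/multi-MB split into one loop: always over-request by `LARGE_REQUEST_EXTRA_MB_COUNT` chunks, hand every washed block back to the allocator, then retry the real allocation while the cache still holds enough to wash. A self-contained sketch of that retry shape, using stand-in names (`wash_from_cache`, `try_alloc`, and both constants) rather than the real members:

```cpp
#include <cstddef>
#include <cstdint>
#include <new>

constexpr std::int64_t kIntactChunkSize = 2 << 20;  // stand-in for INTACT_ACHUNK_SIZE
constexpr std::int64_t kExtraMbCount = 2;           // stand-in for LARGE_REQUEST_EXTRA_MB_COUNT

// Stand-in washer: gives memory back to the underlying allocator if the
// cache still holds at least `size` bytes.
static bool wash_from_cache(std::int64_t size, std::int64_t &cache_hold) {
  if (cache_hold < size) {
    return false;
  }
  cache_hold -= size;
  return true;
}

static void *try_alloc(std::int64_t size) {
  // May still fail if another thread grabs the freed memory first.
  return ::operator new(static_cast<std::size_t>(size), std::nothrow);
}

void *alloc_with_wash(std::int64_t hold_size, std::int64_t &cache_hold) {
  // Over-request: wash a little more than needed so a concurrent allocation
  // taking part of the freed memory does not immediately starve this one.
  const std::int64_t wash_size = hold_size + kExtraMbCount * kIntactChunkSize;
  void *chunk = nullptr;
  while (nullptr == chunk && wash_size < cache_hold) {
    if (!wash_from_cache(wash_size, cache_hold)) {
      break;
    }
    chunk = try_alloc(hold_size);
  }
  return chunk;
}

int main() {
  std::int64_t cache_hold = 64 * kIntactChunkSize;
  void *p = alloc_with_wash(4 * kIntactChunkSize, cache_hold);
  ::operator delete(p);
  return 0;
}
```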
@@ -148,42 +148,27 @@ public:
     return ret;
   }
   int sync_wash_mbs(const uint64_t tenant_id, const int64_t wash_size,
-                    bool wash_single_mb, ObCacheMemBlock *&wash_blocks)
+                    ObCacheMemBlock *&wash_blocks)
   {
     UNUSED(tenant_id);
     int ret = OB_SUCCESS;
-    if (wash_single_mb) {
-      if (wash_size > CHUNK_MGR.aligned(mb_size_)) {
-        ret = OB_CACHE_FREE_BLOCK_NOT_ENOUGH;
-        LIB_LOG(WARN, "wash_size > mb_size", K(ret), K(wash_size), K_(mb_size));
-      } else if (NULL == mb_blocks_) {
-        ret = OB_CACHE_FREE_BLOCK_NOT_ENOUGH;
-        LIB_LOG(WARN, "free block not enough", K(ret), K(wash_size), K_(mb_size));
-      } else {
-        ObCacheMemBlock *free_block = mb_blocks_;
-        mb_blocks_ = free_block->next_;
-        free_block->next_ = NULL;
-        wash_blocks = free_block;
-      }
-    } else {
-      int64_t left_to_washed = wash_size;
-      ObCacheMemBlock *washed_blocks = NULL;
-      while (OB_SUCC(ret) && left_to_washed > 0) {
-        if (NULL == mb_blocks_) {
-          ret = OB_CACHE_FREE_BLOCK_NOT_ENOUGH;
-          LIB_LOG(WARN, "free block not enough", K(ret), K(wash_size));
-        } else {
-          ObCacheMemBlock *free_block = mb_blocks_;
-          mb_blocks_ = free_block->next_;
-          free_block->next_ = washed_blocks;
-          washed_blocks = free_block;
-          left_to_washed -= CHUNK_MGR.aligned(mb_size_);
-        }
-      }
-      if (OB_SUCC(ret)) {
-        wash_blocks = washed_blocks;
-      }
-    }
+    int64_t left_to_washed = wash_size;
+    ObCacheMemBlock *washed_blocks = NULL;
+    while (OB_SUCC(ret) && left_to_washed > 0) {
+      if (NULL == mb_blocks_) {
+        ret = OB_CACHE_FREE_BLOCK_NOT_ENOUGH;
+        LIB_LOG(WARN, "free block not enough", K(ret), K(wash_size));
+      } else {
+        ObCacheMemBlock *free_block = mb_blocks_;
+        mb_blocks_ = free_block->next_;
+        free_block->next_ = washed_blocks;
+        washed_blocks = free_block;
+        left_to_washed -= CHUNK_MGR.aligned(mb_size_);
+      }
+    }
+    if (OB_SUCC(ret)) {
+      wash_blocks = washed_blocks;
+    }
     return ret;
   }
   void free_mbs(ObTenantMemoryMgr &mgr)
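The simplified test washer above keeps only the list-accumulation branch: blocks are popped off the free list, prepended to a local chain, and published to the out-parameter only once the whole request is covered. A stand-alone model of that accumulate-then-publish pattern (all names here are stand-ins; 0 stands in for OB_SUCCESS, -1 for OB_CACHE_FREE_BLOCK_NOT_ENOUGH):

```cpp
#include <cstdint>

struct Block {
  Block *next_ = nullptr;
};

int wash(Block *&free_list, std::int64_t wash_size, std::int64_t aligned_size,
         Block *&out) {
  int ret = 0;
  Block *washed = nullptr;
  std::int64_t left = wash_size;
  while (0 == ret && left > 0) {
    if (nullptr == free_list) {
      ret = -1;  // ran out of free blocks before covering wash_size
    } else {
      Block *b = free_list;
      free_list = b->next_;
      b->next_ = washed;  // O(1) prepend; result order is reversed, which is fine
      washed = b;
      left -= aligned_size;
    }
  }
  if (0 == ret) {
    out = washed;  // publish only on full success
  }
  return ret;
}

int main() {
  Block pool[3];
  pool[0].next_ = &pool[1];
  pool[1].next_ = &pool[2];
  Block *free_list = &pool[0];
  Block *washed = nullptr;
  // Ask for 4 "bytes" with 2-"byte" blocks: consumes two blocks, succeeds.
  return wash(free_list, 4, 2, washed);
}
```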
@@ -239,18 +224,18 @@ TEST(TestTenantMemoryMgr, sync_wash)
       ++alloc_count;
     }
   }
 
   // check stat, left one mb in cache
-  ASSERT_EQ(aligned_size, memory_mgr.get_cache_hold());
-  ASSERT_EQ(1, memory_mgr.get_cache_item_count());
-  ASSERT_EQ(aligned_size * mb_count, memory_mgr.get_sum_hold());
-  ASSERT_EQ(mb_count - 1, alloc_count);
+  mb_count -= alloc_count;
+  ASSERT_EQ(aligned_size * mb_count, memory_mgr.get_cache_hold());
+  ASSERT_EQ(mb_count, memory_mgr.get_cache_item_count());
+  ASSERT_EQ(aligned_size * (mb_count + alloc_count), memory_mgr.get_sum_hold());
 
   for (int64_t i = 0; i < chunks.count(); ++i) {
     memory_mgr.free_chunk((AChunk *)chunks.at(i), attr);
   }
-  washer.free_mbs(memory_mgr);
+  for (int64_t i = 0; i < mb_count; ++i) {
+    washer.free_mbs(memory_mgr);
+  }
   ASSERT_EQ(0, memory_mgr.get_cache_hold());
   ASSERT_EQ(0, memory_mgr.get_cache_item_count());
   ASSERT_EQ(0, memory_mgr.get_ctx_hold_bytes()[0]);
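With the single-MB fast path gone, one wash call may drain several memory blocks at once, so the test now derives its expectations from the counts instead of hard-coding one leftover block: after `mb_count -= alloc_count`, the cache should hold `aligned_size * mb_count` bytes across `mb_count` blocks, while the process-wide hold stays `aligned_size * (mb_count + alloc_count)`. For example (illustrative numbers, not from the test), starting with 10 blocks and making 4 successful allocations leaves 6 blocks and `6 * aligned_size` bytes in the cache, with the total hold still `10 * aligned_size`.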
src/share/cache/ob_kv_storecache.cpp
@@ -897,7 +897,6 @@ int ObKVGlobalCache::get_washable_size(const uint64_t tenant_id, int64_t &washab
 }
 
 int ObKVGlobalCache::sync_wash_mbs(const uint64_t tenant_id, const int64_t wash_size,
-                                   const bool wash_single_mb,
                                    ObICacheWasher::ObCacheMemBlock *&wash_blocks)
 {
   int ret = OB_SUCCESS;
@@ -907,9 +906,9 @@ int ObKVGlobalCache::sync_wash_mbs(const uint64_t tenant_id, const int64_t wash_
   } else if (OB_INVALID_ID == tenant_id || wash_size <= 0) {
     ret = OB_INVALID_ARGUMENT;
     COMMON_LOG(WARN, "invalid arguments", K(ret), K(tenant_id), K(wash_size));
-  } else if (OB_FAIL(store_.sync_wash_mbs(tenant_id, wash_size, wash_single_mb, wash_blocks))) {
+  } else if (OB_FAIL(store_.sync_wash_mbs(tenant_id, wash_size, wash_blocks))) {
     if (ret != OB_CACHE_FREE_BLOCK_NOT_ENOUGH) {
-      COMMON_LOG(WARN, "sync_wash_mbs failed", K(ret), K(tenant_id), K(wash_size), K(wash_single_mb));
+      COMMON_LOG(WARN, "sync_wash_mbs failed", K(ret), K(tenant_id), K(wash_size));
     }
   }
   return ret;
src/share/cache/ob_kv_storecache.h
@@ -158,7 +158,6 @@ public:
 
   // wash memblock from cache synchronously
   virtual int sync_wash_mbs(const uint64_t tenant_id, const int64_t wash_size,
-                            const bool wash_single_mb,
                             lib::ObICacheWasher::ObCacheMemBlock *&wash_blocks);
   int set_storage_leak_check_mod(const char *check_mod);
   int get_cache_name(const int64_t cache_id, char *cache_name);
src/share/cache/ob_kvcache_store.cpp
@@ -496,7 +496,6 @@ void ObKVCacheStore::flush_washable_mbs(const uint64_t tenant_id, const int64_t
 }
 
 int ObKVCacheStore::sync_wash_mbs(const uint64_t tenant_id, const int64_t size_need_washed,
-                                  const bool wash_single_mb,
                                   ObICacheWasher::ObCacheMemBlock *&wash_blocks)
 {
   int ret = OB_SUCCESS;
@@ -504,11 +503,10 @@ int ObKVCacheStore::sync_wash_mbs(const uint64_t tenant_id, const int64_t size_n
   if (!inited_) {
     ret = OB_NOT_INIT;
     COMMON_LOG(WARN, "not init", K(ret));
-  } else if (OB_INVALID_ID == tenant_id || size_need_washed <= 0
-      || (wash_single_mb && size_need_washed != aligned_block_size_)) {
+  } else if (OB_INVALID_ID == tenant_id || size_need_washed <= 0) {
     ret = OB_INVALID_ARGUMENT;
     COMMON_LOG(WARN, "invalid arguments", K(ret), K(tenant_id), K(size_need_washed),
-               K(wash_single_mb), K_(aligned_block_size));
+               K_(aligned_block_size));
   } else if (OB_FAIL(try_flush_washable_mb(tenant_id, wash_blocks, -1, size_need_washed))) {
     if (ret != OB_CACHE_FREE_BLOCK_NOT_ENOUGH) {
       COMMON_LOG(WARN, "Fail to try flush mb", K(ret), K(tenant_id));
@@ -1313,26 +1311,24 @@ void *ObKVCacheStore::alloc_mb(ObTenantResourceMgrHandle &resource_handle,
   } else if (NULL == (ptr = resource_handle.get_memory_mgr()->alloc_cache_mb(block_size))) {
     if (block_size == block_size_) {
       ObICacheWasher::ObCacheMemBlock *washed_blocks = NULL;
-      const bool wash_single_mb = true;
       const int64_t wash_size = aligned_block_size_;
-      if (OB_FAIL(sync_wash_mbs(tenant_id, wash_size, wash_single_mb, washed_blocks))) {
+      if (OB_FAIL(sync_wash_mbs(tenant_id, wash_size, washed_blocks))) {
         COMMON_LOG(WARN, "sync_wash_mbs failed", K(ret),
-                   K(tenant_id), K(wash_size), K(wash_single_mb));
+                   K(tenant_id), K(wash_size));
       } else {
         ptr = reinterpret_cast<void *>(washed_blocks);
       }
     } else {
       const int64_t max_retry_count = 3;
       int64_t retry_count = 0;
-      const bool wash_single_mb = false;
       while (OB_SUCC(ret) && NULL == ptr && retry_count < max_retry_count) {
         ObICacheWasher::ObCacheMemBlock *washed_blocks = NULL;
         int64_t wash_size = ObTenantMemoryMgr::align(block_size);
         if (wash_size > aligned_block_size_) {
           wash_size += 2 * aligned_block_size_;
         }
-        if (OB_FAIL(sync_wash_mbs(tenant_id, wash_size, wash_single_mb, washed_blocks))) {
-          COMMON_LOG(WARN, "sync_wash_mbs failed", K(ret), K(tenant_id), K(wash_size), K(wash_single_mb));
+        if (OB_FAIL(sync_wash_mbs(tenant_id, wash_size, washed_blocks))) {
+          COMMON_LOG(WARN, "sync_wash_mbs failed", K(ret), K(tenant_id), K(wash_size));
        } else {
          ObICacheWasher::ObCacheMemBlock *wash_block = washed_blocks;
          ObICacheWasher::ObCacheMemBlock *next = NULL;
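In `alloc_mb` above, requests larger than the standard cache block still wash `align(block_size)` plus two extra block sizes as headroom against concurrent allocations, retried up to `max_retry_count` (3) times; only the now-redundant `wash_single_mb` locals and arguments disappear. A small sketch of just the wash sizing, with stand-in names for `ObTenantMemoryMgr::align` and `aligned_block_size_`:

```cpp
#include <cstdint>
#include <cstdio>

constexpr std::int64_t kAlignedBlockSize = 2 << 20;  // stand-in for aligned_block_size_

// Stand-in for ObTenantMemoryMgr::align(): round up to whole blocks.
std::int64_t align_up(std::int64_t v) {
  return (v + kAlignedBlockSize - 1) / kAlignedBlockSize * kAlignedBlockSize;
}

// Wash sizing for blocks larger than the standard cache block: wash the
// aligned request plus two extra blocks so a concurrent allocator taking
// part of the freed memory does not immediately defeat the retry.
std::int64_t wash_size_for(std::int64_t block_size) {
  std::int64_t wash_size = align_up(block_size);
  if (wash_size > kAlignedBlockSize) {
    wash_size += 2 * kAlignedBlockSize;
  }
  return wash_size;
}

int main() {
  std::printf("standard block: %lld\n", (long long)wash_size_for(kAlignedBlockSize));
  std::printf("large block   : %lld\n", (long long)wash_size_for(3 * kAlignedBlockSize));
  return 0;
}
```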
src/share/cache/ob_kvcache_store.h
@@ -85,7 +85,6 @@ public:
   void flush_washable_mbs(const uint64_t tenant_id, const int64_t cache_id);
 
   int sync_wash_mbs(const uint64_t tenant_id, const int64_t wash_size,
-                    const bool wash_single_mb,
                     lib::ObICacheWasher::ObCacheMemBlock *&wash_blocks);
 
   virtual int alloc_mbhandle(ObKVCacheInst &inst, const int64_t block_size,