Fix the problem that freed hashtable memory cannot be reclaimed immediately

This commit is contained in:
obdev
2023-04-13 10:12:29 +00:00
committed by ob-robot
parent 975b48cbbc
commit 6de0c5fbdd
5 changed files with 108 additions and 183 deletions

View File

@ -147,13 +147,11 @@ public:
int destroy() int destroy()
{ {
int ret = ht_.destroy(); int ret = ht_.destroy();
allocer_.clear();
return ret; return ret;
}; };
int clear() int clear()
{ {
int ret = ht_.clear(); int ret = ht_.clear();
//allocer_.clear();
return ret; return ret;
}; };
int reuse() int reuse()

View File

@ -114,7 +114,6 @@ public:
{ {
// ret should be handled by caller. // ret should be handled by caller.
int ret = ht_.destroy(); int ret = ht_.destroy();
allocer_.clear();
return ret; return ret;
} }
int clear() int clear()

View File

@ -824,7 +824,6 @@ public:
//memset(buckets_, 0, sizeof(hashbucket) * bucket_num); //memset(buckets_, 0, sizeof(hashbucket) * bucket_num);
bucket_num_ = bucket_num; bucket_num_ = bucket_num;
allocer_ = allocer; allocer_ = allocer;
allocer_->inc_ref();
bucket_allocer_ = bucket_allocer; bucket_allocer_ = bucket_allocer;
} }
return ret; return ret;
@ -848,7 +847,6 @@ public:
buckets_[i].node = NULL; buckets_[i].node = NULL;
} }
} }
allocer_->dec_ref();
allocer_ = NULL; allocer_ = NULL;
//delete[] buckets_; //delete[] buckets_;
//buckets_ = NULL; //buckets_ = NULL;

View File

@ -1225,8 +1225,9 @@ struct SimpleAllocerBlock
int32_t ref_cnt; int32_t ref_cnt;
int32_t cur_pos; int32_t cur_pos;
char nodes_buffer[NODE_NUM *sizeof(Node)]; Node nodes[NODE_NUM];
Node *nodes; Node *node_free_list;
Block *prev;
Block *next; Block *next;
}; };
@ -1239,7 +1240,7 @@ template <class T>
struct NodeNumTraits<false, T> struct NodeNumTraits<false, T>
{ {
static const int32_t NODE_NUM = (common::OB_MALLOC_NORMAL_BLOCK_SIZE - static const int32_t NODE_NUM = (common::OB_MALLOC_NORMAL_BLOCK_SIZE -
24/*=sizeof(SimpleAllocerBlock 's members except nodes_buffer)*/ - 128/*for robust*/) / 24/*=sizeof(SimpleAllocerBlock 's members except nodes)*/ - 128/*for robust*/) /
(32/*sizeof(SimpleAllocerNode's members except data)*/ + sizeof(T)); (32/*sizeof(SimpleAllocerNode's members except data)*/ + sizeof(T));
}; };
@ -1252,6 +1253,28 @@ struct NodeNumTraits<true, T>
#define IS_BIG_OBJ(T) \ #define IS_BIG_OBJ(T) \
((common::OB_MALLOC_NORMAL_BLOCK_SIZE - 24 - 128) < (32 + sizeof(T))) ((common::OB_MALLOC_NORMAL_BLOCK_SIZE - 24 - 128) < (32 + sizeof(T)))
/*
block_free_list_:
Block C: node1->node2->node3...
^
|
Block B: node1->node2->node3...
^
|
Block A: node1->node2->node3...
alloc:
1. fetch from block_free_list_
2. fetch from block_remainder_
3. alloc new block
free:
check the block's reference count; a block whose count drops to zero will be destroyed. A block has 4 possible statuses:
1. in the block_free_list_
2. is the block_remainder_
3. in the block_free_list_ && is the block_remainder_
4. neither
*/
template <class T, template <class T,
int32_t NODE_NUM = NodeNumTraits<IS_BIG_OBJ(T), T>::NODE_NUM, int32_t NODE_NUM = NodeNumTraits<IS_BIG_OBJ(T), T>::NODE_NUM,
class DefendMode = SpinMutexDefendMode, class DefendMode = SpinMutexDefendMode,
@ -1266,67 +1289,50 @@ class SimpleAllocer
typedef typename DefendMode::lock_type lock_type; typedef typename DefendMode::lock_type lock_type;
typedef typename DefendMode::lock_initer lock_initer; typedef typename DefendMode::lock_initer lock_initer;
public: public:
SimpleAllocer() : block_list_head_(NULL), free_list_head_(NULL) SimpleAllocer() : block_remainder_(NULL), block_free_list_(NULL)
{ {
lock_initer initer(lock_); lock_initer initer(lock_);
} }
~SimpleAllocer() ~SimpleAllocer()
{ {
clear(); OB_ASSERT(NULL == block_remainder_ &&
NULL == block_free_list_);
} }
void set_attr(const ObMemAttr &attr) { allocer_.set_attr(attr); } void set_attr(const ObMemAttr &attr) { allocer_.set_attr(attr); }
void set_label(const lib::ObLabel &label) { allocer_.set_label(label); } void set_label(const lib::ObLabel &label) { allocer_.set_label(label); }
void clear()
{
Block *iter = block_list_head_;
while (NULL != iter) {
Block *next = iter->next;
if (0 != iter->ref_cnt) {
HASH_WRITE_LOG_RET(HASH_FATAL, OB_ERR_UNEXPECTED, "there is still node has not been free, ref_cnt=%d block=%p cur_pos=%d",
iter->ref_cnt, iter, iter->cur_pos);
} else {
//delete iter;
allocer_.free(iter);
}
iter = next;
}
block_list_head_ = NULL;
free_list_head_ = NULL;
}
template <class ... TYPES> template <class ... TYPES>
T *alloc(TYPES&... args) T *alloc(TYPES&... args)
{ {
T *ret = NULL; T *ret = NULL;
mutexlocker locker(lock_); mutexlocker locker(lock_);
if (NULL != free_list_head_) { if (NULL != block_free_list_) {
Node *node = free_list_head_; Block *block = block_free_list_;
Node *node = block->node_free_list;
if (NODE_MAGIC1 != node->magic1 || NODE_MAGIC2 != node->magic2 || NULL == node->block) { if (NODE_MAGIC1 != node->magic1 || NODE_MAGIC2 != node->magic2 || NULL == node->block) {
HASH_WRITE_LOG_RET(HASH_FATAL, OB_ERR_UNEXPECTED, "magic broken magic1=%x magic2=%x", node->magic1, node->magic2); HASH_WRITE_LOG_RET(HASH_FATAL, OB_ERR_UNEXPECTED, "magic broken magic1=%x magic2=%x", node->magic1, node->magic2);
} else { } else {
free_list_head_ = node->next; block->node_free_list = node->next;
if (block->node_free_list == NULL) {
take_off_from_fl(block);
}
node->block->ref_cnt++; node->block->ref_cnt++;
ret = &(node->data); ret = &(node->data);
} }
} else { } else {
Block *block = block_list_head_; Block *block = block_remainder_;
if (NULL == block || block->cur_pos >= (int32_t)NODE_NUM) { if (NULL == block || block->cur_pos >= (int32_t)NODE_NUM) {
//if (NULL == (block = new(std::nothrow) Block()))
if (NULL == (block = (Block *)allocer_.alloc(sizeof(Block)))) { if (NULL == (block = (Block *)allocer_.alloc(sizeof(Block)))) {
HASH_WRITE_LOG_RET(HASH_WARNING, OB_ERR_UNEXPECTED, "new block fail"); HASH_WRITE_LOG_RET(HASH_WARNING, OB_ERR_UNEXPECTED, "new block fail");
} else { } else {
// BACKTRACE(WARN, ((int64_t)sizeof(Block))>DEFAULT_BLOCK_SIZE,
// "hashutil alloc block=%ld node=%ld T=%ld N=%d",
// sizeof(Block), sizeof(Node), sizeof(T), NODE_NUM);
//memset(block, 0, sizeof(Block));
block->ref_cnt = 0; block->ref_cnt = 0;
block->cur_pos = 0; block->cur_pos = 0;
block->nodes = (Node *)(block->nodes_buffer); block->prev = block->next = NULL;
block->next = block_list_head_; block->node_free_list = NULL;
block_list_head_ = block; block_remainder_ = block;
} }
} }
if (NULL != block) { if (NULL != block) {
Node *node = block->nodes + block->cur_pos; Node *node = &block->nodes[block->cur_pos];
block->cur_pos++; block->cur_pos++;
block->ref_cnt++; block->ref_cnt++;
node->magic1 = NODE_MAGIC1; node->magic1 = NODE_MAGIC1;
@ -1352,65 +1358,55 @@ public:
HASH_WRITE_LOG_RET(HASH_FATAL, OB_ERR_UNEXPECTED, "magic broken magic1=%x magic2=%x", node->magic1, node->magic2); HASH_WRITE_LOG_RET(HASH_FATAL, OB_ERR_UNEXPECTED, "magic broken magic1=%x magic2=%x", node->magic1, node->magic2);
} else { } else {
data->~T(); data->~T();
node->block->ref_cnt--; Block *block = node->block;
node->next = free_list_head_; block->ref_cnt--;
free_list_head_ = node; if (0 == block->ref_cnt) {
if (block == block_remainder_) {
block_remainder_ = NULL;
}
// non-NULL means this block is in the freelist
if (block->next != NULL) {
take_off_from_fl(block);
}
allocer_.free(block);
} else {
node->next = block->node_free_list;
block->node_free_list = node;
// NULL means this block isn't in the freelist
if (block->next == NULL) {
add_to_fl(block);
} }
} }
} }
void inc_ref() }
}
void add_to_fl(Block *block)
{ {
if (block_free_list_ == NULL) {
block->prev = block->next = block;
block_free_list_ = block;
} else {
block->prev = block_free_list_->prev;
block->next = block_free_list_;
block_free_list_->prev->next = block;
block_free_list_->prev = block;
block_free_list_ = block;
} }
void dec_ref() }
void take_off_from_fl(Block *block)
{ {
} if (block == block->next) {
void gc() block_free_list_ = NULL;
{
mutexlocker locker(lock_);
if (NULL != free_list_head_ && NULL != block_list_head_) {
Block *block_iter = block_list_head_;
Block *block_next = NULL;
Block *block_prev = NULL;
while (NULL != block_iter) {
block_next = block_iter->next;
if (0 == block_iter->ref_cnt && 0 != block_iter->cur_pos) {
Node *node_iter = free_list_head_;
Node *node_prev = free_list_head_;
volatile int32_t counter = 0;
while (NULL != node_iter
&& counter < NODE_NUM) {
if (block_iter == node_iter->block) {
if (free_list_head_ == node_iter) {
free_list_head_ = node_iter->next;
} else { } else {
node_prev->next = node_iter->next; block->prev->next = block->next;
} block->next->prev = block->prev;
counter++; block_free_list_ = block->next;
} else { block->prev = block->next = NULL;
node_prev = node_iter;
}
node_iter = node_iter->next;
}
if (block_list_head_ == block_iter) {
block_list_head_ = block_iter->next;
} else {
block_prev->next = block_iter->next;
}
//delete block_iter;
allocer_.free(block_iter);
HASH_WRITE_LOG(HASH_DEBUG, "free succ block=%p", block_iter);
block_iter = NULL;
} else {
block_prev = block_iter;
}
block_iter = block_next;
}
} }
} }
private: private:
Block *block_list_head_; Block *block_remainder_;
Node *free_list_head_; Block *block_free_list_;
lock_type lock_; lock_type lock_;
Allocer allocer_; Allocer allocer_;
}; };

View File

@ -26,107 +26,41 @@ using namespace oceanbase;
using namespace common; using namespace common;
using namespace hash; using namespace hash;
const int64_t ITEM_NUM = 1024 * 128; std::set<void *> ptr_set;
class MyAllocator : public ObIAllocator
struct data_t
{ {
uint32_t construct_flag; public:
char data[1024]; void *alloc(const int64_t size)
data_t()
{ {
construct_flag = 0xffffffff; void *ptr = ob_malloc(size);
}; ptr_set.insert(ptr);
}; return ptr;
typedef SimpleAllocer<data_t, 128> allocer_t;
template <class T>
void shuffle(T *array, const int64_t array_size)
{
srand(static_cast<int32_t>(time(NULL)));
for (int64_t i = 0; i < array_size; i++)
{
int64_t swap_pos = rand() % array_size;
std::swap(array[i], array[swap_pos]);
} }
} void *alloc(const int64_t , const ObMemAttr &)
{ return NULL; }
void free(void *ptr)
{
ptr_set.erase(ptr);
}
};
TEST(TestSimpleAllocer, allocate) TEST(TestSimpleAllocer, allocate)
{ {
allocer_t al; SimpleAllocer<int, 10, SpinMutexDefendMode, MyAllocator> alloc;
data_t *nil = NULL; const int N = 100;
int *ptrs[N];
data_t *store[ITEM_NUM]; int i = N;
for (int64_t i = 0; i < ITEM_NUM; i++) while (i--) {
{ int *ptr = alloc.alloc();
EXPECT_NE(nil, (store[i] = al.alloc())); EXPECT_TRUE(ptr != NULL);
EXPECT_EQ(0xffffffff, store[i]->construct_flag); ptrs[i] = ptr;
} }
for (int64_t i = 0; i < ITEM_NUM; i++) i= N;
{ EXPECT_TRUE(!ptr_set.empty());
store[i]->construct_flag = 0x00000000; while (i--) {
al.free(store[i]); alloc.free(ptrs[i]);
} }
for (int64_t i = ITEM_NUM - 1; i >= 0; i--) EXPECT_TRUE(ptr_set.empty());
{
data_t *tmp = NULL;
EXPECT_NE(nil, (tmp = al.alloc()));
EXPECT_EQ(0xffffffff, tmp->construct_flag);
}
}
TEST(TestSimpleAllocer, free)
{
allocer_t al;
data_t *store[ITEM_NUM];
for (int64_t i = 0; i < ITEM_NUM; i++)
{
store[i] = al.alloc();
}
al.free(NULL);
shuffle(store, ITEM_NUM);
for (int64_t i = 0; i < ITEM_NUM; i++)
{
al.free(store[i]);
}
for (int64_t i = ITEM_NUM - 1; i >= 0; i--)
{
data_t *tmp = NULL;
EXPECT_EQ(store[i], (tmp = al.alloc()));
}
for (int64_t i = 0; i < ITEM_NUM; i++)
{
al.free(store[i]);
if (0 == i % 128)
{
al.gc();
}
}
}
TEST(TestSimpleAllocer, DISABLED_overflow)
{
allocer_t al;
data_t *nil = NULL;
data_t *data[2];
data_t tmp;
EXPECT_NE(nil, (data[0] = al.alloc()));
// this case is used to test overflow
// copy more bytes than sizeof(data[0]). (actually safe)
// but Coverity treat it as a potential bug
memcpy(&tmp, data[0], sizeof(data_t) + sizeof(uint32_t));
memset(data[0], -1, sizeof(data_t) + sizeof(uint32_t));
al.free(data[0]);
memcpy(data[0], &tmp, sizeof(data_t) + sizeof(uint32_t));
al.free(data[0]);
memset(data[0], -1, sizeof(data_t) + sizeof(uint32_t));
EXPECT_EQ(nil, (data[1] = al.alloc()));
memcpy(data[0], &tmp, sizeof(data_t) + sizeof(uint32_t));
EXPECT_NE(nil, (data[1] = al.alloc()));
EXPECT_EQ(data[0], data[1]);
} }
int main(int argc, char **argv) int main(int argc, char **argv)