Revert "Fix the problem that hashtable free memory cannot be recovered immediately"

obdev 2023-04-14 04:41:27 +00:00 committed by ob-robot
parent 76e6f675b5
commit d1e68a8c8c
5 changed files with 183 additions and 108 deletions

View File

@@ -147,11 +147,13 @@ public:
int destroy()
{
int ret = ht_.destroy();
allocer_.clear();
return ret;
};
int clear()
{
int ret = ht_.clear();
//allocer_.clear();
return ret;
};
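// A sketch of the contract restored above (illustrative, not part of this
// diff): clear() keeps the node allocator's cached blocks for reuse, while
// destroy() also returns them via allocer_.clear().
//
//   ObHashMap<int64_t, int64_t> map;
//   map.create(1024, "HashMapTest");  // bucket count and label are illustrative
//   map.set_refactored(1, 10);
//   map.clear();    // entries gone; node blocks stay cached for the next insert
//   map.destroy();  // entries gone and cached node blocks handed back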
int reuse()

View File

@@ -114,6 +114,7 @@ public:
{
// ret should be handled by the caller.
int ret = ht_.destroy();
allocer_.clear();
return ret;
}
int clear()

View File

@@ -824,6 +824,7 @@ public:
//memset(buckets_, 0, sizeof(hashbucket) * bucket_num);
bucket_num_ = bucket_num;
allocer_ = allocer;
allocer_->inc_ref();
bucket_allocer_ = bucket_allocer;
}
return ret;
@@ -847,6 +848,7 @@ public:
buckets_[i].node = NULL;
}
}
allocer_->dec_ref();
allocer_ = NULL;
//delete[] buckets_;
//buckets_ = NULL;
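// The inc_ref()/dec_ref() calls added above pin the shared node allocator for
// the lifetime of the table. Their bodies are not shown in this diff, so the
// following is an assumed sketch of the protocol, not the actual implementation:
//
//   #include <atomic>
//   struct RefCountedAllocer {
//     std::atomic<int64_t> ref_cnt{0};
//     void inc_ref() { ref_cnt.fetch_add(1); }  // create() pins the allocator
//     void dec_ref() { ref_cnt.fetch_sub(1); }  // destroy() unpins it
//   };                                          // ref_cnt == 0 => caches safe to reclaim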

View File

@@ -1225,9 +1225,8 @@ struct SimpleAllocerBlock
int32_t ref_cnt;
int32_t cur_pos;
Node nodes[NODE_NUM];
Node *node_free_list;
Block *prev;
char nodes_buffer[NODE_NUM * sizeof(Node)];
Node *nodes;
Block *next;
};
@@ -1240,7 +1239,7 @@ template <class T>
struct NodeNumTraits<false, T>
{
static const int32_t NODE_NUM = (common::OB_MALLOC_NORMAL_BLOCK_SIZE -
24/*=sizeof(SimpleAllocerBlock's members except nodes)*/ - 128/*for robustness*/) /
24/*=sizeof(SimpleAllocerBlock's members except nodes_buffer)*/ - 128/*for robustness*/) /
(32/*sizeof(SimpleAllocerNode's members except data)*/ + sizeof(T));
};
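// Worked example of the formula above, assuming OB_MALLOC_NORMAL_BLOCK_SIZE is
// 8192 bytes (an assumption, not taken from this diff). For sizeof(T) == 1024:
//   NODE_NUM = (8192 - 24 - 128) / (32 + 1024) = 8040 / 1056 = 7
// i.e. seven nodes per block; a T for which 32 + sizeof(T) exceeds 8040 is
// classified as a big object by IS_BIG_OBJ below and handled by the
// NodeNumTraits<true, T> specialization.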
@@ -1253,28 +1252,6 @@ struct NodeNumTraits<true, T>
#define IS_BIG_OBJ(T) \
((common::OB_MALLOC_NORMAL_BLOCK_SIZE - 24 - 128) < (32 + sizeof(T)))
/*
block_free_list_:
Block C: node1->node2->node3...
^
|
Block B: node1->node2->node3...
^
|
Block A: node1->node2->node3...
alloc:
1. fetch from block_free_list_
2. fetch from block_remainder_
3. alloc new block
free:
check the block's reference count; a zero-reference block will be destroyed. A block has 4 states:
1. in the block_free_list_
2. is the block_remainder_
3. in the block_free_list_ && is the block_remainder_
4. neither
*/
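// The removed comment above describes a circular doubly-linked free list of
// blocks. A minimal self-contained sketch of that discipline (hypothetical
// names; the real removed code is add_to_fl()/take_off_from_fl() below):
struct SketchBlock
{
  SketchBlock *prev;
  SketchBlock *next;
};
// link a block at the head of the circular list
inline void sketch_add_to_fl(SketchBlock *&head, SketchBlock *block)
{
  if (NULL == head) {
    block->prev = block->next = block;   // first element points at itself
  } else {
    block->prev = head->prev;            // splice in before the old head
    block->next = head;
    head->prev->next = block;
    head->prev = block;
  }
  head = block;
}
// unlink a block in O(1), e.g. once its last free node has been handed out
inline void sketch_take_off_from_fl(SketchBlock *&head, SketchBlock *block)
{
  if (block == block->next) {
    head = NULL;                         // it was the only element
  } else {
    block->prev->next = block->next;
    block->next->prev = block->prev;
    if (head == block) {
      head = block->next;
    }
  }
  block->prev = block->next = NULL;      // NULL links mean "not in the list"
}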
template <class T,
int32_t NODE_NUM = NodeNumTraits<IS_BIG_OBJ(T), T>::NODE_NUM,
class DefendMode = SpinMutexDefendMode,
@@ -1289,50 +1266,67 @@ class SimpleAllocer
typedef typename DefendMode::lock_type lock_type;
typedef typename DefendMode::lock_initer lock_initer;
public:
SimpleAllocer() : block_remainder_(NULL), block_free_list_(NULL)
SimpleAllocer() : block_list_head_(NULL), free_list_head_(NULL)
{
lock_initer initer(lock_);
}
~SimpleAllocer()
{
OB_ASSERT(NULL == block_remainder_ &&
NULL == block_free_list_);
clear();
}
void set_attr(const ObMemAttr &attr) { allocer_.set_attr(attr); }
void set_label(const lib::ObLabel &label) { allocer_.set_label(label); }
void clear()
{
Block *iter = block_list_head_;
while (NULL != iter) {
Block *next = iter->next;
if (0 != iter->ref_cnt) {
HASH_WRITE_LOG_RET(HASH_FATAL, OB_ERR_UNEXPECTED, "there are still nodes that have not been freed, ref_cnt=%d block=%p cur_pos=%d",
iter->ref_cnt, iter, iter->cur_pos);
} else {
//delete iter;
allocer_.free(iter);
}
iter = next;
}
block_list_head_ = NULL;
free_list_head_ = NULL;
}
template <class ... TYPES>
T *alloc(TYPES&... args)
{
T *ret = NULL;
mutexlocker locker(lock_);
if (NULL != block_free_list_) {
Block *block = block_free_list_;
Node *node = block->node_free_list;
if (NULL != free_list_head_) {
Node *node = free_list_head_;
if (NODE_MAGIC1 != node->magic1 || NODE_MAGIC2 != node->magic2 || NULL == node->block) {
HASH_WRITE_LOG_RET(HASH_FATAL, OB_ERR_UNEXPECTED, "magic broken magic1=%x magic2=%x", node->magic1, node->magic2);
} else {
block->node_free_list = node->next;
if (block->node_free_list == NULL) {
take_off_from_fl(block);
}
free_list_head_ = node->next;
node->block->ref_cnt++;
ret = &(node->data);
}
} else {
Block *block = block_remainder_;
Block *block = block_list_head_;
if (NULL == block || block->cur_pos >= (int32_t)NODE_NUM) {
//if (NULL == (block = new(std::nothrow) Block()))
if (NULL == (block = (Block *)allocer_.alloc(sizeof(Block)))) {
HASH_WRITE_LOG_RET(HASH_WARNING, OB_ERR_UNEXPECTED, "alloc new block failed");
} else {
// BACKTRACE(WARN, ((int64_t)sizeof(Block))>DEFAULT_BLOCK_SIZE,
// "hashutil alloc block=%ld node=%ld T=%ld N=%d",
// sizeof(Block), sizeof(Node), sizeof(T), NODE_NUM);
//memset(block, 0, sizeof(Block));
block->ref_cnt = 0;
block->cur_pos = 0;
block->prev = block->next = NULL;
block->node_free_list = NULL;
block_remainder_ = block;
block->nodes = (Node *)(block->nodes_buffer);
block->next = block_list_head_;
block_list_head_ = block;
}
}
if (NULL != block) {
Node *node = &block->nodes[block->cur_pos];
Node *node = block->nodes + block->cur_pos;
block->cur_pos++;
block->ref_cnt++;
node->magic1 = NODE_MAGIC1;
@@ -1358,55 +1352,65 @@ public:
HASH_WRITE_LOG_RET(HASH_FATAL, OB_ERR_UNEXPECTED, "magic broken magic1=%x magic2=%x", node->magic1, node->magic2);
} else {
data->~T();
Block *block = node->block;
block->ref_cnt--;
if (0 == block->ref_cnt) {
if (block == block_remainder_) {
block_remainder_ = NULL;
}
// non-NULL means this block is in the freelist
if (block->next != NULL) {
take_off_from_fl(block);
}
allocer_.free(block);
} else {
node->next = block->node_free_list;
block->node_free_list = node;
// NULL means this block isn't in the freelist
if (block->next == NULL) {
add_to_fl(block);
}
}
node->block->ref_cnt--;
node->next = free_list_head_;
free_list_head_ = node;
}
}
}
void add_to_fl(Block *block)
void inc_ref()
{
if (block_free_list_ == NULL) {
block->prev = block->next = block;
block_free_list_ = block;
} else {
block->prev = block_free_list_->prev;
block->next = block_free_list_;
block_free_list_->prev->next = block;
block_free_list_->prev = block;
block_free_list_ = block;
}
}
void take_off_from_fl(Block *block)
void dec_ref()
{
if (block == block->next) {
block_free_list_ = NULL;
} else {
block->prev->next = block->next;
block->next->prev = block->prev;
block_free_list_ = block->next;
block->prev = block->next = NULL;
}
void gc()
{
mutexlocker locker(lock_);
if (NULL != free_list_head_ && NULL != block_list_head_) {
Block *block_iter = block_list_head_;
Block *block_next = NULL;
Block *block_prev = NULL;
while (NULL != block_iter) {
block_next = block_iter->next;
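// a block that was used (cur_pos != 0) but whose ref_cnt fell back to zero
// holds no live nodes: strip its nodes out of the free list, then release it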
if (0 == block_iter->ref_cnt && 0 != block_iter->cur_pos) {
Node *node_iter = free_list_head_;
Node *node_prev = free_list_head_;
volatile int32_t counter = 0;
while (NULL != node_iter
&& counter < NODE_NUM) {
if (block_iter == node_iter->block) {
if (free_list_head_ == node_iter) {
free_list_head_ = node_iter->next;
} else {
node_prev->next = node_iter->next;
}
counter++;
} else {
node_prev = node_iter;
}
node_iter = node_iter->next;
}
if (block_list_head_ == block_iter) {
block_list_head_ = block_iter->next;
} else {
block_prev->next = block_iter->next;
}
//delete block_iter;
allocer_.free(block_iter);
HASH_WRITE_LOG(HASH_DEBUG, "free succ block=%p", block_iter);
block_iter = NULL;
} else {
block_prev = block_iter;
}
block_iter = block_next;
}
}
}
private:
Block *block_remainder_;
Block *block_free_list_;
Block *block_list_head_;
Node *free_list_head_;
lock_type lock_;
Allocer allocer_;
};
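// Usage sketch of the restored allocator (mirrors the test below; not part of
// this commit): freed nodes stay cached on free_list_head_ until gc() runs.
inline void simple_allocer_lifecycle_sketch()
{
  SimpleAllocer<int> al;
  int *item = al.alloc();  // carve a node out of a cached (or fresh) block
  al.free(item);           // node goes onto the free list; its block stays cached
  al.gc();                 // blocks with ref_cnt == 0 go back to the underlying allocator
}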

View File

@@ -26,41 +26,107 @@ using namespace oceanbase;
using namespace common;
using namespace hash;
std::set<void *> ptr_set;
class MyAllocator : public ObIAllocator
const int64_t ITEM_NUM = 1024 * 128;
struct data_t
{
public:
void *alloc(const int64_t size)
uint32_t construct_flag;
char data[1024];
data_t()
{
void *ptr = ob_malloc(size, "test");
ptr_set.insert(ptr);
return ptr;
}
void *alloc(const int64_t, const ObMemAttr &)
{ return NULL; }
void free(void *ptr)
{
ptr_set.erase(ptr);
}
construct_flag = 0xffffffff;
};
};
typedef SimpleAllocer<data_t, 128> allocer_t;
template <class T>
void shuffle(T *array, const int64_t array_size)
{
srand(static_cast<int32_t>(time(NULL)));
for (int64_t i = 0; i < array_size; i++)
{
int64_t swap_pos = rand() % array_size;
std::swap(array[i], array[swap_pos]);
}
}
TEST(TestSimpleAllocer, allocate)
{
SimpleAllocer<int, 10, SpinMutexDefendMode, MyAllocator> alloc;
const int N = 100;
int *ptrs[N];
int i = N;
while (i--) {
int *ptr = alloc.alloc();
EXPECT_TRUE(ptr != NULL);
ptrs[i] = ptr;
allocer_t al;
data_t *nil = NULL;
data_t *store[ITEM_NUM];
for (int64_t i = 0; i < ITEM_NUM; i++)
{
EXPECT_NE(nil, (store[i] = al.alloc()));
EXPECT_EQ(0xffffffff, store[i]->construct_flag);
}
i = N;
EXPECT_TRUE(!ptr_set.empty());
while (i--) {
alloc.free(ptrs[i]);
for (int64_t i = 0; i < ITEM_NUM; i++)
{
store[i]->construct_flag = 0x00000000;
al.free(store[i]);
}
EXPECT_TRUE(ptr_set.empty());
for (int64_t i = ITEM_NUM - 1; i >= 0; i--)
{
data_t *tmp = NULL;
EXPECT_NE(nil, (tmp = al.alloc()));
EXPECT_EQ(0xffffffff, tmp->construct_flag);
}
}
TEST(TestSimpleAllocer, free)
{
allocer_t al;
data_t *store[ITEM_NUM];
for (int64_t i = 0; i < ITEM_NUM; i++)
{
store[i] = al.alloc();
}
al.free(NULL);
shuffle(store, ITEM_NUM);
for (int64_t i = 0; i < ITEM_NUM; i++)
{
al.free(store[i]);
}
for (int64_t i = ITEM_NUM - 1; i >= 0; i--)
{
data_t *tmp = NULL;
EXPECT_EQ(store[i], (tmp = al.alloc()));
}
for (int64_t i = 0; i < ITEM_NUM; i++)
{
al.free(store[i]);
if (0 == i % 128)
{
al.gc();
}
}
}
TEST(TestSimpleAllocer, DISABLED_overflow)
{
allocer_t al;
data_t *nil = NULL;
data_t *data[2];
data_t tmp;
EXPECT_NE(nil, (data[0] = al.alloc()));
// this case is used to test overflow
// copy more bytes than sizeof(data[0]). (actually safe)
// but Coverity treats it as a potential bug
memcpy(&tmp, data[0], sizeof(data_t) + sizeof(uint32_t));
memset(data[0], -1, sizeof(data_t) + sizeof(uint32_t));
al.free(data[0]);
memcpy(data[0], &tmp, sizeof(data_t) + sizeof(uint32_t));
al.free(data[0]);
memset(data[0], -1, sizeof(data_t) + sizeof(uint32_t));
EXPECT_EQ(nil, (data[1] = al.alloc()));
memcpy(data[0], &tmp, sizeof(data_t) + sizeof(uint32_t));
EXPECT_NE(nil, (data[1] = al.alloc()));
EXPECT_EQ(data[0], data[1]);
}
int main(int argc, char **argv)