[FEAT MERGE] Implement Resource Throttle

This commit is contained in:
ZenoWang 2024-02-07 17:25:56 +00:00 committed by ob-robot
parent df5ef10e8f
commit 33a6f4973d
93 changed files with 3350 additions and 1664 deletions

View File

@ -312,6 +312,22 @@
// LST_DO_CODE(CHECK, arg1, arg2) => CHECK(arg1); CHECK(arg2)
#define LST_DO_CODE(M, ...) LST_DO(M, (;), ## __VA_ARGS__)
// LST_DEFINE(A, B, ...) emits a forward class declaration for each argument
// (up to 10 names). ARGS_NUM counts the arguments; LST_DEFINE_<N> then peels
// one name per expansion step, e.g. LST_DEFINE(A, B) => class A; class B;
#define LST_DEFINE_0(...)
#define LST_DEFINE_1(A, ...) class A;
#define LST_DEFINE_2(A, ...) class A; LST_DEFINE_1(__VA_ARGS__)
#define LST_DEFINE_3(A, ...) class A; LST_DEFINE_2(__VA_ARGS__)
#define LST_DEFINE_4(A, ...) class A; LST_DEFINE_3(__VA_ARGS__)
#define LST_DEFINE_5(A, ...) class A; LST_DEFINE_4(__VA_ARGS__)
#define LST_DEFINE_6(A, ...) class A; LST_DEFINE_5(__VA_ARGS__)
#define LST_DEFINE_7(A, ...) class A; LST_DEFINE_6(__VA_ARGS__)
#define LST_DEFINE_8(A, ...) class A; LST_DEFINE_7(__VA_ARGS__)
#define LST_DEFINE_9(A, ...) class A; LST_DEFINE_8(__VA_ARGS__)
#define LST_DEFINE_10(A, ...) class A; LST_DEFINE_9(__VA_ARGS__)
// The extra indirection lets ARGS_NUM(...) expand before token-pasting the N.
#define LST_DEFINE__(N, ...) LST_DEFINE_ ## N(__VA_ARGS__)
#define LST_DEFINE_(...) LST_DEFINE__(__VA_ARGS__)
#define LST_DEFINE(...) LST_DEFINE_(ARGS_NUM(__VA_ARGS__), __VA_ARGS__)
#define ONE_TO_HUNDRED \
1, 2, 3, 4, 5, 6, 7, 8, 9, \
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, \

View File

@ -85,6 +85,7 @@
#include "storage/concurrency_control/ob_multi_version_garbage_collector.h"
#include "storage/tablelock/ob_table_lock_service.h"
#include "storage/tx/wrs/ob_tenant_weak_read_service.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h" // ObSharedMemAllocMgr
#include "logservice/palf/log_define.h"
#include "storage/access/ob_empty_read_bucket.h"
#include "storage/high_availability/ob_rebuild_service.h"
@ -669,6 +670,7 @@ int MockTenantModuleEnv::init()
MTL_BIND2(mtl_new_default, ObTenantSchemaService::mtl_init, nullptr, nullptr, nullptr, mtl_destroy_default);
MTL_BIND2(mtl_new_default, ObStorageLogger::mtl_init, ObStorageLogger::mtl_start, ObStorageLogger::mtl_stop, ObStorageLogger::mtl_wait, mtl_destroy_default);
MTL_BIND2(ObTenantMetaMemMgr::mtl_new, mtl_init_default, mtl_start_default, mtl_stop_default, mtl_wait_default, mtl_destroy_default);
MTL_BIND2(mtl_new_default, share::ObSharedMemAllocMgr::mtl_init, mtl_start_default, mtl_stop_default, mtl_wait_default, mtl_destroy_default);
MTL_BIND2(mtl_new_default, ObTransService::mtl_init, mtl_start_default, mtl_stop_default, mtl_wait_default, mtl_destroy_default);
MTL_BIND2(mtl_new_default, logservice::ObGarbageCollector::mtl_init, mtl_start_default, mtl_stop_default, mtl_wait_default, mtl_destroy_default);
MTL_BIND2(mtl_new_default, ObTimestampService::mtl_init, mtl_start_default, mtl_stop_default, mtl_wait_default, mtl_destroy_default);

View File

@ -124,7 +124,6 @@ public:
ObTenantMetaMemMgr *t3m = MTL(ObTenantMetaMemMgr *);
OB_ASSERT(OB_SUCCESS == ObTxDataTable::init(&ls, &tx_ctx_table_));
OB_ASSERT(OB_SUCCESS == mgr_.init(tablet_id, this, &freezer_, t3m));
mgr_.set_slice_allocator(get_slice_allocator());
return ret;
}
@ -245,7 +244,7 @@ void TestTxDataTable::insert_tx_data_()
ObTxDataGuard tx_data_guard;
tx_data_guard.reset();
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard));
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard, false));
ASSERT_NE(nullptr, tx_data = tx_data_guard.tx_data());
// fill in data
@ -282,7 +281,7 @@ void TestTxDataTable::insert_rollback_tx_data_()
for (int i = 0; i < 200; i++) {
ObTxDataGuard tx_data_guard;
ObTxData *tx_data = nullptr;
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard));
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard, false));
ASSERT_NE(nullptr, tx_data = tx_data_guard.tx_data());
// fill in data
@ -310,7 +309,7 @@ void TestTxDataTable::insert_abort_tx_data_()
tx_id = INT64_MAX - 3;
ObTxDataGuard tx_data_guard;
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard));
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard, false));
ASSERT_NE(nullptr, tx_data = tx_data_guard.tx_data());
// fill in data
@ -459,7 +458,7 @@ void TestTxDataTable::do_undo_status_test()
{
ObTxData *tx_data = nullptr;
ObTxDataGuard tx_data_guard;
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard));
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard, false));
ASSERT_NE(nullptr, tx_data = tx_data_guard.tx_data());
tx_data->tx_id_ = rand();
@ -485,7 +484,7 @@ void TestTxDataTable::do_undo_status_test()
// so the undo status just have one undo status node
ObTxData *tx_data = nullptr;
ObTxDataGuard tx_data_guard;
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard));
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard, false));
ASSERT_NE(nullptr, tx_data = tx_data_guard.tx_data());
tx_data->tx_id_ = rand();
@ -511,7 +510,7 @@ void TestTxDataTable::test_serialize_with_action_cnt_(int cnt)
{
ObTxData *tx_data = nullptr;
ObTxDataGuard tx_data_guard;
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard));
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard, false));
ASSERT_NE(nullptr, tx_data = tx_data_guard.tx_data());
tx_data->tx_id_ = transaction::ObTransID(269381);
tx_data->commit_version_.convert_for_logservice(ObTimeUtil::current_time_ns());
@ -540,12 +539,12 @@ void TestTxDataTable::test_serialize_with_action_cnt_(int cnt)
ObTxData *new_tx_data = nullptr;
ObTxDataGuard new_tx_data_guard;
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(new_tx_data_guard));
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(new_tx_data_guard, false));
ASSERT_NE(nullptr, new_tx_data = new_tx_data_guard.tx_data());
new_tx_data->tx_id_ = transaction::ObTransID(269381);
pos = 0;
ASSERT_EQ(OB_SUCCESS, new_tx_data->deserialize(buf, serialize_size, pos,
*tx_data_table_.get_slice_allocator()));
*tx_data_table_.get_tx_data_allocator()));
ASSERT_TRUE(new_tx_data->equals_(*tx_data));
tx_data->dec_ref();
new_tx_data->dec_ref();
@ -646,7 +645,7 @@ void TestTxDataTable::do_repeat_insert_test() {
tx_id = transaction::ObTransID(269381);
tx_data = nullptr;
ObTxDataGuard tx_data_guard;
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard));
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard, false));
ASSERT_NE(nullptr, tx_data = tx_data_guard.tx_data());
// fill in data

View File

@ -210,7 +210,7 @@ int ObTxDataSingleRowGetter::deserialize_tx_data_from_store_buffers_(ObTxData &t
p_dest += tx_data_buffers_[idx].get_ob_string().length();
}
tx_data.tx_id_ = tx_id_;
if (OB_FAIL(tx_data.deserialize(merge_buffer, total_buffer_size, pos, slice_allocator_))) {
if (OB_FAIL(tx_data.deserialize(merge_buffer, total_buffer_size, pos, tx_data_allocator_))) {
STORAGE_LOG(WARN, "deserialize tx data failed", KR(ret), KPHEX(merge_buffer, total_buffer_size));
hex_dump(merge_buffer, total_buffer_size, true, OB_LOG_LEVEL_WARN);
} else if (!tx_data.is_valid_in_tx_data_table()) {

View File

@ -41,7 +41,6 @@
#include "observer/mysql/ob_query_retry_ctrl.h"
#include "rpc/obrpc/ob_rpc_handler.h"
#include "rpc/obrpc/ob_rpc_proxy.h"
#include "share/allocator/ob_memstore_allocator_mgr.h"
#include "share/allocator/ob_tenant_mutil_allocator_mgr.h"
#include "share/cache/ob_cache_name_define.h"
#include "share/interrupt/ob_global_interrupt_call.h"
@ -403,8 +402,6 @@ int ObServer::init(const ObServerOptions &opts, const ObPLogWriterCfg &log_cfg)
LOG_ERROR("init bl_service_ failed", KR(ret));
} else if (OB_FAIL(ObDeviceManager::get_instance().init_devices_env())) {
LOG_ERROR("init device manager failed", KR(ret));
} else if (OB_FAIL(ObMemstoreAllocatorMgr::get_instance().init())) {
LOG_ERROR("init ObMemstoreAllocatorMgr failed", KR(ret));
} else if (OB_FAIL(ObTenantMutilAllocatorMgr::get_instance().init())) {
LOG_ERROR("init ObTenantMutilAllocatorMgr failed", KR(ret));
} else if (OB_FAIL(ObExternalTableFileManager::get_instance().init())) {

View File

@ -42,6 +42,7 @@
#include "sql/engine/px/ob_px_admission.h"
#include "share/ob_get_compat_mode.h"
#include "storage/tx/wrs/ob_tenant_weak_read_service.h" // ObTenantWeakReadService
#include "share/allocator/ob_shared_memory_allocator_mgr.h" // ObSharedMemAllocMgr
#include "share/allocator/ob_tenant_mutil_allocator.h"
#include "share/allocator/ob_tenant_mutil_allocator_mgr.h"
#include "share/stat/ob_opt_stat_monitor_manager.h"
@ -438,6 +439,7 @@ int ObMultiTenant::init(ObAddr myaddr,
MTL_BIND2(mtl_new_default, storage::mds::ObTenantMdsService::mtl_init, storage::mds::ObTenantMdsService::mtl_start, storage::mds::ObTenantMdsService::mtl_stop, storage::mds::ObTenantMdsService::mtl_wait, mtl_destroy_default);
MTL_BIND2(mtl_new_default, ObStorageLogger::mtl_init, ObStorageLogger::mtl_start, ObStorageLogger::mtl_stop, ObStorageLogger::mtl_wait, mtl_destroy_default);
MTL_BIND2(ObTenantMetaMemMgr::mtl_new, mtl_init_default, mtl_start_default, mtl_stop_default, mtl_wait_default, mtl_destroy_default);
MTL_BIND2(mtl_new_default, share::ObSharedMemAllocMgr::mtl_init, mtl_start_default, mtl_stop_default, mtl_wait_default, mtl_destroy_default);
MTL_BIND2(mtl_new_default, ObTransService::mtl_init, mtl_start_default, mtl_stop_default, mtl_wait_default, mtl_destroy_default);
MTL_BIND2(mtl_new_default, ObLogService::mtl_init, mtl_start_default, mtl_stop_default, mtl_wait_default, ObLogService::mtl_destroy);
MTL_BIND2(mtl_new_default, logservice::ObGarbageCollector::mtl_init, mtl_start_default, mtl_stop_default, mtl_wait_default, mtl_destroy_default);
@ -1297,6 +1299,9 @@ int ObMultiTenant::update_tenant_config(uint64_t tenant_id)
if (OB_TMP_FAIL(update_tenant_freezer_config_())) {
LOG_WARN("failed to update tenant tenant freezer config", K(tmp_ret), K(tenant_id));
}
if (OB_TMP_FAIL(update_throttle_config_())) {
}
}
}
LOG_INFO("update_tenant_config success", K(tenant_id));
@ -1361,6 +1366,20 @@ int ObMultiTenant::update_tenant_freezer_config_()
return ret;
}
int ObMultiTenant::update_throttle_config_()
{
int ret = OB_SUCCESS;
ObSharedMemAllocMgr *share_mem_alloc_mgr = MTL(ObSharedMemAllocMgr *);
if (OB_ISNULL(share_mem_alloc_mgr)) {
ret = OB_ERR_UNEXPECTED;
LOG_ERROR("share mem alloc mgr should not be null", K(ret));
} else {
(void)share_mem_alloc_mgr->update_throttle_config();
}
return ret;
}
int ObMultiTenant::update_tenant_freezer_mem_limit(const uint64_t tenant_id,
const int64_t tenant_min_mem,
const int64_t tenant_max_mem)

View File

@ -187,6 +187,7 @@ protected:
private:
int update_tenant_freezer_config_();
int update_throttle_config_();
protected:
static const int DEL_TRY_TIMES = 30;
enum class ObTenantCreateStep {

View File

@ -13,7 +13,7 @@
#include "ob_all_virtual_tenant_memstore_allocator_info.h"
#include "observer/ob_server.h"
#include "observer/ob_server_utils.h"
#include "share/allocator/ob_memstore_allocator_mgr.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h"
#include "storage/memtable/ob_memtable.h"
namespace oceanbase
@ -28,7 +28,7 @@ class MemstoreInfoFill
public:
typedef ObMemstoreAllocatorInfo Item;
typedef ObArray<Item> ItemArray;
typedef ObGMemstoreAllocator::AllocHandle Handle;
typedef ObMemstoreAllocator::AllocHandle Handle;
MemstoreInfoFill(ItemArray& array): array_(array) {}
~MemstoreInfoFill() {}
int operator()(ObDLink* link) {
@ -48,7 +48,6 @@ public:
ObAllVirtualTenantMemstoreAllocatorInfo::ObAllVirtualTenantMemstoreAllocatorInfo()
: ObVirtualTableIterator(),
allocator_mgr_(ObMemstoreAllocatorMgr::get_instance()),
tenant_ids_(),
memstore_infos_(),
memstore_infos_idx_(0),
@ -114,25 +113,24 @@ int ObAllVirtualTenantMemstoreAllocatorInfo::fill_tenant_ids()
int ObAllVirtualTenantMemstoreAllocatorInfo::fill_memstore_infos(const uint64_t tenant_id)
{
int ret = OB_SUCCESS;
ObGMemstoreAllocator *ta = NULL;
memstore_infos_.reset();
if (tenant_id <= 0) {
ret = OB_INVALID_ARGUMENT;
SERVER_LOG(WARN, "invalid tenant_id", K(tenant_id), K(ret));
} else if (OB_FAIL(allocator_mgr_.get_tenant_memstore_allocator(tenant_id, ta))) {
SERVER_LOG(WARN, "failed to get tenant memstore allocator", K(tenant_id), K(ret));
} else if (OB_ISNULL(ta)) {
ret = OB_ERR_UNEXPECTED;
SERVER_LOG(WARN, "got tenant memstore allocator is NULL", K(tenant_id), K(ret));
} else {
MemstoreInfoFill fill_func(memstore_infos_);
if (OB_FAIL(ta->for_each(fill_func))) {
SERVER_LOG(WARN, "fill memstore info fail", K(ret));
} else {
retire_clock_ = ta->get_retire_clock();
memstore_infos_idx_ = 0;
MTL_SWITCH(tenant_id)
{
ObMemstoreAllocator &memstore_allocator = MTL(ObSharedMemAllocMgr *)->memstore_allocator();
MemstoreInfoFill fill_func(memstore_infos_);
if (OB_FAIL(memstore_allocator.for_each(fill_func))) {
SERVER_LOG(WARN, "fill memstore info fail", K(ret));
} else {
retire_clock_ = memstore_allocator.get_retire_clock();
memstore_infos_idx_ = 0;
}
}
}
return ret;
}
@ -144,9 +142,13 @@ int ObAllVirtualTenantMemstoreAllocatorInfo::inner_get_next_row(ObNewRow *&row)
SERVER_LOG(WARN, "allocator_ shouldn't be NULL", K(ret));
} else {
while (OB_SUCC(ret) && memstore_infos_idx_ >= memstore_infos_.count()) {
int64_t tenant_id = 0;
if (tenant_ids_idx_ >= tenant_ids_.count() - 1) {
ret = OB_ITER_END;
} else if (OB_FAIL(fill_memstore_infos(tenant_ids_.at(++tenant_ids_idx_)))) {
} else if (FALSE_IT(tenant_id = tenant_ids_.at(++tenant_ids_idx_))) {
} else if (is_virtual_tenant_id(tenant_id)) {
// do nothing
} else if (OB_FAIL(fill_memstore_infos(tenant_id))) {
SERVER_LOG(WARN, "fail to fill_memstore_infos", K(ret));
} else {/*do nothing*/}
}

View File

@ -18,10 +18,6 @@
namespace oceanbase
{
namespace common
{
class ObMemstoreAllocatorMgr;
}
namespace observer
{
struct ObMemstoreAllocatorInfo
@ -65,7 +61,6 @@ private:
};
int fill_tenant_ids();
int fill_memstore_infos(const uint64_t tenant_id);
common::ObMemstoreAllocatorMgr &allocator_mgr_;
common::ObArray<uint64_t> tenant_ids_;
common::ObArray<MemstoreInfo> memstore_infos_;
int64_t memstore_infos_idx_;

View File

@ -9574,26 +9574,18 @@ int ObRootService::set_config_pre_hook(obrpc::ObAdminSetConfigArg &arg)
if (item->name_.is_empty()) {
ret = OB_INVALID_ARGUMENT;
LOG_WARN("empty config name", "item", *item, K(ret));
} else if (0 == STRCMP(item->name_.ptr(), _TX_SHARE_MEMORY_LIMIT_PERCENTAGE)) {
ret = check_tx_share_memory_limit_(*item);
} else if (0 == STRCMP(item->name_.ptr(), MEMSTORE_LIMIT_PERCENTAGE)) {
ret = check_memstore_limit_(*item);
} else if (0 == STRCMP(item->name_.ptr(), _TX_DATA_MEMORY_LIMIT_PERCENTAGE)) {
ret = check_tx_data_memory_limit_(*item);
} else if (0 == STRCMP(item->name_.ptr(), _MDS_MEMORY_LIMIT_PERCENTAGE)) {
ret = check_mds_memory_limit_(*item);
} else if (0 == STRCMP(item->name_.ptr(), FREEZE_TRIGGER_PERCENTAGE)) {
// check write throttle percentage
for (int i = 0; i < item->tenant_ids_.count() && valid; i++) {
valid = valid && ObConfigFreezeTriggerIntChecker::check(item->tenant_ids_.at(i), *item);
if (!valid) {
ret = OB_INVALID_ARGUMENT;
LOG_USER_ERROR(OB_INVALID_ARGUMENT, "tenant freeze_trigger_percentage which should smaller than writing_throttling_trigger_percentage");
LOG_WARN("config invalid", "item", *item, K(ret), K(i), K(item->tenant_ids_.at(i)));
}
}
ret = check_freeze_trigger_percentage_(*item);
} else if (0 == STRCMP(item->name_.ptr(), WRITING_THROTTLEIUNG_TRIGGER_PERCENTAGE)) {
// check freeze trigger
for (int i = 0; i < item->tenant_ids_.count() && valid; i++) {
valid = valid && ObConfigWriteThrottleTriggerIntChecker::check(item->tenant_ids_.at(i), *item);
if (!valid) {
ret = OB_INVALID_ARGUMENT;
LOG_USER_ERROR(OB_INVALID_ARGUMENT, "tenant writing_throttling_trigger_percentage which should greater than freeze_trigger_percentage");
LOG_WARN("config invalid", "item", *item, K(ret), K(i), K(item->tenant_ids_.at(i)));
}
}
ret = check_write_throttle_trigger_percentage(*item);
} else if (0 == STRCMP(item->name_.ptr(), WEAK_READ_VERSION_REFRESH_INTERVAL)) {
int64_t refresh_interval = ObConfigTimeParser::get(item->value_.ptr(), valid);
if (valid && OB_FAIL(check_weak_read_version_refresh_interval(refresh_interval, valid))) {
@ -9674,6 +9666,76 @@ int ObRootService::set_config_pre_hook(obrpc::ObAdminSetConfigArg &arg)
return ret;
}
// Validates the config value carried by `item` (taken from the enclosing
// scope) for every tenant it targets, via FUNCTOR::check(tenant_id, item).
// On the first failing tenant it sets `ret` to OB_INVALID_ARGUMENT and emits
// the LOG_INFO text as the user-facing error message.
// NOTE(review): the macro parameter named LOG_INFO shadows the logging macro
// of the same name inside this expansion — harmless here, but confusing.
#define CHECK_TENANTS_CONFIG_WITH_FUNC(FUNCTOR, LOG_INFO) \
do { \
bool valid = true; \
for (int i = 0; i < item.tenant_ids_.count() && valid; i++) { \
valid = valid && FUNCTOR::check(item.tenant_ids_.at(i), item); \
if (!valid) { \
ret = OB_INVALID_ARGUMENT; \
LOG_USER_ERROR(OB_INVALID_ARGUMENT, LOG_INFO); \
LOG_WARN("config invalid", "item", item, K(ret), K(i), K(item.tenant_ids_.at(i))); \
} \
} \
} while (0)
// Validate the new _tx_share_memory_limit_percentage value for every tenant
// targeted by `item`; it must cover each sub-module limit inside it.
int ObRootService::check_tx_share_memory_limit_(obrpc::ObAdminSetConfigItem &item)
{
int ret = OB_SUCCESS;
// There is a prefix "Incorrect arguments to " prepended to the user log, so
// the message below reads a little oddly on its own.
const char *warn_log = "tenant config _tx_share_memory_limit_percentage. "
"It should larger than or equal with any single module in it(Memstore, TxData, Mds)";
CHECK_TENANTS_CONFIG_WITH_FUNC(ObConfigTxShareMemoryLimitChecker, warn_log);
return ret;
}
// Validate memstore_limit_percentage for every tenant targeted by `item`.
// Fix: the original passed ObConfigTxDataLimitChecker — copy-pasted from
// check_tx_data_memory_limit_ below — so memstore_limit_percentage was
// validated with the tx-data rule instead of the memstore-specific one.
int ObRootService::check_memstore_limit_(obrpc::ObAdminSetConfigItem &item)
{
  int ret = OB_SUCCESS;
  // user-visible text; the framework prefixes "Incorrect arguments to "
  const char *warn_log = "tenant config memstore_limit_percentage. "
                         "It should less than or equal with _tx_share_memory_limit_percentage";
  CHECK_TENANTS_CONFIG_WITH_FUNC(ObConfigMemstoreLimitChecker, warn_log);
  return ret;
}
// Validate the new _tx_data_memory_limit_percentage value for every tenant
// targeted by `item`; it must not exceed _tx_share_memory_limit_percentage.
int ObRootService::check_tx_data_memory_limit_(obrpc::ObAdminSetConfigItem &item)
{
  int ret = OB_SUCCESS;
  // user-visible text; the framework prefixes "Incorrect arguments to "
  const char *user_warn_msg = "tenant config _tx_data_memory_limit_percentage. "
                              "It should less than or equal with _tx_share_memory_limit_percentage";
  CHECK_TENANTS_CONFIG_WITH_FUNC(ObConfigTxDataLimitChecker, user_warn_msg);
  return ret;
}
// Validate the new _mds_memory_limit_percentage value for every tenant
// targeted by `item`; it must not exceed _tx_share_memory_limit_percentage.
int ObRootService::check_mds_memory_limit_(obrpc::ObAdminSetConfigItem &item)
{
  int ret = OB_SUCCESS;
  // user-visible text; the framework prefixes "Incorrect arguments to "
  const char *user_warn_msg = "tenant config _mds_memory_limit_percentage. "
                              "It should less than or equal with _tx_share_memory_limit_percentage";
  CHECK_TENANTS_CONFIG_WITH_FUNC(ObConfigMdsLimitChecker, user_warn_msg);
  return ret;
}
// Validate freeze_trigger_percentage for every tenant targeted by `item`;
// it must stay below writing_throttling_trigger_percentage.
int ObRootService::check_freeze_trigger_percentage_(obrpc::ObAdminSetConfigItem &item)
{
  int ret = OB_SUCCESS;
  // user-visible text; the framework prefixes "Incorrect arguments to "
  const char *user_warn_msg = "tenant freeze_trigger_percentage "
                              "which should smaller than writing_throttling_trigger_percentage";
  CHECK_TENANTS_CONFIG_WITH_FUNC(ObConfigFreezeTriggerIntChecker, user_warn_msg);
  return ret;
}
// Validate writing_throttling_trigger_percentage for every tenant targeted
// by `item`; it must stay above freeze_trigger_percentage.
// NOTE(review): unlike its private siblings this name lacks the trailing
// underscore; the header declares it that way too, so renaming is a
// two-file change — flagging rather than fixing here.
int ObRootService::check_write_throttle_trigger_percentage(obrpc::ObAdminSetConfigItem &item)
{
  int ret = OB_SUCCESS;
  // user-visible text; the framework prefixes "Incorrect arguments to "
  const char *user_warn_msg = "tenant writing_throttling_trigger_percentage "
                              "which should greater than freeze_trigger_percentage";
  CHECK_TENANTS_CONFIG_WITH_FUNC(ObConfigWriteThrottleTriggerIntChecker, user_warn_msg);
  return ret;
}
#undef CHECK_TENANTS_CONFIG_WITH_FUNC
int ObRootService::set_config_post_hook(const obrpc::ObAdminSetConfigArg &arg)
{
int ret = OB_SUCCESS;

View File

@ -893,6 +893,12 @@ private:
int old_cancel_delete_server(const obrpc::ObAdminServerArg &arg);
int parallel_ddl_pre_check_(const uint64_t tenant_id);
int check_tx_share_memory_limit_(obrpc::ObAdminSetConfigItem &item);
int check_memstore_limit_(obrpc::ObAdminSetConfigItem &item);
int check_tx_data_memory_limit_(obrpc::ObAdminSetConfigItem &item);
int check_mds_memory_limit_(obrpc::ObAdminSetConfigItem &item);
int check_freeze_trigger_percentage_(obrpc::ObAdminSetConfigItem &item);
int check_write_throttle_trigger_percentage(obrpc::ObAdminSetConfigItem &item);
private:
static const int64_t OB_MAX_CLUSTER_REPLICA_COUNT = 10000000;
static const int64_t OB_ROOT_SERVICE_START_FAIL_COUNT_UPPER_LIMIT = 5;

View File

@ -13,11 +13,13 @@ ob_set_subtarget(ob_share SCHEMA_CPPS
ob_set_subtarget(ob_share allocator
allocator/ob_fifo_arena.cpp
allocator/ob_gmemstore_allocator.cpp
allocator/ob_memstore_allocator.cpp
allocator/ob_handle_list.cpp
allocator/ob_memstore_allocator_mgr.cpp
allocator/ob_mds_allocator.cpp
allocator/ob_shared_memory_allocator_mgr.cpp
allocator/ob_tenant_mutil_allocator.cpp
allocator/ob_tenant_mutil_allocator_mgr.cpp
allocator/ob_tx_data_allocator.cpp
)
ob_set_subtarget(ob_share backup
@ -522,6 +524,7 @@ if (OB_ERRSIM)
endif()
ob_set_subtarget(ob_share throttle
throttle/ob_share_throttle_define.cpp
throttle/ob_throttle_common.cpp
)

View File

@ -16,15 +16,15 @@
#include <malloc.h>
#endif
#include "math.h"
#include "ob_memstore_allocator_mgr.h"
#include "share/ob_tenant_mgr.h"
#include "observer/omt/ob_tenant_config_mgr.h"
#include "lib/alloc/alloc_struct.h"
#include "lib/stat/ob_diagnose_info.h"
#include "share/throttle/ob_throttle_common.h"
#include "observer/omt/ob_tenant_config_mgr.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h"
#include "share/ob_tenant_mgr.h"
using namespace oceanbase::lib;
using namespace oceanbase::omt;
using namespace oceanbase::share;
namespace oceanbase
{
namespace common
@ -42,58 +42,7 @@ int64_t ObFifoArena::Page::get_actual_hold_size()
#endif
}
// Reset all throttle parameters and statistics to their initial state.
void ObFifoArena::ObWriteThrottleInfo::reset()
{
decay_factor_ = 0.0;
alloc_duration_ = 0;
trigger_percentage_ = 0;
memstore_threshold_ = 0;
// statistics are read/written concurrently, so clear them with atomic stores
ATOMIC_SET(&period_throttled_count_, 0);
ATOMIC_SET(&period_throttled_time_, 0);
ATOMIC_SET(&total_throttled_count_, 0);
ATOMIC_SET(&total_throttled_time_, 0);
}
// Clear only the per-period counters; the cumulative totals are preserved.
void ObFifoArena::ObWriteThrottleInfo::reset_period_stat_info()
{
ATOMIC_SET(&period_throttled_count_, 0);
ATOMIC_SET(&period_throttled_time_, 0);
}
// Account one throttled allocation: `interval` (the time spent throttled;
// units follow the caller — presumably microseconds, confirm) is added to
// both the per-period and the cumulative statistics.
void ObFifoArena::ObWriteThrottleInfo::record_limit_event(int64_t interval)
{
ATOMIC_INC(&period_throttled_count_);
ATOMIC_FAA(&period_throttled_time_, interval);
ATOMIC_INC(&total_throttled_count_);
ATOMIC_FAA(&total_throttled_time_, interval);
}
// Recompute decay_factor_ whenever any throttle input (memstore threshold,
// trigger percentage, alloc duration) changed, or the cached factor is not
// yet valid. Always returns OB_SUCCESS.
int ObFifoArena::ObWriteThrottleInfo::check_and_calc_decay_factor(int64_t memstore_threshold,
int64_t trigger_percentage,
int64_t alloc_duration)
{
int ret = OB_SUCCESS;
if (memstore_threshold != memstore_threshold_
|| trigger_percentage != trigger_percentage_
|| alloc_duration != alloc_duration_
|| decay_factor_ <= 0) {
memstore_threshold_ = memstore_threshold;
trigger_percentage_ = trigger_percentage;
alloc_duration_ = alloc_duration;
// memory still allocatable once the throttle trigger line is crossed
int64_t available_mem = (100 - trigger_percentage_) * memstore_threshold_ / 100;
// N = number of MEM_SLICE_SIZE slices in that budget; the factor scales the
// per-slice cubic sleep (decay * seq^3, see get_throttling_interval) so the
// sum of sleeps over N slices consumes roughly alloc_duration.
// Sum of cubes 1..N is (N(N+1)/2)^2, hence the N^2(N+1)^2/4 divisor.
double N = static_cast<double>(available_mem) / static_cast<double>(MEM_SLICE_SIZE);
double decay_factor = (static_cast<double>(alloc_duration) - N * static_cast<double>(MIN_INTERVAL))/ static_cast<double>((((N*(N+1)*N*(N+1)))/4));
// clamp to 0: a negative factor (duration too small for N slices) is invalid
decay_factor_ = decay_factor < 0 ? 0 : decay_factor;
COMMON_LOG(INFO, "recalculate decay factor", K(memstore_threshold_), K(trigger_percentage_),
K(decay_factor_), K(alloc_duration), K(available_mem), K(N));
if (decay_factor < 0) {
LOG_ERROR("decay factor is smaller than 0", K(decay_factor), K(alloc_duration), K(N));
}
}
return ret;
}
int ObFifoArena::init(uint64_t tenant_id)
int ObFifoArena::init()
{
int ret = OB_SUCCESS;
lib::ObMallocAllocator *allocator = lib::ObMallocAllocator::get_instance();
@ -107,7 +56,7 @@ int ObFifoArena::init(uint64_t tenant_id)
}
if (OB_SUCC(ret)) {
attr_.tenant_id_ = tenant_id;
attr_.tenant_id_ = MTL_ID();
attr_.label_ = ObNewModIds::OB_MEMSTORE;
attr_.ctx_id_ = ctx_id;
}
@ -161,7 +110,7 @@ void* ObFifoArena::alloc(int64_t adv_idx, Handle& handle, int64_t size)
int ret = OB_SUCCESS;
void* ptr = NULL;
int64_t rsize = size + sizeof(Page) + sizeof(Ref);
speed_limit(ATOMIC_LOAD(&hold_), size);
CriticalGuard(get_qs());
int64_t way_id = get_way_id();
int64_t idx = get_idx(adv_idx, way_id);
@ -281,243 +230,5 @@ void ObFifoArena::destroy_page(Page* page)
}
}
bool ObFifoArena::need_do_writing_throttle() const
{
bool need_do_writing_throttle = false;
int64_t trigger_percentage = get_writing_throttling_trigger_percentage_();
if (trigger_percentage < 100) {
int64_t trigger_mem_limit = lastest_memstore_threshold_ * trigger_percentage / 100;
int64_t cur_mem_hold = ATOMIC_LOAD(&hold_);
need_do_writing_throttle = cur_mem_hold > trigger_mem_limit;
}
return need_do_writing_throttle;
}
// Account an allocation of `alloc_size` bytes against the write-throttle
// clock: when the hold exceeds the trigger line, reserve a sequence range on
// max_seq_ and (re)compute the decay factor; the throttle decision and the
// reserved sequence are published through thread-local state for the caller's
// thread to act on.
void ObFifoArena::speed_limit(const int64_t cur_mem_hold, const int64_t alloc_size)
{
int ret = OB_SUCCESS;
int64_t trigger_percentage = get_writing_throttling_trigger_percentage_();
int64_t trigger_mem_limit = 0;
bool need_speed_limit = false;
int64_t seq = max_seq_;
int64_t throttling_interval = 0;
// trigger_percentage >= 100 means throttling is disabled
if (trigger_percentage < 100) {
if (OB_UNLIKELY(cur_mem_hold < 0 || alloc_size <= 0 || lastest_memstore_threshold_ <= 0 || trigger_percentage <= 0)) {
COMMON_LOG(ERROR, "invalid arguments", K(cur_mem_hold), K(alloc_size), K(lastest_memstore_threshold_), K(trigger_percentage));
} else if (cur_mem_hold > (trigger_mem_limit = lastest_memstore_threshold_ * trigger_percentage / 100)) {
// over the trigger line: reserve this allocation's slot in the sequence
need_speed_limit = true;
seq = ATOMIC_AAF(&max_seq_, alloc_size);
int64_t alloc_duration = get_writing_throttling_maximum_duration_();
if (OB_FAIL(throttle_info_.check_and_calc_decay_factor(lastest_memstore_threshold_, trigger_percentage, alloc_duration))) {
COMMON_LOG(WARN, "failed to check_and_calc_decay_factor", K(cur_mem_hold), K(alloc_size), K(throttle_info_));
}
}
advance_clock();
// publish the decision to this thread's TLS; the actual waiting happens later
get_seq() = seq;
tl_need_speed_limit() = need_speed_limit;
share::get_thread_alloc_stat() += alloc_size;
// rate-limited progress report while throttling is active
if (need_speed_limit && REACH_TIME_INTERVAL(1 * 1000 * 1000L)) {
COMMON_LOG(INFO, "report write throttle info", K(alloc_size), K(attr_), K(throttling_interval),
"max_seq_", ATOMIC_LOAD(&max_seq_), K(clock_),
K(cur_mem_hold), K(throttle_info_), K(seq));
}
}
}
bool ObFifoArena::check_clock_over_seq(const int64_t req)
{
advance_clock();
int64_t clock = ATOMIC_LOAD(&clock_);
return req <= clock;
}
// Advance the throttle clock if an interval has elapsed, then return it.
int64_t ObFifoArena::get_clock()
{
advance_clock();
return clock_;
}
// Push clock_ forward by up to `skip_size`, never past max_seq_, so writers
// blocked on the throttle clock can make progress sooner; callers decide when
// skipping is appropriate.
void ObFifoArena::skip_clock(const int64_t skip_size)
{
int64_t ov = 0;
int64_t nv = ATOMIC_LOAD(&clock_);
// CAS loop: retry while clock_ is still behind max_seq_ and another thread
// moved it under us (ATOMIC_CAS returns the value observed before the swap).
while ((ov = nv) < ATOMIC_LOAD(&max_seq_)
&& ov != (nv = ATOMIC_CAS(&clock_, ov, min(ATOMIC_LOAD(&max_seq_), ov + skip_size)))) {
PAUSE();
// rate-limited diagnostics while the CAS keeps failing
if (REACH_TIME_INTERVAL(100 * 1000L)) {
const int64_t max_seq = ATOMIC_LOAD(&max_seq_);
const int64_t cur_mem_hold = ATOMIC_LOAD(&hold_);
COMMON_LOG(INFO, "skip clock",
K(clock_), K(max_seq_), K(skip_size), K(cur_mem_hold), K(attr_.tenant_id_));
}
}
}
// At most once per ADVANCE_CLOCK_INTERVAL, grant a fresh memory budget to
// clock_ (bounded by max_seq_). The CAS on last_update_ts_ ensures only one
// thread performs the advance for a given interval.
void ObFifoArena::advance_clock()
{
int64_t cur_ts = ObTimeUtility::current_time();
int64_t old_ts = last_update_ts_;
const int64_t advance_us = cur_ts - old_ts;
if ((advance_us > ADVANCE_CLOCK_INTERVAL) &&
old_ts == ATOMIC_CAS(&last_update_ts_, old_ts, cur_ts)) {
const int64_t trigger_percentage = get_writing_throttling_trigger_percentage_();
const int64_t trigger_mem_limit = lastest_memstore_threshold_ * trigger_percentage / 100;
const int64_t cur_mem_hold = ATOMIC_LOAD(&hold_);
// how many bytes the elapsed time entitles writers to allocate
const int64_t mem_limit = calc_mem_limit(cur_mem_hold, trigger_mem_limit, advance_us);
const int64_t clock = ATOMIC_LOAD(&clock_);
const int64_t max_seq = ATOMIC_LOAD(&max_seq_);
// never run the clock past the highest reserved sequence
ATOMIC_SET(&clock_, min(max_seq, clock + mem_limit));
if (REACH_TIME_INTERVAL(100 * 1000L)) {
COMMON_LOG(INFO, "current clock is ",
K(clock_), K(max_seq_), K(mem_limit), K(cur_mem_hold), K(attr_.tenant_id_));
}
}
}
// Estimate how long the caller holding reserved sequence `seq` must wait for
// clock_ to catch up, extrapolating from the budget one ADVANCE_CLOCK_INTERVAL
// can grant. Returns 0 when the clock already covers `seq`.
int64_t ObFifoArena::expected_wait_time(const int64_t seq) const
{
int64_t expected_wait_time = 0;
int64_t trigger_percentage = get_writing_throttling_trigger_percentage_();
int64_t trigger_mem_limit = lastest_memstore_threshold_ * trigger_percentage / 100;
int64_t can_assign_in_next_period = calc_mem_limit(hold_, trigger_mem_limit, ADVANCE_CLOCK_INTERVAL);
int64_t clock = ATOMIC_LOAD(&clock_);
if (seq > clock) {
if (can_assign_in_next_period != 0) {
expected_wait_time = (seq - clock) * ADVANCE_CLOCK_INTERVAL / can_assign_in_next_period;
} else {
// no budget at all right now: wait at least one advance interval
expected_wait_time = ADVANCE_CLOCK_INTERVAL;
}
}
return expected_wait_time;
}
// How much memory we can get after `dt` time: inverts the cubic decay curve
// (per-slice interval ~ decay_factor * seq^3, cumulative ~ decay * n^2(n+1)^2/4,
// the closed form of the sum of cubes) to find how many MEM_SLICE_SIZE slices
// fit into `dt`, starting from the slice position implied by cur_mem_hold.
int64_t ObFifoArena::calc_mem_limit(const int64_t cur_mem_hold, const int64_t trigger_mem_limit, const int64_t dt) const
{
int ret = OB_SUCCESS;
int64_t mem_can_be_assigned = 0;
const double decay_factor = throttle_info_.decay_factor_;
int64_t init_seq = 0;
int64_t init_page_left_size = 0;
double init_page_left_interval = 0;
double past_interval = 0;
double last_page_interval = 0;
double mid_result = 0;
double approx_max_chunk_seq = 0;
int64_t max_seq = 0;
double accumulate_interval = 0;
if (cur_mem_hold < trigger_mem_limit) {
// there is no speed limit now
// we can get all the memory before speed limit
mem_can_be_assigned = trigger_mem_limit - cur_mem_hold;
} else if (decay_factor <= 0) {
mem_can_be_assigned = 0;
LOG_WARN("we should limit speed, but the decay factor not calculate now", K(cur_mem_hold), K(trigger_mem_limit), K(dt));
} else {
// current slice index past the trigger line, and the unused remainder of it
init_seq = ((cur_mem_hold - trigger_mem_limit) + MEM_SLICE_SIZE - 1) / (MEM_SLICE_SIZE);
init_page_left_size = MEM_SLICE_SIZE - (cur_mem_hold - trigger_mem_limit) % MEM_SLICE_SIZE;
init_page_left_interval = (1.0 * decay_factor * pow(init_seq, 3) *
init_page_left_size / MEM_SLICE_SIZE);
// time already "spent" by the slices before init_seq on the decay curve
past_interval = decay_factor * pow(init_seq, 2) * pow(init_seq + 1, 2) / 4;
// there is speed limit
if (init_page_left_interval > dt) {
// dt fits inside the current slice: grant a proportional part of it
last_page_interval = decay_factor * pow(init_seq, 3);
mem_can_be_assigned = dt / last_page_interval * MEM_SLICE_SIZE;
} else {
// solve n^2(n+1)^2 = 4*(dt + past - left)/decay for the last slice index,
// starting from the fourth-root approximation and correcting upward
mid_result = 4.0 * (dt + past_interval - init_page_left_interval) / decay_factor;
approx_max_chunk_seq = pow(mid_result, 0.25);
max_seq = floor(approx_max_chunk_seq);
for (int i = 0; i < 2; i++) {
if (pow(max_seq, 2) * pow(max_seq + 1, 2) < mid_result) {
max_seq = max_seq + 1;
}
}
accumulate_interval = pow(max_seq, 2) * pow(max_seq + 1, 2) * decay_factor / 4 - past_interval + init_page_left_interval;
mem_can_be_assigned = init_page_left_size + (max_seq - init_seq) * MEM_SLICE_SIZE;
// overshoot: give back the proportional part of the last slice
if (accumulate_interval > dt) {
last_page_interval = decay_factor * pow(max_seq, 3);
mem_can_be_assigned -= (accumulate_interval - dt) / last_page_interval * MEM_SLICE_SIZE;
}
}
// defensive code
if (pow(max_seq, 2) * pow(max_seq + 1, 2) < mid_result) {
LOG_ERROR("unexpected result", K(max_seq), K(mid_result));
}
}
// defensive code
if (mem_can_be_assigned <= 0) {
LOG_WARN("we can not get memory now", K(mem_can_be_assigned), K(decay_factor), K(cur_mem_hold), K(trigger_mem_limit), K(dt));
}
return mem_can_be_assigned;
}
// Compute the sleep interval for an allocation of `alloc_size` at the current
// hold level: each MEM_SLICE_SIZE chunk past the trigger line costs
// decay_factor * seq^3, summed over the chunks this allocation spans, plus a
// small floor so every throttled allocation waits at least a little.
int64_t ObFifoArena::get_throttling_interval(const int64_t cur_mem_hold,
const int64_t alloc_size,
const int64_t trigger_mem_limit)
{
constexpr int64_t MIN_INTERVAL_PER_ALLOC = 20;
int64_t chunk_cnt = ((alloc_size + MEM_SLICE_SIZE - 1) / (MEM_SLICE_SIZE));
int64_t chunk_seq = ((cur_mem_hold - trigger_mem_limit) + MEM_SLICE_SIZE - 1)/ (MEM_SLICE_SIZE);
int64_t ret_interval = 0;
double cur_chunk_seq = 1.0;
// walk backwards from the newest chunk; stop if the sequence underflows
for (int64_t i = 0; i < chunk_cnt && cur_chunk_seq > 0.0; ++i) {
cur_chunk_seq = static_cast<double>(chunk_seq - i);
ret_interval += static_cast<int64_t>(throttle_info_.decay_factor_ * cur_chunk_seq * cur_chunk_seq * cur_chunk_seq);
}
// scale by the fraction of a slice actually requested
return alloc_size * ret_interval / MEM_SLICE_SIZE + MIN_INTERVAL_PER_ALLOC;
}
// Publish the latest memstore limit; all throttle math is based on it.
void ObFifoArena::set_memstore_threshold(int64_t memstore_threshold)
{
ATOMIC_STORE(&lastest_memstore_threshold_, memstore_threshold);
}
// Thread-local cache cell used by the config getters below: holds a config
// value v_ (defaulted to N) together with the tenant it was last read for,
// so a tenant switch invalidates the cache.
template<int64_t N>
struct INTEGER_WRAPPER
{
INTEGER_WRAPPER() : v_(N), tenant_id_(0) {}
int64_t v_;    // cached config value
uint64_t tenant_id_;  // tenant the cached value belongs to (0 = not yet read)
};
// Read writing_throttling_trigger_percentage from the tenant config, cached
// per thread (RLOCAL) and refreshed when the tenant changes or at most every
// 5 seconds; on a failed config read the previous cached value is kept.
int64_t ObFifoArena::get_writing_throttling_trigger_percentage_() const
{
RLOCAL(INTEGER_WRAPPER<DEFAULT_TRIGGER_PERCENTAGE>, wrapper);
int64_t &trigger_v = (&wrapper)->v_;
uint64_t &tenant_id = (&wrapper)->tenant_id_;
if (tenant_id != attr_.tenant_id_ || TC_REACH_TIME_INTERVAL(5 * 1000 * 1000)) { // 5s
omt::ObTenantConfigGuard tenant_config(TENANT_CONF(attr_.tenant_id_));
if (!tenant_config.is_valid()) {
COMMON_LOG(INFO, "failed to get tenant config", K(attr_));
} else {
trigger_v = tenant_config->writing_throttling_trigger_percentage;
tenant_id = attr_.tenant_id_;
}
}
return trigger_v;
}
// Read writing_throttling_maximum_duration from the tenant config, cached per
// thread (RLOCAL) and refreshed when the tenant changes or at most every
// second; on a failed config read the previous cached value is kept.
int64_t ObFifoArena::get_writing_throttling_maximum_duration_() const
{
RLOCAL(INTEGER_WRAPPER<DEFAULT_DURATION>, wrapper);
int64_t &duration_v = (&wrapper)->v_;
uint64_t &tenant_id = (&wrapper)->tenant_id_;
if (tenant_id != attr_.tenant_id_ || TC_REACH_TIME_INTERVAL(1 * 1000 * 1000)) { // 1s
omt::ObTenantConfigGuard tenant_config(TENANT_CONF(attr_.tenant_id_));
if (!tenant_config.is_valid()) {
//keep default
COMMON_LOG(INFO, "failed to get tenant config", K(attr_));
} else {
duration_v = tenant_config->writing_throttling_maximum_duration;
tenant_id = attr_.tenant_id_;
}
}
return duration_v;
}
}; // end namespace common
}; // end namespace oceanbase

View File

@ -25,10 +25,10 @@ namespace oceanbase
{
namespace common
{
class ObMemstoreAllocatorMgr;
class ObActiveList;
class ObFifoArena
{
public:
static int64_t total_hold_;
struct Page;
@ -146,13 +146,26 @@ public:
};
public:
enum { MAX_CACHED_GROUP_COUNT = 16, MAX_CACHED_PAGE_COUNT = MAX_CACHED_GROUP_COUNT * Handle::MAX_NWAY, PAGE_SIZE = OB_MALLOC_BIG_BLOCK_SIZE + sizeof(Page) + sizeof(Ref)};
ObFifoArena(): allocator_(NULL), nway_(0), allocated_(0), reclaimed_(0), hold_(0), retired_(0), max_seq_(0), clock_(0), last_update_ts_(0),
last_reclaimed_(0), lastest_memstore_threshold_(0)
{ memset(cur_pages_, 0, sizeof(cur_pages_)); }
enum {
MAX_CACHED_GROUP_COUNT = 16,
MAX_CACHED_PAGE_COUNT = MAX_CACHED_GROUP_COUNT * Handle::MAX_NWAY,
PAGE_SIZE = OB_MALLOC_BIG_BLOCK_SIZE + sizeof(Page) + sizeof(Ref)
};
ObFifoArena()
: allocator_(NULL),
nway_(0),
allocated_(0),
reclaimed_(0),
hold_(0),
retired_(0),
last_reclaimed_(0),
lastest_memstore_threshold_(0)
{
memset(cur_pages_, 0, sizeof(cur_pages_));
}
~ObFifoArena() { reset(); }
public:
int init(uint64_t tenant_id);
int init();
void reset();
void update_nway_per_group(int64_t nway);
void* alloc(int64_t idx, Handle& handle, int64_t size);
@ -160,23 +173,17 @@ public:
int64_t allocated() const { return ATOMIC_LOAD(&allocated_); }
int64_t retired() const { return ATOMIC_LOAD(&retired_); }
int64_t reclaimed() const { return ATOMIC_LOAD(&reclaimed_); }
int64_t hold() const {
int64_t rsize = ATOMIC_LOAD(&reclaimed_);
int64_t asize = ATOMIC_LOAD(&allocated_);
return asize - rsize;
}
uint64_t get_tenant_id() const { return attr_.tenant_id_; }
void set_memstore_threshold(int64_t memstore_threshold);
bool need_do_writing_throttle() const;
bool check_clock_over_seq(const int64_t seq);
int64_t get_clock();
int64_t expected_wait_time(const int64_t seq) const;
void skip_clock(const int64_t skip_size);
int64_t hold() const {
return hold_;
}
uint64_t get_tenant_id() const { return attr_.tenant_id_; }
int64_t get_max_cached_memstore_size() const
{
return MAX_CACHED_GROUP_COUNT * ATOMIC_LOAD(&nway_) * (PAGE_SIZE + ACHUNK_PRESERVE_SIZE);
}
private:
ObQSync& get_qs() {
static ObQSync s_qs;
@ -185,36 +192,6 @@ private:
int64_t get_way_id() { return icpu_id() % ATOMIC_LOAD(&nway_); }
int64_t get_idx(int64_t grp_id, int64_t way_id) { return (grp_id % MAX_CACHED_GROUP_COUNT) * Handle::MAX_NWAY + way_id; }
struct ObWriteThrottleInfo {
public:
ObWriteThrottleInfo(){ reset();}
~ObWriteThrottleInfo(){}
void reset();
void reset_period_stat_info();
void record_limit_event(int64_t interval);
int check_and_calc_decay_factor(int64_t memstore_threshold,
int64_t trigger_percentage,
int64_t alloc_duration);
TO_STRING_KV(K(decay_factor_),
K(alloc_duration_),
K(trigger_percentage_),
K(memstore_threshold_),
K(period_throttled_count_),
K(period_throttled_time_),
K(total_throttled_count_),
K(total_throttled_time_));
public:
//control info
double decay_factor_;
int64_t alloc_duration_;
int64_t trigger_percentage_;
int64_t memstore_threshold_;
//stat info
int64_t period_throttled_count_;
int64_t period_throttled_time_;
int64_t total_throttled_count_;
int64_t total_throttled_time_;
};
private:
void release_ref(Ref* ref);
Page* alloc_page(int64_t size);
@ -222,24 +199,11 @@ private:
void retire_page(int64_t way_id, Handle& handle, Page* ptr);
void destroy_page(Page* page);
void shrink_cached_page(int64_t nway);
void speed_limit(const int64_t cur_mem_hold, const int64_t alloc_size);
int64_t get_throttling_interval(const int64_t cur_mem_hold,
const int64_t alloc_size,
const int64_t trigger_mem_limit);
void advance_clock();
int64_t calc_mem_limit(const int64_t cur_mem_hold, const int64_t trigger_mem_limit, const int64_t dt) const;
int64_t get_actual_hold_size(Page* page);
int64_t get_writing_throttling_trigger_percentage_() const;
int64_t get_writing_throttling_maximum_duration_() const;
private:
static const int64_t MAX_WAIT_INTERVAL = 20 * 1000 * 1000;//20s
static const int64_t ADVANCE_CLOCK_INTERVAL = 50;// 50us
static const int64_t MEM_SLICE_SIZE = 2 * 1024 * 1024; //Bytes per usecond
static const int64_t MIN_INTERVAL = 20000;
static const int64_t DEFAULT_TRIGGER_PERCENTAGE = 100;
static const int64_t DEFAULT_DURATION = 60 * 60 * 1000 * 1000L;//us
lib::ObMemAttr attr_;
lib::ObIAllocator *allocator_;
int64_t nway_;
int64_t allocated_; // record all the memory hold by pages in history.
// increase while a page created and decrease only if a failed page destroyed.
@ -250,13 +214,8 @@ private:
// (may be: hold_ = allocated_ - reclaimed_)
int64_t retired_; // record all the memory hold by not active pages in history.
int64_t max_seq_;
int64_t clock_;
int64_t last_update_ts_;
int64_t last_reclaimed_;
Page* cur_pages_[MAX_CACHED_PAGE_COUNT];
ObWriteThrottleInfo throttle_info_;
int64_t lastest_memstore_threshold_;//Save the latest memstore_threshold
DISALLOW_COPY_AND_ASSIGN(ObFifoArena);
};

View File

@ -1,201 +0,0 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#include "ob_gmemstore_allocator.h"
#include "ob_memstore_allocator_mgr.h"
#include "share/rc/ob_tenant_base.h"
#include "storage/memtable/ob_memtable.h"
#include "lib/utility/ob_print_utils.h"
#include "observer/omt/ob_multi_tenant.h"
#include "observer/ob_server_struct.h"
#include "share/ob_tenant_mgr.h"
#include "storage/tx_storage/ob_tenant_freezer.h"
namespace oceanbase
{
using namespace share;
namespace common
{
// List visitor: appends "<memtable>," to buf_ for every handle whose
// memstore is frozen. Returns first buffer error (e.g. size overflow).
int FrozenMemstoreInfoLogger::operator()(ObDLink* link)
{
  int ret = OB_SUCCESS;
  // total_list_ is an intrusive link; recover the owning handle.
  ObGMemstoreAllocator::AllocHandle* handle = CONTAINER_OF(link, typeof(*handle), total_list_);
  memtable::ObMemtable& mt = handle->mt_;
  if (handle->is_frozen()) {
    if (OB_FAIL(databuff_print_obj(buf_, limit_, pos_, mt))) {
    } else {
      ret = databuff_printf(buf_, limit_, pos_, ",");
    }
  }
  return ret;
}
// List visitor: appends "<memtable>," to buf_ for every handle whose
// memstore is still active. Returns the first buffer error encountered.
int ActiveMemstoreInfoLogger::operator()(ObDLink* link)
{
  int ret = OB_SUCCESS;
  // Recover the owning handle from its intrusive total_list_ link.
  ObGMemstoreAllocator::AllocHandle* alloc_handle = CONTAINER_OF(link, typeof(*alloc_handle), total_list_);
  if (alloc_handle->is_active()) {
    memtable::ObMemtable& mt = alloc_handle->mt_;
    if (OB_SUCC(databuff_print_obj(buf_, limit_, pos_, mt))) {
      ret = databuff_printf(buf_, limit_, pos_, ",");
    }
  }
  return ret;
}
// Binds this handle to the per-tenant memstore allocator, creating the
// allocator lazily through the manager if needed.
// @return OB_ERR_UNEXPECTED when the allocator cannot be obtained.
int ObGMemstoreAllocator::AllocHandle::init(uint64_t tenant_id)
{
  int ret = OB_SUCCESS;
  ObGMemstoreAllocator* host = NULL;
  if (OB_FAIL(ObMemstoreAllocatorMgr::get_instance().get_tenant_memstore_allocator(tenant_id, host))) {
    ret = OB_ERR_UNEXPECTED;
  } else if (NULL == host){
    ret = OB_ERR_UNEXPECTED;
  } else {
    host->init_handle(*this, tenant_id);
  }
  return ret;
}
// Resets the handle, registers it in the handle list and refreshes the
// arena's concurrency and the tenant memstore threshold under lock.
void ObGMemstoreAllocator::init_handle(AllocHandle& handle, uint64_t tenant_id)
{
  handle.do_reset();
  handle.set_host(this);
  {
    // nway computed outside the lock: it queries tenant cpu/mem limits.
    int64_t nway = nway_per_group();
    LockGuard guard(lock_);
    hlist_.init_handle(handle);
    arena_.update_nway_per_group(nway);
    set_memstore_threshold_without_lock(tenant_id);
  }
  COMMON_LOG(TRACE, "MTALLOC.init", KP(&handle.mt_));
}
// Releases the handle's arena memory, unlinks it from the handle list and
// resets the whole arena once the last handle is gone. The arena free is
// done before taking the lock to keep the critical section short.
void ObGMemstoreAllocator::destroy_handle(AllocHandle& handle)
{
  ObTimeGuard time_guard("ObGMemstoreAllocator::destroy_handle", 100 * 1000);
  COMMON_LOG(TRACE, "MTALLOC.destroy", KP(&handle.mt_));
  arena_.free(handle.arena_handle_);
  time_guard.click();
  {
    LockGuard guard(lock_);
    time_guard.click();
    hlist_.destroy_handle(handle);
    time_guard.click();
    if (hlist_.is_empty()) {
      // no live memtables left: reclaim all cached pages.
      arena_.reset();
    }
    time_guard.click();
  }
  handle.do_reset();
}
// Allocates align(size, 8) bytes for the memtable behind `handle`.
// On first allocation the handle is activated (clock snapshot + list move).
// Returns nullptr when the tenant is out of memstore limit or the limit
// check itself fails.
void* ObGMemstoreAllocator::alloc(AllocHandle& handle, int64_t size)
{
  int ret = OB_SUCCESS;
  int64_t align_size = upper_align(size, sizeof(int64_t));
  uint64_t tenant_id = arena_.get_tenant_id();
  bool is_out_of_mem = false;
  if (!handle.is_id_valid()) {
    COMMON_LOG(TRACE, "MTALLOC.first_alloc", KP(&handle.mt_));
    LockGuard guard(lock_);
    if (handle.is_frozen()) {
      COMMON_LOG(ERROR, "cannot alloc because allocator is frozen", K(ret), K(handle.mt_));
    } else if (!handle.is_id_valid()) {
      // double-checked under lock: activate exactly once.
      handle.set_clock(arena_.retired());
      hlist_.set_active(handle);
    }
  }
  MTL_SWITCH(tenant_id) {
    storage::ObTenantFreezer *freezer = nullptr;
    if (is_virtual_tenant_id(tenant_id)) {
      // virtual tenant should not have memstore.
      ret = OB_ERR_UNEXPECTED;
      COMMON_LOG(ERROR, "virtual tenant should not have memstore", K(ret), K(tenant_id));
    } else if (FALSE_IT(freezer = MTL(storage::ObTenantFreezer*))) {
    } else if (OB_FAIL(freezer->check_memstore_full_internal(is_out_of_mem))) {
      COMMON_LOG(ERROR, "fail to check tenant out of mem limit", K(ret), K(tenant_id));
    }
  }
  if (OB_FAIL(ret)) {
    // fail safe: treat any check failure as out-of-memory.
    is_out_of_mem = true;
  }
  if (is_out_of_mem && REACH_TIME_INTERVAL(1 * 1000 * 1000)) {
    STORAGE_LOG(WARN, "this tenant is already out of memstore limit or some thing wrong.", K(tenant_id));
  }
  return is_out_of_mem ? nullptr : arena_.alloc(handle.id_, handle.arena_handle_, align_size);
}
// Marks the handle's memtable as frozen (no further first-alloc activation).
void ObGMemstoreAllocator::set_frozen(AllocHandle& handle)
{
  COMMON_LOG(TRACE, "MTALLOC.set_frozen", KP(&handle.mt_));
  LockGuard guard(lock_);
  hlist_.set_frozen(handle);
}
// Concurrency ways are capped both by cpu count and by how many pages
// fit in 1/20th of the tenant's memory.
static int64_t calc_nway(int64_t cpu, int64_t mem)
{
  const int64_t mem_limited_nway = mem / 20 / ObFifoArena::PAGE_SIZE;
  return std::min(cpu, mem_limited_nway);
}
// Computes the arena concurrency for this tenant from its max cpu and min
// memory quotas; returns 0 on any failure (caller keeps previous value).
int64_t ObGMemstoreAllocator::nway_per_group()
{
  int ret = OB_SUCCESS;
  uint64_t tenant_id = arena_.get_tenant_id();
  double min_cpu = 0;
  double max_cpu = 0;
  int64_t max_memory = 0;
  int64_t min_memory = 0;
  omt::ObMultiTenant *omt = GCTX.omt_;
  MTL_SWITCH(tenant_id) {
    storage::ObTenantFreezer *freezer = nullptr;
    if (NULL == omt) {
      ret = OB_ERR_UNEXPECTED;
      COMMON_LOG(WARN, "omt should not be null", K(tenant_id), K(ret));
    } else if (OB_FAIL(omt->get_tenant_cpu(tenant_id, min_cpu, max_cpu))) {
      COMMON_LOG(WARN, "get tenant cpu failed", K(tenant_id), K(ret));
    } else if (FALSE_IT(freezer = MTL(storage::ObTenantFreezer *))) {
    } else if (OB_FAIL(freezer->get_tenant_mem_limit(min_memory, max_memory))) {
      COMMON_LOG(WARN, "get tenant mem limit failed", K(tenant_id), K(ret));
    }
  }
  return OB_SUCCESS == ret? calc_nway((int64_t)max_cpu, min_memory): 0;
}
// Locked wrapper: refresh the tenant memstore threshold.
int ObGMemstoreAllocator::set_memstore_threshold(uint64_t tenant_id)
{
  LockGuard guard(lock_);
  return set_memstore_threshold_without_lock(tenant_id);
}
// Reads the tenant's memstore limit from its freezer and pushes it into
// the arena. Caller must hold lock_.
int ObGMemstoreAllocator::set_memstore_threshold_without_lock(uint64_t tenant_id)
{
  int ret = OB_SUCCESS;
  int64_t memstore_threshold = INT64_MAX;
  MTL_SWITCH(tenant_id) {
    storage::ObTenantFreezer *freezer = nullptr;
    if (FALSE_IT(freezer = MTL(storage::ObTenantFreezer *))) {
    } else if (OB_FAIL(freezer->get_tenant_memstore_limit(memstore_threshold))) {
      COMMON_LOG(WARN, "failed to get_tenant_memstore_limit", K(tenant_id), K(ret));
    } else {
      arena_.set_memstore_threshold(memstore_threshold);
    }
  }
  return ret;
}
}; // end namespace common
}; // end namespace oceanbase

View File

@ -19,7 +19,6 @@ namespace oceanbase
{
namespace common
{
class ObFifoArena;
class ObHandleList
{
public:

View File

@ -0,0 +1,177 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#include "ob_mds_allocator.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h"
#include "share/rc/ob_tenant_base.h"
#include "share/throttle/ob_share_throttle_define.h"
#include "storage/multi_data_source/runtime_utility/mds_tenant_service.h"
#include "storage/tx_storage/ob_tenant_freezer.h"
using namespace oceanbase::storage::mds;
namespace oceanbase {
namespace share {
// One MDS throttle resource unit equals a normal malloc block (8KB).
int64_t ObTenantMdsAllocator::resource_unit_size()
{
  return OB_MALLOC_NORMAL_BLOCK_SIZE;
}
// Fills the MDS throttle parameters from tenant config when available,
// otherwise from compiled-in defaults.
// @param[out] resource_limit    max bytes MDS may hold
// @param[out] trigger_percentage throttling starts at this % of the limit
// @param[out] max_duration      longest total throttle time (us)
void ObTenantMdsAllocator::init_throttle_config(int64_t &resource_limit, int64_t &trigger_percentage, int64_t &max_duration)
{
  // define some default value
  const int64_t MDS_LIMIT_PERCENTAGE = 5;
  const int64_t MDS_THROTTLE_TRIGGER_PERCENTAGE = 60;
  const int64_t MDS_THROTTLE_MAX_DURATION = 2LL * 60LL * 60LL * 1000LL * 1000LL;  // 2 hours

  int64_t total_memory = lib::get_tenant_memory_limit(MTL_ID());

  // Use tenant config to init throttle config
  omt::ObTenantConfigGuard tenant_config(TENANT_CONF(MTL_ID()));
  if (tenant_config.is_valid()) {
    resource_limit = total_memory * tenant_config->_mds_memory_limit_percentage / 100LL;
    trigger_percentage = tenant_config->writing_throttling_trigger_percentage;
    max_duration = tenant_config->writing_throttling_maximum_duration;
  } else {
    SHARE_LOG_RET(WARN, OB_INVALID_CONFIG, "init throttle config with default value");
    resource_limit = total_memory * MDS_LIMIT_PERCENTAGE / 100;
    trigger_percentage = MDS_THROTTLE_TRIGGER_PERCENTAGE;
    max_duration = MDS_THROTTLE_MAX_DURATION;
  }
}
// Hook required by the throttle framework; MDS does not adapt its limit
// at runtime, so this is intentionally a no-op.
void ObTenantMdsAllocator::adaptive_update_limit(const int64_t holding_size,
                                                 const int64_t config_specify_resource_limit,
                                                 int64_t &resource_limit,
                                                 int64_t &last_update_limit_ts,
                                                 bool &is_updated)
{
  // do nothing
}
// Initializes the tenant MDS allocator: wires the shared throttle tool and
// sets up the underlying vslice allocator with MDS memory attributes.
// @return OB_INIT_TWICE on repeated init; allocator init errors otherwise.
int ObTenantMdsAllocator::init()
{
  int ret = OB_SUCCESS;
  ObMemAttr mem_attr;
  // TODO : @gengli new ctx id?
  mem_attr.tenant_id_ = MTL_ID();
  mem_attr.ctx_id_ = ObCtxIds::MDS_DATA_ID;
  mem_attr.label_ = "MdsTable";
  ObSharedMemAllocMgr *share_mem_alloc_mgr = MTL(ObSharedMemAllocMgr *);
  throttle_tool_ = &(share_mem_alloc_mgr->share_resource_throttle_tool());
  MDS_TG(10_ms);
  if (IS_INIT){
    ret = OB_INIT_TWICE;
    SHARE_LOG(WARN, "init tenant mds allocator twice", KR(ret), KPC(this));
  } else if (OB_ISNULL(throttle_tool_)) {
    ret = OB_ERR_UNEXPECTED;
    SHARE_LOG(WARN, "throttle tool is unexpected null", KP(throttle_tool_), KP(share_mem_alloc_mgr));
  } else if (MDS_FAIL(allocator_.init(OB_MALLOC_NORMAL_BLOCK_SIZE, block_alloc_, mem_attr))) {
    MDS_LOG(WARN, "init vslice allocator failed", K(ret), K(OB_MALLOC_NORMAL_BLOCK_SIZE), KP(this), K(mem_attr));
  } else {
    allocator_.set_nway(MDS_ALLOC_CONCURRENCY);
    is_inited_ = true;
  }
  return ret;
}
void *ObTenantMdsAllocator::alloc(const int64_t size)
{
int64_t abs_expire_time = THIS_WORKER.get_timeout_ts();
return alloc(size, abs_expire_time);
}
// ObIAllocator overload: the vslice allocator cannot honor per-call
// attributes, so attr is ignored (logged) and the plain path is used.
void *ObTenantMdsAllocator::alloc(const int64_t size, const ObMemAttr &attr)
{
  UNUSED(attr);
  void *obj = alloc(size);
  MDS_LOG_RET(WARN, OB_INVALID_ARGUMENT, "VSLICE Allocator not support mark attr", KP(obj), K(size), K(attr));
  return obj;
}
// Allocates `size` bytes for MDS data, applying write throttling first.
// @param size            bytes requested
// @param abs_expire_time absolute deadline bounding any throttle sleep
// @return pointer on success, nullptr when the vslice allocator fails
void *ObTenantMdsAllocator::alloc(const int64_t size, const int64_t abs_expire_time)
{
  bool is_throttled = false;
  // Account this allocation against the MDS throttle resource; sets
  // is_throttled when the caller should be slowed down.
  (void)throttle_tool_->alloc_resource<ObTenantMdsAllocator>(size, abs_expire_time, is_throttled);

  // If throttled, sleep — unless a log-stream freeze is already in
  // progress (it is reclaiming memory), in which case record a skip.
  // NOTE: both calls must use the ObTenantMdsAllocator tag; the previous
  // code passed ObTenantTxDataAllocator and throttled against the wrong
  // resource's statistics.
  if (OB_UNLIKELY(is_throttled)) {
    if (MTL(ObTenantFreezer *)->exist_ls_freezing()) {
      (void)throttle_tool_->skip_throttle<ObTenantMdsAllocator>(size);
    } else {
      (void)throttle_tool_->do_throttle<ObTenantMdsAllocator>(abs_expire_time);
    }
  }

  void *obj = allocator_.alloc(size);
  MDS_LOG(DEBUG, "mds alloc ", K(size), KP(obj), K(abs_expire_time));
  if (OB_NOT_NULL(obj)) {
    MTL(storage::mds::ObTenantMdsService *)
        ->record_alloc_backtrace(obj,
                                 __thread_mds_tag__,
                                 __thread_mds_alloc_type__,
                                 __thread_mds_alloc_file__,
                                 __thread_mds_alloc_func__,
                                 __thread_mds_alloc_line__);  // for debug mem leak
  }
  return obj;
}
// Returns memory to the vslice allocator and drops the leak-debug record.
void ObTenantMdsAllocator::free(void *ptr)
{
  allocator_.free(ptr);
  MTL(storage::mds::ObTenantMdsService *)->erase_alloc_backtrace(ptr);
}
void ObTenantMdsAllocator::set_attr(const ObMemAttr &attr) { allocator_.set_attr(attr); }
// mtl_malloc wrapper with a fixed MDS_CTX attribute; successful
// allocations are recorded for leak debugging.
void *ObTenantBufferCtxAllocator::alloc(const int64_t size)
{
  void *obj = share::mtl_malloc(size, ObMemAttr(MTL_ID(), "MDS_CTX_DEFAULT", ObCtxIds::MDS_CTX_ID));
  if (OB_NOT_NULL(obj)) {
    MTL(ObTenantMdsService*)->record_alloc_backtrace(obj,
                                                     __thread_mds_tag__,
                                                     __thread_mds_alloc_type__,
                                                     __thread_mds_alloc_file__,
                                                     __thread_mds_alloc_func__,
                                                     __thread_mds_alloc_line__);// for debug mem leak
  }
  return obj;
}
// mtl_malloc wrapper honoring the caller-supplied attribute; successful
// allocations are recorded for leak debugging.
void *ObTenantBufferCtxAllocator::alloc(const int64_t size, const ObMemAttr &attr)
{
  void *obj = share::mtl_malloc(size, attr);
  if (OB_NOT_NULL(obj)) {
    MTL(ObTenantMdsService*)->record_alloc_backtrace(obj,
                                                     __thread_mds_tag__,
                                                     __thread_mds_alloc_type__,
                                                     __thread_mds_alloc_file__,
                                                     __thread_mds_alloc_func__,
                                                     __thread_mds_alloc_line__);// for debug mem leak
  }
  return obj;
}
// Frees mtl-allocated memory and drops the leak-debug record.
void ObTenantBufferCtxAllocator::free(void *ptr)
{
  share::mtl_free(ptr);
  MTL(ObTenantMdsService*)->erase_alloc_backtrace(ptr);
}
} // namespace share
} // namespace oceanbase

View File

@ -0,0 +1,61 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#ifndef OCEANBASE_ALLOCATOR_OB_MDS_ALLOCATOR_H_
#define OCEANBASE_ALLOCATOR_OB_MDS_ALLOCATOR_H_
#include "lib/allocator/ob_vslice_alloc.h"
#include "share/throttle/ob_share_throttle_define.h"
namespace oceanbase {
namespace share {
// Tenant-level allocator for multi-data-source (MDS) tables.
// Wraps an ObVSliceAlloc and cooperates with the shared write-throttle
// tool (TxShareThrottleTool) to slow writers when MDS memory grows.
class ObTenantMdsAllocator : public ObIAllocator {
private:
  // concurrency (nway) configured on the underlying vslice allocator
  static const int64_t MDS_ALLOC_CONCURRENCY = 32;

public:
  // generates the throttle hook functions (resource_unit_size, etc.)
  DEFINE_CUSTOM_FUNC_FOR_THROTTLE(Mds);

public:
  ObTenantMdsAllocator() : is_inited_(false), throttle_tool_(nullptr), block_alloc_(), allocator_() {}
  int init();
  void destroy() { is_inited_ = false; }
  // alloc with an explicit absolute deadline bounding throttle sleeps
  void *alloc(const int64_t size, const int64_t expire_ts);
  virtual void *alloc(const int64_t size) override;
  virtual void *alloc(const int64_t size, const ObMemAttr &attr) override;
  virtual void free(void *ptr) override;
  virtual void set_attr(const ObMemAttr &attr) override;
  // bytes currently held by the underlying allocator
  int64_t hold() { return allocator_.hold(); }

  TO_STRING_KV(K(is_inited_), KP(this), KP(throttle_tool_), KP(&block_alloc_), KP(&allocator_));

private:
  bool is_inited_;
  share::TxShareThrottleTool *throttle_tool_;  // owned by ObSharedMemAllocMgr
  common::ObBlockAllocMgr block_alloc_;
  common::ObVSliceAlloc allocator_;
};
struct ObTenantBufferCtxAllocator : public ObIAllocator// for now, it is just a wrapper of mtl_malloc
{
  virtual void *alloc(const int64_t size) override;
  virtual void *alloc(const int64_t size, const ObMemAttr &attr) override;
  virtual void free(void *ptr) override;
  // per-call attrs come through alloc(); nothing to store here
  virtual void set_attr(const ObMemAttr &) override {}
};
} // namespace share
} // namespace oceanbase
#endif

View File

@ -0,0 +1,247 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#include "ob_shared_memory_allocator_mgr.h"
#include "share/rc/ob_tenant_base.h"
#include "storage/memtable/ob_memtable.h"
#include "lib/utility/ob_print_utils.h"
#include "observer/omt/ob_multi_tenant.h"
#include "observer/ob_server_struct.h"
#include "share/ob_tenant_mgr.h"
#include "storage/tx_storage/ob_tenant_freezer.h"
namespace oceanbase
{
using namespace share;
namespace share
{
// List visitor: appends "<memtable>," to buf_ for every handle whose
// memstore is frozen. Returns first buffer error (e.g. size overflow).
int FrozenMemstoreInfoLogger::operator()(ObDLink* link)
{
  int ret = OB_SUCCESS;
  // total_list_ is an intrusive link; recover the owning handle.
  ObMemstoreAllocator::AllocHandle* handle = CONTAINER_OF(link, typeof(*handle), total_list_);
  memtable::ObMemtable& mt = handle->mt_;
  if (handle->is_frozen()) {
    if (OB_FAIL(databuff_print_obj(buf_, limit_, pos_, mt))) {
    } else {
      ret = databuff_printf(buf_, limit_, pos_, ",");
    }
  }
  return ret;
}
// List visitor: appends "<memtable>," to buf_ for every handle whose
// memstore is still active.
int ActiveMemstoreInfoLogger::operator()(ObDLink* link)
{
  int ret = OB_SUCCESS;
  ObMemstoreAllocator::AllocHandle* handle = CONTAINER_OF(link, typeof(*handle), total_list_);
  memtable::ObMemtable& mt = handle->mt_;
  if (handle->is_active()) {
    if (OB_FAIL(databuff_print_obj(buf_, limit_, pos_, mt))) {
    } else {
      ret = databuff_printf(buf_, limit_, pos_, ",");
    }
  }
  return ret;
}
// Binds this handle to the current tenant's memstore allocator
// (owned by ObSharedMemAllocMgr). Always succeeds.
int ObMemstoreAllocator::AllocHandle::init()
{
  int ret = OB_SUCCESS;
  // Fix: dropped the unused `tenant_id = MTL_ID()` local from the
  // original; the MTL() lookup below already targets the current tenant.
  ObMemstoreAllocator &host = MTL(ObSharedMemAllocMgr *)->memstore_allocator();
  (void)host.init_handle(*this);
  return ret;
}
// Wires the shared throttle tool of this tenant and initializes the arena.
int ObMemstoreAllocator::init()
{
  throttle_tool_ = &(MTL(ObSharedMemAllocMgr *)->share_resource_throttle_tool());
  return arena_.init();
}
// Resets the handle, registers it in the handle list and refreshes the
// arena's concurrency under lock.
void ObMemstoreAllocator::init_handle(AllocHandle& handle)
{
  handle.do_reset();
  handle.set_host(this);
  {
    // nway computed outside the lock: it queries tenant cpu/mem limits.
    int64_t nway = nway_per_group();
    LockGuard guard(lock_);
    hlist_.init_handle(handle);
    arena_.update_nway_per_group(nway);
  }
  COMMON_LOG(TRACE, "MTALLOC.init", KP(&handle.mt_));
}
// Releases the handle's arena memory, unlinks it from the handle list and
// resets the whole arena once the last handle is gone. The arena free is
// done before taking the lock to keep the critical section short.
void ObMemstoreAllocator::destroy_handle(AllocHandle& handle)
{
  ObTimeGuard time_guard("ObMemstoreAllocator::destroy_handle", 100 * 1000);
  COMMON_LOG(TRACE, "MTALLOC.destroy", KP(&handle.mt_));
  arena_.free(handle.arena_handle_);
  time_guard.click();
  {
    LockGuard guard(lock_);
    time_guard.click();
    hlist_.destroy_handle(handle);
    time_guard.click();
    if (hlist_.is_empty()) {
      // no live memtables left: reclaim all cached pages.
      arena_.reset();
    }
    time_guard.click();
  }
  handle.do_reset();
}
// Allocates align(size, 8) bytes for the memtable behind `handle`.
// On first allocation the handle is activated (clock snapshot + list move).
// The throttle tool accounts the allocation; when throttled, the size is
// added to the thread-local memstore_throttled_alloc() counter (the sleep
// itself happens elsewhere). Returns nullptr if the tenant is out of its
// memstore limit or the limit check fails.
void* ObMemstoreAllocator::alloc(AllocHandle& handle, int64_t size, const int64_t expire_ts)
{
  int ret = OB_SUCCESS;
  int64_t align_size = upper_align(size, sizeof(int64_t));
  uint64_t tenant_id = arena_.get_tenant_id();
  bool is_out_of_mem = false;
  if (!handle.is_id_valid()) {
    COMMON_LOG(TRACE, "MTALLOC.first_alloc", KP(&handle.mt_));
    LockGuard guard(lock_);
    if (handle.is_frozen()) {
      COMMON_LOG(ERROR, "cannot alloc because allocator is frozen", K(ret), K(handle.mt_));
    } else if (!handle.is_id_valid()) {
      // double-checked under lock: activate exactly once.
      handle.set_clock(arena_.retired());
      hlist_.set_active(handle);
    }
  }
  storage::ObTenantFreezer *freezer = nullptr;
  if (is_virtual_tenant_id(tenant_id)) {
    // virtual tenant should not have memstore.
    ret = OB_ERR_UNEXPECTED;
    COMMON_LOG(ERROR, "virtual tenant should not have memstore", K(ret), K(tenant_id));
  } else if (FALSE_IT(freezer = MTL(storage::ObTenantFreezer *))) {
  } else if (OB_FAIL(freezer->check_memstore_full_internal(is_out_of_mem))) {
    COMMON_LOG(ERROR, "fail to check tenant out of mem limit", K(ret), K(tenant_id));
  }
  void *res = nullptr;
  if (OB_FAIL(ret) || is_out_of_mem) {
    if (REACH_TIME_INTERVAL(1 * 1000 * 1000)) {
      STORAGE_LOG(WARN, "this tenant is already out of memstore limit or some thing wrong.", K(tenant_id));
    }
    res = nullptr;
  } else {
    bool is_throttled = false;
    (void)throttle_tool_->alloc_resource<ObMemstoreAllocator>(align_size, expire_ts, is_throttled);
    if (is_throttled) {
      // remember how much this thread allocated while throttled
      share::memstore_throttled_alloc() += align_size;
    }
    res = arena_.alloc(handle.id_, handle.arena_handle_, align_size);
  }
  return res;
}
// Marks the handle's memtable as frozen (no further first-alloc activation).
void ObMemstoreAllocator::set_frozen(AllocHandle& handle)
{
  COMMON_LOG(TRACE, "MTALLOC.set_frozen", KP(&handle.mt_));
  LockGuard guard(lock_);
  hlist_.set_frozen(handle);
}
// Concurrency ways are capped both by cpu count and by how many pages
// fit in 1/20th of the tenant's memory.
static int64_t calc_nway(int64_t cpu, int64_t mem)
{
  const int64_t mem_limited_nway = mem / 20 / ObFifoArena::PAGE_SIZE;
  return std::min(cpu, mem_limited_nway);
}
// Computes the arena concurrency for this tenant from its max cpu and min
// memory quotas; returns 0 on any failure (caller keeps previous value).
int64_t ObMemstoreAllocator::nway_per_group()
{
  int ret = OB_SUCCESS;
  uint64_t tenant_id = arena_.get_tenant_id();
  double min_cpu = 0;
  double max_cpu = 0;
  int64_t max_memory = 0;
  int64_t min_memory = 0;
  omt::ObMultiTenant *omt = GCTX.omt_;
  MTL_SWITCH(tenant_id) {
    storage::ObTenantFreezer *freezer = nullptr;
    if (NULL == omt) {
      ret = OB_ERR_UNEXPECTED;
      COMMON_LOG(WARN, "omt should not be null", K(tenant_id), K(ret));
    } else if (OB_FAIL(omt->get_tenant_cpu(tenant_id, min_cpu, max_cpu))) {
      COMMON_LOG(WARN, "get tenant cpu failed", K(tenant_id), K(ret));
    } else if (FALSE_IT(freezer = MTL(storage::ObTenantFreezer *))) {
    } else if (OB_FAIL(freezer->get_tenant_mem_limit(min_memory, max_memory))) {
      COMMON_LOG(WARN, "get tenant mem limit failed", K(tenant_id), K(ret));
    }
  }
  return OB_SUCCESS == ret? calc_nway((int64_t)max_cpu, min_memory): 0;
}
// Locked wrapper: refresh the tenant memstore threshold.
int ObMemstoreAllocator::set_memstore_threshold()
{
  LockGuard guard(lock_);
  return set_memstore_threshold_without_lock();
}
// Reads the tenant's memstore limit from its freezer and pushes it into
// the shared throttle tool as the memstore resource limit.
// Caller must hold lock_.
int ObMemstoreAllocator::set_memstore_threshold_without_lock()
{
  int ret = OB_SUCCESS;
  int64_t memstore_threshold = INT64_MAX;
  storage::ObTenantFreezer *freezer = nullptr;
  if (FALSE_IT(freezer = MTL(storage::ObTenantFreezer *))) {
  } else if (OB_FAIL(freezer->get_tenant_memstore_limit(memstore_threshold))) {
    COMMON_LOG(WARN, "failed to get_tenant_memstore_limit", K(ret));
  } else {
    throttle_tool_->set_resource_limit<ObMemstoreAllocator>(memstore_threshold);
  }
  return ret;
}
// One memstore throttle resource unit is 2MB.
int64_t ObMemstoreAllocator::resource_unit_size()
{
  return 2LL * 1024LL * 1024LL;
}
// Fills the memstore throttle parameters from tenant config when
// available, otherwise from compiled-in defaults.
// @param[out] resource_limit    max bytes memstore may hold
// @param[out] trigger_percentage throttling starts at this % of the limit
// @param[out] max_duration      longest total throttle time (us)
void ObMemstoreAllocator::init_throttle_config(int64_t &resource_limit,
                                               int64_t &trigger_percentage,
                                               int64_t &max_duration)
{
  // define some default value
  const int64_t MEMSTORE_LIMIT_PERCENTAGE = 50;
  const int64_t MEMSTORE_THROTTLE_TRIGGER_PERCENTAGE = 60;
  const int64_t MEMSTORE_THROTTLE_MAX_DURATION = 2LL * 60LL * 60LL * 1000LL * 1000LL;  // 2 hours
  int64_t total_memory = lib::get_tenant_memory_limit(MTL_ID());
  // Use tenant config to init throttle config
  omt::ObTenantConfigGuard tenant_config(TENANT_CONF(MTL_ID()));
  if (tenant_config.is_valid()) {
    resource_limit = total_memory * tenant_config->memstore_limit_percentage / 100LL;
    trigger_percentage = tenant_config->writing_throttling_trigger_percentage;
    max_duration = tenant_config->writing_throttling_maximum_duration;
  } else {
    COMMON_LOG_RET(WARN, OB_INVALID_CONFIG, "init throttle config with default value");
    resource_limit = total_memory * MEMSTORE_LIMIT_PERCENTAGE / 100;
    trigger_percentage = MEMSTORE_THROTTLE_TRIGGER_PERCENTAGE;
    max_duration = MEMSTORE_THROTTLE_MAX_DURATION;
  }
}
// Hook required by the throttle framework; the memstore limit is driven
// by set_memstore_threshold() instead, so this is intentionally a no-op.
void ObMemstoreAllocator::adaptive_update_limit(const int64_t holding_size,
                                                const int64_t config_specify_resource_limit,
                                                int64_t &resource_limit,
                                                int64_t &last_update_limit_ts,
                                                bool &is_updated)
{
  // do nothing
}
}; // namespace share
}; // namespace oceanbase

View File

@ -15,6 +15,7 @@
#include "ob_handle_list.h"
#include "ob_fifo_arena.h"
#include "lib/lock/ob_spin_lock.h"
#include "share/throttle/ob_share_throttle_define.h"
namespace oceanbase
{
@ -22,8 +23,17 @@ namespace memtable
{
class ObMemtable;
};
namespace common
namespace share
{
// record the throttled alloc size of memstore in this thread
OB_INLINE int64_t &memstore_throttled_alloc()
{
RLOCAL_INLINE(int64_t, throttled_alloc);
return throttled_alloc;
}
struct FrozenMemstoreInfoLogger
{
FrozenMemstoreInfoLogger(char* buf, int64_t limit): buf_(buf), limit_(limit), pos_(0) {}
@ -44,12 +54,15 @@ struct ActiveMemstoreInfoLogger
int64_t pos_;
};
class ObGMemstoreAllocator
class ObMemstoreAllocator
{
public:
DEFINE_CUSTOM_FUNC_FOR_THROTTLE(Memstore);
typedef ObSpinLock Lock;
typedef ObSpinLockGuard LockGuard;
typedef ObGMemstoreAllocator GAlloc;
typedef ObMemstoreAllocator GAlloc;
typedef ObFifoArena Arena;
typedef ObHandleList HandleList;
typedef HandleList::Handle ListHandle;
@ -70,7 +83,7 @@ public:
host_ = NULL;
}
int64_t get_group_id() const { return id_ < 0? INT64_MAX: (id_ % Arena::MAX_CACHED_GROUP_COUNT); }
int init(uint64_t tenant_id);
int init();
void set_host(GAlloc* host) { host_ = host; }
void destroy() {
if (NULL != host_) {
@ -109,19 +122,18 @@ public:
};
public:
ObGMemstoreAllocator():
lock_(common::ObLatchIds::MEMSTORE_ALLOCATOR_LOCK),
hlist_(),
arena_() {}
~ObGMemstoreAllocator() {}
ObMemstoreAllocator()
: throttle_tool_(nullptr), lock_(common::ObLatchIds::MEMSTORE_ALLOCATOR_LOCK), hlist_(), arena_() {}
~ObMemstoreAllocator() {}
public:
int init(uint64_t tenant_id)
{
return arena_.init(tenant_id);
}
void init_handle(AllocHandle& handle, uint64_t tenant_id);
int init();
int start() { return OB_SUCCESS; }
void stop() {}
void wait() {}
void destroy() {}
void init_handle(AllocHandle& handle);
void destroy_handle(AllocHandle& handle);
void* alloc(AllocHandle& handle, int64_t size);
void* alloc(AllocHandle& handle, int64_t size, const int64_t expire_ts = 0);
void set_frozen(AllocHandle& handle);
template<typename Func>
int for_each(Func& f, const bool reverse=false) {
@ -145,6 +157,7 @@ public:
int64_t get_max_cached_memstore_size() const {
return arena_.get_max_cached_memstore_size();
}
int64_t hold() const { return arena_.hold(); }
int64_t get_total_memstore_used() const { return arena_.hold(); }
int64_t get_frozen_memstore_pos() const {
int64_t hazard = hlist_.hazard();
@ -167,34 +180,21 @@ public:
(void)for_each(logger, true /* reverse */);
}
}
public:
int set_memstore_threshold(uint64_t tenant_id);
bool need_do_writing_throttle() const {return arena_.need_do_writing_throttle();}
bool check_clock_over_seq(const int64_t seq)
{
return arena_.check_clock_over_seq(seq);
}
int64_t get_clock()
{
return arena_.get_clock();
}
int64_t expected_wait_time(int64_t seq) const
{
return arena_.expected_wait_time(seq);
}
void skip_clock(const int64_t skip_size)
{
arena_.skip_clock(skip_size);
}
int set_memstore_threshold();
private:
int64_t nway_per_group();
int set_memstore_threshold_without_lock(uint64_t tenant_id);
int set_memstore_threshold_without_lock();
private:
share::TxShareThrottleTool *throttle_tool_;
Lock lock_;
HandleList hlist_;
Arena arena_;
};
}; // end namespace common
}; // end namespace oceanbase
}; // namespace share
}; // namespace oceanbase
#endif /* OCEANBASE_ALLOCATOR_OB_GMEMSTORE_ALLOCATOR_H_ */

View File

@ -1,131 +0,0 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#define USING_LOG_PREFIX SHARE
#include "share/allocator/ob_memstore_allocator_mgr.h"
#include "share/allocator/ob_gmemstore_allocator.h"
#include "lib/alloc/alloc_struct.h"
using namespace oceanbase::lib;
using namespace oceanbase::common;
// Server-wide memstore usage: sum of bytes held by every tenant's arena,
// maintained in the process-wide ObFifoArena::total_hold_ counter.
int64_t ObMemstoreAllocatorMgr::get_all_tenants_memstore_used()
{
  return ATOMIC_LOAD(&ObFifoArena::total_hold_);
}
// Constructor: zero state and remember the global malloc allocator used
// when creating per-tenant allocators.
ObMemstoreAllocatorMgr::ObMemstoreAllocatorMgr()
  : is_inited_(false),
    allocators_(),
    allocator_map_(),
    malloc_allocator_(NULL),
    all_tenants_memstore_used_(0)
{
  set_malloc_allocator(ObMallocAllocator::get_instance());
}
// Destructor: allocators are process-lifetime, nothing to release here.
ObMemstoreAllocatorMgr::~ObMemstoreAllocatorMgr()
{}
// Creates the hash map used for tenants whose id exceeds the preserved
// fixed-size array range.
int ObMemstoreAllocatorMgr::init()
{
  int ret = OB_SUCCESS;
  if (OB_FAIL(allocator_map_.create(ALLOCATOR_MAP_BUCKET_NUM, ObModIds::OB_MEMSTORE_ALLOCATOR))) {
    LOG_WARN("failed to create allocator_map", K(ret));
  } else {
    is_inited_ = true;
  }
  return ret;
}
// Returns (lazily creating) the memstore allocator for `tenant_id`.
// Small tenant ids use a lock-free CAS slot in allocators_; larger ids go
// through allocator_map_. Concurrent creators race benignly: the loser
// destroys its allocator and adopts the winner's.
int ObMemstoreAllocatorMgr::get_tenant_memstore_allocator(const uint64_t tenant_id,
                                                          TAllocator *&out_allocator)
{
  int ret = OB_SUCCESS;
  if (OB_UNLIKELY(tenant_id <= 0)) {
    ret = OB_INVALID_ARGUMENT;
    LOG_WARN("invalid tenant id", K(tenant_id), K(ret));
  } else if (tenant_id < PRESERVED_TENANT_COUNT) {
    // fast path: fixed-size array indexed directly by tenant id.
    if (NULL == (out_allocator = ATOMIC_LOAD(&allocators_[tenant_id]))) {
      ObMemAttr attr;
      attr.tenant_id_ = OB_SERVER_TENANT_ID;
      attr.label_ = ObModIds::OB_MEMSTORE_ALLOCATOR;
      SET_USE_500(attr);
      void *buf = ob_malloc(sizeof(TAllocator), attr);
      if (NULL != buf) {
        TAllocator *allocator = new (buf) TAllocator();
        bool cas_succeed = false;
        if (OB_SUCC(ret)) {
          if (OB_FAIL(allocator->init(tenant_id))) {
            LOG_WARN("failed to init tenant memstore allocator", K(tenant_id), K(ret));
          } else {
            LOG_INFO("succ to init tenant memstore allocator", K(tenant_id), K(ret));
            cas_succeed = ATOMIC_BCAS(&allocators_[tenant_id], NULL, allocator);
          }
        }
        if (OB_FAIL(ret) || !cas_succeed) {
          // lost the race (or failed init): drop ours, take the published one.
          allocator->~TAllocator();
          ob_free(buf);
          out_allocator = ATOMIC_LOAD(&allocators_[tenant_id]);
        } else {
          out_allocator = allocator;
        }
      } else {
        ret = OB_ALLOCATE_MEMORY_FAILED;
        LOG_WARN("failed to allocate memory", K(tenant_id), K(ret));
      }
    }
  } else if (OB_FAIL(allocator_map_.get_refactored(tenant_id, out_allocator))) {
    if (OB_HASH_NOT_EXIST != ret) {
      LOG_WARN("failed to get tenant memstore allocator", K(tenant_id), K(ret));
    } else {
      // slow path: create and publish through the hash map.
      ret = OB_SUCCESS;
      ObMemAttr attr;
      attr.tenant_id_ = OB_SERVER_TENANT_ID;
      attr.label_ = ObModIds::OB_MEMSTORE_ALLOCATOR;
      void *buf = ob_malloc(sizeof(TAllocator), attr);
      if (NULL != buf) {
        TAllocator *new_allocator = new (buf) TAllocator();
        if (OB_FAIL(new_allocator->init(tenant_id))) {
          LOG_WARN("failed to init tenant memstore allocator", K(tenant_id), K(ret));
        } else if (OB_FAIL(allocator_map_.set_refactored(tenant_id, new_allocator))) {
          if (OB_HASH_EXIST == ret) {
            // another thread inserted first: adopt its allocator.
            if (OB_FAIL(allocator_map_.get_refactored(tenant_id, out_allocator))) {
              LOG_WARN("failed to get refactor", K(tenant_id), K(ret));
            }
          } else {
            LOG_WARN("failed to set refactor", K(tenant_id), K(ret));
          }
          new_allocator->~TAllocator();
          ob_free(buf);
        } else {
          out_allocator = new_allocator;
        }
      } else {
        ret = OB_ALLOCATE_MEMORY_FAILED;
        LOG_WARN("failed to allocate memory", K(tenant_id), K(ret));
      }
    }
  } else if (OB_ISNULL(out_allocator)) {
    ret = OB_ERR_UNEXPECTED;
    LOG_WARN("got allocator is NULL", K(tenant_id), K(ret));
  }
  return ret;
}
// Meyers-singleton accessor: the local static is initialized exactly once,
// thread-safely (guaranteed since C++11), on first call.
ObMemstoreAllocatorMgr &ObMemstoreAllocatorMgr::get_instance()
{
  static ObMemstoreAllocatorMgr singleton;
  return singleton;
}

View File

@ -1,57 +0,0 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#ifndef _OB_SHARE_MEMSTORE_ALLOCATOR_MGR_H_
#define _OB_SHARE_MEMSTORE_ALLOCATOR_MGR_H_
#include "lib/allocator/ob_allocator.h"
#include "lib/alloc/alloc_func.h"
#include "lib/hash/ob_hashmap.h"
namespace oceanbase
{
namespace lib
{
class ObMallocAllocator;
}
namespace common
{
class ObGMemstoreAllocator;
// Process-wide registry of per-tenant memstore allocators.
// Small tenant ids (< PRESERVED_TENANT_COUNT) get a fixed array slot;
// larger ids fall back to a hash map created in init().
class ObMemstoreAllocatorMgr
{
public:
  typedef ObGMemstoreAllocator TAllocator;
  typedef common::hash::ObHashMap<uint64_t, TAllocator *> TenantMemostoreAllocatorMap;
  ObMemstoreAllocatorMgr();
  virtual ~ObMemstoreAllocatorMgr();
  // Creates the overflow hash map; call once before any lookup.
  int init();
  // Returns (creating lazily on demand) the allocator for tenant_id.
  int get_tenant_memstore_allocator(uint64_t tenant_id, TAllocator *&out_allocator);
  // Aggregate memstore hold across all tenants (global ObFifoArena counter).
  int64_t get_all_tenants_memstore_used();
  static ObMemstoreAllocatorMgr &get_instance();
public:
  void set_malloc_allocator(lib::ObMallocAllocator *malloc_allocator) { malloc_allocator_ = malloc_allocator; }
private:
  static const uint64_t PRESERVED_TENANT_COUNT = 10000;
  static const uint64_t ALLOCATOR_MAP_BUCKET_NUM = 64;
  bool is_inited_;
  TAllocator *allocators_[PRESERVED_TENANT_COUNT];
  TenantMemostoreAllocatorMap allocator_map_;
  lib::ObMallocAllocator *malloc_allocator_;
  int64_t all_tenants_memstore_used_;
private:
  DISALLOW_COPY_AND_ASSIGN(ObMemstoreAllocatorMgr);
}; // end of class ObMemstoreAllocatorMgr
} // end of namespace common
} // end of namespace oceanbase
#endif /* _OB_SHARE_MEMSTORE_ALLOCATOR_MGR_H_ */

View File

@ -0,0 +1,91 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#define USING_LOG_PREFIX SHARE
#include "ob_shared_memory_allocator_mgr.h"
namespace oceanbase {
namespace share {
#define THROTTLE_CONFIG_LOG(ALLOCATOR, LIMIT, TRIGGER_PERCENTAGE, MAX_DURATION) \
"Unit Name", \
ALLOCATOR::throttle_unit_name(), \
"Memory Limit(MB)", \
LIMIT / 1024 / 1024, \
"Throttle Trigger(MB)", \
LIMIT * trigger_percentage / 100 / 1024 / 1024, \
"Trigger Percentage", \
TRIGGER_PERCENTAGE, \
"Max Alloc Duration", \
MAX_DURATION
void ObSharedMemAllocMgr::update_throttle_config()
{
int64_t tenant_id = MTL_ID();
int64_t total_memory = lib::get_tenant_memory_limit(tenant_id);
omt::ObTenantConfigGuard tenant_config(TENANT_CONF(MTL_ID()));
if (tenant_config.is_valid()) {
int64_t share_mem_limit_percentage = tenant_config->_tx_share_memory_limit_percentage;
int64_t memstore_limit_percentage = tenant_config->memstore_limit_percentage;
int64_t tx_data_limit_percentage = tenant_config->_tx_data_memory_limit_percentage;
int64_t mds_limit_percentage = tenant_config->_mds_memory_limit_percentage;
int64_t trigger_percentage = tenant_config->writing_throttling_trigger_percentage;
int64_t max_duration = tenant_config->writing_throttling_maximum_duration;
if (0 == share_mem_limit_percentage) {
// 0 means use (memstore_limit + 10)
share_mem_limit_percentage = memstore_limit_percentage + 10;
}
int64_t share_mem_limit = total_memory * share_mem_limit_percentage / 100LL;
int64_t memstore_limit = total_memory * memstore_limit_percentage / 100LL;
int64_t tx_data_limit = total_memory * tx_data_limit_percentage / 100LL;
int64_t mds_limit = total_memory * mds_limit_percentage / 100LL;
(void)share_resource_throttle_tool_.update_throttle_config<FakeAllocatorForTxShare>(
share_mem_limit, trigger_percentage, max_duration);
(void)share_resource_throttle_tool_.update_throttle_config<ObMemstoreAllocator>(
memstore_limit, trigger_percentage, max_duration);
(void)share_resource_throttle_tool_.update_throttle_config<ObTenantTxDataAllocator>(
tx_data_limit, trigger_percentage, max_duration);
(void)share_resource_throttle_tool_.update_throttle_config<ObTenantMdsAllocator>(
mds_limit, trigger_percentage, max_duration);
SHARE_LOG(INFO,
"[Throttle] Update Config",
K(share_mem_limit_percentage),
K(memstore_limit_percentage),
K(tx_data_limit_percentage),
K(mds_limit_percentage),
K(trigger_percentage),
K(max_duration));
SHARE_LOG(INFO,
"[Throttle] Update Config",
THROTTLE_CONFIG_LOG(FakeAllocatorForTxShare, share_mem_limit, trigger_percentage, max_duration));
SHARE_LOG(INFO,
"[Throttle] Update Config",
THROTTLE_CONFIG_LOG(ObMemstoreAllocator, memstore_limit, trigger_percentage, max_duration));
SHARE_LOG(INFO,
"[Throttle] Update Config",
THROTTLE_CONFIG_LOG(ObTenantTxDataAllocator, tx_data_limit, trigger_percentage, max_duration));
SHARE_LOG(INFO,
"[Throttle] Update Config",
THROTTLE_CONFIG_LOG(ObTenantMdsAllocator, mds_limit, trigger_percentage, max_duration));
} else {
SHARE_LOG_RET(WARN, OB_INVALID_CONFIG, "invalid tenant config", K(tenant_id), K(total_memory));
}
}
#undef UPDATE_BY_CONFIG_NAME
} // namespace share
} // namespace oceanbase

View File

@ -0,0 +1,83 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#ifndef OCEANBASE_ALLOCATOR_OB_SHARED_MEMORY_ALLOCATOR_MGR_H_
#define OCEANBASE_ALLOCATOR_OB_SHARED_MEMORY_ALLOCATOR_MGR_H_
#include "share/allocator/ob_memstore_allocator.h"
#include "share/allocator/ob_tx_data_allocator.h"
#include "share/allocator/ob_mds_allocator.h"
#include "share/throttle/ob_share_resource_throttle_tool.h"
namespace oceanbase {
namespace share {
// Tenant-level (MTL) owner of the throttled "tx share" allocators: memstore,
// tx data and mds. Wires all three into one TxShareThrottleTool so the
// resources are throttled under a common umbrella limit.
class ObSharedMemAllocMgr {
public:
  ObSharedMemAllocMgr()
      : share_resource_throttle_tool_(),
        memstore_allocator_(),
        tx_data_allocator_(),
        mds_allocator_() {}
  ObSharedMemAllocMgr(ObSharedMemAllocMgr &rhs) = delete;
  ObSharedMemAllocMgr &operator=(ObSharedMemAllocMgr &rhs) = delete;
  ~ObSharedMemAllocMgr() {}
  // MTL hook; assumes the framework passes a non-null, constructed instance.
  static int mtl_init(ObSharedMemAllocMgr *&shared_mem_alloc_mgr) { return shared_mem_alloc_mgr->init(); }
  // Init order matters: the three allocators must be initialized before the
  // throttle tool takes their addresses.
  int init()
  {
    int ret = OB_SUCCESS;
    if (OB_FAIL(tx_data_allocator_.init("TX_DATA_SLICE"))) {
      SHARE_LOG(ERROR, "init tx data allocator failed", KR(ret));
    } else if (OB_FAIL(memstore_allocator_.init())) {
      SHARE_LOG(ERROR, "init memstore allocator failed", KR(ret));
    } else if (OB_FAIL(mds_allocator_.init())) {
      SHARE_LOG(ERROR, "init mds allocator failed", KR(ret));
    } else if (OB_FAIL(
                   share_resource_throttle_tool_.init(&memstore_allocator_, &tx_data_allocator_, &mds_allocator_))) {
      SHARE_LOG(ERROR, "init share resource throttle tool failed", KR(ret));
    } else {
      // Only the umbrella (tx-share) unit adapts its limit at runtime.
      share_resource_throttle_tool_.enable_adaptive_limit<FakeAllocatorForTxShare>();
      SHARE_LOG(INFO, "finish init mtl share mem allocator mgr", K(MTL_ID()), KP(this));
    }
    return ret;
  }
  int start() { return OB_SUCCESS; }
  void stop() {}
  void wait() {}
  void destroy() {}
  // Re-reads tenant config and refreshes all four throttle units.
  void update_throttle_config();
  ObMemstoreAllocator &memstore_allocator() { return memstore_allocator_; }
  ObTenantTxDataAllocator &tx_data_allocator() { return tx_data_allocator_; }
  ObTenantMdsAllocator &mds_allocator() { return mds_allocator_; }
  TxShareThrottleTool &share_resource_throttle_tool() { return share_resource_throttle_tool_; }
private:
  void update_share_throttle_config_(const int64_t total_memory, omt::ObTenantConfigGuard &config);
  void update_memstore_throttle_config_(const int64_t total_memory, omt::ObTenantConfigGuard &config);
  void update_tx_data_throttle_config_(const int64_t total_memory, omt::ObTenantConfigGuard &config);
  void update_mds_throttle_config_(const int64_t total_memory, omt::ObTenantConfigGuard &config);
private:
  TxShareThrottleTool share_resource_throttle_tool_;
  ObMemstoreAllocator memstore_allocator_;
  ObTenantTxDataAllocator tx_data_allocator_;
  ObTenantMdsAllocator mds_allocator_;
};
} // namespace share
} // namespace oceanbase
#endif

View File

@ -10,13 +10,15 @@
* See the Mulan PubL v2 for more details.
*/
#include "share/allocator/ob_tenant_mutil_allocator_mgr.h"
#include "lib/allocator/ob_malloc.h"
#include "share/config/ob_server_config.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h"
#include "share/allocator/ob_tenant_mutil_allocator.h"
#include "ob_gmemstore_allocator.h"
#include "ob_memstore_allocator_mgr.h"
#include "observer/omt/ob_tenant_config_mgr.h"
#include "share/allocator/ob_tenant_mutil_allocator_mgr.h"
#include "share/config/ob_server_config.h"
#include "share/rc/ob_tenant_base.h"
#include "ob_memstore_allocator.h"
using namespace oceanbase::share;
namespace oceanbase
{
@ -405,15 +407,14 @@ int ObTenantMutilAllocatorMgr::update_tenant_mem_limit(const share::TenantUnits
K(tenant_id), K(nway), K(new_tma_limit), K(pre_tma_limit), K(cur_memstore_limit_percent), K(tenant_config));
}
//update memstore threshold of GmemstoreAllocator
ObGMemstoreAllocator* memstore_allocator = NULL;
if (OB_TMP_FAIL(ObMemstoreAllocatorMgr::get_instance().get_tenant_memstore_allocator(tenant_id, memstore_allocator))) {
} else if (OB_ISNULL(memstore_allocator)) {
OB_LOG(WARN, "get_tenant_memstore_allocator failed", K(tenant_id));
} else if (OB_FAIL(memstore_allocator->set_memstore_threshold(tenant_id))) {
OB_LOG(WARN, "failed to set_memstore_threshold of memstore allocator", K(tenant_id), K(ret));
} else {
OB_LOG(INFO, "succ to set_memstore_threshold of memstore allocator", K(tenant_id), K(ret));
//update memstore threshold of MemstoreAllocator
MTL_SWITCH(tenant_id) {
ObMemstoreAllocator &memstore_allocator = MTL(ObSharedMemAllocMgr *)->memstore_allocator();
if (OB_FAIL(memstore_allocator.set_memstore_threshold())) {
OB_LOG(WARN, "failed to set_memstore_threshold of memstore allocator", K(tenant_id), K(ret));
} else {
OB_LOG(INFO, "succ to set_memstore_threshold of memstore allocator", K(tenant_id), K(ret));
}
}
}
}

View File

@ -0,0 +1,113 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#define USING_LOG_PREFIX SHARE
#include "ob_tx_data_allocator.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h"
#include "share/rc/ob_tenant_base.h"
#include "storage/tx/ob_tx_data_define.h"
#include "storage/tx_storage/ob_tenant_freezer.h"
namespace oceanbase {
namespace share {
// Size of one accounting unit used by the throttle framework for tx data.
int64_t ObTenantTxDataAllocator::resource_unit_size()
{
  // One unit == one normal malloc block (8KB).
  return OB_MALLOC_NORMAL_BLOCK_SIZE;
}
// Compute the initial throttle config for the tx data allocator from the
// tenant config, falling back to compile-time defaults when it is invalid.
// @param[out] resource_limit      max tx data memory, in bytes
// @param[out] trigger_percentage  throttling kicks in above limit * pct / 100
// @param[out] max_duration        longest one allocation may be throttled (us)
void ObTenantTxDataAllocator::init_throttle_config(int64_t &resource_limit,
                                                   int64_t &trigger_percentage,
                                                   int64_t &max_duration)
{
  int64_t total_memory = lib::get_tenant_memory_limit(MTL_ID());
  // Use tenant config to init throttle config
  omt::ObTenantConfigGuard tenant_config(TENANT_CONF(MTL_ID()));
  if (tenant_config.is_valid()) {
    resource_limit = total_memory * tenant_config->_tx_data_memory_limit_percentage / 100LL;
    trigger_percentage = tenant_config->writing_throttling_trigger_percentage;
    max_duration = tenant_config->writing_throttling_maximum_duration;
  } else {
    SHARE_LOG_RET(WARN, OB_INVALID_CONFIG, "init throttle config with default value");
    resource_limit = total_memory * TX_DATA_LIMIT_PERCENTAGE / 100;
    trigger_percentage = TX_DATA_THROTTLE_TRIGGER_PERCENTAGE;
    max_duration = TX_DATA_THROTTLE_MAX_DURATION;
  }
}
// Throttle-framework hook for adaptively adjusting the limit at runtime.
// Deliberately a no-op for tx data: its limit comes only from config
// (see init_throttle_config / update_throttle_config).
void ObTenantTxDataAllocator::adaptive_update_limit(const int64_t holding_size,
                                                    const int64_t config_specify_resource_limit,
                                                    int64_t &resource_limit,
                                                    int64_t &last_update_limit_ts,
                                                    bool &is_updated)
{
  // do nothing
}
// Initialize the tx data slice allocator and hook it up to the tenant-level
// share-resource throttle tool (fetched from the MTL ObSharedMemAllocMgr).
// @param label  memory label used for slice allocations
// @return OB_SUCCESS, OB_INIT_TWICE on repeated init, or an init error.
int ObTenantTxDataAllocator::init(const char *label)
{
  int ret = OB_SUCCESS;
  ObMemAttr mem_attr;
  mem_attr.label_ = label;
  mem_attr.tenant_id_ = MTL_ID();
  mem_attr.ctx_id_ = ObCtxIds::TX_DATA_TABLE;
  ObSharedMemAllocMgr *share_mem_alloc_mgr = MTL(ObSharedMemAllocMgr *);
  throttle_tool_ = &(share_mem_alloc_mgr->share_resource_throttle_tool());
  if (IS_INIT){
    ret = OB_INIT_TWICE;
    // fixed copy-paste: this is the tx data allocator, not the mds allocator
    SHARE_LOG(WARN, "init tenant tx data allocator twice", KR(ret), KPC(this));
  } else if (OB_ISNULL(throttle_tool_)) {
    ret = OB_ERR_UNEXPECTED;
    SHARE_LOG(WARN, "throttle tool is unexpected null", KP(throttle_tool_), KP(share_mem_alloc_mgr));
  } else if (OB_FAIL(slice_allocator_.init(
                 storage::TX_DATA_SLICE_SIZE, OB_MALLOC_NORMAL_BLOCK_SIZE, block_alloc_, mem_attr))) {
    SHARE_LOG(WARN, "init slice allocator failed", KR(ret));
  } else {
    slice_allocator_.set_nway(ObTenantTxDataAllocator::ALLOC_TX_DATA_MAX_CONCURRENCY);
    is_inited_ = true;
  }
  return ret;
}
// Mark the allocator uninitialized and purge extra cached blocks from the
// slice allocator (argument 0 keeps no spare blocks).
void ObTenantTxDataAllocator::reset()
{
  is_inited_ = false;
  slice_allocator_.purge_extra_cached_block(0);
}
// Allocate one tx data slice, optionally subject to write throttling.
// When throttled, the wait is skipped if any LS is currently freezing
// (freezing is what releases memory, so blocking would be counterproductive).
// @param enable_throttle  false bypasses throttling entirely
// @param abs_expire_time  absolute deadline passed to the throttle tool
// @return slice pointer, or NULL if the slice allocator is exhausted
void *ObTenantTxDataAllocator::alloc(const bool enable_throttle, const int64_t abs_expire_time)
{
  // do throttle if needed
  if (OB_LIKELY(enable_throttle)) {
    bool is_throttled = false;
    (void)throttle_tool_->alloc_resource<ObTenantTxDataAllocator>(
        storage::TX_DATA_SLICE_SIZE, abs_expire_time, is_throttled);
    if (OB_UNLIKELY(is_throttled)) {
      if (MTL(ObTenantFreezer *)->exist_ls_freezing()) {
        (void)throttle_tool_->skip_throttle<ObTenantTxDataAllocator>(storage::TX_DATA_SLICE_SIZE);
      } else {
        (void)throttle_tool_->do_throttle<ObTenantTxDataAllocator>(abs_expire_time);
      }
    }
  }
  // allocate memory
  void *res = slice_allocator_.alloc();
  return res;
}
} // namespace share
} // namespace oceanbase

View File

@ -0,0 +1,63 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#ifndef OCEANBASE_ALLOCATOR_OB_TX_DATA_ALLOCATOR_H_
#define OCEANBASE_ALLOCATOR_OB_TX_DATA_ALLOCATOR_H_
#include "lib/allocator/ob_slice_alloc.h"
#include "share/ob_delegate.h"
#include "share/throttle/ob_share_throttle_define.h"
namespace oceanbase {
namespace share {
// Slice allocator for tx data, integrated with the tenant's shared
// write-throttle tool. Allocation size is fixed (TX_DATA_SLICE_SIZE).
class ObTenantTxDataAllocator {
public:
  using SliceAllocator = ObSliceAlloc;

  // define some default value
  static const int64_t TX_DATA_LIMIT_PERCENTAGE = 20;
  static const int64_t TX_DATA_THROTTLE_TRIGGER_PERCENTAGE = 60;
  static const int64_t TX_DATA_THROTTLE_MAX_DURATION = 2LL * 60LL * 60LL * 1000LL * 1000LL;  // 2 hours
  static const int64_t ALLOC_TX_DATA_MAX_CONCURRENCY = 32;
  static const uint32_t THROTTLE_TX_DATA_INTERVAL = 20 * 1000;  // 20ms

  // The tx data memtable will trigger a freeze if its memory use is more than 2%
  static constexpr double TX_DATA_FREEZE_TRIGGER_PERCENTAGE = 2;

public:
  // Generates the throttle-unit hooks (name, unit size, config init, ...).
  DEFINE_CUSTOM_FUNC_FOR_THROTTLE(TxData);

public:
  ObTenantTxDataAllocator()
      : is_inited_(false), throttle_tool_(nullptr), block_alloc_(), slice_allocator_() {}
  ~ObTenantTxDataAllocator() { reset(); }
  // @param label  memory label for slice allocations
  int init(const char* label);
  // Allocates one slice; may block on write throttling (see .cpp).
  void *alloc(const bool enable_throttle = true, const int64_t abs_expire_time = 0);
  void reset();
  // Bytes currently held by the underlying block allocator.
  int64_t hold() const { return block_alloc_.hold(); }
  DELEGATE_WITH_RET(slice_allocator_, free, void);
  TO_STRING_KV(K(is_inited_), KP(throttle_tool_), KP(&block_alloc_), KP(&slice_allocator_));
private:
  bool is_inited_;
  TxShareThrottleTool *throttle_tool_;
  common::ObBlockAllocMgr block_alloc_;
  SliceAllocator slice_allocator_;
};
} // namespace share
} // namespace oceanbase
#endif

View File

@ -101,6 +101,106 @@ int64_t ObConfigFreezeTriggerIntChecker::get_write_throttle_trigger_percentage_(
return percent;
}
// Validate _tx_share_memory_limit_percentage. Accepts 0 (meaning "derive as
// memstore_limit_percentage + 10") or a value in (0, 100) that is >= each of
// the three sub-limits (memstore / tx data / mds), since the tx-share limit
// is their umbrella.
bool ObConfigTxShareMemoryLimitChecker::check(const uint64_t tenant_id, const ObAdminSetConfigItem &t)
{
  bool is_valid = false;
  int64_t value = ObConfigIntParser::get(t.value_.ptr(), is_valid);
  int64_t memstore_limit = 0;
  int64_t tx_data_limit = 0;
  int64_t mds_limit = 0;
  omt::ObTenantConfigGuard tenant_config(TENANT_CONF(tenant_id));
  if (tenant_config.is_valid()) {
    memstore_limit = tenant_config->memstore_limit_percentage;
    tx_data_limit = tenant_config->_tx_data_memory_limit_percentage;
    mds_limit = tenant_config->_mds_memory_limit_percentage;
  } else {
    is_valid = false;
    OB_LOG_RET(ERROR, OB_INVALID_CONFIG, "tenant config is invalid", K(tenant_id));
  }
  // is_valid here still reflects parse success; an unparsable value falls
  // straight through to the final warning.
  if (!is_valid) {
  } else if (0 == value) {
    // 0 is default value, which means (_tx_share_memory_limit_percentage = memstore_limit_percentage + 10)
    is_valid = true;
  } else if ((value > 0 && value < 100) && (memstore_limit <= value) && (tx_data_limit <= value) &&
             (mds_limit <= value)) {
    is_valid = true;
  } else {
    is_valid = false;
  }
  if (!is_valid) {
    OB_LOG_RET(WARN, OB_INVALID_CONFIG,
        "update _tx_share_memory_limit_percentage failed",
        "_tx_share_memory_limit_percentage", value,
        "memstore_limit_percentage", memstore_limit,
        "_tx_data_memory_limit_percentage", tx_data_limit,
        "_mds_memory_limit_percentage", mds_limit);
  }
  return is_valid;
}
// Shared helper for the memstore / tx data / mds limit checkers: a sub-limit
// must stay within (0, 100) and not exceed _tx_share_memory_limit_percentage.
// A tx-share limit of 0 means "derived from memstore limit", in which case any
// value is accepted here.
bool less_or_equal_tx_share_limit(const uint64_t tenant_id, const int64_t value)
{
  bool bool_ret = true;
  int64_t tx_share_limit = 0;
  omt::ObTenantConfigGuard tenant_config(TENANT_CONF(tenant_id));
  if (tenant_config.is_valid()) {
    tx_share_limit = tenant_config->_tx_share_memory_limit_percentage;
    if (0 == tx_share_limit) {
      // 0 is default value, which means (_tx_share_memory_limit_percentage = memstore_limit_percentage + 10)
      bool_ret = true;
    } else if (value > 0 && value < 100 && value <= tx_share_limit) {
      bool_ret = true;
    } else {
      bool_ret = false;
    }
  } else {
    bool_ret = false;
    OB_LOG_RET(ERROR, OB_INVALID_CONFIG, "tenant config is invalid", K(tenant_id));
  }
  return bool_ret;
}
// Validate memstore_limit_percentage: the value must parse as an integer AND
// stay within the tx-share umbrella limit (see less_or_equal_tx_share_limit).
bool ObConfigMemstoreLimitChecker::check(const uint64_t tenant_id, const obrpc::ObAdminSetConfigItem &t)
{
  bool is_valid = false;
  int64_t value = ObConfigIntParser::get(t.value_.ptr(), is_valid);
  // Previously the parse result was discarded, so an unparsable value could
  // be accepted whenever the tx-share limit was 0; require parse success.
  if (is_valid) {
    is_valid = less_or_equal_tx_share_limit(tenant_id, value);
  }
  return is_valid;
}
// Validate _tx_data_memory_limit_percentage: must parse as an integer AND
// stay within the tx-share umbrella limit (see less_or_equal_tx_share_limit).
bool ObConfigTxDataLimitChecker::check(const uint64_t tenant_id, const obrpc::ObAdminSetConfigItem &t)
{
  bool is_valid = false;
  int64_t value = ObConfigIntParser::get(t.value_.ptr(), is_valid);
  // Previously the parse result was discarded, so an unparsable value could
  // be accepted whenever the tx-share limit was 0; require parse success.
  if (is_valid) {
    is_valid = less_or_equal_tx_share_limit(tenant_id, value);
  }
  return is_valid;
}
// Validate _mds_memory_limit_percentage: must parse as an integer AND
// stay within the tx-share umbrella limit (see less_or_equal_tx_share_limit).
bool ObConfigMdsLimitChecker::check(const uint64_t tenant_id, const obrpc::ObAdminSetConfigItem &t)
{
  bool is_valid = false;
  int64_t value = ObConfigIntParser::get(t.value_.ptr(), is_valid);
  // Previously the parse result was discarded, so an unparsable value could
  // be accepted whenever the tx-share limit was 0; require parse success.
  if (is_valid) {
    is_valid = less_or_equal_tx_share_limit(tenant_id, value);
  }
  return is_valid;
}
bool ObConfigWriteThrottleTriggerIntChecker::check(const uint64_t tenant_id,
const ObAdminSetConfigItem &t)
{

View File

@ -103,6 +103,38 @@ private:
static int64_t get_write_throttle_trigger_percentage_(const uint64_t tenant_id);
DISALLOW_COPY_AND_ASSIGN(ObConfigFreezeTriggerIntChecker);
};
// Checker for _tx_share_memory_limit_percentage (umbrella of memstore /
// tx data / mds limits); implementation in the matching .cpp.
class ObConfigTxShareMemoryLimitChecker
{
public:
  static bool check(const uint64_t tenant_id, const obrpc::ObAdminSetConfigItem &t);
private:
  DISALLOW_COPY_AND_ASSIGN(ObConfigTxShareMemoryLimitChecker);
};
// Checker for memstore_limit_percentage (must not exceed the tx-share limit).
class ObConfigMemstoreLimitChecker
{
public:
  static bool check(const uint64_t tenant_id, const obrpc::ObAdminSetConfigItem &t);
private:
  DISALLOW_COPY_AND_ASSIGN(ObConfigMemstoreLimitChecker);
};
// Checker for _tx_data_memory_limit_percentage (must not exceed the tx-share limit).
class ObConfigTxDataLimitChecker
{
public:
  static bool check(const uint64_t tenant_id, const obrpc::ObAdminSetConfigItem &t);
private:
  DISALLOW_COPY_AND_ASSIGN(ObConfigTxDataLimitChecker);
};
// Checker for _mds_memory_limit_percentage (must not exceed the tx-share limit).
class ObConfigMdsLimitChecker
{
public:
  static bool check(const uint64_t tenant_id, const obrpc::ObAdminSetConfigItem &t);
private:
  DISALLOW_COPY_AND_ASSIGN(ObConfigMdsLimitChecker);
};
class ObConfigWriteThrottleTriggerIntChecker
{

View File

@ -54,6 +54,10 @@ const char* const CLUSTER_ID = "cluster_id";
const char* const CLUSTER_NAME = "cluster";
const char* const FREEZE_TRIGGER_PERCENTAGE = "freeze_trigger_percentage";
const char* const WRITING_THROTTLEIUNG_TRIGGER_PERCENTAGE = "writing_throttling_trigger_percentage";
const char* const _TX_SHARE_MEMORY_LIMIT_PERCENTAGE = "_tx_share_memory_limit_percentage";
const char* const MEMSTORE_LIMIT_PERCENTAGE = "memstore_limit_percentage";
const char* const _TX_DATA_MEMORY_LIMIT_PERCENTAGE = "_tx_data_memory_limit_percentage";
const char* const _MDS_MEMORY_LIMIT_PERCENTAGE = "_mds_memory_limit_percentage";
const char* const COMPATIBLE = "compatible";
const char* const WEAK_READ_VERSION_REFRESH_INTERVAL = "weak_read_version_refresh_interval";
const char* const PARTITION_BALANCE_SCHEDULE_INTERVAL = "partition_balance_schedule_interval";

View File

@ -10,8 +10,8 @@
* See the Mulan PubL v2 for more details.
*/
#ifndef OCEANBASE_STORAGE_OB_TRANS_HASHMAP_
#define OCEANBASE_STORAGE_OB_TRANS_HASHMAP_
#ifndef OCEANBASE_STORAGE_OB_LIGHT_HASHMAP_
#define OCEANBASE_STORAGE_OB_LIGHT_HASHMAP_
#include "lib/ob_define.h"
#include "lib/utility/ob_print_utils.h"
@ -21,7 +21,7 @@
*
* 1. Define the hash value class
*
* class ObTransCtx : public ObTransHashLink<ObTransCtx>
* class ObTransCtx : public ObLightHashLink<ObTransCtx>
* {
* public:
* // hash key compare method
@ -49,7 +49,7 @@
* };
*
* 3. define the HashMap
* ObTransHashMap <ObTransID, ObTransCtx, ObTransCtxAlloc> CtxMap;
* ObLightHashMap <ObTransID, ObTransCtx, ObTransCtxAlloc> CtxMap;
*
* 4. Ref
* insert_and_get // create the hash value , ref = ref + 2;
@ -64,16 +64,13 @@
*
*/
namespace oceanbase
{
namespace transaction
{
template<typename Value>
class ObTransHashLink
{
namespace oceanbase {
namespace share {
template <typename Value>
class ObLightHashLink {
public:
ObTransHashLink() : ref_(0), prev_(NULL), next_(NULL) {}
~ObTransHashLink()
ObLightHashLink() : ref_(0), prev_(NULL), next_(NULL) {}
~ObLightHashLink()
{
ref_ = 0;
prev_ = NULL;
@ -82,7 +79,7 @@ public:
inline int inc_ref(int32_t x)
{
if (ref_ < 0) {
TRANS_LOG_RET(ERROR, common::OB_ERR_UNEXPECTED, "unexpected ref when inc ref", K(ref_));
SHARE_LOG_RET(ERROR, common::OB_ERR_UNEXPECTED, "unexpected ref when inc ref", K(ref_));
}
return ATOMIC_FAA(&ref_, x);
}
@ -90,7 +87,7 @@ public:
{
int32_t ref = ATOMIC_SAF(&ref_, x);
if (ref < 0) {
TRANS_LOG_RET(ERROR, common::OB_ERR_UNEXPECTED, "unexpected error", K(ref_));
SHARE_LOG_RET(ERROR, common::OB_ERR_UNEXPECTED, "unexpected error", K(ref_));
}
return ref;
}
@ -100,16 +97,13 @@ public:
Value *next_;
};
template<typename Key, typename Value, typename AllocHandle, typename LockType, int64_t BUCKETS_CNT = 64>
class ObTransHashMap
{
typedef common::ObSEArray<Value *, 32> ValueArray;
template <typename Key, typename Value, typename AllocHandle, typename LockType, int64_t BUCKETS_CNT = 64>
class ObLightHashMap {
typedef common::ObSEArray<Value *, 32> ValueArray;
public:
ObTransHashMap() : is_inited_(false), total_cnt_(0)
{
OB_ASSERT(BUCKETS_CNT > 0);
}
~ObTransHashMap() { destroy(); }
ObLightHashMap() : is_inited_(false), total_cnt_(0) { OB_ASSERT(BUCKETS_CNT > 0); }
~ObLightHashMap() { destroy(); }
int64_t count() const { return ATOMIC_LOAD(&total_cnt_); }
int64_t alloc_cnt() const { return alloc_handle_.get_alloc_cnt(); }
void reset()
@ -147,13 +141,13 @@ public:
if (OB_UNLIKELY(is_inited_)) {
ret = OB_INIT_TWICE;
TRANS_LOG(WARN, "ObTransHashMap init twice", K(ret));
SHARE_LOG(WARN, "ObLightHashMap init twice", K(ret));
} else {
// init bucket, init lock in bucket
for (int64_t i = 0 ; OB_SUCC(ret) && i < BUCKETS_CNT; ++i) {
for (int64_t i = 0; OB_SUCC(ret) && i < BUCKETS_CNT; ++i) {
if (OB_FAIL(buckets_[i].init(mem_attr))) {
TRANS_LOG(WARN, "ObTransHashMap bucket init fail", K(ret));
for (int64_t j = 0 ; j <= i; ++j) {
SHARE_LOG(WARN, "ObLightHashMap bucket init fail", K(ret));
for (int64_t j = 0; j <= i; ++j) {
buckets_[j].destroy();
}
}
@ -165,20 +159,18 @@ public:
return ret;
}
int insert_and_get(const Key &key, Value *value, Value **old_value)
{ return insert__(key, value, 2, old_value); }
int insert(const Key &key, Value *value)
{ return insert__(key, value, 1, 0); }
int insert_and_get(const Key &key, Value *value, Value **old_value) { return insert__(key, value, 2, old_value); }
int insert(const Key &key, Value *value) { return insert__(key, value, 1, 0); }
int insert__(const Key &key, Value *value, int ref, Value **old_value)
{
int ret = OB_SUCCESS;
if (IS_NOT_INIT) {
ret = OB_NOT_INIT;
TRANS_LOG(WARN, "ObTransHashMap not init", K(ret), KP(value));
SHARE_LOG(WARN, "ObLightHashMap not init", K(ret), KP(value));
} else if (!key.is_valid() || OB_ISNULL(value)) {
ret = OB_INVALID_ARGUMENT;
TRANS_LOG(WARN, "invalid argument", K(key), KP(value));
SHARE_LOG(WARN, "invalid argument", K(key), KP(value));
} else {
int64_t pos = key.hash() % BUCKETS_CNT;
BucketWLockGuard guard(buckets_[pos].lock_, get_itid());
@ -218,15 +210,14 @@ public:
if (IS_NOT_INIT) {
ret = OB_NOT_INIT;
TRANS_LOG(WARN, "ObTransHashMap not init", K(ret), KP(value));
SHARE_LOG(WARN, "ObLightHashMap not init", K(ret), KP(value));
} else if (!key.is_valid() || OB_ISNULL(value)) {
ret = OB_INVALID_ARGUMENT;
TRANS_LOG(ERROR, "invalid argument", K(key), KP(value));
SHARE_LOG(ERROR, "invalid argument", K(key), KP(value));
} else {
int64_t pos = key.hash() % BUCKETS_CNT;
BucketWLockGuard guard(buckets_[pos].lock_, get_itid());
if (buckets_[pos].next_ != value &&
(NULL == value->prev_ && NULL == value->next_)) {
if (buckets_[pos].next_ != value && (NULL == value->prev_ && NULL == value->next_)) {
// do nothing
} else {
del_from_bucket_(pos, value);
@ -262,10 +253,10 @@ public:
if (IS_NOT_INIT) {
ret = OB_NOT_INIT;
TRANS_LOG(WARN, "ObTransHashMap not init", K(ret), K(key));
SHARE_LOG(WARN, "ObLightHashMap not init", K(ret), K(key));
} else if (!key.is_valid()) {
ret = OB_INVALID_ARGUMENT;
TRANS_LOG(WARN, "invalid argument", K(key));
SHARE_LOG(WARN, "invalid argument", K(key));
} else {
Value *tmp_value = NULL;
int64_t pos = key.hash() % BUCKETS_CNT;
@ -301,16 +292,18 @@ public:
}
}
template <typename Function> int for_each(Function &fn)
template <typename Function>
int for_each(Function &fn)
{
int ret = common::OB_SUCCESS;
for (int64_t pos = 0 ; OB_SUCC(ret) && pos < BUCKETS_CNT; ++pos) {
for (int64_t pos = 0; OB_SUCC(ret) && pos < BUCKETS_CNT; ++pos) {
ret = for_each_in_one_bucket(fn, pos);
}
return ret;
}
template <typename Function> int for_each_in_one_bucket(Function& fn, int64_t bucket_pos)
template <typename Function>
int for_each_in_one_bucket(Function &fn, int64_t bucket_pos)
{
int ret = common::OB_SUCCESS;
if (bucket_pos < 0 || bucket_pos >= BUCKETS_CNT) {
@ -318,7 +311,7 @@ public:
} else {
ValueArray array;
if (OB_FAIL(generate_value_arr_(bucket_pos, array))) {
TRANS_LOG(WARN, "generate value array error", K(ret));
SHARE_LOG(WARN, "generate value array error", K(ret));
} else {
const int64_t cnt = array.count();
for (int64_t i = 0; i < cnt; ++i) {
@ -328,29 +321,28 @@ public:
if (0 == array.at(i)->dec_ref(1)) {
alloc_handle_.free_value(array.at(i));
}
}
}
}
return ret;
}
template <typename Function> int remove_if(Function &fn)
template <typename Function>
int remove_if(Function &fn)
{
int ret = common::OB_SUCCESS;
ValueArray array;
for (int64_t pos = 0 ; pos < BUCKETS_CNT; ++pos) {
for (int64_t pos = 0; pos < BUCKETS_CNT; ++pos) {
array.reset();
if (OB_FAIL(generate_value_arr_(pos, array))) {
TRANS_LOG(WARN, "generate value array error", K(ret));
SHARE_LOG(WARN, "generate value array error", K(ret));
} else {
const int64_t cnt = array.count();
for (int64_t i = 0; i < cnt; ++i) {
if (fn(array.at(i))) {
BucketWLockGuard guard(buckets_[pos].lock_, get_itid());
if (buckets_[pos].next_ != array.at(i)
&& (NULL == array.at(i)->prev_ && NULL == array.at(i)->next_)) {
if (buckets_[pos].next_ != array.at(i) && (NULL == array.at(i)->prev_ && NULL == array.at(i)->next_)) {
// do nothing
} else {
del_from_bucket_(pos, array.at(i));
@ -375,7 +367,7 @@ public:
while (OB_SUCC(ret) && OB_NOT_NULL(val)) {
val->inc_ref(1);
if (OB_FAIL(arr.push_back(val))) {
TRANS_LOG(WARN, "value array push back error", K(ret));
SHARE_LOG(WARN, "value array push back error", K(ret));
val->dec_ref(1);
}
val = val->next_;
@ -406,52 +398,41 @@ public:
}
}
int64_t get_total_cnt() {
return ATOMIC_LOAD(&total_cnt_);
}
int64_t get_total_cnt() { return ATOMIC_LOAD(&total_cnt_); }
static int64_t get_buckets_cnt() { return BUCKETS_CNT; }
static int64_t get_buckets_cnt() {
return BUCKETS_CNT;
}
private:
struct ObTransHashHeader
{
struct ObLightHashHeader {
Value *next_;
Value *hot_cache_val_;
LockType lock_;
ObTransHashHeader() : next_(NULL), hot_cache_val_(NULL) {}
~ObTransHashHeader() { destroy(); }
int init(const lib::ObMemAttr &mem_attr)
{
return lock_.init(mem_attr);
}
ObLightHashHeader() : next_(NULL), hot_cache_val_(NULL) {}
~ObLightHashHeader() { destroy(); }
int init(const lib::ObMemAttr &mem_attr) { return lock_.init(mem_attr); }
void reset()
{
next_ = NULL;
hot_cache_val_ = NULL;
lock_.destroy();
}
void destroy()
{
reset();
}
void destroy() { reset(); }
};
// thread local node
class Node
{
class Node {
public:
Node() : thread_id_(-1) {}
void reset() { thread_id_ = -1; }
void set_thread_id(const int64_t thread_id) { thread_id_ = thread_id; }
uint64_t get_thread_id() const { return thread_id_; }
private:
uint64_t thread_id_;
};
class BucketRLockGuard
{
class BucketRLockGuard {
public:
explicit BucketRLockGuard(const LockType &lock, const uint64_t thread_id)
: lock_(const_cast<LockType &>(lock)), ret_(OB_SUCCESS)
@ -459,26 +440,27 @@ private:
if (OB_UNLIKELY(OB_SUCCESS != (ret_ = lock_.rdlock()))) {
COMMON_LOG_RET(WARN, ret_, "Fail to read lock, ", K_(ret));
} else {
ObTransHashMap::get_thread_node().set_thread_id(thread_id);
ObLightHashMap::get_thread_node().set_thread_id(thread_id);
}
}
~BucketRLockGuard()
{
if (OB_LIKELY(OB_SUCCESS == ret_)) {
lock_.rdunlock();
ObTransHashMap::get_thread_node().reset();
ObLightHashMap::get_thread_node().reset();
}
}
inline int get_ret() const { return ret_; }
private:
LockType &lock_;
int ret_;
private:
DISALLOW_COPY_AND_ASSIGN(BucketRLockGuard);
};
class BucketWLockGuard
{
class BucketWLockGuard {
public:
explicit BucketWLockGuard(const LockType &lock, const uint64_t thread_id)
: lock_(const_cast<LockType &>(lock)), ret_(OB_SUCCESS), locked_(false)
@ -486,13 +468,13 @@ private:
// no need to lock
if (get_itid() == get_thread_node().get_thread_id()) {
locked_ = false;
TRANS_LOG_RET(ERROR, common::OB_ERR_UNEXPECTED, "unexpected thread status", K(thread_id));
SHARE_LOG_RET(ERROR, common::OB_ERR_UNEXPECTED, "unexpected thread status", K(thread_id));
} else {
if (OB_UNLIKELY(OB_SUCCESS != (ret_ = lock_.wrlock()))) {
COMMON_LOG_RET(WARN, ret_, "Fail to write lock, ", K_(ret));
} else {
locked_ = true;
ObTransHashMap::get_thread_node().set_thread_id(thread_id);
ObLightHashMap::get_thread_node().set_thread_id(thread_id);
}
}
}
@ -500,14 +482,16 @@ private:
{
if (locked_ && OB_LIKELY(OB_SUCCESS == ret_)) {
lock_.wrunlock();
ObTransHashMap::get_thread_node().reset();
ObLightHashMap::get_thread_node().reset();
}
}
inline int get_ret() const { return ret_; }
private:
LockType &lock_;
int ret_;
bool locked_;
private:
DISALLOW_COPY_AND_ASSIGN(BucketWLockGuard);
};
@ -519,11 +503,11 @@ private:
}
private:
// sizeof(ObTransHashMap) = BUCKETS_CNT * sizeof(LockType);
// sizeof(ObLightHashMap) = BUCKETS_CNT * sizeof(LockType);
// sizeof(SpinRWLock) = 20B;
// sizeof(QsyncLock) = 4K;
bool is_inited_;
ObTransHashHeader buckets_[BUCKETS_CNT];
ObLightHashHeader buckets_[BUCKETS_CNT];
int64_t total_cnt_;
#ifndef NDEBUG
public:
@ -531,6 +515,6 @@ public:
AllocHandle alloc_handle_;
};
}
}
#endif // OCEANBASE_STORAGE_OB_TRANS_HASHMAP_
} // namespace share
} // namespace oceanbase
#endif // OCEANBASE_STORAGE_OB_LIGHT_HASHMAP_

View File

@ -371,10 +371,25 @@ DEF_INT(memstore_limit_percentage, OB_TENANT_PARAMETER, "50", "(0, 100)",
DEF_INT(freeze_trigger_percentage, OB_TENANT_PARAMETER, "20", "(0, 100)",
"the threshold of the size of the mem store when freeze will be triggered. Rang:(0,100)",
ObParameterAttr(Section::TENANT, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));
// Tenant-level knob: percentage of the memstore limit at which write throttling
// kicks in; 100 disables throttling entirely.
// Fix: "Rang" -> "Range" in the user-visible description.
DEF_INT(writing_throttling_trigger_percentage, OB_TENANT_PARAMETER, "60", "(0, 100]",
        "the threshold of the size of the mem store when writing_limit will be triggered. Range:(0,100]. setting 100 means turn off writing limit",
        ObParameterAttr(Section::TRANS, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));
DEF_INT(_tx_share_memory_limit_percentage, OB_TENANT_PARAMETER, "0", "[0, 100)",
"Used to control the percentage of tenant memory limit that multiple modules in the transaction layer can collectively use. "
"This primarily includes user data (MemTable), transaction data (TxData), etc. "
"When it is set to the default value of 0, it represents dynamic adaptive behavior, "
"which will be adjusted dynamically based on memstore_limit_percentage. The adjustment rule is: "
" _tx_share_memory_limit_percentage = memstore_limit_percentage + 10. "
"Range: [0, 100)",
ObParameterAttr(Section::TENANT, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));
DEF_INT(_tx_data_memory_limit_percentage, OB_TENANT_PARAMETER, "20", "(0, 100)",
"used to control the upper limit percentage of memory resources that the TxData module can use. "
"Range:(0, 100)",
ObParameterAttr(Section::TENANT, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));
DEF_INT(_mds_memory_limit_percentage, OB_TENANT_PARAMETER, "10", "(0, 100)",
"Used to control the upper limit percentage of memory resources that the Mds module can use. "
"Range:(0, 100)",
ObParameterAttr(Section::TENANT, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));
// Tenant-level knob: upper bound on how long a single write may be throttled.
// Fix: "writting" -> "writing" in the user-visible description.
DEF_TIME(writing_throttling_maximum_duration, OB_TENANT_PARAMETER, "2h", "[1s, 3d]",
         "maximum duration of writing throttling(in minutes), max value is 3 days",
         ObParameterAttr(Section::TRANS, Source::DEFAULT, EditLevel::DYNAMIC_EFFECTIVE));

View File

@ -203,6 +203,7 @@ class ObGlobalAutoIncService;
class ObDagWarningHistoryManager;
class ObTenantErrsimModuleMgr;
class ObTenantErrsimEventMgr;
class ObSharedMemAllocMgr;
class ObIndexUsageInfoMgr;
namespace schema
{
@ -243,6 +244,7 @@ using ObTableScanIteratorObjPool = common::ObServerObjectPool<oceanbase::storage
storage::mds::ObTenantMdsService*, \
storage::ObStorageLogger*, \
blocksstable::ObSharedMacroBlockMgr*, \
share::ObSharedMemAllocMgr*, \
transaction::ObTransService*, \
logservice::coordinator::ObLeaderCoordinator*, \
logservice::coordinator::ObFailureDetector*, \

View File

@ -0,0 +1,140 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#ifndef OCEABASE_SHARE_THROTTLE_OB_SHARE_RESOURCE_THROTTLE_TOOL_H
#define OCEABASE_SHARE_THROTTLE_OB_SHARE_RESOURCE_THROTTLE_TOOL_H
#include "ob_throttle_unit.h"
#include "common/ob_clock_generator.h"
#include "observer/omt/ob_tenant_config.h"
#include "share/allocator/ob_memstore_allocator.h"
#include "share/allocator/ob_tx_data_allocator.h"
#include "share/allocator/ob_mds_allocator.h"
namespace oceanbase {
namespace share {
// Pairs one module allocator with its dedicated throttle unit.
// ALLOCATOR must provide throttle_unit_name() and resource_unit_size()
// (see DEFINE_CUSTOM_FUNC_FOR_THROTTLE). For the share-level pseudo module
// (FakeAllocator) allocator_ stays nullptr — it owns no real memory.
template <typename ALLOCATOR>
struct ModuleThrottleTool
{
  ALLOCATOR *allocator_;                            // not owned; set by ObShareResourceThrottleTool::init_one_
  ObThrottleUnit<ALLOCATOR> module_throttle_unit_;  // throttle state for this module
  ModuleThrottleTool<ALLOCATOR>()
      : allocator_(nullptr), module_throttle_unit_(ALLOCATOR::throttle_unit_name(), ALLOCATOR::resource_unit_size()) {}
  TO_STRING_KV(KP(allocator_), K(module_throttle_unit_));
};
// The two stage throttle tool, which manages a share resource throttle unit and some module resource throttle unit
template <typename FakeAllocator, typename ...Args>
class ObShareResourceThrottleTool
{
private:
static const uint32_t DEFAULT_THROTTLE_SLEEP_INTERVAL = 20 * 1000;
private: // Functors for ShareResourceThrottleTool
struct SumModuleHoldResourceFunctor {
int64_t sum_;
SumModuleHoldResourceFunctor() : sum_(0) {}
template <typename ALLOCATOR,
typename std::enable_if<
std::is_same<typename std::decay<FakeAllocator>::type, typename std::decay<ALLOCATOR>::type>::value,
bool>::type = true>
int operator()(const ModuleThrottleTool<ALLOCATOR> &obj)
{
sum_ += 0;
return OB_SUCCESS;
}
template <typename ALLOCATOR,
typename std::enable_if<
!std::is_same<typename std::decay<FakeAllocator>::type, typename std::decay<ALLOCATOR>::type>::value,
bool>::type = true>
int operator()(const ModuleThrottleTool<ALLOCATOR> &obj)
{
sum_ += obj.allocator_->hold();
return OB_SUCCESS;
}
};
public:
ObShareResourceThrottleTool() {}
ObShareResourceThrottleTool(ObShareResourceThrottleTool &) = delete;
ObShareResourceThrottleTool &operator= (ObShareResourceThrottleTool &) = delete;
template <typename HEAD, typename ... OTHERS>
int init(HEAD *head, OTHERS * ... others);
template <typename TAIL>
int init(TAIL *tail);
template <typename ALLOCATOR>
void alloc_resource(const int64_t resource_size, const int64_t abs_expire_time, bool &is_throttled);
template <typename ALLOCATOR>
bool is_throttling(ObThrottleInfoGuard &share_ti_guard, ObThrottleInfoGuard &module_ti_guard);
template <typename ALLOCATOR>
bool still_throttling(ObThrottleInfoGuard &share_ti_guard, ObThrottleInfoGuard &module_ti_guard);
template <typename ALLOCATOR>
int64_t expected_wait_time(ObThrottleInfoGuard &share_ti_guard, ObThrottleInfoGuard &module_ti_guard);
template <typename ALLOCATOR>
void do_throttle(const int64_t abs_expire_time);
template <typename ALLOCATOR>
void skip_throttle(const int64_t skip_size);
template <typename ALLOCATOR>
void skip_throttle(const int64_t skip_size,
ObThrottleInfoGuard &share_ti_guard,
ObThrottleInfoGuard &module_ti_guard);
template <typename ALLOCATOR>
void set_resource_limit(const int64_t resource_limit);
template <typename ALLOCATOR>
void update_throttle_config(const int64_t resource_limit,
const int64_t trigger_percentage,
const int64_t max_duration);
template <typename ALLOCATOR>
void enable_adaptive_limit();
template <typename ALLOCATOR>
char *unit_name();
TO_STRING_KV(KP(this), K(module_throttle_tuple_));
private:
template <typename ALLOCATOR>
int init_one_(ALLOCATOR *allocator);
private:
ObTuple<ModuleThrottleTool<FakeAllocator>, ModuleThrottleTool<Args>...> module_throttle_tuple_;
};
} // namespace share
} // namespace oceanbase
#ifndef OCEABASE_SHARE_THROTTLE_OB_SHARE_RESOURCE_THROTTLE_TOOL_H_IPP
#define OCEABASE_SHARE_THROTTLE_OB_SHARE_RESOURCE_THROTTLE_TOOL_H_IPP
#include "ob_share_resource_throttle_tool.ipp"
#endif
#endif

View File

@ -0,0 +1,310 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#ifndef OCEABASE_SHARE_THROTTLE_OB_SHARE_RESOURCE_THROTTLE_TOOL_IPP
#define OCEABASE_SHARE_THROTTLE_OB_SHARE_RESOURCE_THROTTLE_TOOL_IPP
#ifndef OCEABASE_SHARE_THROTTLE_OB_SHARE_RESOURCE_THROTTLE_TOOL_H_IPP
#define OCEABASE_SHARE_THROTTLE_OB_SHARE_RESOURCE_THROTTLE_TOOL_H_IPP
#include "ob_share_resource_throttle_tool.h"
#endif
namespace oceanbase {
namespace share {
// Fetches the throttle unit belonging to ALLOC_TYPE out of module_throttle_tuple_
// and binds it to a local reference named `throttle_unit`.
#define ACQUIRE_THROTTLE_UNIT(ALLOC_TYPE, throttle_unit) \
  ObThrottleUnit<ALLOC_TYPE> &throttle_unit = \
      module_throttle_tuple_.template element<ModuleThrottleTool<ALLOC_TYPE>>().module_throttle_unit_;

// Same as ACQUIRE_THROTTLE_UNIT, but additionally binds the module's
// allocator pointer to a local named `allocator`.
#define ACQUIRE_UNIT_ALLOCATOR(ALLOC_TYPE, throttle_unit, allocator) \
  ObThrottleUnit<ALLOC_TYPE> &throttle_unit = \
      module_throttle_tuple_.template element<ModuleThrottleTool<ALLOC_TYPE>>().module_throttle_unit_; \
  ALLOC_TYPE *allocator = module_throttle_tuple_.template element<ModuleThrottleTool<ALLOC_TYPE>>().allocator_;
// Recursive variadic init: set up the head allocator's module unit, then
// recurse on the rest; the single-argument overload below terminates the
// recursion and also initializes the share-level unit.
template <typename FakeAllocator, typename... Args>
template <typename HEAD, typename... OTHERS>
int ObShareResourceThrottleTool<FakeAllocator, Args...>::init(HEAD *head, OTHERS *...others)
{
  int ret = OB_SUCCESS;
  if (OB_FAIL(init_one_(head))) {
    SHARE_LOG(WARN, "init one throttle tool failed", KR(ret));
  } else if (OB_FAIL(init(others...))) {
    SHARE_LOG(WARN, "init share resource throttle tool failed", KR(ret));
  }
  return ret;
}
// Recursion terminator: init the last real allocator's module unit, then the
// share-level unit. The share slot deliberately keeps allocator_ == nullptr —
// the FakeAllocator owns no memory of its own.
template <typename FakeAllocator, typename... Args>
template <typename ALLOCATOR>
int ObShareResourceThrottleTool<FakeAllocator, Args...>::init(ALLOCATOR *allocator)
{
  int ret = OB_SUCCESS;
  // init last real allocator
  if (OB_FAIL(init_one_(allocator))) {
    SHARE_LOG(WARN, "init one throttle tool failed", KR(ret));
  } else {
    // init share throttle unit
    ModuleThrottleTool<FakeAllocator> &mtt =
        module_throttle_tuple_.template element<ModuleThrottleTool<FakeAllocator>>();
    mtt.allocator_ = nullptr;
    if (OB_FAIL(mtt.module_throttle_unit_.init())) {
      SHARE_LOG(ERROR, "init share resource throttle tool failed", KR(ret));
    } else {
      SHARE_LOG(INFO, "init share resource throttle tool finish", KR(ret), KP(this));
    }
  }
  return ret;
}
// Binds `allocator` into its ModuleThrottleTool slot and initializes that
// module's throttle unit. A null allocator is a programming error here
// (only the share-level slot may be null, and it never goes through init_one_).
template <typename FakeAllocator, typename... Args>
template <typename ALLOCATOR>
int ObShareResourceThrottleTool<FakeAllocator, Args...>::init_one_(ALLOCATOR *allocator)
{
  int ret = OB_SUCCESS;
  ModuleThrottleTool<ALLOCATOR> &mtt = module_throttle_tuple_.template element<ModuleThrottleTool<ALLOCATOR>>();
  mtt.allocator_ = allocator;
  if (OB_ISNULL(mtt.allocator_)) {
    ret = OB_ERR_UNEXPECTED;
    SHARE_LOG(ERROR, "allocator is unexpected null", KR(ret), KP(allocator));
  } else if (OB_FAIL(mtt.module_throttle_unit_.init())) {
    SHARE_LOG(ERROR, "init module throttle unit failed", KR(ret));
  } else {
    SHARE_LOG(INFO, "init one allocator for throttle finish", KR(ret), K(mtt));
  }
  return ret;
}
// Turn on adaptive (dynamically adjusted) resource-limit mode for the
// throttle unit that belongs to ALLOCATOR.
template <typename FakeAllocator, typename... Args>
template <typename ALLOCATOR>
void ObShareResourceThrottleTool<FakeAllocator, Args...>::enable_adaptive_limit()
{
  module_throttle_tuple_.template element<ModuleThrottleTool<ALLOCATOR>>()
      .module_throttle_unit_.enable_adaptive_limit();
}
// Account alloc_size against both the module-level and the share-level unit.
// is_throttled tells the caller whether it must subsequently call do_throttle().
// Fix: the original computed `module_hold` but then called allocator->hold()
// a second time for the module unit — the local was unused and the two reads
// could observe different values under concurrent allocation. Use the snapshot.
template <typename FakeAllocator, typename... Args>
template <typename ALLOCATOR>
void ObShareResourceThrottleTool<FakeAllocator, Args...>::alloc_resource(const int64_t alloc_size,
                                                                         const int64_t abs_expire_time,
                                                                         bool &is_throttled)
{
  ACQUIRE_THROTTLE_UNIT(FakeAllocator, share_throttle_unit);
  ACQUIRE_UNIT_ALLOCATOR(ALLOCATOR, module_throttle_unit, allocator);

  is_throttled = false;
  bool share_throttled = false;
  bool module_throttled = false;

  // snapshot this module's hold once so module-level accounting is consistent
  const int64_t module_hold = allocator->hold();
  // sum hold() over every real module for the share-level unit
  SumModuleHoldResourceFunctor sum_hold_func;
  (void)module_throttle_tuple_.for_each(sum_hold_func);

  int share_ret = share_throttle_unit.alloc_resource(sum_hold_func.sum_, alloc_size, abs_expire_time, share_throttled);
  int module_ret = module_throttle_unit.alloc_resource(module_hold, alloc_size, abs_expire_time, module_throttled);

  if (OB_UNLIKELY(OB_SUCCESS != share_ret || OB_SUCCESS != module_ret)) {
    // best-effort: on bookkeeping failure, do not throttle the caller
    SHARE_LOG_RET(WARN, 0, "throttle alloc resource failed", KR(share_ret), KR(module_ret), KPC(this));
    is_throttled = false;
  } else {
    is_throttled = (share_throttled | module_throttled);
  }
}
// Reports whether this thread is currently throttled by either the share
// unit or ALLOCATOR's module unit. Both units must always be queried: each
// call fills its ObThrottleInfoGuard as a side effect for later use.
template <typename FakeAllocator, typename... Args>
template <typename ALLOCATOR>
bool ObShareResourceThrottleTool<FakeAllocator, Args...>::is_throttling(ObThrottleInfoGuard &share_ti_guard,
                                                                        ObThrottleInfoGuard &module_ti_guard)
{
  ACQUIRE_THROTTLE_UNIT(FakeAllocator, share_throttle_unit);
  ACQUIRE_THROTTLE_UNIT(ALLOCATOR, module_throttle_unit);
  const bool share_hit = share_throttle_unit.is_throttling(share_ti_guard);
  const bool module_hit = module_throttle_unit.is_throttling(module_ti_guard);
  return share_hit || module_hit;
}
// Re-check (inside the do_throttle wait loop) whether the caller must keep
// waiting. Module-level throttling has priority: while the module unit still
// throttles, the share unit is not consulted at all.
template <typename FakeAllocator, typename... Args>
template <typename ALLOCATOR>
bool ObShareResourceThrottleTool<FakeAllocator, Args...>::still_throttling(ObThrottleInfoGuard &share_ti_guard,
                                                                           ObThrottleInfoGuard &module_ti_guard)
{
  ACQUIRE_THROTTLE_UNIT(FakeAllocator, share_throttle_unit);
  ACQUIRE_UNIT_ALLOCATOR(ALLOCATOR, module_throttle_unit, allocator);
  bool still_throttling = false;
  if (module_ti_guard.is_valid() && module_throttle_unit.still_throttling(module_ti_guard, allocator->hold())) {
    // if this module is throttling, skip checking share throttle unit
    still_throttling = true;
  } else if (share_ti_guard.is_valid()) {
    // module done (or never throttled): consult the share unit with the
    // hold summed over all real modules
    SumModuleHoldResourceFunctor sum_hold_func;
    (void)module_throttle_tuple_.for_each(sum_hold_func);
    if (share_throttle_unit.still_throttling(share_ti_guard, sum_hold_func.sum_)) {
      still_throttling = true;
    }
  }
  return still_throttling;
}
// Remaining time this thread should sleep. Mirrors still_throttling's
// priority: the module unit's wait time is consumed first; only when it has
// drained (<= 0) does the share unit's wait time apply.
template <typename FakeAllocator, typename... Args>
template <typename ALLOCATOR>
int64_t ObShareResourceThrottleTool<FakeAllocator, Args...>::expected_wait_time(ObThrottleInfoGuard &share_ti_guard,
                                                                                ObThrottleInfoGuard &module_ti_guard)
{
  ACQUIRE_THROTTLE_UNIT(FakeAllocator, share_throttle_unit);
  ACQUIRE_UNIT_ALLOCATOR(ALLOCATOR, module_throttle_unit, allocator);
  int64_t expected_wait_time = 0;
  // step 1 : calculate module throttle time
  if (module_ti_guard.is_valid()) {
    expected_wait_time = module_throttle_unit.expected_wait_time(module_ti_guard, allocator->hold());
  }
  // step 2 : if module throttle done, calculate share unit throttle time
  if (expected_wait_time <= 0 && share_ti_guard.is_valid()) {
    SumModuleHoldResourceFunctor sum_hold_func;
    (void)module_throttle_tuple_.for_each(sum_hold_func);
    expected_wait_time = share_throttle_unit.expected_wait_time(share_ti_guard, sum_hold_func.sum_);
  }
  return expected_wait_time;
}
// Warn (rate-limited per thread to once a minute) when a single do_throttle
// call has already slept for more than a minute. Expects `sleep_time`,
// `left_interval` and `expected_wait_t` to be in scope at the expansion site.
#define PRINT_THROTTLE_WARN                                                               \
  do {                                                                                    \
    const int64_t WARN_LOG_INTERVAL = 60L * 1000L * 1000L /* one minute */;               \
    if (sleep_time > (WARN_LOG_INTERVAL) && TC_REACH_TIME_INTERVAL(WARN_LOG_INTERVAL)) {  \
      SHARE_LOG(WARN,                                                                     \
                "[Throttling] Attention!! Sleep More Than One Minute!!",                  \
                K(sleep_time),                                                            \
                K(left_interval),                                                         \
                K(expected_wait_t));                                                      \
    }                                                                                     \
  } while (0)
// Report (at most once per second) the total time this call spent throttled.
// Expects `sleep_time` in scope and ALLOCATOR as the enclosing template param.
// Fix: dropped the trailing semicolon after `while (0)` — the macro is invoked
// as `PRINT_THROTTLE_STATISTIC;`, so the old definition expanded to a double
// semicolon and would break inside an unbraced if/else; the sibling macro
// PRINT_THROTTLE_WARN already follows the semicolon-free convention.
#define PRINT_THROTTLE_STATISTIC                                                          \
  do {                                                                                    \
    const int64_t MEMSTORE_THROTTLE_LOG_INTERVAL = 1L * 1000L * 1000L; /*one seconds*/    \
    if (sleep_time > 0 && REACH_TIME_INTERVAL(MEMSTORE_THROTTLE_LOG_INTERVAL)) {          \
      SHARE_LOG(INFO,                                                                     \
                "[Throttling] Time Info",                                                 \
                "Throttle Unit Name",                                                     \
                ALLOCATOR::throttle_unit_name(),                                          \
                "Throttle Sleep Time(us)",                                                \
                sleep_time);                                                              \
    }                                                                                     \
  } while (0)
// Block the calling thread until throttling ends or abs_expire_time passes
// (0 means the unit's DEFAULT_MAX_THROTTLE_TIME). Sleeps in slices of at most
// DEFAULT_THROTTLE_SLEEP_INTERVAL so the throttle state is re-evaluated often.
// Fix: the error log used K(clock) — no local named `clock` exists, so it
// resolved to the C library function ::clock and logged a function pointer;
// log the remaining wait budget `left_interval` instead.
template <typename FakeAllocator, typename... Args>
template <typename ALLOCATOR>
void ObShareResourceThrottleTool<FakeAllocator, Args...>::do_throttle(const int64_t abs_expire_time)
{
  int ret = OB_SUCCESS;  // referenced by the SHARE_LOG macros below
  int64_t left_interval = (0 == abs_expire_time ? ObThrottleUnit<ALLOCATOR>::DEFAULT_MAX_THROTTLE_TIME
                                                : abs_expire_time - ObClockGenerator::getCurrentTime());
  int64_t sleep_time = 0;
  ObThrottleInfoGuard share_ti_guard;
  ObThrottleInfoGuard module_ti_guard;
  if (left_interval < 0) {
    // exit directly
  } else if (is_throttling<ALLOCATOR>(share_ti_guard, module_ti_guard)) {
    // loop to do throttle
    while (still_throttling<ALLOCATOR>(share_ti_guard, module_ti_guard) && left_interval > 0) {
      int64_t expected_wait_t = min(left_interval, expected_wait_time<ALLOCATOR>(share_ti_guard, module_ti_guard));
      if (expected_wait_t < 0) {
        // defensive: negative wait time is a bookkeeping bug — reset the
        // module throttle info so we cannot spin here forever
        SHARE_LOG_RET(ERROR,
                      OB_ERR_UNEXPECTED,
                      "expected wait time should not smaller than 0",
                      K(expected_wait_t),
                      KPC(share_ti_guard.throttle_info()),
                      KPC(module_ti_guard.throttle_info()),
                      K(left_interval));
        if (module_ti_guard.is_valid()) {
          module_ti_guard.throttle_info()->reset();
        }
      } else {
        uint32_t sleep_interval = min(DEFAULT_THROTTLE_SLEEP_INTERVAL, (uint32_t)expected_wait_t);
        sleep_time += sleep_interval;
        left_interval -= sleep_interval;
        ::usleep(sleep_interval);
      }
      PRINT_THROTTLE_WARN;
    }
    PRINT_THROTTLE_STATISTIC;
  }
}
#undef PRINT_THROTTLE_WARN
#undef PRINT_THROTTLE_STATISTIC
// Convenience overload: look up this thread's throttle-info guards and, if it
// is being throttled at all, delegate to the three-argument overload.
template <typename FakeAllocator, typename... Args>
template <typename ALLOCATOR>
void ObShareResourceThrottleTool<FakeAllocator, Args...>::skip_throttle(const int64_t skip_size)
{
  ObThrottleInfoGuard share_ti_guard;
  ObThrottleInfoGuard module_ti_guard;
  const bool need_skip = is_throttling<ALLOCATOR>(share_ti_guard, module_ti_guard);
  if (need_skip) {
    skip_throttle<ALLOCATOR>(skip_size, share_ti_guard, module_ti_guard);
  }
}
// Credit skip_size back to whichever units currently throttle this thread,
// so the caller does not pay throttle time for those bytes. Each unit is
// credited against the sequence recorded in its own throttle info.
template <typename FakeAllocator, typename... Args>
template <typename ALLOCATOR>
void ObShareResourceThrottleTool<FakeAllocator, Args...>::skip_throttle(const int64_t skip_size,
                                                                        ObThrottleInfoGuard &share_ti_guard,
                                                                        ObThrottleInfoGuard &module_ti_guard)
{
  ACQUIRE_THROTTLE_UNIT(FakeAllocator, share_throttle_unit);
  ACQUIRE_THROTTLE_UNIT(ALLOCATOR, module_throttle_unit);
  if (module_ti_guard.is_valid()) {
    (void)module_throttle_unit.skip_throttle(skip_size, module_ti_guard.throttle_info()->sequence_);
  }
  if (share_ti_guard.is_valid()) {
    (void)share_throttle_unit.skip_throttle(skip_size, share_ti_guard.throttle_info()->sequence_);
  }
}
// Overwrite the resource limit of the throttle unit that belongs to ALLOCATOR.
template <typename FakeAllocator, typename... Args>
template <typename ALLOCATOR>
void ObShareResourceThrottleTool<FakeAllocator, Args...>::set_resource_limit(const int64_t resource_limit)
{
  module_throttle_tuple_.template element<ModuleThrottleTool<ALLOCATOR>>()
      .module_throttle_unit_.set_resource_limit(resource_limit);
}
// Push a new (limit, trigger percentage, max duration) configuration down to
// ALLOCATOR's throttle unit in one call.
template <typename FakeAllocator, typename... Args>
template <typename ALLOCATOR>
void ObShareResourceThrottleTool<FakeAllocator, Args...>::update_throttle_config(const int64_t resource_limit,
                                                                                 const int64_t trigger_percentage,
                                                                                 const int64_t max_duration)
{
  ACQUIRE_THROTTLE_UNIT(ALLOCATOR, throttle_unit);
  (void)throttle_unit.update_throttle_config(resource_limit, trigger_percentage, max_duration);
}
// Clean up the file-local helper macros so they do not leak out of this header.
// Fix: the original #undef'd ACQUIRE_ALLOCATOR and FUNC_PREFIX, neither of
// which was ever defined, while ACQUIRE_UNIT_ALLOCATOR (which IS defined
// above) was left to escape into every including translation unit.
#undef ACQUIRE_THROTTLE_UNIT
#undef ACQUIRE_UNIT_ALLOCATOR
} // namespace share
} // namespace oceanbase
#endif

View File

@ -0,0 +1,111 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#include "ob_share_throttle_define.h"
#include "observer/omt/ob_tenant_config_mgr.h"
#include "lib/alloc/alloc_func.h"
namespace oceanbase {
namespace share {
// Size of one TxShare throttle resource unit: 2MB.
int64_t FakeAllocatorForTxShare::resource_unit_size()
{
  return 2LL * 1024LL * 1024LL;  // 2MB
}
// Load the TxShare throttle configuration from the tenant config, falling
// back to compiled-in defaults when the tenant config is not yet available.
void FakeAllocatorForTxShare::init_throttle_config(int64_t &resource_limit,
                                                   int64_t &trigger_percentage,
                                                   int64_t &max_duration)
{
  // define some default value
  const int64_t SR_LIMIT_PERCENTAGE = 60;
  const int64_t SR_THROTTLE_TRIGGER_PERCENTAGE = 60;
  const int64_t SR_THROTTLE_MAX_DURATION = 2LL * 60LL * 60LL * 1000LL * 1000LL;  // 2 hours

  int64_t total_memory = lib::get_tenant_memory_limit(MTL_ID());

  omt::ObTenantConfigGuard tenant_config(TENANT_CONF(MTL_ID()));
  if (tenant_config.is_valid()) {
    int64_t share_mem_limit = tenant_config->_tx_share_memory_limit_percentage;
    // if _tx_share_memory_limit_percentage equals 0 (the adaptive default),
    // use (memstore_limit_percentage + 10) as the effective percentage
    if (0 == share_mem_limit) {
      share_mem_limit = tenant_config->memstore_limit_percentage + 10;
    }
    resource_limit = total_memory * share_mem_limit / 100LL;
    trigger_percentage = tenant_config->writing_throttling_trigger_percentage;
    max_duration = tenant_config->writing_throttling_maximum_duration;
  } else {
    // tenant config unavailable (e.g. during bootstrap): use hard-coded defaults
    resource_limit = total_memory * SR_LIMIT_PERCENTAGE / 100;
    trigger_percentage = SR_THROTTLE_TRIGGER_PERCENTAGE;
    max_duration = SR_THROTTLE_MAX_DURATION;
  }
}
/**
 * @brief Because TxShare may not be able to use as much memory as the config
 * specifies, the effective resource_limit is dynamically clamped to what the
 * tenant actually has left.
 *
 * @param[in] holding_size this allocator's currently held memory size
 * @param[in] config_specify_resource_limit the limit _tx_share_memory_limit_percentage specifies
 * @param[out] resource_limit the effective limit after clamping
 * @param[in,out] last_update_limit_ts last update ts; CAS'd so at most one
 *                thread per interval performs the update
 * @param[out] is_updated whether resource_limit changed (caller then refreshes
 *             its decay factor)
 */
void FakeAllocatorForTxShare::adaptive_update_limit(const int64_t holding_size,
                                                    const int64_t config_specify_resource_limit,
                                                    int64_t &resource_limit,
                                                    int64_t &last_update_limit_ts,
                                                    bool &is_updated)
{
  static const int64_t UPDATE_LIMIT_INTERVAL = 50LL * 1000LL;  // 50 ms
  static const int64_t USABLE_REMAIN_MEMORY_PERCETAGE = 60;
  static const int64_t MAX_UNUSABLE_MEMORY = 2LL * 1024LL * 1024LL * 1024LL;  // 2 GB

  int64_t cur_ts = ObClockGenerator::getCurrentTime();
  int64_t old_ts = last_update_limit_ts;
  // rate-limit: only the thread that wins the CAS inside each 50ms window
  // recomputes the limit; all others return with is_updated untouched
  if ((cur_ts - old_ts > UPDATE_LIMIT_INTERVAL) && ATOMIC_BCAS(&last_update_limit_ts, old_ts, cur_ts)) {
    int64_t remain_memory = lib::get_tenant_memory_remain(MTL_ID());
    // by default only 60% of the remaining tenant memory is usable by TxShare
    int64_t usable_remain_memory = remain_memory / 100 * USABLE_REMAIN_MEMORY_PERCETAGE;
    // but never set aside more than 2GB as the unusable reserve
    if (remain_memory > MAX_UNUSABLE_MEMORY) {
      usable_remain_memory = std::max(usable_remain_memory, remain_memory - MAX_UNUSABLE_MEMORY);
    }

    is_updated = false;
    if (holding_size + usable_remain_memory < config_specify_resource_limit) {
      // tenant cannot back the configured limit: clamp to what is achievable
      resource_limit = holding_size + usable_remain_memory;
      is_updated = true;
    } else if (resource_limit != config_specify_resource_limit) {
      // memory pressure relieved: restore the configured limit
      resource_limit = config_specify_resource_limit;
      is_updated = true;
    } else {
      // do nothing
    }

    if (is_updated && REACH_TIME_INTERVAL(10LL * 1000LL * 1000LL)) {
      SHARE_LOG(INFO,
                "adaptive update",
                "Config Specify Resource Limit(MB)", config_specify_resource_limit / 1024 / 1024,
                "TxShare Current Memory Limit(MB)", resource_limit / 1024 / 1024,
                "Holding Memory(MB)", holding_size / 1024 / 1024,
                "Tenant Remain Memory(MB)", remain_memory / 1024 / 1024,
                "Usable Remain Memory(MB)", usable_remain_memory / 1024 /1024,
                "Last Update Limit Timestamp", last_update_limit_ts,
                "Is Updated", is_updated);
    }
  }
}
} // namespace share
} // namespace oceanbase

View File

@ -0,0 +1,82 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#ifndef OCEABASE_SHARE_THROTTLE_OB_SHARE_THROTTLE_DEFINE_H
#define OCEABASE_SHARE_THROTTLE_OB_SHARE_THROTTLE_DEFINE_H
#include "lib/alloc/alloc_struct.h"
/**
* @brief This File is used to avoid introducing circular dependencies. It declares some class but do not include their
* .h file
*/
// Injects the static interface every throttle "allocator" type must expose to
// ObThrottleUnit: a stable ObLabel name, the resource unit size, the initial
// throttle configuration, and the adaptive limit-update hook.
#define DEFINE_CUSTOM_FUNC_FOR_THROTTLE(throttle_name)                                                           \
  static const lib::ObLabel throttle_unit_name()                                                                 \
  {                                                                                                              \
    static lib::ObLabel label(#throttle_name);                                                                   \
    return label;                                                                                                \
  }                                                                                                              \
  static int64_t resource_unit_size();                                                                           \
  static void init_throttle_config(int64_t &resource_limit, int64_t &trigger_percentage, int64_t &max_duration); \
  static void adaptive_update_limit(const int64_t holding_size,                                                  \
                                    const int64_t config_specify_resource_limit,                                 \
                                    int64_t &resource_limit,                                                     \
                                    int64_t &last_update_limit_ts,                                               \
                                    bool &is_updated);

// Declares everything a share-level throttle needs without pulling in the
// modules' headers: the fake share allocator, forward declarations of each
// module allocator (via LST_DEFINE), a forward declaration of the tool
// template, and the <Name>ThrottleTool alias. See the expansion example below.
#define DEFINE_SHARE_THROTTLE(ThrottleName, ...)  \
  \
  struct FakeAllocatorFor##ThrottleName { \
    DEFINE_CUSTOM_FUNC_FOR_THROTTLE(ThrottleName); \
  }; \
  \
  LST_DEFINE(__VA_ARGS__); \
  \
  template <typename FakeAllocator, typename... Args> \
  class ObShareResourceThrottleTool; \
  \
  using ThrottleName##ThrottleTool = ObShareResourceThrottleTool<FakeAllocatorFor##ThrottleName, __VA_ARGS__>;
namespace oceanbase {
namespace share {
// This macro will expand as follows:
//
// struct FakeAllocatorForTxShare {
// static const lib::ObLabel throttle_unit_name()
// {
// lib::ObLabel label("TxShare");
// return label;
// }
// static int64_t resource_unit_size();
// static void init_throttle_config(int64_t &resource_limit, int64_t &trigger_percentage, int64_t &max_duration);
// };
//
// class ObMemstoreAllocator;
// class ObTenantTxDataAllocator;
// class ObTenantMdsAllocator;
//
// template <typename FakeAllocator, typename... Args>
// class ObShareResourceThrottleTool;
//
// using TxShareThrottleTool = ObShareResourceThrottleTool<FakeAllocatorForTxShare,
// ObMemstoreAllocator,
// ObTenantTxDataAllocator,
// ObTenantMdsAllocator>;
DEFINE_SHARE_THROTTLE(TxShare, ObMemstoreAllocator, ObTenantTxDataAllocator, ObTenantMdsAllocator)
} // namespace share
} // namespace oceanbase
#endif

View File

@ -0,0 +1,140 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#ifndef OCEABASE_SHARE_THROTTLE_INFO_H
#define OCEABASE_SHARE_THROTTLE_INFO_H
#include "share/ob_light_hashmap.h"
#include "lib/objectpool/ob_concurrency_objpool.h"
namespace oceanbase {
namespace share {
// Key type of the throttle-info light hashmap: one entry per thread, keyed by
// the thread's itid. The hash is computed once at construction; copy and
// assignment are disabled so an ID stays bound to its map entry for life.
// Fix: operator< and operator> were not const-qualified, so they could not be
// called on const ThrottleID objects (unlike == and !=); const-qualified and
// simplified to direct comparisons.
class ThrottleID
{
  OB_UNIS_VERSION(1);

public:
  ThrottleID(const int64_t thread_idx)
      : thread_idx_(thread_idx), hash_(murmurhash(&thread_idx, sizeof(thread_idx), 0)) {}
  ThrottleID() = delete;
  ThrottleID(const ThrottleID &) = delete;
  ThrottleID &operator= (const ThrottleID &) = delete;
  ~ThrottleID() {}

  // hash() and is_valid() is necessary function for light hash map
  uint64_t hash() const
  {
    return hash_;
  }
  // NOTE(review): common::get_itid() can be 0 for the first thread — confirm
  // that treating thread_idx_ == 0 as invalid is intentional.
  bool is_valid() const { return thread_idx_ > 0; }

  // ordering / equality are defined purely on thread_idx_ (hash_ derives from it)
  bool operator<(const ThrottleID &rhs) const { return thread_idx_ < rhs.thread_idx_; }
  bool operator>(const ThrottleID &rhs) const { return thread_idx_ > rhs.thread_idx_; }
  bool operator==(const ThrottleID &other) const
  { return thread_idx_ == other.thread_idx_; }
  bool operator!=(const ThrottleID &other) const
  { return thread_idx_ != other.thread_idx_; }
  TO_STRING_KV(K(thread_idx_), K(hash_));

private:
  const int64_t thread_idx_;  // owning thread's itid (immutable key)
  const uint64_t hash_;       // murmurhash of thread_idx_, cached at construction
};
// Per-thread throttle bookkeeping record, stored in ObThrottleInfoHashMap and
// keyed by ThrottleID. ObLightHashLink supplies the intrusive link and
// reference counting the light hashmap requires.
struct ObThrottleInfo : public ObLightHashLink<ObThrottleInfo>
{
  ThrottleID throttle_id_;  // immutable key: creating thread's itid
  bool need_throttle_;      // true while this thread is subject to throttling
  int64_t sequence_;        // sequence recorded by the throttle unit; consumed
                            // via throttle_info()->sequence_ in skip_throttle
  int64_t allocated_size_;  // presumably bytes allocated under throttling — TODO confirm
  ObThrottleInfo(int64_t thread_idx) : throttle_id_(thread_idx), need_throttle_(false), sequence_(0), allocated_size_(0) {}
  void reset()
  {
    need_throttle_ = false;
    sequence_ = 0;
    allocated_size_ = 0;
  }
  bool is_throttling()
  {
    return need_throttle_;
  }
  // key-match predicate required by ObLightHashMap
  bool contain(const ThrottleID &throttle_id) const { return this->throttle_id_ == throttle_id; }
  TO_STRING_KV(K(throttle_id_), K(need_throttle_), K(sequence_), K(allocated_size_));
};
// Factory handle used by ObLightHashMap to create and destroy ObThrottleInfo
// records; every new record is keyed by the calling thread's itid.
struct ObThrottleInfoAllocHandle
{
  ObThrottleInfo *alloc_value() { return op_alloc_args(ObThrottleInfo, common::get_itid()); }

  void free_value(ObThrottleInfo *val)
  {
    if (nullptr != val) {
      op_free(val);
    }
  }
};
using ObThrottleInfoHashMap =
ObLightHashMap<ThrottleID, ObThrottleInfo, ObThrottleInfoAllocHandle, common::SpinRWLock, 1 << 10 /* BUCKETS_CNT */>;
// RAII guard for a ref-counted ObThrottleInfo obtained from the hashmap:
// holds the info pointer together with its owning map, and reverts the
// reference (map->revert) when reset or destroyed.
struct ObThrottleInfoGuard
{
  ObThrottleInfo *throttle_info_;          // guarded record; nullptr when empty
  ObThrottleInfoHashMap *throttle_info_map_;  // map the record must be reverted to
  ObThrottleInfoGuard() : throttle_info_(nullptr), throttle_info_map_(nullptr) {}
  ~ObThrottleInfoGuard() { reset(); }
  bool is_valid() { return OB_NOT_NULL(throttle_info_) && OB_NOT_NULL(throttle_info_map_); }
  void reset()
  {
    // revert releases the reference taken when the info was fetched
    if (is_valid()) {
      throttle_info_map_->revert(throttle_info_);
      throttle_info_ = nullptr;
      throttle_info_map_ = nullptr;
    }
  }
  // Takes over a new (info, map) pair; any previously guarded info is
  // reverted first so the old reference is not leaked.
  void init(ObThrottleInfo *throttle_info, ObThrottleInfoHashMap *throttle_info_map)
  {
    reset();
    throttle_info_ = throttle_info;
    throttle_info_map_ = throttle_info_map;
  }
  ObThrottleInfo *throttle_info() { return throttle_info_; }
  TO_STRING_KV(KP(throttle_info_), KP(throttle_info_map_));
};
} // namespace share
} // namespace oceanbase
#endif

View File

@ -0,0 +1,201 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#ifndef OCEANBASE_SHARE_THROTTLE_OB_THROTTLE_UNIT_H
#define OCEANBASE_SHARE_THROTTLE_OB_THROTTLE_UNIT_H
#include "share/ob_light_hashmap.h"
#include "share/rc/ob_tenant_base.h"
#include "lib/objectpool/ob_concurrency_objpool.h"
#include "lib/alloc/alloc_struct.h"
#include "ob_throttle_info.h"
namespace oceanbase {
namespace share {
/**
* @brief Generic Resource Throttling Unit
*
* This class is used to manage resource throttling to ensure it doesn't exceed a specified resource limit.
* It includes resource allocation and throttling functionality. When using this throttling class, users need to
* provide a functor themselves to assist the throttling unit in updating certain values that are essential for
* throttling, such as the throttle trigger percentage (throttle_trigger_percentage_).
*
* If these values do not require dynamic updates for
* the current throttling module, they can be set as constants within the functor.
*
 * For Example: TODO : @gengli.wzy complete this comment
*
*
* using MyClassThrottleUnit = share::ObThrottleUnit<UpdateMyClassThrottleConfigHandle>;
*
*/
template <typename ALLOCATOR>
class ObThrottleUnit {
public:
  // Upper bound a single throttled allocation may wait: 2 hours
  static const int64_t DEFAULT_MAX_THROTTLE_TIME = 2LL * 60LL * 60LL * 1000LL * 1000LL;

private:
  // Default resource unit size is 2MB, which is used to calculate decay_factor and available resources after some time
  static const int64_t DEFAULT_RESOURCE_UNIT_SIZE = 2L * 1024L * 1024L; /* 2MB */
  // Time interval for advancing the clock (50 microseconds)
  static const int64_t ADVANCE_CLOCK_INTERVAL = 50;  // 50us

public:
  // The users can use another unit size to throttle some other resources
  const int64_t RESOURCE_UNIT_SIZE_;

public:
  ObThrottleUnit(const char *throttle_unit_name, const int64_t resource_unit_size = DEFAULT_RESOURCE_UNIT_SIZE)
      : RESOURCE_UNIT_SIZE_(resource_unit_size),
        unit_name_(throttle_unit_name),
        is_inited_(false),
        enable_adaptive_limit_(false),
        config_specify_resource_limit_(0),
        resource_limit_(0),
        sequence_num_(0),
        clock_(0),
        pre_clock_(0),
        throttle_trigger_percentage_(0),
        throttle_max_duration_(0),
        last_advance_clock_ts_us_(0),
        last_print_throttle_info_ts_(0),
        last_update_limit_ts_(0),  // BUGFIX: previously left uninitialized but read in alloc_resource()
        decay_factor_(0),
        throttle_info_map_() {}
  ~ObThrottleUnit() {}
  // Disable default constructor and assignment operator
  ObThrottleUnit() = delete;
  ObThrottleUnit &operator=(ObThrottleUnit &rhs) = delete;
  int init();
  /**
   * @brief Acquire queueing sequence and check if throttling is required.
   *
   * @param[in] holding_resource Amount of currently held resources.
   * @param[in] resource_size Size of the requested resource.
   * @param[in] abs_expire_time If is throttled, this function can sleep until abs_expire_time
   * @param[out] is_throttled Indicating if throttling is needed.
   */
  int alloc_resource(const int64_t holding_resource,
                     const int64_t resource_size,
                     const int64_t abs_expire_time,
                     bool &is_throttled);
  /**
   * @brief Check if this throttle unit is in throttling status.
   *
   * @param[out] ti_guard The throttle info guard from this unit.
   * @return True if this unit is in throttling status.
   */
  bool is_throttling(ObThrottleInfoGuard &ti_guard);
  /**
   * @brief If the clock has not advanced beyond the queue sequence, throttling is still required.
   *
   * @param[in] ti_guard The guard which holds throttle info (acquired from is_throttling function). If throttle done,
   * the throttle info in guard will be cleared.
   * @param[in] holding_size The holding resource size of allocator.
   * @return True if throttling is still required, false otherwise.
   */
  bool still_throttling(ObThrottleInfoGuard &ti_guard, const int64_t holding_size);
  /**
   * @brief Advance clock once if needed.
   *
   * @param[in] holding_size The holding resource size of allocator.
   */
  void advance_clock(const int64_t holding_size);
  /**
   * @brief Skip some throttled sequence if the throttle is not actually executed.
   *
   * @param[in] skip_size The skipped sequence.
   * @param[in] queue_sequence The queue_sequence this throttled thread got.
   */
  void skip_throttle(const int64_t skip_size, const int64_t queue_sequence);
  /**
   * @brief Estimate how long the caller still has to wait before its queued
   * sequence is released by the clock.
   *
   * @param[in] ti_guard The guard which holds throttle info (acquired from is_throttling function).
   * @param[in] holding_size The holding resource size of allocator.
   * @return Expected wait time in microseconds (0 if no wait is needed).
   */
  int64_t expected_wait_time(share::ObThrottleInfoGuard &ti_guard, const int64_t holding_size);
  TO_STRING_KV(K(unit_name_),
               K(resource_limit_),
               K(sequence_num_),
               K(clock_),
               K(pre_clock_),
               K(throttle_trigger_percentage_),
               K(throttle_max_duration_),
               K(last_advance_clock_ts_us_),
               K(decay_factor_));

public:  // throttle configs setter
  void enable_adaptive_limit();
  void set_throttle_trigger_percentage(const int64_t throttle_trigger_percentage);
  void set_throttle_max_duration(const int64_t throttle_max_duration);
  void set_resource_limit(const int64_t resource_limit);
  void update_throttle_config(const int64_t resource_limit,
                              const int64_t throttle_trigger_percentage,
                              const int64_t throttle_max_duration);

private:
  int get_throttle_info_(const ThrottleID &throttle_id, share::ObThrottleInfoGuard &ti_guard);
  int inner_get_throttle_info_(share::ObThrottleInfo *&throttle_info, const int64_t abs_expire_time);
  void inner_revert_throttle_info_(share::ObThrottleInfo *throttle_info);
  void update_decay_factor_(const bool is_adaptive_update = false);
  void reset_thread_throttle_();
  void set_throttle_info_(const int64_t sequence, const int64_t allocated_size, const int64_t abs_expire_time);
  void inner_set_resource_limit_(const int64_t resource_limit);
  void print_throttle_info_(const int64_t holding_size,
                            const int64_t alloc_size,
                            const int64_t sequence,
                            const int64_t throttle_trigger);
  int64_t avaliable_resource_after_dt_(const int64_t cur_mem_hold, const int64_t trigger_mem_limit, const int64_t dt);

private:
  lib::ObLabel unit_name_;
  bool is_inited_;
  bool enable_adaptive_limit_;
  int64_t config_specify_resource_limit_;
  int64_t resource_limit_;
  int64_t sequence_num_;
  int64_t clock_;
  int64_t pre_clock_;
  int64_t throttle_trigger_percentage_;
  int64_t throttle_max_duration_;
  int64_t last_advance_clock_ts_us_;
  int64_t last_print_throttle_info_ts_;
  int64_t last_update_limit_ts_;
  double decay_factor_;
  // Save throttle infos created by different threads.
  ObThrottleInfoHashMap throttle_info_map_;
};
} // namespace share
} // namespace oceanbase
#ifndef OCEANBASE_SHARE_THROTTLE_OB_THROTTLE_UNIT_H_IPP
#define OCEANBASE_SHARE_THROTTLE_OB_THROTTLE_UNIT_H_IPP
#include "ob_throttle_unit.ipp"
#endif
#endif

View File

@ -0,0 +1,504 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#ifndef OCEANBASE_SHARE_THROTTLE_OB_THROTTLE_UNIT_IPP
#define OCEANBASE_SHARE_THROTTLE_OB_THROTTLE_UNIT_IPP
#ifndef OCEANBASE_SHARE_THROTTLE_OB_THROTTLE_UNIT_H_IPP
#define OCEANBASE_SHARE_THROTTLE_OB_THROTTLE_UNIT_H_IPP
#include "ob_throttle_unit.h"
#endif
#include "observer/omt/ob_tenant_config_mgr.h"
#include "lib/thread_local/ob_tsi_utils.h"
namespace oceanbase {
namespace share {
#define THROTTLE_UNIT_INFO \
"Unit Name", unit_name_, "Config Specify Resource Limit(MB)", config_specify_resource_limit_ / 1024 / 1024, \
"Resource Limit(MB)", resource_limit_ / 1024 / 1024, "Throttle Trigger(MB)", \
resource_limit_ *throttle_trigger_percentage_ / 100 / 1024 / 1024, "Throttle Percentage", \
throttle_trigger_percentage_, "Max Duration(us)", throttle_max_duration_, "Decay Factor", decay_factor_
// Initializes the throttle unit: sets up the per-thread throttle info map and
// pulls the initial throttle configuration from the ALLOCATOR policy class.
// Returns OB_INIT_TWICE if called more than once.
template <typename ALLOCATOR>
int ObThrottleUnit<ALLOCATOR>::init()
{
  int ret = OB_SUCCESS;
  ObMemAttr attr;
  attr.tenant_id_ = MTL_ID();
  attr.label_ = "ThrottleInfoMap";
  attr.ctx_id_ = ObCtxIds::DEFAULT_CTX_ID;
  if (IS_INIT) {
    ret = OB_INIT_TWICE;
    SHARE_LOG(WARN, "init throttle unit failed", KR(ret));
  } else if (OB_FAIL(throttle_info_map_.init(attr))) {
    SHARE_LOG(WARN, "init throttle unit failed", KR(ret));
  } else {
    // The ALLOCATOR policy supplies the initial limit / trigger / duration.
    (void)ALLOCATOR::init_throttle_config(resource_limit_, throttle_trigger_percentage_, throttle_max_duration_);
    (void)update_decay_factor_();
    // remember the config-specified limit; adaptive mode may later diverge resource_limit_ from it
    config_specify_resource_limit_ = resource_limit_;
    enable_adaptive_limit_ = false;
    is_inited_ = true;
    SHARE_LOG(INFO,
              "[Throttle]Init throttle config finish",
              K(unit_name_),
              K(resource_limit_),
              K(config_specify_resource_limit_),
              K(throttle_trigger_percentage_),
              K(throttle_max_duration_));
  }
  return ret;
}
// Records an allocation of alloc_size against this unit and decides whether
// the caller must be throttled. When holding_size exceeds the trigger
// threshold, the caller is queued behind the sequence clock and its throttle
// info is updated.
// NOTE(review): is_throttled is only assigned on the throttled path; callers
// must pre-initialize it to false — confirm all call sites do.
template <typename ALLOCATOR>
int ObThrottleUnit<ALLOCATOR>::alloc_resource(const int64_t holding_size,
                                              const int64_t alloc_size,
                                              const int64_t abs_expire_time,
                                              bool &is_throttled)
{
  int ret = OB_SUCCESS;
  int64_t trigger_percentage = throttle_trigger_percentage_;
  // a trigger percentage of 100 disables throttling entirely
  if (OB_LIKELY(trigger_percentage < 100)) {
    // do adaptive update resource limit if needed
    if (enable_adaptive_limit_) {
      bool is_updated = false;
      ALLOCATOR::adaptive_update_limit(holding_size,
                                       config_specify_resource_limit_,
                                       resource_limit_,
                                       last_update_limit_ts_,
                                       is_updated);
      if (is_updated) {
        (void)update_decay_factor_(true /* is_adaptive_update */);
      }
    }
    // check if need throttle
    int64_t throttle_trigger = resource_limit_ * trigger_percentage / 100;
    if (OB_UNLIKELY(holding_size < 0 || alloc_size <= 0 || resource_limit_ <= 0 || trigger_percentage <= 0)) {
      // NOTE(review): invalid arguments are logged but OB_SUCCESS is still returned — confirm intended
      SHARE_LOG(ERROR, "invalid arguments", K(holding_size), K(alloc_size), K(resource_limit_), K(trigger_percentage));
    } else if (holding_size > throttle_trigger) {
      // above the trigger threshold: queue this allocation behind the clock
      is_throttled = true;
      int64_t sequence = ATOMIC_AAF(&sequence_num_, alloc_size);
      // (removed unused local `alloc_duration` — it was never read)
      (void)advance_clock(holding_size);
      (void)set_throttle_info_(sequence, alloc_size, abs_expire_time);
      (void)print_throttle_info_(holding_size, alloc_size, sequence, throttle_trigger);
    }
  }
  return ret;
}
// Reports whether the current thread is in throttling status; on success the
// thread's throttle info is handed to ti_guard for later queries.
template <typename ALLOCATOR>
bool ObThrottleUnit<ALLOCATOR>::is_throttling(ObThrottleInfoGuard &ti_guard)
{
  int ret = OB_SUCCESS;
  bool bret = false;
  if (OB_SUCC(get_throttle_info_(share::ThrottleID(common::get_itid()), ti_guard))) {
    bret = ti_guard.throttle_info()->is_throttling();
  } else if (OB_ENTRY_NOT_EXIST == ret) {
    // this thread never registered a throttle info: not throttling
    bret = false;
  } else {
    SHARE_LOG(WARN, "get throttle info failed", K(ti_guard), THROTTLE_UNIT_INFO);
  }
  return bret;
}
// Marks the current thread's throttle info as throttled: records the queue
// sequence it must wait for and accumulates the allocated size.
// The info is fetched (or lazily created) via inner_get_throttle_info_ and
// reverted back to the map before returning.
template <typename ALLOCATOR>
void ObThrottleUnit<ALLOCATOR>::set_throttle_info_(const int64_t queue_sequence,
                                                   const int64_t allocated_size,
                                                   const int64_t abs_expire_time)
{
  int ret = OB_SUCCESS;
  ObThrottleInfo *throttle_info = nullptr;
  if (OB_FAIL(inner_get_throttle_info_(throttle_info, abs_expire_time))) {
    SHARE_LOG(WARN, "get throttle info failed", KR(ret), THROTTLE_UNIT_INFO);
  } else if (OB_ISNULL(throttle_info)) {
    SHARE_LOG_RET(ERROR, OB_ERR_UNEXPECTED, "throttle_info should not be nullptr");
  } else {
    throttle_info->need_throttle_ = true;
    throttle_info->sequence_ = queue_sequence;
    throttle_info->allocated_size_ += allocated_size;
    // release the map reference taken by inner_get_throttle_info_
    inner_revert_throttle_info_(throttle_info);
  }
}
// Periodically (at most once per second) prints throttling statistics.
// The BCAS on last_print_throttle_info_ts_ elects a single printing thread
// per interval.
template <typename ALLOCATOR>
void ObThrottleUnit<ALLOCATOR>::print_throttle_info_(const int64_t holding_size,
                                                     const int64_t alloc_size,
                                                     const int64_t sequence,
                                                     const int64_t throttle_trigger)
{
  // ATTENTION!! DO NOT MODIFY THIS LOG!!
  // This function is used to print some statistic info, which will be collected by a awk script.
  const int64_t PRINT_THROTTLE_INFO_INTERVAL = 1L * 1000L * 1000L;  // one second
  int64_t last_print_ts = last_print_throttle_info_ts_;
  int64_t current_ts = ObClockGenerator::getCurrentTime();
  if (current_ts - last_print_ts > PRINT_THROTTLE_INFO_INTERVAL &&
      ATOMIC_BCAS(&last_print_throttle_info_ts_, last_print_ts, current_ts)) {
    // release_speed means the allocated resource size per second
    int64_t release_speed = 0;
    int64_t cur_clock = clock_;
    if (pre_clock_ > 0) {
      // delta of the clock over the elapsed print intervals
      release_speed = (cur_clock - last_print_ts) > 0
          ? (cur_clock - pre_clock_) / ((current_ts - last_print_ts) / PRINT_THROTTLE_INFO_INTERVAL)
          : (cur_clock - pre_clock_) / ((current_ts - last_print_ts) / PRINT_THROTTLE_INFO_INTERVAL);
    }
    pre_clock_ = cur_clock;
    SHARE_LOG(INFO,
              "[Throttling] Size Info",
              "Throttle Unit Name", unit_name_,
              "Allocating Resource Size", alloc_size,
              "Holding Resource Size", holding_size,
              "Queueing Sequence", sequence,
              "Released Sequence", cur_clock,
              "Release Speed", release_speed,
              "Total Resource Limit", resource_limit_,
              "Throttle Trigger Threshold", throttle_trigger);
  }
}
// Looks up (or lazily creates) the throttle info record for the current
// thread. On OB_ALLOCATE_MEMORY_FAILED the allocation is retried every 10ms
// until abs_expire_time passes; an input expire time of 0 defaults to
// now + DEFAULT_MAX_THROTTLE_TIME. On success the returned record holds a map
// reference the caller must release via inner_revert_throttle_info_().
template <typename ALLOCATOR>
int ObThrottleUnit<ALLOCATOR>::inner_get_throttle_info_(share::ObThrottleInfo *&throttle_info,
                                                        const int64_t input_abs_expire_time)
{
  int ret = OB_SUCCESS;
  ThrottleID tid(common::get_itid());
  bool get_throttle_info_done = false;
  int64_t abs_expire_time = input_abs_expire_time;
  throttle_info = nullptr;
  while (OB_SUCC(ret) && !get_throttle_info_done) {
    if (OB_FAIL(throttle_info_map_.get(tid, throttle_info))) {
      if (OB_ENTRY_NOT_EXIST == ret) {
        // no record for this thread yet: create and insert one
        ret = OB_SUCCESS;
        if (0 == abs_expire_time) {
          abs_expire_time = DEFAULT_MAX_THROTTLE_TIME + ObClockGenerator::getCurrentTime();
        }
        if (OB_FAIL(throttle_info_map_.alloc_value(throttle_info))) {
          if (OB_ALLOCATE_MEMORY_FAILED == ret) {
            if (REACH_TIME_INTERVAL(10L * 1000L * 1000L)) {
              SHARE_LOG(WARN, "allocate throttle info failed", KR(ret), K(tid));
            }
            if (ObClockGenerator::getCurrentTime() > abs_expire_time) {
              // deadline passed: give up; loop exits with OB_ALLOCATE_MEMORY_FAILED
              SHARE_LOG(WARN, "allocate throttle info failed", KR(ret), K(tid));
            } else {
              // sleep 10 ms and retry
              usleep(10 * 1000);
              ret = OB_SUCCESS;
            }
          } else {
            SHARE_LOG(ERROR, "allocate throttle info failed", KR(ret), K(tid));
          }
        } else if (OB_FAIL(throttle_info_map_.insert(tid, throttle_info))) {
          // insert failure is unexpected because tid is unique per thread
          SHARE_LOG(ERROR, "insert throttle info failed", KR(ret), K(tid));
          (void)throttle_info_map_.free_value(throttle_info);
        }
        // on successful insert the loop runs again so get() returns the value
        // (presumably acquiring the map reference) — TODO confirm against
        // ObLightHashMap's insert/get reference semantics
      }
    } else {
      get_throttle_info_done = true;
    }
  }
  return ret;
}
// Releases a map reference obtained via inner_get_throttle_info_().
template <typename ALLOCATOR>
void ObThrottleUnit<ALLOCATOR>::inner_revert_throttle_info_(share::ObThrottleInfo *throttle_info)
{
  (void)throttle_info_map_.revert(throttle_info);
}
// Fetches the throttle info for throttle_id and hands it to ti_guard, which
// reverts the map reference on destruction. Returns OB_ENTRY_NOT_EXIST when
// the thread never registered (not logged — it is an expected outcome).
template <typename ALLOCATOR>
int ObThrottleUnit<ALLOCATOR>::get_throttle_info_(const ThrottleID &throttle_id, share::ObThrottleInfoGuard &ti_guard)
{
  int ret = OB_SUCCESS;
  ObThrottleInfo *throttle_info = nullptr;
  if (OB_FAIL(throttle_info_map_.get(throttle_id, throttle_info))) {
    if (OB_ENTRY_NOT_EXIST != ret) {
      // include the error code, consistent with the other logs in this file
      SHARE_LOG(WARN, "get throttle info failed", KR(ret), THROTTLE_UNIT_INFO);
    }
  } else {
    (void)ti_guard.init(throttle_info, &throttle_info_map_);
  }
  return ret;
}
// Recomputes decay_factor_ from the current limit / trigger / max duration.
// N is the number of resource units between the trigger point and the limit;
// the factor solves throttle_max_duration_ ≈ N + decay_factor * (N*(N+1))^2/4,
// matching the accumulated-interval formula in avaliable_resource_after_dt_.
// A negative result is clamped to 0 (and reported), which effectively
// disables the decay model.
template <typename ALLOCATOR>
void ObThrottleUnit<ALLOCATOR>::update_decay_factor_(const bool is_adaptive_update /* default : false */)
{
  int64_t avaliable_resources = (100 - throttle_trigger_percentage_) * resource_limit_ / 100;
  double N = static_cast<double>(avaliable_resources) / static_cast<double>(RESOURCE_UNIT_SIZE_);
  double decay_factor =
      (static_cast<double>(throttle_max_duration_) - N) / static_cast<double>((((N * (N + 1) * N * (N + 1))) / 4));
  decay_factor_ = decay_factor < 0 ? 0 : decay_factor;
  if (decay_factor < 0) {
    SHARE_LOG_RET(
        ERROR, OB_ERR_UNEXPECTED, "decay factor is smaller than 0", K(decay_factor), K(throttle_max_duration_), K(N));
  }
  // adaptive updates can happen frequently, so rate-limit their log line
  if (OB_UNLIKELY(is_adaptive_update)) {
    if (REACH_TIME_INTERVAL(1LL * 1000LL * 1000LL /* one second */)) {
      SHARE_LOG(INFO, "[Throttle] Update Throttle Unit Config", K(is_adaptive_update), K(N), THROTTLE_UNIT_INFO);
    }
  } else {
    SHARE_LOG(INFO, "[Throttle] Update Throttle Unit Config", K(is_adaptive_update), K(N), THROTTLE_UNIT_INFO);
  }
}
// Advances the release clock by the amount of resource that became available
// since the last advance. Runs at most once per ADVANCE_CLOCK_INTERVAL; the
// BCAS on last_advance_clock_ts_us_ elects a single advancing thread.
template <typename ALLOCATOR>
void ObThrottleUnit<ALLOCATOR>::advance_clock(const int64_t holding_size)
{
  int64_t cur_ts = ObClockGenerator::getCurrentTime();
  int64_t old_ts = last_advance_clock_ts_us_;
  const int64_t advance_us = cur_ts - old_ts;
  if ((advance_us > ADVANCE_CLOCK_INTERVAL) && ATOMIC_BCAS(&last_advance_clock_ts_us_, old_ts, cur_ts)) {
    // (removed unused local `bool unused` — it was never read)
    const int64_t throttle_trigger = resource_limit_ * throttle_trigger_percentage_ / 100;
    const int64_t avaliable_resource = avaliable_resource_after_dt_(holding_size, throttle_trigger, advance_us);
    const int64_t clock = ATOMIC_LOAD(&clock_);
    const int64_t cur_seq = ATOMIC_LOAD(&sequence_num_);
    // the clock never runs past the queued sequence
    ATOMIC_SET(&clock_, min(cur_seq, clock + avaliable_resource));
  }
}
// Skips part of the throttled sequence when the throttle was not actually
// executed: pushes the clock forward by up to skip_size, but never past
// queue_sequence. Lock-free via a CAS retry loop.
template <typename ALLOCATOR>
void ObThrottleUnit<ALLOCATOR>::skip_throttle(const int64_t skip_size, const int64_t queue_sequence)
{
  bool done = false;
  while (!done) {
    const int64_t cur_clock = ATOMIC_LOAD(&clock_);
    if (cur_clock >= queue_sequence) {
      // the clock already reached the queued sequence: nothing to skip
      done = true;
    } else {
      const int64_t target_clock = cur_clock + min(skip_size, queue_sequence - cur_clock);
      // retry on CAS failure (another thread moved the clock concurrently)
      done = ATOMIC_BCAS(&clock_, cur_clock, target_clock);
    }
  }
}
// Clears the current thread's throttling state (if it has any).
// NOTE(review): relies on an implicit int64_t -> ThrottleID conversion here,
// unlike the explicit construction used in is_throttling() — confirm
// ThrottleID's constructor is non-explicit (or that this template member is
// never instantiated).
template <typename ALLOCATOR>
void ObThrottleUnit<ALLOCATOR>::reset_thread_throttle_()
{
  int ret = OB_SUCCESS;
  ObThrottleInfoGuard ti_guard;
  if (OB_FAIL(get_throttle_info_(common::get_itid(), ti_guard))) {
    SHARE_LOG(WARN, "get throttle info from map failed", KR(ret), THROTTLE_UNIT_INFO);
  } else if (ti_guard.is_valid()) {
    ti_guard.throttle_info()->need_throttle_ = false;
    ti_guard.throttle_info()->sequence_ = 0;
  }
}
// Computes how much resource becomes available within the next dt
// microseconds, under a decay model where the wait cost of the k-th resource
// unit beyond the trigger grows as decay_factor * k^3 (so the accumulated
// wait up to unit n is decay_factor * n^2*(n+1)^2 / 4).
// Below the trigger everything up to the trigger is immediately available.
template <typename ALLOCATOR>
int64_t ObThrottleUnit<ALLOCATOR>::avaliable_resource_after_dt_(const int64_t cur_resource_hold,
                                                                const int64_t throttle_trigger,
                                                                const int64_t dt)
{
  // NOTE(review): `ret` looks unused by the code below, but some logging
  // macros reference it implicitly — confirm before removing.
  int ret = OB_SUCCESS;
  int64_t avaliable_resource = 0;
  const double decay_factor = decay_factor_;
  int64_t init_seq = 0;                 // unit index we currently sit at (beyond the trigger)
  int64_t init_page_left_size = 0;      // bytes left inside the current (partially consumed) unit
  double init_page_left_interval = 0;   // time to finish the current unit
  double past_interval = 0;             // accumulated wait up to init_seq
  double last_page_interval = 0;        // time cost of one full unit at the relevant seq
  double mid_result = 0;
  double approx_max_chunk_seq = 0;
  int64_t max_seq = 0;                  // unit index reachable within dt
  double accumulate_interval = 0;
  if (cur_resource_hold < throttle_trigger) {
    // there is no speed limit now
    // we can get all the memory before speed limit
    avaliable_resource = throttle_trigger - cur_resource_hold;
  } else if (decay_factor <= 0) {
    avaliable_resource = 0;
    SHARE_LOG(WARN, "decay factor invalid", K(cur_resource_hold), K(throttle_trigger), K(dt), THROTTLE_UNIT_INFO);
  } else {
    init_seq = ((cur_resource_hold - throttle_trigger) + RESOURCE_UNIT_SIZE_ - 1) / (RESOURCE_UNIT_SIZE_);
    init_page_left_size = RESOURCE_UNIT_SIZE_ - (cur_resource_hold - throttle_trigger) % RESOURCE_UNIT_SIZE_;
    init_page_left_interval = (1.0 * decay_factor * pow(init_seq, 3) * init_page_left_size / RESOURCE_UNIT_SIZE_);
    past_interval = decay_factor * pow(init_seq, 2) * pow(init_seq + 1, 2) / 4;
    // there is speed limit
    if (init_page_left_interval > dt) {
      // dt ends inside the current unit: pro-rate within it
      last_page_interval = decay_factor * pow(init_seq, 3);
      avaliable_resource = dt / last_page_interval * RESOURCE_UNIT_SIZE_;
    } else {
      // invert the accumulated-wait formula to find the furthest unit
      // reachable within dt (max_seq ~ 4th root of mid_result)
      mid_result = 4.0 * (dt + past_interval - init_page_left_interval) / decay_factor;
      approx_max_chunk_seq = pow(mid_result, 0.25);
      max_seq = floor(approx_max_chunk_seq);
      // correct the floating-point approximation upward at most twice
      for (int i = 0; i < 2; i++) {
        if (pow(max_seq, 2) * pow(max_seq + 1, 2) < mid_result) {
          max_seq = max_seq + 1;
        }
      }
      accumulate_interval =
          pow(max_seq, 2) * pow(max_seq + 1, 2) * decay_factor / 4 - past_interval + init_page_left_interval;
      avaliable_resource = init_page_left_size + (max_seq - init_seq) * RESOURCE_UNIT_SIZE_;
      if (accumulate_interval > dt) {
        // we overshot: give back the pro-rated part of the last unit
        last_page_interval = decay_factor * pow(max_seq, 3);
        avaliable_resource -= (accumulate_interval - dt) / last_page_interval * RESOURCE_UNIT_SIZE_;
      }
    }
    // defensive code
    if (pow(max_seq, 2) * pow(max_seq + 1, 2) < mid_result) {
      SHARE_LOG(ERROR, "unexpected result", K(max_seq), K(mid_result));
    }
  }
  // defensive code
  if (avaliable_resource <= 0) {
    SHARE_LOG(
        WARN, "we can not get memory now", K(avaliable_resource), K(cur_resource_hold), K(dt), THROTTLE_UNIT_INFO);
  }
  return avaliable_resource;
}
// Returns whether the caller (whose throttle info is held by ti_guard) still
// has to wait: true while its queued sequence is ahead of the release clock.
// Advances the clock first so the answer is as fresh as possible, and resets
// the throttle info once waiting is over.
template <typename ALLOCATOR>
bool ObThrottleUnit<ALLOCATOR>::still_throttling(ObThrottleInfoGuard &ti_guard, const int64_t holding_size)
{
  (void)advance_clock(holding_size);
  bool still_throttling = false;
  int64_t trigger_percentage = throttle_trigger_percentage_;
  ObThrottleInfo *ti_info = ti_guard.throttle_info();
  // a trigger percentage of 100 disables throttling entirely
  if (trigger_percentage < 100 && OB_NOT_NULL(ti_info)) {
    // (removed dead computation of throttle_trigger — it was never read here)
    still_throttling = (ti_info->sequence_ > clock_);
  } else {
    still_throttling = false;
  }
  // reset status if do not need throttle
  if (!still_throttling && OB_NOT_NULL(ti_info)) {
    ti_info->reset();
  }
  return still_throttling;
}
// Estimates (in microseconds) how long the caller must wait until the clock
// reaches its queued sequence, by extrapolating the release rate of the next
// ADVANCE_CLOCK_INTERVAL.
template <typename ALLOCATOR>
int64_t ObThrottleUnit<ALLOCATOR>::expected_wait_time(share::ObThrottleInfoGuard &ti_guard, const int64_t holding_size)
{
  int64_t expected_wait_time = 0;
  int64_t clock = clock_;
  int64_t queue_sequence = OB_NOT_NULL(ti_guard.throttle_info()) ? ti_guard.throttle_info()->sequence_ : 0;
  if (clock >= queue_sequence) {
    // do not need wait cause clock has reached queue_sequence
    expected_wait_time = 0;
  } else {
    int64_t throttle_trigger = resource_limit_ * throttle_trigger_percentage_ / 100;
    int64_t can_assign_in_next_period =
        avaliable_resource_after_dt_(holding_size, throttle_trigger, ADVANCE_CLOCK_INTERVAL);
    if (can_assign_in_next_period != 0) {
      // scale the remaining backlog by the next period's release rate
      expected_wait_time = (queue_sequence - clock) * ADVANCE_CLOCK_INTERVAL / can_assign_in_next_period;
    } else {
      // nothing releasable next period: report at least one interval
      expected_wait_time = ADVANCE_CLOCK_INTERVAL;
    }
  }
  return expected_wait_time;
}
// Updates the resource limit (negative values are rejected) and recomputes
// the decay factor accordingly.
template <typename ALLOCATOR>
void ObThrottleUnit<ALLOCATOR>::set_resource_limit(const int64_t value)
{
  if (value >= 0) {
    (void)inner_set_resource_limit_(value);
    (void)update_decay_factor_();
  } else {
    int ret = OB_INVALID_ARGUMENT;
    SHARE_LOG(WARN, "update throttle config failed cause invalid argument", K(value));
  }
}
// Records the config-specified limit; the effective resource_limit_ is only
// written directly when adaptive mode is off (the adaptive update path owns
// it otherwise).
template <typename ALLOCATOR>
void ObThrottleUnit<ALLOCATOR>::inner_set_resource_limit_(const int64_t value)
{
  ATOMIC_STORE(&config_specify_resource_limit_, value);
  if (enable_adaptive_limit_) {
    // adaptive update logic will update resource_limit_
  } else {
    ATOMIC_STORE(&resource_limit_, value);
  }
}
// Updates the trigger percentage (must lie in (0, 100]) and recomputes the
// decay factor, which depends on it.
template <typename ALLOCATOR>
void ObThrottleUnit<ALLOCATOR>::set_throttle_trigger_percentage(const int64_t value)
{
  if (value > 0 && value <= 100) {
    throttle_trigger_percentage_ = value;
    (void)update_decay_factor_();
  } else {
    int ret = OB_INVALID_ARGUMENT;
    SHARE_LOG(WARN, "update throttle config failed cause invalid argument", K(value));
  }
}
// Updates the maximum throttle duration (must be positive) and recomputes
// the decay factor, which depends on it.
template <typename ALLOCATOR>
void ObThrottleUnit<ALLOCATOR>::set_throttle_max_duration(const int64_t value)
{
  if (value > 0) {
    throttle_max_duration_ = value;
    (void)update_decay_factor_();
  } else {
    int ret = OB_INVALID_ARGUMENT;
    SHARE_LOG(WARN, "update throttle config failed cause invalid argument", K(value));
  }
}
// Atomically (from the config point of view) applies all three throttle
// settings, then recomputes the decay factor once. All arguments are
// validated up front; nothing is changed if any of them is invalid.
template <typename ALLOCATOR>
void ObThrottleUnit<ALLOCATOR>::update_throttle_config(const int64_t resource_limit,
                                                       const int64_t throttle_trigger_percentage,
                                                       const int64_t throttle_max_duration)
{
  const bool args_valid = (resource_limit >= 0) && (throttle_trigger_percentage > 0) &&
                          (throttle_trigger_percentage <= 100) && (throttle_max_duration > 0);
  if (args_valid) {
    throttle_trigger_percentage_ = throttle_trigger_percentage;
    throttle_max_duration_ = throttle_max_duration;
    (void)inner_set_resource_limit_(resource_limit);
    (void)update_decay_factor_();
  } else {
    int ret = OB_INVALID_ARGUMENT;
    SHARE_LOG(WARN,
              "update throttle config failed cause invalid argument",
              K(resource_limit),
              K(throttle_trigger_percentage),
              K(throttle_max_duration));
  }
}
// Switches this unit to adaptive mode: from now on resource_limit_ is owned
// by the adaptive update path rather than by direct config writes.
template <typename ALLOCATOR>
void ObThrottleUnit<ALLOCATOR>::enable_adaptive_limit()
{
  ATOMIC_STORE(&enable_adaptive_limit_, true);
}
} // namespace share
} // namespace oceanbase
#endif

View File

@ -535,16 +535,20 @@ void ObSSTablePrinter::print_store_row(
if (OB_LIKELY(tx_id.get_id() != INT64_MAX)) {
ObMemAttr mem_attr;
mem_attr.label_ = "TX_DATA_TABLE";
void *p = op_alloc(ObSliceAlloc);
common::ObSliceAlloc *slice_allocator = new (p) ObSliceAlloc(storage::TX_DATA_SLICE_SIZE, mem_attr);
void *p = op_alloc(ObTenantTxDataAllocator);
if (OB_NOT_NULL(p)) {
ObTenantTxDataAllocator *tx_data_allocator = new (p) ObTenantTxDataAllocator();
ObTxData tx_data;
tx_data.tx_id_ = tx_id;
if (OB_FAIL(tx_data.deserialize(str.ptr(), str.length(), pos, *slice_allocator))) {
STORAGE_LOG(WARN, "deserialize tx data failed", KR(ret), K(str));
hex_dump(str.ptr(), str.length(), true, OB_LOG_LEVEL_WARN);
} else {
ObTxData::print_to_stderr(tx_data);
ObTxData tx_data;
tx_data.tx_id_ = tx_id;
if (OB_FAIL(tx_data_allocator->init("PRINT_TX_DATA_SST"))) {
STORAGE_LOG(WARN, "init tx data allocator failed", KR(ret), K(str));
} else if (OB_FAIL(tx_data.deserialize(str.ptr(), str.length(), pos, *tx_data_allocator))) {
STORAGE_LOG(WARN, "deserialize tx data failed", KR(ret), K(str));
hex_dump(str.ptr(), str.length(), true, OB_LOG_LEVEL_WARN);
} else {
ObTxData::print_to_stderr(tx_data);
}
}
} else {
// pre-process data for upper trans version calculation

View File

@ -174,7 +174,7 @@ int ObMemtable::init(const ObITable::TableKey &table_key,
} else if (FALSE_IT(set_max_schema_version(schema_version))) {
} else if (OB_FAIL(set_freezer(freezer))) {
TRANS_LOG(WARN, "fail to set freezer", K(ret), KP(freezer));
} else if (OB_FAIL(local_allocator_.init(MTL_ID()))) {
} else if (OB_FAIL(local_allocator_.init())) {
TRANS_LOG(WARN, "fail to init memstore allocator", K(ret), "tenant id", MTL_ID());
} else if (OB_FAIL(query_engine_.init(MTL_ID()))) {
TRANS_LOG(WARN, "query_engine.init fail", K(ret), "tenant_id", MTL_ID());

View File

@ -12,7 +12,7 @@
#ifndef OCEANBASE_MEMTABLE_OB_MEMTABLE_
#define OCEANBASE_MEMTABLE_OB_MEMTABLE_
#include "share/allocator/ob_gmemstore_allocator.h"
#include "share/allocator/ob_memstore_allocator.h"
#include "share/ob_tenant_mgr.h"
#include "share/ob_cluster_version.h"
@ -182,8 +182,8 @@ enum class MemtableRefOp
class ObMemtable : public ObIMemtable, public storage::checkpoint::ObFreezeCheckpoint
{
public:
typedef common::ObGMemstoreAllocator::AllocHandle ObMemstoreAllocator;
using ObMvccRowAndWriteResults = common::ObSEArray<ObMvccRowAndWriteResult, 16>;
typedef share::ObMemstoreAllocator::AllocHandle ObSingleMemstoreAllocator;
ObMemtable();
virtual ~ObMemtable();
public:
@ -630,7 +630,7 @@ private:
storage::ObFreezer *freezer_;
storage::ObTabletMemtableMgr *memtable_mgr_;
mutable uint32_t freeze_clock_;
ObMemstoreAllocator local_allocator_;
ObSingleMemstoreAllocator local_allocator_;
ObMTKVBuilder kv_builder_;
ObQueryEngine query_engine_;
ObMvccEngine mvcc_engine_;

View File

@ -913,6 +913,7 @@ int ObMemtableMultiVersionScanIterator::inner_get_next_row(const ObDatumRow *&ro
TRANS_LOG(TRACE, "after inner get next row", K(*row), K(scan_state_));
}
}
return ret;
}

View File

@ -13,6 +13,7 @@
#include "mds_factory.h"
#include "lib/ob_errno.h"
#include "share/rc/ob_tenant_base.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h"
#include "storage/multi_data_source/buffer_ctx.h"
#include "storage/tx/ob_trans_define.h"
#include "storage/tx_storage/ob_tenant_freezer.h"
@ -30,7 +31,7 @@ namespace mds
void *MdsAllocator::alloc(const int64_t size)
{
void *ptr = MTL(ObTenantMdsService*)->get_allocator().alloc(size);
void *ptr = MTL(share::ObSharedMemAllocMgr *)->mds_allocator().alloc(size);
if (OB_NOT_NULL(ptr)) {
ATOMIC_INC(&alloc_times_);
}
@ -39,13 +40,13 @@ void *MdsAllocator::alloc(const int64_t size)
void *MdsAllocator::alloc(const int64_t size, const ObMemAttr &attr)
{
return MTL(ObTenantMdsService*)->get_allocator().alloc(size, attr);
return MTL(share::ObSharedMemAllocMgr *)->mds_allocator().alloc(size, attr);
}
void MdsAllocator::free(void *ptr) {
if (OB_NOT_NULL(ptr)) {
ATOMIC_INC(&free_times_);
MTL(ObTenantMdsService*)->get_allocator().free(ptr);
MTL(share::ObSharedMemAllocMgr *)->mds_allocator().free(ptr);
}
}

View File

@ -19,6 +19,7 @@
#include "lib/utility/utility.h"
#include "ob_clock_generator.h"
#include "share/rc/ob_tenant_base.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h"
#include "storage/meta_mem/ob_tablet_map_key.h"
#include "storage/meta_mem/ob_tenant_meta_mem_mgr.h"
#include "storage/tablet/ob_tablet.h"
@ -38,7 +39,6 @@ namespace storage
{
namespace mds
{
/********************FOR MEMORY LEAK DEBUG***************************/
thread_local char __thread_mds_tag__[TAG_SIZE] = {0};
TLOCAL(const char *, __thread_mds_alloc_type__) = nullptr;
@ -46,7 +46,7 @@ TLOCAL(const char *, __thread_mds_alloc_file__) = nullptr;
TLOCAL(const char *, __thread_mds_alloc_func__) = nullptr;
TLOCAL(uint32_t, __thread_mds_alloc_line__) = 0;
void set_mds_mem_check_thread_local_info(const MdsWriter &writer,
void set_mds_mem_check_thread_local_info(const storage::mds::MdsWriter &writer,
const char *alloc_ctx_type,
const char *alloc_file,
const char *alloc_func,
@ -85,92 +85,6 @@ void reset_mds_mem_check_thread_local_info()
}
/********************************************************************/
int ObTenantMdsAllocator::init()
{
int ret = OB_SUCCESS;
ObMemAttr mem_attr;
// TODO : @gengli new ctx id?
mem_attr.tenant_id_ = MTL_ID();
mem_attr.ctx_id_ = ObCtxIds::MDS_DATA_ID;
mem_attr.label_ = "MdsTable";
MDS_TG(10_ms);
if (MDS_FAIL(allocator_.init(OB_MALLOC_NORMAL_BLOCK_SIZE, block_alloc_, mem_attr))) {
MDS_LOG(WARN, "init vslice allocator failed",
K(ret), K(OB_MALLOC_NORMAL_BLOCK_SIZE), KP(this), K(mem_attr));
} else {
allocator_.set_nway(MDS_ALLOC_CONCURRENCY);
}
return ret;
}
void *ObTenantMdsAllocator::alloc(const int64_t size)
{
void *obj = allocator_.alloc(size);
MDS_LOG(DEBUG, "mds alloc ", K(size), KP(obj));
if (OB_NOT_NULL(obj)) {
MTL(ObTenantMdsService*)->record_alloc_backtrace(obj,
__thread_mds_tag__,
__thread_mds_alloc_type__,
__thread_mds_alloc_file__,
__thread_mds_alloc_func__,
__thread_mds_alloc_line__);// for debug mem leak
}
return obj;
}
void *ObTenantMdsAllocator::alloc(const int64_t size, const ObMemAttr &attr)
{
UNUSED(attr);
void *obj = alloc(size);
MDS_LOG_RET(WARN, OB_INVALID_ARGUMENT, "VSLICE Allocator not support mark attr", KP(obj), K(size), K(attr));
return obj;
}
void ObTenantMdsAllocator::free(void *ptr)
{
allocator_.free(ptr);
MTL(ObTenantMdsService*)->erase_alloc_backtrace(ptr);
}
void ObTenantMdsAllocator::set_attr(const ObMemAttr &attr)
{
allocator_.set_attr(attr);
}
void *ObTenantBufferCtxAllocator::alloc(const int64_t size)
{
void *obj = share::mtl_malloc(size, ObMemAttr(MTL_ID(), "MDS_CTX_DEFAULT", ObCtxIds::MDS_CTX_ID));
if (OB_NOT_NULL(obj)) {
MTL(ObTenantMdsService*)->record_alloc_backtrace(obj,
__thread_mds_tag__,
__thread_mds_alloc_type__,
__thread_mds_alloc_file__,
__thread_mds_alloc_func__,
__thread_mds_alloc_line__);// for debug mem leak
}
return obj;
}
void *ObTenantBufferCtxAllocator::alloc(const int64_t size, const ObMemAttr &attr)
{
void *obj = share::mtl_malloc(size, attr);
if (OB_NOT_NULL(obj)) {
MTL(ObTenantMdsService*)->record_alloc_backtrace(obj,
__thread_mds_tag__,
__thread_mds_alloc_type__,
__thread_mds_alloc_file__,
__thread_mds_alloc_func__,
__thread_mds_alloc_line__);// for debug mem leak
}
return obj;
}
void ObTenantBufferCtxAllocator::free(void *ptr)
{
share::mtl_free(ptr);
MTL(ObTenantMdsService*)->erase_alloc_backtrace(ptr);
}
int ObTenantMdsService::mtl_init(ObTenantMdsService *&mds_service)
{
int ret = OB_SUCCESS;
@ -180,8 +94,6 @@ int ObTenantMdsService::mtl_init(ObTenantMdsService *&mds_service)
MDS_LOG(ERROR, "init mds tenant service twice!", KR(ret), KPC(mds_service));
} else if (MDS_FAIL(mds_service->memory_leak_debug_map_.init("MdsDebugMap", MTL_ID()))) {
MDS_LOG(WARN, "init map failed", K(ret));
} else if (MDS_FAIL(mds_service->mds_allocator_.init())) {
MDS_LOG(ERROR, "fail to init allocator", KR(ret), KPC(mds_service));
} else if (MDS_FAIL(mds_service->mds_timer_.timer_.init_and_start(1/*worker number*/,
100_ms/*precision*/,
"MdsT"/*thread name*/))) {

View File

@ -22,7 +22,7 @@
#include "lib/allocator/ob_vslice_alloc.h"
#include "share/ob_ls_id.h"
#include "share/ob_occam_timer.h"
// #include "storage/tx/ob_trans_define.h"
#include "share/allocator/ob_mds_allocator.h"
#include "storage/tx_storage/ob_ls_handle.h"
#include "lib/hash/ob_linear_hash_map.h"
@ -95,34 +95,6 @@ struct ObMdsMemoryLeakDebugInfo
int64_t tid_;
};
// Tenant-level allocator for MDS (multi-data-source) objects, backed by a
// vslice allocator on top of a block allocation manager.
class ObTenantMdsAllocator : public ObIAllocator
{
  friend class ObTenantMdsService;
private:
  // concurrency level handed to the underlying ObVSliceAlloc
  static const int64_t MDS_ALLOC_CONCURRENCY = 32;
public:
  ObTenantMdsAllocator() = default;
  int init();
  void destroy() {}
  virtual void *alloc(const int64_t size) override;
  virtual void *alloc(const int64_t size, const ObMemAttr &attr) override;
  virtual void free(void *ptr) override;
  virtual void set_attr(const ObMemAttr &attr) override;
  // total memory currently held by the underlying allocator, in bytes
  int64_t hold() { return allocator_.hold(); }
  TO_STRING_KV(KP(this));
private:
  common::ObBlockAllocMgr block_alloc_;
  common::ObVSliceAlloc allocator_;
};
// Tenant-scoped allocator for buffer-ctx objects. Delegates to
// mtl_malloc/mtl_free and additionally records allocation backtraces
// for memory-leak debugging.
struct ObTenantBufferCtxAllocator : public ObIAllocator// for now, it is just a wrapper of mtl_malloc
{
  virtual void *alloc(const int64_t size) override;
  virtual void *alloc(const int64_t size, const ObMemAttr &attr) override;
  virtual void free(void *ptr) override;
  // no-op: memory attributes are supplied per allocation instead
  virtual void set_attr(const ObMemAttr &) override {}
};
struct ObTenantMdsTimer
{
ObTenantMdsTimer() = default;
@ -161,9 +133,8 @@ public:
static int for_each_ls_in_tenant(const ObFunction<int(ObLS &)> &op);
static int for_each_tablet_in_ls(ObLS &ls, const ObFunction<int(ObTablet &)> &op);
static int for_each_mds_table_in_ls(ObLS &ls, const ObFunction<int(ObTablet &)> &op);
ObTenantMdsAllocator &get_allocator() { return mds_allocator_; }
ObTenantBufferCtxAllocator &get_buffer_ctx_allocator() { return buffer_ctx_allocator_; }
TO_STRING_KV(KP(this), K_(is_inited), K_(mds_allocator), K_(mds_timer))
share::ObTenantBufferCtxAllocator &get_buffer_ctx_allocator() { return buffer_ctx_allocator_; }
TO_STRING_KV(KP(this), K_(is_inited), K_(mds_timer))
public:
/*******************debug for memory leak************************/
template <typename OP>
@ -189,8 +160,7 @@ public:
/***************************************************************/
private:
bool is_inited_;
ObTenantMdsAllocator mds_allocator_;
ObTenantBufferCtxAllocator buffer_ctx_allocator_;
share::ObTenantBufferCtxAllocator buffer_ctx_allocator_;
ObTenantMdsTimer mds_timer_;
/*******************debug for memory leak************************/
ObLinearHashMap<ObIntWarp, ObMdsMemoryLeakDebugInfo> memory_leak_debug_map_;

View File

@ -13,8 +13,9 @@
#define USING_LOG_PREFIX STORAGE
#include "storage/ob_storage_table_guard.h"
#include "share/allocator/ob_memstore_allocator_mgr.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h"
#include "share/throttle/ob_throttle_common.h"
#include "share/throttle/ob_share_throttle_define.h"
#include "storage/memtable/ob_memtable.h"
#include "storage/ob_i_table.h"
#include "storage/ob_relative_table.h"
@ -47,116 +48,121 @@ ObStorageTableGuard::ObStorageTableGuard(
for_multi_source_data_(for_multi_source_data)
{
init_ts_ = ObTimeUtility::current_time();
get_thread_alloc_stat() = 0;
share::memstore_throttled_alloc() = 0;
}
ObStorageTableGuard::~ObStorageTableGuard()
{
bool &need_speed_limit = tl_need_speed_limit();
ObThrottleStat &stat = get_throttle_stat();
int64_t total_expected_wait_us = 0;
int64_t user_timeout_skip_us = 0;
int64_t frozen_memtable_skip_us = 0;
int64_t replay_frozen_skip_us = 0;
int64_t from_user_skip_us = 0; // does not used now
if (need_control_mem_ && need_speed_limit) {
bool need_sleep = true;
int64_t left_interval = INT64_MAX;
if (!for_replay_) {
left_interval = min(left_interval, store_ctx_.timeout_ - ObTimeUtility::current_time());
}
if (NULL != memtable_) {
need_sleep = memtable_->is_active_memtable();
}
uint64_t timeout = 10000;//10s
common::ObWaitEventGuard wait_guard(common::ObWaitEventIds::MEMSTORE_MEM_PAGE_ALLOC_WAIT, timeout, 0, 0, left_interval);
reset();
int ret = OB_SUCCESS;
int tmp_ret = OB_SUCCESS;
bool has_sleep = false;
int64_t sleep_time = 0;
int time = 0;
const int64_t &seq = get_seq();
int64_t clock = 0;
ObGMemstoreAllocator* memstore_allocator = NULL;
if (OB_SUCCESS != (tmp_ret = ObMemstoreAllocatorMgr::get_instance().get_tenant_memstore_allocator(
MTL_ID(), memstore_allocator))) {
} else if (OB_ISNULL(memstore_allocator)) {
LOG_WARN_RET(OB_ALLOCATE_MEMORY_FAILED, "get_tenant_mutil_allocator failed", K(store_ctx_.tablet_id_), K(tmp_ret));
} else {
clock = memstore_allocator->get_clock();
total_expected_wait_us = memstore_allocator->expected_wait_time(seq);
user_timeout_skip_us = max(0, total_expected_wait_us - left_interval);
frozen_memtable_skip_us = need_sleep ? 0 : max(0, total_expected_wait_us - user_timeout_skip_us);
while (need_sleep &&
!memstore_allocator->check_clock_over_seq(seq) &&
(left_interval > 0)) {
if (for_replay_) {
if(MTL(ObTenantFreezer *)->exist_ls_freezing()) {
replay_frozen_skip_us = max(0, total_expected_wait_us - user_timeout_skip_us - sleep_time);
break;
}
}
int64_t expected_wait_time = memstore_allocator->expected_wait_time(seq);
if (expected_wait_time < 0) {
LOG_ERROR("expected wait time should not smaller than 0", K(expected_wait_time), K(seq), K(clock), K(left_interval));
}
if (expected_wait_time <= 0) {
break;
}
int64_t sleep_interval = min(min(left_interval, SLEEP_INTERVAL_PER_TIME), expected_wait_time);
// don't use ob_usleep, as we are already in the scope of 'wait_guard'
if (sleep_interval < 0) {
LOG_ERROR("sleep interval should not smaller than 0", K(expected_wait_time), K(seq), K(clock), K(left_interval));
}
if (sleep_interval > 10 * 60 * 1000 * 1000L) {
LOG_WARN("sleep interval greater than 10 minutes, pay attention", K(expected_wait_time), K(seq), K(clock), K(left_interval));
}
if (sleep_interval <= 0) {
break;
}
::usleep(sleep_interval);
sleep_time += sleep_interval;
time++;
left_interval -= sleep_interval;
has_sleep = true;
need_sleep = memstore_allocator->need_do_writing_throttle();
}
const int64_t finish_clock = memstore_allocator->get_clock();
if (finish_clock < seq) { // we has skip some time, need make the clock skip too.
const int64_t skip_clock = MIN(seq - finish_clock, get_thread_alloc_stat());
memstore_allocator->skip_clock(skip_clock);
}
}
if (REACH_TIME_INTERVAL(100 * 1000L) &&
sleep_time > 0) {
int64_t cost_time = ObTimeUtility::current_time() - init_ts_;
LOG_INFO("throttle situation", K(sleep_time), K(clock), K(time), K(seq), K(for_replay_), K(cost_time));
}
if (for_replay_ && has_sleep) {
// avoid print replay_timeout
get_replay_is_writing_throttling() = true;
}
}
(void)throttle_if_needed_();
reset();
stat.update(total_expected_wait_us,
from_user_skip_us,
user_timeout_skip_us,
frozen_memtable_skip_us,
replay_frozen_skip_us);
const bool last_throttle_status = stat.last_throttle_status;
const int64_t last_print_log_time = stat.last_log_timestamp;
if (stat.need_log(need_speed_limit)) {
LOG_INFO("throttle statics", K(need_speed_limit), K(last_throttle_status), K(last_print_log_time), K(stat));
if (!need_speed_limit && last_throttle_status) {
stat.reset();
}
// Apply memstore write throttling, if triggered, when this guard is released.
// Sleeps (via do_throttle_) only for writes into an active memtable; if the
// sleep was skipped or cut short while the throttle is still in effect, the
// skipped share is advanced past via skip_throttle() so other writers are not
// over-penalized.
void ObStorageTableGuard::throttle_if_needed_()
{
  int ret = OB_SUCCESS;
  if (!need_control_mem_) {
    // skip throttle
  } else {
    TxShareThrottleTool &throttle_tool = MTL(ObSharedMemAllocMgr *)->share_resource_throttle_tool();
    ObThrottleInfoGuard share_ti_guard;
    ObThrottleInfoGuard module_ti_guard;
    // NOTE: the former unused local `thread_idx` (from common::get_itid()) has
    // been removed; it was dead code and cost a TLS lookup on every release.
    if (throttle_tool.is_throttling<ObMemstoreAllocator>(share_ti_guard, module_ti_guard)) {
      // only do throttle on active memtable
      if (OB_NOT_NULL(memtable_) && memtable_->is_active_memtable()) {
        reset();
        (void)do_throttle_(throttle_tool, share_ti_guard, module_ti_guard);
      }

      // if throttle is skipped due to some reasons, advance clock by call skip_throttle() and clean throttle status
      // record in throttle info
      if (throttle_tool.still_throttling<ObMemstoreAllocator>(share_ti_guard, module_ti_guard)) {
        int64_t skip_size = share::memstore_throttled_alloc();
        (void)throttle_tool.skip_throttle<ObMemstoreAllocator>(skip_size, share_ti_guard, module_ti_guard);
        if (OB_NOT_NULL(module_ti_guard.throttle_info())) {
          module_ti_guard.throttle_info()->reset();
        }
      }
    }
  }
}
// Warn (rate-limited to once per minute per thread) when a single throttled
// write has already slept for more than one minute. Expands references to the
// locals `sleep_time`, `left_interval` and `expected_wait_time`, which must be
// in scope at the expansion site (see do_throttle_ below).
#define PRINT_THROTTLE_WARN \
do { \
const int64_t WARN_LOG_INTERVAL = 60L * 1000L * 1000L /* one minute */; \
if (sleep_time > (WARN_LOG_INTERVAL) && TC_REACH_TIME_INTERVAL(WARN_LOG_INTERVAL)) { \
SHARE_LOG(WARN, \
"[Throttling] Attention!! Sleep More Than One Minute!!", \
K(sleep_time), \
K(left_interval), \
K(expected_wait_time)); \
} \
} while (0)
// Log the accumulated throttle sleep time, rate-limited to once per second.
// Expands a reference to the local `sleep_time`, which must be in scope at the
// expansion site (see do_throttle_ below).
// BUGFIX: dropped the trailing semicolon after `while (0)` — it defeated the
// do-while(0) single-statement idiom (e.g. `if (x) MACRO; else ...` would not
// compile) and was inconsistent with PRINT_THROTTLE_WARN above.
#define PRINT_THROTTLE_STATISTIC \
do { \
const int64_t MEMSTORE_THROTTLE_LOG_INTERVAL = 1L * 1000L * 1000L; /* one second */ \
if (sleep_time > 0 && REACH_TIME_INTERVAL(MEMSTORE_THROTTLE_LOG_INTERVAL)) { \
SHARE_LOG(INFO, \
"[Throttling] Time Info", \
"Throttle Unit Name", \
ObMemstoreAllocator::throttle_unit_name(), \
"Throttle Sleep Time(us)", \
sleep_time); \
} \
} while (0)
// Sleep the current thread in SLEEP_INTERVAL_PER_TIME slices until the
// memstore throttle releases it, the expected wait drops to zero, or the
// remaining budget (`left_interval`) is exhausted.
// - Foreground writes bound the sleep by the statement timeout
//   (store_ctx_.timeout_); replay has no user deadline, so its budget is
//   unbounded.
// - Replay additionally bails out early while any LS freeze is in progress.
void ObStorageTableGuard::do_throttle_(TxShareThrottleTool &throttle_tool,
                                       ObThrottleInfoGuard &share_ti_guard,
                                       ObThrottleInfoGuard &module_ti_guard)
{
  int ret = OB_SUCCESS;
  int64_t sleep_time = 0;
  int64_t left_interval = INT64_MAX;

  if (!for_replay_) {
    // never sleep past the user statement timeout
    left_interval = min(left_interval, store_ctx_.timeout_ - ObClockGenerator::getCurrentTime());
  }

  while (throttle_tool.still_throttling<ObMemstoreAllocator>(share_ti_guard, module_ti_guard) && (left_interval > 0)) {
    int64_t expected_wait_time = 0;
    if (for_replay_ && MTL(ObTenantFreezer *)->exist_ls_freezing()) {
      // skip throttle if ls freeze exists
      break;
    } else if ((expected_wait_time =
                    throttle_tool.expected_wait_time<ObMemstoreAllocator>(share_ti_guard, module_ti_guard)) <= 0) {
      if (expected_wait_time < 0) {
        // BUGFIX: the former K(clock) argument named no local or member here
        // (the `clock` local lived only in the old destructor code); it bound
        // to ::clock from <ctime> and logged a function pointer. Dropped.
        LOG_ERROR("expected wait time should not smaller than 0",
                  K(expected_wait_time),
                  KPC(share_ti_guard.throttle_info()),
                  KPC(module_ti_guard.throttle_info()),
                  K(left_interval));
      }
      break;
    }

    // do sleep when expected_wait_time and left_interval are not equal to 0
    int64_t sleep_interval = min(SLEEP_INTERVAL_PER_TIME, expected_wait_time);
    ::usleep(sleep_interval);
    sleep_time += sleep_interval;
    left_interval -= sleep_interval;
    PRINT_THROTTLE_WARN;
  }
  PRINT_THROTTLE_STATISTIC;

  if (for_replay_ && sleep_time > 0) {
    // avoid print replay_timeout
    get_replay_is_writing_throttling() = true;
  }
}
#undef PRINT_THROTTLE_WARN
#undef PRINT_THROTTLE_STATISTIC
int ObStorageTableGuard::refresh_and_protect_table(ObRelativeTable &relative_table)
{
int ret = OB_SUCCESS;
@ -264,6 +270,7 @@ void ObStorageTableGuard::reset()
memtable_->dec_write_ref();
memtable_ = NULL;
}
share::memstore_throttled_alloc() = 0;
}
void ObStorageTableGuard::double_check_inc_write_ref(

View File

@ -15,12 +15,17 @@
#include <stdint.h>
#include "share/scn.h"
#include "share/throttle/ob_share_throttle_define.h"
namespace oceanbase
{
namespace share
{
class ObThrottleInfoGuard;
}
namespace memtable
{
class ObMemtable;
class ObIMemtable;
}
@ -60,8 +65,13 @@ private:
int check_freeze_to_inc_write_ref(ObITable *table, bool &bool_ret, bool &for_replace_tablet_meta);
bool need_to_refresh_table(ObTableStoreIterator &iter);
void check_if_need_log_(bool &need_log, bool &need_log_error);
void throttle_if_needed_();
void do_throttle_(share::TxShareThrottleTool &throttle_tool,
share::ObThrottleInfoGuard &share_ti_guard,
share::ObThrottleInfoGuard &module_ti_guard);
private:
static const int64_t LOG_INTERVAL_US = 10 * 1000 * 1000; // 10s
static const int64_t LOG_INTERVAL_US = 10 * 1000 * 1000; // 10s
static const int64_t LOG_ERROR_INTERVAL_US = 60 * 1000 * 1000; // 1min
static const int64_t GET_TS_INTERVAL = 10 * 1000;
static const int64_t SLEEP_INTERVAL_PER_TIME = 20 * 1000; // 20ms

View File

@ -80,7 +80,7 @@ public:
};
typedef ObDList<ObTableLockOpLinkNode> ObTableLockOpList;
class ObOBJLock : public ObTransHashLink<ObOBJLock>
class ObOBJLock : public share::ObLightHashLink<ObOBJLock>
{
public:
ObOBJLock(const ObLockID &lock_id);
@ -298,7 +298,7 @@ public:
class ObOBJLockMap
{
typedef ObTransHashMap<ObLockID, ObOBJLock, ObOBJLockAlloc, common::SpinRWLock, 1 << 10> Map;
typedef share::ObLightHashMap<ObLockID, ObOBJLock, ObOBJLockAlloc, common::SpinRWLock, 1 << 10> Map;
public:
ObOBJLockMap() :
lock_map_(),

View File

@ -36,7 +36,7 @@ namespace transaction
TRANS_LOG(WARN, "tx table is null", KR(ret), K(ctx_mgr_->get_ls_id()), K(*this)); \
}
int ObCtxTxData::init(ObLSTxCtxMgr *ctx_mgr, int64_t tx_id)
int ObCtxTxData::init(const int64_t abs_expire_time, ObLSTxCtxMgr *ctx_mgr, int64_t tx_id)
{
int ret = OB_SUCCESS;
if (OB_ISNULL(ctx_mgr) || tx_id < 0) {
@ -51,7 +51,7 @@ int ObCtxTxData::init(ObLSTxCtxMgr *ctx_mgr, int64_t tx_id)
} else if (OB_ISNULL(tx_table = ctx_mgr_->get_tx_table())) {
ret = OB_ERR_UNEXPECTED;
TRANS_LOG(WARN, "tx table is null", KR(ret), K(ctx_mgr_->get_ls_id()), K(*this));
} else if (OB_FAIL(tx_table->alloc_tx_data(tx_data_guard_))) {
} else if (OB_FAIL(tx_table->alloc_tx_data(tx_data_guard_, true, abs_expire_time))) {
TRANS_LOG(WARN, "get tx data failed", KR(ret), K(ctx_mgr_->get_ls_id()));
} else if (OB_ISNULL(tx_data_guard_.tx_data())) {
ret = OB_ERR_UNEXPECTED;
@ -162,25 +162,6 @@ int ObCtxTxData::deep_copy_tx_data_out(ObTxDataGuard &tmp_tx_data_guard)
return ret;
}
// Allocate a temporary tx data entry from the tx table into the supplied
// guard, after verifying the ctx tx data is still writable.
// Holds the read lock for the whole operation.
// @param tmp_tx_data_guard  receives ownership of the newly allocated tx data
// @return OB_SUCCESS on success; the writability-check or allocation error otherwise
int ObCtxTxData::alloc_tmp_tx_data(storage::ObTxDataGuard &tmp_tx_data_guard)
{
  int ret = OB_SUCCESS;
  RLockGuard guard(lock_);
  if (OB_FAIL(check_tx_data_writable_())) {
    TRANS_LOG(WARN, "tx data is not writeable", K(ret), K(*this));
  } else {
    ObTxTable *tx_table = nullptr;
    // macro: fetches the tx table via ctx_mgr_ and sets ret on failure
    GET_TX_TABLE_(tx_table)
    if (OB_FAIL(ret)) {
      // GET_TX_TABLE_ already logged the failure
    } else if (OB_FAIL(tx_table->alloc_tx_data(tmp_tx_data_guard))) {
      TRANS_LOG(WARN, "alloc tx data failed", K(ret));
    }
  }
  return ret;
}
int ObCtxTxData::free_tmp_tx_data(ObTxData *&tmp_tx_data)
{
int ret = OB_SUCCESS;

View File

@ -34,14 +34,14 @@ public:
void reset();
void destroy();
int init(ObLSTxCtxMgr *ctx_mgr, int64_t tx_id);
int init(const int64_t abs_expire_time, ObLSTxCtxMgr *ctx_mgr, int64_t tx_id);
bool is_read_only() const { return read_only_; }
int insert_into_tx_table();
int recover_tx_data(storage::ObTxDataGuard &rhs);
int replace_tx_data(storage::ObTxData *tmp_tx_data);
int deep_copy_tx_data_out(storage::ObTxDataGuard &tmp_tx_data_guard);
int alloc_tmp_tx_data(storage::ObTxDataGuard &tmp_tx_data);
// int alloc_tmp_tx_data(storage::ObTxDataGuard &tmp_tx_data);
int free_tmp_tx_data(storage::ObTxData *&tmp_tx_data);
int insert_tmp_tx_data(storage::ObTxData *tmp_tx_data);

View File

@ -75,7 +75,7 @@ public:
typedef common::ObSEArray<SingleRowDefensiveRecord *, 12> ObSingleRowDefensiveRecordArray;
class ObSingleTabletDefensiveCheckInfo : public ObTransHashLink<ObSingleTabletDefensiveCheckInfo>
class ObSingleTabletDefensiveCheckInfo : public share::ObLightHashLink<ObSingleTabletDefensiveCheckInfo>
{
public:
ObSingleTabletDefensiveCheckInfo() { }
@ -112,7 +112,7 @@ public:
}
};
typedef ObTransHashMap<ObTransID,
typedef share::ObLightHashMap<ObTransID,
ObSingleTabletDefensiveCheckInfo,
ObSingleTabletDefensiveCheckInfoAlloc,
common::SpinRWLock, 2 << 16 /*bucket_num*/> ObTxDefensiveCheckInfoMap;

View File

@ -33,7 +33,7 @@
#include "storage/memtable/ob_memtable_context.h"
#include "ob_xa_define.h"
#include "share/rc/ob_context.h"
#include "ob_trans_hashmap.h"
#include "share/ob_light_hashmap.h"
#include "ob_tx_elr_handler.h"
namespace oceanbase
@ -93,7 +93,7 @@ static inline void protocol_error(const int64_t state, const int64_t msg_type)
// For Example: If you change the signature of the function `commit` in
// `ObTransCtx`, you should also modify the signatore of function `commit` in
// `ObPartTransCtx`, `ObScheTransCtx`
class ObTransCtx: public ObTransHashLink<ObTransCtx>
class ObTransCtx: public share::ObLightHashLink<ObTransCtx>
{
friend class CtxLock;
public:
@ -202,13 +202,13 @@ protected:
}
int acquire_ctx_ref_()
{
ObTransHashLink::inc_ref(1);
ObLightHashLink::inc_ref(1);
TRANS_LOG(DEBUG, "inc tx ctx ref", KPC(this));
return OB_SUCCESS;
}
void release_ctx_ref_()
{
ObTransHashLink::dec_ref(1);
ObLightHashLink::dec_ref(1);
TRANS_LOG(DEBUG, "dec tx ctx ref", KPC(this));
}

View File

@ -21,6 +21,7 @@
#include "lib/container/ob_se_array.h"
#include "common/ob_simple_iterator.h"
#include "share/ob_ls_id.h"
#include "share/ob_light_hashmap.h"
#include "storage/memtable/ob_memtable_context.h"
#include "ob_trans_ctx.h"
#include "ob_trans_stat.h"
@ -28,7 +29,6 @@
#include "storage/tx/ob_tx_ls_log_writer.h"
#include "storage/tx_table/ob_tx_table_define.h"
#include "storage/tx/ob_tx_log_adapter.h"
#include "ob_trans_hashmap.h"
#include "ob_trans_ctx_mgr_v4.h"
namespace oceanbase

View File

@ -84,7 +84,7 @@ typedef common::ObSimpleIterator<ObLSTxCtxMgrStat,
typedef common::ObSimpleIterator<ObTxLockStat,
ObModIds::OB_TRANS_VIRTUAL_TABLE_TRANS_STAT, 16> ObTxLockStatIterator;
typedef ObTransHashMap<ObTransID, ObTransCtx, TransCtxAlloc, common::SpinRWLock, 1 << 14 /*bucket_num*/> ObLSTxCtxMap;
typedef share::ObLightHashMap<ObTransID, ObTransCtx, TransCtxAlloc, common::SpinRWLock, 1 << 14 /*bucket_num*/> ObLSTxCtxMap;
typedef common::LinkHashNode<share::ObLSID> ObLSTxCtxMgrHashNode;
typedef common::LinkHashValue<share::ObLSID> ObLSTxCtxMgrHashValue;
@ -152,7 +152,7 @@ const static char OB_SIMPLE_ITERATOR_LABEL_FOR_TX_ID[] = "ObTxCtxMgr";
typedef common::ObSimpleIterator<ObTransID, OB_SIMPLE_ITERATOR_LABEL_FOR_TX_ID, 16> ObTxIDIterator;
// LogStream Transaction Context Manager
class ObLSTxCtxMgr: public ObTransHashLink<ObLSTxCtxMgr>
class ObLSTxCtxMgr: public share::ObLightHashLink<ObLSTxCtxMgr>
{
// ut
friend class unittest::TestTxCtxTable;
@ -940,7 +940,7 @@ public:
}
};
typedef transaction::ObTransHashMap<share::ObLSID, ObLSTxCtxMgr,
typedef share::ObLightHashMap<share::ObLSID, ObLSTxCtxMgr,
ObLSTxCtxMgrAlloc, common::ObQSyncLock> ObLSTxCtxMgrMap;
class ObTxCtxMgr

View File

@ -26,7 +26,7 @@
#include "common/ob_role.h"
#include "share/ob_cluster_version.h"
#include "share/ob_ls_id.h"
#include "ob_trans_hashmap.h"
#include "share/ob_light_hashmap.h"
#include "storage/tx/ob_trans_define.h"
#include "common/ob_simple_iterator.h"
#include "share/ob_common_id.h"
@ -411,7 +411,7 @@ private:
common::ObMaskSet2<ObTxExecPart> mask_set_;
};
class ObTxDesc final : public ObTransHashLink<ObTxDesc>
class ObTxDesc final : public share::ObLightHashLink<ObTxDesc>
{
static constexpr const char *OP_LABEL = "TX_DESC_VALUE";
static constexpr int64_t MAX_RESERVED_CONFLICT_TX_NUM = 30;
@ -871,7 +871,7 @@ public:
ObTxDesc::DLink list_;
#endif
};
ObTransHashMap<ObTransID, ObTxDesc, ObTxDescAlloc, common::SpinRWLock, 1 << 16 /*bucket_num*/> map_;
share::ObLightHashMap<ObTransID, ObTxDesc, ObTxDescAlloc, common::SpinRWLock, 1 << 16 /*bucket_num*/> map_;
std::function<int(ObTransID&)> tx_id_allocator_;
ObTransService &txs_;
};

View File

@ -120,7 +120,7 @@ int ObPartTransCtx::init(const uint64_t tenant_id,
TRANS_LOG(WARN, "ObPartTransCtx init memtable context error", KR(ret), K(trans_id), K(ls_id));
} else if (OB_FAIL(init_log_cbs_(ls_id, trans_id))) {
TRANS_LOG(WARN, "init log cbs failed", KR(ret), K(trans_id), K(ls_id));
} else if (OB_FAIL(ctx_tx_data_.init(ls_ctx_mgr, trans_id))) {
} else if (OB_FAIL(ctx_tx_data_.init(trans_expired_time, ls_ctx_mgr, trans_id))) {
TRANS_LOG(WARN, "init ctx tx data failed",K(ret), K(trans_id), K(ls_id));
} else if (OB_FAIL(mds_cache_.init(tenant_id))) {
TRANS_LOG(WARN, "init mds cache failed", K(ret), K(trans_id), K(ls_id));

View File

@ -21,6 +21,7 @@
#include "storage/memtable/ob_memtable_context.h"
#include "share/schema/ob_multi_version_schema_service.h"
#include "share/ob_common_rpc_proxy.h"
#include "share/ob_light_hashmap.h"
#include "sql/ob_end_trans_callback.h"
#include "lib/utility/utility.h"
#include "ob_trans_define.h"
@ -146,7 +147,7 @@ public:
ObThreadLocalTransCtxState state_;
} CACHE_ALIGNED;
class ObRollbackSPMsgGuard final : public ObTransHashLink<ObRollbackSPMsgGuard>
class ObRollbackSPMsgGuard final : public share::ObLightHashLink<ObRollbackSPMsgGuard>
{
public:
ObRollbackSPMsgGuard(ObCommonID tx_msg_id, ObTxDesc &tx_desc, ObTxDescMgr &tx_desc_mgr)
@ -338,7 +339,7 @@ private:
// for rollback-savepoint request-id
int64_t rollback_sp_msg_sequence_;
// for rollback-savepoint msg resp callback to find tx_desc
ObTransHashMap<ObCommonID, ObRollbackSPMsgGuard, ObRollbackSPMsgGuardAlloc, common::SpinRWLock, 1 << 16 /*bucket_num*/> rollback_sp_msg_mgr_;
share::ObLightHashMap<ObCommonID, ObRollbackSPMsgGuard, ObRollbackSPMsgGuardAlloc, common::SpinRWLock, 1 << 16 /*bucket_num*/> rollback_sp_msg_mgr_;
private:
DISALLOW_COPY_AND_ASSIGN(ObTransService);
};

View File

@ -15,9 +15,10 @@
#include "storage/tx_table/ob_tx_table.h"
#include "share/rc/ob_tenant_base.h"
using namespace oceanbase::share;
namespace oceanbase
{
namespace storage
{
@ -69,7 +70,7 @@ int ObUndoStatusList::serialize_(char *buf, const int64_t buf_len, int64_t &pos)
int ObUndoStatusList::deserialize(const char *buf,
const int64_t data_len,
int64_t &pos,
ObSliceAlloc &slice_allocator)
ObTenantTxDataAllocator &tx_data_allocator)
{
int ret = OB_SUCCESS;
int64_t version = 0;
@ -92,7 +93,7 @@ int ObUndoStatusList::deserialize(const char *buf,
} else {
int64_t original_pos = pos;
pos = 0;
if (OB_FAIL(deserialize_(buf + original_pos, undo_status_list_len, pos, slice_allocator))) {
if (OB_FAIL(deserialize_(buf + original_pos, undo_status_list_len, pos, tx_data_allocator))) {
STORAGE_LOG(WARN, "deserialize_ fail", "slen", undo_status_list_len, K(pos), K(ret));
}
pos += original_pos;
@ -104,7 +105,7 @@ int ObUndoStatusList::deserialize(const char *buf,
int ObUndoStatusList::deserialize_(const char *buf,
const int64_t data_len,
int64_t &pos,
ObSliceAlloc &slice_allocator)
ObTenantTxDataAllocator &tx_data_allocator)
{
int ret = OB_SUCCESS;
ObUndoStatusNode *cur_node = nullptr;
@ -115,11 +116,7 @@ int ObUndoStatusList::deserialize_(const char *buf,
// allcate new undo status node if needed
if (OB_ISNULL(cur_node) || cur_node->size_ >= TX_DATA_UNDO_ACT_MAX_NUM_PER_NODE) {
void *undo_node_buf = nullptr;
#ifdef OB_ENABLE_SLICE_ALLOC_LEAK_DEBUG
if (OB_ISNULL(undo_node_buf = slice_allocator.alloc(true /*record_alloc_lbt*/))) {
#else
if (OB_ISNULL(undo_node_buf = slice_allocator.alloc())) {
#endif
if (OB_ISNULL(undo_node_buf = tx_data_allocator.alloc(false/* enable_throttle */))) {
ret = OB_ALLOCATE_MEMORY_FAILED;
STORAGE_LOG(WARN, "allocate memory when deserialize ObTxData failed.", KR(ret));
} else {
@ -337,7 +334,7 @@ int64_t ObTxData::size() const
int ObTxData::deserialize(const char *buf,
const int64_t data_len,
int64_t &pos,
ObSliceAlloc &slice_allocator)
ObTenantTxDataAllocator &slice_allocator)
{
int ret = OB_SUCCESS;
int64_t version = 0;
@ -366,7 +363,7 @@ int ObTxData::deserialize(const char *buf,
int ObTxData::deserialize_(const char *buf,
const int64_t data_len,
int64_t &pos,
ObSliceAlloc &slice_allocator)
ObTenantTxDataAllocator &tx_data_allocator)
{
int ret = OB_SUCCESS;
@ -380,7 +377,7 @@ int ObTxData::deserialize_(const char *buf,
STORAGE_LOG(WARN, "deserialize start_scn fail.", KR(ret), K(pos), K(data_len));
} else if (OB_FAIL(end_scn_.deserialize(buf, data_len, pos))) {
STORAGE_LOG(WARN, "deserialize end_scn fail.", KR(ret), K(pos), K(data_len));
} else if (OB_FAIL(undo_status_list_.deserialize(buf, data_len, pos, slice_allocator))) {
} else if (OB_FAIL(undo_status_list_.deserialize(buf, data_len, pos, tx_data_allocator))) {
STORAGE_LOG(WARN, "deserialize undo_status_list fail.", KR(ret), K(pos), K(data_len));
}
@ -389,14 +386,14 @@ int ObTxData::deserialize_(const char *buf,
void ObTxData::reset()
{
if (OB_NOT_NULL(slice_allocator_) || ref_cnt_ != 0) {
if (OB_NOT_NULL(tx_data_allocator_) || ref_cnt_ != 0) {
int ret = OB_ERR_UNEXPECTED;
STORAGE_LOG(WARN, "this tx data should not be reset", KR(ret), KP(this), KP(slice_allocator_), K(ref_cnt_));
STORAGE_LOG(WARN, "this tx data should not be reset", KR(ret), KP(this), KP(tx_data_allocator_), K(ref_cnt_));
// TODO : @gengli remove ob_abort
ob_abort();
}
ObTxCommitData::reset();
slice_allocator_ = nullptr;
tx_data_allocator_ = nullptr;
ref_cnt_ = 0;
undo_status_list_.reset();
}

View File

@ -13,8 +13,8 @@
#ifndef OCEANBASE_STORAGE_OB_TX_DATA_DEFINE
#define OCEANBASE_STORAGE_OB_TX_DATA_DEFINE
#include "lib/allocator/ob_slice_alloc.h"
#include "share/scn.h"
#include "share/allocator/ob_tx_data_allocator.h"
#include "lib/objectpool/ob_server_object_pool.h"
#include "storage/tx/ob_committer_define.h"
#include "storage/tx/ob_trans_define.h"
@ -25,7 +25,6 @@ namespace oceanbase
namespace storage
{
class ObTxData;
class ObTxTable;
class ObTxDataTable;
@ -38,7 +37,7 @@ class ObTxDataMemtableMgr;
// avoid memory fragmentation caused by frequent allocation of non-fixed-length memory
// 2. Avoid dumping failure caused by memory allocation failure
//
// The tx data table uses ObSliceAlloc to allocate multiple memory slices. There are three kinds of
// The tx data table uses ObTenantTxDataAllocator to allocate multiple memory slices. There are three kinds of
// slice. The first kind of slice is divided into three areas. This kind of slice is used in link
// hash map of tx data memtable. :
// 1. HashNodes that ObLinkHashMap needs
@ -154,7 +153,7 @@ public:
public:
int serialize(char *buf, const int64_t buf_len, int64_t &pos) const;
int deserialize(const char *buf, const int64_t data_len, int64_t &pos, ObSliceAlloc &slice_allocator);
int deserialize(const char *buf, const int64_t data_len, int64_t &pos, share::ObTenantTxDataAllocator &tx_data_allocator);
int64_t get_serialize_size() const;
bool is_contain(const transaction::ObTxSEQ seq_no, int32_t tx_data_state) const;
void reset()
@ -166,7 +165,10 @@ public:
private:
bool is_contain_(const transaction::ObTxSEQ seq_no) const;
int serialize_(char *buf, const int64_t buf_len, int64_t &pos) const;
int deserialize_(const char *buf, const int64_t data_len, int64_t &pos, ObSliceAlloc &slice_allocator);
int deserialize_(const char *buf,
const int64_t data_len,
int64_t &pos,
share::ObTenantTxDataAllocator &tx_data_allocator);
int64_t get_serialize_size_() const;
public:
@ -236,7 +238,7 @@ class ObTxData : public ObTxCommitData, public ObTxDataLink
private:
const static int64_t UNIS_VERSION = 1;
public:
ObTxData() : ObTxCommitData(), ObTxDataLink(), slice_allocator_(nullptr), ref_cnt_(0), undo_status_list_(), flag_(0) {}
ObTxData() : ObTxCommitData(), ObTxDataLink(), tx_data_allocator_(nullptr), ref_cnt_(0), undo_status_list_(), flag_(0) {}
ObTxData(const ObTxData &rhs);
ObTxData &operator=(const ObTxData &rhs);
ObTxData &operator=(const ObTxCommitData &rhs);
@ -257,7 +259,7 @@ public:
#ifdef UNITTEST
return;
#endif
if (nullptr == slice_allocator_) {
if (nullptr == tx_data_allocator_) {
STORAGE_LOG_RET(ERROR, OB_ERR_UNEXPECTED, "invalid slice allocator", KPC(this));
ob_abort();
} else if (0 == ATOMIC_SAF(&ref_cnt_, 1)) {
@ -267,10 +269,10 @@ public:
while (nullptr != node_ptr) {
node_to_free = node_ptr;
node_ptr = node_ptr->next_;
slice_allocator_->free(node_to_free);
tx_data_allocator_->free(node_to_free);
}
}
slice_allocator_->free(this);
tx_data_allocator_->free(this);
}
}
@ -290,7 +292,7 @@ public:
*/
bool is_valid_in_tx_data_table() const;
int serialize(char *buf, const int64_t buf_len, int64_t &pos) const;
int deserialize(const char *buf, const int64_t data_len, int64_t &pos, ObSliceAlloc &slice_allocator);
int deserialize(const char *buf, const int64_t data_len, int64_t &pos, share::ObTenantTxDataAllocator &tx_data_allocator);
int64_t get_serialize_size() const;
int64_t size() const;
@ -301,7 +303,10 @@ public:
private:
int serialize_(char *buf, const int64_t buf_len, int64_t &pos) const;
int deserialize_(const char *buf, const int64_t data_len, int64_t &pos, ObSliceAlloc &slice_allocator);
int deserialize_(const char *buf,
const int64_t data_len,
int64_t &pos,
share::ObTenantTxDataAllocator &tx_data_allocator);
int64_t get_serialize_size_() const;
bool equals_(ObTxData &rhs);
int merge_undo_actions_(ObTxDataTable *tx_data_table,
@ -320,7 +325,7 @@ public:
}
public:
ObSliceAlloc *slice_allocator_;
share::ObTenantTxDataAllocator *tx_data_allocator_;
int64_t ref_cnt_;
ObUndoStatusList undo_status_list_;
int64_t flag_;

View File

@ -17,7 +17,7 @@
#include "observer/ob_srv_network_frame.h"
#include "observer/omt/ob_tenant_config_mgr.h" // ObTenantConfigGuard
#include "rootserver/freeze/ob_major_freeze_helper.h"
#include "share/allocator/ob_memstore_allocator_mgr.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h"
#include "share/config/ob_server_config.h"
#include "share/ob_share_util.h"
#include "share/rc/ob_tenant_module_init_ctx.h"
@ -34,9 +34,9 @@ namespace storage
{
using namespace mds;
typedef ObMemstoreAllocatorMgr::TAllocator ObTenantMemstoreAllocator;
double ObTenantFreezer::MDS_TABLE_FREEZE_TRIGGER_TENANT_PERCENTAGE = 5;
double ObTenantFreezer::MDS_TABLE_FREEZE_TRIGGER_TENANT_PERCENTAGE = 2;
ObTenantFreezer::ObTenantFreezer()
: is_inited_(false),
@ -44,7 +44,6 @@ ObTenantFreezer::ObTenantFreezer()
svr_rpc_proxy_(nullptr),
common_rpc_proxy_(nullptr),
rs_mgr_(nullptr),
allocator_mgr_(nullptr),
freeze_thread_pool_(),
freeze_thread_pool_lock_(common::ObLatchIds::FREEZE_THREAD_POOL_LOCK),
exist_ls_freezing_(false),
@ -65,7 +64,6 @@ void ObTenantFreezer::destroy()
svr_rpc_proxy_ = nullptr;
common_rpc_proxy_ = nullptr;
rs_mgr_ = nullptr;
allocator_mgr_ = nullptr;
is_inited_ = false;
}
@ -106,7 +104,6 @@ int ObTenantFreezer::init()
svr_rpc_proxy_ = GCTX.srv_rpc_proxy_;
common_rpc_proxy_ = GCTX.rs_rpc_proxy_;
rs_mgr_ = GCTX.rs_mgr_;
allocator_mgr_ = &ObMemstoreAllocatorMgr::get_instance();
tenant_info_.tenant_id_ = MTL_ID();
is_inited_ = true;
}
@ -315,7 +312,7 @@ int ObTenantFreezer::tenant_freeze_()
ObLSService *ls_srv = MTL(ObLSService *);
FLOG_INFO("[TenantFreezer] tenant_freeze start", KR(ret));
ObTenantFreezeGuard freeze_guard(allocator_mgr_, ret, tenant_info_);
ObTenantFreezeGuard freeze_guard(ret, tenant_info_);
if (OB_FAIL(ls_srv->get_ls_iter(iter, ObLSGetMod::TXSTORAGE_MOD))) {
LOG_WARN("[TenantFreezer] fail to get log stream iterator", KR(ret));
} else {
@ -506,17 +503,18 @@ static const int64_t ONE_MB = 1024L * 1024L;
"Tenant Frozen TxData Memory(MB)", frozen_tx_data_mem_used/ONE_MB, \
"Tenant Active TxData Memory(MB)", active_tx_data_mem_used/ONE_MB, \
"Freeze TxData Trigger Memory(MB)", self_freeze_trigger_memory/ONE_MB, \
"Total TxDataTable Hold Memory(MB)", tx_data_table_mem_hold/ONE_MB, \
"Total TxDataTable Memory Limit(MB)", tx_data_table_mem_limit/ONE_MB
"Total TxDataTable Hold Memory(MB)", tx_data_mem_hold/ONE_MB, \
"Total TxDataTable Memory Limit(MB)", tx_data_mem_limit/ONE_MB
int ObTenantFreezer::check_and_freeze_tx_data_()
{
int ret = OB_SUCCESS;
int64_t frozen_tx_data_mem_used = 0;
int64_t active_tx_data_mem_used = 0;
int64_t total_memory = lib::get_tenant_memory_limit(tenant_info_.tenant_id_);
int64_t tx_data_table_mem_hold = lib::get_tenant_memory_hold(tenant_info_.tenant_id_, ObCtxIds::TX_DATA_TABLE);
int64_t tx_data_table_mem_limit = total_memory * (ObTxDataTable::TX_DATA_MEM_LIMIT_PERCENTAGE / 100);
int64_t self_freeze_trigger_memory = total_memory * (ObTxDataTable::TX_DATA_FREEZE_TRIGGER_PERCENTAGE / 100);
int64_t tx_data_mem_hold = lib::get_tenant_memory_hold(tenant_info_.tenant_id_, ObCtxIds::TX_DATA_TABLE);
int64_t self_freeze_trigger_memory =
total_memory * ObTenantTxDataAllocator::TX_DATA_FREEZE_TRIGGER_PERCENTAGE / 100;
int64_t tx_data_mem_limit = total_memory * ObTenantTxDataAllocator::TX_DATA_LIMIT_PERCENTAGE / 100;
static int skip_count = 0;
if (true == ATOMIC_LOAD(&is_freezing_tx_data_)) {
@ -542,8 +540,8 @@ int ObTenantFreezer::check_and_freeze_tx_data_()
// execute statistic print once a minute
if (TC_REACH_TIME_INTERVAL(60 * 1000 * 1000)) {
if (frozen_tx_data_mem_used + active_tx_data_mem_used > tx_data_table_mem_limit) {
LOG_ERROR_RET(OB_ERR_UNEXPECTED, "tx data use too much memory!!!", STATISTIC_PRINT_MACRO);
if (frozen_tx_data_mem_used + active_tx_data_mem_used > tx_data_mem_limit) {
LOG_INFO("tx data use too much memory!!!", STATISTIC_PRINT_MACRO);
} else if (OB_FAIL(get_tenant_tx_data_mem_used_(
frozen_tx_data_mem_used, active_tx_data_mem_used, true /*for_statistic_print*/))) {
LOG_INFO("print statistic failed");
@ -657,7 +655,7 @@ int ObTenantFreezer::check_and_freeze_mds_table_()
bool trigger_flush = false;
int64_t total_memory = lib::get_tenant_memory_limit(tenant_info_.tenant_id_);
int64_t trigger_freeze_memory = total_memory * (ObTenantFreezer::MDS_TABLE_FREEZE_TRIGGER_TENANT_PERCENTAGE / 100);
ObTenantMdsAllocator &mds_allocator = MTL(ObTenantMdsService *)->get_allocator();
ObTenantMdsAllocator &mds_allocator = MTL(ObSharedMemAllocMgr *)->mds_allocator();
int64_t hold_memory = mds_allocator.hold();
if (OB_UNLIKELY(0 == trigger_freeze_memory)) {
@ -1002,7 +1000,8 @@ int ObTenantFreezer::get_tenant_memstore_limit(int64_t &mem_limit)
int ObTenantFreezer::get_tenant_mem_usage_(ObTenantFreezeCtx &ctx)
{
int ret = OB_SUCCESS;
ObTenantMemstoreAllocator *tenant_allocator = NULL;
ObMemstoreAllocator &tenant_allocator = MTL(ObSharedMemAllocMgr *)->memstore_allocator();
int64_t active_memstore_used = 0;
int64_t freezable_active_memstore_used = 0;
int64_t total_memstore_used = 0;
@ -1010,20 +1009,11 @@ int ObTenantFreezer::get_tenant_mem_usage_(ObTenantFreezeCtx &ctx)
int64_t max_cached_memstore_size = 0;
const uint64_t tenant_id = MTL_ID();
if (OB_FAIL(allocator_mgr_->get_tenant_memstore_allocator(tenant_id,
tenant_allocator))) {
LOG_WARN("[TenantFreezer] failed to get_tenant_memstore_allocator", KR(ret), K(tenant_id));
} else if (NULL == tenant_allocator) {
ret = OB_ERR_UNEXPECTED;
LOG_ERROR("[TenantFreezer] tenant memstore allocator is NULL", KR(ret), K(tenant_id));
} else {
active_memstore_used = tenant_allocator->get_active_memstore_used();
freezable_active_memstore_used = tenant_allocator->get_freezable_active_memstore_used();
total_memstore_used = tenant_allocator->get_total_memstore_used();
total_memstore_hold = get_tenant_memory_hold(tenant_id,
ObCtxIds::MEMSTORE_CTX_ID);
max_cached_memstore_size = tenant_allocator->get_max_cached_memstore_size();
}
active_memstore_used = tenant_allocator.get_active_memstore_used();
freezable_active_memstore_used = tenant_allocator.get_freezable_active_memstore_used();
total_memstore_used = tenant_allocator.get_total_memstore_used();
max_cached_memstore_size = tenant_allocator.get_max_cached_memstore_size();
total_memstore_hold = get_tenant_memory_hold(tenant_id, ObCtxIds::MEMSTORE_CTX_ID);
ctx.active_memstore_used_ = active_memstore_used;
ctx.freezable_active_memstore_used_ = freezable_active_memstore_used;
ctx.total_memstore_used_ = total_memstore_used;
@ -1036,7 +1026,7 @@ int ObTenantFreezer::get_tenant_mem_usage_(ObTenantFreezeCtx &ctx)
int ObTenantFreezer::get_tenant_mem_stat_(ObTenantStatistic &stat)
{
int ret = OB_SUCCESS;
ObTenantMemstoreAllocator *tenant_allocator = NULL;
ObMemstoreAllocator &tenant_allocator = MTL(ObSharedMemAllocMgr *)->memstore_allocator();
int64_t active_memstore_used = 0;
int64_t total_memstore_used = 0;
int64_t total_memstore_hold = 0;
@ -1051,21 +1041,15 @@ int ObTenantFreezer::get_tenant_mem_stat_(ObTenantStatistic &stat)
tenant_info_.get_freeze_ctx(ctx);
if (OB_FAIL(get_freeze_trigger_(ctx))) {
LOG_WARN("[TenantFreezer] get tenant minor freeze trigger error", KR(ret), K(tenant_info_.tenant_id_));
} else if (OB_FAIL(allocator_mgr_->get_tenant_memstore_allocator(tenant_id,
tenant_allocator))) {
LOG_WARN("[TenantFreezer] failed to get_tenant_memstore_allocator", KR(ret), K(tenant_id));
} else if (NULL == tenant_allocator) {
ret = OB_ERR_UNEXPECTED;
LOG_ERROR("[TenantFreezer] tenant memstore allocator is NULL", KR(ret), K(tenant_id));
} else {
active_memstore_used = tenant_allocator->get_active_memstore_used();
total_memstore_used = tenant_allocator->get_total_memstore_used();
active_memstore_used = tenant_allocator.get_active_memstore_used();
total_memstore_used = tenant_allocator.get_total_memstore_used();
total_memstore_hold = get_tenant_memory_hold(tenant_id,
ObCtxIds::MEMSTORE_CTX_ID);
max_cached_memstore_size = tenant_allocator->get_max_cached_memstore_size();
memstore_allocated_pos = tenant_allocator->get_memstore_allocated_pos();
memstore_frozen_pos = tenant_allocator->get_frozen_memstore_pos();
memstore_reclaimed_pos = tenant_allocator->get_memstore_reclaimed_pos();
max_cached_memstore_size = tenant_allocator.get_max_cached_memstore_size();
memstore_allocated_pos = tenant_allocator.get_memstore_allocated_pos();
memstore_frozen_pos = tenant_allocator.get_frozen_memstore_pos();
memstore_reclaimed_pos = tenant_allocator.get_memstore_reclaimed_pos();
}
stat.active_memstore_used_ = active_memstore_used;
stat.total_memstore_used_ = total_memstore_used;
@ -1074,7 +1058,6 @@ int ObTenantFreezer::get_tenant_mem_stat_(ObTenantStatistic &stat)
stat.memstore_limit_ = ctx.mem_memstore_limit_;
stat.tenant_memory_limit_ = get_tenant_memory_limit(tenant_id);
stat.tenant_memory_hold_ = get_tenant_memory_hold(tenant_id);
stat.kvcache_mem_ = ctx.kvcache_mem_;
stat.max_cached_memstore_size_ = max_cached_memstore_size;
stat.memstore_allocated_pos_ = memstore_allocated_pos;
@ -1096,60 +1079,45 @@ static inline bool is_add_overflow(int64_t first, int64_t second, int64_t &res)
int ObTenantFreezer::get_freeze_trigger_(ObTenantFreezeCtx &ctx)
{
static const int64_t MEMSTORE_USABLE_REMAIN_MEMORY_PERCETAGE = 50;
static const int64_t MAX_UNUSABLE_MEMORY = 2LL * 1024LL * 1024LL * 1024LL;
int ret = OB_SUCCESS;
ObTenantResourceMgrHandle resource_handle;
const uint64_t tenant_id = MTL_ID();
const int64_t mem_memstore_limit = ctx.mem_memstore_limit_;
int64_t kv_cache_mem = 0;
int64_t memstore_freeze_trigger = 0;
int64_t max_mem_memstore_can_get_now = 0;
if (OB_FAIL(ObResourceMgr::get_instance().
get_tenant_resource_mgr(tenant_id,
resource_handle))) {
LOG_WARN("[TenantFreezer] fail to get resource mgr", KR(ret), K(tenant_id));
ret = OB_SUCCESS;
memstore_freeze_trigger =
mem_memstore_limit / 100 * get_freeze_trigger_percentage_();
} else {
int64_t tenant_mem_limit = get_tenant_memory_limit(tenant_id);
int64_t tenant_mem_hold = get_tenant_memory_hold(tenant_id);
int64_t tenant_memstore_hold = get_tenant_memory_hold(tenant_id,
ObCtxIds::MEMSTORE_CTX_ID);
bool is_overflow = true;
kv_cache_mem = resource_handle.get_memory_mgr()->get_cache_hold();
if (tenant_mem_limit < tenant_mem_hold) {
LOG_WARN("[TenantFreezer] tenant_mem_limit is smaller than tenant_mem_hold",
K(tenant_mem_limit), K(tenant_mem_hold), K(tenant_id));
} else if (is_add_overflow(tenant_mem_limit - tenant_mem_hold,
tenant_memstore_hold,
max_mem_memstore_can_get_now)) {
if (REACH_TIME_INTERVAL(1 * 1000 * 1000)) {
LOG_WARN("[TenantFreezer] max memstore can get is overflow", K(tenant_mem_limit),
K(tenant_mem_hold), K(tenant_memstore_hold), K(tenant_id));
}
} else if (is_add_overflow(max_mem_memstore_can_get_now,
kv_cache_mem,
max_mem_memstore_can_get_now)) {
if (REACH_TIME_INTERVAL(1 * 1000 * 1000)) {
LOG_WARN("[TenantFreezer] max memstore can get is overflow",
K(tenant_mem_limit), K(tenant_mem_hold), K(tenant_memstore_hold),
K(kv_cache_mem), K(tenant_id));
}
} else {
is_overflow = false;
}
int64_t min = mem_memstore_limit;
if (!is_overflow) {
min = MIN(mem_memstore_limit, max_mem_memstore_can_get_now);
}
memstore_freeze_trigger = min / 100 * get_freeze_trigger_percentage_();
int64_t tenant_remain_memory = get_tenant_memory_remain(tenant_id);
int64_t tenant_memstore_hold = get_tenant_memory_hold(tenant_id, ObCtxIds::MEMSTORE_CTX_ID);
int64_t usable_remain_memory = tenant_remain_memory / 100 * MEMSTORE_USABLE_REMAIN_MEMORY_PERCETAGE;
if (tenant_remain_memory > MAX_UNUSABLE_MEMORY) {
usable_remain_memory = std::max(usable_remain_memory, tenant_remain_memory - MAX_UNUSABLE_MEMORY);
}
bool is_overflow = true;
if (is_add_overflow(usable_remain_memory, tenant_memstore_hold, max_mem_memstore_can_get_now)) {
if (REACH_TIME_INTERVAL(1 * 1000 * 1000)) {
LOG_WARN("[TenantFreezer] max memstore can get is overflow",
K(tenant_memstore_hold),
K(usable_remain_memory),
K(tenant_remain_memory),
K(tenant_id));
}
} else {
is_overflow = false;
}
int64_t min = mem_memstore_limit;
if (!is_overflow) {
min = MIN(mem_memstore_limit, max_mem_memstore_can_get_now);
}
memstore_freeze_trigger = min / 100 * get_freeze_trigger_percentage_();
// result
ctx.max_mem_memstore_can_get_now_ = max_mem_memstore_can_get_now;
ctx.memstore_freeze_trigger_ = memstore_freeze_trigger;
ctx.kvcache_mem_ = kv_cache_mem;
return ret;
}
@ -1396,7 +1364,6 @@ int ObTenantFreezer::print_tenant_usage(
"memstore_limit=% '15ld "
"mem_tenant_limit=% '15ld "
"mem_tenant_hold=% '15ld "
"kv_cache_mem=% '15ld "
"max_mem_memstore_can_get_now=% '15ld "
"memstore_alloc_pos=% '15ld "
"memstore_frozen_pos=% '15ld "
@ -1410,7 +1377,6 @@ int ObTenantFreezer::print_tenant_usage(
stat.memstore_limit_,
stat.tenant_memory_limit_,
stat.tenant_memory_hold_,
stat.kvcache_mem_,
stat.memstore_can_get_now_,
stat.memstore_allocated_pos_,
stat.memstore_frozen_pos_,
@ -1566,7 +1532,7 @@ int ObTenantFreezer::do_major_freeze_(const int64_t try_frozen_scn)
void ObTenantFreezer::log_frozen_memstore_info_if_need_(const ObTenantFreezeCtx &ctx)
{
int ret = OB_SUCCESS;
ObTenantMemstoreAllocator *tenant_allocator = NULL;
ObMemstoreAllocator &tenant_allocator = MTL(ObSharedMemAllocMgr *)->memstore_allocator();
if (ctx.total_memstore_hold_ > ctx.memstore_freeze_trigger_ ||
ctx.freezable_active_memstore_used_ > ctx.memstore_freeze_trigger_) {
// There is an unreleased memstable
@ -1579,15 +1545,9 @@ void ObTenantFreezer::log_frozen_memstore_info_if_need_(const ObTenantFreezeCtx
"tenant_id",
MTL_ID());
if (OB_FAIL(allocator_mgr_->get_tenant_memstore_allocator(MTL_ID(),
tenant_allocator))) {
LOG_WARN("[TenantFreezer] get tenant memstore allocator failed", KR(ret));
} else {
char frozen_mt_info[DEFAULT_BUF_LENGTH];
tenant_allocator->log_frozen_memstore_info(frozen_mt_info,
sizeof(frozen_mt_info));
LOG_INFO("[TenantFreezer] oldest frozen memtable", "list", frozen_mt_info);
}
char frozen_mt_info[DEFAULT_BUF_LENGTH];
tenant_allocator.log_frozen_memstore_info(frozen_mt_info, sizeof(frozen_mt_info));
LOG_INFO("[TenantFreezer] oldest frozen memtable", "list", frozen_mt_info);
}
}

View File

@ -26,11 +26,6 @@
namespace oceanbase
{
namespace common
{
class ObServerConfig;
class ObMemstoreAllocatorMgr;
}
namespace storage
{
class ObTenantFreezer;
@ -215,7 +210,6 @@ private:
const share::ObRsMgr *rs_mgr_;
ObAddr self_;
ObRetryMajorInfo retry_major_info_;
common::ObMemstoreAllocatorMgr *allocator_mgr_;
common::ObOccamThreadPool freeze_trigger_pool_;
common::ObOccamTimer freeze_trigger_timer_;

View File

@ -14,8 +14,7 @@
#include "lib/oblog/ob_log.h"
#include "lib/alloc/alloc_func.h"
#include "share/allocator/ob_gmemstore_allocator.h"
#include "share/allocator/ob_memstore_allocator_mgr.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h"
#include "share/ob_force_print_log.h"
#include "share/rc/ob_tenant_base.h"
#include "storage/tx_storage/ob_tenant_freezer_common.h"
@ -23,10 +22,9 @@
namespace oceanbase
{
using namespace lib;
using namespace share;
namespace storage
{
typedef ObMemstoreAllocatorMgr::TAllocator ObTenantMemstoreAllocator;
DEF_TO_STRING(ObTenantFreezeArg)
{
int64_t pos = 0;
@ -44,7 +42,6 @@ ObTenantFreezeCtx::ObTenantFreezeCtx()
mem_memstore_limit_(0),
memstore_freeze_trigger_(0),
max_mem_memstore_can_get_now_(0),
kvcache_mem_(0),
active_memstore_used_(0),
freezable_active_memstore_used_(0),
total_memstore_used_(0),
@ -60,7 +57,6 @@ void ObTenantFreezeCtx::reset()
mem_memstore_limit_ = 0;
memstore_freeze_trigger_ = 0;
max_mem_memstore_can_get_now_ = 0;
kvcache_mem_ = 0;
active_memstore_used_ = 0;
freezable_active_memstore_used_ = 0;
total_memstore_used_ = 0;
@ -76,7 +72,6 @@ ObTenantStatistic::ObTenantStatistic()
memstore_limit_(0),
tenant_memory_limit_(0),
tenant_memory_hold_(0),
kvcache_mem_(0),
memstore_can_get_now_(0),
max_cached_memstore_size_(0),
memstore_allocated_pos_(0),
@ -93,7 +88,6 @@ void ObTenantStatistic::reset()
memstore_limit_ = 0;
tenant_memory_limit_ = 0;
tenant_memory_hold_ = 0;
kvcache_mem_ = 0;
memstore_can_get_now_ = 0;
max_cached_memstore_size_ = 0;
memstore_allocated_pos_ = 0;
@ -256,66 +250,47 @@ void ObTenantInfo::unset_slow_freeze(const common::ObTabletID &tablet_id)
}
}
ObTenantFreezeGuard::ObTenantFreezeGuard(common::ObMemstoreAllocatorMgr *allocator_mgr,
int &err_code,
const ObTenantInfo &tenant_info,
const int64_t warn_threshold)
: allocator_mgr_(nullptr),
tenant_info_(tenant_info),
pre_retire_pos_(0),
error_code_(err_code),
time_guard_("FREEZE_CHECKER", warn_threshold)
ObTenantFreezeGuard::ObTenantFreezeGuard(int &err_code, const ObTenantInfo &tenant_info, const int64_t warn_threshold)
: tenant_info_(tenant_info),
pre_retire_pos_(0),
error_code_(err_code),
time_guard_("FREEZE_CHECKER", warn_threshold)
{
int ret = OB_SUCCESS;
ObTenantMemstoreAllocator *tenant_allocator = NULL;
const uint64_t tenant_id = MTL_ID();
if (OB_NOT_NULL(allocator_mgr)) {
allocator_mgr_ = allocator_mgr;
if (OB_FAIL(allocator_mgr_->get_tenant_memstore_allocator(tenant_id,
tenant_allocator))) {
LOG_WARN("[FREEZE_CHECKER] failed to get_tenant_memstore_allocator", KR(ret), K(tenant_id));
} else if (NULL == tenant_allocator) {
ret = OB_ERR_UNEXPECTED;
LOG_ERROR("[FREEZE_CHECKER] tenant memstore allocator is NULL", KR(ret), K(tenant_id));
} else {
pre_retire_pos_ = tenant_allocator->get_retire_clock();
}
}
ObMemstoreAllocator &tenant_allocator = MTL(ObSharedMemAllocMgr *)->memstore_allocator();
pre_retire_pos_ = tenant_allocator.get_retire_clock();
}
ObTenantFreezeGuard::~ObTenantFreezeGuard()
{
int ret = OB_SUCCESS;
ObTenantMemstoreAllocator *tenant_allocator = NULL;
const uint64_t tenant_id = MTL_ID();
int64_t curr_frozen_pos = 0;
if (OB_ISNULL(allocator_mgr_)) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("[FREEZE_CHECKER]freeze guard invalid", KR(ret), K_(allocator_mgr), K(lbt()));
} else if (OB_FAIL(error_code_)) {
LOG_WARN("[FREEZE_CHECKER]tenant freeze failed, skip check frozen memstore", KR(ret));
} else if (OB_FAIL(allocator_mgr_->get_tenant_memstore_allocator(tenant_id,
tenant_allocator))) {
LOG_WARN("[FREEZE_CHECKER] failed to get_tenant_memstore_allocator", KR(ret), K(tenant_id));
} else if (NULL == tenant_allocator) {
ret = OB_ERR_UNEXPECTED;
LOG_ERROR("[FREEZE_CHECKER] tenant memstore allocator is NULL", KR(ret), K(tenant_id));
if (OB_FAIL(error_code_)) {
LOG_WARN("[FREEZE_CHECKER]tenant freeze failed, skip check frozen memstore", KR(error_code_));
} else {
curr_frozen_pos = tenant_allocator->get_frozen_memstore_pos();
ObMemstoreAllocator &tenant_allocator = MTL(ObSharedMemAllocMgr *)->memstore_allocator();
int64_t curr_frozen_pos = 0;
curr_frozen_pos = tenant_allocator.get_frozen_memstore_pos();
const bool retired_mem_frozen = (curr_frozen_pos >= pre_retire_pos_);
const bool has_no_active_memtable = (curr_frozen_pos == 0);
if (!(retired_mem_frozen || has_no_active_memtable)) {
ret = OB_ERR_UNEXPECTED;
if (tenant_info_.is_freeze_slowed()) {
LOG_WARN("[FREEZE_CHECKER]there may be frequent tenant freeze, but slowed", KR(ret), K(curr_frozen_pos),
K_(pre_retire_pos), K(retired_mem_frozen), K(has_no_active_memtable), K_(tenant_info));
LOG_WARN("[FREEZE_CHECKER]there may be frequent tenant freeze, but slowed",
KR(ret),
K(curr_frozen_pos),
K_(pre_retire_pos),
K(retired_mem_frozen),
K(has_no_active_memtable),
K_(tenant_info));
} else {
LOG_ERROR("[FREEZE_CHECKER]there may be frequent tenant freeze", KR(ret), K(curr_frozen_pos),
K_(pre_retire_pos), K(retired_mem_frozen), K(has_no_active_memtable));
LOG_ERROR("[FREEZE_CHECKER]there may be frequent tenant freeze",
KR(ret),
K(curr_frozen_pos),
K_(pre_retire_pos),
K(retired_mem_frozen),
K(has_no_active_memtable));
}
char active_mt_info[DEFAULT_BUF_LENGTH];
tenant_allocator->log_active_memstore_info(active_mt_info,
sizeof(active_mt_info));
tenant_allocator.log_active_memstore_info(active_mt_info, sizeof(active_mt_info));
FLOG_INFO("[FREEZE_CHECKER] oldest active memtable", "list", active_mt_info);
}
}

View File

@ -20,10 +20,6 @@
namespace oceanbase
{
namespace common
{
class ObMemstoreAllocatorMgr;
}
namespace storage
{
struct ObTenantFreezeArg
@ -72,7 +68,6 @@ public:
// running data
int64_t memstore_freeze_trigger_;
int64_t max_mem_memstore_can_get_now_;
int64_t kvcache_mem_;
int64_t active_memstore_used_;
int64_t freezable_active_memstore_used_;
@ -98,7 +93,6 @@ public:
int64_t memstore_limit_;
int64_t tenant_memory_limit_;
int64_t tenant_memory_hold_;
int64_t kvcache_mem_;
int64_t memstore_can_get_now_;
int64_t max_cached_memstore_size_;
@ -171,13 +165,11 @@ private:
class ObTenantFreezeGuard
{
public:
ObTenantFreezeGuard(common::ObMemstoreAllocatorMgr *allocator_mgr,
int &ret,
ObTenantFreezeGuard(int &ret,
const ObTenantInfo &tenant_info,
const int64_t warn_threshold = 60 * 1000 * 1000 /* 1 min */);
~ObTenantFreezeGuard();
private:
common::ObMemstoreAllocatorMgr *allocator_mgr_;
const ObTenantInfo &tenant_info_;
int64_t pre_retire_pos_;
int &error_code_;

View File

@ -16,7 +16,6 @@
#include "lib/alloc/memory_dump.h"
#include "observer/omt/ob_multi_tenant.h" // ObMultiTenant
#include "share/ob_tenant_mgr.h" // get_virtual_memory_used
#include "share/allocator/ob_memstore_allocator_mgr.h" // ObMemstoreAllocatorMgr
#include "storage/tx_storage/ob_tenant_freezer.h" // ObTenantFreezer
#include "storage/tx_storage/ob_tenant_memory_printer.h"
#include "deps/oblib/src/lib/alloc/malloc_hook.h"
@ -58,7 +57,6 @@ int ObTenantMemoryPrinter::print_tenant_usage()
{
int ret = OB_SUCCESS;
int tmp_ret = OB_SUCCESS;
common::ObMemstoreAllocatorMgr *allocator_mgr = &ObMemstoreAllocatorMgr::get_instance();
static const int64_t BUF_LEN = 64LL << 10;
static char print_buf[BUF_LEN] = "";
int64_t pos = 0;
@ -70,8 +68,7 @@ int ObTenantMemoryPrinter::print_tenant_usage()
} else {
if (OB_FAIL(databuff_printf(print_buf, BUF_LEN, pos,
"=== TENANTS MEMORY INFO ===\n"
"all_tenants_memstore_used=% '15ld, divisive_memory_used=% '15ld\n",
allocator_mgr->get_all_tenants_memstore_used(),
"divisive_memory_used=% '15ld\n",
get_divisive_mem_size()))) {
LOG_WARN("print failed", K(ret));
} else if (OB_FAIL(ObVirtualTenantManager::get_instance().print_tenant_usage(print_buf,

View File

@ -17,7 +17,6 @@
#include "lib/ob_define.h"
#include "lib/utility/ob_print_utils.h"
#include "lib/container/ob_se_array.h"
#include "lib/allocator/ob_slice_alloc.h"
#include "storage/tx/ob_trans_define.h"
namespace oceanbase {

View File

@ -37,7 +37,6 @@ namespace storage
int64_t ObTxDataMemtable::PERIODICAL_SELECT_INTERVAL_NS = 1000LL * 1000LL * 1000LL;
int ObTxDataMemtable::init(const ObITable::TableKey &table_key,
SliceAllocator *slice_allocator,
ObTxDataMemtableMgr *memtable_mgr,
storage::ObFreezer *freezer,
const int64_t buckets_cnt)
@ -47,9 +46,9 @@ int ObTxDataMemtable::init(const ObITable::TableKey &table_key,
if (IS_INIT) {
ret = OB_INIT_TWICE;
STORAGE_LOG(WARN, "init tx data memtable twice", KR(ret), K(table_key), KPC(memtable_mgr));
} else if (OB_ISNULL(slice_allocator) || OB_ISNULL(memtable_mgr)) {
} else if (OB_ISNULL(memtable_mgr)) {
ret = OB_ERR_NULL_VALUE;
STORAGE_LOG(WARN, "the slice_allocator is nullptr", KR(ret), K(table_key), KPC(memtable_mgr));
STORAGE_LOG(WARN, "the tx_data_allocator is nullptr", KR(ret), K(table_key), KPC(memtable_mgr));
} else if (OB_FAIL(ObITable::init(table_key))) {
STORAGE_LOG(WARN, "ObITable::init fail", KR(ret), K(table_key), KPC(memtable_mgr));
} else if (FALSE_IT(init_arena_allocator_())) {
@ -78,7 +77,6 @@ int ObTxDataMemtable::init(const ObITable::TableKey &table_key,
stat_change_ts_.reset();
state_ = ObTxDataMemtable::State::ACTIVE;
sort_list_head_.reset();
slice_allocator_ = slice_allocator;
memtable_mgr_ = memtable_mgr;
row_key_array_.reuse();
@ -145,7 +143,6 @@ void ObTxDataMemtable::reset()
arena_allocator_.free(tx_data_map_);
tx_data_map_ = nullptr;
}
slice_allocator_ = nullptr;
memtable_mgr_ = nullptr;
buf_.reset();
arena_allocator_.reset();
@ -204,7 +201,7 @@ int ObTxDataMemtable::insert(ObTxData *tx_data)
void ObTxDataMemtable::atomic_update_(ObTxData *tx_data)
{
int64_t thread_idx = ::get_itid() & MAX_CONCURRENCY_MOD_MASK;
int64_t thread_idx = common::get_itid() & MAX_CONCURRENCY_MOD_MASK;
min_tx_scn_[thread_idx].dec_update(tx_data->end_scn_);
min_start_scn_[thread_idx].dec_update(tx_data->start_scn_);
int64_t tx_data_size = TX_DATA_SLICE_SIZE * (1LL + tx_data->undo_status_list_.undo_node_cnt_);
@ -241,7 +238,7 @@ int ObTxDataMemtable::pre_process_for_merge()
// only do pre process for frozen tx data memtable
} else if (pre_process_done_) {
STORAGE_LOG(INFO, "call pre process more than once. skip pre process.");
} else if (OB_FAIL(memtable_mgr_->get_tx_data_table()->alloc_tx_data(fake_tx_data_guard))) {
} else if (OB_FAIL(memtable_mgr_->get_tx_data_table()->alloc_tx_data(fake_tx_data_guard, false /* enable_throttle */))) {
STORAGE_LOG(WARN, "allocate tx data from tx data table failed.", KR(ret), KPC(this));
} else if (OB_FAIL(prepare_tx_data_list())) {
STORAGE_LOG(WARN, "prepare tx data list failed.", KR(ret), KPC(this));

View File

@ -167,7 +167,6 @@ public: // ObTxDataMemtable
arena_allocator_(),
sort_list_head_(),
tx_data_map_(nullptr),
slice_allocator_(nullptr),
memtable_mgr_(nullptr),
freezer_(nullptr),
buf_(arena_allocator_),
@ -175,7 +174,6 @@ public: // ObTxDataMemtable
~ObTxDataMemtable() { reset(); }
void reset();
int init(const ObITable::TableKey &table_key,
SliceAllocator *slice_allocator,
ObTxDataMemtableMgr *memtable_mgr,
storage::ObFreezer *freezer,
const int64_t buckets_cnt);
@ -475,10 +473,6 @@ private: // ObTxDataMemtable
// the hash map sotres tx data
TxDataMap *tx_data_map_;
// the link hash map of tx data need the slice allocator of tx data table to construct because the
// destruct of link hash map will free all tx data
SliceAllocator *slice_allocator_;
// used for freeze
ObTxDataMemtableMgr *memtable_mgr_;

View File

@ -31,8 +31,7 @@ ObTxDataMemtableMgr::ObTxDataMemtableMgr()
ls_id_(),
mini_merge_recycle_commit_versions_ts_(0),
tx_data_table_(nullptr),
ls_tablet_svr_(nullptr),
slice_allocator_(nullptr)
ls_tablet_svr_(nullptr)
{
lock_.lock_type_ = LockType::OB_SPIN_RWLOCK;
lock_.lock_ = &lock_def_;
@ -167,9 +166,6 @@ int ObTxDataMemtableMgr::create_memtable(const SCN clog_checkpoint_scn,
} else if (OB_UNLIKELY(schema_version < 0)) {
ret = OB_INVALID_ARGUMENT;
STORAGE_LOG(WARN, "invalid argument", K(ret), K(schema_version));
} else if (OB_ISNULL(slice_allocator_)) {
ret = OB_ERR_NULL_VALUE;
STORAGE_LOG(WARN, "slice_allocator_ has not been set.");
} else {
MemMgrWLockGuard lock_guard(lock_);
if (OB_FAIL(create_memtable_(clog_checkpoint_scn, schema_version, ObTxDataHashMap::DEFAULT_BUCKETS_CNT))) {
@ -205,7 +201,7 @@ int ObTxDataMemtableMgr::create_memtable_(const SCN clog_checkpoint_scn,
} else if (OB_ISNULL(tx_data_memtable)) {
ret = OB_ERR_UNEXPECTED;
STORAGE_LOG(ERROR, "dynamic cast failed", KR(ret), KPC(this));
} else if (OB_FAIL(tx_data_memtable->init(table_key, slice_allocator_, this, freezer_, buckets_cnt))) {
} else if (OB_FAIL(tx_data_memtable->init(table_key, this, freezer_, buckets_cnt))) {
STORAGE_LOG(WARN, "memtable init fail.", KR(ret), KPC(tx_data_memtable));
} else if (OB_FAIL(add_memtable_(handle))) {
STORAGE_LOG(WARN, "add memtable fail.", KR(ret));
@ -228,9 +224,6 @@ int ObTxDataMemtableMgr::freeze()
} else if (get_memtable_count_() <= 0) {
ret = OB_ERR_UNEXPECTED;
STORAGE_LOG(ERROR, "there is no tx data memtable.", KR(ret), K(get_memtable_count_()));
} else if (OB_ISNULL(slice_allocator_)) {
ret = OB_ERR_NULL_VALUE;
STORAGE_LOG(WARN, "slice_allocator_ has not been set.", KR(ret), KP(slice_allocator_));
} else {
MemMgrWLockGuard lock_guard(lock_);
if (OB_FAIL(freeze_())) {

View File

@ -21,6 +21,12 @@
// It provides all operations related to tx data memtable.
namespace oceanbase
{
namespace share
{
class ObTenantTxDataAllocator;
};
namespace storage
{
class TxDataMemtableMgrFreezeGuard;
@ -150,16 +156,13 @@ public: // ObTxDataMemtableMgr
K_(ls_id),
K_(mini_merge_recycle_commit_versions_ts),
KP_(tx_data_table),
KP_(ls_tablet_svr),
KP_(slice_allocator));
KP_(ls_tablet_svr));
public: // getter and setter
ObLSTabletService *get_ls_tablet_svr() { return ls_tablet_svr_; }
ObTxDataTable *get_tx_data_table() { return tx_data_table_; }
int64_t get_mini_merge_recycle_commit_versions_ts() { return mini_merge_recycle_commit_versions_ts_; }
void set_slice_allocator(SliceAllocator *slice_allocator) { slice_allocator_ = slice_allocator; }
protected:
virtual int release_head_memtable_(memtable::ObIMemtable *imemtable,
const bool force);
@ -186,7 +189,6 @@ private: // ObTxDataMemtableMgr
int64_t mini_merge_recycle_commit_versions_ts_;
ObTxDataTable *tx_data_table_;
ObLSTabletService *ls_tablet_svr_;
SliceAllocator *slice_allocator_;
common::SpinRWLock lock_def_;
};

View File

@ -11,8 +11,10 @@
*/
#include "storage/tx_table/ob_tx_data_table.h"
#include "lib/lock/ob_tc_rwlock.h"
#include "lib/time/ob_time_utility.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h"
#include "share/rc/ob_tenant_base.h"
#include "storage/ls/ob_ls.h"
#include "storage/ls/ob_ls_tablet_service.h"
@ -37,12 +39,6 @@ using namespace oceanbase::share;
namespace storage
{
#ifdef OB_ENABLE_SLICE_ALLOC_LEAK_DEBUG
#define TX_DATA_MEM_LEAK_DEBUG_CODE slice_allocator_.enable_leak_debug();
#else
#define TX_DATA_MEM_LEAK_DEBUG_CODE
#endif
int64_t ObTxDataTable::UPDATE_CALC_UPPER_INFO_INTERVAL = 30 * 1000 * 1000; // 30 seconds
int ObTxDataTable::init(ObLS *ls, ObTxCtxTable *tx_ctx_table)
@ -56,8 +52,9 @@ int ObTxDataTable::init(ObLS *ls, ObTxCtxTable *tx_ctx_table)
if (OB_ISNULL(ls) || OB_ISNULL(tx_ctx_table)) {
ret = OB_ERR_NULL_VALUE;
STORAGE_LOG(WARN, "ls tablet service or tx ctx table is nullptr", KR(ret));
} else if (OB_FAIL(init_slice_allocator_())) {
STORAGE_LOG(ERROR, "slice_allocator_ init fail");
} else if (OB_ISNULL(tx_data_allocator_ = &MTL(ObSharedMemAllocMgr*)->tx_data_allocator())) {
ret = OB_ERR_UNEXPECTED;;
STORAGE_LOG(WARN, "unexpected nullptr of mtl object", KR(ret), KP(tx_data_allocator_));
} else if (FALSE_IT(ls_tablet_svr_ = ls->get_tablet_svr())) {
} else if (OB_FAIL(ls_tablet_svr_->get_tx_data_memtable_mgr(memtable_mgr_handle))) {
STORAGE_LOG(WARN, "get tx data memtable mgr fail.", KR(ret), K(tablet_id_));
@ -68,13 +65,10 @@ int ObTxDataTable::init(ObLS *ls, ObTxCtxTable *tx_ctx_table)
} else {
calc_upper_trans_version_cache_.commit_versions_.array_.set_attr(
ObMemAttr(ls->get_tenant_id(), "CommitVersions"));
slice_allocator_.set_nway(ObTxDataTable::TX_DATA_MAX_CONCURRENCY);
TX_DATA_MEM_LEAK_DEBUG_CODE
ls_ = ls;
ls_id_ = ls->get_ls_id();
memtable_mgr_ = static_cast<ObTxDataMemtableMgr *>(memtable_mgr_handle.get_memtable_mgr());
memtable_mgr_->set_slice_allocator(&slice_allocator_);
tx_ctx_table_ = tx_ctx_table;
tablet_id_ = LS_TX_DATA_TABLET;
@ -84,17 +78,6 @@ int ObTxDataTable::init(ObLS *ls, ObTxCtxTable *tx_ctx_table)
return ret;
}
int ObTxDataTable::init_slice_allocator_()
{
int ret = OB_SUCCESS;
ObMemAttr mem_attr;
mem_attr.label_ = "TX_DATA_SLICE";
mem_attr.tenant_id_ = MTL_ID();
mem_attr.ctx_id_ = ObCtxIds::TX_DATA_TABLE;
ret = slice_allocator_.init(TX_DATA_SLICE_SIZE, OB_MALLOC_NORMAL_BLOCK_SIZE, common::default_blk_alloc, mem_attr);
return ret;
}
int ObTxDataTable::init_arena_allocator_()
{
ObMemAttr mem_attr;
@ -199,7 +182,6 @@ void ObTxDataTable::reset()
calc_upper_info_.reset();
calc_upper_trans_version_cache_.reset();
memtables_cache_.reuse();
slice_allocator_.purge_extra_cached_block(0);
is_started_ = false;
is_inited_ = false;
}
@ -263,17 +245,19 @@ int ObTxDataTable::clean_memtables_cache_()
void ObTxDataTable::destroy() { reset(); }
int ObTxDataTable::alloc_tx_data(ObTxDataGuard &tx_data_guard)
int ObTxDataTable::alloc_tx_data(ObTxDataGuard &tx_data_guard,
const bool enable_throttle,
const int64_t abs_expire_time)
{
int ret = OB_SUCCESS;
void *slice_ptr = nullptr;
if (OB_ISNULL(slice_ptr = slice_allocator_.alloc())) {
if (OB_ISNULL(slice_ptr = tx_data_allocator_->alloc(enable_throttle, abs_expire_time))) {
ret = OB_ALLOCATE_MEMORY_FAILED;
STORAGE_LOG(WARN, "allocate memory from slice_allocator fail.", KR(ret), KP(this),
K(tablet_id_));
STORAGE_LOG(WARN, "allocate memory from slice_allocator fail.", KR(ret), KP(this), K(tablet_id_));
} else {
ObTxData *tx_data = new (slice_ptr) ObTxData();
tx_data->slice_allocator_ = &slice_allocator_;
tx_data->tx_data_allocator_ = tx_data_allocator_;
tx_data_guard.init(tx_data);
}
return ret;
@ -283,19 +267,21 @@ int ObTxDataTable::deep_copy_tx_data(const ObTxDataGuard &in_tx_data_guard, ObTx
{
int ret = OB_SUCCESS;
void *slice_ptr = nullptr;
const int64_t abs_expire_time = THIS_WORKER.get_timeout_ts();
const ObTxData *in_tx_data = in_tx_data_guard.tx_data();
ObTxData *out_tx_data = nullptr;
if (OB_ISNULL(slice_ptr = slice_allocator_.alloc())) {
if (OB_ISNULL(slice_ptr = tx_data_allocator_->alloc(true, abs_expire_time))) {
ret = OB_ALLOCATE_MEMORY_FAILED;
STORAGE_LOG(WARN, "allocate memory from slice_allocator fail.", KR(ret), KP(this),
K(tablet_id_));
K(tablet_id_), K(abs_expire_time));
} else if (OB_ISNULL(in_tx_data)) {
ret = OB_ERR_UNEXPECTED;
STORAGE_LOG(ERROR, "invalid nullptr of tx data", K(in_tx_data_guard), KPC(this));
} else {
out_tx_data = new (slice_ptr) ObTxData();
*out_tx_data = *in_tx_data;
out_tx_data->slice_allocator_ = &slice_allocator_;
out_tx_data->tx_data_allocator_ = tx_data_allocator_;
out_tx_data->undo_status_list_.head_ = nullptr;
out_tx_data->ref_cnt_ = 0;
out_tx_data_guard.init(out_tx_data);
@ -342,13 +328,11 @@ int ObTxDataTable::alloc_undo_status_node(ObUndoStatusNode *&undo_status_node)
{
int ret = OB_SUCCESS;
void *slice_ptr = nullptr;
#ifdef OB_ENABLE_SLICE_ALLOC_LEAK_DEBUG
if (OB_ISNULL(slice_ptr = slice_allocator_.alloc(true /*record_alloc_lbt*/))) {
#else
if (OB_ISNULL(slice_ptr = slice_allocator_.alloc())) {
#endif
const int64_t abs_expire_time = THIS_WORKER.get_timeout_ts();
if (OB_ISNULL(slice_ptr = tx_data_allocator_->alloc(true, abs_expire_time))) {
ret = OB_ALLOCATE_MEMORY_FAILED;
STORAGE_LOG(WARN, "allocate memory fail.", KR(ret), KP(this), K(tablet_id_));
STORAGE_LOG(WARN, "allocate memory fail.", KR(ret), KP(this), K(tablet_id_), K(abs_expire_time));
} else {
undo_status_node = new (slice_ptr) ObUndoStatusNode();
}
@ -362,7 +346,7 @@ int ObTxDataTable::free_undo_status_node(ObUndoStatusNode *&undo_status_node)
ret = OB_ERR_NULL_VALUE;
STORAGE_LOG(WARN, "trying to free nullptr", KR(ret), K(tablet_id_));
} else {
slice_allocator_.free(undo_status_node);
tx_data_allocator_->free(undo_status_node);
}
return ret;
}
@ -373,7 +357,7 @@ void ObTxDataTable::free_undo_status_list_(ObUndoStatusNode *node_ptr)
while (nullptr != node_ptr) {
node_to_free = node_ptr;
node_ptr = node_ptr->next_;
slice_allocator_.free(reinterpret_cast<void *>(node_to_free));
tx_data_allocator_->free(reinterpret_cast<void *>(node_to_free));
}
}
@ -687,7 +671,7 @@ int ObTxDataTable::check_tx_data_in_sstable_(const ObTransID tx_id,
int ret = OB_SUCCESS;
tx_data_guard.reset();
if (OB_FAIL(alloc_tx_data(tx_data_guard))) {
if (OB_FAIL(alloc_tx_data(tx_data_guard, false/* enable_throttle */))) {
STORAGE_LOG(WARN, "allocate tx data to read from sstable failed", KR(ret), K(tx_data_guard));
} else if (OB_ISNULL(tx_data_guard.tx_data())) {
ret = OB_ERR_UNEXPECTED;
@ -725,7 +709,7 @@ int ObTxDataTable::get_tx_data_in_sstable_(const transaction::ObTransID tx_id, O
LOG_WARN("fail to fetch table store", K(ret));
} else {
const ObSSTableArray &sstables = table_store_wrapper.get_member()->get_minor_sstables();
ObTxDataSingleRowGetter getter(iter_param, sstables, slice_allocator_, recycled_scn);
ObTxDataSingleRowGetter getter(iter_param, sstables, *tx_data_allocator_, recycled_scn);
if (OB_FAIL(getter.init(tx_id))) {
STORAGE_LOG(WARN, "init ObTxDataSingleRowGetter fail.", KR(ret), KP(this), K(tablet_id_));
} else if (OB_FAIL(getter.get_next_row(tx_data))) {
@ -968,7 +952,7 @@ int ObTxDataTable::DEBUG_calc_with_row_iter_(ObStoreRowIterator *row_iter,
int64_t pos = 0;
const ObString &str = row->storage_datums_[TX_DATA_VAL_COLUMN].get_string();
if (OB_FAIL(tx_data.deserialize(str.ptr(), str.length(), pos, slice_allocator_))) {
if (OB_FAIL(tx_data.deserialize(str.ptr(), str.length(), pos, *tx_data_allocator_))) {
STORAGE_LOG(WARN, "deserialize tx data from store row fail.", KR(ret), K(*row), KPHEX(str.ptr(), str.length()));
} else if (tx_data.start_scn_ <= sstable_end_scn
&& tx_data.commit_version_ > tmp_upper_trans_version) {

View File

@ -13,12 +13,12 @@
#ifndef OCEANBASE_STORAGE_OB_TX_DATA_TABLE
#define OCEANBASE_STORAGE_OB_TX_DATA_TABLE
#include "storage/meta_mem/ob_tablet_handle.h"
#include "lib/future/ob_future.h"
#include "share/scn.h"
#include "share/ob_occam_timer.h"
#include "share/allocator/ob_tx_data_allocator.h"
#include "storage/meta_mem/ob_tablet_handle.h"
#include "storage/tx_table/ob_tx_data_memtable_mgr.h"
#include "storage/tx_table/ob_tx_table_define.h"
#include "share/ob_occam_timer.h"
namespace oceanbase
{
@ -107,22 +107,8 @@ public:
TO_STRING_KV(K(min_start_scn_in_ctx_), K(keep_alive_scn_), K(update_ts_));
};
using SliceAllocator = ObSliceAlloc;
static int64_t UPDATE_CALC_UPPER_INFO_INTERVAL;
static const int64_t TX_DATA_MAX_CONCURRENCY = 32;
// A tx data is 128 bytes, 128 * 262144 = 32MB
static const int64_t SSTABLE_CACHE_MAX_RETAIN_CNT = 262144;
// The max tps is 150w which means the cache can be inserted 15w tx data during 100ms. So once
// cache cleaning task will delete at least 11w tx data.
static const int64_t DEFAULT_CACHE_RETAINED_TIME = 100_ms; // 100ms
// The tx data memtable will trigger a freeze if its memory use is more than 2%
static constexpr double TX_DATA_FREEZE_TRIGGER_PERCENTAGE = 2;
// TODO : @gengli.wzy The active & frozen tx data memtable can not use memory more than 10%
static constexpr double TX_DATA_MEM_LIMIT_PERCENTAGE = 10;
enum COLUMN_ID_LIST
{
TX_ID = common::OB_APP_MIN_COLUMN_ID,
@ -138,8 +124,8 @@ public: // ObTxDataTable
is_started_(false),
ls_id_(),
tablet_id_(0),
slice_allocator_(),
arena_allocator_(),
tx_data_allocator_(nullptr),
ls_(nullptr),
ls_tablet_svr_(nullptr),
memtable_mgr_(nullptr),
@ -159,11 +145,11 @@ public: // ObTxDataTable
int online();
/**
* @brief Allocate tx data with slice allocator
*
* @param[out] tx_data the tx data allocated by slice allocator
* @brief the same as ObTxTable::alloc_tx_data
*/
virtual int alloc_tx_data(ObTxDataGuard &tx_data);
virtual int alloc_tx_data(ObTxDataGuard &tx_data,
const bool enable_throttle = true,
const int64_t abs_expire_time = 0);
/**
* @brief allocate memory and deep copy tx data
@ -229,7 +215,6 @@ public: // ObTxDataTable
int update_memtables_cache();
int prepare_for_safe_destroy();
/**
@ -251,10 +236,11 @@ public: // ObTxDataTable
KP_(ls),
KP_(ls_tablet_svr),
KP_(memtable_mgr),
KP_(tx_ctx_table));
KP_(tx_ctx_table),
KP_(&tx_data_allocator));
public: // getter and setter
SliceAllocator *get_slice_allocator() { return &slice_allocator_; }
share::ObTenantTxDataAllocator *get_tx_data_allocator() { return tx_data_allocator_; }
TxDataReadSchema &get_read_schema() { return read_schema_; };
share::ObLSID get_ls_id();
@ -343,8 +329,8 @@ private:
share::ObLSID ls_id_;
ObTabletID tablet_id_;
// Allocator to allocate ObTxData and ObUndoStatus
SliceAllocator slice_allocator_;
ObArenaAllocator arena_allocator_;
share::ObTenantTxDataAllocator *tx_data_allocator_;
ObLS *ls_;
// Pointer to tablet service, used for get tx data memtable mgr
ObLSTabletService *ls_tablet_svr_;
@ -357,19 +343,6 @@ private:
MemtableHandlesCache memtables_cache_;
}; // tx_table
// Functor applied to each (tx_id, tx_data) entry of the tx data sstable cache map.
// NOTE(review): operator()'s body is defined elsewhere; presumably it evicts entries
// older than clean_ts_ (cf. DEFAULT_CACHE_RETAINED_TIME) -- confirm in the .cpp file.
class CleanTxDataSSTableCacheFunctor
{
public:
// sstable_cache: the cache map being cleaned; clean_ts: timestamp threshold for this cleaning pass.
CleanTxDataSSTableCacheFunctor(TxDataMap &sstable_cache, int64_t clean_ts) : sstable_cache_(sstable_cache), clean_ts_(clean_ts) {}
// Invoked per cache entry; return value decides per-entry handling (body not visible here).
bool operator()(const transaction::ObTransID &key, ObTxData *tx_data);
private:
TxDataMap &sstable_cache_;
int64_t clean_ts_;
};
} // namespace storage
} // namespace oceanbase

View File

@ -592,13 +592,13 @@ void ObTxTable::destroy()
is_inited_ = false;
}
int ObTxTable::alloc_tx_data(ObTxDataGuard &tx_data_guard)
int ObTxTable::alloc_tx_data(ObTxDataGuard &tx_data_guard, const bool enable_throttle, const int64_t abs_expire_time)
{
int ret = OB_SUCCESS;
if (IS_NOT_INIT) {
ret = OB_NOT_INIT;
LOG_WARN("tx table is not init.", KR(ret));
} else if (OB_FAIL(tx_data_table_.alloc_tx_data(tx_data_guard))) {
} else if (OB_FAIL(tx_data_table_.alloc_tx_data(tx_data_guard, enable_throttle, abs_expire_time))) {
LOG_WARN("allocate tx data from tx data table fail.", KR(ret));
}
return ret;

View File

@ -109,14 +109,14 @@ public:
int offline();
int online();
// In OB 4.0, transaction contexts are divided into exec_data and tx_data, where exec_data
// indicates the data required while the transaction is running, and tx_data indicates the data
// that may still be required after the transaction commits. To avoid memory copying, the entire
// life cycle of tx_data is maintained by the tx data table. Therefore, when a transaction is
// started, the memory for tx_data needs to be allocated by this function.
//
// @param [out] tx_data, a tx data allocated by slice allocator
int alloc_tx_data(ObTxDataGuard &tx_data_guard);
/**
* @brief In OB 4.0, transaction contexts are divided into exec_data and tx_data, where exec_data indicates the data required while the transaction is running, and tx_data indicates the data that may still be required after the transaction commits. To avoid memory copying, the entire life cycle of tx_data is maintained by the tx data table. Therefore, when a transaction is started, the memory for tx_data needs to be allocated by this function.
*
* @param [out] tx_data a guard with tx data allocated by allocator
* @param [in] abs_expire_time the transaction's absolute timeout point
* @param [in] enable_throttle whether this allocation should be throttled (true by default)
*/
int alloc_tx_data(ObTxDataGuard &tx_data, const bool enable_throttle = true, const int64_t abs_expire_time = 0);
int deep_copy_tx_data(const ObTxDataGuard &in_tx_data_guard, ObTxDataGuard &out_tx_data_guard);

View File

@ -128,7 +128,7 @@ int ObTxCtxTableInfo::deserialize(const char *buf,
int ret = OB_SUCCESS;
ObTxCtxTableCommonHeader header(MAGIC_VERSION, 0);
if (OB_FAIL(tx_data_table.alloc_tx_data(tx_data_guard_))) {
if (OB_FAIL(tx_data_table.alloc_tx_data(tx_data_guard_, false/* enable_throttle */))) {
STORAGE_LOG(WARN, "alloc tx data failed", KR(ret));
} else if (OB_FAIL(header.deserialize(buf, buf_len, pos))) {
TRANS_LOG(WARN, "deserialize header fail", K(buf_len), K(pos), K(ret));
@ -151,7 +151,7 @@ int ObTxCtxTableInfo::deserialize_(const char *buf,
TRANS_LOG(WARN, "deserialize ls_id fail.", KR(ret), K(pos), K(buf_len));
} else if (OB_FAIL(serialization::decode_vi64(buf, buf_len, pos, &cluster_id_))) {
TRANS_LOG(WARN, "encode cluster_id fail", K(cluster_id_), K(buf_len), K(pos), K(ret));
} else if (OB_FAIL(tx_data_guard_.tx_data()->deserialize(buf, buf_len, pos, *tx_data_table.get_slice_allocator()))) {
} else if (OB_FAIL(tx_data_guard_.tx_data()->deserialize(buf, buf_len, pos, *tx_data_table.get_tx_data_allocator()))) {
TRANS_LOG(WARN, "deserialize state_info fail.", KR(ret), K(pos), K(buf_len));
} else if (OB_FAIL(exec_info_.deserialize(buf, buf_len, pos))) {
TRANS_LOG(WARN, "deserialize exec_info fail.", KR(ret), K(pos), K(buf_len));

View File

@ -203,7 +203,6 @@ int ObTxDataMemtableScanIterator::init(ObTxDataMemtable *tx_data_memtable)
} else {
STORAGE_LOG(INFO, "[TX DATA MERGE]init tx data dump iter finish", KR(ret), KPC(this), KPC(tx_data_memtable_));
}
return ret;
}
@ -589,7 +588,7 @@ int ObTxDataSingleRowGetter::deserialize_tx_data_from_store_buffers_(ObTxData &t
p_dest += tx_data_buffers_[idx].get_ob_string().length();
}
tx_data.tx_id_ = tx_id_;
if (OB_FAIL(tx_data.deserialize(merge_buffer, total_buffer_size, pos, slice_allocator_))) {
if (OB_FAIL(tx_data.deserialize(merge_buffer, total_buffer_size, pos, tx_data_allocator_))) {
STORAGE_LOG(WARN, "deserialize tx data failed",
KR(ret), KPHEX(merge_buffer, total_buffer_size));
hex_dump(merge_buffer, total_buffer_size, true, OB_LOG_LEVEL_WARN);

View File

@ -170,14 +170,16 @@ private:
*/
class ObTxDataSingleRowGetter
{
using SliceAllocator = ObSliceAlloc;
public:
ObTxDataSingleRowGetter(
const ObTableIterParam &iter_param,
const ObSSTableArray &sstables,
SliceAllocator &slice_allocator,
share::SCN &recycled_scn)
: iter_param_(iter_param), sstables_(sstables), slice_allocator_(slice_allocator), recycled_scn_(recycled_scn), key_datums_() {}
ObTxDataSingleRowGetter(const ObTableIterParam &iter_param,
const ObSSTableArray &sstables,
share::ObTenantTxDataAllocator &tx_data_allocator,
share::SCN &recycled_scn)
: iter_param_(iter_param),
sstables_(sstables),
tx_data_allocator_(tx_data_allocator),
recycled_scn_(recycled_scn),
key_datums_() {}
virtual ~ObTxDataSingleRowGetter() {}
/**
@ -201,7 +203,7 @@ private:
private:
const ObTableIterParam &iter_param_;
const ObSSTableArray &sstables_;
SliceAllocator &slice_allocator_;
share::ObTenantTxDataAllocator &tx_data_allocator_;
share::SCN &recycled_scn_;
transaction::ObTransID tx_id_;
ObArenaAllocator arena_allocator_;

View File

@ -339,6 +339,7 @@ _max_malloc_sample_interval
_max_rpc_packet_size
_max_schema_slot_num
_max_tablet_cnt_per_gb
_mds_memory_limit_percentage
_memory_large_chunk_cache_size
_migrate_block_verify_level
_minor_compaction_amplification_factor
@ -413,7 +414,9 @@ _transfer_start_retry_count
_transfer_start_rpc_timeout
_transfer_start_trans_timeout
_transfer_task_tablet_count_threshold
_tx_data_memory_limit_percentage
_tx_result_retention
_tx_share_memory_limit_percentage
_upgrade_stage
_wait_interval_after_parallel_ddl
_with_subquery

View File

@ -43,7 +43,6 @@ ob_unittest(test_debug_sync)
#ob_unittest(test_primary_zone_util)
#ob_unittest(test_time_zone_info_manager)
#ob_unittest(test_rpc_struct)
#ob_unittest(test_memstore_allocator_mgr)
#ob_unittest(test_dag_scheduler scheduler/test_dag_scheduler.cpp)
storage_unittest(test_ob_function)
storage_unittest(test_ob_guard)

View File

@ -1,85 +0,0 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#include "gtest/gtest.h"
#define private public
#include "share/allocator/ob_memstore_allocator_mgr.h"
#include "share/ob_tenant_mgr.h"
#include "share/ob_srv_rpc_proxy.h"
using namespace oceanbase;
using namespace oceanbase::common;
using namespace oceanbase::share;
// Smoke test for ObMemstoreAllocatorMgr: init must succeed, tenant 0 must be
// rejected, and allocator lookup must succeed (and be repeatable) for the sys
// tenant both below and above the preserved-tenant slot count.
TEST(ObMemstoreAllocatorMgr, funcs)
{
const uint64_t PRESERVED_TENANT_COUNT = 10000;
common::ObMemstoreAllocatorMgr &alloc_mgr = common::ObMemstoreAllocatorMgr::get_instance();
int ret = alloc_mgr.init();
EXPECT_EQ(OB_SUCCESS, ret);
common::ObGMemstoreAllocator *allocator = NULL;
// Tenant id 0 is invalid: lookup must fail and leave the out-pointer NULL.
ret = alloc_mgr.get_tenant_memstore_allocator(0, allocator);
EXPECT_EQ(OB_INVALID_ARGUMENT, ret);
EXPECT_EQ((void*)NULL, allocator);
// Valid tenant: lookup succeeds and yields a non-NULL allocator.
ret = alloc_mgr.get_tenant_memstore_allocator(OB_SYS_TENANT_ID, allocator);
EXPECT_EQ(OB_SUCCESS, ret);
EXPECT_NE((void*)NULL, allocator);
// Second lookup for the same tenant must also succeed (idempotent).
ret = alloc_mgr.get_tenant_memstore_allocator(OB_SYS_TENANT_ID, allocator);
EXPECT_EQ(OB_SUCCESS, ret);
EXPECT_NE((void*)NULL, allocator);
// Tenant id beyond the preserved slot range exercises the fallback path; twice for idempotence.
ret = alloc_mgr.get_tenant_memstore_allocator(OB_SYS_TENANT_ID + PRESERVED_TENANT_COUNT, allocator);
EXPECT_EQ(OB_SUCCESS, ret);
EXPECT_NE((void*)NULL, allocator);
ret = alloc_mgr.get_tenant_memstore_allocator(OB_SYS_TENANT_ID + PRESERVED_TENANT_COUNT, allocator);
EXPECT_EQ(OB_SUCCESS, ret);
EXPECT_NE((void*)NULL, allocator);
}
// Optional test bootstrap: initializes the tenant manager, registers the sys
// tenant with a 16GB/8GB memory limit, and initializes the memstore allocator
// manager. Returns the last operation's result code.
// NOTE(review): currently not invoked from main() (the call there is commented out).
int init_tenant_mgr()
{
ObTenantManager &tm = ObTenantManager::get_instance();
// The RPC-based init path below is intentionally disabled; the no-arg init() is used instead.
/*
ObAddr self;
self.set_ip_addr("127.0.0.1", 8086);
rpc::frame::ObReqTransport req_transport(NULL, NULL);
obrpc::ObSrvRpcProxy rpc_proxy;*/
//int ret = tm.init(self, rpc_proxy, &req_transport, &ObServerConfig::get_instance());
int ret = tm.init();
EXPECT_EQ(OB_SUCCESS, ret);
ret = tm.add_tenant(OB_SYS_TENANT_ID);
EXPECT_EQ(OB_SUCCESS, ret);
// Upper limit 16GB, lower limit 8GB for the sys tenant.
const int64_t ulmt = 16LL << 30;
const int64_t llmt = 8LL << 30;
ret = tm.set_tenant_mem_limit(OB_SYS_TENANT_ID, ulmt, llmt);
EXPECT_EQ(OB_SUCCESS, ret);
common::ObMemstoreAllocatorMgr &mem_mgr = common::ObMemstoreAllocatorMgr::get_instance();
ret = mem_mgr.init();
EXPECT_EQ(OB_SUCCESS, ret);
return ret;
}
// Test entry point: set logging to INFO, then hand control to GoogleTest.
int main(int argc, char **argv)
{
  // Tenant-manager bootstrap is deliberately left disabled for this test.
  //init_tenant_mgr();
  oceanbase::common::ObLogger::get_logger().set_log_level("INFO");
  OB_LOGGER.set_log_level("INFO");
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

View File

@ -44,7 +44,7 @@ storage_unittest(test_hash_performance)
storage_unittest(test_row_fuse)
#storage_unittest(test_keybtree memtable/mvcc/test_keybtree.cpp)
storage_unittest(test_query_engine memtable/mvcc/test_query_engine.cpp)
storage_unittest(test_memtable_basic memtable/test_memtable_basic.cpp)
#storage_unittest(test_memtable_basic memtable/test_memtable_basic.cpp)
storage_unittest(test_mvcc_callback memtable/mvcc/test_mvcc_callback.cpp)
# storage_unittest(test_mds_compile multi_data_source/test_mds_compile.cpp)
storage_unittest(test_mds_list multi_data_source/test_mds_list.cpp)

View File

@ -57,16 +57,16 @@ int check_sequence_set_violation(const concurrent_control::ObWriteFlag ,
}
} // concurrent_control
namespace common
namespace share
{
// override the function
int ObGMemstoreAllocator::set_memstore_threshold_without_lock(uint64_t tenant_id)
// Test override: lift the memstore threshold to "unlimited" (INT64_MAX) so
// allocations in unit tests are never rejected by the memstore limit.
int ObMemstoreAllocator::set_memstore_threshold_without_lock()
{
  const int64_t unlimited_threshold = INT64_MAX;
  arena_.set_memstore_threshold(unlimited_threshold);
  return OB_SUCCESS;
}
void* ObGMemstoreAllocator::alloc(AllocHandle& handle, int64_t size)
void* ObMemstoreAllocator::alloc(AllocHandle& handle, int64_t size)
{
int64_t align_size = upper_align(size, sizeof(int64_t));
if (!handle.is_id_valid()) {
@ -78,7 +78,7 @@ void* ObGMemstoreAllocator::alloc(AllocHandle& handle, int64_t size)
}
}
if (arena_.allocator_ == nullptr) {
if (arena_.init(OB_SERVER_TENANT_ID) != OB_SUCCESS) {
if (arena_.init() != OB_SUCCESS) {
abort();
}
}

View File

@ -22,12 +22,46 @@
#include "../mock_utils/async_util.h"
#include "test_tx_dsl.h"
#include "tx_node.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h"
namespace oceanbase
{
using namespace ::testing;
using namespace transaction;
using namespace share;
static ObSharedMemAllocMgr MTL_MEM_ALLOC_MGR;
// Test-local mock implementations of the share:: allocator entry points.
// Every allocator is wired to the file-static MTL_MEM_ALLOC_MGR instead of a
// per-tenant MTL instance, so the test runs without a full tenant environment.
namespace share {
// Mock init: binds the throttle tool to MTL_MEM_ALLOC_MGR and sets up the
// slice allocator for tx data.
// NOTE(review): `label` is accepted but never used, and `mem_attr` is passed
// default-constructed -- confirm that is intentional for tests.
int ObTenantTxDataAllocator::init(const char *label)
{
int ret = OB_SUCCESS;
ObMemAttr mem_attr;
throttle_tool_ = &(MTL_MEM_ALLOC_MGR.share_resource_throttle_tool());
if (OB_FAIL(slice_allocator_.init(
storage::TX_DATA_SLICE_SIZE, OB_MALLOC_NORMAL_BLOCK_SIZE, block_alloc_, mem_attr))) {
SHARE_LOG(WARN, "init slice allocator failed", KR(ret));
} else {
slice_allocator_.set_nway(ObTenantTxDataAllocator::ALLOC_TX_DATA_MAX_CONCURRENCY);
is_inited_ = true;
}
return ret;
}
// Mock init: points the memstore throttle tool at the file-static manager,
// then initializes the underlying arena.
int ObMemstoreAllocator::init()
{
throttle_tool_ = &MTL_MEM_ALLOC_MGR.share_resource_throttle_tool();
return arena_.init();
}
// Mock handle init: registers this handle with the file-static memstore
// allocator. NOTE(review): `tenant_id` is declared but never used.
int ObMemstoreAllocator::AllocHandle::init()
{
int ret = OB_SUCCESS;
uint64_t tenant_id = 1;
ObSharedMemAllocMgr *mtl_alloc_mgr = &MTL_MEM_ALLOC_MGR;
ObMemstoreAllocator &host = mtl_alloc_mgr->memstore_allocator();
(void)host.init_handle(*this);
return ret;
}
}; // namespace share
namespace concurrent_control
{
int check_sequence_set_violation(const concurrent_control::ObWriteFlag,
@ -102,6 +136,7 @@ public:
const testing::TestInfo *const test_info =
testing::UnitTest::GetInstance()->current_test_info();
auto test_name = test_info->name();
MTL_MEM_ALLOC_MGR.init();
_TRANS_LOG(INFO, ">>>> starting test : %s", test_name);
}
virtual void TearDown() override

View File

@ -21,12 +21,47 @@
#include "tx_node.h"
#include "../mock_utils/async_util.h"
#include "test_tx_dsl.h"
namespace oceanbase
{
using namespace ::testing;
using namespace transaction;
using namespace share;
static ObSharedMemAllocMgr MTL_MEM_ALLOC_MGR;
// Test-local mock implementations of the share:: allocator entry points.
// Every allocator is wired to the file-static MTL_MEM_ALLOC_MGR instead of a
// per-tenant MTL instance, so the test runs without a full tenant environment.
namespace share {
// Mock init: binds the throttle tool to MTL_MEM_ALLOC_MGR and sets up the
// slice allocator for tx data.
// NOTE(review): `label` is accepted but never used, and `mem_attr` is passed
// default-constructed -- confirm that is intentional for tests.
int ObTenantTxDataAllocator::init(const char *label)
{
int ret = OB_SUCCESS;
ObMemAttr mem_attr;
throttle_tool_ = &(MTL_MEM_ALLOC_MGR.share_resource_throttle_tool());
if (OB_FAIL(slice_allocator_.init(
storage::TX_DATA_SLICE_SIZE, OB_MALLOC_NORMAL_BLOCK_SIZE, block_alloc_, mem_attr))) {
SHARE_LOG(WARN, "init slice allocator failed", KR(ret));
} else {
slice_allocator_.set_nway(ObTenantTxDataAllocator::ALLOC_TX_DATA_MAX_CONCURRENCY);
is_inited_ = true;
}
return ret;
}
// Mock init: points the memstore throttle tool at the file-static manager,
// then initializes the underlying arena.
int ObMemstoreAllocator::init()
{
throttle_tool_ = &MTL_MEM_ALLOC_MGR.share_resource_throttle_tool();
return arena_.init();
}
// Mock handle init: registers this handle with the file-static memstore
// allocator. NOTE(review): `tenant_id` is declared but never used.
int ObMemstoreAllocator::AllocHandle::init()
{
int ret = OB_SUCCESS;
uint64_t tenant_id = 1;
ObSharedMemAllocMgr *mtl_alloc_mgr = &MTL_MEM_ALLOC_MGR;
ObMemstoreAllocator &host = mtl_alloc_mgr->memstore_allocator();
(void)host.init_handle(*this);
return ret;
}
}; // namespace share
namespace concurrent_control
{
int check_sequence_set_violation(const concurrent_control::ObWriteFlag ,
@ -55,6 +90,7 @@ public:
ObClockGenerator::init();
const testing::TestInfo* const test_info =
testing::UnitTest::GetInstance()->current_test_info();
MTL_MEM_ALLOC_MGR.init();
auto test_name = test_info->name();
_TRANS_LOG(INFO, ">>>> starting test : %s", test_name);
}

View File

@ -21,12 +21,46 @@
#include "../mock_utils/async_util.h"
#include "test_tx_dsl.h"
#include "tx_node.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h"
namespace oceanbase
{
using namespace ::testing;
using namespace transaction;
using namespace share;
static ObSharedMemAllocMgr MTL_MEM_ALLOC_MGR;
// Test-local mock implementations of the share:: allocator entry points.
// Every allocator is wired to the file-static MTL_MEM_ALLOC_MGR instead of a
// per-tenant MTL instance, so the test runs without a full tenant environment.
namespace share {
// Mock init: binds the throttle tool to MTL_MEM_ALLOC_MGR and sets up the
// slice allocator for tx data.
// NOTE(review): `label` is accepted but never used, and `mem_attr` is passed
// default-constructed -- confirm that is intentional for tests.
int ObTenantTxDataAllocator::init(const char *label)
{
int ret = OB_SUCCESS;
ObMemAttr mem_attr;
throttle_tool_ = &(MTL_MEM_ALLOC_MGR.share_resource_throttle_tool());
if (OB_FAIL(slice_allocator_.init(
storage::TX_DATA_SLICE_SIZE, OB_MALLOC_NORMAL_BLOCK_SIZE, block_alloc_, mem_attr))) {
SHARE_LOG(WARN, "init slice allocator failed", KR(ret));
} else {
slice_allocator_.set_nway(ObTenantTxDataAllocator::ALLOC_TX_DATA_MAX_CONCURRENCY);
is_inited_ = true;
}
return ret;
}
// Mock init: points the memstore throttle tool at the file-static manager,
// then initializes the underlying arena.
int ObMemstoreAllocator::init()
{
throttle_tool_ = &MTL_MEM_ALLOC_MGR.share_resource_throttle_tool();
return arena_.init();
}
// Mock handle init: registers this handle with the file-static memstore
// allocator. NOTE(review): `tenant_id` is declared but never used.
int ObMemstoreAllocator::AllocHandle::init()
{
int ret = OB_SUCCESS;
uint64_t tenant_id = 1;
ObSharedMemAllocMgr *mtl_alloc_mgr = &MTL_MEM_ALLOC_MGR;
ObMemstoreAllocator &host = mtl_alloc_mgr->memstore_allocator();
(void)host.init_handle(*this);
return ret;
}
}; // namespace share
namespace concurrent_control
{
int check_sequence_set_violation(const concurrent_control::ObWriteFlag,
@ -55,6 +89,7 @@ public:
ObClockGenerator::init();
const testing::TestInfo *const test_info =
testing::UnitTest::GetInstance()->current_test_info();
MTL_MEM_ALLOC_MGR.init();
auto test_name = test_info->name();
_TRANS_LOG(INFO, ">>>> starting test : %s", test_name);
}

View File

@ -22,11 +22,46 @@
#include "tx_node.h"
#include "../mock_utils/async_util.h"
#include "test_tx_dsl.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h"
namespace oceanbase
{
using namespace ::testing;
using namespace transaction;
using namespace share;
static ObSharedMemAllocMgr MTL_MEM_ALLOC_MGR;
// Test-local mock implementations of the share:: allocator entry points.
// Every allocator is wired to the file-static MTL_MEM_ALLOC_MGR instead of a
// per-tenant MTL instance, so the test runs without a full tenant environment.
namespace share {
// Mock init: binds the throttle tool to MTL_MEM_ALLOC_MGR and sets up the
// slice allocator for tx data.
// NOTE(review): `label` is accepted but never used, and `mem_attr` is passed
// default-constructed -- confirm that is intentional for tests.
int ObTenantTxDataAllocator::init(const char *label)
{
int ret = OB_SUCCESS;
ObMemAttr mem_attr;
throttle_tool_ = &(MTL_MEM_ALLOC_MGR.share_resource_throttle_tool());
if (OB_FAIL(slice_allocator_.init(
storage::TX_DATA_SLICE_SIZE, OB_MALLOC_NORMAL_BLOCK_SIZE, block_alloc_, mem_attr))) {
SHARE_LOG(WARN, "init slice allocator failed", KR(ret));
} else {
slice_allocator_.set_nway(ObTenantTxDataAllocator::ALLOC_TX_DATA_MAX_CONCURRENCY);
is_inited_ = true;
}
return ret;
}
// Mock init: points the memstore throttle tool at the file-static manager,
// then initializes the underlying arena.
int ObMemstoreAllocator::init()
{
throttle_tool_ = &MTL_MEM_ALLOC_MGR.share_resource_throttle_tool();
return arena_.init();
}
// Mock handle init: registers this handle with the file-static memstore
// allocator. NOTE(review): `tenant_id` is declared but never used.
int ObMemstoreAllocator::AllocHandle::init()
{
int ret = OB_SUCCESS;
uint64_t tenant_id = 1;
ObSharedMemAllocMgr *mtl_alloc_mgr = &MTL_MEM_ALLOC_MGR;
ObMemstoreAllocator &host = mtl_alloc_mgr->memstore_allocator();
(void)host.init_handle(*this);
return ret;
}
}; // namespace share
namespace omt {
bool the_ctrl_of_enable_transaction_free_route = true;
ObTenantConfig *ObTenantConfigMgr::get_tenant_config_with_lock(const uint64_t tenant_id,
@ -621,6 +656,7 @@ public:
common::ObClusterVersion::get_instance().update_cluster_version(CLUSTER_VERSION_4_1_0_0);
const testing::TestInfo* const test_info =
testing::UnitTest::GetInstance()->current_test_info();
MTL_MEM_ALLOC_MGR.init();
auto test_name = test_info->name();
_TRANS_LOG(INFO, ">>>> starting test : %s", test_name);
}

View File

@ -22,9 +22,9 @@
} while(0);
namespace oceanbase {
namespace common
namespace share
{
void* ObGMemstoreAllocator::alloc(AllocHandle& handle, int64_t size)
void* ObMemstoreAllocator::alloc(AllocHandle& handle, int64_t size, const int64_t expire_ts)
{
int ret = OB_SUCCESS;
int64_t align_size = upper_align(size, sizeof(int64_t));

View File

@ -33,12 +33,14 @@
#include "../mock_utils/msg_bus.h"
#include "../mock_utils/basic_fake_define.h"
#include "../mock_utils/ob_fake_tx_rpc.h"
#include "share/allocator/ob_shared_memory_allocator_mgr.h"
namespace oceanbase {
using namespace transaction;
using namespace share;
using namespace common;
namespace transaction {
template<class T>
class QueueConsumer : public share::ObThreadPool

View File

@ -35,6 +35,10 @@ using namespace memtable;
namespace transaction {
class ObFakeTxDataTable : public ObTxDataTable {
public:
ObSliceAlloc slice_allocator_;
ObTenantTxDataAllocator *FAKE_ALLOCATOR = (ObTenantTxDataAllocator *)0x1;
public:
ObFakeTxDataTable() : arena_allocator_(), map_(arena_allocator_, 1 << 20 /*2097152*/)
{
@ -46,7 +50,7 @@ public:
ObMemtableMgrHandle memtable_mgr_handle;
OB_ASSERT(OB_SUCCESS == slice_allocator_.init(
sizeof(ObTxData), OB_MALLOC_NORMAL_BLOCK_SIZE, common::default_blk_alloc, mem_attr));
slice_allocator_.set_nway(ObTxDataTable::TX_DATA_MAX_CONCURRENCY);
slice_allocator_.set_nway(32);
is_inited_ = true;
}
virtual int init(ObLS *ls, ObTxCtxTable *tx_ctx_table) override
@ -57,12 +61,15 @@ public:
virtual void stop() override {}
virtual void reset() override {}
virtual void destroy() override {}
virtual int alloc_tx_data(ObTxDataGuard &tx_data_guard) override
virtual int alloc_tx_data(ObTxDataGuard &tx_data_guard,
const bool enable_throttle,
const int64_t abs_expire_time)
{
void *ptr = slice_allocator_.alloc();
ObMemAttr attr;
void *ptr = ob_malloc(TX_DATA_SLICE_SIZE, attr);
ObTxData *tx_data = new (ptr) ObTxData();
tx_data->ref_cnt_ = 100;
tx_data->slice_allocator_ = &slice_allocator_;
tx_data->tx_data_allocator_ = FAKE_ALLOCATOR;
tx_data->flag_ = 269381;
tx_data_guard.init(tx_data);
return OB_ISNULL(tx_data) ? OB_ALLOCATE_MEMORY_FAILED : OB_SUCCESS;
@ -74,7 +81,7 @@ public:
ObTxData *to = new (ptr) ObTxData();
ObTxData *from = (ObTxData*)from_guard.tx_data();
to->ref_cnt_ = 100;
to->slice_allocator_ = &slice_allocator_;
to->tx_data_allocator_ = FAKE_ALLOCATOR;
to->flag_ = 269381;
to_guard.init(to);
OX (*to = *from);

View File

@ -309,9 +309,9 @@ TEST_F(TestObStandbyRead, trans_check_for_standby)
part1.exec_info_.prepare_version_.convert_for_tx(90);
part2.set_downstream_state(ObTxState::COMMIT);
ObTxData part2_tx_data;
ObSliceAlloc slice_allocator;
ObTenantTxDataAllocator tx_data_allocator;
part2_tx_data.ref_cnt_ = 1000;
part2_tx_data.slice_allocator_ = &slice_allocator;
part2_tx_data.tx_data_allocator_ = &tx_data_allocator;
part2.ctx_tx_data_.tx_data_guard_.init(&part2_tx_data);
part2.ctx_tx_data_.tx_data_guard_.tx_data()->commit_version_.convert_for_tx(90);
part3.set_downstream_state(ObTxState::UNKNOWN);

View File

@ -10,7 +10,7 @@
* See the Mulan PubL v2 for more details.
*/
#include "storage/tx/ob_trans_hashmap.h"
#include "share/ob_light_hashmap.h"
#include <gtest/gtest.h>
#include "share/ob_errno.h"
#include "lib/oblog/ob_log.h"
@ -40,7 +40,7 @@ public :
const char *TestObTrans::LOCAL_IP = "127.0.0.1";
const int64_t TIME_OUT = 1;
class ObTransTestValue : public ObTransHashLink<ObTransTestValue>
class ObTransTestValue : public share::ObLightHashLink<ObTransTestValue>
{
public:
ObTransTestValue() {}
@ -84,7 +84,7 @@ public:
}
};
typedef ObTransHashMap<ObTransID, ObTransTestValue, ObTransTestValueAlloc, common::SpinRWLock> TestHashMap;
typedef share::ObLightHashMap<ObTransID, ObTransTestValue, ObTransTestValueAlloc, common::SpinRWLock> TestHashMap;
class ForeachFunctor
{

View File

@ -36,6 +36,29 @@ using namespace storage;
using namespace blocksstable;
using namespace share;
namespace share
{
// Test-local override of ObTenantTxDataAllocator::init using the default block
// allocator.
// NOTE(review): `label` is accepted but never used, `mem_attr` is passed
// default-constructed, and unlike the sibling test mocks `throttle_tool_` is
// left unset here -- confirm callers in this test never touch it.
int ObTenantTxDataAllocator::init(const char* label)
{
int ret = OB_SUCCESS;
ObMemAttr mem_attr;
if (OB_FAIL(slice_allocator_.init(
storage::TX_DATA_SLICE_SIZE, OB_MALLOC_NORMAL_BLOCK_SIZE, common::default_blk_alloc, mem_attr))) {
SHARE_LOG(WARN, "init slice allocator failed", KR(ret));
} else {
slice_allocator_.set_nway(ALLOC_TX_DATA_MAX_CONCURRENCY);
is_inited_ = true;
}
return ret;
}
// Test stub: hands out memory straight from the slice allocator; the throttle
// parameters are intentionally ignored so tests never block on throttling.
void *ObTenantTxDataAllocator::alloc(const bool enable_throttle, const int64_t abs_expire_time)
{
  (void)enable_throttle;  // mock never throttles
  (void)abs_expire_time;  // mock never waits on an expire time
  return slice_allocator_.alloc();
}
};
int storage::ObTenantMetaMemMgr::fetch_tenant_config()
{
return OB_SUCCESS;
@ -166,6 +189,7 @@ public:
ObTenantMetaMemMgr t3m_;
ObIMemtableMgr *mt_mgr_;
ObTxCtxMemtableMgr *ctx_mt_mgr_;
ObTenantTxDataAllocator tx_data_allocator_;
ObTenantBase tenant_base_;
};
@ -275,7 +299,8 @@ TEST_F(TestTxCtxTable, test_tx_ctx_memtable_mgr)
ObTxDataTable tx_data_table;
ObMemAttr attr;
attr.tenant_id_ = MTL_ID();
tx_data_table.slice_allocator_.init(sizeof(ObTxData), OB_MALLOC_NORMAL_BLOCK_SIZE, common::default_blk_alloc, attr);
tx_data_allocator_.init("test");
tx_data_table.tx_data_allocator_ = &tx_data_allocator_;
ObTxPalfParam palf_param((logservice::ObLogHandler *)(0x01),
(transaction::ObDupTableLSHandler *)(0x02));