[FEAT MERGE] log4200

Co-authored-by: zhjc1124 <zhjc1124@gmail.com>
Co-authored-by: BinChenn <binchenn.bc@gmail.com>
Co-authored-by: oceanoverflow <oceanoverflow@gmail.com>
This commit is contained in:
obdev
2023-05-06 08:15:43 +00:00
committed by ob-robot
parent f0fdf277f1
commit d6c6e05727
450 changed files with 33015 additions and 2243 deletions

View File

@ -177,7 +177,7 @@ private:
// lsn of last log_entry, may be invalid if haven't generate any log_entry.
palf::LSN last_lsn_();
private:
TenantLSID tls_id_;
logservice::TenantLSID tls_id_;
ObTxLogBlockBuilder block_builder_;
ObLogLSNArray lsn_arr_;
LSN last_record_lsn_;

View File

@ -48,7 +48,7 @@ public:
UNUSED(page);
}
void set_task_info(const TenantLSID &tls_id,
void set_task_info(const logservice::TenantLSID &tls_id,
const char *tls_id_str)
{
UNUSED(tls_id);
@ -90,7 +90,7 @@ TEST(ObLogTransTaskPool, Function1)
MockTransTask **tasks = new MockTransTask*[task_cnt];
const char *tls_info = "tenant_ls_id";
TenantLSID tls_id;
logservice::TenantLSID tls_id;
for (int64_t idx = 0; idx < task_cnt; ++idx) {
tasks[idx] = pool.get(tls_info, tls_id);
@ -123,7 +123,7 @@ TEST(ObLogTransTaskPool, Function2)
MockTransTask **tasks = new MockTransTask*[task_cnt];
const char *tls_info = "partition";
TenantLSID tls_id;
logservice::TenantLSID tls_id;
for (int64_t idx = 0; idx < task_cnt; ++idx) {
tasks[idx] = pool.get(tls_info, tls_id);

View File

@ -23,7 +23,7 @@
#include "share/ob_define.h"
#include "storage/ob_storage_log_type.h"
#include "storage/tx/ob_trans_log.h"
#include "ob_log_fetch_stat_info.h"
#include "logservice/logfetcher/ob_log_fetch_stat_info.h"
#include "logservice/libobcdc/src/ob_log_utils.h"
#include "logservice/libobcdc/src/ob_log_ls_fetch_ctx.h"
#include "logservice/libobcdc/src/ob_log_ls_fetch_mgr.h"
@ -42,13 +42,14 @@ using namespace common;
using namespace libobcdc;
using namespace transaction;
using namespace storage;
using namespace logfetcher;
#define PREPARE_ENV(tenant_id, ls_id, tx_id, cluster_id) \
bool stop_flag = false; \
TenantLSID tls_id(tenant_id, share::ObLSID(ls_id)); \
logservice::TenantLSID tls_id(tenant_id, share::ObLSID(ls_id)); \
EXPECT_TRUE(tls_id.is_valid()); \
IObCDCPartTransResolver::MissingLogInfo missing_info; \
TransStatInfo tsi; \
logfetcher::TransStatInfo tsi; \
int64_t start_ts_ns = 1; \
palf::LSN start_lsn(0); \
EXPECT_TRUE(start_lsn.is_valid());

View File

@ -86,7 +86,7 @@ TEST_F(TestLogGroupBuffer, test_get_buffer_pos)
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.init(start_lsn));
EXPECT_EQ(OB_INVALID_ARGUMENT, log_group_buffer_.get_buffer_pos_(lsn, start_pos));
lsn.val_ = 50;
EXPECT_EQ(OB_INVALID_ARGUMENT, log_group_buffer_.get_buffer_pos_(lsn, start_pos));
EXPECT_EQ(OB_ERR_OUT_OF_LOWER_BOUND, log_group_buffer_.get_buffer_pos_(lsn, start_pos));
lsn.val_ = 110;
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.get_buffer_pos_(lsn, start_pos));
EXPECT_EQ(10, start_pos);
@ -312,6 +312,88 @@ TEST_F(TestLogGroupBuffer, test_to_follower)
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.to_follower());
}
// Exercises LogGroupBuffer::read_data() end to end: reading before reuse_lsn,
// a plain read, reads invalidated by overwrite and by truncate, and a read of
// data that wraps around the end of the ring buffer.
TEST_F(TestLogGroupBuffer, test_read_data)
{
LSN lsn(100);
char data[1024];
int64_t len = 100;
LSN reuse_lsn(0);
LSN start_lsn(0);
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.init(start_lsn));
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.fill(lsn, data, len));
LSN read_begin_lsn(0);
int64_t in_read_size = 100;
int64_t out_read_size = 0;
char *out_buf = (char*)malloc(1024);
// read nothing because reuse_lsn <= read_begin_lsn
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.read_data(read_begin_lsn, in_read_size, out_buf, out_read_size));
EXPECT_EQ(0, out_read_size);
reuse_lsn.val_ = 200;
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.inc_update_reuse_lsn(reuse_lsn));
// read data success
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.read_data(read_begin_lsn, in_read_size, out_buf, out_read_size));
EXPECT_EQ(in_read_size, out_read_size);
// fill data at lsn(40M) with len 50.
lsn.val_ = reuse_lsn.val_ + log_group_buffer_.get_available_buffer_size() - 200;
len = 50;
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.fill(lsn, data, len));
// read data at lsn(50)
read_begin_lsn.val_ = 50;
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.read_data(read_begin_lsn, in_read_size, out_buf, out_read_size));
EXPECT_EQ(in_read_size, out_read_size);
// read data at lsn(49), this pos has been re-written
read_begin_lsn.val_ = 49;
out_read_size = 0;
EXPECT_EQ(OB_ERR_OUT_OF_LOWER_BOUND, log_group_buffer_.read_data(read_begin_lsn, in_read_size, out_buf, out_read_size));
EXPECT_EQ(0, out_read_size);
// truncate at lsn(40M + 1000)
LSN truncate_lsn(1000 + log_group_buffer_.get_available_buffer_size());
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.truncate(truncate_lsn));
// read data at lsn(49), this pos has been re-written
out_read_size = 0;
read_begin_lsn.val_ = 49;
EXPECT_EQ(OB_ERR_OUT_OF_LOWER_BOUND, log_group_buffer_.read_data(read_begin_lsn, in_read_size, out_buf, out_read_size));
// truncate at lsn(0)
// truncate_lsn.val_ = 0;
// EXPECT_EQ(OB_SUCCESS, log_group_buffer_.truncate(truncate_lsn));
// read data at lsn(500): below the truncate point, so it is out of lower bound
out_read_size = 0;
read_begin_lsn.val_ = 500;
EXPECT_EQ(OB_ERR_OUT_OF_LOWER_BOUND, log_group_buffer_.read_data(read_begin_lsn, in_read_size, out_buf, out_read_size));
EXPECT_EQ(0, out_read_size);
// fill data at lsn 500 - 600
lsn = truncate_lsn + 500;
len = 100;
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.fill(lsn, data, len));
reuse_lsn = truncate_lsn + 600;
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.inc_update_reuse_lsn(reuse_lsn));
// read data at lsn(truncate + 500): expect to get back the 100 bytes just
// filled, since reuse_lsn has advanced past the end of that range
out_read_size = 0;
read_begin_lsn = truncate_lsn + 500;
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.read_data(read_begin_lsn, in_read_size, out_buf, out_read_size));
EXPECT_EQ(100, out_read_size);
// truncate at lsn(40M + 40M)
truncate_lsn.val_ = 2 * log_group_buffer_.get_available_buffer_size();
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.truncate(truncate_lsn));
reuse_lsn = truncate_lsn + 100;
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.inc_update_reuse_lsn(reuse_lsn));
// fill 100 bytes starting 50 before the end of the buffer, so the data
// wraps around to the front of the ring buffer
lsn.val_ = truncate_lsn.val_ + log_group_buffer_.get_available_buffer_size() - 50;
len = 100;
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.fill(lsn, data, len));
// update reuse_lsn
reuse_lsn = lsn + len;
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.inc_update_reuse_lsn(reuse_lsn));
// read above wrapped data, expect success
out_read_size = 0;
read_begin_lsn = lsn;
in_read_size = 100;
EXPECT_EQ(OB_SUCCESS, log_group_buffer_.read_data(read_begin_lsn, in_read_size, out_buf, out_read_size));
EXPECT_EQ(in_read_size, out_read_size);
free(out_buf);
}
} // END of unittest
} // end of oceanbase
@ -319,7 +401,7 @@ int main(int argc, char **argv)
{
system("rm -rf ./test_log_group_buffer.log*");
OB_LOGGER.set_file_name("test_log_group_buffer.log", true);
OB_LOGGER.set_log_level("INFO");
OB_LOGGER.set_log_level("TRACE");
PALF_LOG(INFO, "begin unittest::test_log_group_buffer");
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();

View File

@ -3,6 +3,7 @@ storage_unittest(test_hfilter_parser table/test_hfilter_parser.cpp)
storage_unittest(test_query_response_time mysql/test_query_response_time.cpp)
storage_unittest(test_create_executor table/test_create_executor.cpp)
storage_unittest(test_table_sess_pool table/test_table_sess_pool.cpp)
storage_unittest(test_ingress_bw_alloc_manager net/test_ingress_bw_alloc_manager.cpp)
ob_unittest(test_uniq_task_queue)
add_subdirectory(rpc EXCLUDE_FROM_ALL)

View File

@ -0,0 +1,155 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#define USING_LOG_PREFIX RS
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#define private public
#include "observer/net/ob_ingress_bw_alloc_service.h"
namespace oceanbase
{
using namespace common;
using namespace obrpc;
namespace rootserver
{
// Minimal gtest fixture for the endpoint ingress bandwidth-allocation tests.
// SetUp/TearDown are intentionally empty: each test case constructs, drives
// and destroys its own ObNetEndpointIngressManager instance.
class TestEndpointIngressService : public testing::Test
{
public:
TestEndpointIngressService()
{}
virtual ~TestEndpointIngressService()
{}
virtual void SetUp(){};
virtual void TearDown()
{}
// Explicit no-op TestBody so the fixture itself is instantiable.
virtual void TestBody()
{}
};
// Verifies ObNetEndpointIngressManager: endpoint registration (including
// idempotent re-registration) and the bandwidth-allocation plan.
// Allocation invariant exercised below: each endpoint first receives
// min(predicted_bw_, fair share); leftover bandwidth is then split evenly
// among the endpoints whose prediction exceeded their share, so the
// assigned amounts always sum to total_bw_limit_.
TEST_F(TestEndpointIngressService, ingress_service)
{
  int ret = OB_SUCCESS;
  ObNetEndpointIngressManager ingress_manager_;
  ret = ingress_manager_.init();
  ASSERT_EQ(ret, OB_SUCCESS);
  // register_endpoint
  ObAddr addr1(1, 1);
  ObNetEndpointKey key1;
  key1.addr_ = addr1;
  int64_t time = ObTimeUtility::current_time();
  ret = ingress_manager_.register_endpoint(key1, time);
  ASSERT_EQ(ret, OB_SUCCESS);
  // register_endpoint second time: must be idempotent
  ret = ingress_manager_.register_endpoint(key1, time);
  ASSERT_EQ(ret, OB_SUCCESS);
  // get endpoint value
  ObNetEndpointValue *value1 = nullptr;
  ret = ingress_manager_.ingress_plan_map_.get_refactored(key1, value1);
  ASSERT_EQ(ret, OB_SUCCESS);
  ASSERT_EQ(key1.addr_, addr1);
  ASSERT_EQ(value1->expire_time_, time);
  // set endpoint applied bandwidth
  value1->predicted_bw_ = 10;
  // update ingress plan
  ObAddr addr2(2, 2);
  ObNetEndpointKey key2;
  ObNetEndpointValue *value2 = nullptr;
  key2.addr_ = addr2;
  ret = ingress_manager_.register_endpoint(key2, time);
  ASSERT_EQ(ret, OB_SUCCESS);
  ret = ingress_manager_.ingress_plan_map_.get_refactored(key2, value2);
  ASSERT_EQ(ret, OB_SUCCESS);
  value1->predicted_bw_ = 10;
  value2->predicted_bw_ = 10;
  ObAddr addr3(3, 3);
  ObNetEndpointKey key3;
  ObNetEndpointValue *value3 = nullptr;
  key3.addr_ = addr3;
  ret = ingress_manager_.register_endpoint(key3, time);
  ASSERT_EQ(ret, OB_SUCCESS);
  ret = ingress_manager_.ingress_plan_map_.get_refactored(key3, value3);
  ASSERT_EQ(ret, OB_SUCCESS);
  value3->predicted_bw_ = 20;
  ingress_manager_.total_bw_limit_ = 100;
  // empty update set is a no-op
  ObNetEndpointKVArray update_kvs;
  ret = ingress_manager_.update_ingress_plan(update_kvs);
  ASSERT_EQ(ret, OB_SUCCESS);
  // single endpoint gets the whole limit
  update_kvs.push_back(ObNetEndpointKeyValue(key2, value2));
  ret = ingress_manager_.update_ingress_plan(update_kvs);
  ASSERT_EQ(ret, OB_SUCCESS);
  ASSERT_EQ(value2->assigned_bw_, 100);
  // two endpoints: each gets its prediction plus half the surplus (35)
  update_kvs.push_back(ObNetEndpointKeyValue(key3, value3));
  ret = ingress_manager_.update_ingress_plan(update_kvs);
  ASSERT_EQ(ret, OB_SUCCESS);
  ASSERT_EQ(value2->assigned_bw_, 45);
  ASSERT_EQ(value3->assigned_bw_, 55);
  // limit exactly covers predictions: everyone gets what they asked for
  ingress_manager_.total_bw_limit_ = 30;
  ret = ingress_manager_.update_ingress_plan(update_kvs);
  ASSERT_EQ(ret, OB_SUCCESS);
  ASSERT_EQ(value2->assigned_bw_, 10);
  ASSERT_EQ(value3->assigned_bw_, 20);
  // under-provisioned: value1's small prediction (2) is satisfied, the
  // remaining 18 is split evenly between the two over-demanding endpoints
  update_kvs.push_back(ObNetEndpointKeyValue(key1, value1));
  value1->predicted_bw_ = 2;
  ingress_manager_.total_bw_limit_ = 20;
  ret = ingress_manager_.update_ingress_plan(update_kvs);
  ASSERT_EQ(ret, OB_SUCCESS);
  ASSERT_EQ(value1->assigned_bw_, 2);
  ASSERT_EQ(value2->assigned_bw_, 9);
  ASSERT_EQ(value3->assigned_bw_, 9);
  // fourth endpoint with a huge prediction: surplus 18 now splits 6/6/6
  ObAddr addr4(4, 4);
  ObNetEndpointKey key4;
  ObNetEndpointValue *value4 = nullptr;
  key4.addr_ = addr4;
  ret = ingress_manager_.register_endpoint(key4, time);
  ASSERT_EQ(ret, OB_SUCCESS);
  ret = ingress_manager_.ingress_plan_map_.get_refactored(key4, value4);
  ASSERT_EQ(ret, OB_SUCCESS);
  value4->predicted_bw_ = 100;
  update_kvs.push_back(ObNetEndpointKeyValue(key4, value4));
  ret = ingress_manager_.update_ingress_plan(update_kvs);
  ASSERT_EQ(ret, OB_SUCCESS);
  ASSERT_EQ(value1->assigned_bw_, 2);
  ASSERT_EQ(value2->assigned_bw_, 6);
  ASSERT_EQ(value3->assigned_bw_, 6);
  // BUGFIX: the original asserted value3 twice; the new endpoint's share
  // (2 + 6 + 6 + 6 == total_bw_limit_ of 20) is what must be checked here.
  ASSERT_EQ(value4->assigned_bw_, 6);
  ingress_manager_.destroy();
}
} // namespace rootserver
} // namespace oceanbase
// Test entry point: configure logging first, then hand control to GoogleTest.
int main(int argc, char **argv)
{
  // Keep unit-test log output at INFO verbosity.
  oceanbase::common::ObLogger::get_logger().set_log_level("INFO");
  ::testing::InitGoogleTest(&argc, argv);
  const int rc = RUN_ALL_TESTS();
  return rc;
}
#undef private

View File

@ -285,9 +285,6 @@ TEST_F(TestLogStreamBackup, test_backup_path)
LOG_INFO("dump path", K(path), K(expect_path));
ASSERT_EQ(0, path.get_obstr().compare(expect_path));
expect_path = "file:///obbackup/tenant_1_incarnation_1/data/backup_set_1_full/logstream_1/complement_log/0";
ret = ObBackupPathUtil::get_ls_complement_log_backup_path(
root_path, tenant_id_, backup_set_desc_, ls_id_, file_id_, path);
ASSERT_EQ(OB_SUCCESS, ret);
LOG_INFO("dump path", K(path), K(expect_path));
ASSERT_EQ(0, path.get_obstr().compare(expect_path));

View File

@ -614,28 +614,11 @@ TEST_F(TestIOStruct, IOFaultDetector)
ASSERT_SUCC(detector.start());
ObIOManager::get_instance().is_working_ = true;
// test write failure detection
// test read failure detection
ObIOInfo io_info = get_random_io_info();
ObIORequest req;
req.inc_ref();
ASSERT_SUCC(req.init(io_info));
req.io_info_.flag_.set_mode(ObIOMode::WRITE);
req.finish(OB_CANCELED);
for (int64_t i = 0; i < 10000L; ++i) {
detector.record_failure(req);
}
ASSERT_SUCC(detector.get_device_health_status(dhs, disk_abnormal_time));
ASSERT_TRUE(DEVICE_HEALTH_NORMAL == dhs);
ASSERT_TRUE(0 == disk_abnormal_time);
req.ret_code_.io_ret_ = OB_IO_ERROR;
for (int64_t i = 0; i < 10000L; ++i) {
detector.record_failure(req);
}
ASSERT_SUCC(detector.get_device_health_status(dhs, disk_abnormal_time));
ASSERT_TRUE(DEVICE_HEALTH_ERROR == dhs);
ASSERT_TRUE(disk_abnormal_time > 0);
// test read failure detection
detector.reset_device_health();
ASSERT_SUCC(detector.get_device_health_status(dhs, disk_abnormal_time));
ASSERT_TRUE(DEVICE_HEALTH_NORMAL == dhs);