[liboblog] support storage work mode
Committed by: LINxiansheng
Parent: 8b88f1f1bd
Commit: 463064375a
unittest/obcdc/CMakeLists.txt (new file, 30 lines)
@@ -0,0 +1,30 @@
add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
link_directories(${DEP_DIR}/lib/mariadb)
function(libobcdc_unittest case)
  ob_unittest(${ARGV})
  target_link_libraries(${case} PRIVATE obcdc)
  disable_pch(${case})
  target_include_directories(${case}
    PRIVATE ${PROJECT_SOURCE_DIR}/tools ${CMAKE_SOURCE_DIR}/unittest ${CMAKE_SOURCE_DIR}/deps/oblib/unittest ${DEP_DIR}/include)
endfunction()

libobcdc_unittest(test_log_part_mgr)
libobcdc_unittest(test_log_task_pool)
libobcdc_unittest(test_small_arena)
libobcdc_unittest(test_log_config)
libobcdc_unittest(test_log_fake_common_config)
libobcdc_unittest(test_log_table_matcher)
libobcdc_unittest(test_ob_map_queue)
libobcdc_unittest(test_ob_map_queue_thread)
libobcdc_unittest(test_ob_log_timer)
libobcdc_unittest(test_ob_log_dlist)
libobcdc_unittest(test_ob_log_part_svr_list)
libobcdc_unittest(test_ob_log_all_svr_cache)
libobcdc_unittest(test_ob_log_start_log_id_locator)
libobcdc_unittest(test_ob_log_heartbeater)
libobcdc_unittest(test_log_utils)
libobcdc_unittest(test_ob_log_adapt_string)
libobcdc_unittest(test_ob_concurrent_seq_queue)
libobcdc_unittest(test_ob_seq_thread)
libobcdc_unittest(test_ob_log_part_trans_resolver_new)
libobcdc_unittest(test_log_svr_blacklist)
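The libobcdc_unittest() helper above registers one gtest binary per source file under unittest/obcdc. As an illustration only (hypothetical test name, not part of this commit), a new case would be added by creating unittest/obcdc/test_my_feature.cpp and appending one more line in the same style:

libobcdc_unittest(test_my_feature)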
unittest/obcdc/fake_config_server (new executable file, 12 lines)
@@ -0,0 +1,12 @@
#!/bin/bash

port=${1:-6789}

wrap_json='{"Message": "successful", "Success": true, "Code": 200, "Data":'
json=''
while content="$(printf "HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s" ${#json} "$json" | nc -l $port)" ; do
  if echo "$content" | grep -q '^POST' && echo "$content" | tail -n 1 | grep -q '^{' ; then
    json="$wrap_json$(echo "$content" | tail -n 1)}"
    # echo "$json"
  fi
done
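A rough way to exercise the fake config server (a sketch only; it assumes a netcat that serves one connection per loop iteration, and the payload key is hypothetical): start the script, POST a JSON body, then read it back wrapped in the response envelope on the next request.

./fake_config_server 6789 &
curl -s -X POST --data '{"ObRegion": "ob_test"}' http://127.0.0.1:6789/   # first reply has an empty body
curl -s http://127.0.0.1:6789/   # now returns {"Message": "successful", "Success": true, "Code": 200, "Data": {"ObRegion": "ob_test"}}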
unittest/obcdc/nopretest_test_ext_break.cpp (new file, 544 lines)
@@ -0,0 +1,544 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include "ob_log_fetcher_rpc_interface.h"
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
using namespace common;
|
||||
using namespace obrpc;
|
||||
using namespace liboblog;
|
||||
using namespace liboblog::fetcher;
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
class MockFectherInterface : public IFetcherRpcInterface
|
||||
{
|
||||
public:
|
||||
MockFectherInterface(ObNetClient &net_client,
|
||||
const uint64_t tenant_id = OB_SYS_TENANT_ID)
|
||||
: net_client_(net_client),
|
||||
tenant_id_(tenant_id)
|
||||
{
|
||||
svr_finder_ = NULL;
|
||||
}
|
||||
void set_svr(const ObAddr &svr)
|
||||
{
|
||||
svr_ = svr;
|
||||
}
|
||||
virtual const ObAddr& get_svr() const
|
||||
{
|
||||
return svr_;
|
||||
}
|
||||
void set_timeout(const int64_t timeout)
|
||||
{
|
||||
timeout_ = timeout;
|
||||
}
|
||||
virtual int req_start_log_id_by_ts(const ObLogReqStartLogIdByTsRequest &req,
|
||||
ObLogReqStartLogIdByTsResponse &res)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int req_start_log_id_by_ts_2(
|
||||
const ObLogReqStartLogIdByTsRequestWithBreakpoint &req,
|
||||
ObLogReqStartLogIdByTsResponseWithBreakpoint &res)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObLogExternalProxy proxy;
|
||||
if (OB_SUCCESS != (ret = net_client_.get_proxy(proxy))) {
|
||||
_E_("err get proxy", K(ret));
|
||||
} else {
|
||||
ret = proxy.to(svr_).by(tenant_id_).timeout(timeout_).req_start_log_id_by_ts_with_breakpoint(req, res);
|
||||
int err = proxy.get_result_code().rcode_;
|
||||
if (_FAIL_(ret) && _FAIL_(err)) {
|
||||
_W_("err rpc req start log id by ts", K(ret), "result_code", err,
|
||||
"svr", get_svr(), K(req));
|
||||
res.reset();
|
||||
res.set_err(OB_ERR_SYS);
|
||||
ret = OB_SUCCESS;
|
||||
}
|
||||
else {}
|
||||
_D_("rpc: req start log id by ts", K(ret), "svr", get_svr(),
|
||||
K(req), K(res));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int req_start_pos_by_log_id(
|
||||
const ObLogReqStartPosByLogIdRequest &req,
|
||||
ObLogReqStartPosByLogIdResponse &res)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int req_start_pos_by_log_id_2(
|
||||
const ObLogReqStartPosByLogIdRequestWithBreakpoint& req,
|
||||
ObLogReqStartPosByLogIdResponseWithBreakpoint& res)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObLogExternalProxy proxy;
|
||||
if (OB_SUCCESS != (ret = net_client_.get_proxy(proxy))) {
|
||||
_E_("err get proxy", K(ret));
|
||||
} else {
|
||||
ret = proxy.to(svr_).by(tenant_id_).timeout(timeout_).req_start_pos_by_log_id_with_breakpoint(req, res);
|
||||
int err = proxy.get_result_code().rcode_;
|
||||
if (_FAIL_(ret) && _FAIL_(err)) {
|
||||
_W_("err rpc req start pos by log id", K(ret), "result_code", err,
|
||||
"svr", get_svr(), K(req));
|
||||
res.reset();
|
||||
res.set_err(OB_ERR_SYS);
|
||||
ret = OB_SUCCESS;
|
||||
}
|
||||
else { }
|
||||
_D_("rpc: req start pos by log id", K(ret), "svr", get_svr(),
|
||||
K(req), K(res));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int fetch_log(const ObLogExternalFetchLogRequest& req,
|
||||
ObLogExternalFetchLogResponse& res)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObLogExternalProxy proxy;
|
||||
if (OB_SUCCESS != (ret = net_client_.get_proxy(proxy))) {
|
||||
_E_("err get proxy", K(ret));
|
||||
} else {
|
||||
ret = proxy.to(svr_).by(tenant_id_).timeout(timeout_).fetch_log(req, res);
|
||||
int err = proxy.get_result_code().rcode_;
|
||||
if (_FAIL_(ret) && _FAIL_(err)) {
|
||||
_W_("err rpc fetch log", K(ret), "result_code", err,
|
||||
"svr", get_svr(), K(req));
|
||||
res.reset();
|
||||
res.set_err(OB_ERR_SYS);
|
||||
ret = OB_SUCCESS;
|
||||
}
|
||||
else { }
|
||||
_D_("rpc: fetch log", K(ret), "svr", get_svr(), K(req), K(res));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int req_heartbeat_info(const ObLogReqHeartbeatInfoRequest& req,
|
||||
ObLogReqHeartbeatInfoResponse& res)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObLogExternalProxy proxy;
|
||||
if (OB_SUCCESS != (ret = net_client_.get_proxy(proxy))) {
|
||||
_E_("err get proxy", K(ret));
|
||||
} else {
|
||||
ret = proxy.to(svr_).by(tenant_id_).timeout(timeout_).req_heartbeat_info(req, res);
|
||||
int err = proxy.get_result_code().rcode_;
|
||||
if (_FAIL_(ret) && _FAIL_(err)) {
|
||||
_W_("err rpc req heartbeat info", K(ret), "result_code", err,
|
||||
"svr", get_svr(), K(req));
|
||||
res.reset();
|
||||
res.set_err(OB_ERR_SYS);
|
||||
ret = OB_SUCCESS;
|
||||
}
|
||||
else { }
|
||||
_D_("rpc: req heartbeat info", K(ret), "svr", get_svr(), K(req), K(res));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int req_leader_heartbeat(
|
||||
const obrpc::ObLogLeaderHeartbeatReq &req,
|
||||
obrpc::ObLogLeaderHeartbeatResp &res)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObLogExternalProxy proxy;
|
||||
if (OB_SUCCESS != (ret = net_client_.get_proxy(proxy))) {
|
||||
_E_("err get proxy", K(ret));
|
||||
} else {
|
||||
ret = proxy.to(svr_).by(tenant_id_).timeout(timeout_).leader_heartbeat(req, res);
|
||||
int err = proxy.get_result_code().rcode_;
|
||||
if (_FAIL_(ret) && _FAIL_(err)) {
|
||||
_W_("err rpc req heartbeat info", K(ret), "result_code", err,
|
||||
"svr", get_svr(), K(req));
|
||||
res.reset();
|
||||
res.set_err(OB_ERR_SYS);
|
||||
ret = OB_SUCCESS;
|
||||
}
|
||||
else { }
|
||||
_D_("rpc: req heartbeat info", K(ret), "svr", get_svr(), K(req), K(res));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int req_svr_feedback(const ReqLogSvrFeedback &feedback)
|
||||
{
|
||||
// This interface is deprecated.
|
||||
UNUSED(feedback);
|
||||
return common::OB_NOT_IMPLEMENT;
|
||||
}
|
||||
|
||||
virtual int open_stream(const ObLogOpenStreamReq &req,
|
||||
ObLogOpenStreamResp &resp)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(req);
|
||||
UNUSED(resp);
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int fetch_stream_log(const ObLogStreamFetchLogReq &req,
|
||||
ObLogStreamFetchLogResp &resp)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(req);
|
||||
UNUSED(resp);
|
||||
return ret;
|
||||
}
|
||||
private:
|
||||
ObNetClient &net_client_;
|
||||
SvrFinder *svr_finder_;
|
||||
ObAddr svr_;
|
||||
uint64_t tenant_id_;
|
||||
int64_t timeout_;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
using namespace oceanbase::common;
|
||||
using namespace oceanbase::obrpc;
|
||||
using namespace oceanbase::liboblog;
|
||||
using namespace oceanbase::unittest;
|
||||
|
||||
ObAddr get_svr_addr()
|
||||
{
|
||||
ObAddr svr;
|
||||
int32_t port = 59700;
|
||||
svr.set_ip_addr("100.81.152.31", port);
|
||||
return svr;
|
||||
}
|
||||
|
||||
#define MILLI_SECOND 1000
|
||||
#define SECOND (1000 * 1000)
|
||||
|
||||
int64_t get_timeout()
|
||||
{
|
||||
return 1000 * SECOND;
|
||||
}
|
||||
|
||||
#define N 3
|
||||
const ObPartitionKey pk1(1099511677785, 0, 1);
|
||||
const ObPartitionKey pk3(1099511677784, 0, 1);
|
||||
const ObPartitionKey pk2(1099511677783, 0, 1);
|
||||
ObPartitionKey pks[N] = { pk1, pk2, pk3 };
|
||||
|
||||
typedef ObLogReqStartLogIdByTsRequestWithBreakpoint TsReq;
|
||||
typedef ObLogReqStartLogIdByTsRequestWithBreakpoint::Param TsReqParam;
|
||||
typedef ObLogReqStartLogIdByTsRequestWithBreakpoint::ParamArray TsReqParamArray;
|
||||
typedef ObLogReqStartLogIdByTsResponseWithBreakpoint TsResp;
|
||||
typedef ObLogReqStartLogIdByTsResponseWithBreakpoint::Result TsRespResult;
|
||||
typedef ObLogReqStartLogIdByTsResponseWithBreakpoint::ResultArray TsRespResultArray;
|
||||
|
||||
typedef ObLogReqStartPosByLogIdRequestWithBreakpoint IdReq;
|
||||
typedef ObLogReqStartPosByLogIdRequestWithBreakpoint::Param IdReqParam;
|
||||
typedef ObLogReqStartPosByLogIdRequestWithBreakpoint::ParamArray IdReqParamArray;
|
||||
typedef ObLogReqStartPosByLogIdResponseWithBreakpoint IdResp;
|
||||
typedef ObLogReqStartPosByLogIdResponseWithBreakpoint::Result IdRespResult;
|
||||
typedef ObLogReqStartPosByLogIdResponseWithBreakpoint::ResultArray IdRespResultArray;
|
||||
|
||||
void test_ts_break(const int64_t start_ts, TsResp &resp)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObNetClient net_client;
|
||||
if (OB_FAIL(net_client.init())) {
|
||||
_E_("net client init error", K(ret));
|
||||
} else {
|
||||
MockFectherInterface rpc(net_client);
|
||||
rpc.set_svr(get_svr_addr());
|
||||
rpc.set_timeout(get_timeout());
|
||||
TsReq req;
|
||||
for (int i = 0; OB_SUCC(ret) && i < N; i++) {
|
||||
TsReqParam param;
|
||||
param.pkey_ = pks[i];
|
||||
param.start_tstamp_ = start_ts;
|
||||
if (OB_FAIL(req.append_param(param))) {
|
||||
_W_("push param error", K(ret));
|
||||
}
|
||||
}
|
||||
ret = rpc.req_start_log_id_by_ts_2(req, resp);
|
||||
_I_("----------------------------------------");
|
||||
_I_("start_ts:", K(start_ts));
|
||||
_I_("req_start_log_id_by_ts finish", K(ret), K(req), K(resp));
|
||||
_I_("----------------------------------------");
|
||||
}
|
||||
}
|
||||
|
||||
void test_id_break(uint64_t start_log_ids[N], IdResp &resp)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObNetClient net_client;
|
||||
if (OB_FAIL(net_client.init())) {
|
||||
_E_("net client init error", K(ret));
|
||||
} else {
|
||||
MockFectherInterface rpc(net_client);
|
||||
rpc.set_svr(get_svr_addr());
|
||||
rpc.set_timeout(get_timeout());
|
||||
IdReq req;
|
||||
for (int i = 0; OB_SUCC(ret) && i < N; i++) {
|
||||
IdReqParam param;
|
||||
param.pkey_ = pks[i];
|
||||
param.start_log_id_ = start_log_ids[i];
|
||||
if (OB_FAIL(req.append_param(param))) {
|
||||
_W_("push param error", K(ret));
|
||||
}
|
||||
}
|
||||
ret = rpc.req_start_pos_by_log_id_2(req, resp);
|
||||
_I_("----------------------------------------");
|
||||
_I_("start_log_id", K(start_log_ids[0]), K(start_log_ids[1]), K(start_log_ids[2]));
|
||||
_I_("req_start_pos_by_log_id finish", K(ret), K(req), K(resp));
|
||||
_I_("----------------------------------------");
|
||||
}
|
||||
}
|
||||
|
||||
void ts_case_1()
|
||||
{
|
||||
// normal test
|
||||
int64_t start_ts = 1460969850000000;
|
||||
TsResp resp;
|
||||
test_ts_break(start_ts, resp);
|
||||
}
|
||||
|
||||
void ts_case_2()
|
||||
{
|
||||
// large enough, test handle_cold_pkeys, get predict value
|
||||
int64_t start_ts = 1500000000000000;
|
||||
TsResp resp;
|
||||
test_ts_break(start_ts, resp);
|
||||
}
|
||||
|
||||
void ts_case_3()
|
||||
{
|
||||
// large enough, test handle cold by last info
|
||||
int64_t start_ts = 1460970107619884 + 1;
|
||||
TsResp resp;
|
||||
test_ts_break(start_ts, resp);
|
||||
}
|
||||
|
||||
void ts_case_4()
|
||||
{
|
||||
// small enough, test after_scan
|
||||
int64_t start_ts = 1400000000080000;
|
||||
TsResp resp;
|
||||
test_ts_break(start_ts, resp);
|
||||
}
|
||||
|
||||
void ts_case_5()
|
||||
{
|
||||
// test break
|
||||
int ret = OB_SUCCESS;
|
||||
int64_t start_ts = 1400000000080000;
|
||||
ObNetClient net_client;
|
||||
if (OB_FAIL(net_client.init())) {
|
||||
_E_("net client init error", K(ret));
|
||||
} else {
|
||||
MockFectherInterface rpc(net_client);
|
||||
rpc.set_svr(get_svr_addr());
|
||||
rpc.set_timeout(get_timeout());
|
||||
|
||||
_I_("++++++++++++++++++++++++++++++++++++++++");
|
||||
TsReq req;
|
||||
TsResp resp;
|
||||
bool stop = false;
|
||||
for (int i = 0; OB_SUCC(ret) && i < N; i++) {
|
||||
TsReqParam param;
|
||||
param.pkey_ = pks[i];
|
||||
param.start_tstamp_ = start_ts;
|
||||
if (OB_FAIL(req.append_param(param))) {
|
||||
_W_("push param error", K(ret));
|
||||
}
|
||||
}
|
||||
|
||||
while (!stop) {
|
||||
stop = true;
|
||||
ret = rpc.req_start_log_id_by_ts_2(req, resp);
|
||||
_I_("----------------------------------------");
|
||||
_I_("start_ts:", K(start_ts));
|
||||
_I_("req_start_log_id_by_ts_with_breakpoint finish", K(ret), K(req), K(resp));
|
||||
_I_("----------------------------------------");
|
||||
|
||||
const TsRespResultArray &res_arr = resp.get_results();
|
||||
TsReqParamArray param_arr = req.get_params();
|
||||
int64_t i = 0;
|
||||
int64_t res_count = res_arr.count();
|
||||
req.reset();
|
||||
for (i = 0; OB_SUCC(ret) && i < res_count; i++) {
|
||||
const TsRespResult &res = res_arr[i];
|
||||
if (OB_EXT_HANDLE_UNFINISH == res.err_) {
|
||||
TsReqParam param;
|
||||
param.pkey_ = param_arr[i].pkey_;
|
||||
param.start_tstamp_ = start_ts;
|
||||
param.break_info_.break_file_id_ = res.break_info_.break_file_id_;
|
||||
param.break_info_.min_greater_log_id_ = res.break_info_.min_greater_log_id_;
|
||||
ret = req.append_param(param);
|
||||
stop = false;
|
||||
} else {
|
||||
// finished pkey
|
||||
}
|
||||
}
|
||||
resp.reset();
|
||||
if (OB_FAIL(ret)) {
|
||||
_W_("re-send rpc error", K(ret));
|
||||
}
|
||||
}
|
||||
_I_("++++++++++++++++++++++++++++++++++++++++");
|
||||
}
|
||||
}
|
||||
|
||||
//----
|
||||
void id_case_1()
|
||||
{
|
||||
// large enough, test handle_cold_pkeys_by_sw
|
||||
uint64_t start_log_ids[N] = {1000, 1000, 1000};
|
||||
IdResp resp;
|
||||
test_id_break(start_log_ids, resp);
|
||||
}
|
||||
|
||||
void id_case_2()
|
||||
{
|
||||
// min_log_id in last_info_block, test handle_cold_pkeys_by_last_info_block
|
||||
uint64_t start_log_ids[N] = {251, 251, 251};
|
||||
IdResp resp;
|
||||
test_id_break(start_log_ids, resp);
|
||||
}
|
||||
|
||||
void id_case_3()
|
||||
{
|
||||
// normal case
|
||||
uint64_t start_log_ids[N] = {230, 230, 230};
|
||||
IdResp resp;
|
||||
test_id_break(start_log_ids, resp);
|
||||
}
|
||||
|
||||
void id_case_4()
|
||||
{
|
||||
// test break
|
||||
int ret = OB_SUCCESS;
|
||||
uint64_t start_log_ids[N] = {1, 1, 1};
|
||||
ObNetClient net_client;
|
||||
if (OB_FAIL(net_client.init())) {
|
||||
_E_("net client init error", K(ret));
|
||||
} else {
|
||||
MockFectherInterface rpc(net_client);
|
||||
rpc.set_svr(get_svr_addr());
|
||||
rpc.set_timeout(get_timeout());
|
||||
|
||||
_I_("++++++++++++++++++++++++++++++++++++++++");
|
||||
IdReq req;
|
||||
IdResp resp;
|
||||
bool stop = false;
|
||||
for (int i = 0; OB_SUCC(ret) && i < N; i++) {
|
||||
IdReqParam param;
|
||||
param.pkey_ = pks[i];
|
||||
param.start_log_id_ = start_log_ids[i];
|
||||
if (OB_FAIL(req.append_param(param))) {
|
||||
_W_("push param error", K(ret));
|
||||
}
|
||||
}
|
||||
|
||||
while (!stop) {
|
||||
stop = true;
|
||||
ret = rpc.req_start_pos_by_log_id_2(req, resp);
|
||||
_I_("----------------------------------------");
|
||||
_I_("req_start_pos_by_log_id_with_breakpoint finish", K(ret), K(req), K(resp));
|
||||
_I_("----------------------------------------");
|
||||
|
||||
const IdRespResultArray &res_arr = resp.get_results();
|
||||
IdReqParamArray param_arr = req.get_params();
|
||||
int64_t i = 0;
|
||||
int64_t res_count = res_arr.count();
|
||||
req.reset();
|
||||
for (i = 0; OB_SUCC(ret) && i < res_count; i++) {
|
||||
const IdRespResult &res = res_arr[i];
|
||||
if (OB_EXT_HANDLE_UNFINISH == res.err_) {
|
||||
IdReqParam param;
|
||||
param.pkey_ = param_arr[i].pkey_;
|
||||
param.start_log_id_ = start_log_ids[i];
|
||||
param.break_info_.break_file_id_ = res.break_info_.break_file_id_;
|
||||
param.break_info_.min_greater_log_id_ = res.break_info_.min_greater_log_id_;
|
||||
ret = req.append_param(param);
|
||||
stop = false;
|
||||
} else {
|
||||
// finished pkey
|
||||
}
|
||||
}
|
||||
resp.reset();
|
||||
if (OB_FAIL(ret)) {
|
||||
_W_("re-send rpc error", K(ret));
|
||||
}
|
||||
}
|
||||
_I_("++++++++++++++++++++++++++++++++++++++++");
|
||||
}
|
||||
}
|
||||
|
||||
void ts_test()
|
||||
{
|
||||
ts_case_1();
|
||||
ts_case_2();
|
||||
ts_case_3();
|
||||
ts_case_4();
|
||||
ts_case_5();
|
||||
}
|
||||
|
||||
void id_test()
|
||||
{
|
||||
id_case_1();
|
||||
id_case_2();
|
||||
id_case_3();
|
||||
id_case_4();
|
||||
}
|
||||
|
||||
void test_id_cold()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObNetClient net_client;
|
||||
if (OB_FAIL(net_client.init())) {
|
||||
_E_("net client init error", K(ret));
|
||||
} else {
|
||||
MockFectherInterface rpc(net_client);
|
||||
rpc.set_svr(get_svr_addr());
|
||||
rpc.set_timeout(get_timeout());
|
||||
|
||||
IdReq req;
|
||||
IdResp resp;
|
||||
|
||||
ObPartitionKey pkey(1099511677782, 0, 1);
|
||||
IdReqParam param;
|
||||
param.pkey_ = pkey;
|
||||
param.start_log_id_ = 5;
|
||||
if (OB_FAIL(req.append_param(param))) {
|
||||
_W_("push param error", K(ret));
|
||||
}
|
||||
ret = rpc.req_start_pos_by_log_id_2(req, resp);
|
||||
_I_("----------------------------------------");
|
||||
_I_("req_start_pos_by_log_id finish", K(ret), K(req), K(resp));
|
||||
_I_("----------------------------------------");
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
UNUSED(argc);
|
||||
UNUSED(argv);
|
||||
ObLogger::get_logger().set_mod_log_levels("ALL.*:INFO, TLOG.*:INFO");
|
||||
|
||||
test_id_cold();
|
||||
|
||||
return 0;
|
||||
}
|
||||
unittest/obcdc/nopretest_test_ext_fetcher.cpp (new file, 364 lines)
@@ -0,0 +1,364 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "common/ob_queue_thread.h"
|
||||
#include "ob_log_fetcher_rpc_interface.h"
|
||||
#include "clog/ob_log_entry.h"
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
using namespace common;
|
||||
using namespace obrpc;
|
||||
using namespace liboblog;
|
||||
using namespace liboblog::fetcher;
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
class MockFectherInterface : public IFetcherRpcInterface
|
||||
{
|
||||
public:
|
||||
MockFectherInterface(ObNetClient &net_client,
|
||||
const uint64_t tenant_id = OB_SYS_TENANT_ID)
|
||||
: net_client_(net_client),
|
||||
tenant_id_(tenant_id)
|
||||
{
|
||||
svr_finder_ = NULL;
|
||||
}
|
||||
void set_svr(const ObAddr &svr)
|
||||
{
|
||||
svr_ = svr;
|
||||
}
|
||||
virtual const ObAddr& get_svr() const
|
||||
{
|
||||
return svr_;
|
||||
}
|
||||
void set_timeout(const int64_t timeout)
|
||||
{
|
||||
timeout_ = timeout;
|
||||
}
|
||||
virtual int req_start_log_id_by_ts(const ObLogReqStartLogIdByTsRequest &req,
|
||||
ObLogReqStartLogIdByTsResponse &resp)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(resp);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int req_start_log_id_by_ts_2(
|
||||
const ObLogReqStartLogIdByTsRequestWithBreakpoint &req,
|
||||
ObLogReqStartLogIdByTsResponseWithBreakpoint &resp)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(resp);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
|
||||
virtual int req_start_pos_by_log_id(
|
||||
const ObLogReqStartPosByLogIdRequest &req,
|
||||
ObLogReqStartPosByLogIdResponse &resp)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(resp);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int req_start_pos_by_log_id_2(
|
||||
const ObLogReqStartPosByLogIdRequestWithBreakpoint& req,
|
||||
ObLogReqStartPosByLogIdResponseWithBreakpoint& resp)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(resp);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int fetch_log(const ObLogExternalFetchLogRequest& req,
|
||||
ObLogExternalFetchLogResponse& resp)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(resp);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int req_heartbeat_info(const ObLogReqHeartbeatInfoRequest& req,
|
||||
ObLogReqHeartbeatInfoResponse& resp)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(resp);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int req_leader_heartbeat(
|
||||
const obrpc::ObLogLeaderHeartbeatReq &req,
|
||||
obrpc::ObLogLeaderHeartbeatResp &res)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int req_svr_feedback(const ReqLogSvrFeedback &feedback)
|
||||
{
|
||||
UNUSED(feedback);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
|
||||
virtual int open_stream(const ObLogOpenStreamReq &req,
|
||||
ObLogOpenStreamResp &resp)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObLogExternalProxy proxy;
|
||||
if (OB_SUCCESS != (ret = net_client_.get_proxy(proxy))) {
|
||||
_E_("err get proxy", K(ret));
|
||||
} else {
|
||||
ret = proxy.to(svr_).by(tenant_id_).timeout(timeout_).open_stream(req, resp);
|
||||
int err = proxy.get_result_code().rcode_;
|
||||
if (_FAIL_(ret) && _FAIL_(err)) {
|
||||
_W_("err rpc req heartbeat info", K(ret), "result_code", err,
|
||||
"svr", get_svr(), K(req));
|
||||
resp.reset();
|
||||
resp.set_err(OB_ERR_SYS);
|
||||
ret = OB_SUCCESS;
|
||||
}
|
||||
else { }
|
||||
_D_("rpc: open_stream", K(ret), "svr", get_svr(), K(req), K(resp));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int fetch_stream_log(const ObLogStreamFetchLogReq &req,
|
||||
ObLogStreamFetchLogResp &resp)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObLogExternalProxy proxy;
|
||||
if (OB_SUCCESS != (ret = net_client_.get_proxy(proxy))) {
|
||||
_E_("err get proxy", K(ret));
|
||||
} else {
|
||||
ret = proxy.to(svr_).by(tenant_id_).timeout(timeout_).stream_fetch_log(req, resp);
|
||||
int err = proxy.get_result_code().rcode_;
|
||||
if (_FAIL_(ret) && _FAIL_(err)) {
|
||||
_W_("err rpc req heartbeat info", K(ret), "result_code", err,
|
||||
"svr", get_svr(), K(req));
|
||||
resp.reset();
|
||||
resp.set_err(OB_ERR_SYS);
|
||||
ret = OB_SUCCESS;
|
||||
}
|
||||
else { }
|
||||
_D_("rpc: stream_fetch_log", K(ret), "svr", get_svr(), K(req), K(resp));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
private:
|
||||
ObNetClient &net_client_;
|
||||
SvrFinder *svr_finder_;
|
||||
ObAddr svr_;
|
||||
uint64_t tenant_id_;
|
||||
int64_t timeout_;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
using namespace oceanbase::common;
|
||||
using namespace oceanbase::common::sqlclient;
|
||||
using namespace oceanbase::obrpc;
|
||||
using namespace oceanbase::liboblog;
|
||||
using namespace oceanbase::unittest;
|
||||
using namespace oceanbase::clog;
|
||||
|
||||
ObAddr get_svr_addr()
|
||||
{
|
||||
ObAddr svr;
|
||||
int32_t port = 27800;
|
||||
svr.set_ip_addr("100.81.140.76", port);
|
||||
// int32_t port = 27800;
|
||||
// svr.set_ip_addr("10.210.170.16", port);
|
||||
return svr;
|
||||
}
|
||||
|
||||
int64_t get_timeout()
|
||||
{
|
||||
return 60L * 1000 * 1000;
|
||||
}
|
||||
|
||||
//#define PKEY_COUNT 1
|
||||
#define PKEY_COUNT 2
|
||||
ObPartitionKey pks[PKEY_COUNT];
|
||||
ObCond table_ready;
|
||||
int64_t trans_log_count_recved[PKEY_COUNT];
|
||||
uint64_t start_log_id[PKEY_COUNT];
|
||||
|
||||
#define INSERT_COUNT 9
|
||||
#define LIFE_TIME (1000 * 1000 * 60)
|
||||
|
||||
void init_env()
|
||||
{
|
||||
const int64_t table_id = 1101710651081591;
|
||||
// const int64_t table_id = 1101710651081589;
|
||||
for (int i = 0; i < PKEY_COUNT; i++) {
|
||||
pks[i].init(table_id + i, 0, 1);
|
||||
trans_log_count_recved[i] = 0;
|
||||
start_log_id[i] = 1;
|
||||
}
|
||||
}
|
||||
|
||||
void report_log_recved()
|
||||
{
|
||||
for (int i = 0; i < PKEY_COUNT; i++) {
|
||||
fprintf(stdout, "pkey.table_id = %ld, trans_log_num = %ld, next_log_id = %ld\n", static_cast<int64_t>(pks[i].table_id_), trans_log_count_recved[i], start_log_id[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void recv_log(ObLogStreamFetchLogResp &fetch_resp)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
const int64_t log_num = fetch_resp.get_log_num();
|
||||
const char *buf = fetch_resp.get_log_entry_buf();
|
||||
ObLogEntry entry;
|
||||
int64_t pos = 0;
|
||||
int p = 0;
|
||||
for (int64_t idx = 0; idx < log_num; ++idx) {
|
||||
ret = entry.deserialize(buf, OB_MAX_LOG_BUFFER_SIZE, pos);
|
||||
ASSERT_EQ(OB_SUCCESS, ret);
|
||||
const ObLogEntryHeader &header = entry.get_header();
|
||||
_I_("recv clog_entry", K(ret), K(entry));
|
||||
for (p = 0; p < PKEY_COUNT && pks[p] != header.get_partition_key(); p++);
|
||||
ASSERT_TRUE(p < PKEY_COUNT);
|
||||
if (OB_LOG_SUBMIT == header.get_log_type()) {
|
||||
trans_log_count_recved[p]++;
|
||||
_I_("trans_log_count_recved", K(p), "pkey", pks[p], "trans_cnt", trans_log_count_recved[p]);
|
||||
}
|
||||
ASSERT_TRUE(header.get_log_id() == start_log_id[p]);
|
||||
start_log_id[p]++;
|
||||
}
|
||||
}
|
||||
|
||||
bool recv_all()
|
||||
{
|
||||
int i = 0;
|
||||
for (i = 0; (trans_log_count_recved[i] == INSERT_COUNT) && i < PKEY_COUNT; i++);
|
||||
// return i == PKEY_COUNT;
|
||||
return false;
|
||||
}
|
||||
|
||||
void start_fetch()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObNetClient net_client;
|
||||
ASSERT_EQ(OB_SUCCESS, net_client.init());
|
||||
MockFectherInterface rpc(net_client);
|
||||
rpc.set_svr(get_svr_addr());
|
||||
rpc.set_timeout(get_timeout());
|
||||
|
||||
int64_t c1 = 0;
|
||||
int64_t c2 = 0;
|
||||
int err = OB_SUCCESS;
|
||||
while (!recv_all()) {
|
||||
c1++;
|
||||
ObLogOpenStreamReq open_req;
|
||||
ObLogOpenStreamResp open_resp;
|
||||
for (int i = 0; OB_SUCC(ret) && i < PKEY_COUNT; i++) {
|
||||
ObLogOpenStreamReq::Param param;
|
||||
param.pkey_ = pks[i];
|
||||
param.start_log_id_ = start_log_id[i];
|
||||
ASSERT_EQ(OB_SUCCESS, open_req.append_param(param));
|
||||
}
|
||||
open_req.set_stream_lifetime(LIFE_TIME);
|
||||
ret = rpc.open_stream(open_req, open_resp);
|
||||
ASSERT_EQ(OB_SUCCESS, ret);
|
||||
ASSERT_TRUE(open_resp.get_stream_seq().is_valid());
|
||||
|
||||
_I_("open_stream success", K(open_resp));
|
||||
|
||||
const ObStreamSeq &seq = open_resp.get_stream_seq();
|
||||
const int64_t upper_lmt_ts = 100000000000000000L; // large enough
|
||||
const int64_t step = 100;
|
||||
c2 = 0;
|
||||
while (!recv_all()) {
|
||||
c2++;
|
||||
ObLogStreamFetchLogReq fetch_req;
|
||||
ObLogStreamFetchLogResp fetch_resp;
|
||||
ASSERT_EQ(OB_SUCCESS, fetch_req.set_stream_seq(seq));
|
||||
ASSERT_EQ(OB_SUCCESS, fetch_req.set_upper_limit_ts(upper_lmt_ts));
|
||||
ASSERT_EQ(OB_SUCCESS, fetch_req.set_log_cnt_per_part_per_round(step));
|
||||
|
||||
ret = rpc.fetch_stream_log(fetch_req, fetch_resp);
|
||||
ASSERT_EQ(OB_SUCCESS, ret);
|
||||
err = fetch_resp.get_err();
|
||||
if (OB_SUCCESS == err) {
|
||||
recv_log(fetch_resp);
|
||||
} else if (OB_STREAM_NOT_EXIST == err) {
|
||||
fprintf(stdout, "stream not exist\n");
|
||||
break;
|
||||
} else {
|
||||
fprintf(stdout, "error ret=%d\n", err);
|
||||
ASSERT_TRUE(false);
|
||||
}
|
||||
_I_("fetch", K(c1), K(c2));
|
||||
if (true && REACH_TIME_INTERVAL(1000 * 1000)) {
|
||||
fprintf(stdout, "--------------------------------------------------\n");
|
||||
fprintf(stdout, "fetch, c1 = %ld, c2 = %ld\n", c1, c2);
|
||||
report_log_recved();
|
||||
}
|
||||
usleep(1000 * 1000);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void del_stale()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObNetClient net_client;
|
||||
ASSERT_EQ(OB_SUCCESS, net_client.init());
|
||||
MockFectherInterface rpc(net_client);
|
||||
rpc.set_svr(get_svr_addr());
|
||||
rpc.set_timeout(get_timeout());
|
||||
|
||||
ObLogOpenStreamReq open_req;
|
||||
ObLogOpenStreamResp open_resp;
|
||||
for (int i = 0; OB_SUCC(ret) && i < PKEY_COUNT; i++) {
|
||||
ObLogOpenStreamReq::Param param;
|
||||
param.pkey_ = pks[i];
|
||||
param.start_log_id_ = start_log_id[i];
|
||||
ASSERT_EQ(OB_SUCCESS, open_req.append_param(param));
|
||||
}
|
||||
open_req.set_stream_lifetime(LIFE_TIME);
|
||||
ret = rpc.open_stream(open_req, open_resp);
|
||||
ASSERT_EQ(OB_SUCCESS, ret);
|
||||
ASSERT_TRUE(open_resp.get_stream_seq().is_valid());
|
||||
|
||||
_I_("open_stream success", K(open_resp));
|
||||
|
||||
const ObStreamSeq &first_seq = open_resp.get_stream_seq();
|
||||
|
||||
ObLogOpenStreamReq open_req2;
|
||||
ObLogOpenStreamResp open_resp2;
|
||||
for (int i = 0; OB_SUCC(ret) && i < PKEY_COUNT; i++) {
|
||||
ObLogOpenStreamReq::Param param;
|
||||
param.pkey_ = pks[i];
|
||||
param.start_log_id_ = start_log_id[i];
|
||||
ASSERT_EQ(OB_SUCCESS, open_req2.append_param(param));
|
||||
}
|
||||
open_req2.set_stale_stream(first_seq);
|
||||
open_req2.set_stream_lifetime(LIFE_TIME);
|
||||
ret = rpc.open_stream(open_req2, open_resp2);
|
||||
ASSERT_EQ(OB_SUCCESS, ret);
|
||||
ASSERT_TRUE(open_resp2.get_stream_seq().is_valid());
|
||||
|
||||
_I_("open_stream success", K(open_resp2));
|
||||
}
|
||||
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
UNUSED(argc);
|
||||
UNUSED(argv);
|
||||
system("rm els.log");
|
||||
OB_LOGGER.set_file_name("els.log", true);
|
||||
ObLogger::get_logger().set_mod_log_levels("ALL.*:INFO, TLOG.*:DEBUG");
|
||||
init_env();
|
||||
start_fetch();
|
||||
// del_stale();
|
||||
return 0;
|
||||
}
|
||||
unittest/obcdc/test_log_config.cpp (new file, 276 lines)
@@ -0,0 +1,276 @@
/**
 * Copyright (c) 2021 OceanBase
 * OceanBase CE is licensed under Mulan PubL v2.
 * You can use this software according to the terms and conditions of the Mulan PubL v2.
 * You may obtain a copy of Mulan PubL v2 at:
 *          http://license.coscl.org.cn/MulanPubL-2.0
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PubL v2 for more details.
 */

#define USING_LOG_PREFIX OBLOG

#include <gtest/gtest.h>
#include "ob_log_config.h"

#define ADD_CONFIG_INT(var, value) \
    do { \
      std::string name_str = #var; \
      std::string value_str = #value; \
      var = value; \
      databuff_printf(config_buf_, sizeof(config_buf_), config_buf_pos_, "%s=%ld\n", #var, var); \
      config_map_.erase(name_str); \
      config_map_.insert(std::pair<std::string, std::string>(name_str, value_str)); \
    } while (0)

#define ADD_CONFIG_STR(var, value) \
    do { \
      std::string name_str = #var; \
      std::string value_str = value; \
      var = value; \
      databuff_printf(config_buf_, sizeof(config_buf_), config_buf_pos_, "%s=%s\n", #var, var); \
      config_map_.erase(name_str); \
      config_map_.insert(std::pair<std::string, std::string>(name_str, value_str)); \
    } while (0)

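// Editor's note (illustration only, not part of the original file): each ADD_CONFIG_*
// invocation both assigns the test fixture member and appends a "name=value" line to
// config_buf_, while mirroring the same pair into config_map_ for load_from_map().
// With the values assigned in SetUp() below, the buffer therefore accumulates plain
// key=value text such as:
//   dml_parser_thread_num=100
//   sequencer_thread_num=200
//   log_level=INFO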
using namespace oceanbase::common;
namespace oceanbase
{
namespace liboblog
{
class TestLogConfig : public ::testing::Test
{
  static const int64_t MAX_CONFIG_BUFFER_SIZE = 1 << 10;
public:
  TestLogConfig() {}
  ~TestLogConfig() {}

  virtual void SetUp();
  virtual void TearDown() {}

public:
  int64_t dml_parser_thread_num;
  int64_t sequencer_thread_num;
  int64_t formatter_thread_num;
  int64_t instance_num;
  int64_t instance_index;
  const char *log_level;
  const char *cluster_url;
  const char *cluster_user;
  const char *cluster_password;
  const char *config_fpath;
  const char *cluster_appname;
  const char *cluster_db_name;
  const char *timezone;
  const char *tb_white_list;
  const char *tb_black_list;
  int64_t sql_conn_timeout_us;
  int64_t sql_query_timeout_us;

  int64_t unknown_int_config;
  const char *unknown_str_config;

  char config_buf_[MAX_CONFIG_BUFFER_SIZE];
  int64_t config_buf_pos_;

  std::map<std::string, std::string> config_map_;
};

void TestLogConfig::SetUp()
{
  config_buf_pos_ = 0;

  ADD_CONFIG_INT(dml_parser_thread_num, 100);
  ADD_CONFIG_INT(sequencer_thread_num, 200);
  ADD_CONFIG_INT(formatter_thread_num, 300);
  ADD_CONFIG_INT(instance_num, 1);
  ADD_CONFIG_INT(instance_index, 0);
  ADD_CONFIG_INT(sql_conn_timeout_us, 13000000000);
  ADD_CONFIG_INT(sql_query_timeout_us, 12000000000);

  ADD_CONFIG_STR(log_level, "INFO");
  ADD_CONFIG_STR(cluster_url, "http:://www.test_url.com/abcdefg/");
  ADD_CONFIG_STR(cluster_user, "cdc");
  ADD_CONFIG_STR(cluster_password, "V587");
  ADD_CONFIG_STR(config_fpath, "/home/abcdefg/hijklmn");
  ADD_CONFIG_STR(cluster_appname, "obcdc");
  ADD_CONFIG_STR(cluster_db_name, "oceanbase");
  ADD_CONFIG_STR(timezone, "+8:00");
  ADD_CONFIG_STR(tb_white_list, "*.*.*");
  ADD_CONFIG_STR(tb_black_list, "|");

  // test unknown config
  ADD_CONFIG_INT(unknown_int_config, 1010);
  ADD_CONFIG_STR(unknown_str_config, "unknown");
}

TEST_F(TestLogConfig, init)
{
  ObLogConfig *config = new ObLogConfig();

  EXPECT_EQ(OB_SUCCESS, config->init());
  // Right after init() the configuration items have not been checked yet,
  // so check_all() is expected to fail by default
  EXPECT_NE(OB_SUCCESS, config->check_all());
  config->print();
  if (NULL != config) {
    delete config;
    config = NULL;
  }
}

TEST_F(TestLogConfig, load_from_buffer)
{
  ObLogConfig *config = new ObLogConfig();
  EXPECT_EQ(OB_SUCCESS, config->init());

  EXPECT_EQ(OB_SUCCESS, config->load_from_buffer(config_buf_, strlen(config_buf_)));
  EXPECT_EQ(OB_SUCCESS, config->check_all());
  config->print();

  EXPECT_EQ(dml_parser_thread_num, config->dml_parser_thread_num);
  EXPECT_EQ(sequencer_thread_num, config->sequencer_thread_num);
  EXPECT_EQ(formatter_thread_num, config->formatter_thread_num);
  EXPECT_EQ(0, strcmp(cluster_url, config->cluster_url.str()));
  EXPECT_EQ(0, strcmp(log_level, config->log_level.str()));
  EXPECT_EQ(0, strcmp(cluster_user, config->cluster_user.str()));
  EXPECT_EQ(0, strcmp(cluster_password, config->cluster_password.str()));
  EXPECT_EQ(0, strcmp(config_fpath, config->config_fpath.str()));

  bool check_name = true;
  int64_t version = 0;
  EXPECT_NE(OB_SUCCESS,
      config->load_from_buffer(config_buf_, strlen(config_buf_), version, check_name));
  if (NULL != config) {
    delete config;
    config = NULL;
  }
}

TEST_F(TestLogConfig, load_from_map)
{
  ObLogConfig *config = new ObLogConfig();
  EXPECT_EQ(OB_SUCCESS, config->init());

  EXPECT_EQ(OB_SUCCESS, config->load_from_map(config_map_));
  EXPECT_EQ(OB_SUCCESS, config->check_all());
  config->print();

  EXPECT_EQ(dml_parser_thread_num, config->dml_parser_thread_num);
  EXPECT_EQ(sequencer_thread_num, config->sequencer_thread_num);
  EXPECT_EQ(formatter_thread_num, config->formatter_thread_num);
  EXPECT_EQ(0, strcmp(cluster_url, config->cluster_url.str()));
  EXPECT_EQ(0, strcmp(log_level, config->log_level.str()));
  EXPECT_EQ(0, strcmp(cluster_user, config->cluster_user.str()));
  EXPECT_EQ(0, strcmp(cluster_password, config->cluster_password.str()));
  EXPECT_EQ(0, strcmp(config_fpath, config->config_fpath.str()));

  bool check_name = true;
  int64_t version = 0;
  EXPECT_NE(OB_SUCCESS, config->load_from_map(config_map_, version, check_name));
  if (NULL != config) {
    delete config;
    config = NULL;
  }
}

TEST_F(TestLogConfig, load_from_file)
{
  // ObLogConfig is too large to place on the stack; a local variable would overflow it.
  // Construct the objects dynamically instead.
  ObLogConfig *config_from_buffer_ptr = new ObLogConfig();
  ObLogConfig *config_from_file_ptr = new ObLogConfig();

  EXPECT_EQ(OB_SUCCESS, config_from_buffer_ptr->init());
  EXPECT_EQ(OB_SUCCESS, config_from_file_ptr->init());
  ObLogConfig &config_from_buffer = *config_from_buffer_ptr;
  ObLogConfig &config_from_file = *config_from_file_ptr;
  const char *config_file = "liboblog.conf";

  // Load configuration items from the buffer and verify them
  EXPECT_EQ(OB_SUCCESS, config_from_buffer.load_from_buffer(config_buf_, strlen(config_buf_)));
  EXPECT_EQ(OB_SUCCESS, config_from_buffer.check_all());
  config_from_buffer.print();
  EXPECT_EQ(dml_parser_thread_num, config_from_buffer.dml_parser_thread_num);
  EXPECT_EQ(sequencer_thread_num, config_from_buffer.sequencer_thread_num);
  EXPECT_EQ(formatter_thread_num, config_from_buffer.formatter_thread_num);
  EXPECT_EQ(0, strcmp(cluster_url, config_from_buffer.cluster_url.str()));
  EXPECT_EQ(0, strcmp(log_level, config_from_buffer.log_level.str()));
  EXPECT_EQ(0, strcmp(cluster_user, config_from_buffer.cluster_user.str()));
  EXPECT_EQ(0, strcmp(cluster_password, config_from_buffer.cluster_password.str()));
  EXPECT_EQ(0, strcmp(config_fpath, config_from_buffer.config_fpath.str()));

  // Dump configuration items into a file
  EXPECT_EQ(OB_SUCCESS, config_from_buffer.dump2file(config_file));

  // Load configuration items back from the file
  EXPECT_EQ(OB_SUCCESS, config_from_file.load_from_file(config_file));

  // Verify the configuration items
  config_from_file.print();
  EXPECT_EQ(dml_parser_thread_num, config_from_file.dml_parser_thread_num);
  EXPECT_EQ(sequencer_thread_num, config_from_file.sequencer_thread_num);
  EXPECT_EQ(formatter_thread_num, config_from_file.formatter_thread_num);
  EXPECT_EQ(0, strcmp(cluster_url, config_from_file.cluster_url.str()));
  EXPECT_EQ(0, strcmp(log_level, config_from_file.log_level.str()));
  EXPECT_EQ(0, strcmp(cluster_user, config_from_file.cluster_user.str()));
  EXPECT_EQ(0, strcmp(cluster_password, config_from_file.cluster_password.str()));
  EXPECT_EQ(0, strcmp(config_fpath, config_from_file.config_fpath.str()));

  if (NULL != config_from_buffer_ptr) {
    delete config_from_buffer_ptr;
    config_from_buffer_ptr = NULL;
  }

  if (NULL != config_from_file_ptr) {
    delete config_from_file_ptr;
    config_from_file_ptr = NULL;
  }
}

// Check that ObLogConfig::check_all() actually formats cluster_url:
// by default check_all() strips the surrounding double quotes from cluster_url
TEST_F(TestLogConfig, format_cluster_url)
{
  ObLogConfig *config = new ObLogConfig();
  EXPECT_EQ(OB_SUCCESS, config->init());
  const char *URL = "http://abc.com/def/hijklmn";
  char cluster_url[1024];

  ASSERT_EQ(OB_SUCCESS, config->load_from_buffer(config_buf_, strlen(config_buf_)));
  ASSERT_EQ(OB_SUCCESS, config->check_all());

  snprintf(cluster_url, sizeof(cluster_url), "\"");
  ASSERT_TRUE(config->cluster_url.set_value(cluster_url));
  ASSERT_NE(OB_SUCCESS, config->format_cluster_url());

  snprintf(cluster_url, sizeof(cluster_url), "\"\"");
  ASSERT_TRUE(config->cluster_url.set_value(cluster_url));
  ASSERT_NE(OB_SUCCESS, config->format_cluster_url());

  snprintf(cluster_url, sizeof(cluster_url), "\"%s\"", URL);
  ASSERT_TRUE(config->cluster_url.set_value(cluster_url));
  ASSERT_EQ(OB_SUCCESS, config->format_cluster_url());
  EXPECT_EQ(0, strcmp(URL, config->cluster_url.str()));

  // Single quotes are not handled
  snprintf(cluster_url, sizeof(cluster_url), "\'\'");
  ASSERT_TRUE(config->cluster_url.set_value(cluster_url));
  ASSERT_EQ(OB_SUCCESS, config->format_cluster_url());
  if (NULL != config) {
    delete config;
    config = NULL;
  }
}

} // namespace liboblog
} // namespace oceanbase

int main(int argc, char **argv)
{
  OB_LOGGER.set_file_name("test_log_config.log", true);
  OB_LOGGER.set_log_level(OB_LOG_LEVEL_INFO);
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
unittest/obcdc/test_log_fake_common_config.cpp (new file, 47 lines)
@@ -0,0 +1,47 @@
/**
 * Copyright (c) 2021 OceanBase
 * OceanBase CE is licensed under Mulan PubL v2.
 * You can use this software according to the terms and conditions of the Mulan PubL v2.
 * You may obtain a copy of Mulan PubL v2 at:
 *          http://license.coscl.org.cn/MulanPubL-2.0
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PubL v2 for more details.
 */

#include <gtest/gtest.h>
#include "ob_log_fake_common_config.h"

using namespace oceanbase::common;

namespace oceanbase
{
namespace liboblog
{
class TestLogFakeCommonConfig : public ::testing::Test
{
public:
  TestLogFakeCommonConfig() {}
  ~TestLogFakeCommonConfig() {}

  virtual void SetUp() {}
  virtual void TearDown() {}
};

TEST_F(TestLogFakeCommonConfig, common_test)
{
  ObLogFakeCommonConfig fake_config;
  EXPECT_EQ(OB_OBLOG, fake_config.get_server_type());
}

}
}

int main(int argc, char **argv)
{
  oceanbase::common::ObLogger::get_logger().set_log_level("INFO");
  OB_LOGGER.set_log_level("INFO");
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
unittest/obcdc/test_log_fetcher.cpp (new file, 164 lines)
@@ -0,0 +1,164 @@
/**
 * Copyright (c) 2021 OceanBase
 * OceanBase CE is licensed under Mulan PubL v2.
 * You can use this software according to the terms and conditions of the Mulan PubL v2.
 * You may obtain a copy of Mulan PubL v2 at:
 *          http://license.coscl.org.cn/MulanPubL-2.0
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 * See the Mulan PubL v2 for more details.
 */

#include <cstdlib>
#include <gtest/gtest.h>

#include "lib/allocator/ob_malloc.h"
#include "lib/allocator/ob_concurrent_fifo_allocator.h"
#include "lib/container/ob_array.h"

#include "obcdc/src/ob_i_log_fetcher.h"
#include "obcdc/src/ob_log_fetcher_utils.h"
#include "obcdc/src/ob_log_fetcher.h"

#include "test_log_fetcher_common_utils.h"

using namespace oceanbase;
using namespace common;
using namespace liboblog;
using namespace storage;
using namespace transaction;
using namespace clog;
using namespace fetcher;

namespace oceanbase
{
namespace unittest
{

/*
 * Manual:
 * - This test allows you to fetch log data from a single observer.
 * - Partitions and data are set up by this test.
 */
/*
 * Fetch Log Test.
 * Use schema 1.
 */
TEST(DISABLED_ObLogFetcherEnhanced, FetchLogTest1)
//TEST(ObLogFetcherEnhanced, FetchLogTest1)
{
  ObClockGenerator::init();

  // Prepare svr.
  SvrCfg svr_cfg;
  svr_cfg.svr_addr_ = "10.210.177.162";
  svr_cfg.internal_port_ = 43000;
  svr_cfg.mysql_port_ = 43001;
  svr_cfg.mysql_db_ = "oceanbase";
  svr_cfg.mysql_password_ = "";
  svr_cfg.mysql_user_ = "root";
  svr_cfg.mysql_timeout_ = 1 * _SEC_;

  // Prepare table.
  ObArray<ObPartitionKey> pkeys;
  const int64_t table_cnt = 3;
  prepare_table_1(svr_cfg,
                  prepare_table_name_1(),
                  table_cnt,
                  prepare_table_schema_1(),
                  pkeys);

  // Print them.
  for (int64_t idx = 0; idx < pkeys.count(); ++idx) {
    ObPartitionKey &key = pkeys.at(idx);
    _I_(">>> add partition key", K(key));
  }

  // Prepare svr provider.
  MockSvrProvider1 svr_provider;
  ObAddr addr(ObAddr::IPV4, svr_cfg.svr_addr_, svr_cfg.mysql_port_);
  svr_provider.add_svr(addr);

  // Prepare err handler.
  MockLiboblogErrHandler1 err_handler;

  // Prepare parser.
  MockParser1 mock_parser;

  // Fetcher config.
  FetcherConfig fcfg;
  fcfg.reset();

  ObConcurrentFIFOAllocator fifo;
  int64_t G = 1024 * 1024 * 1024;
  EXPECT_EQ(OB_SUCCESS, fifo.init(1 * G, 1 * G, OB_MALLOC_BIG_BLOCK_SIZE));

  // Task Pool
  ObLogTransTaskPool<PartTransTask> task_pool;
  EXPECT_EQ(OB_SUCCESS, task_pool.init(&fifo, 10240, 1024, 4 * 1024L * 1024L, true));

  // Prepare fetcher.
  ObLogFetcherEnhanced fetcher;
  int ret = fetcher.init(&mock_parser,
                         &err_handler,
                         &svr_provider,
                         &task_pool,
                         fcfg);
  EXPECT_EQ(OB_SUCCESS, ret);

  // Add partitions.
  for (int64_t idx = 0; idx < pkeys.count(); ++idx) {
    ret = fetcher.start_fetch(pkeys.at(idx), 1);
    EXPECT_EQ(OB_SUCCESS, ret);
  }

  // Start worker.
  ret = fetcher.start();
  EXPECT_EQ(OB_SUCCESS, ret);
  _I_(">>> Start fetch");

  // Generate data.
  _I_(">>> Generate data");
  const int64_t trans_cnt_per_part = 100;
  const int64_t part_cnt = table_cnt; // partition cnt == table cnt.
  const int64_t trans_cnt = part_cnt * trans_cnt_per_part;
  ConnectorConfig cfg = prepare_cfg_1(svr_cfg);
  for (int64_t idx = 0; idx < table_cnt; ++idx) {
    DataGenerator1 gen(cfg);
    gen.insert(prepare_table_name_1()[idx], 0, trans_cnt_per_part);
    gen.join();
  }

  // Wait.
  while (mock_parser.get_trans_cnt() < trans_cnt) {
    usec_sleep(1 * _SEC_);
    _I_(">>> Waiting...");
  }

  // Stop everything.
  _I_(">>> Stop fetch");
  for (int64_t idx = 0; idx < pkeys.count(); ++idx) {
    ret = fetcher.stop_fetch(pkeys.at(idx));
    EXPECT_EQ(OB_SUCCESS, ret);
  }

  fetcher.stop();
  ret = fetcher.destroy();
  EXPECT_EQ(OB_SUCCESS, ret);
}

}
}

int main(int argc, char **argv)
{
  ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
  testing::InitGoogleTest(&argc, argv);
  // testing::FLAGS_gtest_filter = "DO_NOT_RUN";
  return RUN_ALL_TESTS();
}
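Note that FetchLogTest1 above is registered under the DISABLED_ prefix, so gtest skips it by default. A sketch of how it could be invoked against a live observer, assuming a test binary named test_log_fetcher is built from this source (no such target appears in the CMakeLists.txt above, so the name is hypothetical); the gtest switches themselves are standard:

./test_log_fetcher --gtest_also_run_disabled_tests --gtest_filter='DISABLED_ObLogFetcherEnhanced.FetchLogTest1'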
unittest/obcdc/test_log_fetcher_common_utils.h (new file, 1494 lines)
(diff suppressed because it is too large)
unittest/obcdc/test_log_fetcher_heartbeat_mgr.cpp (new file, 463 lines)
@@ -0,0 +1,463 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include <vector>
|
||||
|
||||
#include "share/ob_define.h"
|
||||
|
||||
#include "obcdc/src/ob_log_fetcher_heartbeat_mgr.h"
|
||||
|
||||
#include "test_log_fetcher_common_utils.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
using namespace fetcher;
|
||||
using namespace transaction;
|
||||
using namespace storage;
|
||||
using namespace clog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
|
||||
/*
|
||||
* Heartbeater Tests.
|
||||
*/
|
||||
/*
|
||||
* Basic function test 1.
|
||||
* - N thread & M requests for each thread
|
||||
* - result timestamp == next log id
|
||||
* - rpc always succeed, no server internal error
|
||||
* - rpc interface returns correct result or an error code randomly
|
||||
* (30% correct so most requests are sent to at least 2 servers)
|
||||
*/
|
||||
namespace basic_func_test_1
|
||||
{
|
||||
class MockRpcInterface : public IFetcherRpcInterface
|
||||
{
|
||||
public:
|
||||
~MockRpcInterface() {}
|
||||
virtual void set_svr(const common::ObAddr& svr) { UNUSED(svr); }
|
||||
virtual const ObAddr& get_svr() const { static ObAddr svr; return svr; }
|
||||
virtual void set_timeout(const int64_t timeout) { UNUSED(timeout); }
|
||||
virtual int req_start_log_id_by_ts(
|
||||
const obrpc::ObLogReqStartLogIdByTsRequest& req,
|
||||
obrpc::ObLogReqStartLogIdByTsResponse& res)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
virtual int req_start_log_id_by_ts_2(const obrpc::ObLogReqStartLogIdByTsRequestWithBreakpoint &req,
|
||||
obrpc::ObLogReqStartLogIdByTsResponseWithBreakpoint &res) {
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
virtual int req_start_pos_by_log_id_2(const obrpc::ObLogReqStartPosByLogIdRequestWithBreakpoint &req,
|
||||
obrpc::ObLogReqStartPosByLogIdResponseWithBreakpoint &res) {
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
virtual int req_start_pos_by_log_id(
|
||||
const obrpc::ObLogReqStartPosByLogIdRequest& req,
|
||||
obrpc::ObLogReqStartPosByLogIdResponse& res)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
virtual int fetch_log(
|
||||
const obrpc::ObLogExternalFetchLogRequest& req,
|
||||
obrpc::ObLogExternalFetchLogResponse& res)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
virtual int req_heartbeat_info(
|
||||
const obrpc::ObLogReqHeartbeatInfoRequest& req,
|
||||
obrpc::ObLogReqHeartbeatInfoResponse& res)
|
||||
{
|
||||
res.reset();
|
||||
// Seed.
|
||||
int64_t seed = (get_timestamp());
|
||||
for (int64_t idx = 0, cnt = req.get_params().count(); idx < cnt; ++idx) {
|
||||
// 30%.
|
||||
bool succeed = ((idx + seed) % 100) < 30;
|
||||
obrpc::ObLogReqHeartbeatInfoResponse::Result result;
|
||||
result.reset();
|
||||
result.err_ = (succeed) ? OB_SUCCESS : OB_NEED_RETRY;
|
||||
result.tstamp_ = (succeed) ? (int64_t)(req.get_params().at(idx).log_id_) : OB_INVALID_TIMESTAMP;
|
||||
EXPECT_EQ(OB_SUCCESS, res.append_result(result));
|
||||
}
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
|
||||
virtual int req_leader_heartbeat(
|
||||
const obrpc::ObLogLeaderHeartbeatReq &req,
|
||||
obrpc::ObLogLeaderHeartbeatResp &res)
|
||||
{
|
||||
res.reset();
|
||||
res.set_err(OB_SUCCESS);
|
||||
res.set_debug_err(OB_SUCCESS);
|
||||
// Seed.
|
||||
int64_t seed = (get_timestamp());
|
||||
for (int64_t idx = 0, cnt = req.get_params().count(); idx < cnt; ++idx) {
|
||||
obrpc::ObLogLeaderHeartbeatResp::Result result;
|
||||
const obrpc::ObLogLeaderHeartbeatReq::Param ¶m = req.get_params().at(idx);
|
||||
// 30%.
|
||||
bool succeed = ((idx + seed) % 100) < 30;
|
||||
|
||||
result.reset();
|
||||
result.err_ = succeed ? OB_SUCCESS : OB_NOT_MASTER;
|
||||
result.next_served_log_id_ = param.next_log_id_;
|
||||
result.next_served_ts_ = succeed ? get_timestamp() : 1;
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, res.append_result(result));
|
||||
}
|
||||
|
||||
_D_(">>> heartbeat", K(req), K(res));
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
|
||||
virtual int open_stream(const obrpc::ObLogOpenStreamReq &req,
|
||||
obrpc::ObLogOpenStreamResp &res) {
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
|
||||
virtual int fetch_stream_log(const obrpc::ObLogStreamFetchLogReq &req,
|
||||
obrpc::ObLogStreamFetchLogResp &res) {
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
virtual int req_svr_feedback(const ReqLogSvrFeedback &feedback)
|
||||
{
|
||||
UNUSED(feedback);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
};
|
||||
|
||||
/*
|
||||
* Factory.
|
||||
*/
|
||||
class MockRpcInterfaceFactory : public IFetcherRpcInterfaceFactory
|
||||
{
|
||||
public:
|
||||
virtual int new_fetcher_rpc_interface(IFetcherRpcInterface*& rpc)
|
||||
{
|
||||
rpc = new MockRpcInterface();
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int delete_fetcher_rpc_interface(IFetcherRpcInterface* rpc)
|
||||
{
|
||||
delete rpc;
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
};
|
||||
|
||||
////////////////////// test basic function //////////////////////////////////////////
|
||||
/*
|
||||
* Test HeartbeatRequest
|
||||
*/
|
||||
TEST(Heartbeater, BasicFuncTest1)
|
||||
{
|
||||
// Build Heartbeater requests.
|
||||
const int64_t AllSvrCnt = 3;
|
||||
ObAddr svrs[AllSvrCnt];
|
||||
for (int64_t idx = 0, cnt = AllSvrCnt; idx < cnt; ++idx) {
|
||||
svrs[idx] = ObAddr(ObAddr::IPV4, "127.0.0.1", (int32_t)(idx + 1000));
|
||||
}
|
||||
const int64_t HeartbeatRequestCnt = 10000;
|
||||
HeartbeatRequest *request_array = static_cast<HeartbeatRequest*>(ob_malloc(
|
||||
HeartbeatRequestCnt * sizeof(HeartbeatRequest)));
|
||||
// test assignment
|
||||
for (int64_t idx = 0, cnt = HeartbeatRequestCnt; idx < cnt; ++idx) {
|
||||
HeartbeatRequest &r = request_array[idx];
|
||||
r.reset();
|
||||
// reset IDLE
|
||||
EXPECT_EQ(HeartbeatRequest::IDLE, r.get_state());
|
||||
r.pkey_ = ObPartitionKey((uint64_t)(1000 + idx), 0, 1);
|
||||
r.next_log_id_ = (uint64_t)(1 + idx);
|
||||
r.svr_ = svrs[idx % AllSvrCnt];
|
||||
// test getter and setter
|
||||
EXPECT_EQ(HeartbeatRequest::IDLE, r.get_state());
|
||||
r.set_state(HeartbeatRequest::REQ);
|
||||
EXPECT_EQ(HeartbeatRequest::REQ, r.get_state());
|
||||
r.set_state(HeartbeatRequest::DONE);
|
||||
EXPECT_EQ(HeartbeatRequest::DONE, r.get_state());
|
||||
}
|
||||
|
||||
ob_free(request_array);
|
||||
request_array = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Test Heartbeater
|
||||
*/
|
||||
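// Note on the test below: requests are only queued via async_heartbeat_req and
// the heartbeater is destroyed right afterwards, so the test covers enqueue and
// teardown; it does not wait for the mock RPC responses to come back.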
TEST(Heartbeater, BasicFuncTest2)
|
||||
{
|
||||
// Build Heartbeater requests.
|
||||
const int64_t AllSvrCnt = 3;
|
||||
ObAddr svrs[AllSvrCnt];
|
||||
for (int64_t idx = 0, cnt = AllSvrCnt; idx < cnt; ++idx) {
|
||||
svrs[idx] = ObAddr(ObAddr::IPV4, "127.0.0.1", (int32_t)(idx + 1000));
|
||||
}
|
||||
const int64_t HeartbeatRequestCnt = 10000;
|
||||
HeartbeatRequest *request_array = static_cast<HeartbeatRequest*>(ob_malloc(
|
||||
HeartbeatRequestCnt * sizeof(HeartbeatRequest)));
|
||||
for (int64_t idx = 0, cnt = HeartbeatRequestCnt; idx < cnt; ++idx) {
|
||||
HeartbeatRequest &r = request_array[idx];
|
||||
r.reset();
|
||||
r.pkey_ = ObPartitionKey((uint64_t)(1000 + idx), 0, 1);
|
||||
r.next_log_id_ = (uint64_t)(1 + idx);
|
||||
r.svr_ = svrs[idx % AllSvrCnt];
|
||||
}
|
||||
// Heartbeater
|
||||
Heartbeater heartbeater;
|
||||
MockRpcInterfaceFactory rpc_factory;
|
||||
MockFetcherErrHandler1 err_handler1;
|
||||
FixedJobPerWorkerPool worker_pool;
|
||||
const int64_t heartbeat_worker_cnt = 3;
|
||||
|
||||
int err = OB_SUCCESS;
|
||||
err = worker_pool.init(1);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
err = heartbeater.init(&rpc_factory, &err_handler1, &worker_pool, heartbeat_worker_cnt);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
// test async_heartbeat_req
|
||||
for (int64_t idx = 0, cnt = HeartbeatRequestCnt; idx < cnt; ++idx) {
|
||||
HeartbeatRequest &r = request_array[idx];
|
||||
EXPECT_EQ(OB_SUCCESS, heartbeater.async_heartbeat_req(&r));
|
||||
}
|
||||
// test destroy
|
||||
err = heartbeater.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
err = worker_pool.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
ob_free(request_array);
|
||||
request_array = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Test Worker.
|
||||
*/
|
||||
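// Each TestWorker thread below builds 100,000 heartbeat requests spread across
// 3 fake servers, queues them with a short sleep every 1000 requests, and then
// polls for DONE (recycling finished requests to IDLE) under a 10-minute cap
// before printing a simple per-server distribution.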
class TestWorker : public Runnable
|
||||
{
|
||||
public:
|
||||
Heartbeater *heartbeater_;
|
||||
virtual int routine()
|
||||
{
|
||||
// Build requests.
|
||||
const int64_t AllSvrCnt = 3;
|
||||
ObAddr svrs[AllSvrCnt];
|
||||
for (int64_t idx = 0, cnt = AllSvrCnt; idx < cnt; ++idx) {
|
||||
svrs[idx] = ObAddr(ObAddr::IPV4, "127.0.0.1", (int32_t)(idx + 1000));
|
||||
}
|
||||
const int64_t HeartbeatRequestCnt = 10 * 10000;
|
||||
HeartbeatRequest *request_array = new HeartbeatRequest[HeartbeatRequestCnt];
|
||||
for (int64_t idx = 0, cnt = HeartbeatRequestCnt; idx < cnt; ++idx) {
|
||||
HeartbeatRequest &r = request_array[idx];
|
||||
r.reset();
|
||||
r.pkey_ = ObPartitionKey((uint64_t)(1000 + idx), 0, 1);
|
||||
r.next_log_id_ = (uint64_t)(1 + idx);
|
||||
r.svr_ = svrs[idx % AllSvrCnt];
|
||||
}
|
||||
// Push requests into heartbeater.
|
||||
for (int64_t idx = 0, cnt = HeartbeatRequestCnt; idx < cnt; ++idx) {
|
||||
HeartbeatRequest &r = request_array[idx];
|
||||
EXPECT_EQ(OB_SUCCESS, heartbeater_->async_heartbeat_req(&r));
|
||||
if (0 == (idx % 1000)) {
|
||||
usec_sleep(10 * _MSEC_);
|
||||
}
|
||||
}
|
||||
// Wait for requests to finish. A maximum test time is enforced.
|
||||
int64_t end_request_cnt = 0;
|
||||
const int64_t TestTimeLimit = 10 * _MIN_;
|
||||
const int64_t start_test_tstamp = get_timestamp();
|
||||
while (((get_timestamp() - start_test_tstamp) < TestTimeLimit)
|
||||
&& (end_request_cnt < HeartbeatRequestCnt)) {
|
||||
for (int64_t idx = 0, cnt = HeartbeatRequestCnt; idx < cnt; ++idx) {
|
||||
HeartbeatRequest &r = request_array[idx];
|
||||
if (HeartbeatRequest::DONE == r.get_state()) {
|
||||
end_request_cnt += 1;
|
||||
r.set_state(HeartbeatRequest::IDLE);
|
||||
}
|
||||
}
|
||||
usec_sleep(100 * _MSEC_);
|
||||
}
|
||||
// Fail the test if not all requests finished in time.
|
||||
EXPECT_EQ(HeartbeatRequestCnt, end_request_cnt);
|
||||
// Do some statistics.
|
||||
int64_t svr_consume_distribution[AllSvrCnt]; // 1, 2, 3, ...
|
||||
for (int64_t idx = 0, cnt = AllSvrCnt; idx < cnt; ++idx) {
|
||||
svr_consume_distribution[idx] = 0;
|
||||
}
|
||||
int64_t succ_cnt = 0;
|
||||
for (int64_t idx = 0, cnt = HeartbeatRequestCnt; idx < cnt; ++idx) {
|
||||
svr_consume_distribution[idx % AllSvrCnt] += 1;
|
||||
}
|
||||
delete[] request_array;
|
||||
const int64_t BuffSize = 1024;
|
||||
char buf[BuffSize];
|
||||
int64_t pos = 0;
|
||||
for (int64_t idx = 0, cnt = AllSvrCnt; idx < cnt; ++idx) {
|
||||
pos += snprintf(buf + pos, BuffSize - pos, "svr_cnt:%ld perc:%f ", (1 + idx),
|
||||
((double)svr_consume_distribution[idx] / (double)HeartbeatRequestCnt));
|
||||
}
|
||||
fprintf(stderr, "request count: %ld distribution: %s succeed perc: %f \n",
|
||||
HeartbeatRequestCnt, buf, (double)succ_cnt / (double)HeartbeatRequestCnt);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
};
|
||||
|
||||
////////////////// Boundary tests //////////////////////////////////////////
// Heartbeater init fail
TEST(Heartbeater, BasicFuncTest3)
{
//_I_("called", "prepare:", 100);

MockRpcInterfaceFactory rpc_factory;
MockFetcherErrHandler1 err_handler1;
FixedJobPerWorkerPool worker_pool;
const int64_t heartbeat_worker_cnt = 3;
Heartbeater heartbeater;

int err = OB_SUCCESS;
err = worker_pool.init(1);
EXPECT_EQ(OB_SUCCESS, err);

err = heartbeater.init(NULL, &err_handler1, &worker_pool, heartbeat_worker_cnt);
EXPECT_EQ(OB_INVALID_ARGUMENT, err);
err = heartbeater.init(&rpc_factory, NULL, &worker_pool, heartbeat_worker_cnt);
EXPECT_EQ(OB_INVALID_ARGUMENT, err);
err = heartbeater.init(&rpc_factory, &err_handler1, NULL, heartbeat_worker_cnt);
EXPECT_EQ(OB_INVALID_ARGUMENT, err);
// heartbeat_worker_cnt outside the valid range [0, 32]
int64_t heartbeat_worker_cnt_err1 = -1;
err = heartbeater.init(&rpc_factory, &err_handler1, &worker_pool, heartbeat_worker_cnt_err1);
EXPECT_EQ(OB_INVALID_ARGUMENT, err);
int64_t heartbeat_worker_cnt_err2 = 33;
err = heartbeater.init(&rpc_factory, &err_handler1, &worker_pool, heartbeat_worker_cnt_err2);
EXPECT_EQ(OB_INVALID_ARGUMENT, err);
}
|
||||
|
||||
// Heartbeater async_heartbeat_req fail
TEST(Heartbeater, BasicFuncTest4)
{
MockRpcInterfaceFactory rpc_factory;
MockFetcherErrHandler1 err_handler1;
FixedJobPerWorkerPool worker_pool;
const int64_t heartbeat_worker_cnt = 3;
Heartbeater heartbeater;

int err = OB_SUCCESS;
err = worker_pool.init(1);
EXPECT_EQ(OB_SUCCESS, err);

err = heartbeater.init(&rpc_factory, &err_handler1, &worker_pool, heartbeat_worker_cnt);
EXPECT_EQ(OB_SUCCESS, err);

// Build Heartbeater requests.
ObAddr svr = ObAddr(ObAddr::IPV4, "127.0.0.1", (int32_t)(1000));
HeartbeatRequest req;
req.reset();
req.pkey_ = ObPartitionKey((uint64_t)(1000), 0, 1);
req.next_log_id_ = (uint64_t)(100);
req.svr_ = svr;
req.set_state(HeartbeatRequest::REQ);

err = heartbeater.async_heartbeat_req(NULL);
EXPECT_NE(OB_SUCCESS, err);
err = heartbeater.async_heartbeat_req(&req);
EXPECT_NE(OB_SUCCESS, err);

err = heartbeater.destroy();
EXPECT_EQ(OB_SUCCESS, err);

err = worker_pool.destroy();
EXPECT_EQ(OB_SUCCESS, err);
}
|
||||
|
||||
/*
|
||||
* Test workflow
|
||||
*/
|
||||
//TEST(DISABLED_Heartbeater, BasicFuncTest5)
|
||||
TEST(Heartbeater, BasicFuncTest5)
|
||||
{
|
||||
_I_("called", "func:", "workflow");
|
||||
MockFetcherErrHandler1 err_handler1;
|
||||
MockRpcInterfaceFactory rpc_factory;
|
||||
FixedJobPerWorkerPool worker_pool;
|
||||
Heartbeater heartbeater;
|
||||
|
||||
int err = OB_SUCCESS;
|
||||
err = worker_pool.init(1);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
_I_("workflow", "worker_pool:", "init OB_SUCCESS");
|
||||
|
||||
err = heartbeater.init(&rpc_factory, &err_handler1, &worker_pool, 3);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
_I_("workflow", "heartbeat:", "init OB_SUCCESS");
|
||||
|
||||
const int64_t TestWorkerCnt = 3;
|
||||
TestWorker workers[TestWorkerCnt];
|
||||
for (int64_t idx = 0, cnt = TestWorkerCnt; idx < cnt; ++idx) {
|
||||
TestWorker &w = workers[idx];
|
||||
w.heartbeater_ = &heartbeater;
|
||||
w.create();
|
||||
_I_("workflow", "thread:", "create OB_SUCCESS");
|
||||
}
|
||||
|
||||
for (int64_t idx = 0, cnt = TestWorkerCnt; idx < cnt; ++idx) {
|
||||
TestWorker &w = workers[idx];
|
||||
w.join();
|
||||
_I_("workflow", "thread:", "join OB_SUCCESS");
|
||||
}
|
||||
|
||||
err = heartbeater.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
_I_("workflow", "heartbeat:", "destroy OB_SUCCESS");
|
||||
|
||||
err = worker_pool.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
_I_("workflow", "work pool:", "destroy OB_SUCCESS");
|
||||
}
|
||||
|
||||
}//end of basic_func_test_1
|
||||
}//end of unittest
|
||||
}//end of oceanbase
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
// ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
// testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
int ret = 1;
|
||||
ObLogger &logger = ObLogger::get_logger();
|
||||
logger.set_file_name("test_log_fetcher_heartbeat_mgr.log", true);
|
||||
logger.set_log_level(OB_LOG_LEVEL_INFO);
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
ret = RUN_ALL_TESTS();
|
||||
return ret;
|
||||
}
|
||||
384
unittest/obcdc/test_log_fetcher_impl.cpp
Normal file
@ -0,0 +1,384 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include "share/ob_define.h"
|
||||
#include "lib/allocator/ob_concurrent_fifo_allocator.h"
|
||||
|
||||
|
||||
#include "test_log_fetcher_common_utils.h"
|
||||
#include "obcdc/src/ob_log_fetcher_impl.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
using namespace fetcher;
|
||||
using namespace transaction;
|
||||
using namespace storage;
|
||||
using namespace clog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
namespace BasicFunction1
|
||||
{
|
||||
/*
|
||||
* Mock systable helper. // Todo...
|
||||
*/
|
||||
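// The mock systable helper below fabricates between 1 and 6 records per query,
// derived from a timestamp-based seed, so the returned clog history / meta table
// rows (and the synthesized 127.0.0.x server addresses) vary from run to run.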
class MockSystableHelper : public ObILogSysTableHelper
|
||||
{
|
||||
public:
|
||||
virtual int query_all_clog_history_info_by_log_id_1(
|
||||
const common::ObPartitionKey &pkey, const uint64_t log_id,
|
||||
AllClogHistoryInfos &records) {
|
||||
// Generate random results.
|
||||
int ret = OB_SUCCESS;
|
||||
int64_t seed = get_timestamp() / 3333333;
|
||||
records.reset();
|
||||
AllClogHistoryInfoRecord rec;
|
||||
const int64_t cnt = 1 + (seed % 6);
|
||||
for (int64_t idx = 0; idx < cnt; ++idx) {
|
||||
rec.reset();
|
||||
rec.table_id_ = (uint64_t)(pkey.table_id_);
|
||||
rec.partition_idx_ = (int32_t)(pkey.get_partition_id());
|
||||
rec.partition_cnt_ = (int32_t)(pkey.get_partition_cnt());
|
||||
rec.start_log_id_ = log_id;
|
||||
rec.end_log_id_ = log_id + 10000;
|
||||
rec.start_log_timestamp_ = seed - (1 * _HOUR_);
|
||||
rec.end_log_timestamp_ = seed + (1 * _HOUR_);
|
||||
snprintf(rec.svr_ip_, common::MAX_IP_ADDR_LENGTH + 1, "127.0.0.%ld", (seed % 128));
|
||||
rec.svr_port_ = 8888;
|
||||
records.push_back(rec);
|
||||
seed += 17;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int query_all_clog_history_info_by_timestamp_1(
|
||||
const common::ObPartitionKey &pkey, const int64_t timestamp,
|
||||
AllClogHistoryInfos &records) {
|
||||
// Generate random results.
|
||||
int ret = OB_SUCCESS;
|
||||
int64_t seed = get_timestamp() / 7777777;
|
||||
records.reset();
|
||||
AllClogHistoryInfoRecord rec;
|
||||
const int64_t cnt = 1 + (seed % 6);
|
||||
for (int64_t idx = 0; idx < cnt; ++idx) {
|
||||
rec.reset();
|
||||
rec.table_id_ = (uint64_t)(pkey.table_id_);
|
||||
rec.partition_idx_ = (int32_t)(pkey.get_partition_id());
|
||||
rec.partition_cnt_ = (int32_t)(pkey.get_partition_cnt());
|
||||
rec.start_log_id_ = 0;
|
||||
rec.end_log_id_ = 65536;
|
||||
rec.start_log_timestamp_ = timestamp - (1 * _HOUR_);
|
||||
rec.end_log_timestamp_ = timestamp + (1 * _HOUR_);
|
||||
snprintf(rec.svr_ip_, common::MAX_IP_ADDR_LENGTH + 1, "127.0.0.%ld", (seed % 128));
|
||||
rec.svr_port_ = 8888;
|
||||
records.push_back(rec);
|
||||
seed += 17;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
virtual int query_all_meta_table_1(
|
||||
const common::ObPartitionKey &pkey, AllMetaTableRecords &records) {
|
||||
// Generate random results.
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(pkey);
|
||||
int64_t seed = get_timestamp() / 3333333;
|
||||
records.reset();
|
||||
AllMetaTableRecord rec;
|
||||
const int64_t cnt = 1 + (seed % 6);
|
||||
for (int64_t idx = 0; idx < cnt; ++idx) {
|
||||
rec.reset();
|
||||
snprintf(rec.svr_ip_, common::MAX_IP_ADDR_LENGTH + 1, "127.0.0.%ld", (seed % 128));
|
||||
rec.svr_port_ = 8888;
|
||||
rec.role_ = (0 == idx) ? LEADER : FOLLOWER;
|
||||
records.push_back(rec);
|
||||
seed += 17;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int query_all_meta_table_for_leader(
|
||||
const common::ObPartitionKey &pkey,
|
||||
bool &has_leader,
|
||||
common::ObAddr &leader)
|
||||
{
|
||||
UNUSED(pkey);
|
||||
has_leader = true;
|
||||
leader.set_ip_addr("127.0.0.1", 8888);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
|
||||
virtual int query_all_server_table_1(AllServerTableRecords &records) {
|
||||
UNUSED(records);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
};
|
||||
|
||||
/*
|
||||
* Mock rpc.
|
||||
*/
|
||||
class MockRpcInterface : public IFetcherRpcInterface
|
||||
{
|
||||
public:
|
||||
~MockRpcInterface() {}
|
||||
virtual void set_svr(const common::ObAddr& svr) { UNUSED(svr); }
|
||||
virtual const ObAddr& get_svr() const { static ObAddr svr; return svr; }
|
||||
virtual void set_timeout(const int64_t timeout) { UNUSED(timeout); }
|
||||
virtual int req_start_log_id_by_ts(
|
||||
const obrpc::ObLogReqStartLogIdByTsRequest& req,
|
||||
obrpc::ObLogReqStartLogIdByTsResponse& res)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
virtual int req_start_log_id_by_ts_2(const obrpc::ObLogReqStartLogIdByTsRequestWithBreakpoint &req,
|
||||
obrpc::ObLogReqStartLogIdByTsResponseWithBreakpoint &res) {
|
||||
res.reset();
|
||||
// Seed.
|
||||
int64_t seed = (get_timestamp());
|
||||
for (int64_t idx = 0, cnt = req.get_params().count(); idx < cnt; ++idx) {
|
||||
// 30% success, 30% break.
|
||||
int64_t rand = (idx + seed) % 100;
|
||||
bool succeed = (rand < 30);
|
||||
bool breakrpc = (30 <= rand) && (rand < 60);
|
||||
obrpc::ObLogReqStartLogIdByTsResponseWithBreakpoint::Result result;
|
||||
result.reset();
|
||||
result.err_ = (succeed) ? OB_SUCCESS : ((breakrpc) ? OB_EXT_HANDLE_UNFINISH : OB_NEED_RETRY);
|
||||
result.start_log_id_ = 1;
|
||||
// Break info is actually not returned.
|
||||
EXPECT_EQ(OB_SUCCESS, res.append_result(result));
|
||||
}
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int req_start_pos_by_log_id_2(const obrpc::ObLogReqStartPosByLogIdRequestWithBreakpoint &req,
|
||||
obrpc::ObLogReqStartPosByLogIdResponseWithBreakpoint &res) {
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
virtual int req_start_pos_by_log_id(
|
||||
const obrpc::ObLogReqStartPosByLogIdRequest& req,
|
||||
obrpc::ObLogReqStartPosByLogIdResponse& res)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
virtual int fetch_log(
|
||||
const obrpc::ObLogExternalFetchLogRequest& req,
|
||||
obrpc::ObLogExternalFetchLogResponse& res)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
virtual int req_heartbeat_info(
|
||||
const obrpc::ObLogReqHeartbeatInfoRequest& req,
|
||||
obrpc::ObLogReqHeartbeatInfoResponse& res)
|
||||
{
|
||||
res.reset();
|
||||
for (int64_t idx = 0, cnt = req.get_params().count(); idx < cnt; ++idx) {
|
||||
obrpc::ObLogReqHeartbeatInfoResponse::Result result;
|
||||
result.reset();
|
||||
result.err_ = OB_SUCCESS;
|
||||
result.tstamp_ = get_timestamp();
|
||||
EXPECT_EQ(OB_SUCCESS, res.append_result(result));
|
||||
}
|
||||
_D_(">>> req heartbeat", K(req), K(res));
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int req_leader_heartbeat(
|
||||
const obrpc::ObLogLeaderHeartbeatReq &req,
|
||||
obrpc::ObLogLeaderHeartbeatResp &res)
|
||||
{
|
||||
res.reset();
|
||||
res.set_err(OB_SUCCESS);
|
||||
res.set_debug_err(OB_SUCCESS);
|
||||
for (int64_t idx = 0, cnt = req.get_params().count(); idx < cnt; ++idx) {
|
||||
obrpc::ObLogLeaderHeartbeatResp::Result result;
|
||||
const obrpc::ObLogLeaderHeartbeatReq::Param ¶m = req.get_params().at(idx);
|
||||
|
||||
result.reset();
|
||||
result.err_ = OB_SUCCESS;
|
||||
result.next_served_log_id_ = param.next_log_id_;
|
||||
result.next_served_ts_ = get_timestamp();
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, res.append_result(result));
|
||||
}
|
||||
|
||||
_D_(">>> heartbeat", K(req), K(res));
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
|
||||
virtual int open_stream(const obrpc::ObLogOpenStreamReq &req,
|
||||
obrpc::ObLogOpenStreamResp &res) {
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(req);
|
||||
obrpc::ObStreamSeq seq;
|
||||
seq.reset();
|
||||
seq.self_.set_ip_addr("127.0.0.1", 8888);
|
||||
seq.seq_ts_ = get_timestamp();
|
||||
res.reset();
|
||||
res.set_err(OB_SUCCESS);
|
||||
res.set_debug_err(OB_SUCCESS);
|
||||
res.set_stream_seq(seq);
|
||||
_D_(">>> open stream", K(req), K(res));
|
||||
return ret;
|
||||
}
|
||||
virtual int fetch_stream_log(const obrpc::ObLogStreamFetchLogReq &req,
|
||||
obrpc::ObLogStreamFetchLogResp &res) {
|
||||
UNUSED(req);
|
||||
res.set_err(OB_SUCCESS);
|
||||
res.set_debug_err(OB_SUCCESS);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int req_svr_feedback(const ReqLogSvrFeedback &feedback)
|
||||
{
|
||||
UNUSED(feedback);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
};
|
||||
|
||||
/*
|
||||
* Factory.
|
||||
*/
|
||||
class MockRpcInterfaceFactory : public IFetcherRpcInterfaceFactory
|
||||
{
|
||||
public:
|
||||
virtual int new_fetcher_rpc_interface(IFetcherRpcInterface*& rpc)
|
||||
{
|
||||
rpc = new MockRpcInterface();
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int delete_fetcher_rpc_interface(IFetcherRpcInterface* rpc)
|
||||
{
|
||||
delete rpc;
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
};
|
||||
|
||||
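// End-to-end smoke test for the fetcher: it wires the task pool, mock parser,
// error handlers, worker pool, start-log-id locator, heartbeater and svr finder
// into a Fetcher, fetches one partition, lets it run for ~10 seconds, discards
// the partition, then stops and destroys everything in reverse order of setup.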
TEST(Fetcher, BasicFunction1)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
|
||||
// Task Pool.
|
||||
ObLogTransTaskPool<PartTransTask> task_pool;
|
||||
ObConcurrentFIFOAllocator task_pool_alloc;
|
||||
err = task_pool_alloc.init(128 * _G_, 8 * _M_, OB_MALLOC_NORMAL_BLOCK_SIZE);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = task_pool.init(&task_pool_alloc, 10240, 1024, 4 * 1024 * 1024, true);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Parser.
|
||||
MockParser1 parser;
|
||||
|
||||
// Err Handler.
|
||||
MockLiboblogErrHandler1 err_handler;
|
||||
MockFetcherErrHandler1 err_handler2;
|
||||
|
||||
// Rpc.
|
||||
MockRpcInterfaceFactory rpc_factory;
|
||||
|
||||
// Worker Pool.
|
||||
FixedJobPerWorkerPool worker_pool;
|
||||
err = worker_pool.init(1);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// StartLogIdLocator.
|
||||
::oceanbase::liboblog::fetcher::StartLogIdLocator locator;
|
||||
err = locator.init(&rpc_factory, &err_handler2, &worker_pool, 3);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Heartbeater.
|
||||
Heartbeater heartbeater;
|
||||
err = heartbeater.init(&rpc_factory, &err_handler2, &worker_pool, 3);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// SvrFinder.
|
||||
MockSystableHelper systable_helper;
|
||||
::oceanbase::liboblog::fetcher::SvrFinder svrfinder;
|
||||
err = svrfinder.init(&systable_helper, &err_handler2, &worker_pool, 3);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Fetcher Config.
|
||||
FetcherConfig cfg;
|
||||
cfg.reset();
|
||||
|
||||
// Init.
|
||||
::oceanbase::liboblog::fetcher::Fetcher fetcher;
|
||||
err = fetcher.init(&task_pool, &parser, &err_handler2, &rpc_factory,
|
||||
&worker_pool, &svrfinder, &locator, &heartbeater, &cfg);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Add partition.
|
||||
ObPartitionKey p1(1001, 1, 1);
|
||||
ObPartitionKey p2(1002, 1, 1);
|
||||
ObPartitionKey p3(1003, 1, 1);
|
||||
err = fetcher.fetch_partition(p1, 1, OB_INVALID_ID);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
// err = fetcher.fetch_partition(p2, 1, OB_INVALID_ID);
|
||||
// EXPECT_EQ(OB_SUCCESS, err);
|
||||
// err = fetcher.fetch_partition(p3, 1, OB_INVALID_ID);
|
||||
// EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Run.
|
||||
err = fetcher.start();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
usleep(10 * _SEC_);
|
||||
|
||||
// Discard partition.
|
||||
err = fetcher.discard_partition(p1);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
// err = fetcher.discard_partition(p2);
|
||||
// EXPECT_EQ(OB_SUCCESS, err);
|
||||
// err = fetcher.discard_partition(p3);
|
||||
// EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
usleep(10 * _SEC_);
|
||||
|
||||
// Stop.
|
||||
err = fetcher.stop(true);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Destroy.
|
||||
err = fetcher.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = locator.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = svrfinder.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = heartbeater.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = worker_pool.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
task_pool.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
ObLogger::get_logger().set_mod_log_levels("ALL.*:ERROR, TLOG.*:DEBUG");
|
||||
testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
295
unittest/obcdc/test_log_fetcher_part_stream.cpp
Normal file
@ -0,0 +1,295 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
|
||||
#include "share/ob_define.h"
|
||||
#include "storage/ob_storage_log_type.h"
|
||||
#include "storage/transaction/ob_trans_log.h"
|
||||
|
||||
#include "obcdc/src/ob_log_fetcher_part_stream.h"
|
||||
#include "test_log_fetcher_common_utils.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
using namespace fetcher;
|
||||
using namespace transaction;
|
||||
using namespace storage;
|
||||
using namespace clog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
/*
|
||||
* Basic Function Tests.
|
||||
*/
|
||||
/*
|
||||
* Half commit, half abort.
|
||||
* Fixed redo log cnt.
|
||||
*/
|
||||
TEST(PartitionStream, BasicTest1)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
|
||||
ObTransPrepareLog prepare_;
|
||||
|
||||
// Commit half of the transactions: those with an even idx.
|
||||
const int64_t trans_cnt = 1000;
|
||||
const int64_t commit_trans_cnt = trans_cnt / 2;
|
||||
const int64_t redo_cnt = 5;
|
||||
|
||||
// Pkey.
|
||||
ObPartitionKey pkey(1000U, 1, 1);
|
||||
// Log gen.
|
||||
TransLogEntryGenerator1 log_gen(pkey);
|
||||
// Task Pool.
|
||||
ObConcurrentFIFOAllocator fifo_allocator;
|
||||
ObLogTransTaskPool<PartTransTask> task_pool;
|
||||
err = fifo_allocator.init(16 * _G_, 16 * _M_, OB_MALLOC_NORMAL_BLOCK_SIZE);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = task_pool.init(&fifo_allocator, 10240, 1024, 4 * 1024 * 1024, true);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
// Parser.
|
||||
MockParser1 parser;
|
||||
FetcherConfig cfg;
|
||||
|
||||
// Init.
|
||||
PartitionStream ps;
|
||||
err = ps.init(pkey, &parser, &task_pool, &cfg);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Read logs.
|
||||
ObLogIdArray missing;
|
||||
for (int64_t idx = 0; idx < trans_cnt; ++idx) {
|
||||
// Commit trans with even idx.
|
||||
log_gen.next_trans(redo_cnt, (0 == idx % 2));
|
||||
ObLogEntry log_entry;
|
||||
while (OB_SUCCESS == log_gen.next_log_entry(log_entry)) {
|
||||
err = ps.read(log_entry, missing);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
err = ps.flush();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
// Check.
|
||||
EXPECT_EQ(commit_trans_cnt, parser.get_trans_cnt());
|
||||
|
||||
// Destroy.
|
||||
err = ps.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
/*
|
||||
* Half commit, half abort.
|
||||
* Commit with Prepare-Commit trans log.
|
||||
*/
|
||||
TEST(PartitionStream, BasicTest2)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
|
||||
ObTransPrepareLog prepare_;
|
||||
|
||||
// Commit half of the transactions: those with an even idx.
|
||||
const int64_t trans_cnt = 1000;
|
||||
const int64_t commit_trans_cnt = trans_cnt / 2;
|
||||
const int64_t redo_cnt = 5;
|
||||
|
||||
// Pkey.
|
||||
ObPartitionKey pkey(1000U, 1, 1);
|
||||
// Log gen.
|
||||
TransLogEntryGenerator1 log_gen(pkey);
|
||||
// Task Pool.
|
||||
ObConcurrentFIFOAllocator fifo_allocator;
|
||||
ObLogTransTaskPool<PartTransTask> task_pool;
|
||||
err = fifo_allocator.init(16 * _G_, 16 * _M_, OB_MALLOC_NORMAL_BLOCK_SIZE);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = task_pool.init(&fifo_allocator, 10240, 1024, 4 * 1024 * 1024, true);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
// Parser.
|
||||
MockParser1 parser;
|
||||
FetcherConfig cfg;
|
||||
|
||||
// Init.
|
||||
PartitionStream ps;
|
||||
err = ps.init(pkey, &parser, &task_pool, &cfg);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Read logs.
|
||||
ObLogIdArray missing;
|
||||
for (int64_t idx = 0; idx < trans_cnt; ++idx) {
|
||||
// Commit trans with even idx.
|
||||
log_gen.next_trans(redo_cnt, (0 == idx % 2));
|
||||
ObLogEntry log_entry;
|
||||
while (OB_SUCCESS == log_gen.next_log_entry_2(log_entry)) {
|
||||
err = ps.read(log_entry, missing);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
err = ps.flush();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
// Check.
|
||||
EXPECT_EQ(commit_trans_cnt, parser.get_trans_cnt());
|
||||
|
||||
// Destroy.
|
||||
err = ps.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Test partition progress tracker.
|
||||
*/
|
||||
TEST(PartProgressTracker, BasicTest1)
|
||||
{
|
||||
const int64_t progress_cnt = 4 * 10000;
|
||||
PartProgressTracker tracker;
|
||||
|
||||
int err = tracker.init(progress_cnt);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
ObArray<int64_t> indices;
|
||||
const int64_t time = get_timestamp();
|
||||
|
||||
// Acquire progresses and update their values.
|
||||
for (int64_t idx = 0, cnt = progress_cnt; idx < cnt; ++idx) {
|
||||
int64_t progress_idx = 0;
|
||||
err = tracker.acquire_progress(progress_idx);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = indices.push_back(progress_idx);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = tracker.update_progress(progress_idx, time);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
// Get min progress test.
|
||||
const int64_t test_cnt = 10000;
|
||||
int64_t start = get_timestamp();
|
||||
for (int64_t idx = 0, cnt = test_cnt; idx < cnt; ++idx) {
|
||||
int64_t min = 0;
|
||||
err = tracker.get_min_progress(min);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
const int64_t avg = ((get_timestamp() - start)/ test_cnt);
|
||||
|
||||
// Release.
|
||||
while (0 != indices.count()) {
|
||||
int64_t progress_idx = indices.at(indices.count() - 1);
|
||||
indices.pop_back();
|
||||
err = tracker.release_progress(progress_idx);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
err = tracker.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Print result.
|
||||
fprintf(stderr, "partition progress tracker get min for %ld progresses costs %s\n",
|
||||
progress_cnt, TVAL_TO_STR(avg));
|
||||
}
|
||||
|
||||
// Perf test.
|
||||
// This test requires at least 3 cores: 1 core runs the reader, and 2 cores run the updaters.
|
||||
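// The updater threads below spin while atomic_run_ is set, repeatedly calling
// update_progress on pseudo-randomly chosen slots, so that get_min_progress is
// measured under concurrent writes in the PerfTest1 case that follows.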
struct PerfTest1Updater : public Runnable
|
||||
{
|
||||
virtual int routine()
|
||||
{
|
||||
while (ATOMIC_LOAD(&atomic_run_)) {
|
||||
int64_t seed = get_timestamp();
|
||||
for (int i = 0; i < 10000; ++i) {
|
||||
progress_tracker_->update_progress(indices_->at((seed % (indices_->count()))), seed);
|
||||
seed += 777;
|
||||
}
|
||||
}
|
||||
return common::OB_SUCCESS;
|
||||
}
|
||||
bool atomic_run_;
|
||||
PartProgressTracker *progress_tracker_;
|
||||
ObArray<int64_t> *indices_;
|
||||
};
|
||||
TEST(PartProgressTracker, PerfTest1)
|
||||
{
|
||||
const int64_t progress_cnt = 4 * 10000;
|
||||
PartProgressTracker tracker;
|
||||
|
||||
int err = tracker.init(progress_cnt);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
ObArray<int64_t> indices;
|
||||
const int64_t time = get_timestamp();
|
||||
|
||||
// Acquire progresses and update their values.
|
||||
for (int64_t idx = 0, cnt = progress_cnt; idx < cnt; ++idx) {
|
||||
int64_t progress_idx = 0;
|
||||
err = tracker.acquire_progress(progress_idx);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = indices.push_back(progress_idx);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = tracker.update_progress(progress_idx, time);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
// Trigger updaters.
|
||||
const int64_t updater_cnt = 2;
|
||||
PerfTest1Updater updaters[updater_cnt];
|
||||
for (int i = 0; i < updater_cnt; ++i) {
|
||||
updaters[i].atomic_run_ = true;
|
||||
updaters[i].progress_tracker_ = &tracker;
|
||||
updaters[i].indices_ = &indices;
|
||||
updaters[i].create();
|
||||
}
|
||||
|
||||
// Get min progress test.
|
||||
const int64_t test_cnt = 10000;
|
||||
int64_t start = get_timestamp();
|
||||
for (int64_t idx = 0, cnt = test_cnt; idx < cnt; ++idx) {
|
||||
int64_t min = 0;
|
||||
err = tracker.get_min_progress(min);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
const int64_t avg = ((get_timestamp() - start)/ test_cnt);
|
||||
|
||||
// Stop updaters.
|
||||
for (int i = 0; i < updater_cnt; ++i) {
|
||||
ATOMIC_STORE(&(updaters[i].atomic_run_), false);
|
||||
updaters[i].join();
|
||||
}
|
||||
|
||||
// Release.
|
||||
while (0 != indices.count()) {
|
||||
int64_t progress_idx = indices.at(indices.count() - 1);
|
||||
indices.pop_back();
|
||||
err = tracker.release_progress(progress_idx);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
err = tracker.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Print result.
|
||||
fprintf(stderr, "partition progress tracker 2 updaters get min for %ld progresses costs %s\n",
|
||||
progress_cnt, TVAL_TO_STR(avg));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
//ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
317
unittest/obcdc/test_log_fetcher_start_log_id_locator.cpp
Normal file
@ -0,0 +1,317 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include <vector>
|
||||
|
||||
#include "share/ob_define.h"
|
||||
#include "obcdc/src/ob_log_fetcher_start_log_id_locator.h"
|
||||
|
||||
#include "test_log_fetcher_common_utils.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
using namespace fetcher;
|
||||
using namespace transaction;
|
||||
using namespace storage;
|
||||
using namespace clog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
/*
|
||||
* StartLogIdLocator Tests.
|
||||
*/
|
||||
/*
* Basic function test 1.
* - N threads & M requests per thread
* - resulting start log id = 1
* - rpc always succeeds, no server internal error
* - the rpc interface breaks the locating process randomly (30%)
* - the rpc interface returns a correct result or an error code randomly (30%)
*/
|
||||
namespace basic_func_test_1
|
||||
{
|
||||
class MockRpcInterface : public IFetcherRpcInterface
|
||||
{
|
||||
public:
|
||||
~MockRpcInterface() {}
|
||||
virtual void set_svr(const common::ObAddr& svr) { UNUSED(svr); }
|
||||
virtual const ObAddr& get_svr() const { static ObAddr svr; return svr; }
|
||||
virtual void set_timeout(const int64_t timeout) { UNUSED(timeout); }
|
||||
virtual int req_start_log_id_by_ts(
|
||||
const obrpc::ObLogReqStartLogIdByTsRequest& req,
|
||||
obrpc::ObLogReqStartLogIdByTsResponse& res)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
virtual int req_start_log_id_by_ts_2(const obrpc::ObLogReqStartLogIdByTsRequestWithBreakpoint &req,
|
||||
obrpc::ObLogReqStartLogIdByTsResponseWithBreakpoint &res) {
|
||||
res.reset();
|
||||
// Seed.
|
||||
int64_t seed = (get_timestamp());
|
||||
for (int64_t idx = 0, cnt = req.get_params().count(); idx < cnt; ++idx) {
|
||||
// 30% success, 30% break.
|
||||
int64_t rand = (idx + seed) % 100;
|
||||
bool succeed = (rand < 30);
|
||||
bool breakrpc = (30 <= rand) && (rand < 60);
|
||||
obrpc::ObLogReqStartLogIdByTsResponseWithBreakpoint::Result result;
|
||||
result.reset();
|
||||
result.err_ = (succeed) ? OB_SUCCESS : ((breakrpc) ? OB_EXT_HANDLE_UNFINISH : OB_NEED_RETRY);
|
||||
result.start_log_id_ = 1;
|
||||
// Break info is actually not returned.
|
||||
EXPECT_EQ(OB_SUCCESS, res.append_result(result));
|
||||
}
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int req_start_pos_by_log_id_2(const obrpc::ObLogReqStartPosByLogIdRequestWithBreakpoint &req,
|
||||
obrpc::ObLogReqStartPosByLogIdResponseWithBreakpoint &res) {
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
virtual int req_start_pos_by_log_id(
|
||||
const obrpc::ObLogReqStartPosByLogIdRequest& req,
|
||||
obrpc::ObLogReqStartPosByLogIdResponse& res)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
virtual int fetch_log(
|
||||
const obrpc::ObLogExternalFetchLogRequest& req,
|
||||
obrpc::ObLogExternalFetchLogResponse& res)
|
||||
{
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
virtual int req_heartbeat_info(
|
||||
const obrpc::ObLogReqHeartbeatInfoRequest& req,
|
||||
obrpc::ObLogReqHeartbeatInfoResponse& res)
|
||||
{
|
||||
res.reset();
|
||||
// Seed.
|
||||
int64_t seed = (get_timestamp());
|
||||
for (int64_t idx = 0, cnt = req.get_params().count(); idx < cnt; ++idx) {
|
||||
// Succeed for ~30% of requests.
|
||||
bool succeed = ((idx + seed) % 100) < 30;
|
||||
obrpc::ObLogReqHeartbeatInfoResponse::Result result;
|
||||
result.reset();
|
||||
result.err_ = (succeed) ? OB_SUCCESS : OB_NEED_RETRY;
|
||||
result.tstamp_ = (succeed) ? (int64_t)(req.get_params().at(idx).log_id_) : OB_INVALID_TIMESTAMP;
|
||||
EXPECT_EQ(OB_SUCCESS, res.append_result(result));
|
||||
}
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
|
||||
virtual int req_leader_heartbeat(
|
||||
const obrpc::ObLogLeaderHeartbeatReq &req,
|
||||
obrpc::ObLogLeaderHeartbeatResp &res)
|
||||
{
|
||||
res.reset();
|
||||
res.set_err(OB_SUCCESS);
|
||||
res.set_debug_err(OB_SUCCESS);
|
||||
// Seed.
|
||||
int64_t seed = (get_timestamp());
|
||||
for (int64_t idx = 0, cnt = req.get_params().count(); idx < cnt; ++idx) {
|
||||
obrpc::ObLogLeaderHeartbeatResp::Result result;
|
||||
const obrpc::ObLogLeaderHeartbeatReq::Param ¶m = req.get_params().at(idx);
|
||||
// Succeed for ~30% of requests.
|
||||
bool succeed = ((idx + seed) % 100) < 30;
|
||||
|
||||
result.reset();
|
||||
result.err_ = succeed ? OB_SUCCESS : OB_NOT_MASTER;
|
||||
result.next_served_log_id_ = param.next_log_id_;
|
||||
result.next_served_ts_ = succeed ? get_timestamp() : 1;
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, res.append_result(result));
|
||||
}
|
||||
|
||||
_D_(">>> heartbeat", K(req), K(res));
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
|
||||
virtual int open_stream(const obrpc::ObLogOpenStreamReq &req,
|
||||
obrpc::ObLogOpenStreamResp &res) {
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
|
||||
virtual int fetch_stream_log(const obrpc::ObLogStreamFetchLogReq &req,
|
||||
obrpc::ObLogStreamFetchLogResp &res) {
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
return OB_NOT_IMPLEMENT;
|
||||
}
|
||||
virtual int req_svr_feedback(const ReqLogSvrFeedback &feedback)
|
||||
{
|
||||
UNUSED(feedback);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
};
|
||||
|
||||
/*
|
||||
* Factory.
|
||||
*/
|
||||
class MockRpcInterfaceFactory : public IFetcherRpcInterfaceFactory
|
||||
{
|
||||
public:
|
||||
virtual int new_fetcher_rpc_interface(IFetcherRpcInterface*& rpc)
|
||||
{
|
||||
rpc = new MockRpcInterface();
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int delete_fetcher_rpc_interface(IFetcherRpcInterface* rpc)
|
||||
{
|
||||
delete rpc;
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
};
|
||||
|
||||
/*
|
||||
* Worker.
|
||||
*/
|
||||
class TestWorker : public Runnable
|
||||
{
|
||||
public:
|
||||
StartLogIdLocator *locator_;
|
||||
virtual int routine()
|
||||
{
|
||||
// Build requests.
|
||||
const int64_t AllSvrCnt = 3;
|
||||
ObAddr svrs[AllSvrCnt];
|
||||
for (int64_t idx = 0, cnt = AllSvrCnt; idx < cnt; ++idx) {
|
||||
svrs[idx] = ObAddr(ObAddr::IPV4, "127.0.0.1", (int32_t)(idx + 1000));
|
||||
}
|
||||
const int64_t RequestCnt = 10 * 10000;
|
||||
StartLogIdLocatorRequest *request_array = new StartLogIdLocatorRequest[RequestCnt];
|
||||
for (int64_t idx = 0, cnt = RequestCnt; idx < cnt; ++idx) {
|
||||
StartLogIdLocatorRequest &r = request_array[idx];
|
||||
r.reset();
|
||||
r.pkey_ = ObPartitionKey((uint64_t)(1000 + idx), 0, 1);
|
||||
r.start_tstamp_ = 1 + idx;
|
||||
// Set server list.
|
||||
for (int64_t idx2 = 0, cnt2 = AllSvrCnt; idx2 < cnt2; ++idx2) {
|
||||
StartLogIdLocatorRequest::SvrListItem item;
|
||||
item.reset();
|
||||
item.svr_ = svrs[idx2];
|
||||
r.svr_list_.push_back(item);
|
||||
}
|
||||
}
|
||||
// Push requests into locator.
|
||||
for (int64_t idx = 0, cnt = RequestCnt; idx < cnt; ++idx) {
|
||||
StartLogIdLocatorRequest &r = request_array[idx];
|
||||
EXPECT_EQ(OB_SUCCESS, locator_->async_start_log_id_req(&r));
|
||||
if (0 == (idx % 1000)) {
|
||||
usec_sleep(10 * _MSEC_);
|
||||
}
|
||||
}
|
||||
// Wait for requests to finish. A maximum test time is enforced.
|
||||
int64_t end_request_cnt = 0;
|
||||
const int64_t TestTimeLimit = 10 * _MIN_;
|
||||
const int64_t start_test_tstamp = get_timestamp();
|
||||
while (((get_timestamp() - start_test_tstamp) < TestTimeLimit)
|
||||
&& (end_request_cnt < RequestCnt)) {
|
||||
for (int64_t idx = 0, cnt = RequestCnt; idx < cnt; ++idx) {
|
||||
StartLogIdLocatorRequest &r = request_array[idx];
|
||||
if (StartLogIdLocatorRequest::DONE == r.get_state()) {
|
||||
end_request_cnt += 1;
|
||||
r.set_state(StartLogIdLocatorRequest::IDLE);
|
||||
}
|
||||
}
|
||||
usec_sleep(100 * _MSEC_);
|
||||
}
|
||||
// Fail the test if not all requests finished in time.
|
||||
EXPECT_EQ(RequestCnt, end_request_cnt);
|
||||
// Do some statistics.
|
||||
int64_t svr_consume_distribution[AllSvrCnt]; // 1, 2, 3, ...
|
||||
for (int64_t idx = 0, cnt = AllSvrCnt; idx < cnt; ++idx) {
|
||||
svr_consume_distribution[idx] = 0;
|
||||
}
|
||||
int64_t succ_cnt = 0;
|
||||
for (int64_t idx = 0, cnt = RequestCnt; idx < cnt; ++idx) {
|
||||
StartLogIdLocatorRequest &r = request_array[idx];
|
||||
EXPECT_GE(r.svr_list_consumed_, 0);
|
||||
svr_consume_distribution[(r.svr_list_consumed_ - 1)] += 1;
|
||||
uint64_t start_log_id = 0;
|
||||
if (r.get_result(start_log_id)) {
|
||||
succ_cnt += 1;
|
||||
EXPECT_EQ(1, start_log_id);
|
||||
}
|
||||
}
|
||||
delete[] request_array;
|
||||
const int64_t BuffSize = 1024;
|
||||
char buf[BuffSize];
|
||||
int64_t pos = 0;
|
||||
for (int64_t idx = 0, cnt = AllSvrCnt; idx < cnt; ++idx) {
|
||||
pos += snprintf(buf + pos, BuffSize - pos, "svr_cnt:%ld perc:%f ", (1 + idx),
|
||||
((double)svr_consume_distribution[idx] / (double)RequestCnt));
|
||||
}
|
||||
fprintf(stderr, "request count: %ld distribution: %s succeed perc: %f \n",
|
||||
RequestCnt, buf, (double)succ_cnt / (double)RequestCnt);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
};
|
||||
|
||||
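// Three worker threads each push 100,000 locate requests into one shared
// StartLogIdLocator (configured with 3 workers). Since the mock RPC answers
// only ~30% of the per-partition params successfully, a request may consume
// several servers from its list before it resolves to start log id 1.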
TEST(StartLogIdLocator, BasicFuncTest1)
|
||||
{
|
||||
MockFetcherErrHandler1 err_handler1;
|
||||
MockRpcInterfaceFactory rpc_factory;
|
||||
FixedJobPerWorkerPool worker_pool;
|
||||
StartLogIdLocator locator;
|
||||
|
||||
int err = OB_SUCCESS;
|
||||
err = worker_pool.init(1);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
err = locator.init(&rpc_factory, &err_handler1, &worker_pool, 3);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
const int64_t TestWorkerCnt = 3;
|
||||
TestWorker workers[TestWorkerCnt];
|
||||
for (int64_t idx = 0, cnt = TestWorkerCnt; idx < cnt; ++idx) {
|
||||
TestWorker &w = workers[idx];
|
||||
w.locator_ = &locator;
|
||||
w.create();
|
||||
}
|
||||
|
||||
for (int64_t idx = 0, cnt = TestWorkerCnt; idx < cnt; ++idx) {
|
||||
TestWorker &w = workers[idx];
|
||||
w.join();
|
||||
}
|
||||
|
||||
err = locator.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
err = worker_pool.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
//ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
42
unittest/obcdc/test_log_fetcher_stream.cpp
Normal file
@ -0,0 +1,42 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include "share/ob_define.h"
|
||||
#include "obcdc/src/ob_log_fetcher_stream.h"
|
||||
|
||||
#include "test_log_fetcher_common_utils.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
using namespace fetcher;
|
||||
using namespace transaction;
|
||||
using namespace storage;
|
||||
using namespace clog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
// Deprecated. Delete me later.
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
//ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
242
unittest/obcdc/test_log_fetcher_svr_finder.cpp
Normal file
@ -0,0 +1,242 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include <vector>
|
||||
|
||||
#include "share/ob_define.h"
|
||||
#include "lib/container/ob_se_array.h"
|
||||
|
||||
#include "obcdc/src/ob_log_fetcher_svr_finder.h"
|
||||
|
||||
#include "test_log_fetcher_common_utils.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
using namespace fetcher;
|
||||
using namespace transaction;
|
||||
using namespace storage;
|
||||
using namespace clog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
/*
|
||||
* SvrFinder Tests.
|
||||
*
|
||||
*/
|
||||
/*
* Basic function test 1.
* - N threads & M requests per thread
*/
|
||||
namespace basic_func_test_1
|
||||
{
|
||||
class MockSystableHelper : public ObILogSysTableHelper
|
||||
{
|
||||
public:
|
||||
virtual int query_all_clog_history_info_by_log_id_1(const common::ObPartitionKey &pkey, const uint64_t log_id,
|
||||
AllClogHistoryInfos &records) {
|
||||
// Generate random results.
|
||||
int ret = OB_SUCCESS;
|
||||
int64_t seed = get_timestamp();
|
||||
records.reset();
|
||||
AllClogHistoryInfoRecord rec;
|
||||
const int64_t cnt = 1 + (seed % 6);
|
||||
for (int64_t idx = 0; idx < cnt; ++idx) {
|
||||
rec.reset();
|
||||
rec.table_id_ = (uint64_t)(pkey.table_id_);
|
||||
rec.partition_idx_ = (int32_t)(pkey.get_partition_id());
|
||||
rec.partition_cnt_ = pkey.get_partition_cnt();
|
||||
rec.start_log_id_ = log_id;
|
||||
rec.end_log_id_ = log_id + 10000;
|
||||
rec.start_log_timestamp_ = seed - (1 * _HOUR_);
|
||||
rec.end_log_timestamp_ = seed + (1 * _HOUR_);
|
||||
snprintf(rec.svr_ip_, common::MAX_IP_ADDR_LENGTH + 1, "127.0.0.%ld", (seed % 128));
|
||||
rec.svr_port_ = 8888;
|
||||
records.push_back(rec);
|
||||
seed += 13;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int query_all_clog_history_info_by_timestamp_1(const common::ObPartitionKey &pkey, const int64_t timestamp,
|
||||
AllClogHistoryInfos &records) {
|
||||
// Generate random results.
|
||||
int ret = OB_SUCCESS;
|
||||
int64_t seed = get_timestamp();
|
||||
records.reset();
|
||||
AllClogHistoryInfoRecord rec;
|
||||
const int64_t cnt = 1 + (seed % 6);
|
||||
for (int64_t idx = 0; idx < cnt; ++idx) {
|
||||
rec.reset();
|
||||
rec.table_id_ = (uint64_t)(pkey.table_id_);
|
||||
rec.partition_idx_ = (int32_t)(pkey.get_partition_id());
|
||||
rec.partition_cnt_ = (int32_t)(pkey.get_partition_cnt());
|
||||
rec.start_log_id_ = 0;
|
||||
rec.end_log_id_ = 65536;
|
||||
rec.start_log_timestamp_ = timestamp - (1 * _HOUR_);
|
||||
rec.end_log_timestamp_ = timestamp + (1 * _HOUR_);
|
||||
snprintf(rec.svr_ip_, common::MAX_IP_ADDR_LENGTH + 1, "127.0.0.%ld", (seed % 128));
|
||||
rec.svr_port_ = 8888;
|
||||
records.push_back(rec);
|
||||
seed += 13;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
virtual int query_all_meta_table_1(const common::ObPartitionKey &pkey, AllMetaTableRecords &records) {
|
||||
// Generate random results.
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(pkey);
|
||||
int64_t seed = get_timestamp();
|
||||
records.reset();
|
||||
AllMetaTableRecord rec;
|
||||
const int64_t cnt = 1 + (seed % 6);
|
||||
for (int64_t idx = 0; idx < cnt; ++idx) {
|
||||
rec.reset();
|
||||
snprintf(rec.svr_ip_, common::MAX_IP_ADDR_LENGTH + 1, "127.0.0.%ld", (seed % 128));
|
||||
rec.svr_port_ = 8888;
|
||||
rec.role_ = (0 == idx) ? LEADER : FOLLOWER;
|
||||
records.push_back(rec);
|
||||
seed += 13;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
virtual int query_all_meta_table_for_leader(
|
||||
const common::ObPartitionKey &pkey,
|
||||
bool &has_leader,
|
||||
common::ObAddr &leader)
|
||||
{
|
||||
UNUSED(pkey);
|
||||
has_leader = true;
|
||||
leader.set_ip_addr("127.0.0.1", 8888);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int query_all_server_table_1(AllServerTableRecords &records) {
|
||||
int ret = OB_SUCCESS;
|
||||
records.reset();
|
||||
AllServerTableRecord rec;
|
||||
for (int64_t idx = 0; idx < 128; ++idx) {
|
||||
rec.reset();
|
||||
snprintf(rec.svr_ip_, common::MAX_IP_ADDR_LENGTH + 1, "127.0.0.%ld", (idx));
|
||||
rec.svr_port_ = 8888;
|
||||
records.push_back(rec);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
};
|
||||
|
||||
/*
|
||||
* Worker.
|
||||
*/
|
||||
class TestWorker : public Runnable
|
||||
{
|
||||
public:
|
||||
SvrFinder *svrfinder_;
|
||||
virtual int routine()
|
||||
{
|
||||
// Build requests.
|
||||
const int64_t RequestCnt = 10 * 10000;
|
||||
SvrFindReq *request_array = new SvrFindReq[RequestCnt];
|
||||
for (int64_t idx = 0, cnt = RequestCnt; idx < cnt; ++idx) {
|
||||
SvrFindReq &r = request_array[idx];
|
||||
r.reset();
|
||||
r.pkey_ = ObPartitionKey((uint64_t)(1000 + idx), 0, 1);
|
||||
const int64_t seed = get_timestamp();
|
||||
if ((seed % 100) < 50) {
|
||||
r.req_by_start_tstamp_ = true;
|
||||
r.start_tstamp_ = seed;
|
||||
}
|
||||
else {
|
||||
r.req_by_next_log_id_ = true;
|
||||
r.next_log_id_ = (uint64_t)(seed % 65536);
|
||||
}
|
||||
}
|
||||
// Push requests into svrfinder.
|
||||
for (int64_t idx = 0, cnt = RequestCnt; idx < cnt; ++idx) {
|
||||
SvrFindReq &r = request_array[idx];
|
||||
EXPECT_EQ(OB_SUCCESS, svrfinder_->async_svr_find_req(&r));
|
||||
if (0 == (idx % 1000)) {
|
||||
usec_sleep(10 * _MSEC_);
|
||||
}
|
||||
}
|
||||
// Wait for requests to finish. A maximum test time is enforced.
|
||||
int64_t end_request_cnt = 0;
|
||||
const int64_t TestTimeLimit = 10 * _MIN_;
|
||||
const int64_t start_test_tstamp = get_timestamp();
|
||||
while (((get_timestamp() - start_test_tstamp) < TestTimeLimit)
|
||||
&& (end_request_cnt < RequestCnt)) {
|
||||
for (int64_t idx = 0, cnt = RequestCnt; idx < cnt; ++idx) {
|
||||
SvrFindReq &r = request_array[idx];
|
||||
if (SvrFindReq::DONE == r.get_state()) {
|
||||
end_request_cnt += 1;
|
||||
_E_(">>> svr list size", "size", r.svr_list_.count());
|
||||
r.set_state_idle();
|
||||
}
|
||||
}
|
||||
usec_sleep(100 * _MSEC_);
|
||||
}
|
||||
// Fail the test if not all requests finished in time.
|
||||
EXPECT_EQ(RequestCnt, end_request_cnt);
|
||||
delete[] request_array;
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
};
|
||||
|
||||
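// This case is disabled via the DISABLED_ prefix; when enabled it drives
// SvrFinder with the mock systable helper above, issuing requests keyed half
// by start timestamp and half by next log id.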
TEST(DISABLED_SvrFinder, BasicFuncTest1)
|
||||
{
|
||||
MockFetcherErrHandler1 err_handler1;
|
||||
MockSystableHelper systable_helper;
|
||||
FixedJobPerWorkerPool worker_pool;
|
||||
SvrFinder svrfinder;
|
||||
|
||||
int err = OB_SUCCESS;
|
||||
err = worker_pool.init(1);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
err = svrfinder.init(&systable_helper, &err_handler1, &worker_pool, 3);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
const int64_t TestWorkerCnt = 3;
|
||||
TestWorker workers[TestWorkerCnt];
|
||||
for (int64_t idx = 0, cnt = TestWorkerCnt; idx < cnt; ++idx) {
|
||||
TestWorker &w = workers[idx];
|
||||
w.svrfinder_ = &svrfinder;
|
||||
w.create();
|
||||
}
|
||||
|
||||
for (int64_t idx = 0, cnt = TestWorkerCnt; idx < cnt; ++idx) {
|
||||
TestWorker &w = workers[idx];
|
||||
w.join();
|
||||
}
|
||||
|
||||
err = svrfinder.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
err = worker_pool.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
//ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
138
unittest/obcdc/test_log_mysql_connector.cpp
Normal file
@ -0,0 +1,138 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <cstdlib>
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "lib/allocator/ob_malloc.h"
|
||||
#include "lib/net/ob_addr.h"
|
||||
|
||||
#include "ob_log_mysql_connector.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
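// PrintRow wraps a fixed query against __all_root_table: it drains the result
// set with next_row(), reads each column through the typed getters, and maps
// the terminating OB_ITER_END back to OB_SUCCESS.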
class PrintRow : public MySQLQueryBase
|
||||
{
|
||||
public:
|
||||
PrintRow()
|
||||
{
|
||||
sql_ = "select table_id, partition_id, ip, port, role "
|
||||
"from __all_root_table";
|
||||
sql_len_ = strlen(sql_);
|
||||
}
|
||||
int print_row()
|
||||
{
|
||||
int ret = common::OB_SUCCESS;
|
||||
while (common::OB_SUCCESS == (ret = next_row())) {
|
||||
uint64_t table_id = 0;
|
||||
int32_t partition_id = 0;
|
||||
ObAddr addr;
|
||||
if (OB_SUCC(ret)) {
|
||||
if (common::OB_SUCCESS != (ret = get_uint(0, table_id))) {
|
||||
OBLOG_LOG(WARN, "err get uint", K(ret));
|
||||
}
|
||||
}
|
||||
if (OB_SUCC(ret)) {
|
||||
int64_t val = 0;
|
||||
if (common::OB_SUCCESS != (ret = get_int(1, val))) {
|
||||
OBLOG_LOG(WARN, "err get int", K(ret));
|
||||
} else {
|
||||
partition_id = static_cast<int32_t>(val);
|
||||
}
|
||||
}
|
||||
if (OB_SUCC(ret)) {
|
||||
ObString ip_str;
|
||||
int64_t port = 0;
|
||||
if (common::OB_SUCCESS != (ret = get_varchar(2, ip_str))) {
|
||||
OBLOG_LOG(WARN, "err get var char", K(ret));
|
||||
} else if (common::OB_SUCCESS != (ret = get_int(3, port))) {
|
||||
OBLOG_LOG(WARN, "err get int", K(ret));
|
||||
} else {
|
||||
addr.set_ip_addr(ip_str, static_cast<int32_t>(port));
|
||||
}
|
||||
}
|
||||
// Print values.
|
||||
if (OB_SUCC(ret)) {
|
||||
OBLOG_LOG(INFO, "\n>>>", K(table_id),
|
||||
K(partition_id),
|
||||
K(addr));
|
||||
}
|
||||
}
|
||||
ret = (common::OB_ITER_END == ret) ? common::OB_SUCCESS : ret;
|
||||
return ret;
|
||||
}
|
||||
};
|
||||
|
||||
class CreateTable : public MySQLQueryBase
|
||||
{
|
||||
public:
|
||||
CreateTable(const char *tname)
|
||||
{
|
||||
snprintf(buf_, 512, "create table %s(c1 int primary key)", tname);
|
||||
sql_ = buf_;
|
||||
sql_len_ = strlen(sql_);
|
||||
}
|
||||
private:
|
||||
char buf_[512];
|
||||
};
|
||||
|
||||
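// This connector test points at a hard-coded MySQL endpoint, and main() keeps
// the gtest filter at DO_NOT_RUN, so it is effectively a manual test that must
// be run against a live server.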
TEST(MySQLConnector, run)
|
||||
{
|
||||
ConnectorConfig cfg;
|
||||
cfg.mysql_addr_ = "10.210.177.162";
|
||||
cfg.mysql_port_ = 26556;
|
||||
cfg.mysql_user_ = "root";
|
||||
cfg.mysql_password_ = "";
|
||||
cfg.mysql_db_ = "oceanbase";
|
||||
cfg.mysql_timeout_ = 100;
|
||||
|
||||
ObLogMySQLConnector conn;
|
||||
|
||||
int ret = conn.init(cfg);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
|
||||
// Print rows.
|
||||
PrintRow pr;
|
||||
ret = conn.query(pr);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
ret = pr.print_row();
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
|
||||
// Create dup tables.
|
||||
CreateTable ct("table_1");
|
||||
ret = conn.exec(ct);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
ret = conn.exec(ct);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
|
||||
ret = conn.destroy();
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
oceanbase::common::ObLogger::get_logger().set_log_level("debug");
|
||||
testing::InitGoogleTest(&argc,argv);
|
||||
testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
46
unittest/obcdc/test_log_part_mgr.cpp
Normal file
@ -0,0 +1,46 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <cstdlib>
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "lib/allocator/ob_malloc.h"
|
||||
|
||||
#include "ob_log_part_mgr.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
|
||||
|
||||
TEST(ObLogPartMgr, Function1)
|
||||
{
|
||||
// -- TODO --
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
oceanbase::common::ObLogger::get_logger().set_log_level("debug");
|
||||
testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
158
unittest/obcdc/test_log_part_mgr_mock.h
Normal file
@ -0,0 +1,158 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "ob_log_part_mgr.h" // ObLogPartMgr
|
||||
|
||||
using namespace oceanbase::common;
|
||||
using namespace oceanbase::liboblog;
|
||||
using namespace oceanbase::transaction;
|
||||
|
||||
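// Mock IObLogPartMgr for TransCtx tests: every operation succeeds, and a partition transaction
// is considered served only if its prepare log timestamp is not less than START_TIMESTAMP.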
class MockObLogPartMgr : public IObLogPartMgr
|
||||
{
|
||||
public:
|
||||
static const int64_t START_TIMESTAMP = 1452763440;
|
||||
static const int64_t CUR_SCHEMA_VERSION = 100;
|
||||
|
||||
MockObLogPartMgr(): start_tstamp_(START_TIMESTAMP), cur_schema_version_(CUR_SCHEMA_VERSION)
|
||||
{ }
|
||||
|
||||
~MockObLogPartMgr()
|
||||
{ }
|
||||
|
||||
virtual int add_table(const uint64_t table_id,
|
||||
const int64_t start_schema_version,
|
||||
const int64_t start_server_tstamp,
|
||||
const int64_t timeout)
|
||||
{
|
||||
UNUSED(table_id);
|
||||
UNUSED(start_schema_version);
|
||||
UNUSED(start_server_tstamp);
|
||||
UNUSED(timeout);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
|
||||
virtual int drop_table(const uint64_t table_id,
|
||||
const int64_t schema_version_before_drop,
|
||||
const int64_t schema_version_after_drop,
|
||||
const int64_t timeout)
|
||||
{
|
||||
UNUSED(table_id);
|
||||
UNUSED(schema_version_before_drop);
|
||||
UNUSED(schema_version_after_drop);
|
||||
UNUSED(timeout);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
|
||||
virtual int drop_tenant(const uint64_t tenant_id,
|
||||
const int64_t schema_version_before_drop,
|
||||
const int64_t schema_version_after_drop,
|
||||
const int64_t timeout)
|
||||
{
|
||||
UNUSED(tenant_id);
|
||||
UNUSED(schema_version_before_drop);
|
||||
UNUSED(schema_version_after_drop);
|
||||
UNUSED(timeout);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
|
||||
virtual int drop_database(const uint64_t database_id,
|
||||
const int64_t schema_version_before_drop,
|
||||
const int64_t schema_version_after_drop,
|
||||
const int64_t timeout)
|
||||
{
|
||||
UNUSED(database_id);
|
||||
UNUSED(schema_version_before_drop);
|
||||
UNUSED(schema_version_after_drop);
|
||||
UNUSED(timeout);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
|
||||
virtual int add_all_tables(const int64_t schema_version, const int64_t start_tstamp)
|
||||
{
|
||||
UNUSED(schema_version);
|
||||
UNUSED(start_tstamp);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
|
||||
virtual int update_schema_version(const int64_t schema_version)
|
||||
{
|
||||
UNUSED(schema_version);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int inc_part_trans_count_on_serving(bool &is_serving,
|
||||
const ObPartitionKey &key,
|
||||
const uint64_t prepare_log_id,
|
||||
const int64_t prepare_log_timestamp,
|
||||
const int64_t timeout)
|
||||
{
|
||||
if (prepare_log_timestamp < start_tstamp_) {
|
||||
// If the Prepare log timestamp is less than the start timestamp, it must not be served
|
||||
is_serving = false;
|
||||
} else {
|
||||
is_serving = true;
|
||||
}
|
||||
|
||||
UNUSED(key);
|
||||
UNUSED(prepare_log_id);
|
||||
UNUSED(timeout);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
|
||||
virtual int dec_part_trans_count(const ObPartitionKey &key)
|
||||
{
|
||||
UNUSED(key);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int update_part_info(const ObPartitionKey &pkey, const uint64_t start_log_id)
|
||||
{
|
||||
UNUSED(pkey);
|
||||
UNUSED(start_log_id);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int table_group_match(const char *pattern, bool &is_matched,
|
||||
int fnmatch_flags = FNM_CASEFOLD)
|
||||
{
|
||||
UNUSED(pattern);
|
||||
UNUSED(is_matched);
|
||||
UNUSED(fnmatch_flags);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int get_table_groups(std::vector<std::string> &table_groups)
|
||||
{
|
||||
UNUSED(table_groups);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int register_part_add_callback(PartAddCallback *callback)
|
||||
{
|
||||
UNUSED(callback);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int register_part_rm_callback(PartRMCallback *callback)
|
||||
{
|
||||
UNUSED(callback);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual int register_part_recycle_callback(PartRecycleCallback *callback)
|
||||
{
|
||||
UNUSED(callback);
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
virtual void print_part_info() {}
|
||||
|
||||
private:
|
||||
int64_t start_tstamp_;
|
||||
int64_t cur_schema_version_;
|
||||
|
||||
private:
|
||||
DISALLOW_COPY_AND_ASSIGN(MockObLogPartMgr);
|
||||
};
|
||||
248
unittest/obcdc/test_log_sql_server_provider.cpp
Normal file
@ -0,0 +1,248 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#define USING_LOG_PREFIX OBLOG
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "share/ob_web_service_root_addr.h" // to_json
|
||||
#include "lib/oblog/ob_log_module.h"
|
||||
#include "lib/string/ob_sql_string.h" // ObSqlString
|
||||
|
||||
#include "ob_log_sql_server_provider.h"
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
using namespace common;
|
||||
using namespace share;
|
||||
|
||||
namespace liboblog
|
||||
{
|
||||
#define CONFIG_SERVER_PORT 6789
|
||||
#define CONFIG_SERVER_PORT_STR "6789"
|
||||
#define CONFIG_SERVER_IP "127.0.0.1"
|
||||
#define CONFIG_SERVER_PROGRAM "./fake_config_server"
|
||||
|
||||
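// Fixture: forks ./fake_config_server as a local config server, publishes an RS list to it,
// and checks that ObLogSQLServerProvider returns the expected servers.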
class TestLogSQLServerProvider : public ::testing::Test
|
||||
{
|
||||
static const int64_t MAX_JSON_BUFFER_SIZE = 1 << 10;
|
||||
static const int64_t MAX_CONFIG_URL_LENGTH = 1 << 10;
|
||||
static const int64_t MAX_APPNAME_LENGTH = 1 << 10;
|
||||
|
||||
public:
|
||||
TestLogSQLServerProvider() : rs_leader_(),
|
||||
rs_follower_1_(),
|
||||
rs_follower_2_(),
|
||||
service_pid_(0),
|
||||
server_provider_()
|
||||
{}
|
||||
|
||||
virtual void SetUp();
|
||||
virtual void TearDown();
|
||||
|
||||
void set_rs_list(const ObRootAddrList &rs_list);
|
||||
|
||||
protected:
|
||||
ObRootAddr rs_leader_;
|
||||
ObRootAddr rs_follower_1_;
|
||||
ObRootAddr rs_follower_2_;
|
||||
|
||||
pid_t service_pid_;
|
||||
ObLogSQLServerProvider server_provider_;
|
||||
|
||||
char appname_[MAX_APPNAME_LENGTH];
|
||||
char config_url_[MAX_CONFIG_URL_LENGTH];
|
||||
char json_buffer_[MAX_JSON_BUFFER_SIZE];
|
||||
};
|
||||
|
||||
void TestLogSQLServerProvider::SetUp()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
const char *config_url_arbitrary_str = "i_am_an_arbitrary_string/version=1";
|
||||
const char *appname_str = "test";
|
||||
|
||||
// Constructing the ConfigURL
|
||||
// Note that only the "http://IP:PORT" part of the URL needs to be reachable; the rest is an arbitrary string
|
||||
(void)snprintf(config_url_, sizeof(config_url_), "http://%s:%d/%s",
|
||||
CONFIG_SERVER_IP, CONFIG_SERVER_PORT, config_url_arbitrary_str);
|
||||
|
||||
(void)snprintf(appname_, sizeof(appname_), "%s", appname_str);
|
||||
|
||||
// Fork a child process that simulates the config server
|
||||
pid_t pid = fork();
|
||||
if (0 == pid) {
|
||||
// Set the child process to a new child process group to facilitate KILL
|
||||
ret = setpgid(pid, pid);
|
||||
if (ret < 0) {
|
||||
LOG_ERROR("setpgid failed", K(errno));
|
||||
} else if (-1 == (ret = execl(
|
||||
"/bin/bash", CONFIG_SERVER_PROGRAM, CONFIG_SERVER_PROGRAM, CONFIG_SERVER_PORT_STR, (char *)NULL))) {
|
||||
LOG_ERROR("execl failed", K(errno));
|
||||
}
|
||||
exit(1);
|
||||
} else if (-1 == pid) {
|
||||
LOG_ERROR("fork failed", K(errno));
|
||||
} else {
|
||||
LOG_INFO("create child", K(pid));
|
||||
service_pid_ = pid;
|
||||
|
||||
// Wait for the child process to start executing.
|
||||
usleep(100000);
|
||||
}
|
||||
|
||||
// init rs addr list
|
||||
ObSEArray<ObRootAddr, 3> rs_list;
|
||||
|
||||
rs_leader_.server_.set_ip_addr("10.210.170.11", 100);
|
||||
rs_leader_.role_ = LEADER;
|
||||
rs_leader_.sql_port_ = 2828;
|
||||
rs_list.push_back(rs_leader_);
|
||||
|
||||
rs_follower_1_.server_.set_ip_addr("10.210.170.16", 200);
|
||||
rs_follower_1_.role_ = FOLLOWER;
|
||||
rs_follower_1_.sql_port_ = 3838;
|
||||
rs_list.push_back(rs_follower_1_);
|
||||
|
||||
rs_follower_2_.server_.set_ip_addr("10.210.180.96", 300);
|
||||
rs_follower_2_.role_ = FOLLOWER;
|
||||
rs_follower_2_.sql_port_ = 4848;
|
||||
rs_list.push_back(rs_follower_2_);
|
||||
|
||||
// Setting up the Rootserver list
|
||||
set_rs_list(rs_list);
|
||||
|
||||
// init Server Provider
|
||||
ret = server_provider_.init(config_url_, appname_);
|
||||
ASSERT_EQ(OB_SUCCESS, ret);
|
||||
|
||||
// The provider should see all 3 rootservers after init
|
||||
ObAddr server;
|
||||
EXPECT_EQ(3, server_provider_.get_server_count());
|
||||
EXPECT_EQ(OB_SUCCESS, server_provider_.get_server(0, server));
|
||||
EXPECT_EQ(rs_leader_.server_.get_ipv4(), server.get_ipv4());
|
||||
EXPECT_EQ(rs_leader_.sql_port_, server.get_port());
|
||||
EXPECT_EQ(OB_SUCCESS, server_provider_.get_server(1, server));
|
||||
EXPECT_EQ(rs_follower_1_.server_.get_ipv4(), server.get_ipv4());
|
||||
EXPECT_EQ(rs_follower_1_.sql_port_, server.get_port());
|
||||
EXPECT_EQ(OB_SUCCESS, server_provider_.get_server(2, server));
|
||||
EXPECT_EQ(rs_follower_2_.server_.get_ipv4(), server.get_ipv4());
|
||||
EXPECT_EQ(rs_follower_2_.sql_port_, server.get_port());
|
||||
EXPECT_EQ(OB_ENTRY_NOT_EXIST, server_provider_.get_server(3, server));
|
||||
}
|
||||
|
||||
void TestLogSQLServerProvider::TearDown()
|
||||
{
|
||||
int status = 0;
|
||||
int64_t orig_server_count = server_provider_.get_server_count();
|
||||
|
||||
// Sends SIGINT to all processes in the process group of the child process
|
||||
kill(-service_pid_, SIGINT);
|
||||
|
||||
pid_t pid = wait(&status);
|
||||
LOG_INFO("child exit", K(pid));
|
||||
|
||||
// Refresh fails when the config server no longer exists
|
||||
EXPECT_NE(OB_SUCCESS, server_provider_.refresh_server_list());
|
||||
|
||||
// A failed refresh must not modify the previously fetched server list
|
||||
EXPECT_EQ(orig_server_count, server_provider_.get_server_count());
|
||||
}
|
||||
|
||||
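// Publish the given RS list to the fake config server by POSTing its JSON form with nc.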
void TestLogSQLServerProvider::set_rs_list(const ObRootAddrList &rs_list)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObSqlString cmd;
|
||||
ObSqlString json;
|
||||
|
||||
usleep(50000);
|
||||
const int64_t cluster_id = 100;
|
||||
ret = ObWebServiceRootAddr::to_json(rs_list, appname_, cluster_id, json);
|
||||
ASSERT_EQ(OB_SUCCESS, ret);
|
||||
LOG_INFO("to_json", K(json));
|
||||
|
||||
ret = cmd.assign_fmt("echo -n 'POST / HTTP/1.1\r\nContent-Length: %ld\r\n%s' | nc %s %d &> /dev/null",
|
||||
json.length(), json.ptr(), CONFIG_SERVER_IP, CONFIG_SERVER_PORT);
|
||||
ASSERT_EQ(OB_SUCCESS, ret);
|
||||
|
||||
ret = system(cmd.ptr());
|
||||
usleep(50000);
|
||||
}
|
||||
|
||||
TEST_F(TestLogSQLServerProvider, fetch)
|
||||
{
|
||||
ObAddr server;
|
||||
ObSEArray<ObRootAddr, 16> rs_list;
|
||||
|
||||
// Test zero RS
|
||||
rs_list.reuse();
|
||||
set_rs_list(rs_list); // Set up a new RS list
|
||||
ASSERT_EQ(OB_SUCCESS, server_provider_.refresh_server_list()); // Refresh RS list
|
||||
EXPECT_EQ(0, server_provider_.get_server_count());
|
||||
EXPECT_EQ(OB_ENTRY_NOT_EXIST, server_provider_.get_server(0, server));
|
||||
|
||||
// Test one RS
|
||||
rs_list.reuse();
|
||||
rs_list.push_back(rs_leader_);
|
||||
set_rs_list(rs_list); // Set up a new RS list
|
||||
ASSERT_EQ(OB_SUCCESS, server_provider_.refresh_server_list()); // Refresh RS list
|
||||
EXPECT_EQ(1, server_provider_.get_server_count());
|
||||
EXPECT_EQ(OB_SUCCESS, server_provider_.get_server(0, server));
|
||||
EXPECT_EQ(rs_leader_.server_.get_ipv4(), server.get_ipv4());
|
||||
EXPECT_EQ(rs_leader_.sql_port_, server.get_port()); // Server SQL port
|
||||
|
||||
EXPECT_EQ(OB_ENTRY_NOT_EXIST, server_provider_.get_server(1, server));
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, server_provider_.get_server(-1, server));
|
||||
|
||||
// Test two RS
|
||||
rs_list.reuse();
|
||||
rs_list.push_back(rs_leader_);
|
||||
rs_list.push_back(rs_follower_1_);
|
||||
set_rs_list(rs_list); // Set up a new RS list
|
||||
ASSERT_EQ(OB_SUCCESS, server_provider_.refresh_server_list()); // Refresh RS list
|
||||
EXPECT_EQ(2, server_provider_.get_server_count());
|
||||
EXPECT_EQ(OB_SUCCESS, server_provider_.get_server(0, server));
|
||||
EXPECT_EQ(rs_leader_.server_.get_ipv4(), server.get_ipv4());
|
||||
EXPECT_EQ(rs_leader_.sql_port_, server.get_port());
|
||||
EXPECT_EQ(OB_SUCCESS, server_provider_.get_server(1, server));
|
||||
EXPECT_EQ(rs_follower_1_.server_.get_ipv4(), server.get_ipv4());
|
||||
EXPECT_EQ(rs_follower_1_.sql_port_, server.get_port());
|
||||
EXPECT_EQ(OB_ENTRY_NOT_EXIST, server_provider_.get_server(2, server));
|
||||
|
||||
// Test three RS
|
||||
rs_list.reuse();
|
||||
rs_list.push_back(rs_leader_);
|
||||
rs_list.push_back(rs_follower_1_);
|
||||
rs_list.push_back(rs_follower_2_);
|
||||
set_rs_list(rs_list); // Set up a new RS list
|
||||
ASSERT_EQ(OB_SUCCESS, server_provider_.refresh_server_list()); // Refresh RS list
|
||||
EXPECT_EQ(3, server_provider_.get_server_count());
|
||||
EXPECT_EQ(OB_SUCCESS, server_provider_.get_server(0, server));
|
||||
EXPECT_EQ(rs_leader_.server_.get_ipv4(), server.get_ipv4());
|
||||
EXPECT_EQ(rs_leader_.sql_port_, server.get_port());
|
||||
EXPECT_EQ(OB_SUCCESS, server_provider_.get_server(1, server));
|
||||
EXPECT_EQ(rs_follower_1_.server_.get_ipv4(), server.get_ipv4());
|
||||
EXPECT_EQ(rs_follower_1_.sql_port_, server.get_port());
|
||||
EXPECT_EQ(OB_SUCCESS, server_provider_.get_server(2, server));
|
||||
EXPECT_EQ(rs_follower_2_.server_.get_ipv4(), server.get_ipv4());
|
||||
EXPECT_EQ(rs_follower_2_.sql_port_, server.get_port());
|
||||
EXPECT_EQ(OB_ENTRY_NOT_EXIST, server_provider_.get_server(3, server));
|
||||
}
|
||||
|
||||
} // end namespace liboblog
|
||||
} // end namespace oceanbase
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
oceanbase::common::ObLogger::get_logger().set_log_level("INFO");
|
||||
OB_LOGGER.set_log_level("INFO");
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
123
unittest/obcdc/test_log_svr_blacklist.cpp
Normal file
@ -0,0 +1,123 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "share/ob_define.h"
|
||||
#include "lib/oblog/ob_log.h"
|
||||
#include "obcdc/src/ob_log_svr_blacklist.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
class SvrBlacklist : public ::testing::Test
|
||||
{
|
||||
public:
|
||||
virtual void SetUp() {}
|
||||
virtual void TearDown() {}
|
||||
};
|
||||
|
||||
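// An empty ("|") blacklist must not match any server.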
TEST(SvrBlacklist, BasicTest1)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
ObLogSvrBlacklist svr_blacklist;
|
||||
const char *svr_blacklist_str ="|";
|
||||
const bool is_sql_server = false;
|
||||
|
||||
err = svr_blacklist.init(svr_blacklist_str, is_sql_server);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
ObAddr svr1(ObAddr::IPV4, "127.0.0.1", 2880);
|
||||
EXPECT_FALSE(svr_blacklist.is_exist(svr1));
|
||||
ObAddr svr2(ObAddr::IPV4, "127.0.0.2", 2881);
|
||||
EXPECT_FALSE(svr_blacklist.is_exist(svr2));
|
||||
ObAddr svr3(ObAddr::IPV4, "127.0.0.3", 2882);
|
||||
EXPECT_FALSE(svr_blacklist.is_exist(svr3));
|
||||
|
||||
ObAddr svr4(ObAddr::IPV4, "127.0.0.1", 2881);
|
||||
EXPECT_FALSE(svr_blacklist.is_exist(svr4));
|
||||
ObAddr svr5(ObAddr::IPV4, "127.0.0.4", 2881);
|
||||
EXPECT_FALSE(svr_blacklist.is_exist(svr5));
|
||||
|
||||
svr_blacklist.destroy();
|
||||
}
|
||||
|
||||
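// Single-entry blacklist, then refresh() with a three-entry list: all three servers should match afterwards.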
TEST(SvrBlacklist, BasicTest2)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
|
||||
ObLogSvrBlacklist svr_blacklist;
|
||||
const char *svr_blacklist_str ="127.0.0.1:2880";
|
||||
const bool is_sql_server = false;
|
||||
|
||||
err = svr_blacklist.init(svr_blacklist_str, is_sql_server);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
ObAddr svr1(ObAddr::IPV4, "127.0.0.1", 2880);
|
||||
EXPECT_TRUE(svr_blacklist.is_exist(svr1));
|
||||
ObAddr svr2(ObAddr::IPV4, "127.0.0.2", 2881);
|
||||
EXPECT_FALSE(svr_blacklist.is_exist(svr2));
|
||||
ObAddr svr3(ObAddr::IPV4, "127.0.0.3", 2882);
|
||||
EXPECT_FALSE(svr_blacklist.is_exist(svr3));
|
||||
|
||||
const char *svr_blacklist_str2="127.0.0.1:2880|127.0.0.2:2881|127.0.0.3:2882";
|
||||
svr_blacklist.refresh(svr_blacklist_str2);
|
||||
EXPECT_TRUE(svr_blacklist.is_exist(svr1));
|
||||
EXPECT_TRUE(svr_blacklist.is_exist(svr2));
|
||||
EXPECT_TRUE(svr_blacklist.is_exist(svr3));
|
||||
|
||||
svr_blacklist.destroy();
|
||||
}
|
||||
|
||||
TEST(SvrBlacklist, BasicTest3)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
|
||||
ObLogSvrBlacklist svr_blacklist;
|
||||
const char *svr_blacklist_str ="127.0.0.1:2880|127.0.0.2:2881|127.0.0.3:2882";
|
||||
const bool is_sql_server = false;
|
||||
|
||||
err = svr_blacklist.init(svr_blacklist_str, is_sql_server);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
ObAddr svr1(ObAddr::IPV4, "127.0.0.1", 2880);
|
||||
EXPECT_TRUE(svr_blacklist.is_exist(svr1));
|
||||
ObAddr svr2(ObAddr::IPV4, "127.0.0.2", 2881);
|
||||
EXPECT_TRUE(svr_blacklist.is_exist(svr2));
|
||||
ObAddr svr3(ObAddr::IPV4, "127.0.0.3", 2882);
|
||||
EXPECT_TRUE(svr_blacklist.is_exist(svr3));
|
||||
|
||||
ObAddr svr4(ObAddr::IPV4, "127.0.0.1", 2881);
|
||||
EXPECT_FALSE(svr_blacklist.is_exist(svr4));
|
||||
ObAddr svr5(ObAddr::IPV4, "127.0.0.4", 2881);
|
||||
EXPECT_FALSE(svr_blacklist.is_exist(svr5));
|
||||
|
||||
svr_blacklist.destroy();
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret = 1;
|
||||
ObLogger &logger = ObLogger::get_logger();
|
||||
logger.set_file_name("test_ob_log_svr_blacklist.log", true);
|
||||
logger.set_log_level(OB_LOG_LEVEL_INFO);
|
||||
testing::InitGoogleTest(&argc,argv);
|
||||
ret = RUN_ALL_TESTS();
|
||||
return ret;
|
||||
}
|
||||
259
unittest/obcdc/test_log_table_matcher.cpp
Normal file
@ -0,0 +1,259 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <fnmatch.h>
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include "share/ob_define.h"
|
||||
|
||||
#include "obcdc/src/ob_log_table_matcher.h"
|
||||
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
/*
|
||||
* TEST1.
|
||||
* Test fnmatch.
|
||||
* Used to study fnmatch().
|
||||
*/
|
||||
/*
|
||||
* Test Functions.
|
||||
* fnmatch Prototype:
|
||||
* int fnmatch(const char *pattern, const char *string, int flags);
|
||||
*/
|
||||
void CASE_MATCH(const char *pattern, const char *string, int flags = 0)
|
||||
{
|
||||
int err = fnmatch(pattern, string, flags);
|
||||
EXPECT_EQ(0, err);
|
||||
fprintf(stderr, ">>> %s: \t\"%s\" -> \"%s\"\n",
|
||||
(0 == err) ? "MATCH" : "NOMATCH",
|
||||
pattern, string);
|
||||
}
|
||||
|
||||
void CASE_NOMATCH(const char *pattern, const char *string, int flags = 0)
|
||||
{
|
||||
int err = fnmatch(pattern, string, flags);
|
||||
EXPECT_EQ(FNM_NOMATCH, err);
|
||||
fprintf(stderr, ">>> %s: \t\"%s\" -> \"%s\"\n",
|
||||
(0 == err) ? "MATCH" : "NOMATCH",
|
||||
pattern, string);
|
||||
}
|
||||
TEST(DISABLED_TableMatcher, Fnmatch1)
|
||||
{
|
||||
CASE_MATCH("sky*", "SkyBlue", FNM_CASEFOLD);
|
||||
CASE_NOMATCH("sky*[!e]", "SkyBlue", FNM_CASEFOLD);
|
||||
CASE_MATCH("ab\\0c", "ab\\0c");
|
||||
}
|
||||
|
||||
/*
|
||||
* TEST2.
|
||||
* Test TableMatcher.
|
||||
*/
|
||||
TEST(TableMatcher, BasicTest1)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
ObLogTableMatcher matcher;
|
||||
const char *tb_whilte_list="TN1.DB-A*.table_1*|"
|
||||
"TN2.DB-A*.TABLE_2*|"
|
||||
"tn3.db-a*.table_*_tmp";
|
||||
const char *tb_black_list="|";
|
||||
const char *tg_whilte_list="*.*";
|
||||
const char *tg_black_list="|";
|
||||
|
||||
err = matcher.init(tb_white_list, tb_black_list, tg_white_list, tg_black_list);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
int flag = FNM_CASEFOLD;
|
||||
|
||||
// Test match.
|
||||
bool matched = false;
|
||||
err = matcher.table_match("tn1", "db-a-1", "table_1_1", matched, flag);
|
||||
EXPECT_TRUE(matched);
|
||||
|
||||
err = matcher.table_match("tn1", "db-b-1", "table_1_1", matched, flag);
|
||||
EXPECT_FALSE(matched);
|
||||
|
||||
err = matcher.table_match("tn3", "db-a-2", "table_1_tmp", matched, flag);
|
||||
EXPECT_TRUE(matched);
|
||||
|
||||
matcher.destroy();
|
||||
}
|
||||
|
||||
/*
|
||||
* TEST3.
|
||||
* Test TableMatcher static match.
|
||||
*/
|
||||
TEST(TableMatcher, BasicTest2)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
const char *tb_whilte_list="*.*.*";
|
||||
const char *tb_black_list="|";
|
||||
const char *tg_whilte_list="*.*";
|
||||
const char *tg_black_list="|";
|
||||
|
||||
ObLogTableMatcher matcher;
|
||||
|
||||
err = matcher.init(tb_white_list, tb_black_list, tg_white_list, tg_black_list);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
int flag = FNM_CASEFOLD;
|
||||
|
||||
// Case 1. Match.
|
||||
{
|
||||
const char *pattern1 = "tn1.db1*|tn2.db2*|tn3.db3*|tn4.db4*";
|
||||
ObArray<ObString> pattern2;
|
||||
err = pattern2.push_back(ObString("tn1.db1"));
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = pattern2.push_back(ObString("tnx.dbx"));
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
bool matched = false;
|
||||
err = matcher.match(pattern1, pattern2, matched, flag);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
EXPECT_TRUE(matched);
|
||||
}
|
||||
|
||||
// Case 2. No match.
|
||||
{
|
||||
const char *pattern1 = "tn1.db1*|tn2.db2*|tn3.db3*|tn4.db4*";
|
||||
ObArray<ObString> pattern2;
|
||||
err = pattern2.push_back(ObString("tnx.dbx"));
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = pattern2.push_back(ObString("tny.dby"));
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
bool matched = false;
|
||||
err = matcher.match(pattern1, pattern2, matched, flag);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
EXPECT_FALSE(matched);
|
||||
}
|
||||
|
||||
// Case 3. Empty pattern1.
|
||||
{
|
||||
const char *pattern1 = "";
|
||||
ObArray<ObString> pattern2;
|
||||
err = pattern2.push_back(ObString("tnx.dbx"));
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = pattern2.push_back(ObString("tny.dby"));
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
bool matched = false;
|
||||
err = matcher.match(pattern1, pattern2, matched, flag);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
EXPECT_FALSE(matched);
|
||||
}
|
||||
|
||||
// Case 4. Invalid pattern1.
|
||||
{
|
||||
const char *pattern1 = "|";
|
||||
ObArray<ObString> pattern2;
|
||||
err = pattern2.push_back(ObString("tnx.dbx"));
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = pattern2.push_back(ObString("tny.dby"));
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
bool matched = false;
|
||||
err = matcher.match(pattern1, pattern2, matched, flag);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
EXPECT_FALSE(matched);
|
||||
}
|
||||
|
||||
matcher.destroy();
|
||||
}
|
||||
|
||||
// test tablegroup
|
||||
TEST(TableMatcher, BasicTest3)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
ObLogTableMatcher matcher;
|
||||
const char *tb_whilte_list="*.*.*";
|
||||
const char *tb_black_list="|";
|
||||
const char *tg_whilte_list="tt1.alitg*";
|
||||
const char *tg_black_list="|";
|
||||
|
||||
err = matcher.init(tb_white_list, tb_black_list, tg_white_list, tg_black_list);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
int flag = FNM_CASEFOLD;
|
||||
|
||||
// Test match.
|
||||
bool matched = false;
|
||||
err = matcher.tablegroup_match("tt1", "alitg1", matched, flag);
|
||||
EXPECT_TRUE(matched);
|
||||
|
||||
err = matcher.tablegroup_match("tt1", "alitg2", matched, flag);
|
||||
EXPECT_TRUE(matched);
|
||||
|
||||
err = matcher.tablegroup_match("tt1", "alipaytg", matched, flag);
|
||||
EXPECT_FALSE(matched);
|
||||
|
||||
err = matcher.tablegroup_match("tt2", "alitg1", matched, flag);
|
||||
EXPECT_FALSE(matched);
|
||||
|
||||
matcher.destroy();
|
||||
}
|
||||
|
||||
TEST(TableMatcher, BasicTest4)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
ObLogTableMatcher matcher;
|
||||
const char *tb_whilte_list="*.*.*";
|
||||
const char *tb_black_list="|";
|
||||
const char *tg_whilte_list="tt1.alitg*|tt1.anttg*";
|
||||
const char *tg_black_list="tt1.alitg*";
|
||||
|
||||
err = matcher.init(tb_white_list, tb_black_list, tg_white_list, tg_black_list);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
int flag = FNM_CASEFOLD;
|
||||
// Whitelist matches, but blacklist does not
|
||||
|
||||
// Test match.
|
||||
bool matched = false;
|
||||
err = matcher.tablegroup_match("tt1", "alitg1", matched, flag);
|
||||
EXPECT_FALSE(matched);
|
||||
|
||||
err = matcher.tablegroup_match("tt1", "alitg2", matched, flag);
|
||||
EXPECT_FALSE(matched);
|
||||
|
||||
err = matcher.tablegroup_match("tt1", "anttg1", matched, flag);
|
||||
EXPECT_TRUE(matched);
|
||||
|
||||
err = matcher.tablegroup_match("tt1", "anttghello", matched, flag);
|
||||
EXPECT_TRUE(matched);
|
||||
|
||||
err = matcher.tablegroup_match("tt2", "anttghello", matched, flag);
|
||||
EXPECT_FALSE(matched);
|
||||
|
||||
matcher.destroy();
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
//ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
ObLogger &logger = ObLogger::get_logger();
|
||||
logger.set_file_name("test_ob_log_table_match.log", true);
|
||||
logger.set_log_level(OB_LOG_LEVEL_INFO);
|
||||
testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
137
unittest/obcdc/test_log_task_pool.cpp
Normal file
@ -0,0 +1,137 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <cstdlib>
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "lib/allocator/ob_malloc.h"
|
||||
#include "lib/allocator/ob_concurrent_fifo_allocator.h"
|
||||
|
||||
#include "obcdc/src/ob_log_task_pool.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
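// Minimal task type for ObLogTransTaskPool: allocator/page/pkey hooks are all no-ops.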
class MockTransTask : public TransTaskBase<MockTransTask>
|
||||
{
|
||||
public:
|
||||
void foo() { bar_ += 1; }
|
||||
|
||||
void set_allocator(int64_t page_size, common::ObIAllocator &large_allocator)
|
||||
{
|
||||
UNUSED(page_size);
|
||||
UNUSED(large_allocator);
|
||||
}
|
||||
|
||||
void set_prealloc_page(void *page)
|
||||
{
|
||||
UNUSED(page);
|
||||
}
|
||||
|
||||
void revert_prealloc_page(void *page)
|
||||
{
|
||||
UNUSED(page);
|
||||
}
|
||||
|
||||
void set_pkey_info(const common::ObPartitionKey &partition,
|
||||
const char *pkey_str)
|
||||
{
|
||||
UNUSED(partition);
|
||||
UNUSED(pkey_str);
|
||||
}
|
||||
|
||||
private:
|
||||
int64_t bar_;
|
||||
};
|
||||
|
||||
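// Allocate 32K tasks from the pool, revert them all, then destroy the pool.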
TEST(ObLogTransTaskPool, Function1)
|
||||
{
|
||||
const int64_t task_cnt = 1024 * 32;
|
||||
|
||||
ObConcurrentFIFOAllocator fifo;
|
||||
int64_t G = 1024 * 1024 * 1024;
|
||||
fifo.init(1 * G, 1 * G, OB_MALLOC_BIG_BLOCK_SIZE);
|
||||
|
||||
ObLogTransTaskPool<MockTransTask> pool;
|
||||
|
||||
int ret = pool.init(&fifo, 1024 * 8, 1024, true, 1024);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
|
||||
MockTransTask **tasks = new MockTransTask*[task_cnt];
|
||||
const char *part_info = "partition";
|
||||
ObPartitionKey pkey;
|
||||
|
||||
for (int64_t idx = 0; idx < task_cnt; ++idx) {
|
||||
tasks[idx] = pool.get(part_info, pkey);
|
||||
EXPECT_TRUE(NULL != tasks[idx]);
|
||||
}
|
||||
|
||||
for (int64_t idx = 0; idx < task_cnt; ++idx) {
|
||||
tasks[idx]->revert();
|
||||
}
|
||||
|
||||
pool.destroy();
|
||||
|
||||
delete []tasks;
|
||||
fifo.destroy();
|
||||
}
|
||||
|
||||
// 2 tasks not returned.
|
||||
TEST(ObLogTransTaskPool, Function2)
|
||||
{
|
||||
const int64_t task_cnt = 1024 * 32;
|
||||
|
||||
ObConcurrentFIFOAllocator fifo;
|
||||
int64_t G = 1024 * 1024 * 1024;
|
||||
fifo.init(1 * G, 1 * G, OB_MALLOC_BIG_BLOCK_SIZE);
|
||||
|
||||
ObLogTransTaskPool<MockTransTask> pool;
|
||||
|
||||
int ret = pool.init(&fifo, 1024 * 8, 1024, true, 1024);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
|
||||
MockTransTask **tasks = new MockTransTask*[task_cnt];
|
||||
const char *part_info = "partition";
|
||||
ObPartitionKey pkey;
|
||||
|
||||
for (int64_t idx = 0; idx < task_cnt; ++idx) {
|
||||
tasks[idx] = pool.get(part_info, pkey);
|
||||
EXPECT_TRUE(NULL != tasks[idx]);
|
||||
}
|
||||
|
||||
for (int64_t idx = 0; idx < task_cnt - 2; ++idx) {
|
||||
tasks[idx + 1]->revert();
|
||||
}
|
||||
|
||||
pool.destroy();
|
||||
|
||||
delete []tasks;
|
||||
fifo.destroy();
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
oceanbase::common::ObLogger::get_logger().set_log_level("debug");
|
||||
testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
722
unittest/obcdc/test_log_trans_ctx.cpp
Normal file
@ -0,0 +1,722 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include "ob_log_trans_ctx.h" // ObLogDepParser
|
||||
#include "common/ob_clock_generator.h" // ObClockGenerator
|
||||
#include "ob_log_trans_task.h" // PartTransTask
|
||||
#include "ob_log_trans_ctx_mgr.h" // ObLogTransCtxMgr
|
||||
#include "test_log_part_mgr_mock.h" // MockObLogPartMgr
|
||||
#include "ob_log_common.h" // MAX_CACHED_TRANS_CTX_COUNT
|
||||
|
||||
using namespace oceanbase::common;
|
||||
using namespace oceanbase::liboblog;
|
||||
using namespace oceanbase::transaction;
|
||||
|
||||
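// Fixture with helpers for building TransCtx / PartTransTask instances on top of
// MockObLogPartMgr and a freshly initialized ObLogTransCtxMgr.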
class TransCtxTest : public ::testing::Test
|
||||
{
|
||||
public:
|
||||
static const int64_t PART_TRANS_TASK_ARRAY_SIZE = 10;
|
||||
typedef ObSEArray<PartTransTask *, PART_TRANS_TASK_ARRAY_SIZE> PartTransTaskArray;
|
||||
|
||||
public:
|
||||
TransCtxTest();
|
||||
virtual ~TransCtxTest();
|
||||
virtual void SetUp();
|
||||
virtual void TearDown();
|
||||
|
||||
public:
|
||||
bool is_exist(const TransCtx::ReverseDepSet &reverse_dep_set, const ObTransID &trans_id) const
|
||||
{
|
||||
bool ret = false;
|
||||
TransCtx::ReverseDepSet::const_iterator_t itor = reverse_dep_set.begin();
|
||||
for (; itor != reverse_dep_set.end(); ++itor) {
|
||||
if (trans_id == *itor) {
|
||||
ret = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool is_exist(const TransCtx::TransIDArray &dep_parsed_reverse_deps,
|
||||
const ObTransID &trans_id) const
|
||||
{
|
||||
bool ret = false;
|
||||
ObTransID trans_id_cmp;
|
||||
for (int64_t index = 0; index < dep_parsed_reverse_deps.count(); index++) {
|
||||
EXPECT_EQ(OB_SUCCESS, dep_parsed_reverse_deps.at(index, trans_id_cmp));
|
||||
if (trans_id == trans_id_cmp) {
|
||||
ret = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
void init_trans_ctx(const ObTransID &trans_id, TransCtx *&trans_ctx, const bool enable_create)
|
||||
{
|
||||
EXPECT_TRUE(NULL != trans_ctx_mgr_);
|
||||
EXPECT_TRUE(OB_SUCCESS == trans_ctx_mgr_->get_trans_ctx(trans_id, trans_ctx, enable_create));
|
||||
EXPECT_TRUE(NULL != trans_ctx);
|
||||
EXPECT_TRUE(OB_SUCCESS == trans_ctx->set_trans_id(trans_id));
|
||||
EXPECT_TRUE(OB_SUCCESS == trans_ctx->set_state(TransCtx::TRANS_CTX_STATE_PARTICIPANT_READY));
|
||||
}
|
||||
|
||||
IObLogPartMgr *create_part_mgr()
|
||||
{
|
||||
IObLogPartMgr *part_mgr = NULL;
|
||||
if (NULL != (part_mgr = (MockObLogPartMgr *)ob_malloc(sizeof(MockObLogPartMgr),
|
||||
ObModIds::OB_LOG_PART_INFO))) {
|
||||
new(part_mgr)MockObLogPartMgr();
|
||||
}
|
||||
return part_mgr;
|
||||
}
|
||||
|
||||
IObLogTransCtxMgr *create_trans_mgr()
|
||||
{
|
||||
ObLogTransCtxMgr *tx_mgr = NULL;
|
||||
if (NULL != (tx_mgr = (ObLogTransCtxMgr *)ob_malloc(sizeof(ObLogTransCtxMgr),
|
||||
ObModIds::OB_LOG_TRANS_CTX))) {
|
||||
new(tx_mgr)ObLogTransCtxMgr();
|
||||
if (OB_SUCCESS != tx_mgr->init(MAX_CACHED_TRANS_CTX_COUNT)) {
|
||||
tx_mgr->~ObLogTransCtxMgr();
|
||||
ob_free(tx_mgr);
|
||||
tx_mgr = NULL;
|
||||
}
|
||||
}
|
||||
return tx_mgr;
|
||||
}
|
||||
|
||||
void destroy()
|
||||
{
|
||||
trans_ctx_.reset();
|
||||
trans_id_.reset();
|
||||
part_trans_task_.reset();
|
||||
if (NULL != part_mgr_) {
|
||||
part_mgr_->~IObLogPartMgr();
|
||||
ob_free(part_mgr_);
|
||||
part_mgr_ = NULL;
|
||||
}
|
||||
|
||||
if (NULL != trans_ctx_mgr_) {
|
||||
trans_ctx_mgr_->~IObLogTransCtxMgr();
|
||||
ob_free(trans_ctx_mgr_);
|
||||
trans_ctx_mgr_ = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void init_part_trans_task_array(PartTransTaskArray &array, const ObTransID &trans_id)
|
||||
{
|
||||
EXPECT_TRUE(trans_id.is_valid());
|
||||
|
||||
PartTransTask *part_trans_task = NULL;
|
||||
for (int i = 0; i < PART_TRANS_TASK_ARRAY_SIZE; i++) {
|
||||
init_part_trans_task(part_trans_task, trans_id);
|
||||
EXPECT_EQ(OB_SUCCESS, array.push_back(part_trans_task));
|
||||
}
|
||||
}
|
||||
|
||||
void free_part_trans_task_array(PartTransTaskArray &array)
|
||||
{
|
||||
PartTransTask *part_trans_task = NULL;
|
||||
for (int i = 0; i < PART_TRANS_TASK_ARRAY_SIZE; i++) {
|
||||
EXPECT_EQ(OB_SUCCESS, array.at(i, part_trans_task));
|
||||
free_part_trans_task(part_trans_task);
|
||||
}
|
||||
}
|
||||
|
||||
void init_part_trans_task(PartTransTask *&part_trans_task, const ObTransID &trans_id)
|
||||
{
|
||||
EXPECT_TRUE(trans_id.is_valid());
|
||||
|
||||
if (NULL != (part_trans_task = (PartTransTask *)ob_malloc(sizeof(PartTransTask),
|
||||
ObModIds::OB_LOG_PART_TRANS_TASK_SMALL))) {
|
||||
new(part_trans_task)PartTransTask();
|
||||
part_trans_task->set_trans_id(trans_id);
|
||||
part_trans_task->set_ref_cnt(0);
|
||||
part_trans_task->set_pool(NULL);
|
||||
}
|
||||
}
|
||||
|
||||
void free_part_trans_task(PartTransTask *part_trans_task)
|
||||
{
|
||||
if (NULL != part_trans_task) {
|
||||
part_trans_task->~PartTransTask();
|
||||
ob_free(part_trans_task);
|
||||
part_trans_task = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public:
|
||||
TransCtx trans_ctx_;
|
||||
ObTransID trans_id_;
|
||||
PartTransTask part_trans_task_;
|
||||
IObLogPartMgr *part_mgr_;
|
||||
IObLogTransCtxMgr *trans_ctx_mgr_;
|
||||
|
||||
private:
|
||||
// disallow copy
|
||||
DISALLOW_COPY_AND_ASSIGN(TransCtxTest);
|
||||
};
|
||||
|
||||
TransCtxTest::TransCtxTest(): trans_ctx_(),
|
||||
trans_id_(),
|
||||
part_trans_task_(),
|
||||
part_mgr_(NULL),
|
||||
trans_ctx_mgr_(NULL)
|
||||
{
|
||||
}
|
||||
|
||||
TransCtxTest::~TransCtxTest()
|
||||
{
|
||||
}
|
||||
|
||||
void TransCtxTest::SetUp()
|
||||
{
|
||||
const ObAddr svr(ObAddr::IPV4, "127.0.0.1", 1000);
|
||||
trans_id_ = ObTransID(svr);
|
||||
part_trans_task_.set_trans_id(trans_id_);
|
||||
EXPECT_TRUE(NULL != (part_mgr_ = create_part_mgr()));
|
||||
EXPECT_TRUE(NULL != (trans_ctx_mgr_ = create_trans_mgr()));
|
||||
trans_ctx_.set_host(trans_ctx_mgr_);
|
||||
}
|
||||
|
||||
void TransCtxTest::TearDown()
|
||||
{
|
||||
destroy();
|
||||
}
|
||||
|
||||
TEST_F(TransCtxTest, prepare_failed)
|
||||
{
|
||||
bool stop_flag = false;
|
||||
bool need_discard = false;
|
||||
IObLogPartMgr *part_mgr_null = NULL;
|
||||
|
||||
// 1. If part mgr is null
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, trans_ctx_.prepare(part_trans_task_, part_mgr_null, stop_flag,
|
||||
need_discard));
|
||||
|
||||
// 2. If the state is TRANS_CTX_STATE_DISCARDED, prepare returns an error
|
||||
trans_ctx_.set_state(TransCtx::TRANS_CTX_STATE_DISCARDED);
|
||||
EXPECT_EQ(OB_INVALID_ERROR, trans_ctx_.prepare(part_trans_task_, part_mgr_, stop_flag,
|
||||
need_discard));
|
||||
}
|
||||
|
||||
TEST_F(TransCtxTest, prepare_discard)
|
||||
{
|
||||
bool stop_flag = false;
|
||||
bool need_discard = false;
|
||||
const int64_t prepare_tstamp = 1452763000;
|
||||
|
||||
// prepare partition key
|
||||
ObPartitionKey partition_key_0;
|
||||
partition_key_0.init(1000000000, 0, 3);
|
||||
|
||||
// Make the prepare log timestamp less than the specified timestamp
|
||||
part_trans_task_.set_partition(partition_key_0);
|
||||
part_trans_task_.set_timestamp(prepare_tstamp);
|
||||
part_trans_task_.set_prepare_log_id(1);
|
||||
|
||||
trans_ctx_.set_state(TransCtx::TRANS_CTX_STATE_INVALID);
|
||||
|
||||
// Current transaction not in service, need discard
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_.prepare(part_trans_task_, part_mgr_, stop_flag, need_discard));
|
||||
EXPECT_TRUE(need_discard);
|
||||
}
|
||||
|
||||
TEST_F(TransCtxTest, prepare_success)
|
||||
{
|
||||
bool stop_flag = false;
|
||||
bool need_discard = false;
|
||||
const int64_t prepare_tstamp = 1452763900;
|
||||
|
||||
// prepare partition key
|
||||
ObPartitionKey partition_key_0;
|
||||
partition_key_0.init(1000000000, 0, 3);
|
||||
ObPartitionKey partition_key_1;
|
||||
partition_key_1.init(1000000000, 1, 3);
|
||||
ObPartitionKey partition_key_2;
|
||||
partition_key_2.init(1000000000, 2, 3);
|
||||
|
||||
// Only 2 of the 3 participants are served; verify that the served participants are obtained correctly
|
||||
// Make the prepare log timestamp greater than the specified timestamp
|
||||
part_trans_task_.set_partition(partition_key_0);
|
||||
part_trans_task_.set_timestamp(prepare_tstamp);
|
||||
part_trans_task_.set_prepare_log_id(1);
|
||||
ObPartitionLogInfo part_info_0(partition_key_0, 1, prepare_tstamp);
|
||||
ObPartitionLogInfo part_info_1(partition_key_1, 1, 1452763999);
|
||||
ObPartitionLogInfo part_info_2(partition_key_2, 1, 1452763000);
|
||||
PartitionLogInfoArray participants;
|
||||
EXPECT_EQ(OB_SUCCESS, participants.push_back(part_info_0));
|
||||
EXPECT_EQ(OB_SUCCESS, participants.push_back(part_info_1));
|
||||
EXPECT_EQ(OB_SUCCESS, participants.push_back(part_info_2));
|
||||
EXPECT_EQ(OB_SUCCESS, part_trans_task_.set_participants(participants));
|
||||
|
||||
trans_ctx_.set_state(TransCtx::TRANS_CTX_STATE_INVALID);
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_.prepare(part_trans_task_, part_mgr_, stop_flag, need_discard));
|
||||
EXPECT_FALSE(need_discard);
|
||||
const TransPartInfo *valid_participants = trans_ctx_.get_participants();
|
||||
int64_t valid_participant_count = trans_ctx_.get_participant_count();
|
||||
int64_t participants_count = valid_participant_count;
|
||||
EXPECT_EQ(2, participants_count);
|
||||
for (int64_t index = 0; index < participants_count; index++) {
|
||||
EXPECT_FALSE(partition_key_2 == valid_participants[index].pkey_);
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(TransCtxTest, add_participant_failed)
|
||||
{
|
||||
bool is_part_trans_served = true;
|
||||
bool is_all_participants_ready = false;
|
||||
|
||||
// 1. The current state is not advanced to the PREPARE state
|
||||
trans_ctx_.set_state(TransCtx::TRANS_CTX_STATE_INVALID);
|
||||
EXPECT_EQ(OB_STATE_NOT_MATCH, trans_ctx_.add_participant(part_trans_task_, is_part_trans_served,
|
||||
is_all_participants_ready));
|
||||
|
||||
// 2. The current state is already ready, so the new participant is not added and is reported as not served
|
||||
trans_ctx_.set_state(TransCtx::TRANS_CTX_STATE_PARTICIPANT_READY);
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_.add_participant(part_trans_task_, is_part_trans_served,
|
||||
is_all_participants_ready));
|
||||
EXPECT_FALSE(is_part_trans_served);
|
||||
}
|
||||
|
||||
TEST_F(TransCtxTest, add_participant_not_served)
|
||||
{
|
||||
bool stop_flag = false;
|
||||
bool need_discard = false;
|
||||
bool is_part_trans_served = true;
|
||||
bool is_all_participants_ready = false;
|
||||
const int64_t prepare_tstamp = 1452763900;
|
||||
|
||||
// prepare partition key
|
||||
ObPartitionKey partition_key_0;
|
||||
partition_key_0.init(1000000000, 0, 3);
|
||||
ObPartitionKey partition_key_1;
|
||||
partition_key_1.init(1000000000, 1, 3);
|
||||
|
||||
// Make the prepare log timestamp greater than the specified timestamp
|
||||
part_trans_task_.set_partition(partition_key_0);
|
||||
part_trans_task_.set_timestamp(prepare_tstamp);
|
||||
part_trans_task_.set_prepare_log_id(1);
|
||||
ObPartitionLogInfo part_info_0(partition_key_0, 1, prepare_tstamp);
|
||||
ObPartitionLogInfo part_info_1(partition_key_1, 1, 1452763999);
|
||||
PartitionLogInfoArray participants;
|
||||
EXPECT_EQ(OB_SUCCESS, participants.push_back(part_info_0));
|
||||
EXPECT_EQ(OB_SUCCESS, participants.push_back(part_info_1));
|
||||
part_trans_task_.set_participants(participants);
|
||||
|
||||
// prepare
|
||||
trans_ctx_.set_state(TransCtx::TRANS_CTX_STATE_INVALID);
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_.prepare(part_trans_task_, part_mgr_, stop_flag, need_discard));
|
||||
EXPECT_FALSE(need_discard);
|
||||
|
||||
// Construct a pkey that is not in the participants list
|
||||
PartTransTask part_trans_task_new;
|
||||
part_trans_task_new.set_trans_id(trans_id_);
|
||||
ObPartitionKey partition_key_new;
|
||||
partition_key_new.init(1000000000, 2, 3);
|
||||
part_trans_task_new.set_partition(partition_key_new);
|
||||
|
||||
// In the prepared state, but the partition is not in the participants list
|
||||
trans_ctx_.set_state(TransCtx::TRANS_CTX_STATE_PREPARED);
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_.add_participant(part_trans_task_new, is_part_trans_served,
|
||||
is_all_participants_ready));
|
||||
EXPECT_FALSE(is_part_trans_served);
|
||||
EXPECT_FALSE(is_all_participants_ready);
|
||||
}
|
||||
|
||||
TEST_F(TransCtxTest, add_participant_all_push_in_ready)
|
||||
{
|
||||
bool stop_flag = false;
|
||||
bool need_discard = false;
|
||||
bool is_part_trans_served = true;
|
||||
bool is_all_participants_ready = false;
|
||||
const int64_t prepare_tstamp = 1452763900;
|
||||
|
||||
// prepare partition key
|
||||
ObPartitionKey partition_key_0;
|
||||
partition_key_0.init(1000000000, 0, 3);
|
||||
ObPartitionKey partition_key_1;
|
||||
partition_key_1.init(1000000000, 1, 3);
|
||||
|
||||
// Make the prepare log timestamp greater than the specified timestamp
|
||||
part_trans_task_.set_partition(partition_key_0);
|
||||
part_trans_task_.set_timestamp(prepare_tstamp);
|
||||
part_trans_task_.set_prepare_log_id(1);
|
||||
ObPartitionLogInfo part_info_0(partition_key_0, 1, prepare_tstamp);
|
||||
ObPartitionLogInfo part_info_1(partition_key_1, 1, 1452763999);
|
||||
PartitionLogInfoArray participants;
|
||||
EXPECT_EQ(OB_SUCCESS, participants.push_back(part_info_0));
|
||||
EXPECT_EQ(OB_SUCCESS, participants.push_back(part_info_1));
|
||||
part_trans_task_.set_participants(participants);
|
||||
|
||||
// Prepare first, generating the list of all served participants
|
||||
trans_ctx_.set_state(TransCtx::TRANS_CTX_STATE_INVALID);
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_.prepare(part_trans_task_, part_mgr_, stop_flag, need_discard));
|
||||
EXPECT_FALSE(need_discard);
|
||||
EXPECT_EQ(TransCtx::TRANS_CTX_STATE_PREPARED, trans_ctx_.get_state());
|
||||
|
||||
PartTransTask part_trans_task_2;
|
||||
part_trans_task_2.set_trans_id(trans_id_);
|
||||
part_trans_task_2.set_partition(partition_key_1);
|
||||
part_trans_task_2.set_timestamp(prepare_tstamp + 100);
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_.prepare(part_trans_task_2, part_mgr_, stop_flag, need_discard));
|
||||
EXPECT_FALSE(need_discard);
|
||||
EXPECT_EQ(TransCtx::TRANS_CTX_STATE_PREPARED, trans_ctx_.get_state());
|
||||
|
||||
// 1. Currently in the prepared state: this partition is in the participants list, so it is added to the ready list, but not all participants are ready yet
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_.add_participant(part_trans_task_, is_part_trans_served,
|
||||
is_all_participants_ready));
|
||||
EXPECT_TRUE(is_part_trans_served);
|
||||
EXPECT_FALSE(is_all_participants_ready);
|
||||
EXPECT_EQ(TransCtx::TRANS_CTX_STATE_PREPARED, trans_ctx_.get_state());
|
||||
|
||||
// 2. All partitions have now been added to the ready list, so the transaction should reach the ready state
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_.add_participant(part_trans_task_2, is_part_trans_served,
|
||||
is_all_participants_ready));
|
||||
EXPECT_TRUE(is_part_trans_served);
|
||||
EXPECT_TRUE(is_all_participants_ready);
|
||||
EXPECT_EQ(TransCtx::TRANS_CTX_STATE_PARTICIPANT_READY, trans_ctx_.get_state());
|
||||
}
|
||||
|
||||
TEST_F(TransCtxTest, parse_deps_failed)
|
||||
{
|
||||
IObLogTransCtxMgr *trans_ctx_mgr = NULL;
|
||||
bool all_deps_cleared = false;
|
||||
trans_ctx_.set_state(TransCtx::TRANS_CTX_STATE_PREPARED);
|
||||
EXPECT_EQ(OB_STATE_NOT_MATCH, trans_ctx_.parse_deps(trans_ctx_mgr, all_deps_cleared));
|
||||
EXPECT_FALSE(all_deps_cleared);
|
||||
|
||||
trans_ctx_.set_state(TransCtx::TRANS_CTX_STATE_PARTICIPANT_READY);
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, trans_ctx_.parse_deps(trans_ctx_mgr, all_deps_cleared));
|
||||
EXPECT_FALSE(all_deps_cleared);
|
||||
}
|
||||
|
||||
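// Dependency chain: 1 -> {2, 3}, 2 -> 3, 3 -> 4; the transactions can only be sequenced in the order 4, 3, 2, 1.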
TEST_F(TransCtxTest, parse_deps_and_sequence)
|
||||
{
|
||||
// Create 4 transactions
|
||||
const ObAddr svr1(ObAddr::IPV4, "127.0.0.1", 1000);
|
||||
const ObAddr svr2(ObAddr::IPV4, "127.0.0.1", 2000);
|
||||
const ObAddr svr3(ObAddr::IPV4, "127.0.0.1", 3000);
|
||||
const ObAddr svr4(ObAddr::IPV4, "127.0.0.1", 4000);
|
||||
const ObTransID trans_id_1(svr1);
|
||||
const ObTransID trans_id_2(svr2);
|
||||
const ObTransID trans_id_3(svr3);
|
||||
const ObTransID trans_id_4(svr4);
|
||||
|
||||
TransCtx *trans_ctx_1 = NULL;
|
||||
TransCtx *trans_ctx_2 = NULL;
|
||||
TransCtx *trans_ctx_3 = NULL;
|
||||
TransCtx *trans_ctx_4 = NULL;
|
||||
bool enable_create = true;
|
||||
|
||||
// init 4 trans_ctx
|
||||
init_trans_ctx(trans_id_1, trans_ctx_1, enable_create);
|
||||
init_trans_ctx(trans_id_2, trans_ctx_2, enable_create);
|
||||
init_trans_ctx(trans_id_3, trans_ctx_3, enable_create);
|
||||
init_trans_ctx(trans_id_4, trans_ctx_4, enable_create);
|
||||
|
||||
// set deps of trans
|
||||
trans_ctx_1->set_deps(trans_id_2);
|
||||
trans_ctx_1->set_deps(trans_id_3);
|
||||
trans_ctx_2->set_deps(trans_id_3);
|
||||
trans_ctx_3->set_deps(trans_id_4);
|
||||
|
||||
bool all_deps_cleared = false;
|
||||
TransCtx::TransIDArray dep_parsed_reverse_deps;
|
||||
|
||||
// 1. trans_ctx 4 can be sequenced
|
||||
// trans_ctx_4 parses its dependencies (it has none, so dependency parsing finishes immediately and the state changes from ready to dep-parsed)
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_4->parse_deps(trans_ctx_mgr_, all_deps_cleared));
|
||||
EXPECT_TRUE(all_deps_cleared);
|
||||
EXPECT_EQ(TransCtx::TRANS_CTX_STATE_DEP_PARSED, trans_ctx_4->get_state());
|
||||
|
||||
// trans_ctx_4 sequenced
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_4->sequence(0, 0));
|
||||
|
||||
// trans_ctx_4 parse reverse deps
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_4->parse_reverse_deps(trans_ctx_mgr_, dep_parsed_reverse_deps));
|
||||
EXPECT_EQ(0, dep_parsed_reverse_deps.count());
|
||||
|
||||
// 2. trans_ctx 1 cannot be sequenced
|
||||
// trans_ctx_1 parses its dependencies; since 2 and 3 are not yet sequenced, it is added to the reverse dependency sets of 2 and 3 and its state stays ready
|
||||
all_deps_cleared = false;
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_1->parse_deps(trans_ctx_mgr_, all_deps_cleared));
|
||||
EXPECT_FALSE(all_deps_cleared);
|
||||
EXPECT_EQ(TransCtx::TRANS_CTX_STATE_PARTICIPANT_READY, trans_ctx_1->get_state());
|
||||
// Verify that the reverse dependency sets of 2 and 3 both include 1
|
||||
const TransCtx::ReverseDepSet &reverse_dep_set_2 = trans_ctx_2->get_reverse_dep_set();
|
||||
EXPECT_EQ(1, reverse_dep_set_2.count());
|
||||
EXPECT_TRUE(is_exist(reverse_dep_set_2, trans_id_1));
|
||||
const TransCtx::ReverseDepSet &reverse_dep_set_3 = trans_ctx_3->get_reverse_dep_set();
|
||||
EXPECT_EQ(1, reverse_dep_set_3.count());
|
||||
EXPECT_TRUE(is_exist(reverse_dep_set_3, trans_id_1));
|
||||
|
||||
// 3. trans_ctx 2 cannot be sequenced yet and joins the reverse dependency set of 3
|
||||
all_deps_cleared = false;
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_2->parse_deps(trans_ctx_mgr_, all_deps_cleared));
|
||||
EXPECT_FALSE(all_deps_cleared);
|
||||
EXPECT_EQ(TransCtx::TRANS_CTX_STATE_PARTICIPANT_READY, trans_ctx_2->get_state());
|
||||
// Verify that the reverse dependency set of 3 now also contains 2
|
||||
const TransCtx::ReverseDepSet &reverse_dep_set_3_new = trans_ctx_3->get_reverse_dep_set();
|
||||
EXPECT_EQ(2, reverse_dep_set_3_new.count());
|
||||
EXPECT_TRUE(is_exist(reverse_dep_set_3_new, trans_id_2));
|
||||
|
||||
// 4. trans_ctx 3 can now parse its dependencies
|
||||
all_deps_cleared = false;
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_3->parse_deps(trans_ctx_mgr_, all_deps_cleared));
|
||||
EXPECT_TRUE(all_deps_cleared);
|
||||
EXPECT_EQ(TransCtx::TRANS_CTX_STATE_DEP_PARSED, trans_ctx_3->get_state());
|
||||
|
||||
// trans_ctx_3 sequenced
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_3->sequence(1, 1));
|
||||
|
||||
// trans_ctx_3 parse reverse deps
|
||||
dep_parsed_reverse_deps.reset();
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_3->parse_reverse_deps(trans_ctx_mgr_, dep_parsed_reverse_deps));
|
||||
EXPECT_EQ(1, dep_parsed_reverse_deps.count());
|
||||
EXPECT_TRUE(is_exist(dep_parsed_reverse_deps, trans_id_2));
|
||||
|
||||
// Trans 2 now has 0 remaining dependencies and trans 1 has 1
|
||||
EXPECT_EQ(0, trans_ctx_2->get_cur_dep_count());
|
||||
EXPECT_EQ(1, trans_ctx_1->get_cur_dep_count());
|
||||
|
||||
// Sequence trans 2; parsing its reverse deps releases trans 1's last dependency
|
||||
dep_parsed_reverse_deps.reset();
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_2->sequence(2, 2));
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_2->parse_reverse_deps(trans_ctx_mgr_, dep_parsed_reverse_deps));
|
||||
EXPECT_EQ(1, dep_parsed_reverse_deps.count());
|
||||
EXPECT_EQ(0, trans_ctx_1->get_cur_dep_count());
|
||||
EXPECT_TRUE(is_exist(dep_parsed_reverse_deps, trans_id_1));
|
||||
|
||||
// 5. trans_ctx_1 can now be sequenced
|
||||
dep_parsed_reverse_deps.reset();
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_1->sequence(3, 3));
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_1->parse_reverse_deps(trans_ctx_mgr_, dep_parsed_reverse_deps));
|
||||
EXPECT_EQ(0, dep_parsed_reverse_deps.count());
|
||||
}
|
||||
|
||||
TEST_F(TransCtxTest, format_participant_failed)
|
||||
{
|
||||
TransCtx *trans_ctx = NULL;
|
||||
bool enable_create = true;
|
||||
init_trans_ctx(trans_id_, trans_ctx, enable_create);
|
||||
|
||||
// 1. The transaction has not been sequenced yet, so an error is reported
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->set_state(TransCtx::TRANS_CTX_STATE_DEP_PARSED));
|
||||
EXPECT_EQ(OB_STATE_NOT_MATCH, trans_ctx->format_participant(part_trans_task_));
|
||||
|
||||
// 2. Mismatched transaction id, so an error is reported
|
||||
const ObAddr svr(ObAddr::IPV4, "127.0.0.1", 2000);
|
||||
const ObTransID trans_id(svr);
|
||||
PartTransTask part_trans_task;
|
||||
part_trans_task.set_trans_id(trans_id);
|
||||
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, trans_ctx->format_participant(part_trans_task));
|
||||
}
|
||||
|
||||
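// Format 10 ready participants; the transaction switches to FORMATTED only after the last one.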
TEST_F(TransCtxTest, format_participant)
|
||||
{
|
||||
TransCtx *trans_ctx = NULL;
|
||||
bool enable_create = true;
|
||||
init_trans_ctx(trans_id_, trans_ctx, enable_create);
|
||||
|
||||
// Total of 10 partition transactions
|
||||
PartTransTaskArray part_trans_task_array;
|
||||
init_part_trans_task_array(part_trans_task_array, trans_id_);
|
||||
EXPECT_TRUE(PART_TRANS_TASK_ARRAY_SIZE == part_trans_task_array.count());
|
||||
|
||||
// Set the number of ready participants
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->set_ready_participant_count(PART_TRANS_TASK_ARRAY_SIZE));
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->set_state(TransCtx::TRANS_CTX_STATE_SEQUENCED));
|
||||
|
||||
// For the first 9 format operations the transaction state is not updated, and the formatted participant count increases by 1 each time
|
||||
int64_t formatted_count = 0;
|
||||
PartTransTask *part_trans_task = NULL;
|
||||
int64_t index = 0;
|
||||
for (index = 0; index < PART_TRANS_TASK_ARRAY_SIZE - 1; index++) {
|
||||
EXPECT_EQ(OB_SUCCESS, part_trans_task_array.at(index, part_trans_task));
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->format_participant(*part_trans_task));
|
||||
EXPECT_EQ(++formatted_count, trans_ctx->get_formatted_participant_count());
|
||||
EXPECT_EQ(TransCtx::TRANS_CTX_STATE_SEQUENCED, trans_ctx->get_state());
|
||||
}
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, part_trans_task_array.at(index, part_trans_task));
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->format_participant(*part_trans_task));
|
||||
EXPECT_EQ(trans_ctx->get_ready_participant_count(), trans_ctx->get_formatted_participant_count());
|
||||
EXPECT_EQ(TransCtx::TRANS_CTX_STATE_FORMATTED, trans_ctx->get_state());
|
||||
|
||||
free_part_trans_task_array(part_trans_task_array);
|
||||
part_trans_task_array.destroy();
|
||||
}
|
||||
|
||||
TEST_F(TransCtxTest, commit)
|
||||
{
|
||||
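// commit from SEQUENCED fails with OB_STATE_NOT_MATCH; it succeeds only from the FORMATTED state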
EXPECT_EQ(OB_SUCCESS, trans_ctx_.set_state(TransCtx::TRANS_CTX_STATE_SEQUENCED));
|
||||
EXPECT_EQ(OB_STATE_NOT_MATCH, trans_ctx_.commit());
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_.set_state(TransCtx::TRANS_CTX_STATE_FORMATTED));
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_.commit());
|
||||
EXPECT_EQ(TransCtx::TRANS_CTX_STATE_COMMITTED, trans_ctx_.get_state());
|
||||
}
|
||||
|
||||
TEST_F(TransCtxTest, release_participants_failed)
|
||||
{
|
||||
TransCtx *trans_ctx = NULL;
|
||||
bool enable_create = true;
|
||||
init_trans_ctx(trans_id_, trans_ctx, enable_create);
|
||||
|
||||
// 1. The current state is not a commit state and an error is reported
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->set_state(TransCtx::TRANS_CTX_STATE_SEQUENCED));
|
||||
EXPECT_EQ(OB_STATE_NOT_MATCH, trans_ctx->release_participants());
|
||||
|
||||
// 2. Not all parts are currently available for release, 10 partitioned transactions in total
|
||||
PartTransTaskArray part_trans_task_array;
|
||||
init_part_trans_task_array(part_trans_task_array, trans_id_);
|
||||
EXPECT_TRUE(PART_TRANS_TASK_ARRAY_SIZE == part_trans_task_array.count());
|
||||
|
||||
// Set the number of ready participants; the state is COMMITTED
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->set_ready_participant_count(PART_TRANS_TASK_ARRAY_SIZE));
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->set_state(TransCtx::TRANS_CTX_STATE_COMMITTED));
|
||||
|
||||
// set next participant
|
||||
PartTransTask *part_trans_task = NULL;
|
||||
PartTransTask *first_part_trans_task = NULL;
|
||||
PartTransTask *next_part_trans_task = NULL;
|
||||
int64_t index = 0;
|
||||
EXPECT_EQ(OB_SUCCESS, part_trans_task_array.at(0, part_trans_task));
|
||||
first_part_trans_task = part_trans_task;
|
||||
|
||||
for (index = 0; index < PART_TRANS_TASK_ARRAY_SIZE - 1; index++) {
|
||||
EXPECT_EQ(OB_SUCCESS, part_trans_task_array.at(index + 1, next_part_trans_task));
|
||||
part_trans_task->set_next_participant(next_part_trans_task);
|
||||
part_trans_task = next_part_trans_task;
|
||||
}
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->set_ready_participant_objs(first_part_trans_task));
|
||||
bool all_part_releasable = false;
|
||||
for (index = 0; index < PART_TRANS_TASK_ARRAY_SIZE - 1; index++) {
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->inc_releasable_participant_count(all_part_releasable));
|
||||
}
|
||||
|
||||
// Not all parts are releasable, error reported
|
||||
EXPECT_EQ(OB_STATE_NOT_MATCH, trans_ctx->release_participants());
|
||||
EXPECT_EQ(TransCtx::TRANS_CTX_STATE_COMMITTED, trans_ctx->get_state());
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->inc_releasable_participant_count(all_part_releasable));
|
||||
EXPECT_TRUE(true == all_part_releasable);
|
||||
|
||||
// 3. Not all part reference counts are 0, error reported
|
||||
part_trans_task->set_ref_cnt(2);
|
||||
next_part_trans_task->set_ref_cnt(1);
|
||||
EXPECT_EQ(OB_ERR_UNEXPECTED, trans_ctx->release_participants());
|
||||
EXPECT_EQ(TransCtx::TRANS_CTX_STATE_COMMITTED, trans_ctx->get_state());
|
||||
|
||||
free_part_trans_task_array(part_trans_task_array);
|
||||
part_trans_task_array.destroy();
|
||||
}
|
||||
|
||||
TEST_F(TransCtxTest, released_participants_less_than_ready)
|
||||
{
|
||||
TransCtx *trans_ctx = NULL;
|
||||
bool enable_create = true;
|
||||
init_trans_ctx(trans_id_, trans_ctx, enable_create);
|
||||
|
||||
PartTransTask *part_trans_task;
|
||||
init_part_trans_task(part_trans_task, trans_id_);
|
||||
|
||||
// set count of ready participants, status is committed
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->set_ready_participant_count(PART_TRANS_TASK_ARRAY_SIZE));
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->set_state(TransCtx::TRANS_CTX_STATE_COMMITTED));
|
||||
|
||||
// set next participant
|
||||
part_trans_task->set_next_participant(NULL);
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->set_ready_participant_objs(part_trans_task));
|
||||
EXPECT_TRUE(NULL != trans_ctx->get_participant_objs());
|
||||
bool all_part_releasable = false;
|
||||
for (int index = 0; index < PART_TRANS_TASK_ARRAY_SIZE; index++) {
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->inc_releasable_participant_count(all_part_releasable));
|
||||
}
|
||||
|
||||
EXPECT_TRUE(true == all_part_releasable);
|
||||
EXPECT_EQ(OB_INVALID_ERROR, trans_ctx->release_participants());
|
||||
EXPECT_EQ(TransCtx::TRANS_CTX_STATE_COMMITTED, trans_ctx->get_state());
|
||||
|
||||
free_part_trans_task(part_trans_task);
|
||||
}
|
||||
|
||||
TEST_F(TransCtxTest, release_participants)
|
||||
{
|
||||
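// Once every participant is marked releasable, release_participants succeeds, the state becomes PARTICIPANT_RELEASED and all participant counters and the object list are cleared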
TransCtx *trans_ctx = NULL;
|
||||
bool enable_create = true;
|
||||
init_trans_ctx(trans_id_, trans_ctx, enable_create);
|
||||
|
||||
// Total of 10 partition transactions
|
||||
PartTransTaskArray part_trans_task_array;
|
||||
init_part_trans_task_array(part_trans_task_array, trans_id_);
|
||||
EXPECT_TRUE(PART_TRANS_TASK_ARRAY_SIZE == part_trans_task_array.count());
|
||||
|
||||
// set count of ready participants, status is committed
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->set_ready_participant_count(PART_TRANS_TASK_ARRAY_SIZE));
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->set_state(TransCtx::TRANS_CTX_STATE_COMMITTED));
|
||||
|
||||
// set ready participant objs
|
||||
PartTransTask *part_trans_task = NULL;
|
||||
PartTransTask *first_part_trans_task = NULL;
|
||||
PartTransTask *next_part_trans_task = NULL;
|
||||
int64_t index = 0;
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, part_trans_task_array.at(0, part_trans_task));
|
||||
first_part_trans_task = part_trans_task;
|
||||
for (index = 0; index < PART_TRANS_TASK_ARRAY_SIZE - 1; index++) {
|
||||
EXPECT_EQ(OB_SUCCESS, part_trans_task_array.at(index + 1, next_part_trans_task));
|
||||
part_trans_task->set_next_participant(next_part_trans_task);
|
||||
part_trans_task = next_part_trans_task;
|
||||
}
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->set_ready_participant_objs(first_part_trans_task));
|
||||
EXPECT_TRUE(NULL != trans_ctx->get_participant_objs());
|
||||
|
||||
bool all_part_releasable = false;
|
||||
for (index = 0; index < PART_TRANS_TASK_ARRAY_SIZE; index++) {
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->inc_releasable_participant_count(all_part_releasable));
|
||||
}
|
||||
|
||||
EXPECT_TRUE(true == all_part_releasable);
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx->release_participants());
|
||||
EXPECT_EQ(TransCtx::TRANS_CTX_STATE_PARTICIPANT_RELEASED, trans_ctx->get_state());
|
||||
EXPECT_TRUE(0 == trans_ctx->get_ready_participant_count());
|
||||
EXPECT_TRUE(0 == trans_ctx->get_releasable_participant_count());
|
||||
EXPECT_TRUE(0 == trans_ctx->get_formatted_participant_count());
|
||||
EXPECT_TRUE(NULL == trans_ctx->get_participant_objs());
|
||||
|
||||
free_part_trans_task_array(part_trans_task_array);
|
||||
part_trans_task_array.destroy();
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
// Used for initialization of ObTransID
|
||||
ObClockGenerator::init();
|
||||
|
||||
OB_LOGGER.set_log_level("INFO");
|
||||
::testing::InitGoogleTest(&argc, argv);
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
357
unittest/obcdc/test_log_trans_ctx_mgr.cpp
Normal file
@ -0,0 +1,357 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "ob_log_trans_ctx_mgr.h" // ObLogTransCtxMgr
|
||||
#include "common/ob_clock_generator.h" // ObClockGenerator
|
||||
|
||||
using namespace oceanbase::common;
|
||||
using namespace oceanbase::liboblog;
|
||||
using namespace oceanbase::transaction;
|
||||
|
||||
class ObLogTransCtxMgrTest : public ::testing::Test
|
||||
{
|
||||
public:
|
||||
static const int64_t SLEEP_TIME = 10000;
|
||||
static const int64_t THREAD_NUM = 10;
|
||||
static const int64_t RUN_TIME_SEC = 60;
|
||||
static const int64_t CACHED_CTX_COUNT = 10000;
|
||||
static const int64_t TEST_CTX_COUNT = CACHED_CTX_COUNT + 1024;
|
||||
|
||||
public:
|
||||
ObLogTransCtxMgrTest();
|
||||
virtual ~ObLogTransCtxMgrTest();
|
||||
virtual void SetUp();
|
||||
virtual void TearDown();
|
||||
|
||||
static void *thread_func(void *args);
|
||||
|
||||
public:
|
||||
void run();
|
||||
void test_immediately_remove();
|
||||
void test_delay_remove();
|
||||
|
||||
public:
|
||||
int32_t port_;
|
||||
ObTransID *trans_ids_;
|
||||
pthread_t threads_[THREAD_NUM];
|
||||
ObLogTransCtxMgr mgr_;
|
||||
|
||||
private:
|
||||
// disallow copy
|
||||
DISALLOW_COPY_AND_ASSIGN(ObLogTransCtxMgrTest);
|
||||
};
|
||||
ObLogTransCtxMgrTest::ObLogTransCtxMgrTest() : port_(0), trans_ids_(NULL), mgr_()
|
||||
{
|
||||
}
|
||||
|
||||
ObLogTransCtxMgrTest::~ObLogTransCtxMgrTest()
|
||||
{
|
||||
}
|
||||
|
||||
void ObLogTransCtxMgrTest::SetUp()
|
||||
{
|
||||
}
|
||||
|
||||
void ObLogTransCtxMgrTest::TearDown()
|
||||
{
|
||||
}
|
||||
|
||||
TEST_F(ObLogTransCtxMgrTest, DISABLED_single_thread_immediately_remove)
|
||||
{
|
||||
ObLogTransCtxMgr trans_ctx_mgr;
|
||||
|
||||
EXPECT_NE(OB_SUCCESS, trans_ctx_mgr.init(0));
|
||||
EXPECT_NE(OB_SUCCESS, trans_ctx_mgr.init(-1));
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_mgr.init(CACHED_CTX_COUNT));
|
||||
|
||||
EXPECT_EQ(0, trans_ctx_mgr.get_valid_trans_ctx_count());
|
||||
EXPECT_EQ(0, trans_ctx_mgr.get_alloc_trans_ctx_count());
|
||||
EXPECT_EQ(0, trans_ctx_mgr.get_free_trans_ctx_count());
|
||||
|
||||
// Up to two transaction context objects are allocated at the same time when used by a single thread following the "allocate-return-release" process.
|
||||
// One of them is not deleted from the cache. The logic is verified below.
|
||||
int64_t free_count = 0;
|
||||
int64_t alloc_count = 0;
|
||||
for (int64_t index = 0; index < TEST_CTX_COUNT; index++) {
|
||||
ObAddr svr(ObAddr::IPV4, "127.0.0.1", 1 + (int32_t)index);
|
||||
ObTransID trans_id(svr);
|
||||
|
||||
// At the beginning, the valid count is 0
|
||||
EXPECT_EQ(0, trans_ctx_mgr.get_valid_trans_ctx_count());
|
||||
|
||||
free_count = index <= 1 ? 0 : 1;
|
||||
alloc_count = index <= 2 ? index : 2;
|
||||
EXPECT_EQ(free_count, trans_ctx_mgr.get_free_trans_ctx_count());
|
||||
EXPECT_EQ(alloc_count, trans_ctx_mgr.get_alloc_trans_ctx_count());
|
||||
|
||||
// get with a non-existent trans_id
|
||||
TransCtx *ctx1 = NULL;
|
||||
EXPECT_EQ(OB_ENTRY_NOT_EXIST, trans_ctx_mgr.get_trans_ctx(trans_id, ctx1));
|
||||
|
||||
// create on get when the trans_id does not exist
|
||||
bool enable_create = true;
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_mgr.get_trans_ctx(trans_id, ctx1, enable_create));
|
||||
EXPECT_TRUE(NULL != ctx1);
|
||||
|
||||
// get the trans_ctx that was just created
|
||||
TransCtx *ctx1_get = NULL;
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_mgr.get_trans_ctx(trans_id, ctx1_get));
|
||||
EXPECT_TRUE(ctx1 == ctx1_get);
|
||||
|
||||
// The valid count is 1
|
||||
EXPECT_EQ(1, trans_ctx_mgr.get_valid_trans_ctx_count());
|
||||
|
||||
// A free transaction context object is consumed: the free count drops to 0 and the allocated count grows to at most 2
|
||||
free_count = 0;
|
||||
alloc_count = index <= 0 ? 1 : 2;
|
||||
EXPECT_EQ(free_count, trans_ctx_mgr.get_free_trans_ctx_count());
|
||||
EXPECT_EQ(alloc_count, trans_ctx_mgr.get_alloc_trans_ctx_count());
|
||||
|
||||
// revert the trans_ctx
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_mgr.revert_trans_ctx(ctx1));
|
||||
|
||||
// A revert before a remove does not affect the number of objects
|
||||
EXPECT_EQ(1, trans_ctx_mgr.get_valid_trans_ctx_count());
|
||||
free_count = 0;
|
||||
alloc_count = index <= 0 ? 1 : 2;
|
||||
EXPECT_EQ(free_count, trans_ctx_mgr.get_free_trans_ctx_count());
|
||||
EXPECT_EQ(alloc_count, trans_ctx_mgr.get_alloc_trans_ctx_count());
|
||||
|
||||
// remove
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_mgr.remove_trans_ctx(trans_id));
|
||||
EXPECT_EQ(OB_ENTRY_NOT_EXIST, trans_ctx_mgr.remove_trans_ctx(trans_id));
|
||||
|
||||
// After deletion, the valid count becomes 0
|
||||
EXPECT_EQ(0, trans_ctx_mgr.get_valid_trans_ctx_count());
|
||||
|
||||
// After deletion, the object just deleted is not released immediately, but the last deleted object is released
|
||||
// So after the second time, the number of free objects becomes 1, but the number of allocated objects remains the same
|
||||
free_count = index <= 0 ? 0 : 1;
|
||||
alloc_count = index <= 0 ? 1 : 2;
|
||||
EXPECT_EQ(free_count, trans_ctx_mgr.get_free_trans_ctx_count());
|
||||
EXPECT_EQ(alloc_count, trans_ctx_mgr.get_alloc_trans_ctx_count());
|
||||
|
||||
// revert the last one
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_mgr.revert_trans_ctx(ctx1_get));
|
||||
|
||||
// After all references are reverted, the valid count remains 0
|
||||
EXPECT_EQ(0, trans_ctx_mgr.get_valid_trans_ctx_count());
|
||||
|
||||
// Even after all references are reverted, the object just deleted is not released immediately; it is freed only on the next remove
|
||||
free_count = index <= 0 ? 0 : 1;
|
||||
alloc_count = index <= 0 ? 1 : 2;
|
||||
EXPECT_EQ(free_count, trans_ctx_mgr.get_free_trans_ctx_count());
|
||||
EXPECT_EQ(alloc_count, trans_ctx_mgr.get_alloc_trans_ctx_count());
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(ObLogTransCtxMgrTest, DISABLED_single_thread_delay_remove)
|
||||
{
|
||||
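// Create and revert TEST_CTX_COUNT contexts, then remove each one while still holding a reference; a removed context is reclaimed only after its reference is reverted and a later remove runs, which the counters below verify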
ObLogTransCtxMgr trans_ctx_mgr;
|
||||
ObTransID *tids = (ObTransID *)ob_malloc(sizeof(ObTransID) * TEST_CTX_COUNT);
|
||||
TransCtx *tctxs_[TEST_CTX_COUNT];
|
||||
|
||||
(void)memset(tctxs_, 0, sizeof(tctxs_));
|
||||
ASSERT_TRUE(NULL != tids);
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_mgr.init(CACHED_CTX_COUNT));
|
||||
|
||||
for (int64_t index = 0; index < TEST_CTX_COUNT; index++) {
|
||||
new(tids + index) ObTransID(ObAddr(ObAddr::IPV4, "127.0.0.1", 1 + (int32_t)index));
|
||||
ObTransID &trans_id = tids[index];
|
||||
|
||||
// get with a non-existent trans_id
|
||||
TransCtx *ctx1 = NULL;
|
||||
EXPECT_EQ(OB_ENTRY_NOT_EXIST, trans_ctx_mgr.get_trans_ctx(trans_id, ctx1));
|
||||
|
||||
// create on get when the trans_id does not exist
|
||||
bool enable_create = true;
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_mgr.get_trans_ctx(trans_id, ctx1, enable_create));
|
||||
EXPECT_TRUE(NULL != ctx1);
|
||||
|
||||
// revert the trans_ctx
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_mgr.revert_trans_ctx(ctx1));
|
||||
}
|
||||
|
||||
EXPECT_EQ(TEST_CTX_COUNT + 0, trans_ctx_mgr.get_valid_trans_ctx_count());
|
||||
EXPECT_EQ(TEST_CTX_COUNT + 0, trans_ctx_mgr.get_alloc_trans_ctx_count());
|
||||
EXPECT_EQ(0, trans_ctx_mgr.get_free_trans_ctx_count());
|
||||
|
||||
int64_t REMOVE_INTERVAL_COUNT = 10;
|
||||
for (int64_t index = 0; index < TEST_CTX_COUNT; index++) {
|
||||
ObTransID &trans_id = tids[index];
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_mgr.get_trans_ctx(trans_id, tctxs_[index]));
|
||||
EXPECT_TRUE(NULL != tctxs_[index]);
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_mgr.remove_trans_ctx(trans_id));
|
||||
EXPECT_EQ(TEST_CTX_COUNT - index - 1, trans_ctx_mgr.get_valid_trans_ctx_count());
|
||||
|
||||
// revert the object acquired REMOVE_INTERVAL_COUNT iterations earlier, so that it is actually freed on the next remove
|
||||
if (index >= REMOVE_INTERVAL_COUNT) {
|
||||
int64_t revert_index = index - REMOVE_INTERVAL_COUNT;
|
||||
// After revert, the next remove will delete
|
||||
EXPECT_EQ(OB_SUCCESS, trans_ctx_mgr.revert_trans_ctx(tctxs_[revert_index]));
|
||||
tctxs_[revert_index] = NULL;
|
||||
|
||||
static int64_t alloc_count = TEST_CTX_COUNT;
|
||||
static int64_t free_count = 0;
|
||||
|
||||
// The alloc_count is only decremented when a second non-cached object is deleted
|
||||
if (revert_index > CACHED_CTX_COUNT) {
|
||||
alloc_count--;
|
||||
} else if (revert_index > 0) {
|
||||
// The free_count is incremented when the cached transaction context object is deleted
|
||||
free_count++;
|
||||
}
|
||||
|
||||
EXPECT_EQ(alloc_count, trans_ctx_mgr.get_alloc_trans_ctx_count());
|
||||
EXPECT_EQ(free_count, trans_ctx_mgr.get_free_trans_ctx_count());
|
||||
}
|
||||
}
|
||||
|
||||
EXPECT_EQ(CACHED_CTX_COUNT + REMOVE_INTERVAL_COUNT + 1, trans_ctx_mgr.get_alloc_trans_ctx_count());
|
||||
EXPECT_EQ(CACHED_CTX_COUNT + 0, trans_ctx_mgr.get_free_trans_ctx_count());
|
||||
|
||||
ob_free((void *)tids);
|
||||
tids = NULL;
|
||||
}
|
||||
|
||||
TEST_F(ObLogTransCtxMgrTest, multiple_thread)
|
||||
{
|
||||
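// Spawn THREAD_NUM threads that concurrently run the immediate-remove and delayed-remove scenarios against the shared mgr_ for RUN_TIME_SEC seconds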
EXPECT_EQ(OB_SUCCESS, mgr_.init(CACHED_CTX_COUNT));
|
||||
|
||||
OB_ASSERT(NULL == trans_ids_);
|
||||
trans_ids_ = (ObTransID *)ob_malloc(sizeof(ObTransID) * TEST_CTX_COUNT);
|
||||
ASSERT_TRUE(NULL != trans_ids_);
|
||||
|
||||
for (int64_t index = 0; index < TEST_CTX_COUNT; index++) {
|
||||
new(trans_ids_ + index) ObTransID(ObAddr(ObAddr::IPV4, "127.0.0.1", 1 + (int32_t)index));
|
||||
}
|
||||
|
||||
for (int64_t index = 0; index < THREAD_NUM; index++) {
|
||||
ASSERT_EQ(0, pthread_create(threads_ + index, NULL, thread_func, this));
|
||||
}
|
||||
|
||||
for (int64_t index = 0; index < THREAD_NUM; index++) {
|
||||
if (0 != threads_[index]) {
|
||||
pthread_join(threads_[index], NULL);
|
||||
threads_[index] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
for (int64_t index = 0; index < TEST_CTX_COUNT; index++) {
|
||||
trans_ids_[index].~ObTransID();
|
||||
}
|
||||
|
||||
ob_free(trans_ids_);
|
||||
trans_ids_ = NULL;
|
||||
}
|
||||
|
||||
void *ObLogTransCtxMgrTest::thread_func(void *args)
|
||||
{
|
||||
if (NULL != args) {
|
||||
((ObLogTransCtxMgrTest *)args)->run();
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void ObLogTransCtxMgrTest::run()
|
||||
{
|
||||
int64_t end_time = ObTimeUtility::current_time() + RUN_TIME_SEC * 1000000;
|
||||
|
||||
while (true) {
|
||||
test_immediately_remove();
|
||||
test_delay_remove();
|
||||
int64_t left_time = end_time - ObTimeUtility::current_time();
|
||||
if (left_time <= 0) break;
|
||||
}
|
||||
}
|
||||
|
||||
void ObLogTransCtxMgrTest::test_immediately_remove()
|
||||
{
|
||||
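// Create a context, fetch a second reference, revert one, then remove the entry while the other reference is still held; a second remove reports OB_ENTRY_NOT_EXIST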
ObAddr svr(ObAddr::IPV4, "127.0.0.1", ATOMIC_AAF(&port_, 1));
|
||||
ObTransID trans_id(svr); // Even if the svr were the same, the internal counter auto-increments, so each trans_id is unique
|
||||
|
||||
// get with a non-existent trans_id
|
||||
TransCtx *ctx1 = NULL;
|
||||
EXPECT_EQ(OB_ENTRY_NOT_EXIST, mgr_.get_trans_ctx(trans_id, ctx1));
|
||||
|
||||
// create on get when the trans_id does not exist
|
||||
bool enable_create = true;
|
||||
EXPECT_EQ(OB_SUCCESS, mgr_.get_trans_ctx(trans_id, ctx1, enable_create));
|
||||
EXPECT_TRUE(NULL != ctx1);
|
||||
|
||||
// get the trans_ctx that was just created
|
||||
TransCtx *ctx1_get = NULL;
|
||||
EXPECT_EQ(OB_SUCCESS, mgr_.get_trans_ctx(trans_id, ctx1_get));
|
||||
EXPECT_TRUE(ctx1 == ctx1_get);
|
||||
|
||||
// revert the trans_ctx
|
||||
EXPECT_EQ(OB_SUCCESS, mgr_.revert_trans_ctx(ctx1));
|
||||
|
||||
usleep((useconds_t)random() % SLEEP_TIME);
|
||||
|
||||
// remove
|
||||
EXPECT_EQ(OB_SUCCESS, mgr_.remove_trans_ctx(trans_id));
|
||||
EXPECT_EQ(OB_ENTRY_NOT_EXIST, mgr_.remove_trans_ctx(trans_id));
|
||||
|
||||
// Revert the reference acquired last
|
||||
EXPECT_EQ(OB_SUCCESS, mgr_.revert_trans_ctx(ctx1_get));
|
||||
}
|
||||
|
||||
void ObLogTransCtxMgrTest::test_delay_remove()
|
||||
{
|
||||
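// First create and revert contexts for random trans_ids, then re-acquire, remove and revert them to exercise delayed release under concurrency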
for (int64_t index = 0; index < TEST_CTX_COUNT; index++) {
|
||||
ObTransID &trans_id = trans_ids_[random() % TEST_CTX_COUNT];
|
||||
TransCtx *ctx = NULL;
|
||||
bool enable_create = true;
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, mgr_.get_trans_ctx(trans_id, ctx, enable_create));
|
||||
EXPECT_TRUE(NULL != ctx);
|
||||
|
||||
usleep((useconds_t)random() % SLEEP_TIME);
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, mgr_.revert_trans_ctx(ctx));
|
||||
}
|
||||
|
||||
for (int64_t index = 0; index < TEST_CTX_COUNT; index++) {
|
||||
ObTransID &trans_id = trans_ids_[random() % TEST_CTX_COUNT];
|
||||
TransCtx *ctx = NULL;
|
||||
|
||||
int ret = mgr_.get_trans_ctx(trans_id, ctx);
|
||||
|
||||
if (OB_SUCC(ret)) {
|
||||
EXPECT_TRUE(NULL != ctx);
|
||||
|
||||
ret = mgr_.remove_trans_ctx(trans_id);
|
||||
EXPECT_TRUE(OB_SUCCESS == ret || OB_ENTRY_NOT_EXIST == ret);
|
||||
|
||||
usleep((useconds_t)random() % SLEEP_TIME);
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, mgr_.revert_trans_ctx(ctx));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
// Used for initialization of ObTransID
|
||||
ObClockGenerator::init();
|
||||
|
||||
srandom((unsigned)ObTimeUtility::current_time());
|
||||
|
||||
OB_LOGGER.set_log_level("INFO");
|
||||
::testing::InitGoogleTest(&argc,argv);
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
330
unittest/obcdc/test_log_utils.cpp
Normal file
@ -0,0 +1,330 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "share/ob_define.h"
|
||||
#include "obcdc/src/ob_log_utils.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
/*
|
||||
* TEST1.
|
||||
* Test split.
|
||||
*/
|
||||
TEST(utils, split)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
char str[] = "tt1.database1";
|
||||
const char *delimiter = ".";
|
||||
const char *res[16];
|
||||
int64_t res_cnt = 0;
|
||||
|
||||
err = split(str, delimiter, 2, res, res_cnt);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
EXPECT_EQ(2, res_cnt);
|
||||
EXPECT_STREQ("tt1", res[0]);
|
||||
EXPECT_STREQ("database1", res[1]);
|
||||
|
||||
char str1[] = "tt2.database2.test";
|
||||
err = split(str1, delimiter, 3, res, res_cnt);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
EXPECT_EQ(3, res_cnt);
|
||||
EXPECT_STREQ("tt2", res[0]);
|
||||
EXPECT_STREQ("database2", res[1]);
|
||||
EXPECT_STREQ("test", res[2]);
|
||||
}
|
||||
|
||||
/*
|
||||
* TEST2.
|
||||
* Test split. Boundary tests
|
||||
*/
|
||||
TEST(utils, split_boundary)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
char str[] = "tt1.database1";
|
||||
const char *delimiter = ".";
|
||||
const char *res[16];
|
||||
int64_t res_cnt = 0;
|
||||
|
||||
err = split(NULL, delimiter, 2, res, res_cnt);
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, err);
|
||||
EXPECT_EQ(0, res_cnt);
|
||||
|
||||
char str1[] = "";
|
||||
err = split(str1, delimiter, 2, res, res_cnt);
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, err);
|
||||
EXPECT_EQ(0, res_cnt);
|
||||
|
||||
err = split(str, NULL, 2, res, res_cnt);
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, err);
|
||||
EXPECT_EQ(0, res_cnt);
|
||||
|
||||
const char *delimiter1 = "";
|
||||
err = split(str, delimiter1, 2, res, res_cnt);
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, err);
|
||||
EXPECT_EQ(0, res_cnt);
|
||||
|
||||
// Test for incoming length errors
|
||||
err = split(str, delimiter, 1, res, res_cnt);
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, err);
|
||||
EXPECT_EQ(1, res_cnt);
|
||||
}
|
||||
|
||||
TEST(utils, split_int64_all)
|
||||
{
|
||||
char delimiter = '|';
|
||||
ObString str;
|
||||
const char *ptr = NULL;
|
||||
ObSEArray<int64_t, 8> ret_array;
|
||||
|
||||
// Store a single number
|
||||
ptr = "100";
|
||||
str.assign_ptr(ptr, (ObString::obstr_size_t)strlen(ptr));
|
||||
ret_array.reuse();
|
||||
EXPECT_EQ(OB_SUCCESS, split_int64(str, delimiter, ret_array));
|
||||
EXPECT_EQ(1, ret_array.count());
|
||||
EXPECT_EQ(100, ret_array.at(0));
|
||||
|
||||
// Store multiple numbers
|
||||
ptr = "100|2000|30000|400000";
|
||||
str.assign_ptr(ptr, (ObString::obstr_size_t)strlen(ptr));
|
||||
ret_array.reuse();
|
||||
EXPECT_EQ(OB_SUCCESS, split_int64(str, delimiter, ret_array));
|
||||
EXPECT_EQ(4, ret_array.count());
|
||||
EXPECT_EQ(100, ret_array.at(0));
|
||||
EXPECT_EQ(2000, ret_array.at(1));
|
||||
EXPECT_EQ(30000, ret_array.at(2));
|
||||
EXPECT_EQ(400000, ret_array.at(3));
|
||||
|
||||
// Store multiple numbers with a separator at the end
|
||||
ptr = "100|2000|30000|400000|";
|
||||
str.assign_ptr(ptr, (ObString::obstr_size_t)strlen(ptr));
|
||||
ret_array.reuse();
|
||||
EXPECT_EQ(OB_SUCCESS, split_int64(str, delimiter, ret_array));
|
||||
EXPECT_EQ(4, ret_array.count());
|
||||
EXPECT_EQ(100, ret_array.at(0));
|
||||
EXPECT_EQ(2000, ret_array.at(1));
|
||||
EXPECT_EQ(30000, ret_array.at(2));
|
||||
EXPECT_EQ(400000, ret_array.at(3));
|
||||
|
||||
// no number
|
||||
ptr = "";
|
||||
str.assign_ptr(ptr, (ObString::obstr_size_t)strlen(ptr));
|
||||
ret_array.reuse();
|
||||
EXPECT_EQ(OB_SUCCESS, split_int64(str, delimiter, ret_array));
|
||||
EXPECT_EQ(0, ret_array.count());
|
||||
|
||||
// only a separator
|
||||
ptr = "|";
|
||||
str.assign_ptr(ptr, (ObString::obstr_size_t)strlen(ptr));
|
||||
ret_array.reuse();
|
||||
EXPECT_EQ(OB_SUCCESS, split_int64(str, delimiter, ret_array));
|
||||
EXPECT_EQ(0, ret_array.count());
|
||||
|
||||
// There are no numbers, only invalid content
|
||||
ptr = ",";
|
||||
str.assign_ptr(ptr, (ObString::obstr_size_t)strlen(ptr));
|
||||
ret_array.reuse();
|
||||
EXPECT_EQ(OB_INVALID_DATA, split_int64(str, delimiter, ret_array));
|
||||
EXPECT_EQ(0, ret_array.count());
|
||||
|
||||
// Numerical limit values
|
||||
char max_int[100];
|
||||
snprintf(max_int, sizeof(max_int), "%ld", INT64_MAX);
|
||||
str.assign_ptr(max_int, (ObString::obstr_size_t)strlen(max_int));
|
||||
ret_array.reuse();
|
||||
EXPECT_EQ(OB_SUCCESS, split_int64(str, delimiter, ret_array));
|
||||
EXPECT_EQ(1, ret_array.count());
|
||||
EXPECT_EQ(INT64_MAX, ret_array.at(0));
|
||||
|
||||
// Exceeding numerical limits
|
||||
std::string over_size_int(100, '9');
|
||||
str.assign_ptr(over_size_int.c_str(), (ObString::obstr_size_t)strlen(over_size_int.c_str()));
|
||||
ret_array.reuse();
|
||||
EXPECT_EQ(OB_INVALID_DATA, split_int64(str, delimiter, ret_array));
|
||||
|
||||
// Use a different delimiter character
|
||||
// Only the first one can be parsed
|
||||
ptr = "100,200,300";
|
||||
str.assign_ptr(ptr, (ObString::obstr_size_t)strlen(ptr));
|
||||
ret_array.reuse();
|
||||
EXPECT_EQ(OB_INVALID_DATA, split_int64(str, delimiter, ret_array));
|
||||
|
||||
// contains other characters
|
||||
ptr = "100a|200b|300c";
|
||||
str.assign_ptr(ptr, (ObString::obstr_size_t)strlen(ptr));
|
||||
ret_array.reuse();
|
||||
EXPECT_EQ(OB_INVALID_DATA, split_int64(str, delimiter, ret_array));
|
||||
|
||||
// delimiter at the first position
|
||||
ptr = "|100|200|";
|
||||
str.assign_ptr(ptr, (ObString::obstr_size_t)strlen(ptr));
|
||||
ret_array.reuse();
|
||||
EXPECT_EQ(OB_SUCCESS, split_int64(str, delimiter, ret_array));
|
||||
EXPECT_EQ(2, ret_array.count());
|
||||
EXPECT_EQ(100, ret_array.at(0));
|
||||
EXPECT_EQ(200, ret_array.at(1));
|
||||
|
||||
// The separator appears several times in succession
|
||||
ptr = "300||400|||500|";
|
||||
str.assign_ptr(ptr, (ObString::obstr_size_t)strlen(ptr));
|
||||
ret_array.reuse();
|
||||
EXPECT_EQ(OB_SUCCESS, split_int64(str, delimiter, ret_array));
|
||||
EXPECT_EQ(3, ret_array.count());
|
||||
EXPECT_EQ(300, ret_array.at(0));
|
||||
EXPECT_EQ(400, ret_array.at(1));
|
||||
EXPECT_EQ(500, ret_array.at(2));
|
||||
}
|
||||
|
||||
TEST(utils, kv_pair)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
char kv_str[] = "test:999";
|
||||
const char *delimiter1 = ":";
|
||||
const char *delimiter2 = "%";
|
||||
|
||||
ObLogKVCollection::KVPair kvpair;
|
||||
ret = kvpair.init(delimiter1);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
ret = kvpair.deserialize(kv_str);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_STREQ("test", kvpair.get_key());
|
||||
EXPECT_STREQ("999", kvpair.get_value());
|
||||
|
||||
kvpair.reset();
|
||||
char key[] = "kjdngasdey";
|
||||
char value[] = "vaksahgasfashjlue";
|
||||
ret = kvpair.init(delimiter2);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
ret = kvpair.set_key_and_value(key, value);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_STREQ("kjdngasdey", kvpair.get_key());
|
||||
EXPECT_STREQ("vaksahgasfashjlue", kvpair.get_value());
|
||||
int64_t pos = 0;
|
||||
int64_t len = kvpair.length();
|
||||
EXPECT_EQ(strlen(key) + strlen(value) + strlen(delimiter2), len);
|
||||
char buf[len+1];
|
||||
ret = kvpair.serialize(buf, len+1, pos);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_STREQ("kjdngasdey%vaksahgasfashjlue", buf);
|
||||
}
|
||||
|
||||
TEST(utils, kv_collection)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
char kv_str[] = "data:2346234;test:5asdfgasf; time:21354213";
|
||||
int64_t origin_len = strlen(kv_str);
|
||||
const char *pair_delimiter = "; ";
|
||||
const char *kv_delimiter = ":";
|
||||
ObLogKVCollection kv_c;
|
||||
ret = kv_c.init(kv_delimiter, pair_delimiter);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
ret = kv_c.deserialize(kv_str);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_EQ(3, kv_c.size());
|
||||
int64_t len = kv_c.length();
|
||||
EXPECT_EQ(origin_len, len-1);
|
||||
bool contain = false;
|
||||
ret = kv_c.contains_key("data", contain);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_EQ(true, contain);
|
||||
ret = kv_c.contains_key("versin", contain);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_EQ(false, contain);
|
||||
const char *value_time = NULL;
|
||||
ret = kv_c.get_value_of_key("time", value_time);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_STREQ("21354213", value_time);
|
||||
kv_c.reset();
|
||||
|
||||
// test append
|
||||
kv_c.init(kv_delimiter, pair_delimiter);
|
||||
ObLogKVCollection::KVPair kvpair;
|
||||
char key[] = "jakds";
|
||||
char value[] = "dsagads";
|
||||
ret = kvpair.init(kv_delimiter);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
ret = kvpair.set_key_and_value(key, value);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_EQ(true, kvpair.is_valid());
|
||||
ret = kv_c.append_kv_pair(kvpair);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
|
||||
kvpair.reset();
|
||||
char key1[] = "time";
|
||||
char value1[] = "1237851204";
|
||||
ret = kvpair.init(kv_delimiter);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
ret = kvpair.set_key_and_value(key1, value1);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_EQ(true, kvpair.is_valid());
|
||||
ret = kv_c.append_kv_pair(kvpair);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
kvpair.reset();
|
||||
ret = kv_c.contains_key("time1", contain);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_EQ(false, contain);
|
||||
origin_len = strlen(key) + strlen(value) + strlen(key1) + strlen(value1)
|
||||
+ 2 * strlen(kv_delimiter) + strlen(pair_delimiter);
|
||||
len = kv_c.length();
|
||||
EXPECT_EQ(origin_len, len);
|
||||
char buf[len+1];
|
||||
int64_t pos = 0;
|
||||
ret = kv_c.serialize(buf, len+1, pos);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_STREQ("jakds:dsagads; time:1237851204", buf);
|
||||
}
|
||||
|
||||
TEST(utils, cstring_to_num)
|
||||
{
|
||||
char numstr1[] = "123412";
|
||||
char numstr2[] = "-683251";
|
||||
char numstr3[] = "0";
|
||||
char numstr4[] = "a123";
|
||||
char numstr5[] = " 123";
|
||||
char numstr6[] = "";
|
||||
int64_t val = 0;
|
||||
int ret = c_str_to_int(numstr1, val);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_EQ(123412, val);
|
||||
ret = c_str_to_int(numstr2, val);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_EQ(-683251, val);
|
||||
ret = c_str_to_int(numstr3, val);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_EQ(0, val);
|
||||
ret = c_str_to_int(numstr4, val);
|
||||
EXPECT_EQ(OB_INVALID_DATA, ret);
|
||||
ret = c_str_to_int(numstr5, val);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_EQ(123, val);
|
||||
ret = c_str_to_int(numstr6, val);
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, ret);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
// ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
198
unittest/obcdc/test_ob_concurrent_seq_queue.cpp
Normal file
@ -0,0 +1,198 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "ob_concurrent_seq_queue.h"
|
||||
|
||||
#include "share/ob_define.h"
|
||||
#include "lib/time/ob_time_utility.h"
|
||||
#include "lib/atomic/ob_atomic.h"
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace common
|
||||
{
|
||||
class TestConSeqQueue : public ::testing::Test
|
||||
{
|
||||
public:
|
||||
static const int64_t RUN_TIME = 1L * 60L * 60L * 1000L * 1000L;
|
||||
static const int64_t THREAD_NUM = 20;
|
||||
static const int64_t STAT_INTERVAL = 5 * 1000 * 1000;
|
||||
public:
|
||||
TestConSeqQueue() {}
|
||||
~TestConSeqQueue() {}
|
||||
|
||||
virtual void SetUp()
|
||||
{
|
||||
ASSERT_EQ(0, queue_.init(1024));
|
||||
produce_seq_ = 0;
|
||||
consume_seq_ = 0;
|
||||
consume_thread_counter_ = 0;
|
||||
consume_task_count_ = 0;
|
||||
last_stat_time_ = 0;
|
||||
last_consume_task_count_ = 0;
|
||||
stop_flag_ = false;
|
||||
}
|
||||
virtual void TearDown()
|
||||
{
|
||||
queue_.destroy();
|
||||
}
|
||||
static void *produce_thread_func(void *args);
|
||||
static void *consume_thread_func(void *args);
|
||||
void run_produce();
|
||||
void run_consume();
|
||||
|
||||
public:
|
||||
pthread_t produce_threads_[THREAD_NUM];
|
||||
pthread_t consume_threads_[THREAD_NUM];
|
||||
int64_t consume_thread_counter_;
|
||||
ObConcurrentSeqQueue queue_;
|
||||
int64_t produce_seq_ CACHE_ALIGNED;
|
||||
int64_t consume_seq_ CACHE_ALIGNED;
|
||||
int64_t consume_task_count_ CACHE_ALIGNED;
|
||||
int64_t last_consume_task_count_ CACHE_ALIGNED;
|
||||
int64_t last_stat_time_ CACHE_ALIGNED;
|
||||
|
||||
volatile bool stop_flag_ CACHE_ALIGNED;
|
||||
};
|
||||
|
||||
TEST_F(TestConSeqQueue, basic)
|
||||
{
|
||||
ObConcurrentSeqQueue queue;
|
||||
void *data = 0;
|
||||
|
||||
EXPECT_EQ(0, queue.init(1024));
|
||||
|
||||
EXPECT_EQ(0, queue.push((void*)0, 0, 0));
|
||||
EXPECT_EQ(0, queue.push((void*)1, 1, 0));
|
||||
EXPECT_EQ(0, queue.push((void*)2, 2, 0));
|
||||
|
||||
EXPECT_EQ(0, queue.pop(data, 0, 0));
|
||||
EXPECT_EQ(0, (int64_t)data);
|
||||
EXPECT_EQ(0, queue.pop(data, 1, 0));
|
||||
EXPECT_EQ(1, (int64_t)data);
|
||||
EXPECT_EQ(0, queue.pop(data, 2, 0));
|
||||
EXPECT_EQ(2, (int64_t)data);
|
||||
|
||||
// Pushing or popping an element with an already-used sequence number fails
|
||||
EXPECT_NE(0, queue.push((void*)0, 0, 0));
|
||||
EXPECT_NE(0, queue.push((void*)1, 1, 0));
|
||||
EXPECT_NE(0, queue.push((void*)2, 2, 0));
|
||||
EXPECT_NE(0, queue.pop(data, 0, 0));
|
||||
EXPECT_NE(0, queue.pop(data, 1, 0));
|
||||
EXPECT_NE(0, queue.pop(data, 2, 0));
|
||||
}
|
||||
|
||||
void *TestConSeqQueue::produce_thread_func(void *args)
|
||||
{
|
||||
if (NULL != args) {
|
||||
((TestConSeqQueue *)args)->run_produce();
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void TestConSeqQueue::run_produce()
|
||||
{
|
||||
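// Each producer claims globally increasing sequence numbers and pushes them, retrying on OB_TIMEOUT until the test is stopped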
int ret = OB_SUCCESS;
|
||||
int64_t batch_count = 1000;
|
||||
|
||||
while (OB_SUCCESS == ret && ! stop_flag_) {
|
||||
for (int64_t index = 0; OB_SUCCESS == ret && index < batch_count; index++) {
|
||||
int64_t seq = ATOMIC_FAA(&produce_seq_, 1);
|
||||
while (! stop_flag_ && OB_TIMEOUT == (ret = queue_.push((void*)seq, seq, 1 * 1000 * 1000)));
|
||||
if (! stop_flag_) {
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void *TestConSeqQueue::consume_thread_func(void *args)
|
||||
{
|
||||
if (NULL != args) {
|
||||
((TestConSeqQueue *)args)->run_consume();
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void TestConSeqQueue::run_consume()
|
||||
{
|
||||
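// Each consumer claims the next sequence number, pops it and checks that the payload equals the sequence, retrying on OB_TIMEOUT until the test is stopped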
int ret = OB_SUCCESS;
|
||||
int64_t batch_count = 1000;
|
||||
int64_t end_time = ObTimeUtility::current_time();
|
||||
|
||||
int64_t thread_index = ATOMIC_FAA(&consume_thread_counter_, 0);
|
||||
|
||||
while (OB_SUCCESS == ret && !stop_flag_) {
|
||||
for (int64_t index = 0; OB_SUCCESS == ret && index < batch_count; index++) {
|
||||
int64_t seq = ATOMIC_FAA(&consume_seq_, 1);
|
||||
void *data = NULL;
|
||||
while (! stop_flag_ && OB_TIMEOUT == (ret = queue_.pop(data, seq, 1 * 1000 * 1000)));
|
||||
if (! stop_flag_) {
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
EXPECT_EQ(seq, (int64_t)data);
|
||||
ATOMIC_INC(&consume_task_count_);
|
||||
}
|
||||
}
|
||||
|
||||
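// Periodically log pop throughput (tasks consumed per second) since the last report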
int64_t cur_time = ObTimeUtility::current_time();
|
||||
if (OB_UNLIKELY(0 == thread_index) && cur_time - last_stat_time_ > STAT_INTERVAL) {
|
||||
int64_t task_count = ATOMIC_LOAD(&consume_task_count_);
|
||||
int64_t consume_seq = ATOMIC_LOAD(&consume_seq_);
|
||||
int64_t produce_seq = ATOMIC_LOAD(&produce_seq_);
|
||||
if (0 != last_stat_time_) {
|
||||
int64_t delta_task_count = task_count - last_consume_task_count_;
|
||||
int64_t delta_time_sec = (cur_time - last_stat_time_)/1000000;
|
||||
LIB_LOG(INFO, "STAT", "POP_TPS", delta_task_count/delta_time_sec, K(delta_task_count),
|
||||
K(consume_seq), K(produce_seq), K(INT32_MAX));
|
||||
}
|
||||
|
||||
last_stat_time_ = cur_time;
|
||||
last_consume_task_count_ = task_count;
|
||||
}
|
||||
|
||||
if (end_time - cur_time <= 0) {
|
||||
stop_flag_ = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(TestConSeqQueue, thread)
|
||||
{
|
||||
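// Start THREAD_NUM producer threads and THREAD_NUM consumer threads, then wait for them all to finish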
for (int64_t index = 0; index < THREAD_NUM; index++) {
|
||||
ASSERT_EQ(0, pthread_create(produce_threads_ + index, NULL, produce_thread_func, this));
|
||||
}
|
||||
for (int64_t index = 0; index < THREAD_NUM; index++) {
|
||||
ASSERT_EQ(0, pthread_create(consume_threads_ + index, NULL, consume_thread_func, this));
|
||||
}
|
||||
for (int64_t index = 0; index < THREAD_NUM; index++) {
|
||||
pthread_join(produce_threads_[index], NULL);
|
||||
produce_threads_[index] = 0;
|
||||
}
|
||||
for (int64_t index = 0; index < THREAD_NUM; index++) {
|
||||
pthread_join(consume_threads_[index], NULL);
|
||||
consume_threads_[index] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
oceanbase::common::ObLogger::get_logger().set_log_level("INFO");
|
||||
OB_LOGGER.set_log_level("INFO");
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
132
unittest/obcdc/test_ob_log_adapt_string.cpp
Normal file
@ -0,0 +1,132 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "ob_log_adapt_string.h" // ObLogAdaptString
|
||||
|
||||
using namespace oceanbase::common;
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace liboblog
|
||||
{
|
||||
|
||||
class TestLogAdaptString : public ::testing::Test
|
||||
{
|
||||
public:
|
||||
TestLogAdaptString() {}
|
||||
~TestLogAdaptString() {}
|
||||
};
|
||||
|
||||
void test_append_str(ObLogAdaptString &str, std::string &std_str, const char *cstr)
|
||||
{
|
||||
const char *ret_cstr = NULL;
|
||||
ASSERT_EQ(OB_SUCCESS, str.append(cstr));
|
||||
std_str.append(cstr);
|
||||
|
||||
ASSERT_EQ(OB_SUCCESS, str.cstr(ret_cstr));
|
||||
ASSERT_STREQ(std_str.c_str(), ret_cstr);
|
||||
}
|
||||
|
||||
TEST_F(TestLogAdaptString, smoke_test)
|
||||
{
|
||||
ObLogAdaptString str(ObModIds::OB_LOG_TEMP_MEMORY);
|
||||
std::string std_str;
|
||||
const char *cstr = "";
|
||||
|
||||
test_append_str(str, std_str, "");
|
||||
test_append_str(str, std_str, "I am me ");
|
||||
test_append_str(str, std_str, "CHINA ");
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, str.append_int64(100));
|
||||
std_str.append("100");
|
||||
|
||||
ASSERT_EQ(OB_SUCCESS, str.cstr(cstr));
|
||||
ASSERT_STREQ(std_str.c_str(), cstr);
|
||||
|
||||
OBLOG_LOG(INFO, "cstr", K(cstr), K(str));
|
||||
}
|
||||
|
||||
TEST_F(TestLogAdaptString, argument_test)
|
||||
{
|
||||
ObLogAdaptString str(ObModIds::OB_LOG_TEMP_MEMORY);
|
||||
std::string std_str;
|
||||
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, str.append(NULL));
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, str.append(""));
|
||||
std_str.append("");
|
||||
EXPECT_EQ(OB_SUCCESS, str.append_int64(-1));
|
||||
std_str.append("-1");
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, str.append_int64(INT64_MAX));
|
||||
char int64_max[100];
|
||||
sprintf(int64_max, "%ld", INT64_MAX);
|
||||
std_str.append(int64_max);
|
||||
|
||||
|
||||
const char *cstr = "";
|
||||
ASSERT_EQ(OB_SUCCESS, str.cstr(cstr));
|
||||
ASSERT_STREQ(std_str.c_str(), cstr);
|
||||
|
||||
OBLOG_LOG(INFO, "cstr", K(cstr), K(std_str.c_str()));
|
||||
}
|
||||
|
||||
TEST_F(TestLogAdaptString, all_sort_of_string)
|
||||
{
|
||||
ObLogAdaptString str(ObModIds::OB_LOG_TEMP_MEMORY);
|
||||
std::string std_str;
|
||||
const char *cstr = "";
|
||||
char buf[1 * _M_ + 1];
|
||||
|
||||
(void)memset(buf, 'a', sizeof(buf));
|
||||
|
||||
// Empty strings are also equal
|
||||
EXPECT_EQ(OB_SUCCESS, str.cstr(cstr));
|
||||
EXPECT_STREQ(std_str.c_str(), cstr);
|
||||
|
||||
for (int i = 0; i < 3; i++) {
|
||||
// less than 8K
|
||||
test_append_str(str, std_str, "");
|
||||
test_append_str(str, std_str, "11111111111111");
|
||||
test_append_str(str, std_str, "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
|
||||
|
||||
// equals to 8K
|
||||
buf[8 * _K_] = '\0';
|
||||
test_append_str(str, std_str, buf);
|
||||
test_append_str(str, std_str, buf);
|
||||
test_append_str(str, std_str, buf);
|
||||
buf[8 * _K_] = 'a';
|
||||
|
||||
// greater than 8K
|
||||
buf[16 * _K_] = '\0';
|
||||
test_append_str(str, std_str, buf);
|
||||
buf[16 * _K_] = 'a';
|
||||
buf[32 * _K_] = '\0';
|
||||
test_append_str(str, std_str, buf);
|
||||
buf[32 * _K_] = 'a';
|
||||
buf[1 * _M_] = '\0';
|
||||
test_append_str(str, std_str, buf);
|
||||
buf[1 * _M_] = 'a';
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
oceanbase::common::ObLogger::get_logger().set_log_level("INFO");
|
||||
OB_LOGGER.set_log_level("INFO");
|
||||
OB_LOGGER.set_file_name("test_ob_log_adapt_string.log", true);
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
528
unittest/obcdc/test_ob_log_all_svr_cache.cpp
Normal file
@ -0,0 +1,528 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#define USING_LOG_PREFIX OBLOG_FETCHER
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "share/ob_define.h"
|
||||
#define private public
|
||||
#include "obcdc/src/ob_log_all_svr_cache.h"
|
||||
#include "obcdc/src/ob_log_systable_helper.h"
|
||||
#include "ob_log_utils.h"
|
||||
#include "test_ob_log_fetcher_common_utils.h"
|
||||
#include "lib/atomic/ob_atomic.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
class TestObLogAllSvrCache: public ::testing::Test
|
||||
{
|
||||
public :
|
||||
virtual void SetUp() {}
|
||||
virtual void TearDown() {}
|
||||
public :
|
||||
static const int64_t ALLSVR_CACHE_UPDATE_INTERVAL = 10 * _MSEC_;
|
||||
};
|
||||
|
||||
static const int64_t SERVER_COUNT = 120;
|
||||
static const int64_t FIRST_QUERY_RECORD_COUNT = 60;
|
||||
static const int64_t VARY_RECORD_COUNT = 6;
|
||||
|
||||
typedef IObLogSysTableHelper::AllServerRecordArray AllServerRecordArray;
|
||||
typedef IObLogSysTableHelper::AllServerRecord AllServerRecord;
|
||||
AllServerRecord all_server_records[SERVER_COUNT];
|
||||
const char *zones[4] = {"z1", "z2", "z3", "z4"};
|
||||
const char *regions[4] = {"hz", "sh", "sz", "sh"};
|
||||
const char *zone_types[4] = {"ReadWrite", "ReadWrite", "ReadWrite", "ReadOnly"};
|
||||
|
||||
void generate_data()
|
||||
{
|
||||
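// Populate all_server_records with 127.0.0.1 and port 8000+idx; status and zone rotate through INACTIVE/ACTIVE/DELETING/ACTIVE and z1..z4 according to idx % 4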
int ret = OB_SUCCESS;
|
||||
ObString ip_str = "127.0.0.1";
|
||||
|
||||
for(int64_t idx = 0; idx < SERVER_COUNT; idx++) {
|
||||
AllServerRecord &record = all_server_records[idx];
|
||||
int64_t pos = 0;
|
||||
if (OB_FAIL(databuff_printf(record.svr_ip_, sizeof(record.svr_ip_), pos,
|
||||
"%.*s", ip_str.length(), ip_str.ptr()))) {
|
||||
LOG_ERROR("save ip address fail", K(ret), K(pos),
|
||||
"buf_size", sizeof(record.svr_ip_), K(ip_str));
|
||||
}
|
||||
record.svr_port_ = static_cast<int32_t>(idx + 8000);
|
||||
int64_t index = idx % 4;
|
||||
switch (index) {
|
||||
case 0:
|
||||
record.status_ = share::ObServerStatus::DisplayStatus::OB_SERVER_INACTIVE;
|
||||
break;
|
||||
case 1:
|
||||
record.status_ = share::ObServerStatus::DisplayStatus::OB_SERVER_ACTIVE;
|
||||
break;
|
||||
case 2:
|
||||
record.status_ = share::ObServerStatus::DisplayStatus::OB_SERVER_DELETING;
|
||||
break;
|
||||
case 3:
|
||||
record.status_ = share::ObServerStatus::DisplayStatus::OB_SERVER_ACTIVE;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
if (OB_FAIL(record.zone_.assign(zones[index]))) {
|
||||
LOG_ERROR("record zone assign fail", K(ret), K(record));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// To verify that the __all_server system table data is cached correctly, the returned data follows the dynamic policy below.
|
||||
// Assume FIRST_QUERY_RECORD_COUNT=60, VARY_RECORD_COUNT=6
|
||||
// 1. First query returns: records from 0 to 59
|
||||
// 2. Second query: 6 new rows are added, i.e. rows 0 to 65 are returned (rows 60 to 65 are new)
|
||||
// 3. Third query: the first 6 rows are dropped, i.e. rows 6 to 65 are returned (rows 0 to 5 are removed)
|
||||
// ...
|
||||
// and so on until the end; the final result to validate covers rows 60 to 119
|
||||
class MockSysTableHelper1 : public IObLogSysTableHelper
|
||||
{
|
||||
public:
|
||||
MockSysTableHelper1() : query_time_(1),
|
||||
start_index_(0),
|
||||
end_index_(FIRST_QUERY_RECORD_COUNT - 1),
|
||||
is_invariable_(false) {}
|
||||
virtual ~MockSysTableHelper1() {}
|
||||
|
||||
public:
|
||||
int query_with_multiple_statement(BatchSQLQuery &batch_query)
|
||||
{
|
||||
UNUSED(batch_query);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Query __all_clog_history_info_v2 based on log_id to get all servers with service log IDs greater than or equal to log_id logs
|
||||
virtual int query_clog_history_by_log_id(
|
||||
const common::ObPartitionKey &pkey,
|
||||
const uint64_t log_id,
|
||||
ClogHistoryRecordArray &records)
|
||||
{
|
||||
UNUSED(pkey);
|
||||
UNUSED(log_id);
|
||||
UNUSED(records);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Query __all_clog_history_info_v2 for all servers with timestamp greater than or equal to timestamp log based on timestamp
|
||||
virtual int query_clog_history_by_tstamp(
|
||||
const common::ObPartitionKey &pkey,
|
||||
const int64_t timestamp,
|
||||
ClogHistoryRecordArray &records)
|
||||
{
|
||||
UNUSED(pkey);
|
||||
UNUSED(timestamp);
|
||||
UNUSED(records);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Query __all_meta_table / __all_root_table to get information about the servers that are serving the partition
|
||||
virtual int query_meta_info(
|
||||
const common::ObPartitionKey &pkey,
|
||||
MetaRecordArray &records)
|
||||
{
|
||||
UNUSED(pkey);
|
||||
UNUSED(records);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Query __all_meta_table / __all_root_table for leader information
|
||||
virtual int query_leader_info(
|
||||
const common::ObPartitionKey &pkey,
|
||||
bool &has_leader,
|
||||
common::ObAddr &leader)
|
||||
{
|
||||
UNUSED(pkey);
|
||||
UNUSED(has_leader);
|
||||
UNUSED(leader);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Query __all_server table for all active server information
|
||||
virtual int query_all_server_info(AllServerRecordArray &records)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
// The first query returns records from 0 to FIRST_QUERY_RECORD_COUNT-1
|
||||
if (1 == query_time_) {
|
||||
start_index_ = 0;
|
||||
end_index_ = FIRST_QUERY_RECORD_COUNT - 1;
|
||||
} else {
|
||||
if (is_invariable_) { // Return records no longer change
|
||||
// do nothing
|
||||
} else if (0 == (query_time_ & 0x01)) { // ADD record
|
||||
if (end_index_ + VARY_RECORD_COUNT >= SERVER_COUNT) {
|
||||
ATOMIC_STORE(&is_invariable_, true);
|
||||
} else {
|
||||
end_index_ += VARY_RECORD_COUNT;
|
||||
}
|
||||
} else if (1 == (query_time_ & 0x01)) { // minus records
|
||||
start_index_ += VARY_RECORD_COUNT;
|
||||
}
|
||||
}
|
||||
|
||||
// make records
|
||||
for (int64_t idx = start_index_; OB_SUCC(ret) && idx <= end_index_; idx++) {
|
||||
AllServerRecord &record = all_server_records[idx];
|
||||
if (OB_FAIL(records.push_back(record))) {
|
||||
LOG_ERROR("records push error", K(ret), K(record));
|
||||
}
|
||||
}
|
||||
LOG_INFO("query all server info", K(query_time_), K(start_index_),
|
||||
K(end_index_), K(is_invariable_));
|
||||
query_time_++;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int query_all_zone_info(AllZoneRecordArray &records)
|
||||
{
|
||||
UNUSED(records);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
virtual int query_all_zone_type(AllZoneTypeRecordArray &records)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
for (int64_t idx = 0; idx < 4; ++idx) {
|
||||
AllZoneTypeRecord record;
|
||||
record.zone_type_ = str_to_zone_type(zone_types[idx]);
|
||||
if (OB_FAIL(record.zone_.assign(zones[idx]))) {
|
||||
LOG_ERROR("record assign zone error", K(ret), K(record));
|
||||
} else if (OB_FAIL(records.push_back(record))) {
|
||||
LOG_ERROR("records push error", K(ret), K(record));
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int query_cluster_info(ClusterInfo &cluster_info)
|
||||
{
|
||||
UNUSED(cluster_info);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
virtual int query_cluster_min_observer_version(uint64_t &min_observer_version)
|
||||
{
|
||||
UNUSED(min_observer_version);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
virtual int reset_connection()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
virtual int query_timezone_info_version(const uint64_t tenant_id,
|
||||
int64_t &timezone_info_version)
|
||||
{
|
||||
UNUSED(tenant_id);
|
||||
UNUSED(timezone_info_version);
|
||||
return 0;
|
||||
}
|
||||
public:
|
||||
int64_t query_time_;
|
||||
int64_t start_index_;
|
||||
int64_t end_index_;
|
||||
bool is_invariable_;
|
||||
};
|
||||
|
||||
class MockSysTableHelper2 : public IObLogSysTableHelper
|
||||
{
|
||||
public:
|
||||
MockSysTableHelper2() : query_time_(1),
|
||||
start_index_(0),
|
||||
end_index_(FIRST_QUERY_RECORD_COUNT - 1) {}
|
||||
virtual ~MockSysTableHelper2() {}
|
||||
|
||||
public:
|
||||
virtual int query_with_multiple_statement(BatchSQLQuery &batch_query)
|
||||
{
|
||||
UNUSED(batch_query);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Query __all_clog_history_info_v2 based on log_id to get all servers with service log IDs greater than or equal to log_id logs
|
||||
virtual int query_clog_history_by_log_id(
|
||||
const common::ObPartitionKey &pkey,
|
||||
const uint64_t log_id,
|
||||
ClogHistoryRecordArray &records)
|
||||
{
|
||||
UNUSED(pkey);
|
||||
UNUSED(log_id);
|
||||
UNUSED(records);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Query __all_clog_history_info_v2 for all servers with timestamp greater than or equal to timestamp log based on timestamp
|
||||
virtual int query_clog_history_by_tstamp(
|
||||
const common::ObPartitionKey &pkey,
|
||||
const int64_t timestamp,
|
||||
ClogHistoryRecordArray &records)
|
||||
{
|
||||
UNUSED(pkey);
|
||||
UNUSED(timestamp);
|
||||
UNUSED(records);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Query __all_meta_table / __all_root_table to get information about the servers that are serving the partition
|
||||
virtual int query_meta_info(
|
||||
const common::ObPartitionKey &pkey,
|
||||
MetaRecordArray &records)
|
||||
{
|
||||
UNUSED(pkey);
|
||||
UNUSED(records);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Query __all_meta_table / __all_root_table for leader information
|
||||
virtual int query_leader_info(
|
||||
const common::ObPartitionKey &pkey,
|
||||
bool &has_leader,
|
||||
common::ObAddr &leader)
|
||||
{
|
||||
UNUSED(pkey);
|
||||
UNUSED(has_leader);
|
||||
UNUSED(leader);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Query the __all_server table to get all active server information
|
||||
// First query: return a batch of servers, 1/3 of which are ACTIVE
|
||||
// Second query: return the servers returned in the first query, and the ACTIVE server status is changed to INACTIVE
|
||||
virtual int query_all_server_info(AllServerRecordArray &records)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
// build records
|
||||
for (int64_t idx = start_index_; OB_SUCC(ret) && idx <= end_index_; idx++) {
|
||||
AllServerRecord &record = all_server_records[idx];
|
||||
if (2 == query_time_) {
|
||||
// ACTIVE->INACTIVE
|
||||
if (1 == idx % 4) {
|
||||
record.status_ = share::ObServerStatus::DisplayStatus::OB_SERVER_INACTIVE;
|
||||
}
|
||||
}
|
||||
if (OB_FAIL(records.push_back(record))) {
|
||||
LOG_ERROR("records push error", K(ret), K(record));
|
||||
}
|
||||
}
|
||||
LOG_INFO("query all server info", K(query_time_), K(start_index_), K(end_index_));
|
||||
query_time_++;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int query_all_zone_info(AllZoneRecordArray &records)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
for (int64_t idx = 0; idx < 4; ++idx) {
|
||||
AllZoneRecord record;
|
||||
if (OB_FAIL(record.zone_.assign(zones[idx]))) {
|
||||
LOG_ERROR("record assign zone error", K(ret), K(record));
|
||||
} else if (OB_FAIL(record.region_.assign(regions[idx]))) {
|
||||
LOG_ERROR("record assign error", K(ret), K(record));
|
||||
} else if (OB_FAIL(records.push_back(record))) {
|
||||
LOG_ERROR("records push error", K(ret), K(record));
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int query_all_zone_type(AllZoneTypeRecordArray &records)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
for (int64_t idx = 0; idx < 4; ++idx) {
|
||||
AllZoneTypeRecord record;
|
||||
record.zone_type_ = str_to_zone_type(zone_types[idx]);
|
||||
if (OB_FAIL(record.zone_.assign(zones[idx]))) {
|
||||
LOG_ERROR("record assign zone error", K(ret), K(record));
|
||||
} else if (OB_FAIL(records.push_back(record))) {
|
||||
LOG_ERROR("records push error", K(ret), K(record));
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int query_cluster_info(ClusterInfo &cluster_info)
|
||||
{
|
||||
UNUSED(cluster_info);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
virtual int query_cluster_min_observer_version(uint64_t &min_observer_version)
|
||||
{
|
||||
UNUSED(min_observer_version);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
virtual int reset_connection()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
virtual int query_timezone_info_version(const uint64_t tenant_id,
|
||||
int64_t &timezone_info_version)
|
||||
{
|
||||
UNUSED(tenant_id);
|
||||
UNUSED(timezone_info_version);
|
||||
return 0;
|
||||
}
|
||||
public:
|
||||
int64_t query_time_;
|
||||
int64_t start_index_;
|
||||
int64_t end_index_;
|
||||
};
|
||||
|
||||
|
||||
////////////////////// Test of basic functions //////////////////////////////////////////
|
||||
TEST_F(TestObLogAllSvrCache, init)
|
||||
{
|
||||
generate_data();
|
||||
|
||||
ObLogAllSvrCache all_svr_cache;
|
||||
MockSysTableHelper1 mock_systable_helper;
|
||||
MockFetcherErrHandler1 err_handler;
|
||||
|
||||
// set update interval
|
||||
all_svr_cache.set_update_interval_(ALLSVR_CACHE_UPDATE_INTERVAL);
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, all_svr_cache.init(mock_systable_helper, err_handler));
|
||||
while (false == ATOMIC_LOAD(&mock_systable_helper.is_invariable_)) {
|
||||
// do nothing
|
||||
}
|
||||
LOG_INFO("exit", K(mock_systable_helper.start_index_), K(mock_systable_helper.end_index_));
|
||||
|
||||
/// verify result
|
||||
EXPECT_EQ(FIRST_QUERY_RECORD_COUNT, all_svr_cache.svr_map_.count());
|
||||
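// The cache should now hold the last FIRST_QUERY_RECORD_COUNT entries of all_server_records; compute that index range for verification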
int64_t end_index = SERVER_COUNT - 1;
|
||||
int64_t start_index = end_index - FIRST_QUERY_RECORD_COUNT + 1;
|
||||
|
||||
// Test servers in the __all_server table
|
||||
// Servers in the ACTIVE and DELETING states are serviceable
|
||||
// Servers in the INACTIVE state are not serviceable
|
||||
for (int64_t idx = start_index; idx <= end_index; idx++) {
|
||||
ObAddr svr(ObAddr::IPV4, all_server_records[idx].svr_ip_, all_server_records[idx].svr_port_);
|
||||
if (0 == idx % 4) {
|
||||
// INACTIVE/ENCRYPTION ZONE
|
||||
EXPECT_FALSE(all_svr_cache.is_svr_avail(svr));
|
||||
} else {
|
||||
// ACTIVE/DELETING
|
||||
EXPECT_TRUE(all_svr_cache.is_svr_avail(svr));
|
||||
}
|
||||
}
|
||||
|
||||
// test server not in __all_server table
|
||||
for (int64_t idx = 0; idx < start_index; idx++) {
|
||||
ObAddr svr(ObAddr::IPV4, all_server_records[idx].svr_ip_, all_server_records[idx].svr_port_);
|
||||
EXPECT_FALSE(all_svr_cache.is_svr_avail(svr));
|
||||
}
|
||||
|
||||
all_svr_cache.destroy();
|
||||
}
|
||||
|
||||
// state change from active to inactive
|
||||
TEST_F(TestObLogAllSvrCache, all_svr_cache2)
|
||||
{
|
||||
ObLogAllSvrCache all_svr_cache;
|
||||
MockSysTableHelper2 mock_systable_helper;
|
||||
MockFetcherErrHandler1 err_handler;
|
||||
|
||||
// Threads are not started; assign member fields manually
|
||||
int ret = OB_SUCCESS;
|
||||
if (OB_FAIL(all_svr_cache.svr_map_.init(ObModIds::OB_LOG_ALL_SERVER_CACHE))) {
|
||||
LOG_ERROR("init svr map fail", K(ret));
|
||||
}
|
||||
if (OB_FAIL(all_svr_cache.zone_map_.init(ObModIds::OB_LOG_ALL_SERVER_CACHE))) {
|
||||
LOG_ERROR("init svr map fail", K(ret));
|
||||
}
|
||||
|
||||
all_svr_cache.cur_version_ = 0;
|
||||
all_svr_cache.cur_zone_version_ = 0;
|
||||
all_svr_cache.err_handler_ = &err_handler;
|
||||
all_svr_cache.systable_helper_ = &mock_systable_helper;
|
||||
|
||||
// update __all_zone
|
||||
EXPECT_EQ(OB_SUCCESS, all_svr_cache.update_zone_cache_());
|
||||
|
||||
// Manually update the cache and purge stale records
|
||||
EXPECT_EQ(OB_SUCCESS, all_svr_cache.update_server_cache_());
|
||||
EXPECT_EQ(OB_SUCCESS, all_svr_cache.purge_stale_records_());
|
||||
|
||||
/// verify result
|
||||
EXPECT_EQ(FIRST_QUERY_RECORD_COUNT, all_svr_cache.svr_map_.count());
|
||||
int64_t start_index = 0;
|
||||
int64_t end_index = FIRST_QUERY_RECORD_COUNT - 1;
|
||||
|
||||
for (int64_t idx = start_index; idx <= end_index; idx++) {
|
||||
ObAddr svr(ObAddr::IPV4, all_server_records[idx].svr_ip_, all_server_records[idx].svr_port_);
|
||||
if (1 == idx % 4) {
|
||||
EXPECT_TRUE(all_svr_cache.is_svr_avail(svr));
|
||||
}
|
||||
}
|
||||
|
||||
// Second manual update and purge
|
||||
EXPECT_EQ(OB_SUCCESS, all_svr_cache.update_server_cache_());
|
||||
EXPECT_EQ(OB_SUCCESS, all_svr_cache.purge_stale_records_());
|
||||
|
||||
// Verify the ACTIVE -> INACTIVE transition
|
||||
for (int64_t idx = start_index; idx <= end_index; idx++) {
|
||||
ObAddr svr(ObAddr::IPV4, all_server_records[idx].svr_ip_, all_server_records[idx].svr_port_);
|
||||
if (1 == idx % 4) {
|
||||
EXPECT_FALSE(all_svr_cache.is_svr_avail(svr));
|
||||
}
|
||||
}
|
||||
|
||||
all_svr_cache.destroy();
|
||||
}
|
||||
|
||||
}//end of unittest
|
||||
}//end of oceanbase
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
// ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
// testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
int ret = 1;
|
||||
ObLogger &logger = ObLogger::get_logger();
|
||||
logger.set_file_name("test_ob_log_all_svr_cache.log", true);
|
||||
logger.set_log_level(OB_LOG_LEVEL_INFO);
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
ret = RUN_ALL_TESTS();
|
||||
return ret;
|
||||
}
|
||||
165
unittest/obcdc/test_ob_log_dlist.cpp
Normal file
@ -0,0 +1,165 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#define USING_LOG_PREFIX OBLOG_FETCHER
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "share/ob_define.h"
|
||||
#define private public
|
||||
#include "obcdc/src/ob_log_dlist.h"
|
||||
#include "ob_log_utils.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
class DeriveDlistNode;
|
||||
typedef ObLogDListNode<DeriveDlistNode> TestDlistNode;
|
||||
|
||||
class DeriveDlistNode : public TestDlistNode
|
||||
{
|
||||
public:
|
||||
DeriveDlistNode() : value_(0) {}
|
||||
~DeriveDlistNode() {}
|
||||
public:
|
||||
void reset(int64_t value)
|
||||
{
|
||||
value_ = value;
|
||||
}
|
||||
|
||||
private:
|
||||
int64_t value_;
|
||||
};
|
||||
typedef DeriveDlistNode Type;
|
||||
// test count
|
||||
static const int64_t ONE_TEST_COUNT = 1;
|
||||
static const int64_t MUL_TEST_COUNT = 1000;
|
||||
|
||||
class TestObLogDlist: public ::testing::Test
|
||||
{
|
||||
public :
|
||||
virtual void SetUp() {}
|
||||
virtual void TearDown() {}
|
||||
public:
|
||||
// generate data
|
||||
void generate_data(const int64_t count, Type *&datas);
|
||||
// check data correctness
|
||||
bool is_dlist_correct(const int64_t start_value, DeriveDlistNode *head);
|
||||
};
|
||||
|
||||
void TestObLogDlist::generate_data(const int64_t count, Type *&datas)
|
||||
{
|
||||
datas = (Type *)ob_malloc(sizeof(Type) * count);
|
||||
OB_ASSERT(NULL != datas);
|
||||
for (int64_t idx = 0; idx < count; idx++) {
|
||||
new (datas + idx) Type();
|
||||
datas[idx].reset(idx);
|
||||
}
|
||||
}
|
||||
|
||||
bool TestObLogDlist::is_dlist_correct(const int64_t start_value, DeriveDlistNode *head)
|
||||
{
|
||||
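// Walk the list from the head: node values must decrease one by one starting from start_value, and each node's prev/next links must be consistent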
bool bool_ret = true;
|
||||
int64_t expect_val = start_value;
|
||||
|
||||
if (OB_ISNULL(head) || OB_NOT_NULL(head->get_prev())) {
|
||||
LOG_ERROR("invalid argument");
|
||||
bool_ret = false;
|
||||
} else if (OB_ISNULL(head->get_next())) { // single node
|
||||
if (expect_val != head->value_) {
|
||||
bool_ret = false;
|
||||
}
|
||||
LOG_DEBUG("is_dlist_correct", K(expect_val));
|
||||
} else { // multi node
|
||||
DeriveDlistNode *current_node = head;
|
||||
DeriveDlistNode *next_node = current_node->get_next();
|
||||
while ((NULL != current_node)
|
||||
&& (NULL != current_node->get_next())) {
|
||||
if ((expect_val != current_node->value_)
|
||||
|| (expect_val != next_node->get_prev()->value_)) {
|
||||
bool_ret = false;
|
||||
}
|
||||
LOG_DEBUG("is_dlist_correct", K(expect_val));
|
||||
current_node = next_node;
|
||||
next_node = current_node->get_next();
|
||||
expect_val--;
|
||||
}
|
||||
// last node
|
||||
if ((expect_val == current_node->value_)
|
||||
&& OB_ISNULL(current_node->get_next())) {
|
||||
LOG_DEBUG("is_dlist_correct", K(expect_val));
|
||||
} else {
|
||||
bool_ret = false;
|
||||
}
|
||||
}
|
||||
|
||||
return bool_ret;
|
||||
}
|
||||
|
||||
////////////////////// basic functions //////////////////////////////////////////
|
||||
TEST_F(TestObLogDlist, dlist)
|
||||
{
|
||||
// generate data
|
||||
Type *datas = NULL;
|
||||
generate_data(MUL_TEST_COUNT, datas);
|
||||
|
||||
// ObLogDList
|
||||
ObLogDList<DeriveDlistNode> dlist;
|
||||
EXPECT_EQ(0, dlist.count_);
|
||||
EXPECT_EQ(NULL, dlist.head_);
|
||||
|
||||
// insert data
|
||||
dlist.add_head(datas[0]);
|
||||
EXPECT_EQ(ONE_TEST_COUNT, dlist.count_);
|
||||
EXPECT_TRUE(is_dlist_correct(ONE_TEST_COUNT - 1, dlist.head()));
|
||||
|
||||
// insert multi data
|
||||
for (int64_t idx = 1; idx < MUL_TEST_COUNT; idx++) {
|
||||
dlist.add_head(datas[idx]);
|
||||
}
|
||||
EXPECT_EQ(MUL_TEST_COUNT, dlist.count_);
|
||||
EXPECT_TRUE(is_dlist_correct(MUL_TEST_COUNT - 1, dlist.head()));
|
||||
|
||||
// Delete the last half of the data and check for correctness
|
||||
for (int64_t idx = 0; idx < MUL_TEST_COUNT / 2; idx++) {
|
||||
dlist.erase(datas[idx]);
|
||||
}
|
||||
EXPECT_EQ(MUL_TEST_COUNT / 2, dlist.count_);
|
||||
EXPECT_TRUE(is_dlist_correct(MUL_TEST_COUNT - 1, dlist.head()));
|
||||
|
||||
// Delete the first half of the data and check for correctness
|
||||
for (int64_t idx = MUL_TEST_COUNT / 2; idx < MUL_TEST_COUNT; idx++) {
|
||||
dlist.erase(datas[idx]);
|
||||
}
|
||||
EXPECT_EQ(0, dlist.count_);
|
||||
}
|
||||
|
||||
}//end of unittest
|
||||
}//end of oceanbase
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
// ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
// testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
int ret = 1;
|
||||
ObLogger &logger = ObLogger::get_logger();
|
||||
logger.set_file_name("test_ob_log_dlist.log", true);
|
||||
logger.set_log_level(OB_LOG_LEVEL_INFO);
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
ret = RUN_ALL_TESTS();
|
||||
return ret;
|
||||
}
|
||||
598
unittest/obcdc/test_ob_log_fetcher_common_utils.h
Normal file
@ -0,0 +1,598 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
|
||||
//#include "lib/oblog/ob_log_module.h"
|
||||
#include "share/ob_define.h"
|
||||
#include "storage/ob_storage_log_type.h"
|
||||
#include "storage/transaction/ob_trans_log.h"
|
||||
|
||||
#include "obcdc/src/ob_log_instance.h"
|
||||
#include "ob_log_stream_worker.h"
|
||||
#define private public
|
||||
#include "ob_log_rpc.h"
|
||||
#include "ob_log_utils.h"
|
||||
#include "ob_log_systable_helper.h"
|
||||
|
||||
//#include "ob_log_part_fetch_ctx.h"
|
||||
//#include "ob_log_fetcher_stream.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
using namespace transaction;
|
||||
using namespace storage;
|
||||
//using namespace clog;
|
||||
//using namespace fetcher;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
/*
|
||||
* Utils.
|
||||
*/
|
||||
typedef std::vector<ObAddr> Svrs;
|
||||
typedef std::vector<ObPartitionKey> PKeys;
|
||||
typedef std::vector<uint64_t> LogIds;
|
||||
typedef std::vector<int64_t> Tstamps;
|
||||
|
||||
class MockFetcherErrHandler1 : public IObLogErrHandler
|
||||
{
|
||||
public:
|
||||
virtual ~MockFetcherErrHandler1() { }
|
||||
public:
|
||||
virtual void handle_error(const int err_no, const char *fmt, ...)
|
||||
{
|
||||
UNUSED(err_no);
|
||||
va_list ap;
|
||||
va_start(ap, fmt);
|
||||
//__E__(fmt, ap);
|
||||
//LOG_ERROR("test", fmt, ap);
|
||||
va_end(ap);
|
||||
abort();
|
||||
}
|
||||
};
|
||||
|
||||
/*
|
||||
* SvrFinder
|
||||
*
|
||||
*/
|
||||
static const int64_t ALL_SERVER_COUNT = 100;
|
||||
|
||||
static const int64_t QUERY_CLOG_HISTORY_VALID_COUNT = 10;
|
||||
static const int64_t QUERY_CLOG_HISTORY_INVALID_COUNT = 5;
|
||||
static const int64_t QUERY_META_INFO_ADD_COUNT = 6;
|
||||
|
||||
static const int64_t SVR_FINDER_REQ_NUM = 10 * 1000;
|
||||
static const int64_t LEADER_FINDER_REQ_NUM = 10 * 1000;
|
||||
|
||||
// Construct a request server to initiate asynchronous requests
|
||||
// request server: query clog/query meta
|
||||
// Request leader:
|
||||
class MockSysTableHelperBase: public IObLogSysTableHelper
|
||||
{
|
||||
public:
|
||||
MockSysTableHelperBase() {}
|
||||
virtual ~MockSysTableHelperBase() {}
|
||||
|
||||
public:
|
||||
/// Query __all_clog_history_info_v2 by log_id to get all servers that serve logs with IDs greater than or equal to log_id
|
||||
/// Returns two types of records: servers that exist in the __all_server table and servers that do not
|
||||
virtual int query_clog_history_by_log_id(
|
||||
const common::ObPartitionKey &pkey,
|
||||
const uint64_t log_id,
|
||||
ClogHistoryRecordArray &records)
|
||||
{
|
||||
// Generate random results.
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
UNUSED(pkey);
|
||||
records.reset();
|
||||
ClogHistoryRecord rec;
|
||||
|
||||
int64_t valid_seed = static_cast<int64_t>(pkey.table_id_);
|
||||
int64_t invalid_seed = ALL_SERVER_COUNT;
|
||||
int64_t cnt = QUERY_CLOG_HISTORY_VALID_COUNT + QUERY_CLOG_HISTORY_INVALID_COUNT;
|
||||
|
||||
for (int64_t idx = 0; idx < cnt; idx++) {
|
||||
rec.reset();
|
||||
rec.start_log_id_ = log_id;
|
||||
rec.end_log_id_ = log_id + 10000;
|
||||
if (idx < QUERY_CLOG_HISTORY_VALID_COUNT) {
|
||||
// Insert QUERY_CLOG_HISTORY_VALID_COUNT valid records
|
||||
snprintf(rec.svr_ip_, common::MAX_IP_ADDR_LENGTH + 1,
|
||||
"127.0.0.%ld", valid_seed % ALL_SERVER_COUNT);
|
||||
valid_seed++;
|
||||
} else {
|
||||
// Insert QUERY_CLOG_HISTORY_INVALID_COUNT invalid records
|
||||
snprintf(rec.svr_ip_, common::MAX_IP_ADDR_LENGTH + 1, "127.0.0.%ld", invalid_seed);
|
||||
invalid_seed++;
|
||||
}
|
||||
rec.svr_port_ = 8888;
|
||||
|
||||
records.push_back(rec);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/// Query __all_clog_history_info_v2 by timestamp to get all servers that serve logs with timestamps greater than or equal to the given timestamp
|
||||
virtual int query_clog_history_by_tstamp(
|
||||
const common::ObPartitionKey &pkey,
|
||||
const int64_t timestamp,
|
||||
ClogHistoryRecordArray &records)
|
||||
{
|
||||
// Generate random results.
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
UNUSED(timestamp);
|
||||
|
||||
records.reset();
|
||||
ClogHistoryRecord rec;
|
||||
|
||||
int64_t valid_seed = static_cast<int64_t>(pkey.table_id_);
|
||||
int64_t invalid_seed = ALL_SERVER_COUNT;
|
||||
int64_t cnt = QUERY_CLOG_HISTORY_VALID_COUNT + QUERY_CLOG_HISTORY_INVALID_COUNT;
|
||||
|
||||
for (int64_t idx = 0; idx < cnt; idx++) {
|
||||
rec.reset();
|
||||
rec.start_log_id_ = 0;
|
||||
rec.end_log_id_ = 65536;
|
||||
if (idx < QUERY_CLOG_HISTORY_VALID_COUNT) {
|
||||
// Insert QUERY_CLOG_HISTORY_VALID_COUNT valid records
|
||||
snprintf(rec.svr_ip_, common::MAX_IP_ADDR_LENGTH + 1,
|
||||
"127.0.0.%ld", valid_seed % ALL_SERVER_COUNT);
|
||||
valid_seed++;
|
||||
} else {
|
||||
// Insert QUERY_CLOG_HISTORY_INVALID_COUNT invalid records
|
||||
snprintf(rec.svr_ip_, common::MAX_IP_ADDR_LENGTH + 1, "127.0.0.%ld", invalid_seed);
|
||||
invalid_seed++;
|
||||
}
|
||||
rec.svr_port_ = 8888;
|
||||
|
||||
records.push_back(rec);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/// Query __all_meta_table / __all_root_table to get information about the servers that are serving the partition
|
||||
// Additional records: return the same servers as query_clog_history plus a batch of extra servers that have no clog history records
|
||||
virtual int query_meta_info(
|
||||
const common::ObPartitionKey &pkey,
|
||||
MetaRecordArray &records)
|
||||
{
|
||||
// Generate random results.
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
UNUSED(pkey);
|
||||
records.reset();
|
||||
MetaRecord rec;
|
||||
|
||||
int64_t seed = static_cast<int64_t>(pkey.table_id_);
|
||||
int64_t cnt = QUERY_CLOG_HISTORY_VALID_COUNT + QUERY_META_INFO_ADD_COUNT;
|
||||
|
||||
for (int64_t idx = 0; idx < cnt; idx++) {
|
||||
rec.reset();
|
||||
if (idx < QUERY_CLOG_HISTORY_VALID_COUNT) {
|
||||
// Returns the same server as query_clog_history
|
||||
snprintf(rec.svr_ip_, common::MAX_IP_ADDR_LENGTH + 1,
|
||||
"127.0.0.%ld", seed % ALL_SERVER_COUNT);
|
||||
} else {
|
||||
// Return QUERY_META_INFO_ADD_COUNT additional records
|
||||
snprintf(rec.svr_ip_, common::MAX_IP_ADDR_LENGTH + 1,
|
||||
"127.0.0.%ld", seed % ALL_SERVER_COUNT);
|
||||
}
|
||||
rec.svr_port_ = 8888;
|
||||
rec.replica_type_ = REPLICA_TYPE_FULL;
|
||||
seed++;
|
||||
|
||||
records.push_back(rec);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Query __all_meta_table / __all_root_table for leader information
|
||||
virtual int query_leader_info(
|
||||
const common::ObPartitionKey &pkey,
|
||||
bool &has_leader,
|
||||
common::ObAddr &leader)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
UNUSED(pkey);
|
||||
has_leader = true;
|
||||
leader.set_ip_addr("127.0.0.1", 8888);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/// Query __all_server table for all active server information
|
||||
virtual int query_all_server_info(AllServerRecordArray &records)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
UNUSED(records);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
virtual int query_all_zone_info(AllZoneRecordArray &records)
|
||||
{
|
||||
UNUSED(records);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
virtual int query_cluster_info(ClusterInfo &cluster_info)
|
||||
{
|
||||
UNUSED(cluster_info);
|
||||
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
|
||||
class MockSysTableHelperDerive1 : public MockSysTableHelperBase
|
||||
{
|
||||
public:
|
||||
MockSysTableHelperDerive1() {}
|
||||
virtual ~MockSysTableHelperDerive1() {}
|
||||
|
||||
public:
|
||||
/// Query the __all_server table to get all active server information
|
||||
/// The __all_server table has ALL_SERVER_COUNT (100) servers in the range 127.0.0.0:8888 ~ 127.0.0.99:8888
|
||||
virtual int query_all_server_info(AllServerRecordArray &records)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
int64_t seed = 0;
|
||||
AllServerRecord rec;
|
||||
for(int64_t idx = 0; idx < ALL_SERVER_COUNT; idx++) {
|
||||
rec.reset();
|
||||
snprintf(rec.svr_ip_, common::MAX_IP_ADDR_LENGTH + 1, "127.0.0.%ld", seed);
|
||||
rec.svr_port_ = 8888;
|
||||
rec.status_ = share::ObServerStatus::DisplayStatus::OB_SERVER_ACTIVE;
|
||||
records.push_back(rec);
|
||||
seed++;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
};
|
||||
|
||||
class MockSysTableHelperDerive2 : public MockSysTableHelperBase
|
||||
{
|
||||
public:
|
||||
MockSysTableHelperDerive2() {}
|
||||
virtual ~MockSysTableHelperDerive2() {}
|
||||
|
||||
public:
|
||||
/// Query the __all_server table to get all active server information
|
||||
/// The __all_server table has ALL_SERVER_COUNT (100) servers in the range 127.0.0.0:8888 ~ 127.0.0.99:8888
|
||||
// 1. 50 of them are ACTIVE
|
||||
// 2. 50 of them are INACTIVE
|
||||
virtual int query_all_server_info(AllServerRecordArray &records)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
int64_t seed = 0;
|
||||
AllServerRecord rec;
|
||||
for(int64_t idx = 0; idx < ALL_SERVER_COUNT; idx++) {
|
||||
rec.reset();
|
||||
snprintf(rec.svr_ip_, common::MAX_IP_ADDR_LENGTH + 1, "127.0.0.%ld", seed);
|
||||
rec.svr_port_ = 8888;
|
||||
if (0 == (idx & 0x01)) {
|
||||
rec.status_ = share::ObServerStatus::DisplayStatus::OB_SERVER_ACTIVE;
|
||||
} else {
|
||||
rec.status_ = share::ObServerStatus::DisplayStatus::OB_SERVER_INACTIVE;
|
||||
}
|
||||
|
||||
records.push_back(rec);
|
||||
seed++;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
};
|
||||
|
||||
class MockObLogRpcBase : public IObLogRpc
|
||||
{
|
||||
public:
|
||||
MockObLogRpcBase() {}
|
||||
virtual ~MockObLogRpcBase() { }
|
||||
|
||||
// Request start log id based on timestamp
|
||||
virtual int req_start_log_id_by_tstamp(const common::ObAddr &svr,
|
||||
const obrpc::ObLogReqStartLogIdByTsRequestWithBreakpoint& req,
|
||||
obrpc::ObLogReqStartLogIdByTsResponseWithBreakpoint& res,
|
||||
const int64_t timeout)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(svr);
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
UNUSED(timeout);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Request Leader Heartbeat
|
||||
virtual int req_leader_heartbeat(const common::ObAddr &svr,
|
||||
const obrpc::ObLogLeaderHeartbeatReq &req,
|
||||
obrpc::ObLogLeaderHeartbeatResp &res,
|
||||
const int64_t timeout)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(svr);
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
UNUSED(timeout);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Open a new stream
|
||||
// Synchronous RPC
|
||||
virtual int open_stream(const common::ObAddr &svr,
|
||||
const obrpc::ObLogOpenStreamReq &req,
|
||||
obrpc::ObLogOpenStreamResp &resp,
|
||||
const int64_t timeout)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(svr);
|
||||
UNUSED(req);
|
||||
UNUSED(resp);
|
||||
UNUSED(timeout);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Stream based, get logs
|
||||
// Asynchronous RPC
|
||||
virtual int async_stream_fetch_log(const common::ObAddr &svr,
|
||||
const obrpc::ObLogStreamFetchLogReq &req,
|
||||
obrpc::ObLogExternalProxy::AsyncCB<obrpc::OB_LOG_STREAM_FETCH_LOG> &cb,
|
||||
const int64_t timeout)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(svr);
|
||||
UNUSED(req);
|
||||
UNUSED(cb);
|
||||
UNUSED(timeout);
|
||||
|
||||
return ret;
|
||||
}
|
||||
};
|
||||
|
||||
class MockObLogStartLogIdRpc : public MockObLogRpcBase
|
||||
{
|
||||
typedef const obrpc::ObLogReqStartLogIdByTsRequestWithBreakpoint::Param Param;
|
||||
typedef const obrpc::ObLogReqStartLogIdByTsRequestWithBreakpoint::ParamArray ParamArray;
|
||||
public:
|
||||
MockObLogStartLogIdRpc() :
|
||||
spec_err_(false),
|
||||
svr_err_(OB_SUCCESS),
|
||||
part_err_(OB_SUCCESS)
|
||||
{}
|
||||
virtual ~MockObLogStartLogIdRpc() { }
|
||||
|
||||
void set_err(const int svr_err, const int part_err)
|
||||
{
|
||||
svr_err_ = svr_err;
|
||||
part_err_ = part_err;
|
||||
spec_err_ = true;
|
||||
}
|
||||
|
||||
// Request start log id based on timestamp
|
||||
// 1. rpc always assumes success
|
||||
// 2. 10% chance of server internal error
|
||||
// 3. When the server succeeds: 30% probability that a partition returns success,
|
||||
//    30% probability that it returns breakpoint info (start_log_id is set to pkey.table_id_)
|
||||
// 4. Support for external error codes
|
||||
virtual int req_start_log_id_by_tstamp(const common::ObAddr &svr,
|
||||
const obrpc::ObLogReqStartLogIdByTsRequestWithBreakpoint& req,
|
||||
obrpc::ObLogReqStartLogIdByTsResponseWithBreakpoint& res,
|
||||
const int64_t timeout)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(svr);
|
||||
UNUSED(timeout);
|
||||
|
||||
res.reset();
|
||||
// Seed.
|
||||
int64_t seed = (get_timestamp());
|
||||
int64_t rand = (seed) % 100;
|
||||
bool svr_internal_err = (rand < 10);
|
||||
|
||||
// Prefer the specified error code if one has been set
|
||||
if (spec_err_) {
|
||||
res.set_err(svr_err_);
|
||||
} else if (svr_internal_err) {
|
||||
res.set_err(OB_ERR_UNEXPECTED);
|
||||
}
|
||||
|
||||
if (OB_SUCCESS == res.get_err()) {
|
||||
ParamArray ¶m_array = req.get_params();
|
||||
for (int64_t idx = 0, cnt = param_array.count(); idx < cnt; ++idx) {
|
||||
Param ¶m = param_array[idx];
|
||||
obrpc::ObLogReqStartLogIdByTsResponseWithBreakpoint::Result result;
|
||||
result.reset();
|
||||
result.start_log_id_ = param.pkey_.table_id_;
|
||||
|
||||
if (spec_err_) {
|
||||
result.err_ = part_err_;
|
||||
} else {
|
||||
// 30% success, 30% break.
|
||||
rand = (idx + seed) % 100;
|
||||
bool succeed = (rand < 30);
|
||||
bool breakrpc = (30 <= rand) && (rand < 60);
|
||||
result.err_ = (succeed) ? OB_SUCCESS : ((breakrpc) ? OB_EXT_HANDLE_UNFINISH : OB_NEED_RETRY);
|
||||
}
|
||||
|
||||
// Break info is actually not returned.
|
||||
EXPECT_EQ(OB_SUCCESS, res.append_result(result));
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
private:
|
||||
bool spec_err_;
|
||||
int svr_err_;
|
||||
int part_err_;
|
||||
};
|
||||
|
||||
class MockObLogRpcDerived2 : public MockObLogRpcBase
|
||||
{
|
||||
typedef obrpc::ObLogReqStartLogIdByTsRequestWithBreakpoint Req;
|
||||
typedef Req::Param Param;
|
||||
typedef Req::ParamArray ParamArray;
|
||||
public:
|
||||
MockObLogRpcDerived2() : request_(NULL),
|
||||
start_pos_(0),
|
||||
end_pos_(0),
|
||||
query_time_(0) {}
|
||||
|
||||
virtual ~MockObLogRpcDerived2() {}
|
||||
|
||||
int init(int64_t req_cnt)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
if (OB_UNLIKELY(req_cnt <= 0)) {
|
||||
//LOG_ERROR("invalid_argument");
|
||||
ret = OB_INVALID_ARGUMENT;
|
||||
} else {
|
||||
request_ = new Req;
|
||||
request_->reset();
|
||||
start_pos_ = 0;
|
||||
end_pos_ = req_cnt - 1;
|
||||
query_time_ = 1;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void destroy()
|
||||
{
|
||||
delete request_;
|
||||
start_pos_ = 0;
|
||||
end_pos_ = 0;
|
||||
query_time_ = 1;
|
||||
}
|
||||
|
||||
// Request start log id based on timestamp
|
||||
// 1. rpc always assumes success, and no server internal error
|
||||
// 2. On each call, the second half of the partitions return success and the first half return break info
|
||||
virtual int req_start_log_id_by_tstamp(const common::ObAddr &svr,
|
||||
const obrpc::ObLogReqStartLogIdByTsRequestWithBreakpoint& req,
|
||||
obrpc::ObLogReqStartLogIdByTsResponseWithBreakpoint& res,
|
||||
const int64_t timeout)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(svr);
|
||||
UNUSED(timeout);
|
||||
|
||||
res.reset();
|
||||
int64_t mid_index = (end_pos_ - start_pos_ + 1) / 2;
|
||||
const ParamArray ¶m_array = req.get_params();
|
||||
|
||||
if (1 == query_time_) {
|
||||
// No validation is required for the first query
|
||||
// Save the request parameters
|
||||
for (int64_t idx = 0, cnt = param_array.count(); idx < cnt; ++idx) {
|
||||
const Param ¶m = param_array[idx];
|
||||
Param add_param;
|
||||
add_param.reset(param.pkey_, param.start_tstamp_, param.break_info_);
|
||||
|
||||
if (OB_FAIL(request_->append_param(add_param))) {
|
||||
//LOG_ERROR("append param fail", K(ret), K(idx), K(add_param));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Verify that it is the original request
|
||||
is_original_req(&req, start_pos_, end_pos_);
|
||||
}
|
||||
|
||||
for (int64_t idx = 0, cnt = param_array.count(); idx < cnt; ++idx) {
|
||||
const Param ¶m = param_array[idx];
|
||||
obrpc::ObLogReqStartLogIdByTsResponseWithBreakpoint::Result result;
|
||||
|
||||
if (idx < mid_index) {
|
||||
// First half returns break info
|
||||
result.reset();
|
||||
result.err_ = OB_EXT_HANDLE_UNFINISH;
|
||||
reset_break_info(result.break_info_, static_cast<uint32_t>(idx), idx + 100);
|
||||
result.start_log_id_ = OB_INVALID_ID;
|
||||
|
||||
// Dynamically update the break info of the corresponding parameter in the saved request, for subsequent verification
|
||||
Param &all_param = const_cast<Param &>(request_->params_[idx]);
|
||||
reset_break_info(all_param.break_info_, static_cast<uint32_t>(idx), idx + 100);
|
||||
} else {
|
||||
// The second half returns success
|
||||
result.reset();
|
||||
result.err_ = OB_SUCCESS;
|
||||
result.start_log_id_ = param.pkey_.table_id_;
|
||||
}
|
||||
EXPECT_EQ(OB_SUCCESS, res.append_result(result));
|
||||
}
|
||||
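// Only the first half of the partitions (those that got break info) should be retried, so shrink the window checked by is_original_req accordingly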
if (end_pos_ != 0) {
|
||||
end_pos_ = mid_index - 1;
|
||||
}
|
||||
query_time_++;
|
||||
|
||||
return ret;
|
||||
}
|
||||
private:
|
||||
void is_original_req(const Req *cur_req, int64_t start_pos, int64_t end_pos)
|
||||
{
|
||||
ParamArray all_param_array = request_->get_params();
|
||||
ParamArray cur_param_array = cur_req->get_params();
|
||||
|
||||
for (int64_t idx = start_pos; idx <= end_pos; idx++) {
|
||||
Param all_param = all_param_array[idx];
|
||||
Param cur_param = cur_param_array[idx];
|
||||
// verify pkey, start_tstamp
|
||||
EXPECT_EQ(all_param.pkey_, cur_param.pkey_);
|
||||
EXPECT_EQ(all_param.start_tstamp_, cur_param.start_tstamp_);
|
||||
// verify BreakInfo
|
||||
const obrpc::BreakInfo all_breakinfo = all_param.break_info_;
|
||||
const obrpc::BreakInfo cur_breakinfo = cur_param.break_info_;
|
||||
EXPECT_EQ(all_breakinfo.break_file_id_, cur_breakinfo.break_file_id_);
|
||||
EXPECT_EQ(all_breakinfo.min_greater_log_id_, cur_breakinfo.min_greater_log_id_);
|
||||
}
|
||||
}
|
||||
|
||||
void reset_break_info(obrpc::BreakInfo &break_info,
|
||||
uint32_t break_file_id,
|
||||
uint64_t min_greater_log_id)
|
||||
{
|
||||
break_info.break_file_id_ = break_file_id;
|
||||
break_info.min_greater_log_id_ = min_greater_log_id;
|
||||
}
|
||||
private:
|
||||
Req *request_;
|
||||
int64_t start_pos_;
|
||||
int64_t end_pos_;
|
||||
int64_t query_time_;
|
||||
};
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
520
unittest/obcdc/test_ob_log_heartbeater.cpp
Normal file
@ -0,0 +1,520 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#define USING_LOG_PREFIX OBLOG_FETCHER
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "share/ob_define.h"
|
||||
#include "lib/hash/ob_linear_hash_map.h" // ObLinearHashMap
|
||||
#include "lib/atomic/ob_atomic.h"
|
||||
#define private public
|
||||
#include "test_ob_log_fetcher_common_utils.h"
|
||||
#include "ob_log_utils.h"
|
||||
#include "ob_log_rpc.h"
|
||||
#include "ob_log_fetcher_heartbeat_worker.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
class TestObLogFetcherHeartbeatWorker: public ::testing::Test
|
||||
{
|
||||
public :
|
||||
virtual void SetUp() {}
|
||||
virtual void TearDown() {}
|
||||
public :
|
||||
static const int64_t SINGLE_WORKER_COUNT = 1;
|
||||
static const int64_t WORKER_COUNT = 3;
|
||||
};
|
||||
|
||||
static const int64_t ONE_SERVER_COUNT = 1;
|
||||
static const int64_t SERVER_COUNT = 3;
|
||||
|
||||
static const int64_t HEARTBEATER_REQUEST_COUNT = 5 * 10000;
|
||||
static const int64_t SMALL_HEARTBEATER_REQUEST_COUNT = 1000;
|
||||
|
||||
static const int64_t MAP_MOD_ID = 1;
|
||||
|
||||
static const int64_t TEST_TIME_LIMIT = 10 * _MIN_;
|
||||
static const int64_t FIXED_TIMESTAMP = 10000;
|
||||
|
||||
|
||||
class MockObLogRpcBaseHeartbeat : public IObLogRpc
|
||||
{
|
||||
public:
|
||||
typedef common::ObLinearHashMap<common::ObPartitionKey, common::ObAddr> PkeySvrMap;
|
||||
public:
|
||||
MockObLogRpcBaseHeartbeat(PkeySvrMap &map) : pkey_svr_map_(map) {}
|
||||
virtual ~MockObLogRpcBaseHeartbeat() { }
|
||||
|
||||
// Request start log id based on timestamp
|
||||
virtual int req_start_log_id_by_tstamp(const common::ObAddr &svr,
|
||||
const obrpc::ObLogReqStartLogIdByTsRequestWithBreakpoint& req,
|
||||
obrpc::ObLogReqStartLogIdByTsResponseWithBreakpoint& res,
|
||||
const int64_t timeout)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(svr);
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
UNUSED(timeout);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Request Leader Heartbeat
|
||||
virtual int req_leader_heartbeat(const common::ObAddr &svr,
|
||||
const obrpc::ObLogLeaderHeartbeatReq &req,
|
||||
obrpc::ObLogLeaderHeartbeatResp &res,
|
||||
const int64_t timeout)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(svr);
|
||||
UNUSED(req);
|
||||
UNUSED(res);
|
||||
UNUSED(timeout);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Open a new stream
|
||||
// Synchronous RPC
|
||||
virtual int open_stream(const common::ObAddr &svr,
|
||||
const obrpc::ObLogOpenStreamReq &req,
|
||||
obrpc::ObLogOpenStreamResp &resp,
|
||||
const int64_t timeout)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(svr);
|
||||
UNUSED(req);
|
||||
UNUSED(resp);
|
||||
UNUSED(timeout);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Stream based, get logs
|
||||
// Asynchronous RPC
|
||||
virtual int async_stream_fetch_log(const common::ObAddr &svr,
|
||||
const obrpc::ObLogStreamFetchLogReq &req,
|
||||
obrpc::ObLogExternalProxy::AsyncCB<obrpc::OB_LOG_STREAM_FETCH_LOG> &cb,
|
||||
const int64_t timeout)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(svr);
|
||||
UNUSED(req);
|
||||
UNUSED(cb);
|
||||
UNUSED(timeout);
|
||||
|
||||
return ret;
|
||||
}
|
||||
private:
|
||||
// Record the pkey-svr mapping, which is used to verify that the pkey was sent to the expected observer when the rpc is received
|
||||
PkeySvrMap &pkey_svr_map_;
|
||||
};
|
||||
|
||||
class MockObLogRpcDerived1Heartbeat : public MockObLogRpcBaseHeartbeat
|
||||
{
|
||||
public:
|
||||
MockObLogRpcDerived1Heartbeat(PkeySvrMap &map) : MockObLogRpcBaseHeartbeat(map) {}
|
||||
virtual ~MockObLogRpcDerived1Heartbeat() { }
|
||||
|
||||
// Requesting a leader heartbeat
|
||||
// 1. rpc always assumes success
|
||||
// 2. 10% probability of server internal error
|
||||
// 3. When the server succeeds, 30% probability that a partition returns OB_SUCCESS, 30% probability of OB_NOT_MASTER,
|
||||
// 30% chance of returning OB_PARTITION_NOT_EXIST, 10% chance of returning another error
|
||||
virtual int req_leader_heartbeat(const common::ObAddr &svr,
|
||||
const obrpc::ObLogLeaderHeartbeatReq &req,
|
||||
obrpc::ObLogLeaderHeartbeatResp &res,
|
||||
const int64_t timeout)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(svr);
|
||||
UNUSED(timeout);
|
||||
|
||||
res.reset();
|
||||
res.set_debug_err(OB_SUCCESS);
|
||||
// Seed.
|
||||
int64_t seed = (get_timestamp());
|
||||
int64_t rand = (seed) % 100;
|
||||
bool svr_internal_err = (rand < 10);
|
||||
if (svr_internal_err) {
|
||||
res.set_err(OB_ERR_UNEXPECTED);
|
||||
} else {
|
||||
res.set_err(OB_SUCCESS);
|
||||
for (int64_t idx = 0, cnt = req.get_params().count(); OB_SUCCESS == ret && idx < cnt; ++idx) {
|
||||
// 30%.
|
||||
seed = get_timestamp();
|
||||
rand = (idx + seed) % 100;
|
||||
bool succeed = (rand < 30);
|
||||
bool not_master = (30 <= rand) && (rand < 60);
|
||||
bool partition_not_exist = (60 <= rand) && (rand < 90);
|
||||
|
||||
const obrpc::ObLogLeaderHeartbeatReq::Param ¶m = req.get_params().at(idx);
|
||||
obrpc::ObLogLeaderHeartbeatResp::Result result;
|
||||
result.reset();
|
||||
if (succeed) {
|
||||
result.err_ = OB_SUCCESS;
|
||||
} else if (not_master) {
|
||||
result.err_ = OB_NOT_MASTER;
|
||||
} else if (partition_not_exist) {
|
||||
result.err_ = OB_PARTITION_NOT_EXIST;
|
||||
} else {
|
||||
result.err_ = OB_ERR_UNEXPECTED;
|
||||
}
|
||||
result.next_served_log_id_ = (succeed || not_master) ? param.next_log_id_ : OB_INVALID_ID;
|
||||
result.next_served_ts_ = (succeed || not_master) ? FIXED_TIMESTAMP : OB_INVALID_TIMESTAMP;
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, res.append_result(result));
|
||||
|
||||
// Verify that the partition was sent to the expected request server
|
||||
common::ObAddr cur_svr;
|
||||
if (OB_FAIL(pkey_svr_map_.get(param.pkey_, cur_svr))) {
|
||||
LOG_ERROR("pkey_svr_map_ get error", K(ret), K(param), K(cur_svr));
|
||||
} else {
|
||||
EXPECT_EQ(svr, cur_svr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
LOG_DEBUG("req leader heartbeat", K(req), K(res));
|
||||
|
||||
return ret;
|
||||
}
|
||||
};
|
||||
|
||||
class MockObLogRpcDerived2Heartbeat : public MockObLogRpcBaseHeartbeat
|
||||
{
|
||||
public:
|
||||
MockObLogRpcDerived2Heartbeat(PkeySvrMap &map) : MockObLogRpcBaseHeartbeat(map) {}
|
||||
virtual ~MockObLogRpcDerived2Heartbeat() { }
|
||||
|
||||
// Request leader heartbeat
|
||||
// 1. rpc always assumes success, no server internal error
|
||||
// 2. All partitions return OB_SUCCESS
|
||||
virtual int req_leader_heartbeat(const common::ObAddr &svr,
|
||||
const obrpc::ObLogLeaderHeartbeatReq &req,
|
||||
obrpc::ObLogLeaderHeartbeatResp &res,
|
||||
const int64_t timeout)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
UNUSED(svr);
|
||||
UNUSED(timeout);
|
||||
|
||||
res.reset();
|
||||
res.set_debug_err(OB_SUCCESS);
|
||||
res.set_err(OB_SUCCESS);
|
||||
for (int64_t idx = 0, cnt = req.get_params().count(); OB_SUCCESS == ret && idx < cnt; ++idx) {
|
||||
obrpc::ObLogLeaderHeartbeatResp::Result result;
|
||||
const obrpc::ObLogLeaderHeartbeatReq::Param ¶m = req.get_params().at(idx);
|
||||
result.reset();
|
||||
result.err_ = OB_SUCCESS;
|
||||
result.next_served_log_id_ = param.next_log_id_;
|
||||
result.next_served_ts_ = FIXED_TIMESTAMP;
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, res.append_result(result));
|
||||
|
||||
// Verify that the partition was sent to the expected request server
|
||||
common::ObAddr cur_svr;
|
||||
if (OB_FAIL(pkey_svr_map_.get(param.pkey_, cur_svr))) {
|
||||
LOG_ERROR("pkey_svr_map_ get error", K(ret), K(param), K(cur_svr));
|
||||
} else {
|
||||
EXPECT_EQ(svr, cur_svr);
|
||||
}
|
||||
}
|
||||
|
||||
LOG_DEBUG("req leader heartbeat", K(req), K(res));
|
||||
|
||||
return ret;
|
||||
}
|
||||
};
|
||||
|
||||
void generate_req(const int64_t all_svr_cnt,
|
||||
const int64_t req_cnt,
|
||||
HeartbeatRequest *&request_array,
|
||||
common::ObLinearHashMap<common::ObPartitionKey, common::ObAddr> &map)
|
||||
{
|
||||
// Build requests.
|
||||
ObAddr svrs[all_svr_cnt];
|
||||
for (int64_t idx = 0, cnt = all_svr_cnt; idx < cnt; ++idx) {
|
||||
svrs[idx] = ObAddr(ObAddr::IPV4, "127.0.0.1", (int32_t)(idx + 1000));
|
||||
}
|
||||
|
||||
request_array = new HeartbeatRequest[req_cnt];
|
||||
for (int64_t idx = 0, cnt = req_cnt; idx < cnt; ++idx) {
|
||||
HeartbeatRequest &r = request_array[idx];
|
||||
r.reset();
|
||||
// set pkey, next_log_id, svr
|
||||
// next_log_id = pkey.table_id + 1
|
||||
r.reset(ObPartitionKey((uint64_t)(1000 + idx), 0, 1), 1000 + idx + 1, svrs[idx % all_svr_cnt]);
|
||||
|
||||
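// Record the pkey -> server mapping so that the mock RPC can later verify each partition's request was sent to the expected server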
int ret = OB_SUCCESS;
|
||||
if (OB_FAIL(map.insert(r.pkey_, svrs[idx % all_svr_cnt]))) {
|
||||
if (OB_ENTRY_EXIST != ret) {
|
||||
LOG_ERROR("map insert error", K(ret), K(r), K(idx));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void free_req(HeartbeatRequest *request_array)
|
||||
{
|
||||
delete[] request_array;
|
||||
}
|
||||
|
||||
/*
|
||||
* Worker.
|
||||
*/
|
||||
class TestWorker : public liboblog::Runnable
|
||||
{
|
||||
public:
|
||||
ObLogFetcherHeartbeatWorker *heartbeater_;
|
||||
HeartbeatRequest *request_array_;
|
||||
int64_t request_cnt_;
|
||||
int64_t all_svr_cnt_;
|
||||
bool push_req_finish_;
|
||||
double success_rate_;
|
||||
|
||||
void reset(ObLogFetcherHeartbeatWorker *heartbeater, HeartbeatRequest *req_array,
|
||||
int64_t req_cnt, int64_t all_svr_cnt)
|
||||
{
|
||||
heartbeater_ = heartbeater;
|
||||
request_array_ = req_array;
|
||||
request_cnt_ = req_cnt;
|
||||
all_svr_cnt_ = all_svr_cnt;
|
||||
push_req_finish_ = false;
|
||||
success_rate_ = 0;
|
||||
}
|
||||
|
||||
virtual int routine()
|
||||
{
|
||||
// Push requests into heartbeater
|
||||
for (int64_t idx = 0, cnt = request_cnt_; idx < cnt; ++idx) {
|
||||
HeartbeatRequest &r = request_array_[idx];
|
||||
EXPECT_EQ(OB_SUCCESS, heartbeater_->async_heartbeat_req(&r));
|
||||
if (0 == (idx % 1000)) {
|
||||
usec_sleep(10 * _MSEC_);
|
||||
}
|
||||
}
|
||||
ATOMIC_STORE(&push_req_finish_, true);
|
||||
|
||||
// Wait for all requests to finish; a maximum test time limit is enforced.
|
||||
int64_t end_request_cnt = 0;
|
||||
const int64_t start_test_tstamp = get_timestamp();
|
||||
while (((get_timestamp() - start_test_tstamp) < TEST_TIME_LIMIT)
|
||||
&& (end_request_cnt < request_cnt_)) {
|
||||
for (int64_t idx = 0, cnt = request_cnt_; idx < cnt; ++idx) {
|
||||
HeartbeatRequest &r = request_array_[idx];
|
||||
if (HeartbeatRequest::DONE == r.get_state()) {
|
||||
end_request_cnt += 1;
|
||||
r.set_state(HeartbeatRequest::IDLE);
|
||||
}
|
||||
}
|
||||
usec_sleep(100 * _MSEC_);
|
||||
}
|
||||
// Assert if test cannot finish.
|
||||
EXPECT_EQ(request_cnt_, end_request_cnt);
|
||||
|
||||
// Do some statistics.
|
||||
int64_t svr_consume_distribution[all_svr_cnt_]; // 1, 2, 3, ...
|
||||
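// svr_consume_distribution[i] counts the requests assigned to server i; requests are distributed round-robin by idx % all_svr_cnt_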
for (int64_t idx = 0, cnt = all_svr_cnt_; idx < cnt; ++idx) {
|
||||
svr_consume_distribution[idx] = 0;
|
||||
}
|
||||
int64_t succ_cnt = 0;
|
||||
for (int64_t idx = 0, cnt = request_cnt_; idx < cnt; ++idx) {
|
||||
HeartbeatRequest &req = request_array_[idx];
|
||||
svr_consume_distribution[idx % all_svr_cnt_] += 1;
|
||||
|
||||
const HeartbeatResponse &res = req.get_resp();
|
||||
if (res.next_served_log_id_ != OB_INVALID_ID
|
||||
&& res.next_served_tstamp_ != OB_INVALID_TIMESTAMP) {
|
||||
EXPECT_EQ(req.pkey_.table_id_ + 1, res.next_served_log_id_);
|
||||
EXPECT_EQ(FIXED_TIMESTAMP, res.next_served_tstamp_);
|
||||
succ_cnt += 1;
|
||||
LOG_DEBUG("verify", K(res), K(succ_cnt));
|
||||
}
|
||||
}
|
||||
|
||||
const int64_t BuffSize = 1024;
|
||||
char buf[BuffSize];
|
||||
int64_t pos = 0;
|
||||
for (int64_t idx = 0, cnt = all_svr_cnt_; idx < cnt; ++idx) {
|
||||
pos += snprintf(buf + pos, BuffSize - pos, "svr_cnt:%ld perc:%f ", (1 + idx),
|
||||
((double)svr_consume_distribution[idx] / (double)request_cnt_));
|
||||
}
|
||||
success_rate_ = (double)succ_cnt / (double)request_cnt_;
|
||||
fprintf(stderr, "request count: %ld distribution: %s succeed perc: %f \n",
|
||||
request_cnt_, buf, success_rate_);
|
||||
|
||||
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
};
|
||||
|
||||
//////////////////////Basic function tests//////////////////////////////////////////
|
||||
TEST_F(TestObLogFetcherHeartbeatWorker, HeartbeatRequest)
|
||||
{
|
||||
HeartbeatRequest req;
|
||||
req.reset();
|
||||
EXPECT_TRUE(req.is_state_idle());
|
||||
|
||||
req.set_state_req();
|
||||
EXPECT_TRUE(req.is_state_req());
|
||||
EXPECT_EQ(HeartbeatRequest::REQ, req.get_state());
|
||||
|
||||
req.set_state_done();
|
||||
EXPECT_TRUE(req.is_state_done());
|
||||
EXPECT_EQ(HeartbeatRequest::DONE, req.get_state());
|
||||
|
||||
req.set_state_idle();
|
||||
EXPECT_TRUE(req.is_state_idle());
|
||||
EXPECT_EQ(HeartbeatRequest::IDLE, req.get_state());
|
||||
}
|
||||
|
||||
//TEST_F(TestObLogStartLogIdLocator, DISABLED_locator)
|
||||
TEST_F(TestObLogFetcherHeartbeatWorker, heartbeater)
|
||||
{
|
||||
const int64_t TestWorkerCnt = 3;
|
||||
// generate data
|
||||
HeartbeatRequest *request_arrays[TestWorkerCnt];
|
||||
common::ObLinearHashMap<common::ObPartitionKey, common::ObAddr> map;
|
||||
EXPECT_EQ(OB_SUCCESS, map.init(MAP_MOD_ID));
|
||||
for (int64_t idx = 0; idx < TestWorkerCnt; idx++) {
|
||||
generate_req(SERVER_COUNT, HEARTBEATER_REQUEST_COUNT, request_arrays[idx], map);
|
||||
OB_ASSERT(NULL != request_arrays[idx]);
|
||||
}
|
||||
|
||||
MockFetcherErrHandler1 err_handler1;
|
||||
MockObLogRpcDerived1Heartbeat rpc(map);
|
||||
ObLogFetcherHeartbeatWorker heartbeater;
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, heartbeater.init(WORKER_COUNT, rpc, err_handler1));
|
||||
EXPECT_EQ(OB_SUCCESS, heartbeater.start());
|
||||
|
||||
TestWorker workers[TestWorkerCnt];
|
||||
for (int64_t idx = 0, cnt = TestWorkerCnt; idx < cnt; ++idx) {
|
||||
TestWorker &w = workers[idx];
|
||||
w.reset(&heartbeater, request_arrays[idx], HEARTBEATER_REQUEST_COUNT, SERVER_COUNT);
|
||||
w.create();
|
||||
}
|
||||
|
||||
for (int64_t idx = 0, cnt = TestWorkerCnt; idx < cnt; ++idx) {
|
||||
TestWorker &w = workers[idx];
|
||||
w.join();
|
||||
}
|
||||
|
||||
// free
|
||||
for (int64_t idx = 0; idx < TestWorkerCnt; idx++) {
|
||||
free_req(request_arrays[idx]);
|
||||
request_arrays[idx] = NULL;
|
||||
}
|
||||
map.destroy();
|
||||
heartbeater.destroy();
|
||||
}
|
||||
|
||||
// Test the request logic
|
||||
// Up to 10,000 requests are currently aggregated at a time; push more than 10,000 requests to verify that multiple rounds of aggregation work
|
||||
TEST_F(TestObLogFetcherHeartbeatWorker, aggregation)
|
||||
{
|
||||
// generate data
|
||||
HeartbeatRequest *request_array;
|
||||
common::ObLinearHashMap<common::ObPartitionKey, common::ObAddr> map;
|
||||
EXPECT_EQ(OB_SUCCESS, map.init(MAP_MOD_ID));
|
||||
// All requests are made to the same server
|
||||
generate_req(ONE_SERVER_COUNT, HEARTBEATER_REQUEST_COUNT, request_array, map);
|
||||
OB_ASSERT(NULL != request_array);
|
||||
|
||||
MockFetcherErrHandler1 err_handler1;
|
||||
MockObLogRpcDerived1Heartbeat rpc(map);
|
||||
|
||||
ObLogFetcherHeartbeatWorker heartbeater;
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, heartbeater.init(SINGLE_WORKER_COUNT, rpc, err_handler1));
|
||||
|
||||
// Push all requests first, then start the heartbeat worker thread, to ensure that the requests are aggregated on a single server
|
||||
TestWorker worker;
|
||||
worker.reset(&heartbeater, request_array, HEARTBEATER_REQUEST_COUNT, ONE_SERVER_COUNT);
|
||||
worker.create();
|
||||
|
||||
while (false == ATOMIC_LOAD(&worker.push_req_finish_)) {
|
||||
}
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, heartbeater.start());
|
||||
|
||||
// join
|
||||
worker.join();
|
||||
// free
|
||||
free_req(request_array);
|
||||
request_array = NULL;
|
||||
|
||||
map.destroy();
|
||||
heartbeater.destroy();
|
||||
}
|
||||
|
||||
// Test scenario: verify that ObLogFetcherHeartbeatWorker processes results correctly when the observer returns all data correctly
|
||||
TEST_F(TestObLogFetcherHeartbeatWorker, heartbeater_handle)
|
||||
{
|
||||
// generate data
|
||||
HeartbeatRequest *request_array;
|
||||
common::ObLinearHashMap<common::ObPartitionKey, common::ObAddr> map;
|
||||
EXPECT_EQ(OB_SUCCESS, map.init(MAP_MOD_ID));
|
||||
generate_req(SERVER_COUNT, SMALL_HEARTBEATER_REQUEST_COUNT, request_array, map);
|
||||
OB_ASSERT(NULL != request_array);
|
||||
|
||||
MockFetcherErrHandler1 err_handler1;
|
||||
MockObLogRpcDerived2Heartbeat rpc(map);
|
||||
ObLogFetcherHeartbeatWorker heartbeater;
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, heartbeater.init(WORKER_COUNT, rpc, err_handler1));
|
||||
EXPECT_EQ(OB_SUCCESS, heartbeater.start());
|
||||
|
||||
TestWorker worker;
|
||||
worker.reset(&heartbeater, request_array, SMALL_HEARTBEATER_REQUEST_COUNT, SERVER_COUNT);
|
||||
worker.create();
|
||||
|
||||
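// Busy-wait until the worker has finished and published its success rate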
while (0 == ATOMIC_LOAD((int64_t*)&worker.success_rate_)) {
|
||||
}
|
||||
// all request succ
|
||||
EXPECT_EQ(1, worker.success_rate_);
|
||||
|
||||
worker.join();
|
||||
|
||||
// free
|
||||
free_req(request_array);
|
||||
request_array = NULL;
|
||||
|
||||
map.destroy();
|
||||
heartbeater.destroy();
|
||||
}
|
||||
|
||||
|
||||
}//end of unittest
|
||||
}//end of oceanbase
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
// ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
// testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
int ret = 1;
|
||||
ObLogger &logger = ObLogger::get_logger();
|
||||
logger.set_file_name("test_ob_log_heartbeater.log", true);
|
||||
logger.set_log_level(OB_LOG_LEVEL_INFO);
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
ret = RUN_ALL_TESTS();
|
||||
return ret;
|
||||
}
|
||||
200
unittest/obcdc/test_ob_log_part_fetch_mgr.cpp
Normal file
@ -0,0 +1,200 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#define USING_LOG_PREFIX OBLOG_FETCHER
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "share/ob_define.h"
|
||||
#include "ob_log_utils.h"
|
||||
#define private public
|
||||
#include "ob_log_part_fetch_mgr.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
class TestObLogPartFetchMgr: public ::testing::Test
|
||||
{
|
||||
public :
|
||||
virtual void SetUp() {}
|
||||
virtual void TearDown() {}
|
||||
public :
|
||||
static const int64_t MAX_CACHED_PART_FETCH_CTX_COUNT = 10 * 1000;
|
||||
static const int64_t PART_FETCH_CTX_POOL_BLOCK_SIZE = 1L << 24;
|
||||
static const uint64_t DEFAULT_TENANT_ID = common::OB_SERVER_TENANT_ID;
|
||||
|
||||
static const int64_t PART_FETCH_CTX__COUNT = 10 * 1000;
|
||||
static const int64_t SINGLE_PART_FETCH_CTX__COUNT = 1;
|
||||
static int64_t g_slowest_part_num;
|
||||
};
|
||||
|
||||
int64_t TestObLogPartFetchMgr::g_slowest_part_num =
|
||||
ObLogConfig::default_print_fetcher_slowest_part_num;
|
||||
typedef common::ObSmallObjPool<PartFetchCtx> PartFetchCtxPool;
|
||||
PartFetchCtxPool ctx_pool;
|
||||
|
||||
void generate_ctx(const int64_t part_fetch_ctx_count,
|
||||
PartTransResolver &part_trans_resolver,
|
||||
PartFetchCtx *pctx_array[],
|
||||
ObLogPartFetchMgr::PartFetchCtxArray &part_fetch_ctx_array)
|
||||
{
|
||||
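// Allocate PartFetchCtx objects from the global pool and set each context's progress to (count - idx), so progress values range from count down to 1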
int ret = OB_SUCCESS;
|
||||
|
||||
for (int64_t idx = 0; idx < part_fetch_ctx_count; ++idx) {
|
||||
PartFetchCtx *&ctx = pctx_array[idx];
|
||||
if (OB_FAIL(ctx_pool.alloc(ctx)) || OB_ISNULL(ctx)) {
|
||||
LOG_ERROR("alloc PartFetchCtx fail", K(ret), K(idx), KPC(ctx));
|
||||
} else {
|
||||
// Initialize the partition fetch context
|
||||
ctx->reset(ObPartitionKey(1000U, idx, part_fetch_ctx_count),
|
||||
get_timestamp(), idx, idx, part_trans_resolver);
|
||||
// Manually assigning values to partition progress, for testing purposes
|
||||
ctx->progress_.progress_ = part_fetch_ctx_count - idx;
|
||||
if (OB_FAIL(part_fetch_ctx_array.push_back(ctx))) {
|
||||
LOG_ERROR("part_fetch_ctx_array push back fail", K(ret), K(idx), KPC(ctx));
|
||||
} else {
|
||||
LOG_DEBUG("data", K(idx), "progress", ctx->get_progress());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void free_all_ctx(const int64_t array_cnt,
|
||||
PartFetchCtx *pctx_array[])
|
||||
{
|
||||
for (int64_t idx = 0; idx < array_cnt; ++idx) {
|
||||
PartFetchCtx *&ctx = pctx_array[idx];
|
||||
if (NULL != ctx) {
|
||||
ctx->reset();
|
||||
ctx_pool.free(ctx);
|
||||
ctx = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int do_top_k(const ObLogPartFetchMgr::PartFetchCtxArray &part_fetch_ctx_array,
|
||||
const int64_t g_slowest_part_num)
|
||||
{
|
||||
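// Run find_k_slowest_partition_ over the context array, log the elapsed time, and verify that the returned slowest partitions have progress values 1, 2, 3, ... in ascending order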
int ret = OB_SUCCESS;
|
||||
|
||||
ObLogPartFetchMgr::SlowestPartArray slow_part_array;
|
||||
ObLogPartFetchMgr part_fetch_mgr;
|
||||
int64_t start_time = get_timestamp();
|
||||
int64_t end_time = 0;
|
||||
if (OB_FAIL(part_fetch_mgr.find_k_slowest_partition_(part_fetch_ctx_array,
|
||||
g_slowest_part_num,
|
||||
slow_part_array))) {
|
||||
LOG_ERROR("find_the_k_slowest_partition_ fail", K(ret));
|
||||
} else {
|
||||
end_time = get_timestamp();
|
||||
LOG_INFO("top-k cost time", "time", TVAL_TO_STR(end_time - start_time));
|
||||
|
||||
int64_t array_cnt = slow_part_array.count();
|
||||
for (int64_t idx = 0; idx < array_cnt; ++idx) {
|
||||
const PartFetchCtx *ctx = slow_part_array.at(idx);
|
||||
EXPECT_EQ(idx + 1, ctx->get_progress());
|
||||
LOG_INFO("slow part", K(idx), "pkey", ctx->get_pkey(),
|
||||
"progress", ctx->get_progress());
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
// TEST find_k_slowest_partition
|
||||
TEST_F(TestObLogPartFetchMgr, top_k)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
ObLogPartFetchMgr::PartFetchCtxArray part_fetch_ctx_array;
|
||||
// Array holding the context pointers
|
||||
PartFetchCtx *pctx_array[PART_FETCH_CTX__COUNT];
|
||||
PartTransResolver part_trans_resolver;
|
||||
|
||||
// PartFetchCtxPool
|
||||
if (OB_FAIL(ctx_pool.init(MAX_CACHED_PART_FETCH_CTX_COUNT,
|
||||
ObModIds::OB_LOG_PART_FETCH_CTX_POOL,
|
||||
DEFAULT_TENANT_ID,
|
||||
PART_FETCH_CTX_POOL_BLOCK_SIZE))) {
|
||||
LOG_ERROR("init PartFetchCtxPool fail", K(ret), LITERAL_K(MAX_CACHED_PART_FETCH_CTX_COUNT),
|
||||
LITERAL_K(PART_FETCH_CTX_POOL_BLOCK_SIZE));
|
||||
}
|
||||
|
||||
// case-1:
|
||||
// Test 10,000 partitions
|
||||
// Generate ctx
|
||||
generate_ctx(PART_FETCH_CTX__COUNT, part_trans_resolver, pctx_array, part_fetch_ctx_array);
|
||||
// Execute top-k
|
||||
EXPECT_EQ(OB_SUCCESS, do_top_k(part_fetch_ctx_array, g_slowest_part_num));
|
||||
// free
|
||||
free_all_ctx(PART_FETCH_CTX__COUNT, pctx_array);
|
||||
|
||||
|
||||
// case-2
|
||||
// Test 0 partitions
|
||||
part_fetch_ctx_array.reset();
|
||||
// Generate ctx
|
||||
generate_ctx(0, part_trans_resolver, pctx_array, part_fetch_ctx_array);
|
||||
// Execute top-k
|
||||
EXPECT_EQ(OB_SUCCESS, do_top_k(part_fetch_ctx_array, g_slowest_part_num));
|
||||
// free
|
||||
free_all_ctx(0, pctx_array);
|
||||
|
||||
|
||||
// case-3
|
||||
// Test 1 partition
|
||||
part_fetch_ctx_array.reset();
|
||||
// Generate ctx
|
||||
generate_ctx(SINGLE_PART_FETCH_CTX__COUNT, part_trans_resolver, pctx_array, part_fetch_ctx_array);
|
||||
// Execute top-k
|
||||
EXPECT_EQ(OB_SUCCESS, do_top_k(part_fetch_ctx_array, g_slowest_part_num));
|
||||
// free
|
||||
free_all_ctx(SINGLE_PART_FETCH_CTX__COUNT, pctx_array);
|
||||
|
||||
|
||||
// case-4
|
||||
// Test 2 partitions, one of which is NULL
|
||||
part_fetch_ctx_array.reset();
|
||||
// Generate ctx
|
||||
generate_ctx(SINGLE_PART_FETCH_CTX__COUNT, part_trans_resolver, pctx_array, part_fetch_ctx_array);
|
||||
// push NULL
|
||||
EXPECT_EQ(OB_SUCCESS, part_fetch_ctx_array.push_back(NULL));
|
||||
// Execute top-k
|
||||
EXPECT_EQ(OB_ERR_UNEXPECTED, do_top_k(part_fetch_ctx_array, g_slowest_part_num));
|
||||
// free
|
||||
free_all_ctx(SINGLE_PART_FETCH_CTX__COUNT, pctx_array);
|
||||
|
||||
|
||||
// destroy ctx pool
|
||||
ctx_pool.destroy();
|
||||
}
|
||||
|
||||
|
||||
}//end of unittest
|
||||
}//end of oceanbase
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
ObLogger &logger = ObLogger::get_logger();
|
||||
logger.set_file_name("test_ob_log_part_fetch_mgr.log", true);
|
||||
logger.set_log_level(OB_LOG_LEVEL_INFO);
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
ret = RUN_ALL_TESTS();
|
||||
|
||||
return ret;
|
||||
}
|
||||
428
unittest/obcdc/test_ob_log_part_svr_list.cpp
Normal file
428
unittest/obcdc/test_ob_log_part_svr_list.cpp
Normal file
@ -0,0 +1,428 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#define USING_LOG_PREFIX OBLOG_FETCHER
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "share/ob_define.h"
|
||||
#include "lib/net/ob_addr.h"
|
||||
#include "lib/allocator/page_arena.h"
|
||||
#include "ob_log_utils.h"
|
||||
#define private public
|
||||
#include "obcdc/src/ob_log_part_svr_list.h"
|
||||
|
||||
#include "ob_log_start_log_id_locator.h" // StartLogIdLocateReq
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
class TestObLogPartSvrList: public ::testing::Test
|
||||
{
|
||||
public :
|
||||
virtual void SetUp();
|
||||
virtual void TearDown();
|
||||
public:
|
||||
typedef PartSvrList::SvrItem SvrItem;
|
||||
typedef PartSvrList::LogIdRange LogIdRange;
|
||||
// verify correctness of the svr_item field
|
||||
void is_svr_item_correct(PartSvrList &svr_list,
|
||||
const int64_t svr_item_index,
|
||||
common::ObAddr &expect_svr,
|
||||
const int64_t expect_range_num,
|
||||
LogIdRange *expect_log_ranges);
|
||||
private:
|
||||
static const int64_t SERVER_COUNT = 64;
|
||||
static const int64_t MAX_RANGE_NUMBER = 4;
|
||||
private:
|
||||
common::ObAddr servers[SERVER_COUNT];
|
||||
common::ObArenaAllocator allocator;
|
||||
};
|
||||
|
||||
void TestObLogPartSvrList::SetUp()
|
||||
{
|
||||
for (int64_t idx = 0; idx < SERVER_COUNT; idx++) {
|
||||
servers[idx].set_ip_addr("127.0.0.1", static_cast<int32_t>(idx + 8000));
|
||||
}
|
||||
}
|
||||
|
||||
void TestObLogPartSvrList::TearDown()
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
void TestObLogPartSvrList::is_svr_item_correct(PartSvrList &svr_list,
|
||||
const int64_t svr_item_index,
|
||||
common::ObAddr &expect_svr,
|
||||
const int64_t expect_range_num,
|
||||
LogIdRange *expect_log_ranges)
|
||||
{
|
||||
SvrItem svr_item;
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.svr_items_.at(svr_item_index, svr_item));
|
||||
|
||||
EXPECT_EQ(expect_svr, svr_item.svr_);
|
||||
EXPECT_EQ(expect_range_num, svr_item.range_num_);
|
||||
for (int64_t idx = 0; idx < expect_range_num; idx++) {
|
||||
EXPECT_EQ(expect_log_ranges[idx].start_log_id_, svr_item.log_ranges_[idx].start_log_id_);
|
||||
EXPECT_EQ(expect_log_ranges[idx].end_log_id_, svr_item.log_ranges_[idx].end_log_id_);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
//////////////////////Basic function tests//////////////////////////////////////////
|
||||
// PartSvrList::add_server_or_update()
|
||||
// Mainly tests the insert_range_ function: find_pos_and_merge_ is called to find the insert position, but no log range merge occurs
|
||||
TEST_F(TestObLogPartSvrList, add_server_test1)
|
||||
{
|
||||
// declare
|
||||
const int64_t svr_idx = 0;
|
||||
common::ObAddr expect_svr = servers[svr_idx];
|
||||
int64_t expect_range_num = 0;
|
||||
LogIdRange expect_log_ranges[MAX_RANGE_NUMBER];
|
||||
(void)memset(expect_log_ranges, 0, MAX_RANGE_NUMBER * sizeof(LogIdRange));
|
||||
const bool is_located_in_meta_table = false;
|
||||
const bool is_leader = false;
|
||||
|
||||
PartSvrList svr_list;
|
||||
EXPECT_EQ(0, svr_list.next_svr_index_);
|
||||
EXPECT_EQ(0, svr_list.count());
|
||||
|
||||
/// add log id range: (100, 200)
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[0], 100, 200,
|
||||
is_located_in_meta_table, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, is_leader));
|
||||
expect_range_num++;
|
||||
// Verify the correctness of the svr_item field
|
||||
EXPECT_EQ(1, svr_list.count());
|
||||
expect_log_ranges[0].reset(100, 200);
|
||||
is_svr_item_correct(svr_list, svr_idx, expect_svr, expect_range_num, expect_log_ranges);
|
||||
|
||||
/// add log id range: (300, 400)
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[0], 300, 400,
|
||||
is_located_in_meta_table, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, is_leader));
|
||||
expect_range_num++;
|
||||
// Verify the correctness of the svr_item field
|
||||
expect_log_ranges[1].reset(300, 400);
|
||||
is_svr_item_correct(svr_list, svr_idx, expect_svr, expect_range_num, expect_log_ranges);
|
||||
|
||||
/// add log id range: (500, 600)
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[0], 500, 600,
|
||||
is_located_in_meta_table, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, is_leader));
|
||||
expect_range_num++;
|
||||
// Verify the correctness of the svr_item field
|
||||
expect_log_ranges[2].reset(500, 600);
|
||||
is_svr_item_correct(svr_list, svr_idx, expect_svr, expect_range_num, expect_log_ranges);
|
||||
|
||||
/// add log id range: (60, 80)
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[0], 60, 80,
|
||||
is_located_in_meta_table, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, is_leader));
|
||||
expect_range_num++;
|
||||
// Verify the correctness of the svr_item field
|
||||
expect_log_ranges[0].reset(60, 80);
|
||||
expect_log_ranges[1].reset(100, 200);
|
||||
expect_log_ranges[2].reset(300, 400);
|
||||
expect_log_ranges[3].reset(500, 600);
|
||||
is_svr_item_correct(svr_list, svr_idx, expect_svr, expect_range_num, expect_log_ranges);
|
||||
|
||||
/// add log id range: (700, 800)
|
||||
// current range:[60, 80], [100, 200], [300, 400], [500, 600]
|
||||
// No merge occurs and the array is full, so force a merge with the last range
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[0], 700, 800,
|
||||
is_located_in_meta_table, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, is_leader));
|
||||
// Verify the correctness of the svr_item field
|
||||
expect_log_ranges[3].reset(500, 800);
|
||||
is_svr_item_correct(svr_list, svr_idx, expect_svr, expect_range_num, expect_log_ranges);
|
||||
|
||||
/// add log id range: (410, 450)
|
||||
// current range:[60, 80], [100, 200], [300, 400], [700, 800]
|
||||
// If no merge occurs and the array is full, find the insertion position pos, and perform a manual merge with the range at position pos
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[0], 410, 450,
|
||||
is_located_in_meta_table, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, is_leader));
|
||||
// Verify the correctness of the svr_item field
|
||||
expect_log_ranges[3].reset(410, 800);
|
||||
is_svr_item_correct(svr_list, svr_idx, expect_svr, expect_range_num, expect_log_ranges);
|
||||
|
||||
/// add log id range: (30, 40)
|
||||
// current range:[60, 80], [100, 200], [300, 400], [410, 800]
|
||||
// If no merge occurs and the array is full, find the insertion position pos, and perform a manual merge with the range at position pos
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[0], 30, 40,
|
||||
is_located_in_meta_table, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, is_leader));
|
||||
// Verify the correctness of the svr_item field
|
||||
expect_log_ranges[0].reset(30, 80);
|
||||
is_svr_item_correct(svr_list, svr_idx, expect_svr, expect_range_num, expect_log_ranges);
|
||||
}
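
// Illustrative sketch (not part of the original tests): the behaviour verified
// above can be modelled as inserting [start, end] into a small sorted array of
// ranges, merging overlapping entries, and force-merging with the neighbour at
// the insertion position when the array is already full. The RangeSketch type
// and its limits are assumptions added for illustration, not the PartSvrList API.
struct RangeSketch
{
  static const int64_t MAX_RANGES = 4;
  int64_t starts[MAX_RANGES];
  int64_t ends[MAX_RANGES];
  int64_t cnt;

  RangeSketch() : cnt(0) {}

  void insert(const int64_t start, const int64_t end)
  {
    // Find the first range that ends at or after the new start.
    int64_t pos = 0;
    while (pos < cnt && ends[pos] < start) {
      ++pos;
    }
    if (pos < cnt && starts[pos] <= end) {
      // Overlap: widen the existing range, then absorb any later ranges
      // that the widened range now covers.
      if (start < starts[pos]) { starts[pos] = start; }
      if (end > ends[pos]) { ends[pos] = end; }
      int64_t next = pos + 1;
      while (next < cnt && starts[next] <= ends[pos]) {
        if (ends[next] > ends[pos]) { ends[pos] = ends[next]; }
        ++next;
      }
      // Shift the remaining ranges down over the absorbed ones.
      const int64_t absorbed = next - (pos + 1);
      for (int64_t i = pos + 1; i + absorbed < cnt; ++i) {
        starts[i] = starts[i + absorbed];
        ends[i] = ends[i + absorbed];
      }
      cnt -= absorbed;
    } else if (cnt < MAX_RANGES) {
      // No overlap and there is room: shift and insert a new range at pos.
      for (int64_t i = cnt; i > pos; --i) {
        starts[i] = starts[i - 1];
        ends[i] = ends[i - 1];
      }
      starts[pos] = start;
      ends[pos] = end;
      ++cnt;
    } else {
      // No overlap and the array is full: force-merge with the range at pos
      // (or with the last range if the new range lies beyond all of them).
      const int64_t merge_pos = (pos < cnt) ? pos : cnt - 1;
      if (start < starts[merge_pos]) { starts[merge_pos] = start; }
      if (end > ends[merge_pos]) { ends[merge_pos] = end; }
    }
  }
};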
|
||||
|
||||
// PartSvrList::add_server_or_update()
|
||||
// Mainly tests the find_pos_and_merge_ function: the insert position is found and a log range merge occurs
|
||||
TEST_F(TestObLogPartSvrList, add_server_test2)
|
||||
{
|
||||
const int64_t svr_idx = 0;
|
||||
common::ObAddr expect_svr = servers[svr_idx];
|
||||
int64_t expect_range_num = 0;
|
||||
LogIdRange expect_log_ranges[MAX_RANGE_NUMBER];
|
||||
(void)memset(expect_log_ranges, 0, MAX_RANGE_NUMBER * sizeof(LogIdRange));
|
||||
const bool is_located_in_meta_table = false;
|
||||
const bool is_leader = false;
|
||||
|
||||
PartSvrList svr_list;
|
||||
EXPECT_EQ(0, svr_list.next_svr_index_);
|
||||
EXPECT_EQ(0, svr_list.count());
|
||||
// init range
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(expect_svr, 60, 80,
|
||||
is_located_in_meta_table, REGION_PRIORITY_LOW, REPLICA_PRIORITY_FULL, is_leader));
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(expect_svr, 100, 200,
|
||||
is_located_in_meta_table, REGION_PRIORITY_LOW, REPLICA_PRIORITY_FULL, is_leader));
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(expect_svr, 300, 400,
|
||||
is_located_in_meta_table, REGION_PRIORITY_LOW, REPLICA_PRIORITY_FULL, is_leader));
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(expect_svr, 500, 600,
|
||||
is_located_in_meta_table, REGION_PRIORITY_LOW, REPLICA_PRIORITY_FULL, is_leader));
|
||||
expect_range_num = 4;
|
||||
expect_log_ranges[0].reset(60, 80);
|
||||
expect_log_ranges[1].reset(100, 200);
|
||||
expect_log_ranges[2].reset(300, 400);
|
||||
expect_log_ranges[3].reset(500, 600);
|
||||
|
||||
/// add log id range: (70, 90)
|
||||
// current range:[60, 80], [100, 200], [300, 400], [500, 600]
|
||||
// Merge with 1st range only
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(expect_svr, 70, 90,
|
||||
is_located_in_meta_table, REGION_PRIORITY_LOW, REPLICA_PRIORITY_FULL, is_leader));
|
||||
// Verify the correctness of the svr_item field
|
||||
expect_log_ranges[0].reset(60, 90);
|
||||
is_svr_item_correct(svr_list, svr_idx, expect_svr, expect_range_num, expect_log_ranges);
|
||||
|
||||
/// add log id range: (50, 450)
|
||||
// current range:[60, 90], [100, 200], [300, 400], [500, 600]
|
||||
// The new range merges with the 1st, 2nd and 3rd ranges
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(expect_svr, 50, 450,
|
||||
is_located_in_meta_table, REGION_PRIORITY_LOW, REPLICA_PRIORITY_FULL, is_leader));
|
||||
// Verify the correctness of the svr_item field
|
||||
expect_range_num = 2;
|
||||
expect_log_ranges[0].reset(50, 450);
|
||||
expect_log_ranges[1].reset(500, 600);
|
||||
is_svr_item_correct(svr_list, svr_idx, expect_svr, expect_range_num, expect_log_ranges);
|
||||
}
|
||||
|
||||
TEST_F(TestObLogPartSvrList, next_server)
|
||||
{
|
||||
// request next log: log_id=250
|
||||
uint64_t next_log_id = 250;
|
||||
BlackList black_list;
|
||||
common::ObAddr svr;
|
||||
|
||||
int64_t svr_idx = 0;
|
||||
common::ObAddr expect_svr;
|
||||
int64_t expect_range_num = 0;
|
||||
LogIdRange expect_log_ranges[MAX_RANGE_NUMBER];
|
||||
(void)memset(expect_log_ranges, 0, MAX_RANGE_NUMBER * sizeof(LogIdRange));
|
||||
|
||||
PartSvrList svr_list;
|
||||
EXPECT_EQ(0, svr_list.next_svr_index_);
|
||||
EXPECT_EQ(0, svr_list.count());
|
||||
const bool is_located_in_meta_table = false;
|
||||
const bool is_leader = false;
|
||||
|
||||
/// case 1: for this partition, the current ServerList has 3 servers
|
||||
// server-1: log range: [300, 500], [600, 700]
|
||||
// server-2: log range: [100, 150], [160, 200]
|
||||
// server-3: log range: [50, 90], [100, 150], [200, 300], [400, 500]
|
||||
//
|
||||
// For server-1, log 250 is below the lower bound of its ranges, so the range scan exits directly; server-1 does not serve log 250 but is still valid, move on to the next server
|
||||
//
|
||||
// For server-2, it does not serve log 250 and is no longer valid: since next_log_id is generally
|
||||
// monotonically increasing and every log range server-2 maintains is below 250, the ServerList needs to delete server-2
|
||||
//
|
||||
// For server-3, it serves log 250, and the stale ranges [50, 90], [100, 150] it maintains need to be deleted
|
||||
|
||||
// server-1
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[1], 300, 500,
|
||||
is_located_in_meta_table, REGION_PRIORITY_LOW, REPLICA_PRIORITY_FULL, is_leader));
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[1], 600, 700,
|
||||
is_located_in_meta_table, REGION_PRIORITY_LOW, REPLICA_PRIORITY_FULL, is_leader));
|
||||
|
||||
// server-2
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[2], 100, 150,
|
||||
is_located_in_meta_table, REGION_PRIORITY_LOW, REPLICA_PRIORITY_FULL, is_leader));
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[2], 160, 200,
|
||||
is_located_in_meta_table, REGION_PRIORITY_LOW, REPLICA_PRIORITY_FULL, is_leader));
|
||||
|
||||
// server-3
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[3], 50, 90,
|
||||
is_located_in_meta_table, REGION_PRIORITY_LOW, REPLICA_PRIORITY_FULL, is_leader));
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[3], 100, 150,
|
||||
is_located_in_meta_table, REGION_PRIORITY_LOW, REPLICA_PRIORITY_FULL, is_leader));
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[3], 200, 300,
|
||||
is_located_in_meta_table, REGION_PRIORITY_LOW, REPLICA_PRIORITY_FULL, is_leader));
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[3], 400, 500,
|
||||
is_located_in_meta_table, REGION_PRIORITY_LOW, REPLICA_PRIORITY_FULL, is_leader));
|
||||
|
||||
EXPECT_EQ(3, svr_list.count());
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.next_server(next_log_id, black_list, svr));
|
||||
|
||||
/// Verify correctness
|
||||
/// Number of svr minus 1
|
||||
EXPECT_EQ(2, svr_list.count());
|
||||
|
||||
// verify log range of server-3
|
||||
svr_idx = 1;
|
||||
expect_svr = servers[3];
|
||||
expect_range_num = 2;
|
||||
expect_log_ranges[0].reset(200, 300);
|
||||
expect_log_ranges[1].reset(400, 500);
|
||||
is_svr_item_correct(svr_list, svr_idx, expect_svr, expect_range_num, expect_log_ranges);
|
||||
|
||||
|
||||
/// case 2: For this partition, the current ServerList has 2 servers
|
||||
// server-1: log range: [300, 500], [600, 700]
|
||||
// server-3: log range: [200, 300], [400, 500]
|
||||
//
|
||||
EXPECT_EQ(2, svr_list.svr_items_.count());
|
||||
svr.reset();
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.next_server(next_log_id, black_list, svr));
|
||||
|
||||
// request log_id 650
|
||||
next_log_id = 650;
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.next_server(next_log_id, black_list, svr));
|
||||
svr_idx = 0;
|
||||
expect_svr = servers[1];
|
||||
expect_range_num = 1;
|
||||
expect_log_ranges[0].reset(600, 700);
|
||||
is_svr_item_correct(svr_list, svr_idx, expect_svr, expect_range_num, expect_log_ranges);
|
||||
}
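
// Illustrative sketch (not part of the original tests): next_server above
// rotates through the server list, skips servers that do not yet serve the
// requested log id, and drops servers whose ranges all lie below it (since
// next_log_id only grows, such servers can never serve this partition again).
// The types and names below are illustrative assumptions, not liboblog APIs.
struct ServerSketch
{
  int64_t id;            // stand-in for the server address
  int64_t range_start;   // single [start, end) log id range for simplicity
  int64_t range_end;
};

// Returns the index of a server that serves next_log_id, or -1 if none does.
// Servers whose whole range is below next_log_id are removed in place.
static int64_t pick_next_server(ServerSketch *servers, int64_t &count, const int64_t next_log_id)
{
  int64_t idx = 0;
  while (idx < count) {
    if (servers[idx].range_end <= next_log_id) {
      // Stale server: every log it serves precedes next_log_id, remove it.
      for (int64_t i = idx; i + 1 < count; ++i) {
        servers[i] = servers[i + 1];
      }
      --count;
    } else if (servers[idx].range_start > next_log_id) {
      // Valid server, but it does not serve this log id yet; try the next one.
      ++idx;
    } else {
      return idx;   // this server serves next_log_id
    }
  }
  return -1;
}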
|
||||
|
||||
// PartSvrList: exist(), get_sever_array()
|
||||
TEST_F(TestObLogPartSvrList, other_function)
|
||||
{
|
||||
PartSvrList svr_list;
|
||||
|
||||
for (int64_t idx = 0; idx < 32; idx++) {
|
||||
// Half of the clog_history table records and half of the meta table records
|
||||
if (idx < 16) {
|
||||
const bool is_located_in_meta_table1 = false;
|
||||
const bool is_leader1 = false;
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[idx], 100, 200,
|
||||
is_located_in_meta_table1, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, is_leader1));
|
||||
} else {
|
||||
const bool is_located_in_meta_table2 = true;
|
||||
bool is_leader2 = false;
|
||||
if (31 == idx) {
|
||||
is_leader2 = true;
|
||||
}
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.add_server_or_update(servers[idx], 100, 200,
|
||||
is_located_in_meta_table2, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, is_leader2));
|
||||
}
|
||||
}
|
||||
|
||||
int64_t svr_index = -1;
|
||||
for (int64_t idx = 0; idx < 32; idx++) {
|
||||
EXPECT_TRUE(svr_list.exist(servers[idx], svr_index));
|
||||
}
|
||||
StartLogIdLocateReq::SvrList svr_list_for_locate_start_log_id;
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list.get_server_array_for_locate_start_log_id(svr_list_for_locate_start_log_id));
|
||||
EXPECT_EQ(32, svr_list_for_locate_start_log_id.count());
|
||||
|
||||
// verify leader is the first
|
||||
EXPECT_EQ(svr_list_for_locate_start_log_id.at(0).svr_, servers[31]);
|
||||
for (int64_t idx = 1; idx < 32; idx++) {
|
||||
ObAddr &addr = svr_list_for_locate_start_log_id.at(idx).svr_;
|
||||
int64_t start_idx = -1;
|
||||
int64_t end_idx = -1;
|
||||
|
||||
// meta table
|
||||
if (idx < 16) {
|
||||
start_idx = 16;
|
||||
end_idx = 32;
|
||||
} else {
|
||||
// clog history table
|
||||
start_idx = 0;
|
||||
end_idx = 16;
|
||||
}
|
||||
|
||||
bool find = false;
|
||||
for (int64_t svr_idx = start_idx; svr_idx < end_idx; ++svr_idx) {
|
||||
if (addr == servers[svr_idx]) {
|
||||
find = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
EXPECT_TRUE(find);
|
||||
}
|
||||
//
|
||||
// There are 6 servers in total, added in the following order.
|
||||
// server sequence: svr1, svr2, svr1, svr3, svr4, svr1, svr2, svr3, svr5, svr6
|
||||
//
|
||||
// server:        svr1  svr2  svr3  svr4  svr5  svr6
|
||||
// is_meta_table     0     0     0     0     1     1
|
||||
// is_leader         0     0     0     0     0     1
|
||||
// Expected: the leader comes first, then servers in the meta table, then the rest
|
||||
// i.e. svr6, svr5, ...
|
||||
|
||||
PartSvrList svr_list1;
|
||||
// svr1
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list1.add_server_or_update(servers[1], 100, 200, false, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, false));
|
||||
|
||||
// svr2
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list1.add_server_or_update(servers[2], 100, 200, false, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, false));
|
||||
|
||||
// svr1
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list1.add_server_or_update(servers[1], 100, 200, false, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, false));
|
||||
|
||||
// svr3
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list1.add_server_or_update(servers[3], 100, 200, false, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, false));
|
||||
|
||||
// svr4
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list1.add_server_or_update(servers[4], 100, 200, false, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, false));
|
||||
|
||||
// svr1
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list1.add_server_or_update(servers[1], 100, 200, false, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, false));
|
||||
|
||||
// svr2
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list1.add_server_or_update(servers[2], 100, 200, false, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, false));
|
||||
|
||||
// svr3
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list1.add_server_or_update(servers[3], 100, 200, false, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, false));
|
||||
|
||||
// svr5
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list1.add_server_or_update(servers[5], 100, 200, true, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, false));
|
||||
|
||||
// svr6
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list1.add_server_or_update(servers[6], 100, 200, true, REGION_PRIORITY_HIGH, REPLICA_PRIORITY_FULL, true));
|
||||
|
||||
StartLogIdLocateReq::SvrList svr_list_for_locate_start_log_id_1;
|
||||
EXPECT_EQ(OB_SUCCESS, svr_list1.get_server_array_for_locate_start_log_id(svr_list_for_locate_start_log_id_1));
|
||||
EXPECT_EQ(6, svr_list_for_locate_start_log_id_1.count());
|
||||
|
||||
int expect_result_index[] = {6, 5};
|
||||
for (int64_t idx = 0; idx < 2; idx++) {
|
||||
EXPECT_EQ(svr_list_for_locate_start_log_id_1.at(idx).svr_, servers[expect_result_index[idx]]);
|
||||
}
|
||||
}
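
// Illustrative sketch (not part of the original tests): the ordering checked
// above can be produced by a stable three-bucket pass over the server list:
// the leader first, then servers recorded in the meta table, then everything
// else, preserving insertion order inside each bucket. The names below are
// illustrative assumptions, not the PartSvrList implementation.
struct LocateSvrSketch
{
  int64_t id;
  bool in_meta_table;
  bool is_leader;
};

// Writes the reordered list into out (which must hold at least count entries).
static void order_for_locate(const LocateSvrSketch *in, const int64_t count, LocateSvrSketch *out)
{
  int64_t n = 0;
  // Bucket 1: the leader.
  for (int64_t i = 0; i < count; ++i) {
    if (in[i].is_leader) { out[n++] = in[i]; }
  }
  // Bucket 2: meta-table servers that are not the leader.
  for (int64_t i = 0; i < count; ++i) {
    if (!in[i].is_leader && in[i].in_meta_table) { out[n++] = in[i]; }
  }
  // Bucket 3: the remaining (clog history table) servers.
  for (int64_t i = 0; i < count; ++i) {
    if (!in[i].is_leader && !in[i].in_meta_table) { out[n++] = in[i]; }
  }
}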
|
||||
|
||||
}//end of unittest
|
||||
}//end of oceanbase
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
// ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
// testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
int ret = 1;
|
||||
ObLogger &logger = ObLogger::get_logger();
|
||||
logger.set_file_name("test_ob_log_part_svr_list.log", true);
|
||||
logger.set_log_level(OB_LOG_LEVEL_INFO);
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
ret = RUN_ALL_TESTS();
|
||||
return ret;
|
||||
}
|
||||
966
unittest/obcdc/test_ob_log_part_trans_resolver.cpp
Normal file
966
unittest/obcdc/test_ob_log_part_trans_resolver.cpp
Normal file
@ -0,0 +1,966 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#define USING_LOG_PREFIX OBLOG_FETCHER
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
|
||||
#include "share/ob_define.h"
|
||||
#include "storage/ob_storage_log_type.h"
|
||||
#include "storage/transaction/ob_trans_log.h"
|
||||
#include "ob_log_fetch_stat_info.h"
|
||||
|
||||
#define private public
|
||||
#include "obcdc/src/ob_log_part_trans_resolver.h"
|
||||
#include "test_trans_log_generator.h"
|
||||
#include "test_sp_trans_log_generator.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
using namespace transaction;
|
||||
using namespace storage;
|
||||
using namespace clog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
// Task Pool
|
||||
static const int64_t PREALLOC_POOL_SIZE = 10 * 1024;
|
||||
static const int64_t TRANS_TASK_PAGE_SIZE = 1024;
|
||||
static const int64_t TRANS_TASK_BLOCK_SIZE = 4 * 1024 *1024;
|
||||
static const int64_t PREALLOC_PAGE_COUNT = 1024;
|
||||
|
||||
// For task pool init
|
||||
ObConcurrentFIFOAllocator fifo_allocator;
|
||||
|
||||
// test trans count
|
||||
static const int64_t TRANS_COUNT = 100;
|
||||
// redo log count
|
||||
static const int64_t TRANS_REDO_LOG_COUNT = 100;
|
||||
|
||||
int init_task_pool(ObLogTransTaskPool<PartTransTask> &task_pool)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
ret = fifo_allocator.init(16 * _G_, 16 * _M_, OB_MALLOC_NORMAL_BLOCK_SIZE);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
|
||||
ret = task_pool.init(&fifo_allocator, PREALLOC_POOL_SIZE, TRANS_TASK_PAGE_SIZE,
|
||||
TRANS_TASK_BLOCK_SIZE, true, PREALLOC_PAGE_COUNT);
|
||||
EXPECT_EQ(OB_SUCCESS, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Test scenario.
|
||||
* For N transactions, half of which commit, half of which abort
|
||||
* Each transaction has a random redo log
|
||||
*
|
||||
* Log sequence: redo, redo, ... redo, prepare, commit/abort
|
||||
*
|
||||
* // redo info
|
||||
* redo_log_cnt
|
||||
* ObLogIdArray redo_log_ids;
|
||||
*
|
||||
* // prepare info
|
||||
* int64_t seq;
|
||||
* common::ObPartitionKey partition;
|
||||
* int64_t prepare_timestamp;
|
||||
* ObTransID trans_id;
|
||||
* uint64_t prepare_log_id;
|
||||
* uint64_t cluster_id;
|
||||
*
|
||||
* // commit info
|
||||
* int64_t global_trans_version;
|
||||
* PartitionLogInfoArray *participants;
|
||||
*
|
||||
*/
|
||||
TEST(PartTransResolver, BasicTest1)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
|
||||
// Commit half of the transactions: those with an even idx.
|
||||
const int64_t trans_cnt = TRANS_COUNT;
|
||||
const int64_t commit_trans_cnt = trans_cnt / 2;
|
||||
const int64_t abort_trans_cnt = trans_cnt - commit_trans_cnt;
|
||||
ObAddr addr(ObAddr::IPV4, "127.0.0.1", 8888);
|
||||
|
||||
TransLogInfo trans_log_info;
|
||||
// redo info
|
||||
int64_t redo_cnt = 0;
|
||||
ObLogIdArray redo_log_ids;
|
||||
// prepare info
|
||||
int64_t seq = 0;
|
||||
ObPartitionKey pkey(1000U, 1, 1);
|
||||
int64_t prepare_timestamp = PREPARE_TIMESTAMP;
|
||||
ObTransID trans_id(addr);
|
||||
uint64_t prepare_log_id = 0;
|
||||
uint64_t CLOUSTER_ID = 1000;
|
||||
// commit info
|
||||
int64_t global_trans_version = GLOBAL_TRANS_VERSION;
|
||||
PartitionLogInfoArray ptl_ids;
|
||||
|
||||
// Log gen.
|
||||
TransLogEntryGeneratorBase log_gen(pkey, trans_id);
|
||||
// Task Pool.
|
||||
ObLogTransTaskPool<PartTransTask> task_pool;
|
||||
EXPECT_EQ(OB_SUCCESS, init_task_pool(task_pool));
|
||||
// Parser.
|
||||
MockParser1 parser;
|
||||
EXPECT_EQ(OB_SUCCESS, parser.init());
|
||||
|
||||
// Partitioned Transaction Parser
|
||||
PartTransResolver pr;
|
||||
err = pr.init(pkey, parser, task_pool);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Read logs.
|
||||
ObLogIdArray missing;
|
||||
TransStatInfo tsi;
|
||||
volatile bool stop_flag = false;
|
||||
|
||||
for (int64_t idx = 0; idx < trans_cnt; ++idx) {
|
||||
redo_cnt = get_timestamp() % TRANS_REDO_LOG_COUNT + 1;
|
||||
redo_log_ids.reset();
|
||||
for (int64_t cnt = 0; cnt < redo_cnt; ++cnt) {
|
||||
EXPECT_EQ(OB_SUCCESS, redo_log_ids.push_back(log_gen.get_log_id() + cnt));
|
||||
}
|
||||
prepare_log_id = log_gen.get_log_id() + redo_cnt;
|
||||
ptl_ids.reset();
|
||||
|
||||
ObPartitionLogInfo ptl_id(pkey, prepare_log_id, PREPARE_TIMESTAMP);
|
||||
err = ptl_ids.push_back(ptl_id);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
// push fixed participant information
|
||||
for (int64_t idx = 0; idx < FIXED_PART_COUNT; ++idx) {
|
||||
err = ptl_ids.push_back(FIXED_PART_INFO[idx]);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
trans_log_info.reset(redo_cnt, redo_log_ids, seq, pkey, prepare_timestamp,
|
||||
trans_id, prepare_log_id, CLOUSTER_ID, global_trans_version, ptl_ids);
|
||||
EXPECT_EQ(OB_SUCCESS, parser.push_into_queue(&trans_log_info));
|
||||
seq++;
|
||||
|
||||
// Commit trans with even idx.
|
||||
log_gen.next_trans(redo_cnt, (0 == idx % 2));
|
||||
clog::ObLogEntry log_entry;
|
||||
|
||||
while (OB_SUCCESS == log_gen.next_log_entry(log_entry)) {
|
||||
err = pr.read(log_entry, missing, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
err = pr.flush(stop_flag);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Verify the correctness of partition task data
|
||||
bool check_result;
|
||||
EXPECT_EQ(OB_SUCCESS, parser.get_check_result(check_result));
|
||||
EXPECT_TRUE(check_result);
|
||||
LOG_DEBUG("debug", K(idx));
|
||||
}
|
||||
|
||||
// Check.
|
||||
EXPECT_EQ(commit_trans_cnt, parser.get_commit_trans_cnt());
|
||||
EXPECT_EQ(abort_trans_cnt, parser.get_abort_trans_cnt());
|
||||
|
||||
// Destroy.
|
||||
pr.destroy();
|
||||
task_pool.destroy();
|
||||
fifo_allocator.destroy();
|
||||
}
|
||||
|
||||
/*
|
||||
* Test scenario.
|
||||
* For N transactions, half of which commit, half of which abort
|
||||
* Each transaction has a random redo log
|
||||
* Log sequence: redo, redo... redo-prepare, commit/abort
|
||||
* redo-prepare in a log entry
|
||||
*
|
||||
*/
|
||||
TEST(PartTransResolver, BasicTest2)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
|
||||
// Commit half of the transactions: those with an even idx.
|
||||
const int64_t trans_cnt = TRANS_COUNT;
|
||||
const int64_t commit_trans_cnt = trans_cnt / 2;
|
||||
const int64_t abort_trans_cnt = trans_cnt - commit_trans_cnt;
|
||||
ObAddr addr(ObAddr::IPV4, "127.0.0.1", 8888);
|
||||
|
||||
TransLogInfo trans_log_info;
|
||||
// redo info
|
||||
int64_t redo_cnt = 0;
|
||||
ObLogIdArray redo_log_ids;
|
||||
// prepare info
|
||||
int64_t seq = 0;
|
||||
ObPartitionKey pkey(1000U, 1, 1);
|
||||
int64_t prepare_timestamp = PREPARE_TIMESTAMP;
|
||||
ObTransID trans_id(addr);
|
||||
uint64_t prepare_log_id = 0;
|
||||
uint64_t CLOUSTER_ID = 1000;
|
||||
// commit info
|
||||
int64_t global_trans_version = GLOBAL_TRANS_VERSION;
|
||||
PartitionLogInfoArray ptl_ids;
|
||||
|
||||
// Log gen.
|
||||
TransLogEntryGeneratorBase log_gen(pkey, trans_id);
|
||||
// Task Pool.
|
||||
ObLogTransTaskPool<PartTransTask> task_pool;
|
||||
EXPECT_EQ(OB_SUCCESS, init_task_pool(task_pool));
|
||||
// Parser.
|
||||
MockParser1 parser;
|
||||
EXPECT_EQ(OB_SUCCESS, parser.init());
|
||||
|
||||
// Partitioned Transaction Parser
|
||||
PartTransResolver pr;
|
||||
err = pr.init(pkey, parser, task_pool);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Read logs.
|
||||
ObLogIdArray missing;
|
||||
TransStatInfo tsi;
|
||||
bool stop_flag = false;
|
||||
for (int64_t idx = 0; idx < trans_cnt; ++idx) {
|
||||
redo_cnt = get_timestamp() % TRANS_REDO_LOG_COUNT + 2;
|
||||
redo_log_ids.reset();
|
||||
for (int64_t cnt = 0; cnt < redo_cnt; ++cnt) {
|
||||
EXPECT_EQ(OB_SUCCESS, redo_log_ids.push_back(log_gen.get_log_id() + cnt));
|
||||
}
|
||||
prepare_log_id = log_gen.get_log_id() + redo_cnt - 1;
|
||||
|
||||
ptl_ids.reset();
|
||||
ObPartitionLogInfo ptl_id(pkey, prepare_log_id, PREPARE_TIMESTAMP);
|
||||
err = ptl_ids.push_back(ptl_id);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
// push fixed participant information
|
||||
for (int64_t idx = 0; idx < FIXED_PART_COUNT; ++idx) {
|
||||
err = ptl_ids.push_back(FIXED_PART_INFO[idx]);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
trans_log_info.reset(redo_cnt, redo_log_ids, seq, pkey, prepare_timestamp,
|
||||
trans_id, prepare_log_id, CLOUSTER_ID, global_trans_version, ptl_ids);
|
||||
EXPECT_EQ(OB_SUCCESS, parser.push_into_queue(&trans_log_info));
|
||||
seq++;
|
||||
|
||||
// Commit trans with even idx.
|
||||
log_gen.next_trans_with_redo_prepare(redo_cnt, (0 == idx % 2));
|
||||
clog::ObLogEntry log_entry;
|
||||
|
||||
// read redo, redo... redo-prepare
|
||||
for (int64_t log_cnt = 0; log_cnt < redo_cnt; log_cnt++) {
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.next_log_entry_with_redo_prepare(log_entry));
|
||||
err = pr.read(log_entry, missing, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
// read commit/abort log
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.next_log_entry_with_redo_prepare(log_entry));
|
||||
err = pr.read(log_entry, missing, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
err = pr.flush(stop_flag);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Verify the correctness of partition task data
|
||||
bool check_result;
|
||||
EXPECT_EQ(OB_SUCCESS, parser.get_check_result(check_result));
|
||||
EXPECT_TRUE(check_result);
|
||||
}
|
||||
|
||||
// Check.
|
||||
EXPECT_EQ(commit_trans_cnt, parser.get_commit_trans_cnt());
|
||||
EXPECT_EQ(abort_trans_cnt, parser.get_abort_trans_cnt());
|
||||
|
||||
// Destroy.
|
||||
pr.destroy();
|
||||
task_pool.destroy();
|
||||
fifo_allocator.destroy();
|
||||
}
|
||||
|
||||
/*
|
||||
* Test scenario.
|
||||
* Parsing reaches the prepare log and finds redo logs missing, so the missing logs need to be read first
|
||||
* For N transactions, half of them commit, half of them abort
|
||||
* Each transaction has a random redo log
|
||||
* Two cases.
|
||||
* 1. redo, redo, redo...prepare, commit/abort
|
||||
* 2. redo, redo, redo...redo-prepare, commit/abort
|
||||
*
|
||||
*/
|
||||
TEST(PartTransResolver, BasicTest3)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
|
||||
// Commit half of the transactions: those with an even idx.
|
||||
const int64_t trans_cnt = TRANS_COUNT;
|
||||
const int64_t commit_trans_cnt = trans_cnt / 2;
|
||||
int64_t redo_cnt = 0;
|
||||
int64_t miss_redo_cnt = 0;
|
||||
int64_t can_read_redo_cnt = 0;
|
||||
|
||||
// Pkey.
|
||||
ObPartitionKey pkey(1000U, 1, 1);
|
||||
// addr
|
||||
ObAddr addr(ObAddr::IPV4, "127.0.0.1", 8888);
|
||||
ObTransID trans_id(addr);
|
||||
|
||||
// Log gen.
|
||||
TransLogEntryGenerator1 log_gen(pkey, trans_id);
|
||||
// Task Pool.
|
||||
ObLogTransTaskPool<PartTransTask> task_pool;
|
||||
EXPECT_EQ(OB_SUCCESS, init_task_pool(task_pool));
|
||||
// Parser.
|
||||
MockParser2 parser;
|
||||
// Partitioned Transaction Parser
|
||||
PartTransResolver pr;
|
||||
err = pr.init(pkey, parser, task_pool);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Read logs.
|
||||
ObLogIdArray missing;
|
||||
TransStatInfo tsi;
|
||||
bool stop_flag = false;
|
||||
|
||||
// case 1: redo, redo, redo...prepare, commit/abort
|
||||
// case 2: redo, redo, redo...redo-prepare, commit/abort
|
||||
bool is_normal_trans = false;
|
||||
bool is_redo_with_prapare_trans = false;
|
||||
for (int64_t idx = 0; idx < trans_cnt; ++idx) {
|
||||
if (idx < trans_cnt / 2) {
|
||||
is_normal_trans = true;
|
||||
} else {
|
||||
is_redo_with_prapare_trans = true;
|
||||
}
|
||||
redo_cnt = get_timestamp() % TRANS_REDO_LOG_COUNT + 1;
|
||||
if (is_normal_trans) {
|
||||
miss_redo_cnt = get_timestamp() % redo_cnt + 1;
|
||||
can_read_redo_cnt = redo_cnt - miss_redo_cnt;
|
||||
} else if (is_redo_with_prapare_trans){
|
||||
miss_redo_cnt = get_timestamp() % redo_cnt;
|
||||
can_read_redo_cnt = redo_cnt - miss_redo_cnt - 1;
|
||||
} else {
|
||||
}
|
||||
|
||||
// Commit trans with even idx.
|
||||
if (is_normal_trans) {
|
||||
log_gen.next_trans_with_miss_redo(redo_cnt, miss_redo_cnt, (0 == idx % 2), NORMAL_TRAN);
|
||||
} else if (is_redo_with_prapare_trans){
|
||||
log_gen.next_trans_with_miss_redo(redo_cnt, miss_redo_cnt, (0 == idx % 2), REDO_WITH_PREPARE_TRAN);
|
||||
} else {
|
||||
}
|
||||
|
||||
uint64_t start_redo_log_id = log_gen.get_log_id();
|
||||
clog::ObLogEntry log_entry;
|
||||
|
||||
// First read the can_read_redo_cnt redo log
|
||||
for (int64_t log_cnt = 0; log_cnt < can_read_redo_cnt; ++log_cnt) {
|
||||
if (is_normal_trans) {
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.next_log_entry_missing_redo(NORMAL_TRAN, log_entry));
|
||||
} else if (is_redo_with_prapare_trans){
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.next_log_entry_missing_redo(REDO_WITH_PREPARE_TRAN, log_entry));
|
||||
} else {
|
||||
}
|
||||
err = pr.read(log_entry, missing, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
// Read the prepare log and find the missing redo logs
|
||||
if (is_normal_trans) {
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.next_log_entry_missing_redo(NORMAL_TRAN, log_entry));
|
||||
} else if (is_redo_with_prapare_trans){
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.next_log_entry_missing_redo(REDO_WITH_PREPARE_TRAN, log_entry));
|
||||
} else {
|
||||
}
|
||||
err = pr.read(log_entry, missing, tsi);
|
||||
EXPECT_EQ(OB_ITEM_NOT_SETTED, err);
|
||||
|
||||
// Verify the missing array and read the missing redo logs
|
||||
const int64_t miss_array_cnt = missing.count();
|
||||
EXPECT_EQ(miss_redo_cnt, miss_array_cnt);
|
||||
for (int64_t log_cnt = 0; log_cnt < miss_array_cnt; ++log_cnt) {
|
||||
LOG_DEBUG("miss", K(missing[log_cnt]));
|
||||
EXPECT_EQ(start_redo_log_id, missing[log_cnt]);
|
||||
start_redo_log_id++;
|
||||
|
||||
clog::ObLogEntry miss_log_entry;
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.next_miss_log_entry(missing[log_cnt], miss_log_entry));
|
||||
err = pr.read_missing_redo(miss_log_entry);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
// After reading the missing redo logs, read the prepare log again to advance the partition transaction task
|
||||
if (is_normal_trans) {
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.get_prepare_log_entry(NORMAL_TRAN, log_entry));
|
||||
} else if (is_redo_with_prapare_trans){
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.get_prepare_log_entry(REDO_WITH_PREPARE_TRAN, log_entry));
|
||||
} else {
|
||||
}
|
||||
err = pr.read(log_entry, missing, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// read commit/abort log
|
||||
if (is_normal_trans) {
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.next_log_entry_missing_redo(NORMAL_TRAN, log_entry));
|
||||
} else if (is_redo_with_prapare_trans){
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.next_log_entry_missing_redo(REDO_WITH_PREPARE_TRAN, log_entry));
|
||||
} else {
|
||||
}
|
||||
err = pr.read(log_entry, missing, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
err = pr.flush(stop_flag);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
// Check.
|
||||
EXPECT_EQ(commit_trans_cnt, parser.get_commit_trans_cnt());
|
||||
|
||||
// Destroy.
|
||||
pr.destroy();
|
||||
task_pool.destroy();
|
||||
fifo_allocator.destroy();
|
||||
}
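
// Illustrative sketch (not part of the original tests): the missing-redo flow
// exercised above boils down to comparing the redo log ids listed in the
// prepare log against those already read; ids not yet read are reported back
// so the caller can fetch them before the transaction is completed. The helper
// below is an illustrative stand-in, not the PartTransResolver API, and it
// assumes both arrays are sorted ascending, as redo logs are generated in order.
static int64_t collect_missing_redo(const uint64_t *expected, const int64_t expected_cnt,
                                    const uint64_t *already_read, const int64_t read_cnt,
                                    uint64_t *missing)
{
  int64_t missing_cnt = 0;
  int64_t r = 0;
  for (int64_t e = 0; e < expected_cnt; ++e) {
    while (r < read_cnt && already_read[r] < expected[e]) {
      ++r;
    }
    if (r >= read_cnt || already_read[r] != expected[e]) {
      missing[missing_cnt++] = expected[e];   // not read yet: report as missing
    }
  }
  return missing_cnt;
}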
|
||||
|
||||
/*
|
||||
* (r stands for redo, p stands for prepare, c stands for commit, a stands for abort)
|
||||
* The numbers after r/p/c/a represent the different transactions
|
||||
* Log sequence:
|
||||
* r1 r2 r2 r2 p2 p1 c1 c2 r3 p3 c3
|
||||
* Verifying the correctness of parsing multiple transactions, i.e. constructing different partitioned transaction tasks based on different transaction IDs
|
||||
* Verify the output order of transactions: transaction 2 -> transaction 1 -> transaction 3
|
||||
*/
|
||||
TEST(PartTransResolver, BasicTest4)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
|
||||
ObPartitionKey pkey(1000U, 1, 1);
|
||||
|
||||
// Task Pool.
|
||||
ObLogTransTaskPool<PartTransTask> task_pool;
|
||||
EXPECT_EQ(OB_SUCCESS, init_task_pool(task_pool));
|
||||
// Parser.
|
||||
MockParser1 parser;
|
||||
EXPECT_EQ(OB_SUCCESS, parser.init());
|
||||
|
||||
// Partitioned Transaction Parser
|
||||
PartTransResolver pr;
|
||||
err = pr.init(pkey, parser, task_pool);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
const int64_t commit_trans_cnt = 3;
|
||||
// redo info
|
||||
int64_t redo_cnt_array[3] = {1, 3, 1};
|
||||
ObLogIdArray redo_log_ids_array[3];
|
||||
for (int64_t i = 0; i < 3; ++i) {
|
||||
for (int64_t j = 0; j < redo_cnt_array[i]; ++j) {
|
||||
EXPECT_EQ(OB_SUCCESS, redo_log_ids_array[i].push_back(j));
|
||||
}
|
||||
}
|
||||
|
||||
// prepare info
|
||||
// trans 2 - trans 1 - trans 3->seq: 0, 1, 2
|
||||
int64_t seq_array[3] = {1, 0, 2};
|
||||
int64_t prepare_timestamp = PREPARE_TIMESTAMP;
|
||||
ObAddr addr_array[3];
|
||||
for (int64_t idx = 0; idx < 3; idx++) {
|
||||
addr_array[idx] = ObAddr(ObAddr::IPV4, "127.0.0.1", static_cast<int32_t>(8888 + idx));
|
||||
}
|
||||
// trans ID
|
||||
ObTransID trans_id_array[3] = {
|
||||
ObTransID(addr_array[0]), ObTransID(addr_array[1]), ObTransID(addr_array[2])
|
||||
};
|
||||
uint64_t prepare_log_id_array[3] = {1, 3, 1};
|
||||
uint64_t CLOUSTER_ID = 1000;
|
||||
|
||||
// commit info
|
||||
int64_t global_trans_version = GLOBAL_TRANS_VERSION;
|
||||
PartitionLogInfoArray ptl_ids_array[3];
|
||||
for (int64_t i = 0; i < 3; ++i) {
|
||||
ptl_ids_array[i].reset();
|
||||
|
||||
ObPartitionLogInfo ptl_id(pkey, prepare_log_id_array[i], PREPARE_TIMESTAMP);
|
||||
err = ptl_ids_array[i].push_back(ptl_id);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
// push fixed participant information
|
||||
for (int64_t j = 0; j < FIXED_PART_COUNT; ++j) {
|
||||
err = ptl_ids_array[i].push_back(FIXED_PART_INFO[j]);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
}
|
||||
|
||||
TransLogInfo trans_log_info_array[3];
|
||||
for (int64_t i = 0; i < 3; ++i) {
|
||||
trans_log_info_array[i].reset(redo_cnt_array[i], redo_log_ids_array[i], seq_array[i], pkey, prepare_timestamp,
|
||||
trans_id_array[i], prepare_log_id_array[i],
|
||||
CLOUSTER_ID, global_trans_version, ptl_ids_array[i]);
|
||||
}
|
||||
|
||||
// Push in the order of transaction 2 - transaction 1 - transaction 3 for subsequent validation of the transaction output order
|
||||
EXPECT_EQ(OB_SUCCESS, parser.push_into_queue(&trans_log_info_array[1]));
|
||||
EXPECT_EQ(OB_SUCCESS, parser.push_into_queue(&trans_log_info_array[0]));
|
||||
EXPECT_EQ(OB_SUCCESS, parser.push_into_queue(&trans_log_info_array[2]));
|
||||
|
||||
// Log gen. Generate logs for transactions 1, 2 and 3 respectively
|
||||
TransLogEntryGeneratorBase log_gen_1(pkey, trans_id_array[0]);
|
||||
TransLogEntryGeneratorBase log_gen_2(pkey, trans_id_array[1]);
|
||||
TransLogEntryGeneratorBase log_gen_3(pkey, trans_id_array[2]);
|
||||
|
||||
log_gen_1.next_trans(redo_cnt_array[0], true);
|
||||
log_gen_2.next_trans(redo_cnt_array[1], true);
|
||||
log_gen_3.next_trans(redo_cnt_array[2], true);
|
||||
|
||||
// Read logs.
|
||||
ObLogIdArray missing1;
|
||||
ObLogIdArray missing2;
|
||||
ObLogIdArray missing3;
|
||||
TransStatInfo tsi;
|
||||
volatile bool stop_flag = false;
|
||||
|
||||
// log seq:
|
||||
// r1 r2 r2 r2 p2 p1 c1 c2 r3 p3 c3
|
||||
clog::ObLogEntry log_entry;
|
||||
|
||||
EXPECT_EQ(0, pr.task_seq_);
|
||||
EXPECT_EQ(0, pr.prepare_seq_);
|
||||
// r1
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen_1.next_log_entry(log_entry));
|
||||
err = pr.read(log_entry, missing1, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
// r2
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen_2.next_log_entry(log_entry));
|
||||
err = pr.read(log_entry, missing2, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
// r2
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen_2.next_log_entry(log_entry));
|
||||
err = pr.read(log_entry, missing2, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
// r2
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen_2.next_log_entry(log_entry));
|
||||
err = pr.read(log_entry, missing2, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
// p2
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen_2.next_log_entry(log_entry));
|
||||
err = pr.read(log_entry, missing2, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
EXPECT_EQ(1, pr.prepare_seq_);
|
||||
// p1
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen_1.next_log_entry(log_entry));
|
||||
err = pr.read(log_entry, missing1, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
EXPECT_EQ(2, pr.prepare_seq_);
|
||||
// c1
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen_1.next_log_entry(log_entry));
|
||||
err = pr.read(log_entry, missing1, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
// c2
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen_2.next_log_entry(log_entry));
|
||||
err = pr.read(log_entry, missing2, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
// r3
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen_3.next_log_entry(log_entry));
|
||||
err = pr.read(log_entry, missing3, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
// p3
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen_3.next_log_entry(log_entry));
|
||||
err = pr.read(log_entry, missing3, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
EXPECT_EQ(3, pr.prepare_seq_);
|
||||
// c3
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen_3.next_log_entry(log_entry));
|
||||
err = pr.read(log_entry, missing3, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
err = pr.flush(stop_flag);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
EXPECT_EQ(3, pr.task_seq_);
|
||||
|
||||
// Check.
|
||||
EXPECT_EQ(commit_trans_cnt, parser.get_commit_trans_cnt());
|
||||
|
||||
// Verify the correctness of partition task data
|
||||
for (int64_t idx = 0; idx < 3; ++idx) {
|
||||
bool check_result;
|
||||
EXPECT_EQ(OB_SUCCESS, parser.get_check_result(check_result));
|
||||
EXPECT_TRUE(check_result);
|
||||
}
|
||||
|
||||
// Destroy.
|
||||
pr.destroy();
|
||||
task_pool.destroy();
|
||||
fifo_allocator.destroy();
|
||||
}
|
||||
|
||||
/*
|
||||
* Test scenario:
|
||||
* For N Sp transactions, half of them commit, half of them abort
|
||||
* Each Sp transaction has a random redo log
|
||||
*
|
||||
* log seq: redo, redo, ... redo, commit/abort
|
||||
*
|
||||
* // redo info
|
||||
* redo_log_cnt
|
||||
* ObLogIdArray redo_log_ids;
|
||||
*
|
||||
* // prepare info
|
||||
* int64_t seq;
|
||||
* common::ObPartitionKey partition;
|
||||
* int64_t prepare_timestamp;
|
||||
* ObTransID trans_id;
|
||||
* uint64_t prepare_log_id;
|
||||
* uint64_t cluster_id;
|
||||
*
|
||||
* // commit info
|
||||
* int64_t global_trans_version;
|
||||
* PartitionLogInfoArray *participants;
|
||||
*
|
||||
*/
|
||||
TEST(PartTransResolver, BasicTest5)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
|
||||
// Commit half of the transactions: those with an even idx.
|
||||
const int64_t trans_cnt = TRANS_COUNT;
|
||||
const int64_t commit_trans_cnt = trans_cnt / 2;
|
||||
const int64_t abort_trans_cnt = trans_cnt - commit_trans_cnt;
|
||||
ObAddr addr(ObAddr::IPV4, "127.0.0.1", 8888);
|
||||
|
||||
TransLogInfo trans_log_info;
|
||||
|
||||
// redo info
|
||||
int64_t redo_cnt = 0;
|
||||
ObLogIdArray redo_log_ids;
|
||||
// prepare info
|
||||
int64_t seq = 0;
|
||||
ObPartitionKey pkey(1000U, 1, 1);
|
||||
int64_t prepare_timestamp = SP_PREPARE_TIMESTAMP;
|
||||
ObTransID trans_id(addr);
|
||||
uint64_t prepare_log_id = 0;
|
||||
uint64_t CLOUSTER_ID = 1000;
|
||||
// commit info
|
||||
int64_t global_trans_version = SP_GLOBAL_TRANS_VERSION;
|
||||
PartitionLogInfoArray ptl_ids;
|
||||
|
||||
// Log gen.
|
||||
SpTransLogEntryGeneratorBase log_gen(pkey, trans_id);
|
||||
// Task Pool.
|
||||
ObLogTransTaskPool<PartTransTask> task_pool;
|
||||
EXPECT_EQ(OB_SUCCESS, init_task_pool(task_pool));
|
||||
// Parser.
|
||||
MockParser1 parser;
|
||||
EXPECT_EQ(OB_SUCCESS, parser.init());
|
||||
|
||||
// Partitioned Transaction Parser
|
||||
PartTransResolver pr;
|
||||
err = pr.init(pkey, parser, task_pool);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Read logs.
|
||||
ObLogIdArray missing;
|
||||
TransStatInfo tsi;
|
||||
volatile bool stop_flag = false;
|
||||
|
||||
for (int64_t idx = 0; idx < trans_cnt; ++idx) {
|
||||
redo_cnt = get_timestamp() % TRANS_REDO_LOG_COUNT + 1;
|
||||
redo_log_ids.reset();
|
||||
for (int64_t cnt = 0; cnt < redo_cnt; ++cnt) {
|
||||
EXPECT_EQ(OB_SUCCESS, redo_log_ids.push_back(log_gen.get_log_id() + cnt));
|
||||
}
|
||||
prepare_log_id = log_gen.get_log_id() + redo_cnt;
|
||||
ptl_ids.reset();
|
||||
ObPartitionLogInfo ptl_id(pkey, prepare_log_id, PREPARE_TIMESTAMP);
|
||||
err = ptl_ids.push_back(ptl_id);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
trans_log_info.reset(redo_cnt, redo_log_ids, seq, pkey, prepare_timestamp,
|
||||
trans_id, prepare_log_id, CLOUSTER_ID, global_trans_version, ptl_ids);
|
||||
EXPECT_EQ(OB_SUCCESS, parser.push_into_queue(&trans_log_info));
|
||||
seq++;
|
||||
|
||||
// Commit trans with even idx.
|
||||
log_gen.next_trans(redo_cnt, (0 == idx % 2));
|
||||
clog::ObLogEntry log_entry;
|
||||
|
||||
while (OB_SUCCESS == log_gen.next_log_entry(log_entry)) {
|
||||
err = pr.read(log_entry, missing, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
err = pr.flush(stop_flag);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Verify the correctness of partition task data
|
||||
bool check_result;
|
||||
EXPECT_EQ(OB_SUCCESS, parser.get_check_result(check_result));
|
||||
EXPECT_TRUE(check_result);
|
||||
}
|
||||
|
||||
// Check.
|
||||
EXPECT_EQ(commit_trans_cnt, parser.get_commit_trans_cnt());
|
||||
EXPECT_EQ(abort_trans_cnt, parser.get_abort_trans_cnt());
|
||||
|
||||
// Destroy.
|
||||
pr.destroy();
|
||||
task_pool.destroy();
|
||||
fifo_allocator.destroy();
|
||||
}
|
||||
|
||||
/*
|
||||
* Test scenario:
|
||||
* For N Sp transactions, redo and commit in the same log entry
|
||||
* Each Sp transaction has a random redo log
|
||||
*
|
||||
* log seq: redo, redo, ... redo, redo-commit
|
||||
*
|
||||
*/
|
||||
TEST(PartTransResolver, BasicTest6)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
|
||||
const int64_t trans_cnt = TRANS_COUNT;
|
||||
const int64_t commit_trans_cnt = trans_cnt;
|
||||
ObAddr addr(ObAddr::IPV4, "127.0.0.1", 8888);
|
||||
|
||||
TransLogInfo trans_log_info;
|
||||
|
||||
// redo info
|
||||
int64_t redo_cnt = 0;
|
||||
ObLogIdArray redo_log_ids;
|
||||
// prepare info
|
||||
int64_t seq = 0;
|
||||
ObPartitionKey pkey(1000U, 1, 1);
|
||||
int64_t prepare_timestamp = SP_PREPARE_TIMESTAMP;
|
||||
ObTransID trans_id(addr);
|
||||
uint64_t prepare_log_id = 0;
|
||||
uint64_t CLOUSTER_ID = 1000;
|
||||
// commit info
|
||||
int64_t global_trans_version = SP_GLOBAL_TRANS_VERSION;
|
||||
PartitionLogInfoArray ptl_ids;
|
||||
|
||||
// Log gen.
|
||||
SpTransLogEntryGeneratorBase log_gen(pkey, trans_id);
|
||||
// Task Pool.
|
||||
ObLogTransTaskPool<PartTransTask> task_pool;
|
||||
EXPECT_EQ(OB_SUCCESS, init_task_pool(task_pool));
|
||||
// Parser.
|
||||
MockParser1 parser;
|
||||
//MockParser2 parser;
|
||||
EXPECT_EQ(OB_SUCCESS, parser.init());
|
||||
|
||||
// Partitioned Transaction Parser
|
||||
PartTransResolver pr;
|
||||
err = pr.init(pkey, parser, task_pool);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Read logs.
|
||||
ObLogIdArray missing;
|
||||
TransStatInfo tsi;
|
||||
volatile bool stop_flag = false;
|
||||
|
||||
for (int64_t idx = 0; idx < trans_cnt; ++idx) {
|
||||
redo_cnt = get_timestamp() % TRANS_REDO_LOG_COUNT + 1;
|
||||
// For the first transaction, if redo_cnt=1 there is only a single redo-commit log, so prepare_log_id would be 0, which is illegal
|
||||
if (0 == idx && 1 == redo_cnt) {
|
||||
redo_cnt++;
|
||||
}
|
||||
redo_log_ids.reset();
|
||||
for (int64_t cnt = 0; cnt < redo_cnt; ++cnt) {
|
||||
EXPECT_EQ(OB_SUCCESS, redo_log_ids.push_back(log_gen.get_log_id() + cnt));
|
||||
}
|
||||
// sp transaction does not have prepare log, prepare log id is the same as commit log id
|
||||
prepare_log_id = log_gen.get_log_id() + redo_cnt - 1;
|
||||
ptl_ids.reset();
|
||||
ObPartitionLogInfo ptl_id(pkey, prepare_log_id, PREPARE_TIMESTAMP);
|
||||
err = ptl_ids.push_back(ptl_id);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
trans_log_info.reset(redo_cnt, redo_log_ids, seq, pkey, prepare_timestamp,
|
||||
trans_id, prepare_log_id, CLOUSTER_ID, global_trans_version, ptl_ids);
|
||||
EXPECT_EQ(OB_SUCCESS, parser.push_into_queue(&trans_log_info));
|
||||
seq++;
|
||||
|
||||
log_gen.next_trans_with_redo_commit(redo_cnt);
|
||||
clog::ObLogEntry log_entry;
|
||||
|
||||
while (OB_SUCCESS == log_gen.next_log_entry_with_redo_commit(log_entry)) {
|
||||
err = pr.read(log_entry, missing, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
err = pr.flush(stop_flag);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Verify the correctness of partition task data
|
||||
bool check_result;
|
||||
EXPECT_EQ(OB_SUCCESS, parser.get_check_result(check_result));
|
||||
EXPECT_TRUE(check_result);
|
||||
}
|
||||
|
||||
// Check.
|
||||
EXPECT_EQ(commit_trans_cnt, parser.get_commit_trans_cnt());
|
||||
|
||||
// Destroy.
|
||||
pr.destroy();
|
||||
task_pool.destroy();
|
||||
fifo_allocator.destroy();
|
||||
}
|
||||
|
||||
/*
|
||||
* Test scenario:
|
||||
* For N Sp transactions, redo and commit in the same log entry
|
||||
* Each Sp transaction has a random redo log
|
||||
*
|
||||
* Log sequence: redo, redo, ... redo, redo-commit
|
||||
* Read to redo-commit and find redo log missing, need to read miss log
|
||||
*
|
||||
*/
|
||||
TEST(PartTransResolver, BasicTest7)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
|
||||
const int64_t trans_cnt = TRANS_COUNT;
|
||||
//const int64_t trans_cnt = 2;
|
||||
const int64_t commit_trans_cnt = trans_cnt;
|
||||
int64_t redo_cnt = 0;
|
||||
int64_t miss_redo_cnt = 0;
|
||||
int64_t can_read_redo_cnt = 0;
|
||||
|
||||
// Pkey.
|
||||
ObPartitionKey pkey(1000U, 1, 1);
|
||||
ObAddr addr(ObAddr::IPV4, "127.0.0.1", 8888);
|
||||
ObTransID trans_id(addr);
|
||||
|
||||
// Log gen.
|
||||
SpTransLogEntryGenerator1 log_gen(pkey, trans_id);
|
||||
// Task Pool.
|
||||
ObLogTransTaskPool<PartTransTask> task_pool;
|
||||
EXPECT_EQ(OB_SUCCESS, init_task_pool(task_pool));
|
||||
// Parser.
|
||||
MockParser2 parser;
|
||||
|
||||
// Partitioned Transaction Parser
|
||||
PartTransResolver pr;
|
||||
err = pr.init(pkey, parser, task_pool);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// Read logs.
|
||||
ObLogIdArray missing;
|
||||
TransStatInfo tsi;
|
||||
volatile bool stop_flag = false;
|
||||
|
||||
// case 1: redo, redo, redo, ... redo, commit
|
||||
// case 2: redo, redo, redo, ... redo, redo-commit
|
||||
bool is_normal_trans = false;
|
||||
bool is_redo_with_commit_trans = false;
|
||||
for (int64_t idx = 0; idx < trans_cnt; ++idx) {
|
||||
if (idx < trans_cnt / 2) {
|
||||
is_normal_trans = true;
|
||||
} else {
|
||||
is_redo_with_commit_trans = true;
|
||||
}
|
||||
|
||||
redo_cnt = get_timestamp() % TRANS_REDO_LOG_COUNT + 1;
|
||||
//redo_cnt = 2;
|
||||
if (is_normal_trans) {
|
||||
miss_redo_cnt = get_timestamp() % redo_cnt + 1;
|
||||
can_read_redo_cnt = redo_cnt - miss_redo_cnt;
|
||||
} else if (is_redo_with_commit_trans){
|
||||
miss_redo_cnt = get_timestamp() % redo_cnt;
|
||||
can_read_redo_cnt = redo_cnt - miss_redo_cnt - 1;
|
||||
} else {
|
||||
}
|
||||
|
||||
if (is_normal_trans) {
|
||||
log_gen.next_trans_with_miss_redo(redo_cnt, miss_redo_cnt, SP_NORMAL_TRAN);
|
||||
} else if (is_redo_with_commit_trans){
|
||||
log_gen.next_trans_with_miss_redo(redo_cnt, miss_redo_cnt, SP_REDO_WITH_COMMIT_TRAN);
|
||||
} else {
|
||||
}
|
||||
|
||||
uint64_t start_redo_log_id = log_gen.get_log_id();
|
||||
clog::ObLogEntry log_entry;
|
||||
|
||||
// First read the can_read_redo_cnt redo log
|
||||
for (int64_t log_cnt = 0; log_cnt < can_read_redo_cnt; ++log_cnt) {
|
||||
if (is_normal_trans) {
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.next_log_entry_missing_redo(SP_NORMAL_TRAN, log_entry));
|
||||
} else if (is_redo_with_commit_trans) {
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.next_log_entry_missing_redo(SP_REDO_WITH_COMMIT_TRAN, log_entry));
|
||||
} else {
|
||||
}
|
||||
err = pr.read(log_entry, missing, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
// Read the commit log and find the missing redo logs
|
||||
if (is_normal_trans) {
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.next_log_entry_missing_redo(SP_NORMAL_TRAN, log_entry));
|
||||
} else if (is_redo_with_commit_trans) {
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.next_log_entry_missing_redo(SP_REDO_WITH_COMMIT_TRAN, log_entry));
|
||||
} else {
|
||||
}
|
||||
err = pr.read(log_entry, missing, tsi);
|
||||
EXPECT_EQ(OB_ITEM_NOT_SETTED, err);
|
||||
|
||||
// Verify the missing array and read the missing redo logs
|
||||
const int64_t miss_array_cnt = missing.count();
|
||||
EXPECT_EQ(miss_redo_cnt, miss_array_cnt);
|
||||
for (int64_t log_cnt = 0; log_cnt < miss_array_cnt; ++log_cnt) {
|
||||
LOG_DEBUG("miss", K(missing[log_cnt]));
|
||||
EXPECT_EQ(start_redo_log_id, missing[log_cnt]);
|
||||
start_redo_log_id++;
|
||||
|
||||
clog::ObLogEntry miss_log_entry;
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.next_miss_log_entry(missing[log_cnt], miss_log_entry));
|
||||
err = pr.read_missing_redo(miss_log_entry);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
// After reading the missing redo logs, read the commit log again to advance the partition transaction task and free the commit_log_entry memory
|
||||
if (is_normal_trans) {
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.get_commit_log_entry(SP_NORMAL_TRAN, log_entry));
|
||||
} else if (is_redo_with_commit_trans){
|
||||
EXPECT_EQ(OB_SUCCESS, log_gen.get_commit_log_entry(SP_REDO_WITH_COMMIT_TRAN, log_entry));
|
||||
} else {
|
||||
}
|
||||
err = pr.read(log_entry, missing, tsi);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
err = pr.flush(stop_flag);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
// Check.
|
||||
EXPECT_EQ(commit_trans_cnt, parser.get_commit_trans_cnt());
|
||||
|
||||
// Destroy.
|
||||
pr.destroy();
|
||||
task_pool.destroy();
|
||||
fifo_allocator.destroy();
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
//ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
ObLogger &logger = ObLogger::get_logger();
|
||||
logger.set_file_name("test_ob_log_part_trans_resolver.log", true);
|
||||
logger.set_log_level(OB_LOG_LEVEL_INFO);
|
||||
testing::InitGoogleTest(&argc,argv);
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
175
unittest/obcdc/test_ob_log_part_trans_resolver_new.cpp
Normal file
175
unittest/obcdc/test_ob_log_part_trans_resolver_new.cpp
Normal file
@ -0,0 +1,175 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#define USING_LOG_PREFIX OBLOG_FETCHER
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
|
||||
#include "share/ob_define.h"
|
||||
#include "storage/ob_storage_log_type.h"
|
||||
#include "storage/transaction/ob_trans_log.h"
|
||||
#include "ob_log_fetch_stat_info.h"
|
||||
|
||||
#define private public
|
||||
#include "obcdc/src/ob_log_part_trans_resolver.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
using namespace transaction;
|
||||
using namespace storage;
|
||||
using namespace clog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
void call_sort_and_unique_missing_log_ids(IObLogPartTransResolver::ObLogMissingInfo &missing_info)
|
||||
{
|
||||
LOG_INFO("MISSING LOG [BEGIN]", K(missing_info));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.sort_and_unique_missing_log_ids());
|
||||
LOG_INFO("MISSING LOG [END]", K(missing_info));
|
||||
}
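
// Illustrative sketch (not part of the original tests): sort_and_unique over a
// small log id array can be done with an in-place insertion sort followed by a
// deduplication pass; the assertions in the test below only depend on this
// observable behaviour. The helper is an illustrative stand-in added here, not
// the ObLogMissingInfo implementation.
static int64_t sort_and_unique_sketch(uint64_t *ids, const int64_t count)
{
  // Insertion sort: fine for the handful of missing log ids handled here.
  for (int64_t i = 1; i < count; ++i) {
    const uint64_t key = ids[i];
    int64_t j = i - 1;
    while (j >= 0 && ids[j] > key) {
      ids[j + 1] = ids[j];
      --j;
    }
    ids[j + 1] = key;
  }
  // Deduplicate adjacent equal ids, returning the new count.
  int64_t uniq = (count > 0) ? 1 : 0;
  for (int64_t i = 1; i < count; ++i) {
    if (ids[i] != ids[uniq - 1]) {
      ids[uniq++] = ids[i];
    }
  }
  return uniq;
}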
|
||||
|
||||
TEST(ObLogPartTransResolver, Function1)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
IObLogPartTransResolver::ObLogMissingInfo missing_info;
|
||||
ObLogIdArray &missing_log_id = missing_info.missing_log_ids_;
|
||||
|
||||
// 1. one miss log with id 1
|
||||
missing_info.reset();
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
call_sort_and_unique_missing_log_ids(missing_info);
|
||||
EXPECT_EQ(1, missing_info.get_missing_log_count());
|
||||
|
||||
// 2. two miss log with id 1
|
||||
missing_info.reset();
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
call_sort_and_unique_missing_log_ids(missing_info);
|
||||
EXPECT_EQ(1, missing_info.get_missing_log_count());
|
||||
|
||||
// 3. repeatable miss log with id 1
|
||||
missing_info.reset();
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
call_sort_and_unique_missing_log_ids(missing_info);
|
||||
EXPECT_EQ(1, missing_info.get_missing_log_count());
|
||||
|
||||
// 4. multi repeatable miss log
|
||||
missing_info.reset();
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(2));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(2));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(2));
|
||||
call_sort_and_unique_missing_log_ids(missing_info);
|
||||
EXPECT_EQ(2, missing_info.get_missing_log_count());
|
||||
for (int64_t idx=0; OB_SUCC(ret) && idx < missing_log_id.count(); ++idx) {
|
||||
EXPECT_EQ(idx+1, missing_log_id.at(idx));
|
||||
}
|
||||
|
||||
// 5. multi repeatable miss log
|
||||
missing_info.reset();
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(2));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(2));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(2));
|
||||
call_sort_and_unique_missing_log_ids(missing_info);
|
||||
EXPECT_EQ(2, missing_info.get_missing_log_count());
|
||||
for (int64_t idx=0; OB_SUCC(ret) && idx < missing_log_id.count(); ++idx) {
|
||||
EXPECT_EQ(idx+1, missing_log_id.at(idx));
|
||||
}
|
||||
|
||||
// 6. multi repeatable miss log
|
||||
missing_info.reset();
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(2));
|
||||
call_sort_and_unique_missing_log_ids(missing_info);
|
||||
EXPECT_EQ(2, missing_info.get_missing_log_count());
|
||||
for (int64_t idx=0; OB_SUCC(ret) && idx < missing_log_id.count(); ++idx) {
|
||||
EXPECT_EQ(idx+1, missing_log_id.at(idx));
|
||||
}
|
||||
|
||||
|
||||
// 7. multi repeatable miss log
|
||||
missing_info.reset();
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(2));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(2));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(3));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(4));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(4));
|
||||
call_sort_and_unique_missing_log_ids(missing_info);
|
||||
EXPECT_EQ(4, missing_info.get_missing_log_count());
|
||||
for (int64_t idx=0; OB_SUCC(ret) && idx < missing_log_id.count(); ++idx) {
|
||||
EXPECT_EQ(idx+1, missing_log_id.at(idx));
|
||||
}
|
||||
|
||||
// 8. multi repeatable miss log
|
||||
missing_info.reset();
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(2));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(2));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(2));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(3));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(3));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(3));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(3));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(3));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(4));
|
||||
call_sort_and_unique_missing_log_ids(missing_info);
|
||||
EXPECT_EQ(4, missing_info.get_missing_log_count());
|
||||
for (int64_t idx=0; OB_SUCC(ret) && idx < missing_log_id.count(); ++idx) {
|
||||
EXPECT_EQ(idx+1, missing_log_id.at(idx));
|
||||
}
|
||||
|
||||
// 9. multi repeatable miss log
|
||||
missing_info.reset();
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(1));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(2));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(3));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(4));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(4));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(4));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(4));
|
||||
EXPECT_EQ(OB_SUCCESS, missing_info.push_back_missing_log_id(4));
|
||||
call_sort_and_unique_missing_log_ids(missing_info);
|
||||
EXPECT_EQ(4, missing_info.get_missing_log_count());
|
||||
for (int64_t idx=0; OB_SUCC(ret) && idx < missing_log_id.count(); ++idx) {
|
||||
EXPECT_EQ(idx+1, missing_log_id.at(idx));
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
//ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
ObLogger &logger = ObLogger::get_logger();
|
||||
logger.set_file_name("test_ob_log_part_trans_resolver.log", true);
|
||||
logger.set_log_level(OB_LOG_LEVEL_INFO);
|
||||
testing::InitGoogleTest(&argc,argv);
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
||||
366
unittest/obcdc/test_ob_log_start_log_id_locator.cpp
Normal file
366
unittest/obcdc/test_ob_log_start_log_id_locator.cpp
Normal file
@ -0,0 +1,366 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#define USING_LOG_PREFIX OBLOG_FETCHER
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "share/ob_define.h"
|
||||
#include "lib/atomic/ob_atomic.h"
|
||||
#include "ob_log_utils.h"
|
||||
#define private public
|
||||
#include "test_ob_log_fetcher_common_utils.h"
|
||||
#include "ob_log_start_log_id_locator.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
class TestObLogStartLogIdLocator: public ::testing::Test
|
||||
{
|
||||
public :
|
||||
virtual void SetUp() {}
|
||||
virtual void TearDown() {}
|
||||
public :
|
||||
static const int64_t WORKER_COUNT = 3;
|
||||
static const int64_t LOCATE_COUNT = 1;
|
||||
static const int64_t SINGLE_WORKER_COUNT = 1;
|
||||
};
|
||||
|
||||
static const int64_t SERVER_COUNT = 10;
|
||||
static const int64_t START_LOG_ID_REQUEST_COUNT = 5 * 10000;
|
||||
// for test break info
|
||||
static const int64_t BREAK_INFO_START_LOG_ID_REQUEST_COUNT = 256;
|
||||
static const int64_t TEST_TIME_LIMIT = 10 * _MIN_;
|
||||
|
||||
void generate_req(const int64_t req_cnt, StartLogIdLocateReq *&request_array,
|
||||
const int64_t start_tstamp)
|
||||
{
|
||||
// Build requests.
|
||||
const int64_t AllSvrCnt = 10;
|
||||
ObAddr svrs[AllSvrCnt];
|
||||
for (int64_t idx = 0, cnt = AllSvrCnt; idx < cnt; ++idx) {
|
||||
svrs[idx] = ObAddr(ObAddr::IPV4, "127.0.0.1", (int32_t)(idx + 1000));
|
||||
}
|
||||
|
||||
request_array = new StartLogIdLocateReq[req_cnt];
|
||||
for (int64_t idx = 0, cnt = req_cnt; idx < cnt; ++idx) {
|
||||
StartLogIdLocateReq &r = request_array[idx];
|
||||
r.reset();
|
||||
r.pkey_ = ObPartitionKey((uint64_t)(1000 + idx), 0, 1);
|
||||
r.start_tstamp_ = start_tstamp;
|
||||
// Set server list.
|
||||
for (int64_t idx2 = 0, cnt2 = AllSvrCnt; idx2 < cnt2; ++idx2) {
|
||||
StartLogIdLocateReq::SvrItem item;
|
||||
item.reset();
|
||||
item.svr_ = svrs[idx2];
|
||||
r.svr_list_.push_back(item);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void free_req(StartLogIdLocateReq *request_array)
|
||||
{
|
||||
delete[] request_array;
|
||||
}
|
||||
|
||||
/*
|
||||
* Worker.
|
||||
*/
|
||||
class TestWorker : public liboblog::Runnable
|
||||
{
|
||||
public:
|
||||
ObLogStartLogIdLocator *locator_;
|
||||
StartLogIdLocateReq *request_array_;
|
||||
int64_t request_cnt_;
|
||||
int64_t all_svr_cnt_;
|
||||
bool push_req_finish_;
|
||||
|
||||
void reset(ObLogStartLogIdLocator *locator, StartLogIdLocateReq *req_array,
|
||||
int64_t req_cnt, int64_t all_svr_cnt)
|
||||
{
|
||||
locator_ = locator;
|
||||
request_array_ = req_array;
|
||||
request_cnt_ = req_cnt;
|
||||
all_svr_cnt_ = all_svr_cnt;
|
||||
push_req_finish_ = false;
|
||||
}
|
||||
|
||||
virtual int routine()
|
||||
{
|
||||
// Push requests into locator.
|
||||
for (int64_t idx = 0, cnt = request_cnt_; idx < cnt; ++idx) {
|
||||
StartLogIdLocateReq &r = request_array_[idx];
|
||||
EXPECT_EQ(OB_SUCCESS, locator_->async_start_log_id_req(&r));
|
||||
if (0 == (idx % 1000)) {
|
||||
usec_sleep(10 * _MSEC_);
|
||||
}
|
||||
}
|
||||
ATOMIC_STORE(&push_req_finish_, true);
|
||||
|
||||
// Wait for requests end. Max test time should set.
|
||||
int64_t end_request_cnt = 0;
|
||||
const int64_t start_test_tstamp = get_timestamp();
|
||||
while (((get_timestamp() - start_test_tstamp) < TEST_TIME_LIMIT)
|
||||
&& (end_request_cnt < request_cnt_)) {
|
||||
for (int64_t idx = 0, cnt = request_cnt_; idx < cnt; ++idx) {
|
||||
StartLogIdLocateReq &r = request_array_[idx];
|
||||
if (StartLogIdLocateReq::DONE == r.get_state()) {
|
||||
end_request_cnt += 1;
|
||||
r.set_state(StartLogIdLocateReq::IDLE);
|
||||
}
|
||||
}
|
||||
usec_sleep(100 * _MSEC_);
|
||||
}
|
||||
// Assert if test cannot finish.
|
||||
EXPECT_EQ(request_cnt_, end_request_cnt);
|
||||
|
||||
// Do some statistics.
|
||||
int64_t svr_consume_distribution[all_svr_cnt_]; // 1, 2, 3, ...
|
||||
for (int64_t idx = 0, cnt = all_svr_cnt_; idx < cnt; ++idx) {
|
||||
svr_consume_distribution[idx] = 0;
|
||||
}
|
||||
int64_t succ_cnt = 0;
|
||||
for (int64_t idx = 0, cnt = request_cnt_; idx < cnt; ++idx) {
|
||||
StartLogIdLocateReq &r = request_array_[idx];
|
||||
EXPECT_GE(r.svr_list_consumed_, 0);
|
||||
svr_consume_distribution[(r.svr_list_consumed_ - 1)] += 1;
|
||||
uint64_t start_log_id = 0;
|
||||
common::ObAddr svr;
|
||||
if (r.get_result(start_log_id, svr)) {
|
||||
succ_cnt += 1;
|
||||
EXPECT_EQ(r.pkey_.table_id_, start_log_id);
|
||||
}
|
||||
}
|
||||
|
||||
const int64_t BuffSize = 1024;
|
||||
char buf[BuffSize];
|
||||
int64_t pos = 0;
|
||||
for (int64_t idx = 0, cnt = all_svr_cnt_; idx < cnt; ++idx) {
|
||||
pos += snprintf(buf + pos, BuffSize - pos, "svr_cnt:%ld perc:%f ", (1 + idx),
|
||||
((double)svr_consume_distribution[idx] / (double)request_cnt_));
|
||||
}
|
||||
fprintf(stderr, "request count: %ld distribution: %s succeed perc: %f \n",
|
||||
request_cnt_, buf, (double)succ_cnt / (double)request_cnt_);
|
||||
|
||||
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
};
|
||||
|
||||
//////////////////////Basic function tests//////////////////////////////////////////
|
||||
TEST_F(TestObLogStartLogIdLocator, start_log_id_request)
|
||||
{
|
||||
StartLogIdLocateReq req;
|
||||
req.reset();
|
||||
EXPECT_TRUE(req.is_state_idle());
|
||||
|
||||
req.set_state_req();
|
||||
EXPECT_TRUE(req.is_state_req());
|
||||
EXPECT_EQ(StartLogIdLocateReq::REQ, req.get_state());
|
||||
|
||||
req.set_state_done();
|
||||
EXPECT_TRUE(req.is_state_done());
|
||||
EXPECT_EQ(StartLogIdLocateReq::DONE, req.get_state());
|
||||
|
||||
req.set_state_idle();
|
||||
EXPECT_TRUE(req.is_state_idle());
|
||||
EXPECT_EQ(StartLogIdLocateReq::IDLE, req.get_state());
|
||||
|
||||
/// build svr_list
|
||||
int ret = OB_SUCCESS;
|
||||
ObAddr svr_list[SERVER_COUNT];
|
||||
for (int64_t idx = 0, cnt = SERVER_COUNT; idx < cnt; ++idx) {
|
||||
svr_list[idx] = ObAddr(ObAddr::IPV4, "127.0.0.1", (int32_t)(idx + 1000));
|
||||
}
|
||||
|
||||
for (int64_t idx = 0, cnt = SERVER_COUNT; (OB_SUCCESS == ret) && idx < cnt; ++idx) {
|
||||
StartLogIdLocateReq::SvrItem item;
|
||||
item.reset(svr_list[idx]);
|
||||
if (OB_FAIL(req.svr_list_.push_back(item))) {
|
||||
LOG_ERROR("push error", K(ret));
|
||||
}
|
||||
}
|
||||
EXPECT_EQ(SERVER_COUNT, req.svr_list_.count());
|
||||
|
||||
// next_svr_item, cur_svr_item
|
||||
for (int64_t idx = 0, cnt = SERVER_COUNT; (OB_SUCCESS == ret) && idx < cnt; ++idx) {
|
||||
StartLogIdLocateReq::SvrItem *item;
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, req.next_svr_item(item));
|
||||
EXPECT_EQ(svr_list[idx], item->svr_);
|
||||
EXPECT_EQ(OB_SUCCESS, req.cur_svr_item(item));
|
||||
EXPECT_EQ(svr_list[idx], item->svr_);
|
||||
}
|
||||
// is_request_ended, get_result
|
||||
EXPECT_TRUE(req.is_request_ended(LOCATE_COUNT));
|
||||
uint64_t start_log_id = 0;
|
||||
common::ObAddr svr;
|
||||
EXPECT_FALSE(req.get_result(start_log_id, svr));
|
||||
EXPECT_EQ(OB_INVALID_ID, start_log_id);
|
||||
}
|
||||
|
||||
//TEST_F(TestObLogStartLogIdLocator, DISABLED_locator)
|
||||
TEST_F(TestObLogStartLogIdLocator, locator)
|
||||
{
|
||||
const int64_t TestWorkerCnt = 3;
|
||||
// genereate data
|
||||
StartLogIdLocateReq *request_arrays[TestWorkerCnt];
|
||||
for (int64_t idx = 0; idx < TestWorkerCnt; idx++) {
|
||||
//StartLogIdLocateReq *request_array = NULL;
|
||||
generate_req(START_LOG_ID_REQUEST_COUNT, request_arrays[idx], get_timestamp());
|
||||
OB_ASSERT(NULL != request_arrays[idx]);
|
||||
}
|
||||
|
||||
MockFetcherErrHandler1 err_handler1;
|
||||
MockObLogStartLogIdRpc rpc;
|
||||
ObLogStartLogIdLocator locator;
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, locator.init(WORKER_COUNT, LOCATE_COUNT, rpc, err_handler1));
|
||||
EXPECT_EQ(OB_SUCCESS, locator.start());
|
||||
|
||||
TestWorker workers[TestWorkerCnt];
|
||||
for (int64_t idx = 0, cnt = TestWorkerCnt; idx < cnt; ++idx) {
|
||||
TestWorker &w = workers[idx];
|
||||
w.reset(&locator, request_arrays[idx], START_LOG_ID_REQUEST_COUNT, SERVER_COUNT);
|
||||
w.create();
|
||||
}
|
||||
|
||||
for (int64_t idx = 0, cnt = TestWorkerCnt; idx < cnt; ++idx) {
|
||||
TestWorker &w = workers[idx];
|
||||
w.join();
|
||||
}
|
||||
|
||||
// free
|
||||
for (int64_t idx = 0; idx < TestWorkerCnt; idx++) {
|
||||
free_req(request_arrays[idx]);
|
||||
request_arrays[idx] = NULL;
|
||||
}
|
||||
locator.destroy();
|
||||
}
|
||||
|
||||
TEST_F(TestObLogStartLogIdLocator, test_out_of_lower_bound)
|
||||
{
|
||||
// Default configuration of observer log retention time
|
||||
int64_t default_clog_save_time = ObLogStartLogIdLocator::g_observer_clog_save_time;
|
||||
|
||||
MockFetcherErrHandler1 err_handler1;
|
||||
MockObLogStartLogIdRpc rpc;
|
||||
ObLogStartLogIdLocator locator;
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, locator.init(WORKER_COUNT, LOCATE_COUNT, rpc, err_handler1));
|
||||
EXPECT_EQ(OB_SUCCESS, locator.start());
|
||||
|
||||
// Generate data, set start time to current time
|
||||
StartLogIdLocateReq *req = NULL;
|
||||
int64_t start_tstamp = get_timestamp();
|
||||
generate_req(1, req, start_tstamp);
|
||||
|
||||
// RPC setup, observer returns success, but partition returns log less than lower bound
|
||||
rpc.set_err(OB_SUCCESS, OB_ERR_OUT_OF_LOWER_BOUND);
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, locator.async_start_log_id_req(req));
|
||||
while (req->get_state() != StartLogIdLocateReq::DONE) {
|
||||
usec_sleep(100 * _MSEC_);
|
||||
}
|
||||
|
||||
// Since all servers return less than the lower bound and have a start time stamp of less than 2 hours,
|
||||
// expect the location to succeed
|
||||
uint64_t start_log_id = OB_INVALID_ID;
|
||||
common::ObAddr svr;
|
||||
EXPECT_EQ(true, req->get_result(start_log_id, svr));
|
||||
EXPECT_EQ(req->pkey_.get_table_id(), start_log_id);
|
||||
|
||||
// free
|
||||
free_req(req);
|
||||
req = NULL;
|
||||
|
||||
/////////////// Set the start time past the log retention time, in which case the location returns a failure ///////////////////
|
||||
// Start-up time less than minimum log retention time
|
||||
start_tstamp = get_timestamp() - default_clog_save_time - 1;
|
||||
generate_req(1, req, start_tstamp); // Regeneration request
|
||||
|
||||
// RPC setup, observer returns success, but partition returns less than lower bound
|
||||
rpc.set_err(OB_SUCCESS, OB_ERR_OUT_OF_LOWER_BOUND);
|
||||
|
||||
// Execute location requests
|
||||
EXPECT_EQ(OB_SUCCESS, locator.async_start_log_id_req(req));
|
||||
while (req->get_state() != StartLogIdLocateReq::DONE) {
|
||||
usec_sleep(100 * _MSEC_);
|
||||
}
|
||||
|
||||
// Although all servers return less than the lower bound, the start-up timestamp is no longer within the log retention time
|
||||
// and expects the location to fail
|
||||
EXPECT_EQ(false, req->get_result(start_log_id, svr));
|
||||
|
||||
// free
|
||||
free_req(req);
|
||||
req = NULL;
|
||||
|
||||
// destroy locator
|
||||
locator.destroy();
|
||||
}
|
||||
|
||||
// When the break_info message is returned, test the correct processing
|
||||
TEST_F(TestObLogStartLogIdLocator, break_info_test)
|
||||
{
|
||||
// genereate data
|
||||
StartLogIdLocateReq *request_array;
|
||||
generate_req(BREAK_INFO_START_LOG_ID_REQUEST_COUNT, request_array, get_timestamp());
|
||||
OB_ASSERT(NULL != request_array);
|
||||
|
||||
MockFetcherErrHandler1 err_handler1;
|
||||
MockObLogRpcDerived2 rpc;
|
||||
EXPECT_EQ(OB_SUCCESS, rpc.init(BREAK_INFO_START_LOG_ID_REQUEST_COUNT));
|
||||
ObLogStartLogIdLocator locator;
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, locator.init(SINGLE_WORKER_COUNT, LOCATE_COUNT, rpc, err_handler1));
|
||||
|
||||
// Insert all data first, then open the StartLogIdLocator thread to ensure that all subsequent requests are aggregated on a single server;
|
||||
TestWorker worker;
|
||||
worker.reset(&locator, request_array, BREAK_INFO_START_LOG_ID_REQUEST_COUNT, SERVER_COUNT);
|
||||
worker.create();
|
||||
|
||||
while (false == ATOMIC_LOAD(&worker.push_req_finish_)) {
|
||||
}
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, locator.start());
|
||||
|
||||
// join
|
||||
worker.join();
|
||||
// free
|
||||
free_req(request_array);
|
||||
request_array = NULL;
|
||||
|
||||
locator.destroy();
|
||||
rpc.destroy();
|
||||
}
|
||||
|
||||
}//end of unittest
|
||||
}//end of oceanbase
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
// ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
// testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
int ret = 1;
|
||||
ObLogger &logger = ObLogger::get_logger();
|
||||
logger.set_file_name("test_ob_log_start_log_id_locator.log", true);
|
||||
logger.set_log_level(OB_LOG_LEVEL_INFO);
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
ret = RUN_ALL_TESTS();
|
||||
return ret;
|
||||
}
|
||||
296
unittest/obcdc/test_ob_log_svr_finder.cpp
Normal file
296
unittest/obcdc/test_ob_log_svr_finder.cpp
Normal file
@ -0,0 +1,296 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#define USING_LOG_PREFIX OBLOG_FETCHER
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "share/ob_define.h"
|
||||
#define private public
|
||||
#include "test_ob_log_fetcher_common_utils.h"
|
||||
#include "ob_log_utils.h"
|
||||
#include "ob_log_svr_finder.h"
|
||||
#include "ob_log_all_svr_cache.h"
|
||||
#include "lib/atomic/ob_atomic.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
class TestObLogSvrFinder: public ::testing::Test
|
||||
{
|
||||
public :
|
||||
virtual void SetUp() {}
|
||||
virtual void TearDown() {}
|
||||
public :
|
||||
static const int64_t SVR_FINDER_THREAD_NUM = 1;
|
||||
};
|
||||
|
||||
static const int64_t TEST_TIME_LIMIT = 10 * _MIN_;
|
||||
|
||||
void generate_part_svr_list(const int64_t count, PartSvrList *&part_svr_list)
|
||||
{
|
||||
part_svr_list = static_cast<PartSvrList *>(
|
||||
ob_malloc(sizeof(PartSvrList) * count));
|
||||
for (int64_t idx = 0; idx < count; idx++) {
|
||||
new (part_svr_list + idx) PartSvrList();
|
||||
}
|
||||
}
|
||||
|
||||
// Constructing SvrFindReq, two types of requests
|
||||
// 1. logid request
|
||||
// 2. timestamp request
|
||||
void generate_svr_finder_requset(const int64_t count,
|
||||
PartSvrList *part_svr_list,
|
||||
SvrFindReq *&svr_req_array)
|
||||
{
|
||||
svr_req_array = static_cast<SvrFindReq *>(
|
||||
ob_malloc(sizeof(SvrFindReq) * count));
|
||||
for (int64_t idx = 0; idx < count; idx++) {
|
||||
new (svr_req_array + idx) SvrFindReq();
|
||||
ObPartitionKey pkey = ObPartitionKey((uint64_t)(1000 + idx), 0, 1);
|
||||
|
||||
const int64_t seed = get_timestamp();
|
||||
if ((seed % 100) < 50) {
|
||||
svr_req_array[idx].reset_for_req_by_log_id(part_svr_list[idx], pkey, idx);
|
||||
EXPECT_TRUE(svr_req_array[idx].is_state_idle());
|
||||
} else {
|
||||
svr_req_array[idx].reset_for_req_by_tstamp(part_svr_list[idx], pkey, seed);
|
||||
EXPECT_TRUE(svr_req_array[idx].is_state_idle());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// build LeaderFindReq
|
||||
void generate_leader_finder_request(const int64_t count, LeaderFindReq *&leader_req_array)
|
||||
{
|
||||
leader_req_array = static_cast<LeaderFindReq *>(
|
||||
ob_malloc(sizeof(LeaderFindReq) * count));
|
||||
|
||||
for (int64_t idx = 0; idx < count; idx++) {
|
||||
new (leader_req_array + idx) LeaderFindReq();
|
||||
ObPartitionKey pkey = ObPartitionKey((uint64_t)(1000 + idx), 0, 1);
|
||||
leader_req_array[idx].reset(pkey);
|
||||
EXPECT_TRUE(leader_req_array[idx].is_state_idle());
|
||||
}
|
||||
}
|
||||
|
||||
void wait_svr_finer_req_end(SvrFindReq *svr_req_array,
|
||||
const int64_t count,
|
||||
int64_t &end_request_cnt)
|
||||
{
|
||||
end_request_cnt = 0;
|
||||
const int64_t start_test_tstamp = get_timestamp();
|
||||
while (((get_timestamp() - start_test_tstamp) < TEST_TIME_LIMIT)
|
||||
&& (end_request_cnt < count)) {
|
||||
for (int64_t idx = 0, cnt = count; idx < cnt; ++idx) {
|
||||
SvrFindReq &r = svr_req_array[idx];
|
||||
if (SvrFindReq::DONE == r.get_state()) {
|
||||
end_request_cnt += 1;
|
||||
r.set_state_idle();
|
||||
}
|
||||
}
|
||||
usec_sleep(100 * _MSEC_);
|
||||
}
|
||||
}
|
||||
|
||||
void wait_leader_finer_req_end(LeaderFindReq *leader_req_array,
|
||||
const int64_t count,
|
||||
int64_t &end_request_cnt)
|
||||
{
|
||||
end_request_cnt = 0;
|
||||
const int64_t start_test_tstamp = get_timestamp();
|
||||
while (((get_timestamp() - start_test_tstamp) < TEST_TIME_LIMIT)
|
||||
&& (end_request_cnt < count)) {
|
||||
for (int64_t idx = 0, cnt = count; idx < cnt; ++idx) {
|
||||
LeaderFindReq &r = leader_req_array[idx];
|
||||
if (LeaderFindReq::DONE == r.get_state()) {
|
||||
end_request_cnt += 1;
|
||||
r.set_state_idle();
|
||||
}
|
||||
}
|
||||
usec_sleep(100 * _MSEC_);
|
||||
}
|
||||
}
|
||||
|
||||
//////////////////////Basic function tests//////////////////////////////////////////
|
||||
TEST_F(TestObLogSvrFinder, init)
|
||||
{
|
||||
MockFetcherErrHandler1 err_handler;
|
||||
MockSysTableHelperDerive1 mock_systable_helper;
|
||||
|
||||
// AllSvrCache init
|
||||
ObLogAllSvrCache all_svr_cache;
|
||||
EXPECT_EQ(OB_SUCCESS, all_svr_cache.init(mock_systable_helper, err_handler));
|
||||
|
||||
// SvrFinder init
|
||||
ObLogSvrFinder svr_finder;
|
||||
EXPECT_EQ(OB_SUCCESS, svr_finder.init(SVR_FINDER_THREAD_NUM, err_handler,
|
||||
all_svr_cache, mock_systable_helper));
|
||||
// sever list for partition
|
||||
PartSvrList *part_svr_list = NULL;
|
||||
generate_part_svr_list(SVR_FINDER_REQ_NUM, part_svr_list);
|
||||
|
||||
// Constructing SvrFindReq, two types of requests
|
||||
// 1. logid request
|
||||
// 2. timestamp request
|
||||
SvrFindReq *svr_req_array = NULL;
|
||||
generate_svr_finder_requset(SVR_FINDER_REQ_NUM, part_svr_list, svr_req_array);
|
||||
|
||||
// build LeaderFindReq
|
||||
LeaderFindReq *leader_req_array = NULL;
|
||||
generate_leader_finder_request(LEADER_FINDER_REQ_NUM, leader_req_array);
|
||||
|
||||
// push request to svr_finder
|
||||
for (int64_t idx = 0; idx < SVR_FINDER_REQ_NUM; idx++) {
|
||||
EXPECT_EQ(OB_SUCCESS, svr_finder.async_svr_find_req(svr_req_array + idx));
|
||||
}
|
||||
for (int64_t idx = 0; idx < LEADER_FINDER_REQ_NUM; idx++) {
|
||||
EXPECT_EQ(OB_SUCCESS, svr_finder.async_leader_find_req(leader_req_array + idx));
|
||||
}
|
||||
|
||||
// SvrFinder start
|
||||
EXPECT_EQ(OB_SUCCESS, svr_finder.start());
|
||||
|
||||
// Wait for asynchronous SvrFinderReq to finish
|
||||
int64_t end_svr_finder_req_cnt = 0;
|
||||
wait_svr_finer_req_end(svr_req_array, SVR_FINDER_REQ_NUM, end_svr_finder_req_cnt);
|
||||
// Assert
|
||||
EXPECT_EQ(SVR_FINDER_REQ_NUM, end_svr_finder_req_cnt);
|
||||
|
||||
// Waiting for the end of the asynchronous LeaderFinderReq
|
||||
int64_t end_leader_finder_req_cnt = 0;
|
||||
wait_leader_finer_req_end(leader_req_array, LEADER_FINDER_REQ_NUM, end_leader_finder_req_cnt);
|
||||
// Assert
|
||||
EXPECT_EQ(LEADER_FINDER_REQ_NUM, end_leader_finder_req_cnt);
|
||||
|
||||
// Validate SvrFinderReq results
|
||||
for (int64_t idx = 0; idx < SVR_FINDER_REQ_NUM; idx++) {
|
||||
PartSvrList &svr_list = part_svr_list[idx];
|
||||
PartSvrList::SvrItemArray svr_items = svr_list.svr_items_;
|
||||
int64_t EXPECT_START_LOG_ID = 0;
|
||||
int64_t EXPECT_END_LOG_ID = 0;
|
||||
|
||||
if (svr_req_array[idx].req_by_next_log_id_) {
|
||||
EXPECT_START_LOG_ID = svr_req_array[idx].next_log_id_;
|
||||
EXPECT_END_LOG_ID = EXPECT_START_LOG_ID + 10000;
|
||||
} else if (svr_req_array[idx].req_by_start_tstamp_) {
|
||||
EXPECT_START_LOG_ID = 0;
|
||||
EXPECT_END_LOG_ID = 65536;
|
||||
}
|
||||
|
||||
int cnt = QUERY_CLOG_HISTORY_VALID_COUNT + QUERY_META_INFO_ADD_COUNT;
|
||||
EXPECT_EQ(cnt, svr_list.count());
|
||||
// Validate log range
|
||||
for (int64_t svr_idx = 0; svr_idx < cnt; svr_idx++) {
|
||||
const PartSvrList::LogIdRange &range = svr_items[svr_idx].log_ranges_[0];
|
||||
if (svr_idx < QUERY_CLOG_HISTORY_VALID_COUNT) {
|
||||
// clog history record
|
||||
EXPECT_EQ(EXPECT_START_LOG_ID, range.start_log_id_);
|
||||
EXPECT_EQ(EXPECT_END_LOG_ID, range.end_log_id_);
|
||||
} else {
|
||||
// Additional records
|
||||
EXPECT_EQ(0, range.start_log_id_);
|
||||
EXPECT_EQ(OB_INVALID_ID, range.end_log_id_);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Validate LeaderFinderReq results
|
||||
ObAddr EXPECT_ADDR;
|
||||
EXPECT_ADDR.set_ip_addr("127.0.0.1", 8888);
|
||||
for (int64_t idx = 0; idx < LEADER_FINDER_REQ_NUM; idx++) {
|
||||
LeaderFindReq &req = leader_req_array[idx];
|
||||
EXPECT_TRUE(req.has_leader_);
|
||||
EXPECT_EQ(EXPECT_ADDR, req.leader_);
|
||||
}
|
||||
|
||||
// destroy
|
||||
ob_free(part_svr_list);
|
||||
ob_free(svr_req_array);
|
||||
svr_finder.destroy();
|
||||
all_svr_cache.destroy();
|
||||
}
|
||||
|
||||
// Used to test if SvrFinder can filter INACTIVE records
|
||||
TEST_F(TestObLogSvrFinder, inactive_test)
|
||||
{
|
||||
MockFetcherErrHandler1 err_handler;
|
||||
MockSysTableHelperDerive2 mock_systable_helper;
|
||||
|
||||
// AllSvrCache init
|
||||
ObLogAllSvrCache all_svr_cache;
|
||||
EXPECT_EQ(OB_SUCCESS, all_svr_cache.init(mock_systable_helper, err_handler));
|
||||
|
||||
// SvrFinder init
|
||||
ObLogSvrFinder svr_finder;
|
||||
EXPECT_EQ(OB_SUCCESS, svr_finder.init(SVR_FINDER_THREAD_NUM, err_handler,
|
||||
all_svr_cache, mock_systable_helper));
|
||||
// Declaration of partition sever list
|
||||
PartSvrList *part_svr_list = NULL;
|
||||
generate_part_svr_list(SVR_FINDER_REQ_NUM, part_svr_list);
|
||||
|
||||
// Constructing SvrFindReq, two types of requests
|
||||
// 1. logid request
|
||||
// 2. timestamp request
|
||||
SvrFindReq *svr_req_array = NULL;
|
||||
generate_svr_finder_requset(SVR_FINDER_REQ_NUM, part_svr_list, svr_req_array);
|
||||
|
||||
// push request to svr_finder
|
||||
for (int64_t idx = 0; idx < SVR_FINDER_REQ_NUM; idx++) {
|
||||
EXPECT_EQ(OB_SUCCESS, svr_finder.async_svr_find_req(svr_req_array + idx));
|
||||
}
|
||||
|
||||
// SvrFinder start
|
||||
EXPECT_EQ(OB_SUCCESS, svr_finder.start());
|
||||
|
||||
// Wait for asynchronous SvrFinderReq to finish
|
||||
int64_t end_svr_finder_req_cnt = 0;
|
||||
wait_svr_finer_req_end(svr_req_array, SVR_FINDER_REQ_NUM, end_svr_finder_req_cnt);
|
||||
// Assert
|
||||
EXPECT_EQ(SVR_FINDER_REQ_NUM, end_svr_finder_req_cnt);
|
||||
|
||||
// Validate SvrFinderReq results
|
||||
int cnt = (QUERY_CLOG_HISTORY_VALID_COUNT + QUERY_META_INFO_ADD_COUNT) / 2;
|
||||
for (int64_t idx = 0; idx < 1; idx++) {
|
||||
PartSvrList &svr_list = part_svr_list[idx];
|
||||
PartSvrList::SvrItemArray svr_items = svr_list.svr_items_;
|
||||
|
||||
EXPECT_EQ(cnt, svr_list.count());
|
||||
}
|
||||
|
||||
ob_free(part_svr_list);
|
||||
ob_free(svr_req_array);
|
||||
svr_finder.destroy();
|
||||
all_svr_cache.destroy();
|
||||
}
|
||||
|
||||
|
||||
}//end of unittest
|
||||
}//end of oceanbase
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
// ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
// testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
int ret = 1;
|
||||
ObLogger &logger = ObLogger::get_logger();
|
||||
logger.set_file_name("test_ob_log_svr_finder.log", true);
|
||||
logger.set_log_level(OB_LOG_LEVEL_INFO);
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
ret = RUN_ALL_TESTS();
|
||||
return ret;
|
||||
}
|
||||
198
unittest/obcdc/test_ob_log_timer.cpp
Normal file
198
unittest/obcdc/test_ob_log_timer.cpp
Normal file
@ -0,0 +1,198 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#define USING_LOG_PREFIX OBLOG_FETCHER
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "share/ob_define.h"
|
||||
#include "obcdc/src/ob_map_queue_thread.h"
|
||||
#include "ob_log_utils.h"
|
||||
#define private public
|
||||
#include "test_ob_log_fetcher_common_utils.h"
|
||||
#include "obcdc/src/ob_log_timer.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
static const int MAX_THREAD_NUM = 16;
|
||||
typedef common::ObMapQueueThread<MAX_THREAD_NUM> QueueThread;
|
||||
|
||||
// Timed task implementation
|
||||
class PushMapTimerTask : public ObLogTimerTask
|
||||
{
|
||||
public:
|
||||
PushMapTimerTask() : host_(NULL), start_time_(0), end_time_(0), process_count_(NULL)
|
||||
{}
|
||||
virtual ~PushMapTimerTask() {}
|
||||
|
||||
void reset()
|
||||
{
|
||||
host_ = NULL;
|
||||
start_time_ = 0;
|
||||
end_time_ = 0;
|
||||
process_count_ = NULL;
|
||||
}
|
||||
|
||||
void reset(int64_t *&process_count, QueueThread *host)
|
||||
{
|
||||
reset();
|
||||
process_count_ = process_count;
|
||||
host_ = host;
|
||||
}
|
||||
|
||||
public:
|
||||
virtual void process_timer_task() override
|
||||
{
|
||||
EXPECT_EQ(OB_SUCCESS, host_->push(this, static_cast<uint64_t>(get_timestamp())));
|
||||
end_time_ = get_timestamp();
|
||||
(*process_count_)++;
|
||||
}
|
||||
private:
|
||||
QueueThread *host_;
|
||||
int64_t start_time_;
|
||||
int64_t end_time_;
|
||||
// Record the number of successfully executed timed tasks
|
||||
int64_t *process_count_;
|
||||
private:
|
||||
DISALLOW_COPY_AND_ASSIGN(PushMapTimerTask);
|
||||
};
|
||||
typedef PushMapTimerTask Type;
|
||||
// Timer task count
|
||||
static const int64_t TASK_COUNT = 1000;
|
||||
|
||||
class TestObLogTimer: public ::testing::Test
|
||||
{
|
||||
public :
|
||||
virtual void SetUp() {}
|
||||
virtual void TearDown() {}
|
||||
public :
|
||||
static constexpr const int64_t TEST_TIME_LIMIT = 10 * _MIN_;
|
||||
// ObMapQueue mod_id
|
||||
static constexpr const char *MOD_ID = "1";
|
||||
// thread num of ObMapQueueThread
|
||||
static const int THREAD_NUM = 6;
|
||||
// max task count
|
||||
static const int64_t MAX_TASK_COUNT = 10 * 1000;
|
||||
public:
|
||||
// Generate timed task data
|
||||
void generate_data(const int64_t count, QueueThread *host, int64_t *&process_count, Type *&datas);
|
||||
};
|
||||
|
||||
void TestObLogTimer::generate_data(const int64_t count,
|
||||
QueueThread *host,
|
||||
int64_t *&process_count,
|
||||
Type *&datas)
|
||||
{
|
||||
datas = (Type *)ob_malloc(sizeof(Type) * count);
|
||||
OB_ASSERT(NULL != datas);
|
||||
for (uint64_t idx = 0; idx < count; idx++) {
|
||||
new (datas + idx) Type();
|
||||
datas[idx].reset(process_count, host);
|
||||
}
|
||||
}
|
||||
|
||||
//////////////////////Basic function tests//////////////////////////////////////////
|
||||
TEST_F(TestObLogTimer, timer)
|
||||
{
|
||||
// ObMapQueueThread init
|
||||
QueueThread host;
|
||||
EXPECT_EQ(OB_SUCCESS, host.init(THREAD_NUM, MOD_ID));
|
||||
|
||||
// Number of timer tasks handled
|
||||
int64_t process_timer_task_count = 0;
|
||||
int64_t *ptr = &process_timer_task_count;
|
||||
|
||||
// Generate timed tasks
|
||||
Type *datas = NULL;
|
||||
generate_data(TASK_COUNT, &host, ptr, datas);
|
||||
OB_ASSERT(NULL != datas);
|
||||
|
||||
// ObLogFixedTimer init
|
||||
ObLogFixedTimer timer;
|
||||
MockFetcherErrHandler1 err_handle;
|
||||
EXPECT_EQ(OB_SUCCESS, timer.init(err_handle, MAX_TASK_COUNT));
|
||||
|
||||
// Insert timed tasks
|
||||
int64_t start_push_time = 0;
|
||||
int64_t end_push_time = 0;
|
||||
start_push_time = get_timestamp();
|
||||
for (int64_t idx = 0; idx < TASK_COUNT; idx++) {
|
||||
// Giving start time
|
||||
datas[idx].start_time_ = get_timestamp();
|
||||
EXPECT_EQ(OB_SUCCESS, timer.schedule(&datas[idx]));
|
||||
}
|
||||
end_push_time = get_timestamp();
|
||||
int64_t push_take_time = end_push_time - start_push_time;
|
||||
EXPECT_EQ(TASK_COUNT, timer.task_queue_.get_total());
|
||||
LOG_INFO("timer push", K(push_take_time));
|
||||
|
||||
// ObLogTimer start
|
||||
EXPECT_EQ(OB_SUCCESS, timer.start());
|
||||
|
||||
int64_t start_test_tstamp = get_timestamp();
|
||||
while (((get_timestamp() - start_test_tstamp) < TEST_TIME_LIMIT)
|
||||
&& (process_timer_task_count < TASK_COUNT)) {
|
||||
}
|
||||
LOG_INFO("process", K(process_timer_task_count));
|
||||
|
||||
int64_t min_interval = 1 * _SEC_;
|
||||
int64_t max_interval = 0;
|
||||
|
||||
for (int64_t idx = 0; idx < TASK_COUNT; idx++) {
|
||||
int64_t inv = datas[idx].end_time_ - datas[idx].start_time_;
|
||||
if (inv < min_interval) {
|
||||
min_interval = inv;
|
||||
}
|
||||
|
||||
if (inv > max_interval) {
|
||||
max_interval = inv;
|
||||
}
|
||||
}
|
||||
LOG_INFO("interval", K(min_interval), K(max_interval));
|
||||
|
||||
host.destroy();
|
||||
ob_free(datas);
|
||||
timer.destroy();
|
||||
}
|
||||
|
||||
////////////////////////Boundary condition testing//////////////////////////////////////////
|
||||
// ObLogTimer init fail
|
||||
TEST_F(TestObLogTimer, init_failed)
|
||||
{
|
||||
ObLogFixedTimer timer;
|
||||
MockFetcherErrHandler1 err_handle;
|
||||
EXPECT_EQ(OB_SUCCESS, timer.init(err_handle, MAX_TASK_COUNT));
|
||||
EXPECT_EQ(OB_INIT_TWICE, timer.init(err_handle, MAX_TASK_COUNT));
|
||||
timer.destroy();
|
||||
}
|
||||
|
||||
}//end of unittest
|
||||
}//end of oceanbase
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
// ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
// testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
int ret = 1;
|
||||
ObLogger &logger = ObLogger::get_logger();
|
||||
logger.set_file_name("test_ob_log_timer.log", true);
|
||||
logger.set_log_level(OB_LOG_LEVEL_INFO);
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
ret = RUN_ALL_TESTS();
|
||||
return ret;
|
||||
}
|
||||
425
unittest/obcdc/test_ob_map_queue.cpp
Normal file
425
unittest/obcdc/test_ob_map_queue.cpp
Normal file
@ -0,0 +1,425 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#define USING_LOG_PREFIX OBLOG_FETCHER
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "share/ob_define.h"
|
||||
#include "obcdc/src/ob_map_queue.h"
|
||||
#include "ob_log_utils.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
class TestObMapQueue : public ::testing::Test
|
||||
{
|
||||
public :
|
||||
virtual void SetUp() {}
|
||||
virtual void TearDown() {}
|
||||
public :
|
||||
// ObMapQueue label
|
||||
static constexpr const char *LABEL = "TestObMapQueue";
|
||||
// push thread
|
||||
static const int64_t ONE_PUSH_THREAD_NUM = 1;
|
||||
static const int64_t MULTI_PUSH_THREAD_NUM = 3;
|
||||
// pop thread
|
||||
static const int64_t ONE_POP_THREAD_NUM = 1;
|
||||
static const int64_t MULTI_POP_THREAD_NUM = 5;
|
||||
|
||||
static const int64_t TEST_TIME_LIMIT = 10 * _MIN_;
|
||||
};
|
||||
|
||||
// ObMapQueue type
|
||||
typedef int64_t Type;
|
||||
// push ObMapQueue value
|
||||
static const int64_t START_VALUE = 0;
|
||||
static const int64_t END_VALUE = 1 * 1000 * 1000 - 1;
|
||||
static const int64_t VALUE_COUNT = END_VALUE - START_VALUE + 1;
|
||||
|
||||
class TestPushWorker : public liboblog::Runnable
|
||||
{
|
||||
public:
|
||||
enum State
|
||||
{
|
||||
IDLE, //
|
||||
REQ, // pushing
|
||||
DONE // push DONE
|
||||
};
|
||||
// Identifies the current thread status
|
||||
State state_;
|
||||
|
||||
// thread index
|
||||
int64_t thread_idx_;
|
||||
// thread count
|
||||
int64_t thread_count_;
|
||||
// ObMapQueue
|
||||
ObMapQueue<Type> *map_queue_;
|
||||
// record map_queue push count
|
||||
int64_t push_count_;
|
||||
// value interval
|
||||
int64_t interval_;
|
||||
|
||||
virtual int routine()
|
||||
{
|
||||
int64_t start = thread_idx_ * interval_;
|
||||
int64_t end = (thread_count_ - 1 != thread_idx_) ? start + interval_ - 1 : END_VALUE;
|
||||
LOG_INFO("TestPushWorker", K(start), K(end));
|
||||
|
||||
int64_t val = start;
|
||||
while (val <= end) {
|
||||
EXPECT_EQ(OB_SUCCESS, map_queue_->push(val++));
|
||||
push_count_++;
|
||||
}
|
||||
|
||||
if (end + 1 == val) {
|
||||
state_ = DONE;
|
||||
}
|
||||
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
};
|
||||
|
||||
class TestPopWorker: public liboblog::Runnable
|
||||
{
|
||||
public:
|
||||
// thread index
|
||||
int64_t thread_idx_;
|
||||
// ObMapQueue
|
||||
ObMapQueue<Type> *map_queue_;
|
||||
// record thread map_queue pop count
|
||||
int64_t pop_count_ CACHE_ALIGNED;
|
||||
// record poped count for all threads
|
||||
int64_t *end_pop_count_ CACHE_ALIGNED;
|
||||
// save data poped out
|
||||
Type *array_;
|
||||
|
||||
virtual int routine()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
while (OB_SUCC(ret)) {
|
||||
Type val;
|
||||
while (OB_SUCC(map_queue_->pop(val))) {
|
||||
if (val >= START_VALUE && val <= END_VALUE) {
|
||||
if (0 == array_[val]) {
|
||||
array_[val] = val;
|
||||
ATOMIC_INC(&pop_count_);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (OB_EAGAIN == ret) {
|
||||
ret = OB_SUCCESS;
|
||||
}
|
||||
if (ATOMIC_LOAD(end_pop_count_) == VALUE_COUNT) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
////////////////////// Basic function tests //////////////////////////////////////////
|
||||
// ObMapQueue init
|
||||
TEST_F(TestObMapQueue, init)
|
||||
{
|
||||
ObMapQueue<Type> map_queue;
|
||||
EXPECT_EQ(OB_SUCCESS, map_queue.init(LABEL));
|
||||
EXPECT_TRUE(map_queue.is_inited());
|
||||
|
||||
map_queue.destroy();
|
||||
EXPECT_FALSE(map_queue.is_inited());
|
||||
}
|
||||
|
||||
// Test scenarios.
|
||||
// 1. single-threaded push - single-threaded pop
|
||||
// 2. single-threaded push - multi-threaded pop
|
||||
// 3. multi-threaded push - single-threaded pop
|
||||
// 4. multi-threaded push - multi-threaded pop
|
||||
TEST_F(TestObMapQueue, push_pop_test)
|
||||
{
|
||||
ObMapQueue<Type> map_queue;
|
||||
EXPECT_EQ(OB_SUCCESS, map_queue.init(LABEL));
|
||||
EXPECT_TRUE(map_queue.is_inited());
|
||||
|
||||
// malloc array
|
||||
Type *array = (Type *)ob_malloc(sizeof(Type) * VALUE_COUNT);
|
||||
OB_ASSERT(NULL != array);
|
||||
|
||||
for (int64_t test_type = 0, test_cnt = 4; test_type < test_cnt; ++test_type) {
|
||||
memset(array, 0, sizeof(Type) * VALUE_COUNT);
|
||||
int64_t PUSH_THREAD_NUM = 0;
|
||||
int64_t POP_THREAD_NUM = 0;
|
||||
int64_t end_push_count = 0;
|
||||
int64_t end_pop_count = 0;
|
||||
|
||||
switch (test_type) {
|
||||
// single-threaded push - single-threaded pop
|
||||
case 0:
|
||||
PUSH_THREAD_NUM = ONE_PUSH_THREAD_NUM;
|
||||
POP_THREAD_NUM = ONE_POP_THREAD_NUM;
|
||||
break;
|
||||
// single-threaded push - multi-threaded pop
|
||||
case 1:
|
||||
PUSH_THREAD_NUM = ONE_PUSH_THREAD_NUM;
|
||||
POP_THREAD_NUM = MULTI_POP_THREAD_NUM;
|
||||
break;
|
||||
// multi-threaded push - single-threaded pop
|
||||
case 2:
|
||||
PUSH_THREAD_NUM = MULTI_PUSH_THREAD_NUM;
|
||||
POP_THREAD_NUM = ONE_POP_THREAD_NUM;
|
||||
break;
|
||||
// multi-threaded push - multi-threaded pop
|
||||
case 3:
|
||||
PUSH_THREAD_NUM = MULTI_PUSH_THREAD_NUM;
|
||||
POP_THREAD_NUM = MULTI_POP_THREAD_NUM;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
LOG_INFO("push_pop_test", K(test_type), K(PUSH_THREAD_NUM), K(POP_THREAD_NUM));
|
||||
|
||||
// push thread
|
||||
TestPushWorker push_workers[PUSH_THREAD_NUM];
|
||||
const int64_t INTERVAL = VALUE_COUNT / PUSH_THREAD_NUM;
|
||||
for (int64_t idx = 0, cnt = PUSH_THREAD_NUM; idx < cnt; ++idx) {
|
||||
TestPushWorker &w = push_workers[idx];
|
||||
// assign value
|
||||
w.state_ = TestPushWorker::REQ;
|
||||
w.thread_idx_ = idx;
|
||||
w.thread_count_ = PUSH_THREAD_NUM;
|
||||
w.map_queue_ = &map_queue;
|
||||
w.push_count_ = 0;
|
||||
w.interval_ = INTERVAL;
|
||||
// create threads
|
||||
w.create();
|
||||
LOG_INFO("push_pop_test", "push thread", "create OB_SUCCESS");
|
||||
}
|
||||
|
||||
// pop thread
|
||||
TestPopWorker pop_workers[POP_THREAD_NUM];
|
||||
for (int64_t idx = 0, cnt = POP_THREAD_NUM; idx < cnt; ++idx) {
|
||||
TestPopWorker &w = pop_workers[idx];
|
||||
// addign value
|
||||
w.map_queue_ = &map_queue;
|
||||
w.array_ = array;
|
||||
w.pop_count_ = 0;
|
||||
w.end_pop_count_ = &end_pop_count;
|
||||
// create threads
|
||||
w.create();
|
||||
LOG_INFO("push_pop_test", "pop thread", "create OB_SUCCESS");
|
||||
}
|
||||
|
||||
// Verify the correctness of the push: verify the total number of pushes into the ObMapQueue-Type
|
||||
int64_t start_test_tstamp = get_timestamp();
|
||||
while (((get_timestamp() - start_test_tstamp) < TEST_TIME_LIMIT)
|
||||
&& (end_push_count < VALUE_COUNT)) {
|
||||
for (int64_t idx = 0, cnt = PUSH_THREAD_NUM; idx < cnt; ++idx) {
|
||||
TestPushWorker &w = push_workers[idx];
|
||||
if (TestPushWorker::DONE == w.state_) {
|
||||
end_push_count += w.push_count_;
|
||||
w.state_ = TestPushWorker::IDLE;
|
||||
}
|
||||
}
|
||||
}
|
||||
EXPECT_EQ(VALUE_COUNT, end_push_count);
|
||||
|
||||
// Verify that the pop is correct:
|
||||
// 1. verify the total number of -Types popped from ObMapQueue
|
||||
// 2. Correctness of the fields
|
||||
start_test_tstamp = get_timestamp();
|
||||
while (((get_timestamp() - start_test_tstamp) < TEST_TIME_LIMIT)
|
||||
&& (end_pop_count < VALUE_COUNT)) {
|
||||
for (int64_t idx = 0, cnt = POP_THREAD_NUM; idx < cnt; ++idx) {
|
||||
TestPopWorker &w = pop_workers[idx];
|
||||
|
||||
int64_t pop_cnt = ATOMIC_LOAD(&w.pop_count_);
|
||||
while (!ATOMIC_BCAS(&w.pop_count_, pop_cnt, 0)) {
|
||||
pop_cnt = ATOMIC_LOAD(&w.pop_count_);
|
||||
}
|
||||
|
||||
end_pop_count += pop_cnt;
|
||||
//LOG_DEBUG("pop verify", K(idx), K(pop_cnt), K(end_pop_count));
|
||||
LOG_INFO("pop verify", K(idx), K(pop_cnt), K(end_pop_count));
|
||||
}
|
||||
}
|
||||
EXPECT_EQ(VALUE_COUNT, end_pop_count);
|
||||
|
||||
int64_t correct_field = 0;
|
||||
for (int64_t idx = 0, cnt = VALUE_COUNT; idx < cnt; ++idx) {
|
||||
if (idx == array[idx]) {
|
||||
correct_field++;
|
||||
}
|
||||
}
|
||||
EXPECT_EQ(VALUE_COUNT, correct_field);
|
||||
|
||||
// push thread join
|
||||
for (int64_t idx = 0, cnt = PUSH_THREAD_NUM; idx < cnt; ++idx) {
|
||||
TestPushWorker &w = push_workers[idx];
|
||||
w.join();
|
||||
LOG_INFO("push_pop_test", "push thread", "join OB_SUCCESS");
|
||||
}
|
||||
|
||||
// pop thread join
|
||||
for (int64_t idx = 0, cnt = POP_THREAD_NUM; idx < cnt; ++idx) {
|
||||
TestPopWorker &w = pop_workers[idx];
|
||||
w.join();
|
||||
LOG_INFO("push_pop_test", "pop thread", "join OB_SUCCESS");
|
||||
}
|
||||
|
||||
EXPECT_EQ(OB_SUCCESS, map_queue.reset());
|
||||
}
|
||||
|
||||
// free array
|
||||
ob_free(array);
|
||||
map_queue.destroy();
|
||||
EXPECT_FALSE(map_queue.is_inited());
|
||||
}
|
||||
|
||||
// 1. push performance test: push data with 10 threads
|
||||
// 2. pop performance test: pop data with 10 threads
|
||||
TEST_F(TestObMapQueue, DISABLED_performance)
|
||||
{
|
||||
int64_t start_test_tstamp = 0;
|
||||
int64_t end_test_tstamp = 0;
|
||||
|
||||
ObMapQueue<Type> map_queue;
|
||||
EXPECT_EQ(OB_SUCCESS, map_queue.init(LABEL));
|
||||
|
||||
// push
|
||||
int64_t PUSH_THREAD_NUM = 10;
|
||||
const int64_t INTERVAL = VALUE_COUNT / PUSH_THREAD_NUM;
|
||||
int64_t end_push_count = 0;
|
||||
TestPushWorker push_workers[PUSH_THREAD_NUM];
|
||||
|
||||
start_test_tstamp = get_timestamp();
|
||||
for (int64_t idx = 0, cnt = PUSH_THREAD_NUM; idx < cnt; ++idx) {
|
||||
TestPushWorker &w = push_workers[idx];
|
||||
w.state_ = TestPushWorker::REQ;
|
||||
w.thread_idx_ = idx;
|
||||
w.thread_count_ = PUSH_THREAD_NUM;
|
||||
w.map_queue_ = &map_queue;
|
||||
w.push_count_ = 0;
|
||||
w.interval_ = INTERVAL;
|
||||
w.create();
|
||||
LOG_INFO("push_performance", "push thread", "create OB_SUCCESS");
|
||||
}
|
||||
// Detect the end of push in all threads
|
||||
while (((get_timestamp() - start_test_tstamp) < TEST_TIME_LIMIT)
|
||||
&& (end_push_count < VALUE_COUNT)) {
|
||||
for (int64_t idx = 0, cnt = PUSH_THREAD_NUM; idx < cnt; ++idx) {
|
||||
TestPushWorker &w = push_workers[idx];
|
||||
if (TestPushWorker::DONE == w.state_) {
|
||||
end_push_count += w.push_count_;
|
||||
w.state_ = TestPushWorker::IDLE;
|
||||
}
|
||||
}
|
||||
}
|
||||
EXPECT_EQ(VALUE_COUNT, end_push_count);
|
||||
end_test_tstamp = get_timestamp();
|
||||
|
||||
double push_time = static_cast<double>(end_test_tstamp - start_test_tstamp) * 1.0 / 1000000;
|
||||
double push_cnt_per_second = static_cast<double>(VALUE_COUNT) * 1.0 / (push_time);
|
||||
LOG_INFO("push_performance", K(end_push_count), K(push_time), "push count/s", push_cnt_per_second);
|
||||
|
||||
// pop
|
||||
int64_t POP_THREAD_NUM = 10;
|
||||
int64_t end_pop_count = 0;
|
||||
TestPopWorker pop_workers[POP_THREAD_NUM];
|
||||
|
||||
// malloc array
|
||||
Type *array = (Type *)ob_malloc(sizeof(Type) * VALUE_COUNT);
|
||||
OB_ASSERT(NULL != array);
|
||||
memset(array, 0, sizeof(Type) * VALUE_COUNT);
|
||||
|
||||
start_test_tstamp = get_timestamp();
|
||||
for (int64_t idx = 0, cnt = POP_THREAD_NUM; idx < cnt; ++idx) {
|
||||
TestPopWorker &w = pop_workers[idx];
|
||||
w.map_queue_ = &map_queue;
|
||||
w.array_ = array;
|
||||
w.pop_count_ = 0;
|
||||
w.end_pop_count_ = &end_pop_count;
|
||||
w.create();
|
||||
LOG_INFO("pop_performance", "pop thread", "create OB_SUCCESS");
|
||||
}
|
||||
|
||||
while (((get_timestamp() - start_test_tstamp) < TEST_TIME_LIMIT)
|
||||
&& (end_pop_count < VALUE_COUNT)) {
|
||||
for (int64_t idx = 0, cnt = POP_THREAD_NUM; idx < cnt; ++idx) {
|
||||
TestPopWorker &w = pop_workers[idx];
|
||||
|
||||
int64_t pop_cnt = ATOMIC_LOAD(&w.pop_count_);
|
||||
while (!ATOMIC_BCAS(&w.pop_count_, pop_cnt, 0)) {
|
||||
pop_cnt = ATOMIC_LOAD(&w.pop_count_);
|
||||
}
|
||||
|
||||
end_pop_count += pop_cnt;
|
||||
LOG_DEBUG("pop verify", K(idx), K(pop_cnt), K(end_pop_count));
|
||||
}
|
||||
}
|
||||
EXPECT_EQ(VALUE_COUNT, end_pop_count);
|
||||
end_test_tstamp = get_timestamp();
|
||||
|
||||
double pop_time = static_cast<double>(end_test_tstamp - start_test_tstamp) * 1.0 / 1000000;
|
||||
double pop_cnt_per_second = static_cast<double>(VALUE_COUNT) * 1.0 / (pop_time);
|
||||
LOG_INFO("pop_performance", K(end_pop_count), K(pop_time), "pop count/s", pop_cnt_per_second);
|
||||
|
||||
// push thread join
|
||||
for (int64_t idx = 0, cnt = PUSH_THREAD_NUM; idx < cnt; ++idx) {
|
||||
TestPushWorker &w = push_workers[idx];
|
||||
w.join();
|
||||
LOG_INFO("performance", "push thread", "join OB_SUCCESS");
|
||||
}
|
||||
|
||||
// pop thread join
|
||||
for (int64_t idx = 0, cnt = POP_THREAD_NUM; idx < cnt; ++idx) {
|
||||
TestPopWorker &w = pop_workers[idx];
|
||||
w.join();
|
||||
LOG_INFO("performance", "pop thread", "join OB_SUCCESS");
|
||||
}
|
||||
|
||||
ob_free(array);
|
||||
map_queue.destroy();
|
||||
}
|
||||
|
||||
//////////////////////// Boundary condition testing //////////////////////////////////////////
|
||||
// ObMapQueue init fail
|
||||
TEST_F(TestObMapQueue, init_failed)
|
||||
{
|
||||
ObMapQueue<Type> map_queue;
|
||||
EXPECT_EQ(OB_SUCCESS, map_queue.init(LABEL));
|
||||
EXPECT_EQ(OB_INIT_TWICE, map_queue.init(LABEL));
|
||||
}
|
||||
|
||||
}//end of unittest
|
||||
}//end of oceanbase
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
// ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
|
||||
// testing::InitGoogleTest(&argc,argv);
|
||||
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
|
||||
int ret = 1;
|
||||
ObLogger &logger = ObLogger::get_logger();
|
||||
logger.set_file_name("test_ob_map_queue.log", true);
|
||||
logger.set_log_level(OB_LOG_LEVEL_INFO);
|
||||
testing::InitGoogleTest(&argc, argv);
|
||||
ret = RUN_ALL_TESTS();
|
||||
return ret;
|
||||
}
|
||||
544
unittest/obcdc/test_ob_map_queue_thread.cpp
Normal file
544
unittest/obcdc/test_ob_map_queue_thread.cpp
Normal file
@ -0,0 +1,544 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#define USING_LOG_PREFIX OBLOG_FETCHER
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include <vector>
|
||||
#include "share/ob_define.h"
|
||||
#include "obcdc/src/ob_map_queue_thread.h"
|
||||
#include "ob_log_utils.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
using namespace std;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
// ObMapQueue label
|
||||
static const char *label = "test";
|
||||
// Thread num of ObMapQueueThread
|
||||
static const int THREAD_NUM = 6;
|
||||
|
||||
// ObMapQueue type
|
||||
struct MapQueueType
|
||||
{
|
||||
int64_t value_;
|
||||
uint64_t hash_val_;
|
||||
void reset(int64_t value, uint64_t hash_val)
|
||||
{
|
||||
value_ = value;
|
||||
hash_val_ = hash_val;
|
||||
}
|
||||
TO_STRING_KV(K(value_), K(hash_val_));
|
||||
};
|
||||
typedef MapQueueType Type;
|
||||
|
||||
// ObMapQueue value range
|
||||
static const int64_t START_VALUE = 0;
|
||||
static const int64_t END_VALUE = 1 * 100 * 1000 - 1;
|
||||
static const int64_t VALUE_COUNT = END_VALUE - START_VALUE + 1;
|
||||
|
||||
class TestObMapQueueThread : public ::testing::Test
|
||||
{
|
||||
public :
|
||||
TestObMapQueueThread() {}
|
||||
virtual ~TestObMapQueueThread() {}
|
||||
virtual void SetUp() {}
|
||||
virtual void TearDown() {}
|
||||
public :
|
||||
// push thread
|
||||
static const int64_t ONE_PUSH_THREAD_NUM = 1;
|
||||
static const int64_t MULTI_PUSH_THREAD_NUM = 3;
|
||||
// time limit
|
||||
static const int64_t TEST_TIME_LIMIT = 1 * _MIN_;
|
||||
public:
|
||||
// generate data
|
||||
void generate_data(const int64_t count, Type *&datas);
|
||||
};
|
||||
|
||||
void TestObMapQueueThread::generate_data(const int64_t count, Type *&datas)
|
||||
{
|
||||
datas = (Type *)ob_malloc(sizeof(Type) * count);
|
||||
OB_ASSERT(NULL != datas);
|
||||
for (int64_t idx = 0; idx < count; idx++) {
|
||||
datas[idx].reset(idx, idx % THREAD_NUM);
|
||||
}
|
||||
for (int64_t idx = 0; idx < count; idx++) {
|
||||
LOG_DEBUG("data", K(datas[idx]));
|
||||
}
|
||||
}
|
||||
|
||||
static const int MAX_THREAD_NUM = 16;
|
||||
typedef common::ObMapQueueThread<MAX_THREAD_NUM> QueueThread;
|
||||
// DerivedQueueThread1
|
||||
// Overload handle, test the correctness of ObMapQueueThread execution
|
||||
class DerivedQueueThread1 : public QueueThread
|
||||
{
|
||||
public:
|
||||
DerivedQueueThread1(vector<vector<Type> > &handle_result) : end_handle_count_(0),
|
||||
handle_result_(handle_result),
|
||||
inited_(false) {}
|
||||
virtual ~DerivedQueueThread1() { destroy(); }
|
||||
public:
|
||||
int init();
|
||||
void destroy();
|
||||
int start();
|
||||
public:
|
||||
// Record the number of data that has been processed by the thread
|
||||
int64_t end_handle_count_ CACHE_ALIGNED;
|
||||
public:
|
||||
// Implement ObMapQueueThread dummy function-handle to overload the thread handling function
|
||||
virtual int handle(void *data, const int64_t thread_index, volatile bool &stop_flag);
|
||||
private:
|
||||
vector<vector<Type> > &handle_result_;
|
||||
bool inited_;
|
||||
};
|
||||
|
||||
// DerivedQueueThread2
|
||||
// Overload run, test the correctness of ObMapQueueThread execution
|
||||
class DerivedQueueThread2 : public QueueThread
|
||||
{
|
||||
public:
|
||||
DerivedQueueThread2(vector<vector<Type> > &handle_result) : end_handle_count_(0),
|
||||
handle_result_(handle_result),
|
||||
inited_(false) {}
|
||||
virtual ~DerivedQueueThread2() { destroy(); }
|
||||
public:
|
||||
int init();
|
||||
void destroy();
|
||||
int start();
|
||||
public:
|
||||
// Record the number of data that has been processed by the thread
|
||||
int64_t end_handle_count_ CACHE_ALIGNED;
|
||||
public:
|
||||
// Override run() to test the correctness of ObMapQueueThread execution
virtual void run(const int64_t thread_index);
|
||||
private:
|
||||
static const int64_t IDLE_WAIT_TIME = 10 * 1000;
|
||||
private:
|
||||
vector<vector<Type> > &handle_result_;
|
||||
bool inited_;
|
||||
};
|
||||
|
||||
int DerivedQueueThread1::init()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
if (OB_UNLIKELY(inited_)) {
|
||||
LOG_ERROR("DerivedQueueThread1 init twice");
|
||||
ret = OB_INIT_TWICE;
|
||||
} else if (OB_FAIL(QueueThread::init(THREAD_NUM, label))) {
|
||||
LOG_ERROR("init QueueThread fail", K(ret), K(THREAD_NUM), K(label));
|
||||
} else {
|
||||
EXPECT_EQ(THREAD_NUM, QueueThread::get_thread_num());
|
||||
EXPECT_TRUE(QueueThread::is_stoped());
|
||||
end_handle_count_ = 0;
|
||||
inited_ = true;
|
||||
|
||||
LOG_INFO("DerivedQueueThread1 init ok", K(ret));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void DerivedQueueThread1::destroy()
|
||||
{
|
||||
if (inited_) {
|
||||
QueueThread::destroy();
|
||||
EXPECT_TRUE(QueueThread::is_stoped());
|
||||
inited_ = false;
|
||||
|
||||
LOG_INFO("DerivedQueueThread1 destory");
|
||||
}
|
||||
}
|
||||
|
||||
int DerivedQueueThread1::start()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
if (OB_UNLIKELY(!inited_)) {
|
||||
LOG_ERROR("DerivedQueueThread1 not init");
|
||||
ret = OB_NOT_INIT;
|
||||
} else if (OB_FAIL(QueueThread::start())) {
|
||||
LOG_ERROR("DerivedQueueThread1 start error", K(ret));
|
||||
} else {
|
||||
LOG_INFO("DerivedQueueThread1 start ok");
|
||||
}
|
||||
EXPECT_FALSE(QueueThread::is_stoped());
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int DerivedQueueThread1::handle(void *data, const int64_t thread_index, volatile bool &stop_flag)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
stop_flag = stop_flag;  // self-assignment only to silence the unused-parameter warning
Type *task = NULL;
|
||||
|
||||
if (OB_UNLIKELY(!inited_)) {
|
||||
LOG_ERROR("DerivedQueueThread1 not init");
|
||||
ret = OB_NOT_INIT;
|
||||
} else if (OB_ISNULL(data)
|
||||
|| OB_UNLIKELY(thread_index < 0)
|
||||
|| OB_UNLIKELY(thread_index >= get_thread_num())) {
|
||||
LOG_ERROR("invalid argument", K(thread_index), K(get_thread_num()));
|
||||
ret = OB_ERR_UNEXPECTED;
|
||||
} else if (OB_ISNULL(task = static_cast<Type *>(data))) {
|
||||
ret = OB_INVALID_ARGUMENT;
|
||||
LOG_ERROR("invalid argument", K(ret), KP(task), K(thread_index));
|
||||
} else {
|
||||
LOG_DEBUG("DerivedQueueThread1 handle", K(ret), K(*task), K(thread_index));
|
||||
|
||||
handle_result_[thread_index].push_back(*task);
|
||||
ATOMIC_INC(&end_handle_count_);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int DerivedQueueThread2::init()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
if (OB_UNLIKELY(inited_)) {
|
||||
LOG_ERROR("DerivedQueueThread2 init twice");
|
||||
ret = OB_INIT_TWICE;
|
||||
} else if (OB_FAIL(QueueThread::init(THREAD_NUM, label))) {
|
||||
LOG_ERROR("init QueueThread fail", K(ret), K(THREAD_NUM), K(label));
|
||||
} else {
|
||||
EXPECT_EQ(THREAD_NUM, QueueThread::get_thread_num());
|
||||
EXPECT_TRUE(QueueThread::is_stoped());
|
||||
end_handle_count_ = 0;
|
||||
inited_ = true;
|
||||
|
||||
LOG_INFO("DerivedQueueThread2 init ok", K(ret));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void DerivedQueueThread2::destroy()
|
||||
{
|
||||
if (inited_) {
|
||||
QueueThread::destroy();
|
||||
EXPECT_TRUE(QueueThread::is_stoped());
|
||||
inited_ = false;
|
||||
|
||||
LOG_INFO("DerivedQueueThread2 destory");
|
||||
}
|
||||
}
|
||||
|
||||
int DerivedQueueThread2::start()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
if (OB_UNLIKELY(!inited_)) {
|
||||
LOG_ERROR("DerivedQueueThread2 not init");
|
||||
ret = OB_NOT_INIT;
|
||||
} else if (OB_FAIL(QueueThread::start())) {
|
||||
LOG_ERROR("DerivedQueueThread2 start error", K(ret));
|
||||
} else {
|
||||
LOG_INFO("DerivedQueueThread2 start ok");
|
||||
}
|
||||
EXPECT_FALSE(QueueThread::is_stoped());
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void DerivedQueueThread2::run(const int64_t thread_index)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
if (OB_UNLIKELY(!inited_)) {
|
||||
LOG_ERROR("DerivedQueueThread2 not init");
|
||||
ret = OB_NOT_INIT;
|
||||
} else if (OB_UNLIKELY(thread_index < 0) || OB_UNLIKELY(thread_index >= get_thread_num())) {
|
||||
LOG_ERROR("invalid argument", K(thread_index), K(get_thread_num()));
|
||||
ret = OB_ERR_UNEXPECTED;
|
||||
} else {
|
||||
LOG_INFO("DerivedQueueThread2 run start", K(thread_index));
|
||||
|
||||
while (!stop_flag_ && OB_SUCCESS == ret) {
|
||||
void *data = NULL;
|
||||
Type *task = NULL;
|
||||
|
||||
if (OB_FAIL(pop(thread_index, data))) {
|
||||
if (OB_EAGAIN == ret) {
|
||||
// empty
|
||||
ret = OB_SUCCESS;
|
||||
cond_timedwait(thread_index, IDLE_WAIT_TIME);
|
||||
LOG_DEBUG("DerivedQueueThread2 pop empty");
|
||||
} else {
|
||||
LOG_ERROR("DerivedQueueThread2 pop data error", K(ret));
|
||||
}
|
||||
} else if (OB_ISNULL(data) || OB_ISNULL(task = static_cast<Type *>(data))) {
|
||||
LOG_ERROR("invalid argument", KPC(task), K(thread_index));
|
||||
ret = OB_ERR_UNEXPECTED;
|
||||
} else {
|
||||
LOG_DEBUG("DerivedQueueThread2 handle", K(ret), K(*task), K(thread_index));
|
||||
|
||||
handle_result_[thread_index].push_back(*task);
|
||||
ATOMIC_INC(&end_handle_count_);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class TestPushWorker : public liboblog::Runnable
|
||||
{
|
||||
public:
|
||||
enum State
|
||||
{
|
||||
IDLE, // empty
|
||||
REQ, // pushing
|
||||
DONE // push done
|
||||
};
|
||||
// Identifies the current thread status
|
||||
State state_;
|
||||
// thread index
|
||||
int64_t thread_idx_;
|
||||
// thread count
|
||||
int64_t thread_count_;
|
||||
// push data
|
||||
Type *datas_;
|
||||
// value interval
|
||||
int64_t interval_;
|
||||
// ObMapQueueThread
|
||||
QueueThread *host_;
|
||||
// record thread map_queue push count
|
||||
int64_t push_count_;
|
||||
|
||||
void reset(const int64_t thread_idx,
|
||||
const int64_t push_thread_num,
|
||||
Type *datas,
|
||||
QueueThread *host)
|
||||
{
|
||||
state_ = TestPushWorker::REQ;
|
||||
thread_idx_ = thread_idx;
|
||||
thread_count_ = push_thread_num;
|
||||
datas_ = datas;
|
||||
interval_ = VALUE_COUNT / push_thread_num;
|
||||
host_ = host;
|
||||
push_count_ = 0;
|
||||
}
|
||||
|
||||
void start()
|
||||
{
|
||||
// create threads
|
||||
create();
|
||||
LOG_INFO("TestPushWorker start", "push worker thread", "create OB_SUCCESS");
|
||||
}
|
||||
|
||||
void stop()
|
||||
{
|
||||
join();
|
||||
LOG_INFO("TestPushWorker join", "push worker thread", "join OB_SUCCESS");
|
||||
}
|
||||
|
||||
virtual int routine()
|
||||
{
|
||||
int64_t start = thread_idx_ * interval_;
|
||||
int64_t end = (thread_count_ - 1 != thread_idx_) ? start + interval_ - 1 : END_VALUE;
|
||||
LOG_INFO("TestPushWorker", K(start), K(end));
|
||||
|
||||
int64_t idx = 0;
|
||||
for (idx = start; idx <= end; idx++) {
|
||||
Type *type = datas_ + idx;
|
||||
EXPECT_EQ(OB_SUCCESS, host_->push(type, type->hash_val_));
|
||||
push_count_++;
|
||||
LOG_DEBUG("TestPushWorker", K(idx), KPC(type));
|
||||
}
|
||||
|
||||
if (end + 1 == idx) {
|
||||
state_ = DONE;
|
||||
}
|
||||
|
||||
return OB_SUCCESS;
|
||||
}
|
||||
};
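
// Illustrative note (not part of the committed test): routine() above splits [START_VALUE, END_VALUE]
// evenly across the push threads, and the last thread absorbs the remainder. For example, with
// VALUE_COUNT = 100000 and MULTI_PUSH_THREAD_NUM = 3 push threads, interval_ = 100000 / 3 = 33333,
// so the three workers push [0, 33332], [33333, 66665] and [66666, 99999], covering every value exactly once.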
|
||||
|
||||
const int64_t MAX_PUSH_WORKER_NUM = 32;
|
||||
TestPushWorker push_workers[MAX_PUSH_WORKER_NUM];
|
||||
// start push worker
|
||||
static void start_push_worker(const int64_t push_thread_num,
|
||||
Type *datas,
|
||||
QueueThread *host)
|
||||
{
|
||||
//push thread
|
||||
for (int64_t idx = 0, cnt = push_thread_num; idx < cnt; ++idx) {
|
||||
TestPushWorker &w = push_workers[idx];
|
||||
w.reset(idx, push_thread_num, datas, host);
|
||||
w.start();
|
||||
}
|
||||
}
|
||||
|
||||
// stop push worker
|
||||
static void stop_push_worker(const int64_t push_thread_num)
|
||||
{
|
||||
// push thread join
|
||||
for (int64_t idx = 0, cnt = push_thread_num; idx < cnt; ++idx) {
|
||||
TestPushWorker &w = push_workers[idx];
|
||||
w.stop();
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////// Basic function tests //////////////////////////////////////////
|
||||
// ObMapQueueThread init, destroy, start, stop, is_stoped, get_thread_num
// override ObMapQueueThread::handle()
TEST_F(TestObMapQueueThread, DerivedQueueThread1)
|
||||
{
|
||||
// generate data
Type *datas = NULL;
|
||||
generate_data(VALUE_COUNT, datas);
|
||||
OB_ASSERT(NULL != datas);
|
||||
|
||||
// save result of DerivedQueueThread1 handle
vector<vector<Type> > handle_result;
|
||||
for (int64_t idx = 0; idx < THREAD_NUM; idx++) {
|
||||
vector<Type> res;
|
||||
res.clear();
|
||||
handle_result.push_back(res);
|
||||
}
|
||||
|
||||
// init and start worker thread
|
||||
DerivedQueueThread1 derived1(handle_result);
|
||||
EXPECT_EQ(OB_SUCCESS, derived1.init());
|
||||
EXPECT_EQ(OB_SUCCESS, derived1.start());
|
||||
|
||||
// start push thread
|
||||
int64_t PUSH_THREAD_NUM = ONE_PUSH_THREAD_NUM;
|
||||
start_push_worker(PUSH_THREAD_NUM, datas, &derived1);
|
||||
|
||||
// Check handle completion and verify that the result totals and fields are correct
|
||||
int64_t end_handle_count = 0;
|
||||
int64_t start_test_tstamp = get_timestamp();
|
||||
while (((get_timestamp() - start_test_tstamp) < TEST_TIME_LIMIT)
|
||||
&& (end_handle_count < VALUE_COUNT)) {
|
||||
end_handle_count = ATOMIC_LOAD(&derived1.end_handle_count_);
|
||||
usleep(static_cast<__useconds_t>(1000));
|
||||
LOG_DEBUG("handle verify", K(end_handle_count));
|
||||
}
|
||||
EXPECT_EQ(VALUE_COUNT, end_handle_count);
|
||||
|
||||
int64_t handle_result_count = 0;
|
||||
for (int64_t idx = 0; idx < THREAD_NUM; idx++) {
|
||||
int64_t cnt = handle_result[idx].size();
|
||||
LOG_INFO("DerivedQueueThread1 vector count", K(idx), K(cnt));
|
||||
handle_result_count += cnt;
|
||||
for (int64_t i = 0; i < cnt; i++) {
|
||||
Type t = handle_result[idx][i];
|
||||
LOG_DEBUG("type", K(t));
|
||||
EXPECT_TRUE(idx == (t.value_ % THREAD_NUM));
|
||||
EXPECT_TRUE(idx == t.hash_val_);
|
||||
}
|
||||
}
|
||||
EXPECT_EQ(VALUE_COUNT, handle_result_count);
|
||||
|
||||
stop_push_worker(PUSH_THREAD_NUM);
|
||||
derived1.destroy();
|
||||
ob_free(datas);
|
||||
}
|
||||
|
||||
// ObMapQueueThread run, pop, cond_timedwait
// override ObMapQueueThread::run()
TEST_F(TestObMapQueueThread, DerivedQueueThread2)
|
||||
{
|
||||
// generate data
Type *datas = NULL;
|
||||
generate_data(VALUE_COUNT, datas);
|
||||
OB_ASSERT(NULL != datas);
|
||||
|
||||
// save result of DerivedQueueThread2 handle
|
||||
vector<vector<Type> > handle_result;
|
||||
for (int64_t idx = 0; idx < THREAD_NUM; idx++) {
|
||||
vector<Type> res;
|
||||
res.clear();
|
||||
handle_result.push_back(res);
|
||||
}
|
||||
|
||||
// init and start worker thread
|
||||
DerivedQueueThread2 derived2(handle_result);
|
||||
EXPECT_EQ(OB_SUCCESS, derived2.init());
|
||||
EXPECT_EQ(OB_SUCCESS, derived2.start());
|
||||
|
||||
// start push thread
|
||||
int64_t PUSH_THREAD_NUM = ONE_PUSH_THREAD_NUM;
|
||||
start_push_worker(PUSH_THREAD_NUM, datas, &derived2);
|
||||
|
||||
// Check handle completion and verify that the result totals and fields are correct
|
||||
int64_t end_handle_count = 0;
|
||||
int64_t start_test_tstamp = get_timestamp();
|
||||
while (((get_timestamp() - start_test_tstamp) < TEST_TIME_LIMIT)
|
||||
&& (end_handle_count < VALUE_COUNT)) {
|
||||
end_handle_count = ATOMIC_LOAD(&derived2.end_handle_count_);
|
||||
usleep(static_cast<__useconds_t>(1000));
|
||||
LOG_DEBUG("handle verify", K(end_handle_count));
|
||||
}
|
||||
EXPECT_EQ(VALUE_COUNT, end_handle_count);
|
||||
|
||||
int64_t handle_result_count = 0;
|
||||
for (int64_t idx = 0; idx < THREAD_NUM; idx++) {
|
||||
int64_t cnt = handle_result[idx].size();
|
||||
LOG_INFO("DerivedQueueThread2 vector count", K(idx), K(cnt));
|
||||
handle_result_count += cnt;
|
||||
for (int64_t i = 0; i < cnt; i++) {
|
||||
Type t = handle_result[idx][i];
|
||||
LOG_DEBUG("type", K(t));
|
||||
EXPECT_TRUE(idx == (t.value_ % THREAD_NUM));
|
||||
EXPECT_TRUE(idx == t.hash_val_);
|
||||
}
|
||||
}
|
||||
EXPECT_EQ(VALUE_COUNT, handle_result_count);
|
||||
|
||||
stop_push_worker(PUSH_THREAD_NUM);
|
||||
derived2.destroy();
|
||||
ob_free(datas);
|
||||
}
|
||||
|
||||
//////////////////////// Boundary condition testing //////////////////////////////////////////
// ObMapQueueThread init failure
TEST_F(TestObMapQueueThread, init_failed)
|
||||
{
|
||||
QueueThread queue_thread;
|
||||
EXPECT_EQ(OB_SUCCESS, queue_thread.init(THREAD_NUM, label));
|
||||
EXPECT_EQ(OB_INIT_TWICE, queue_thread.init(THREAD_NUM, label));
|
||||
queue_thread.destroy();
|
||||
|
||||
// MAX_THREAD_NUM = 16
|
||||
const int64_t INVALID_THREAD_NUM1 = 0;
|
||||
const int64_t INVALID_THREAD_NUM2 = 17;
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, queue_thread.init(INVALID_THREAD_NUM1, label));
|
||||
EXPECT_EQ(OB_INVALID_ARGUMENT, queue_thread.init(INVALID_THREAD_NUM2, label));
|
||||
EXPECT_EQ(OB_SUCCESS, queue_thread.init(THREAD_NUM, label));
|
||||
queue_thread.destroy();
|
||||
}
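
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the committed test): the usage pattern the tests above
// exercise, reduced to its core. MySketchWorker and map_queue_thread_usage_sketch are
// hypothetical names introduced here; QueueThread, THREAD_NUM, label and the handle()
// signature are taken from this file.
class MySketchWorker : public QueueThread
{
public:
  // Tasks pushed with the same hash value are always dispatched to the same thread_index.
  virtual int handle(void *data, const int64_t thread_index, volatile bool &stop_flag)
  {
    stop_flag = stop_flag; // unused here
    LOG_DEBUG("handle task", KP(data), K(thread_index));
    return OB_SUCCESS;
  }
};

void map_queue_thread_usage_sketch()
{
  MySketchWorker worker;
  if (OB_SUCCESS == worker.init(THREAD_NUM, label) && OB_SUCCESS == worker.start()) {
    static int64_t task = 0;
    (void)worker.push(&task, 0 /* hash value selects the target thread */);
    worker.stop();
    worker.destroy();
  }
}
// ---------------------------------------------------------------------------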
|
||||
|
||||
}//end of unittest
|
||||
}//end of oceanbase

int main(int argc, char **argv)
{
// ObLogger::get_logger().set_mod_log_levels("ALL.*:DEBUG, TLOG.*:DEBUG");
// testing::InitGoogleTest(&argc,argv);
// testing::FLAGS_gtest_filter = "DO_NOT_RUN";
int ret = 1;
ObLogger &logger = ObLogger::get_logger();
logger.set_file_name("test_ob_map_queue_thread.log", true);
logger.set_log_level(OB_LOG_LEVEL_INFO);
testing::InitGoogleTest(&argc, argv);
ret = RUN_ALL_TESTS();
return ret;
}
73
unittest/obcdc/test_ob_seq_thread.cpp
Normal file
73
unittest/obcdc/test_ob_seq_thread.cpp
Normal file
@ -0,0 +1,73 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/

#include <gtest/gtest.h>
#include "ob_seq_thread.h"
namespace oceanbase
{
namespace common
{
class MyClass {};

class CThread : public ObSeqThread<256, MyClass>
{
public:
CThread() {}
virtual ~CThread() {}

public:
virtual int handle(void *task, const int64_t task_seq, const int64_t thread_index, volatile bool &stop_flag)
{
if (! stop_flag) {
EXPECT_EQ(task_seq + 1, (int64_t)task);
}
UNUSED(thread_index);
return 0;
}
};

class TestSeqThread : public ::testing::Test
{
public:
TestSeqThread() {}
~TestSeqThread() {}

void SetUp() {}
void TearDown() {}
};

TEST_F(TestSeqThread, basic)
{
CThread thread;
// Invalid parameters
EXPECT_EQ(OB_INVALID_ARGUMENT, thread.init(257, 100));
EXPECT_EQ(OB_INVALID_ARGUMENT, thread.init(0, 0));

EXPECT_EQ(OB_SUCCESS, thread.init(256, 10000));
EXPECT_EQ(OB_SUCCESS, thread.start());
for (int64_t index = 0; index < 1000; index++) {
EXPECT_EQ(OB_SUCCESS, thread.push((void*)(index + 1), index, 0));
}
sleep(1);
thread.stop();
EXPECT_EQ(true, thread.is_stoped());
}
}
}

int main(int argc, char **argv)
{
oceanbase::common::ObLogger::get_logger().set_log_level("INFO");
OB_LOGGER.set_log_level("INFO");
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
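
// ---------------------------------------------------------------------------
// Illustrative note (not part of this commit): in the basic test above, task pointer (index + 1)
// is pushed together with sequence number index, so the EXPECT_EQ(task_seq + 1, (int64_t)task)
// assertion in CThread::handle() verifies that every task reaches handle() together with the
// sequence number it was pushed with. A minimal driver, reusing only interfaces shown above
// (seq_thread_sketch is a hypothetical name):
void seq_thread_sketch()
{
  oceanbase::common::CThread thread;
  if (oceanbase::common::OB_SUCCESS == thread.init(256, 10000)
      && oceanbase::common::OB_SUCCESS == thread.start()) {
    for (int64_t index = 0; index < 16; index++) {
      // push(task, task_seq, last argument as in the test above)
      (void)thread.push((void *)(index + 1), index, 0);
    }
    thread.stop();
  }
}
// ---------------------------------------------------------------------------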
393
unittest/obcdc/test_small_arena.cpp
Normal file
393
unittest/obcdc/test_small_arena.cpp
Normal file
@ -0,0 +1,393 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "ob_log_utils.h" // current_time
|
||||
#include "ob_small_arena.h"
|
||||
|
||||
#include "lib/allocator/ob_concurrent_fifo_allocator.h" // ObConcurrentFIFOAllocator
|
||||
|
||||
#define ALLOC_AND_CHECK(size) ALLOC_ALIGN_AND_CHECK(sizeof(void*), size)
|
||||
|
||||
#define ALLOC_ALIGN_AND_CHECK(align_size, size) \
|
||||
do { \
|
||||
int64_t alloc_size = (size); \
|
||||
int64_t align = (align_size); \
|
||||
int64_t max_small_size = MAX_SMALL_ALLOC_SIZE(align); \
|
||||
void *ptr = sa.alloc_aligned(alloc_size, align); \
|
||||
\
|
||||
ASSERT_TRUE(NULL != ptr); \
|
||||
EXPECT_EQ(0, reinterpret_cast<int64_t>(ptr) & (align - 1)); \
|
||||
\
|
||||
if (alloc_size > max_small_size) { \
|
||||
large_alloc_count++; \
|
||||
} else { \
|
||||
small_alloc_count++; \
|
||||
} \
|
||||
\
|
||||
EXPECT_EQ(small_alloc_count, sa.get_small_alloc_count()); \
|
||||
EXPECT_EQ(large_alloc_count, sa.get_large_alloc_count()); \
|
||||
\
|
||||
((char *)ptr)[alloc_size - 1] = 'a'; \
|
||||
} while (0)
|
||||
|
||||
#define MAX_SMALL_ALLOC_SIZE(align) (MAX_SMALL_ALLOC_SIZE_WITHOUT_ALIGN - align + 1)
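
// For example, ALLOC_AND_CHECK uses sizeof(void*) alignment (8 bytes on a 64-bit build), so the
// largest request that still counts as a small allocation is
// (SMALL_ARENA_PAGE_SIZE - ObSmallArena::SMALL_PAGE_HEADER_SIZE) - 8 + 1 bytes; anything larger
// is expected to show up in get_large_alloc_count() instead.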
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace liboblog
|
||||
{
|
||||
|
||||
static const int64_t SMALL_ARENA_PAGE_SIZE = 1024;
|
||||
static const int64_t MAX_SMALL_ALLOC_SIZE_WITHOUT_ALIGN = SMALL_ARENA_PAGE_SIZE - ObSmallArena::SMALL_PAGE_HEADER_SIZE;
|
||||
static const int64_t PAGE_SIZE = 1024;
|
||||
|
||||
using namespace common;
|
||||
|
||||
// TODO: add multi thread test
|
||||
class TestSmallArena : public ::testing::Test
|
||||
{
|
||||
public:
|
||||
TestSmallArena() {}
|
||||
~TestSmallArena() {}
|
||||
|
||||
virtual void SetUp();
|
||||
virtual void TearDown();
|
||||
|
||||
public:
|
||||
ObConcurrentFIFOAllocator large_allocator_;
|
||||
static const uint64_t tenant_id_ = 0;
|
||||
};
|
||||
|
||||
void TestSmallArena::SetUp()
|
||||
{
|
||||
const static int64_t LARGE_PAGE_SIZE = (1LL << 26);
|
||||
const static int64_t LARGE_TOTAL_LIMIT = (1LL << 34);
|
||||
const static int64_t LARGE_HOLD_LIMIT = LARGE_PAGE_SIZE;
|
||||
ASSERT_EQ(OB_SUCCESS, large_allocator_.init(LARGE_TOTAL_LIMIT, LARGE_HOLD_LIMIT, LARGE_PAGE_SIZE));
|
||||
|
||||
srandom((unsigned int)get_timestamp());
|
||||
}
|
||||
|
||||
void TestSmallArena::TearDown()
|
||||
{
|
||||
large_allocator_.destroy();
|
||||
}
|
||||
|
||||
TEST_F(TestSmallArena, smoke_test)
|
||||
{
|
||||
ObSmallArena sa;
|
||||
int64_t small_alloc_count = 0;
|
||||
int64_t large_alloc_count = 0;
|
||||
|
||||
sa.set_allocator(PAGE_SIZE, large_allocator_);
|
||||
|
||||
ALLOC_AND_CHECK(8);
|
||||
ALLOC_AND_CHECK(16);
|
||||
ALLOC_AND_CHECK(256);
|
||||
ALLOC_AND_CHECK(512);
|
||||
ALLOC_AND_CHECK(17 + 8);
|
||||
ALLOC_AND_CHECK(17 + 16);
|
||||
ALLOC_AND_CHECK(17 + 256);
|
||||
ALLOC_AND_CHECK(17 + 512);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 8);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 16);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 256);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 512);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 17 + 8);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 17 + 16);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 17 + 256);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 17 + 512);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 8);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 16);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 256);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 512);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 8);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 16);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 256);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 512);
|
||||
|
||||
sa.reset();
|
||||
small_alloc_count = 0;
|
||||
large_alloc_count = 0;
|
||||
|
||||
ALLOC_AND_CHECK(8);
|
||||
ALLOC_AND_CHECK(16);
|
||||
ALLOC_AND_CHECK(256);
|
||||
ALLOC_AND_CHECK(512);
|
||||
ALLOC_AND_CHECK(17 + 8);
|
||||
ALLOC_AND_CHECK(17 + 16);
|
||||
ALLOC_AND_CHECK(17 + 256);
|
||||
ALLOC_AND_CHECK(17 + 512);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 8);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 16);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 256);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 512);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 17 + 8);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 17 + 16);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 17 + 256);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE + 17 + 512);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 8);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 16);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 256);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 512);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 8);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 16);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 256);
|
||||
ALLOC_AND_CHECK((1<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 512);
|
||||
|
||||
sa.reset();
|
||||
small_alloc_count = 0;
|
||||
large_alloc_count = 0;
|
||||
}
|
||||
|
||||
TEST_F(TestSmallArena, alloc_small)
|
||||
{
|
||||
static const int64_t TEST_COUNT = 10;
|
||||
int64_t max_alloc_size = MAX_SMALL_ALLOC_SIZE(8);
|
||||
int64_t small_alloc_count = 0;
|
||||
int64_t large_alloc_count = 0;
|
||||
ObSmallArena sa;
|
||||
|
||||
sa.set_allocator(PAGE_SIZE, large_allocator_);
|
||||
|
||||
for (int i = 0; i < TEST_COUNT; i++) {
|
||||
ALLOC_AND_CHECK(8);
|
||||
ALLOC_AND_CHECK(16);
|
||||
ALLOC_AND_CHECK(256);
|
||||
ALLOC_AND_CHECK(512);
|
||||
ALLOC_AND_CHECK(17 + 8);
|
||||
ALLOC_AND_CHECK(17 + 16);
|
||||
ALLOC_AND_CHECK(17 + 256);
|
||||
ALLOC_AND_CHECK(17 + 512);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE - 15);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE - 16);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE - 17);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE - 64);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE - 67);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE - 128);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE - 129);
|
||||
|
||||
ALLOC_AND_CHECK(random() % (max_alloc_size + 1));
|
||||
}
|
||||
|
||||
sa.reset();
|
||||
small_alloc_count = 0;
|
||||
large_alloc_count = 0;
|
||||
|
||||
for (int i = 0; i < TEST_COUNT; i++) {
|
||||
ALLOC_AND_CHECK(8);
|
||||
ALLOC_AND_CHECK(16);
|
||||
ALLOC_AND_CHECK(256);
|
||||
ALLOC_AND_CHECK(512);
|
||||
ALLOC_AND_CHECK(17 + 8);
|
||||
ALLOC_AND_CHECK(17 + 16);
|
||||
ALLOC_AND_CHECK(17 + 256);
|
||||
ALLOC_AND_CHECK(17 + 512);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE - 15);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE - 16);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE - 17);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE - 64);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE - 67);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE - 128);
|
||||
ALLOC_AND_CHECK(SMALL_ARENA_PAGE_SIZE - 129);
|
||||
|
||||
ALLOC_AND_CHECK(random() % (max_alloc_size + 1));
|
||||
}
|
||||
|
||||
sa.reset();
|
||||
small_alloc_count = 0;
|
||||
large_alloc_count = 0;
|
||||
}
|
||||
|
||||
TEST_F(TestSmallArena, alloc_large)
|
||||
{
|
||||
static const int64_t TEST_COUNT = 10;
|
||||
int64_t max_alloc_size = (1 << 22);
|
||||
int64_t min_alloc_size = MAX_SMALL_ALLOC_SIZE(8) + 1;
|
||||
int64_t small_alloc_count = 0;
|
||||
int64_t large_alloc_count = 0;
|
||||
ObSmallArena sa;
|
||||
|
||||
sa.set_allocator(PAGE_SIZE, large_allocator_);
|
||||
|
||||
for (int i = 0; i < TEST_COUNT; i++) {
|
||||
ALLOC_AND_CHECK(min_alloc_size + 0);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 1);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 2);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 4);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 8);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 16);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 256);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 512);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 17 + 8);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 17 + 16);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 17 + 256);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 17 + 512);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 1 * SMALL_ARENA_PAGE_SIZE);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 2 * SMALL_ARENA_PAGE_SIZE);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 3 * SMALL_ARENA_PAGE_SIZE);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 4 * SMALL_ARENA_PAGE_SIZE);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 5 * SMALL_ARENA_PAGE_SIZE);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 6 * SMALL_ARENA_PAGE_SIZE);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 7 * SMALL_ARENA_PAGE_SIZE);
|
||||
ALLOC_AND_CHECK((1LL << 10) + 11);
|
||||
ALLOC_AND_CHECK((1LL << 12) + 13);
|
||||
ALLOC_AND_CHECK((1LL << 18) + 17);
|
||||
ALLOC_AND_CHECK((1LL << 19) + 19);
|
||||
ALLOC_AND_CHECK((1LL << 20) + 7);
|
||||
ALLOC_AND_CHECK((1LL << 21) + 3);
|
||||
|
||||
ALLOC_AND_CHECK((random() % (max_alloc_size)) + min_alloc_size);
|
||||
}
|
||||
|
||||
sa.reset();
|
||||
small_alloc_count = 0;
|
||||
large_alloc_count = 0;
|
||||
|
||||
for (int i = 0; i < TEST_COUNT; i++) {
|
||||
ALLOC_AND_CHECK(min_alloc_size + 0);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 1);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 2);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 4);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 8);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 16);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 256);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 512);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 17 + 8);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 17 + 16);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 17 + 256);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 17 + 512);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 1 * SMALL_ARENA_PAGE_SIZE);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 2 * SMALL_ARENA_PAGE_SIZE);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 3 * SMALL_ARENA_PAGE_SIZE);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 4 * SMALL_ARENA_PAGE_SIZE);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 5 * SMALL_ARENA_PAGE_SIZE);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 6 * SMALL_ARENA_PAGE_SIZE);
|
||||
ALLOC_AND_CHECK(min_alloc_size + 7 * SMALL_ARENA_PAGE_SIZE);
|
||||
ALLOC_AND_CHECK((1LL << 10) + 11);
|
||||
ALLOC_AND_CHECK((1LL << 12) + 13);
|
||||
ALLOC_AND_CHECK((1LL << 18) + 17);
|
||||
ALLOC_AND_CHECK((1LL << 19) + 19);
|
||||
ALLOC_AND_CHECK((1LL << 20) + 7);
|
||||
ALLOC_AND_CHECK((1LL << 21) + 3);
|
||||
|
||||
ALLOC_AND_CHECK((random() % (max_alloc_size)) + min_alloc_size);
|
||||
}
|
||||
|
||||
sa.reset();
|
||||
small_alloc_count = 0;
|
||||
large_alloc_count = 0;
|
||||
}
|
||||
|
||||
TEST_F(TestSmallArena, alloc_align)
|
||||
{
|
||||
int64_t small_alloc_count = 0;
|
||||
int64_t large_alloc_count = 0;
|
||||
ObSmallArena sa;
|
||||
|
||||
sa.set_allocator(PAGE_SIZE, large_allocator_);
|
||||
|
||||
ALLOC_ALIGN_AND_CHECK(1, 4);
|
||||
ALLOC_ALIGN_AND_CHECK(16, 8);
|
||||
ALLOC_ALIGN_AND_CHECK(32, 16);
|
||||
ALLOC_ALIGN_AND_CHECK(64, 256);
|
||||
ALLOC_ALIGN_AND_CHECK(128, 512);
|
||||
ALLOC_ALIGN_AND_CHECK(16, 17 + 8);
|
||||
ALLOC_ALIGN_AND_CHECK(32, 17 + 16);
|
||||
ALLOC_ALIGN_AND_CHECK(64, 17 + 256);
|
||||
ALLOC_ALIGN_AND_CHECK(128, 17 + 512);
|
||||
ALLOC_ALIGN_AND_CHECK(16, SMALL_ARENA_PAGE_SIZE + 8);
|
||||
ALLOC_ALIGN_AND_CHECK(32, SMALL_ARENA_PAGE_SIZE + 16);
|
||||
ALLOC_ALIGN_AND_CHECK(64, SMALL_ARENA_PAGE_SIZE + 256);
|
||||
ALLOC_ALIGN_AND_CHECK(128, SMALL_ARENA_PAGE_SIZE + 512);
|
||||
ALLOC_ALIGN_AND_CHECK(16, SMALL_ARENA_PAGE_SIZE + 17 + 8);
|
||||
ALLOC_ALIGN_AND_CHECK(32, SMALL_ARENA_PAGE_SIZE + 17 + 16);
|
||||
ALLOC_ALIGN_AND_CHECK(64, SMALL_ARENA_PAGE_SIZE + 17 + 256);
|
||||
ALLOC_ALIGN_AND_CHECK(128, SMALL_ARENA_PAGE_SIZE + 17 + 512);
|
||||
ALLOC_ALIGN_AND_CHECK(16, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 8);
|
||||
ALLOC_ALIGN_AND_CHECK(32, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 16);
|
||||
ALLOC_ALIGN_AND_CHECK(64, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 256);
|
||||
ALLOC_ALIGN_AND_CHECK(128, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 512);
|
||||
ALLOC_ALIGN_AND_CHECK(16, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 8);
|
||||
ALLOC_ALIGN_AND_CHECK(32, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 16);
|
||||
ALLOC_ALIGN_AND_CHECK(64, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 256);
|
||||
ALLOC_ALIGN_AND_CHECK(128, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 512);
|
||||
|
||||
sa.reset();
|
||||
small_alloc_count = 0;
|
||||
large_alloc_count = 0;
|
||||
|
||||
ALLOC_ALIGN_AND_CHECK(1, 4);
|
||||
ALLOC_ALIGN_AND_CHECK(16, 8);
|
||||
ALLOC_ALIGN_AND_CHECK(32, 16);
|
||||
ALLOC_ALIGN_AND_CHECK(64, 256);
|
||||
ALLOC_ALIGN_AND_CHECK(128, 512);
|
||||
ALLOC_ALIGN_AND_CHECK(16, 17 + 8);
|
||||
ALLOC_ALIGN_AND_CHECK(32, 17 + 16);
|
||||
ALLOC_ALIGN_AND_CHECK(64, 17 + 256);
|
||||
ALLOC_ALIGN_AND_CHECK(128, 17 + 512);
|
||||
ALLOC_ALIGN_AND_CHECK(16, SMALL_ARENA_PAGE_SIZE + 8);
|
||||
ALLOC_ALIGN_AND_CHECK(32, SMALL_ARENA_PAGE_SIZE + 16);
|
||||
ALLOC_ALIGN_AND_CHECK(64, SMALL_ARENA_PAGE_SIZE + 256);
|
||||
ALLOC_ALIGN_AND_CHECK(128, SMALL_ARENA_PAGE_SIZE + 512);
|
||||
ALLOC_ALIGN_AND_CHECK(16, SMALL_ARENA_PAGE_SIZE + 17 + 8);
|
||||
ALLOC_ALIGN_AND_CHECK(32, SMALL_ARENA_PAGE_SIZE + 17 + 16);
|
||||
ALLOC_ALIGN_AND_CHECK(64, SMALL_ARENA_PAGE_SIZE + 17 + 256);
|
||||
ALLOC_ALIGN_AND_CHECK(128, SMALL_ARENA_PAGE_SIZE + 17 + 512);
|
||||
ALLOC_ALIGN_AND_CHECK(16, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 8);
|
||||
ALLOC_ALIGN_AND_CHECK(32, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 16);
|
||||
ALLOC_ALIGN_AND_CHECK(64, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 256);
|
||||
ALLOC_ALIGN_AND_CHECK(128, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 512);
|
||||
ALLOC_ALIGN_AND_CHECK(16, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 8);
|
||||
ALLOC_ALIGN_AND_CHECK(32, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 16);
|
||||
ALLOC_ALIGN_AND_CHECK(64, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 256);
|
||||
ALLOC_ALIGN_AND_CHECK(128, (1LL<<21) + SMALL_ARENA_PAGE_SIZE + 17 + 512);
|
||||
|
||||
sa.reset();
|
||||
small_alloc_count = 0;
|
||||
large_alloc_count = 0;
|
||||
}
|
||||
|
||||
TEST_F(TestSmallArena, init_err)
|
||||
{
|
||||
void *ptr = NULL;
|
||||
ObSmallArena sa;
|
||||
ptr = sa.alloc(8); EXPECT_TRUE(NULL == ptr);
|
||||
|
||||
sa.set_allocator(-1, large_allocator_);
|
||||
ptr = sa.alloc(8); EXPECT_TRUE(NULL == ptr);
|
||||
|
||||
sa.reset();
|
||||
}
|
||||
|
||||
TEST_F(TestSmallArena, invalid_args)
|
||||
{
|
||||
void *ptr = NULL;
|
||||
ObSmallArena sa;
|
||||
sa.set_allocator(PAGE_SIZE, large_allocator_);
|
||||
ptr = sa.alloc(-1); EXPECT_TRUE(NULL == ptr);
|
||||
ptr = sa.alloc_aligned(1,3); EXPECT_TRUE(NULL == ptr);
|
||||
ptr = sa.alloc_aligned(1, 1024); EXPECT_TRUE(NULL == ptr);
|
||||
sa.reset();
|
||||
}
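
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the committed test): the allocation pattern the tests above
// exercise. Requests no larger than MAX_SMALL_ALLOC_SIZE(align) are served from the arena's own
// small pages, larger requests are forwarded to the ObConcurrentFIFOAllocator passed to
// set_allocator(), and get_small_alloc_count()/get_large_alloc_count() count the two paths.
// small_arena_sketch is a hypothetical name; only interfaces used by the tests appear below.
void small_arena_sketch(ObConcurrentFIFOAllocator &large_allocator)
{
  ObSmallArena sa;
  sa.set_allocator(PAGE_SIZE, large_allocator);

  void *small_ptr = sa.alloc(64);                        // small path
  void *aligned_ptr = sa.alloc_aligned(64, 16);          // small path, 16-byte aligned
  void *large_ptr = sa.alloc(SMALL_ARENA_PAGE_SIZE * 2); // large path
  (void)small_ptr;
  (void)aligned_ptr;
  (void)large_ptr;

  // Release everything owned by the arena in one shot, as the tests do between rounds.
  sa.reset();
}
// ---------------------------------------------------------------------------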

} // ns liboblog
} // ns oceanbase

int main(int argc, char **argv)
{
oceanbase::common::ObLogger::get_logger().set_log_level("DEBUG");
OB_LOGGER.set_log_level("DEBUG");
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
474
unittest/obcdc/test_sp_trans_log_generator.h
Normal file
474
unittest/obcdc/test_sp_trans_log_generator.h
Normal file
@ -0,0 +1,474 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
|
||||
#include "share/ob_define.h"
|
||||
#include "storage/ob_storage_log_type.h"
|
||||
#include "storage/transaction/ob_trans_log.h"
|
||||
|
||||
#include "obcdc/src/ob_log_instance.h"
|
||||
#include "obcdc/src/ob_log_utils.h" // get_timestamp
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
using namespace transaction;
|
||||
using namespace storage;
|
||||
using namespace clog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
// prepare log
|
||||
static const int64_t SP_PREPARE_TIMESTAMP = 10 * 1000 * 1000;
|
||||
// commit log
|
||||
static const int64_t SP_GLOBAL_TRANS_VERSION = 100;
|
||||
|
||||
// SP Transaction log parameters
|
||||
struct TransParam2
|
||||
{
|
||||
ObPartitionKey pkey_;
|
||||
ObTransID trans_id_;
|
||||
ObStartTransParam trans_param_;
|
||||
};
|
||||
|
||||
// Sp Transaction Log Generator
|
||||
class TransLogGenerator2
|
||||
{
|
||||
public:
|
||||
TransLogGenerator2()
|
||||
: param_(),
|
||||
redo_(),
|
||||
commit_(),
|
||||
abort_()
|
||||
{ }
|
||||
virtual ~TransLogGenerator2() { }
|
||||
public:
|
||||
void next_trans(const TransParam2 ¶m)
|
||||
{
|
||||
param_ = param;
|
||||
}
|
||||
const ObSpTransRedoLog& next_redo(const uint64_t log_id)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
uint64_t tenant_id = 100;
|
||||
const uint64_t cluster_id = 1000;
|
||||
|
||||
redo_.reset();
|
||||
ObVersion active_memstore_version(1);
|
||||
err = redo_.init(OB_LOG_SP_TRANS_REDO, param_.pkey_, param_.trans_id_,
|
||||
tenant_id, log_id, param_.trans_param_, cluster_id, active_memstore_version);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
ObTransMutator &mutator = redo_.get_mutator();
|
||||
if (NULL == mutator.get_mutator_buf()) {
|
||||
mutator.init(true);
|
||||
}
|
||||
const char *data = "fly";
|
||||
char *buf = static_cast<char*>(mutator.alloc(strlen(data)));
|
||||
strcpy(buf, data);
|
||||
|
||||
return redo_;
|
||||
}
|
||||
const ObSpTransCommitLog& next_commit(
|
||||
const ObRedoLogIdArray &all_redos,
|
||||
const uint64_t redo_log_id)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
uint64_t tenant_id = 100;
|
||||
const uint64_t cluster_id = 1000;
|
||||
int64_t checksum = 0;
|
||||
ObVersion active_memstore_version(1);
|
||||
ObString trace_id;
|
||||
|
||||
commit_.reset();
|
||||
err = commit_.init(OB_LOG_SP_TRANS_COMMIT, param_.pkey_, tenant_id, param_.trans_id_,
|
||||
SP_GLOBAL_TRANS_VERSION, checksum, cluster_id, all_redos, param_.trans_param_,
|
||||
active_memstore_version, redo_log_id, trace_id);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
return commit_;
|
||||
}
|
||||
const ObSpTransAbortLog& next_abort()
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
const uint64_t cluster_id = 1000;
|
||||
|
||||
abort_.reset();
|
||||
err = abort_.init(OB_LOG_SP_TRANS_ABORT, param_.pkey_, param_.trans_id_, cluster_id);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
return abort_;
|
||||
}
|
||||
const ObSpTransCommitLog& next_redo_with_commit(
|
||||
const ObRedoLogIdArray &all_redos,
|
||||
const uint64_t redo_log_id)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
uint64_t tenant_id = 100;
|
||||
const uint64_t cluster_id = 1000;
|
||||
int64_t checksum = 0;
|
||||
ObVersion active_memstore_version(1);
|
||||
ObString trace_id;
|
||||
|
||||
commit_.reset();
|
||||
err = commit_.init(OB_LOG_SP_TRANS_COMMIT, param_.pkey_, tenant_id, param_.trans_id_,
|
||||
SP_GLOBAL_TRANS_VERSION, checksum, cluster_id, all_redos, param_.trans_param_,
|
||||
active_memstore_version, redo_log_id, trace_id);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// write redo log
|
||||
ObTransMutator &mutator = commit_.get_mutator();
|
||||
if (NULL == mutator.get_mutator_buf()) {
|
||||
mutator.init(true);
|
||||
}
|
||||
const char *data = "fly";
|
||||
char *buf = static_cast<char*>(mutator.alloc(strlen(data)));
|
||||
strcpy(buf, data);
|
||||
|
||||
return commit_;
|
||||
}
|
||||
private:
|
||||
TransParam2 param_;
|
||||
ObSpTransRedoLog redo_;
|
||||
ObSpTransCommitLog commit_;
|
||||
ObSpTransAbortLog abort_;
|
||||
};
|
||||
|
||||
/*
|
||||
* Responsible for generating Sp transaction logs
|
||||
*/
|
||||
class SpTransLogEntryGeneratorBase
|
||||
{
|
||||
static const ObAddr SCHEDULER;
|
||||
public:
|
||||
// Pass in the ObTransID, which can be used to specify different transactions for the same partition
|
||||
SpTransLogEntryGeneratorBase(const ObPartitionKey &pkey, const ObTransID &trans_id)
|
||||
: pkey_(pkey),
|
||||
log_id_(0),
|
||||
remain_log_cnt_(0),
|
||||
is_commit_(false),
|
||||
param_(),
|
||||
trans_log_gen_(),
|
||||
redos_(),
|
||||
redo_cnt_(0),
|
||||
commit_log_id_(-1),
|
||||
data_len_(-1)
|
||||
{
|
||||
param_.pkey_ = pkey_;
|
||||
param_.trans_id_ = trans_id;
|
||||
param_.trans_param_.set_access_mode(ObTransAccessMode::READ_WRITE);
|
||||
param_.trans_param_.set_isolation(ObTransIsolation::READ_COMMITED);
|
||||
param_.trans_param_.set_type(ObTransType::TRANS_NORMAL);
|
||||
|
||||
buf_ = new char[buf_len_];
|
||||
EXPECT_TRUE(NULL != buf_);
|
||||
}
|
||||
|
||||
virtual ~SpTransLogEntryGeneratorBase()
|
||||
{
|
||||
delete[] buf_;
|
||||
}
|
||||
|
||||
// Generate a normal Sp transaction log sequence: redo, redo, ..., redo, commit/abort
// Call next_trans to start a new transaction
// Specify the number of redo logs, then call next_log_entry repeatedly to get the log entries in order
void next_trans(const int64_t redo_cnt, bool is_commit)
|
||||
{
|
||||
// total log count of normal trans = redo_log_count + 1(commit/abort log)
|
||||
remain_log_cnt_ = redo_cnt + 1;
|
||||
is_commit_ = is_commit;
|
||||
redos_.reset();
|
||||
redo_cnt_ = redo_cnt;
|
||||
commit_log_id_ = -1;
|
||||
trans_log_gen_.next_trans(param_);
|
||||
}
|
||||
|
||||
// Generate a special Sp transaction log sequence: redo, redo, ..., redo, redo-commit (redo and commit logs in the same log entry)
// Call next_trans_with_redo_commit to start a new transaction
// Specify the number of redo logs, then call next_log_entry_with_redo_commit repeatedly to get the log entries in order
void next_trans_with_redo_commit(const int64_t redo_cnt)
|
||||
{
|
||||
next_trans(redo_cnt, true);
|
||||
// redo and commit are in the same log entry, so remain_log_cnt_ is reassigned
remain_log_cnt_ = redo_cnt;
|
||||
}
|
||||
|
||||
// Get next log entry.
|
||||
// get redo, redo..., commit/abort by order
|
||||
int next_log_entry(clog::ObLogEntry &log_entry)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
if (1 < remain_log_cnt_) {
|
||||
next_redo_(log_id_, log_entry);
|
||||
// Store redo id.
|
||||
int err = redos_.push_back(log_id_);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
log_id_ += 1;
|
||||
remain_log_cnt_ -= 1;
|
||||
} else if (1 == remain_log_cnt_ && is_commit_) {
|
||||
commit_log_id_ = log_id_;
|
||||
next_commit_(commit_log_id_, log_entry);
|
||||
log_id_ += 1;
|
||||
remain_log_cnt_ -= 1;
|
||||
} else if (1 == remain_log_cnt_ && !is_commit_) {
|
||||
next_abort_(log_entry);
|
||||
log_id_ += 1;
|
||||
remain_log_cnt_ -= 1;
|
||||
} else {
|
||||
ret = OB_ITER_END;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Get next log entry.
|
||||
// get redo, redo...redo-commit by order
|
||||
int next_log_entry_with_redo_commit(clog::ObLogEntry &log_entry)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
if (1 < remain_log_cnt_) {
|
||||
next_redo_(log_id_, log_entry);
|
||||
// Store redo id.
|
||||
int err = redos_.push_back(log_id_);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
log_id_ += 1;
|
||||
remain_log_cnt_ -= 1;
|
||||
} else if (1 == remain_log_cnt_) {
|
||||
// redo-commit
|
||||
commit_log_id_ = log_id_;
|
||||
next_redo_with_commit_(commit_log_id_, log_entry);
|
||||
log_id_ += 1;
|
||||
remain_log_cnt_ -= 1;
|
||||
} else {
|
||||
ret = OB_ITER_END;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
public:
|
||||
uint64_t get_log_id()
|
||||
{
|
||||
return log_id_;
|
||||
}
|
||||
protected:
|
||||
// Returns the redo log with the specified log_id
|
||||
void next_redo_(const uint64_t redo_log_id, clog::ObLogEntry &log_entry)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
// Gen trans log.
|
||||
const ObSpTransRedoLog &redo = trans_log_gen_.next_redo(redo_log_id);
|
||||
int64_t pos = 0;
|
||||
err = serialization::encode_i64(buf_, buf_len_, pos, OB_LOG_SP_TRANS_REDO);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = serialization::encode_i64(buf_, buf_len_, pos, 0);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = redo.serialize(buf_, buf_len_, pos);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
data_len_ = pos;
|
||||
// Gen entry header.
|
||||
ObLogEntryHeader header;
|
||||
header.generate_header(OB_LOG_SUBMIT, pkey_, redo_log_id, buf_,
|
||||
data_len_, get_timestamp(), get_timestamp(),
|
||||
ObProposalID(), get_timestamp(), ObVersion(0));
|
||||
// Gen log entry.
|
||||
log_entry.generate_entry(header, buf_);
|
||||
}
|
||||
void next_commit_(uint64_t commit_log_id, clog::ObLogEntry &log_entry)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
// Gen trans log.
|
||||
const ObSpTransCommitLog &commit = trans_log_gen_.next_commit(redos_, 1);
|
||||
int64_t pos = 0;
|
||||
err = serialization::encode_i64(buf_, buf_len_, pos, OB_LOG_SP_TRANS_COMMIT);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = serialization::encode_i64(buf_, buf_len_, pos, 0);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = commit.serialize(buf_, buf_len_, pos);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
data_len_ = pos;
|
||||
// Gen entry header.
|
||||
ObLogEntryHeader header;
|
||||
// The submit timestamp uses SP_PREPARE_TIMESTAMP: for sp transactions the partition task stores the prepare
// timestamp (i.e. the commit log timestamp), which is used for correctness verification
header.generate_header(OB_LOG_SUBMIT, pkey_, commit_log_id, buf_,
|
||||
data_len_, get_timestamp(), get_timestamp(),
|
||||
ObProposalID(), SP_PREPARE_TIMESTAMP, ObVersion(0));
|
||||
// Gen log entry.
|
||||
log_entry.generate_entry(header, buf_);
|
||||
}
|
||||
void next_abort_(clog::ObLogEntry &log_entry)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
// Gen trans log.
|
||||
const ObSpTransAbortLog &abort = trans_log_gen_.next_abort();
|
||||
int64_t pos = 0;
|
||||
err = serialization::encode_i64(buf_, buf_len_, pos, OB_LOG_SP_TRANS_ABORT);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = serialization::encode_i64(buf_, buf_len_, pos, 0);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = abort.serialize(buf_, buf_len_, pos);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
data_len_ = pos;
|
||||
// Gen entry header.
|
||||
ObLogEntryHeader header;
|
||||
// The submit timestamp uses SP_PREPARE_TIMESTAMP: for sp transactions the partition task stores the prepare
// timestamp (i.e. the commit log timestamp), which is used for correctness verification
header.generate_header(OB_LOG_SUBMIT, pkey_, log_id_, buf_,
|
||||
data_len_, get_timestamp(), get_timestamp(),
|
||||
ObProposalID(), SP_PREPARE_TIMESTAMP, ObVersion(0));
|
||||
// Gen log entry.
|
||||
log_entry.generate_entry(header, buf_);
|
||||
}
|
||||
void next_redo_with_commit_(uint64_t commit_log_id, clog::ObLogEntry &log_entry)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
// Gen trans log.
|
||||
const ObSpTransCommitLog &commit = trans_log_gen_.next_redo_with_commit(redos_, 1);
|
||||
int64_t pos = 0;
|
||||
err = serialization::encode_i64(buf_, buf_len_, pos, OB_LOG_SP_TRANS_COMMIT);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = serialization::encode_i64(buf_, buf_len_, pos, 0);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
err = commit.serialize(buf_, buf_len_, pos);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
data_len_ = pos;
|
||||
// Gen entry header.
|
||||
ObLogEntryHeader header;
|
||||
// The submit timestamp uses SP_PREPARE_TIMESTAMP: for sp_trans the prepare ts is stored in PartTransTask
// (i.e. the commit log ts), and is used to check correctness.
header.generate_header(OB_LOG_SUBMIT, pkey_, commit_log_id, buf_,
|
||||
data_len_, get_timestamp(), get_timestamp(),
|
||||
ObProposalID(), SP_PREPARE_TIMESTAMP, ObVersion(0));
|
||||
// Gen log entry.
|
||||
log_entry.generate_entry(header, buf_);
|
||||
}
|
||||
protected:
|
||||
// Params.
|
||||
ObPartitionKey pkey_;
|
||||
uint64_t log_id_;
|
||||
int64_t remain_log_cnt_;
|
||||
// Indicates whether the current transaction has been committed or not
|
||||
bool is_commit_;
|
||||
// Gen.
|
||||
TransParam2 param_;
|
||||
TransLogGenerator2 trans_log_gen_;
|
||||
ObRedoLogIdArray redos_;
|
||||
int64_t redo_cnt_;
|
||||
// prepare log id and commit log id are same for sp trans
|
||||
uint64_t commit_log_id_;
|
||||
|
||||
// Buf.
|
||||
int64_t data_len_;
|
||||
static const int64_t buf_len_ = 2 * _M_;
|
||||
char *buf_;
|
||||
};
|
||||
|
||||
/*
|
||||
* test missing redo log, When the commit log is read, the missing redo can be detected
|
||||
*
|
||||
* two case:
|
||||
* 1. redo, redo, redo...redo, commit
|
||||
* 2. redo, redo, redo...redo, redo-commit
|
||||
*/
|
||||
enum SpCaseType
|
||||
{
|
||||
SP_NORMAL_TRAN,
|
||||
SP_REDO_WITH_COMMIT_TRAN
|
||||
};
|
||||
class SpTransLogEntryGenerator1 : public SpTransLogEntryGeneratorBase
|
||||
{
|
||||
public:
|
||||
SpTransLogEntryGenerator1(const ObPartitionKey &pkey, const ObTransID &trans_id)
|
||||
: SpTransLogEntryGeneratorBase(pkey, trans_id),
|
||||
is_first(false),
|
||||
miss_redo_cnt_(0)
|
||||
{}
|
||||
~SpTransLogEntryGenerator1() {}
|
||||
public:
|
||||
// Specify the number of redo logs in redo_cnt, and the number of missing redo logs
|
||||
void next_trans_with_miss_redo(const int64_t redo_cnt,
|
||||
const int64_t miss_redo_cnt,
|
||||
SpCaseType type)
|
||||
{
|
||||
if (SP_NORMAL_TRAN == type) {
|
||||
next_trans(redo_cnt, true);
|
||||
} else if(SP_REDO_WITH_COMMIT_TRAN == type) {
|
||||
next_trans_with_redo_commit(redo_cnt);
|
||||
} else {
|
||||
}
|
||||
miss_redo_cnt_ = miss_redo_cnt;
|
||||
is_first = true;
|
||||
}
|
||||
|
||||
int next_log_entry_missing_redo(SpCaseType type, clog::ObLogEntry &log_entry)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
// add redo log to redos list for miss_redo_cnt_ logs before miss
|
||||
if (is_first) {
|
||||
for (int64_t idx = 0; idx < miss_redo_cnt_; idx++) {
|
||||
next_redo_(log_id_, log_entry);
|
||||
// Store redo id.
|
||||
int err = redos_.push_back(log_id_);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
log_id_ += 1;
|
||||
remain_log_cnt_ -= 1;
|
||||
}
|
||||
is_first = false;
|
||||
}
|
||||
|
||||
if (SP_NORMAL_TRAN == type) {
|
||||
ret = next_log_entry(log_entry);
|
||||
} else if(SP_REDO_WITH_COMMIT_TRAN == type) {
|
||||
ret = next_log_entry_with_redo_commit(log_entry);
|
||||
} else {
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int next_miss_log_entry(const uint64_t miss_log_id, clog::ObLogEntry &miss_log_entry)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
next_redo_(miss_log_id, miss_log_entry);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int get_commit_log_entry(SpCaseType type, clog::ObLogEntry &log_entry)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
|
||||
if (SP_NORMAL_TRAN == type) {
|
||||
next_commit_(commit_log_id_, log_entry);
|
||||
} else if(SP_REDO_WITH_COMMIT_TRAN == type) {
|
||||
next_redo_with_commit_(commit_log_id_, log_entry);
|
||||
} else {
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
private:
|
||||
bool is_first;
|
||||
int64_t miss_redo_cnt_;
|
||||
};
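
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the committed header): how a test would typically drive the
// generators above. sp_generator_sketch is a hypothetical name; pkey and trans_id are assumed to
// be valid values supplied by the caller, and only interfaces declared above are used.
inline void sp_generator_sketch(const ObPartitionKey &pkey, const ObTransID &trans_id)
{
  SpTransLogEntryGeneratorBase gen(pkey, trans_id);
  // A committed sp transaction with 3 redo logs: redo, redo, redo, commit.
  gen.next_trans(3 /* redo_cnt */, true /* is_commit */);

  clog::ObLogEntry log_entry;
  int ret = OB_SUCCESS;
  // next_log_entry() hands out the redo entries first, then the commit (or abort) entry,
  // and returns OB_ITER_END once the transaction is exhausted.
  while (OB_SUCCESS == (ret = gen.next_log_entry(log_entry))) {
    // feed log_entry to the component under test
  }
  EXPECT_EQ(OB_ITER_END, ret);
}
// ---------------------------------------------------------------------------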


}
}
818
unittest/obcdc/test_trans_log_generator.h
Normal file
818
unittest/obcdc/test_trans_log_generator.h
Normal file
@ -0,0 +1,818 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
|
||||
#include "share/ob_define.h"
|
||||
#include "storage/ob_storage_log_type.h"
|
||||
#include "storage/transaction/ob_trans_log.h"
|
||||
|
||||
#include "obcdc/src/ob_log_instance.h"
|
||||
#include "obcdc/src/ob_log_parser.h"
|
||||
|
||||
#include "ob_log_utils.h" // get_timestamp
|
||||
#include "obcdc/src/ob_map_queue.h"
|
||||
#include "lib/oblog/ob_log_module.h"
|
||||
|
||||
using namespace oceanbase;
|
||||
using namespace common;
|
||||
using namespace liboblog;
|
||||
using namespace transaction;
|
||||
using namespace storage;
|
||||
using namespace clog;
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
// prepare log
|
||||
static const int64_t PREPARE_TIMESTAMP = 10 * 1000 * 1000;
|
||||
// commit log
|
||||
static const int64_t GLOBAL_TRANS_VERSION = 100;
|
||||
|
||||
static const int64_t FIXED_PART_COUNT = 6;
|
||||
static const ObPartitionLogInfo FIXED_PART_INFO[FIXED_PART_COUNT] =
|
||||
{
|
||||
ObPartitionLogInfo(ObPartitionKey(1000U, 0, 6), 100, PREPARE_TIMESTAMP),
|
||||
ObPartitionLogInfo(ObPartitionKey(1000U, 1, 6), 100, PREPARE_TIMESTAMP),
|
||||
ObPartitionLogInfo(ObPartitionKey(1000U, 2, 6), 100, PREPARE_TIMESTAMP),
|
||||
ObPartitionLogInfo(ObPartitionKey(1000U, 3, 6), 100, PREPARE_TIMESTAMP),
|
||||
ObPartitionLogInfo(ObPartitionKey(1000U, 4, 6), 100, PREPARE_TIMESTAMP),
|
||||
ObPartitionLogInfo(ObPartitionKey(1000U, 5, 6), 100, PREPARE_TIMESTAMP)
|
||||
};
|
||||
|
||||
/*
|
||||
* TransLog Generator 1.
|
||||
* Generate single partition transaction logs.
|
||||
* Support get trans logs in CORRECT order.
|
||||
* Use:
|
||||
* - Call next_trans(), specify trans params.
|
||||
* - Get logs in correct order: redo, redo, ..., prepare, commit/abort.
|
||||
*/
|
||||
struct TransParam1
|
||||
{
|
||||
// Params used in trans log.
|
||||
ObPartitionKey pkey_;
|
||||
ObTransID trans_id_;
|
||||
ObAddr scheduler_;
|
||||
ObPartitionKey coordinator_;
|
||||
ObPartitionArray participants_;
|
||||
ObStartTransParam trans_param_;
|
||||
};
|
||||
|
||||
class TransLogGenerator1
|
||||
{
|
||||
public:
|
||||
TransLogGenerator1()
|
||||
: param_(),
|
||||
redo_(),
|
||||
prepare_(),
|
||||
commit_(),
|
||||
abort_()
|
||||
{ }
|
||||
virtual ~TransLogGenerator1() { }
|
||||
public:
|
||||
void next_trans(const TransParam1 ¶m)
|
||||
{
|
||||
param_ = param;
|
||||
}
|
||||
const ObTransRedoLog& next_redo(const uint64_t log_id)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
uint64_t tenant_id = 100;
|
||||
const uint64_t cluster_id = 1000;
|
||||
redo_.reset();
|
||||
ObVersion active_memstore_version(1);
|
||||
err = redo_.init(OB_LOG_TRANS_REDO, param_.pkey_, param_.trans_id_,
|
||||
tenant_id, log_id, param_.scheduler_, param_.coordinator_,
|
||||
param_.participants_, param_.trans_param_, cluster_id, active_memstore_version);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
ObTransMutator &mutator = redo_.get_mutator();
|
||||
if (NULL == mutator.get_mutator_buf()) {
|
||||
mutator.init(true);
|
||||
}
|
||||
const char *data = "fly";
|
||||
char *buf = static_cast<char*>(mutator.alloc(strlen(data)));
|
||||
strcpy(buf, data);
|
||||
return redo_;
|
||||
}
|
||||
const ObTransPrepareLog& next_prepare(const ObRedoLogIdArray &all_redos)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
uint64_t tenant_id = 100;
|
||||
const uint64_t cluster_id = 1000;
|
||||
ObString trace_id;
|
||||
prepare_.reset();
|
||||
ObVersion active_memstore_version(1);
|
||||
err = prepare_.init(OB_LOG_TRANS_PREPARE, param_.pkey_, param_.trans_id_,
|
||||
tenant_id, param_.scheduler_, param_.coordinator_,
|
||||
param_.participants_, param_.trans_param_,
|
||||
OB_SUCCESS, all_redos, 0, cluster_id, active_memstore_version, trace_id);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
return prepare_;
|
||||
}
|
||||
const ObTransCommitLog& next_commit(const uint64_t prepare_log_id)
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
const uint64_t cluster_id = 1000;
|
||||
PartitionLogInfoArray ptl_ids;
|
||||
|
||||
ObPartitionLogInfo ptl_id(param_.pkey_, prepare_log_id, PREPARE_TIMESTAMP);
|
||||
err = ptl_ids.push_back(ptl_id);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
|
||||
// push Fixed participant information
|
||||
for (int64_t idx = 0; idx < FIXED_PART_COUNT; ++idx) {
|
||||
err = ptl_ids.push_back(FIXED_PART_INFO[idx]);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
}
|
||||
|
||||
commit_.reset();
|
||||
err = commit_.init(OB_LOG_TRANS_COMMIT, param_.pkey_, param_.trans_id_,
|
||||
ptl_ids, GLOBAL_TRANS_VERSION, 0, cluster_id);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
return commit_;
|
||||
}
|
||||
const ObTransAbortLog& next_abort()
|
||||
{
|
||||
int err = OB_SUCCESS;
|
||||
const uint64_t cluster_id = 1000;
|
||||
PartitionLogInfoArray array;
|
||||
abort_.reset();
|
||||
err = abort_.init(OB_LOG_TRANS_ABORT, param_.pkey_, param_.trans_id_, array, cluster_id);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
return abort_;
|
||||
}
|
||||
private:
|
||||
TransParam1 param_;
|
||||
ObTransRedoLog redo_;
|
||||
ObTransPrepareLog prepare_;
|
||||
ObTransCommitLog commit_;
|
||||
ObTransAbortLog abort_;
|
||||
};
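
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the committed header): the call order described in the
// comment above for TransLogGenerator1 (redo, redo, ..., prepare, commit/abort).
// trans_log_gen_sketch is a hypothetical name; param is assumed to be a fully filled TransParam1.
inline void trans_log_gen_sketch(const TransParam1 &param)
{
  TransLogGenerator1 gen;
  gen.next_trans(param);

  ObRedoLogIdArray redos;
  const uint64_t redo_log_id = 1;
  const ObTransRedoLog &redo = gen.next_redo(redo_log_id);
  EXPECT_EQ(OB_SUCCESS, redos.push_back(redo_log_id));

  const uint64_t prepare_log_id = 2;
  const ObTransPrepareLog &prepare = gen.next_prepare(redos);
  const ObTransCommitLog &commit = gen.next_commit(prepare_log_id);
  (void)redo;
  (void)prepare;
  (void)commit;
}
// ---------------------------------------------------------------------------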
|
||||
|
||||
/*
|
||||
* Transaction Log Entry Generator base
|
||||
* Generate log entries of transactions.
|
||||
*/
|
||||
class TransLogEntryGeneratorBase
|
||||
{
|
||||
static const ObAddr SCHEDULER;
|
||||
public:
|
||||
// Pass in the ObTransID, which can be used to specify different transactions for the same partition
|
||||
TransLogEntryGeneratorBase(const ObPartitionKey &pkey, const ObTransID &trans_id)
|
||||
: pkey_(pkey),
|
||||
log_id_(0),
|
||||
remain_log_cnt_(0),
|
||||
is_commit_(false),
|
||||
param_(),
|
||||
trans_log_gen_(),
|
||||
prepare_id_(0),
|
||||
redos_(),
|
||||
redo_cnt_(0),
|
||||
data_len_(0)
|
||||
{
|
||||
param_.pkey_ = pkey_;
|
||||
param_.trans_id_ = trans_id;
|
||||
param_.scheduler_ = SCHEDULER;
|
||||
param_.coordinator_ = pkey_;
|
||||
int err = param_.participants_.push_back(pkey_);
|
||||
EXPECT_EQ(OB_SUCCESS, err);
|
||||
param_.trans_param_.set_access_mode(ObTransAccessMode::READ_WRITE);
|
||||
param_.trans_param_.set_isolation(ObTransIsolation::READ_COMMITED);
|
||||
param_.trans_param_.set_type(ObTransType::TRANS_NORMAL);
|
||||
|
||||
buf_ = new char[buf_len_];
|
||||
EXPECT_TRUE(NULL != buf_);
|
||||
}
|
||||
|
||||
virtual ~TransLogEntryGeneratorBase()
|
||||
{
|
||||
delete[] buf_;
|
||||
}
|
||||
|
||||
// Generate normal trans. redo, redo...prepare, commit/abort
|
||||
// Start a new trans.
|
||||
// Specify the number of redo entries and call next_log_entry to get them in order
|
||||
void next_trans(const int64_t redo_cnt, bool is_commit)
|
||||
{
|
||||
// total log count of normal trans = redo log count + 2(prepare log + commit/abort log)
|
||||
remain_log_cnt_ = redo_cnt + 2;
|
||||
is_commit_ = is_commit;
|
||||
redos_.reset();
|
||||
redo_cnt_ = redo_cnt;
|
||||
trans_log_gen_.next_trans(param_);
|
||||
}
|
||||
|
||||
// Generate: redo, redo... redo-prepare, commit/abort
|
||||
// Start a new trans.
|
||||
// redo and prepare logs in the same log entry
|
||||
void next_trans_with_redo_prepare(const int64_t redo_cnt, bool is_commit)
|
||||
{
|
||||
next_trans(redo_cnt, is_commit);
|
||||
// redo and prepare in the same log_entry
|
||||
remain_log_cnt_ = redo_cnt + 1;
|
||||
}
|
||||
|
||||
  // Get next log entry.
  // normal trans: get redo, redo...prepare, commit/abort by sequence
  int next_log_entry(clog::ObLogEntry &log_entry)
  {
    int ret = OB_SUCCESS;

    if (2 < remain_log_cnt_) {
      next_redo_(log_id_, log_entry);
      // Store redo id.
      int err = redos_.push_back(log_id_);
      EXPECT_EQ(OB_SUCCESS, err);
      log_id_ += 1;
      remain_log_cnt_ -= 1;
    } else if (2 == remain_log_cnt_) {
      next_prepare_(log_id_, log_entry);
      prepare_id_ = log_id_;
      log_id_ += 1;
      remain_log_cnt_ -= 1;
    } else if (1 == remain_log_cnt_ && is_commit_) {
      next_commit_(log_entry);
      log_id_ += 1;
      remain_log_cnt_ -= 1;
    } else if (1 == remain_log_cnt_ && !is_commit_) {
      next_abort_(log_entry);
      log_id_ += 1;
      remain_log_cnt_ -= 1;
    } else {
      ret = OB_ITER_END;
    }

    return ret;
  }

  // Get next log entry.
  // trans log with redo-prepare: get by order as follows: redo, redo...redo-prepare, commit/abort
  int next_log_entry_with_redo_prepare(clog::ObLogEntry &log_entry)
  {
    int ret = OB_SUCCESS;

    if (2 < remain_log_cnt_) {
      next_redo_(log_id_, log_entry);
      // Store redo id.
      int err = redos_.push_back(log_id_);
      EXPECT_EQ(OB_SUCCESS, err);
      log_id_ += 1;
      remain_log_cnt_ -= 1;
    } else if (2 == remain_log_cnt_) {
      // redo-prepare
      next_redo_with_prepare_(log_id_, log_entry);
      prepare_id_ = log_id_;
      log_id_ += 1;
      remain_log_cnt_ -= 1;
    } else if (1 == remain_log_cnt_ && is_commit_) {
      next_commit_(log_entry);
      log_id_ += 1;
      remain_log_cnt_ -= 1;
    } else if (1 == remain_log_cnt_ && !is_commit_) {
      next_abort_(log_entry);
      log_id_ += 1;
      remain_log_cnt_ -= 1;
    } else {
      ret = OB_ITER_END;
    }

    return ret;
  }
public:
  uint64_t get_log_id()
  {
    return log_id_;
  }
protected:
  // Generate a redo log entry with the specified log_id
  void next_redo_(const uint64_t redo_log_id, clog::ObLogEntry &log_entry)
  {
    int err = OB_SUCCESS;
    // Gen trans log.
    const ObTransRedoLog &redo = trans_log_gen_.next_redo(redo_log_id);
    int64_t pos = 0;
    err = serialization::encode_i64(buf_, buf_len_, pos, OB_LOG_TRANS_REDO);
    EXPECT_EQ(OB_SUCCESS, err);
    err = serialization::encode_i64(buf_, buf_len_, pos, 0);
    EXPECT_EQ(OB_SUCCESS, err);
    err = redo.serialize(buf_, buf_len_, pos);
    EXPECT_EQ(OB_SUCCESS, err);
    data_len_ = pos;
    // Gen entry header.
    ObLogEntryHeader header;
    header.generate_header(OB_LOG_SUBMIT, pkey_, redo_log_id, buf_,
        data_len_, get_timestamp(), get_timestamp(),
        ObProposalID(), get_timestamp(), ObVersion(0));
    // Gen log entry.
    log_entry.generate_entry(header, buf_);
  }
  // Generate the prepare log entry with the specified log_id
  void next_prepare_(const uint64_t prepare_log_id, clog::ObLogEntry &log_entry)
  {
    int err = OB_SUCCESS;
    // Gen trans log.
    const ObTransPrepareLog &prepare = trans_log_gen_.next_prepare(redos_);
    int64_t pos = 0;
    err = serialization::encode_i64(buf_, buf_len_, pos, OB_LOG_TRANS_PREPARE);
    EXPECT_EQ(OB_SUCCESS, err);
    err = serialization::encode_i64(buf_, buf_len_, pos, 0);
    EXPECT_EQ(OB_SUCCESS, err);
    err = prepare.serialize(buf_, buf_len_, pos);
    EXPECT_EQ(OB_SUCCESS, err);
    data_len_ = pos;
    // Gen entry header.
    ObLogEntryHeader header;
    header.generate_header(OB_LOG_SUBMIT, pkey_, prepare_log_id, buf_,
        data_len_, get_timestamp(), get_timestamp(),
        ObProposalID(), PREPARE_TIMESTAMP, ObVersion(0));
    // Gen log entry.
    log_entry.generate_entry(header, buf_);
  }
  void next_commit_(clog::ObLogEntry &log_entry)
  {
    int err = OB_SUCCESS;
    // Gen trans log.
    const ObTransCommitLog &commit = trans_log_gen_.next_commit(prepare_id_);
    int64_t pos = 0;
    err = serialization::encode_i64(buf_, buf_len_, pos, OB_LOG_TRANS_COMMIT);
    EXPECT_EQ(OB_SUCCESS, err);
    err = serialization::encode_i64(buf_, buf_len_, pos, 0);
    EXPECT_EQ(OB_SUCCESS, err);
    err = commit.serialize(buf_, buf_len_, pos);
    EXPECT_EQ(OB_SUCCESS, err);
    data_len_ = pos;
    // Gen entry header.
    ObLogEntryHeader header;
    header.generate_header(OB_LOG_SUBMIT, pkey_, log_id_, buf_,
        data_len_, get_timestamp(), get_timestamp(),
        ObProposalID(), get_timestamp(), ObVersion(0));
    // Gen log entry.
    log_entry.generate_entry(header, buf_);
  }
  void next_abort_(clog::ObLogEntry &log_entry)
  {
    int err = OB_SUCCESS;
    // Gen trans log.
    const ObTransAbortLog &abort = trans_log_gen_.next_abort();
    int64_t pos = 0;
    err = serialization::encode_i64(buf_, buf_len_, pos, OB_LOG_TRANS_ABORT);
    EXPECT_EQ(OB_SUCCESS, err);
    err = serialization::encode_i64(buf_, buf_len_, pos, 0);
    EXPECT_EQ(OB_SUCCESS, err);
    err = abort.serialize(buf_, buf_len_, pos);
    EXPECT_EQ(OB_SUCCESS, err);
    data_len_ = pos;
    // Gen entry header.
    ObLogEntryHeader header;
    header.generate_header(OB_LOG_SUBMIT, pkey_, log_id_, buf_,
        data_len_, get_timestamp(), get_timestamp(),
        ObProposalID(), get_timestamp(), ObVersion(0));
    // Gen log entry.
    log_entry.generate_entry(header, buf_);
  }
  void next_redo_with_prepare_(const uint64_t prepare_log_id, clog::ObLogEntry &log_entry)
  {
    int err = OB_SUCCESS;
    // Gen trans log.
    const ObTransRedoLog &redo = trans_log_gen_.next_redo(prepare_log_id);
    const ObTransPrepareLog &prepare = trans_log_gen_.next_prepare(redos_);
    int64_t pos = 0;
    err = serialization::encode_i64(buf_, buf_len_,
        pos, OB_LOG_TRANS_REDO_WITH_PREPARE);
    EXPECT_EQ(OB_SUCCESS, err);
    err = serialization::encode_i64(buf_, buf_len_, pos, 0);
    EXPECT_EQ(OB_SUCCESS, err);

    err = redo.serialize(buf_, buf_len_, pos);
    EXPECT_EQ(OB_SUCCESS, err);
    err = prepare.serialize(buf_, buf_len_, pos);
    EXPECT_EQ(OB_SUCCESS, err);

    data_len_ = pos;
    // Gen entry header.
    ObLogEntryHeader header;
    header.generate_header(OB_LOG_SUBMIT, pkey_, prepare_log_id, buf_,
        data_len_, get_timestamp(), get_timestamp(),
        ObProposalID(), PREPARE_TIMESTAMP, ObVersion(0));
    // Gen log entry.
    log_entry.generate_entry(header, buf_);
  }
protected:
  // Params.
  ObPartitionKey pkey_;
  uint64_t log_id_;
  int64_t remain_log_cnt_;
  // marks whether the current trans commits or aborts
  bool is_commit_;
  // Gen.
  TransParam1 param_;
  TransLogGenerator1 trans_log_gen_;
  uint64_t prepare_id_;
  ObRedoLogIdArray redos_;
  int64_t redo_cnt_;
  // Buf.
  int64_t data_len_;
  static const int64_t buf_len_ = 2 * _M_;
  char *buf_;
};

const ObAddr TransLogEntryGeneratorBase::SCHEDULER = ObAddr(ObAddr::IPV4, "127.0.0.1", 5566);
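
// Usage sketch (illustrative only, not part of the original test): a typical way
// to drive TransLogEntryGeneratorBase. next_trans() arms one transaction, then
// next_log_entry() yields the redo entries, the prepare entry and finally the
// commit/abort entry, in order, until it returns OB_ITER_END. The variables
// pkey and trans_id below are placeholders prepared by the test case.
//
//   TransLogEntryGeneratorBase gen(pkey, trans_id);
//   clog::ObLogEntry log_entry;
//   gen.next_trans(3/*redo_cnt*/, true/*is_commit*/);
//   while (OB_SUCCESS == gen.next_log_entry(log_entry)) {
//     // feed log_entry to the component under test
//   }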

/*
 * Test missing redo logs: when the prepare log is read, the missing redo logs can be detected.
 *
 * Two cases:
 * 1. redo, redo, redo...prepare, commit/abort
 * 2. redo, redo, redo...redo-prepare, commit/abort
 */
enum CaseType
{
  NORMAL_TRAN,
  REDO_WITH_PREPARE_TRAN
};
class TransLogEntryGenerator1 : public TransLogEntryGeneratorBase
{
public:
  TransLogEntryGenerator1(const ObPartitionKey &pkey, const ObTransID &trans_id)
    : TransLogEntryGeneratorBase(pkey, trans_id),
      is_first(false),
      miss_redo_cnt_(0)
  {}
  ~TransLogEntryGenerator1() {}
public:
  // redo_cnt specifies the total number of redo logs; miss_redo_cnt specifies how many of them are missing
  void next_trans_with_miss_redo(const int64_t redo_cnt, const int64_t miss_redo_cnt,
      bool is_commit, CaseType type)
  {
    if (NORMAL_TRAN == type) {
      next_trans(redo_cnt, is_commit);
    } else if (REDO_WITH_PREPARE_TRAN == type) {
      next_trans_with_redo_prepare(redo_cnt, is_commit);
    } else {
    }
    miss_redo_cnt_ = miss_redo_cnt;
    is_first = true;
  }

  int next_log_entry_missing_redo(CaseType type, clog::ObLogEntry &log_entry)
  {
    int ret = OB_SUCCESS;

    // The first miss_redo_cnt_ redo logs are treated as missing: they are not
    // returned to the caller, but their log ids are still recorded in redos_.
    if (is_first) {
      for (int64_t idx = 0; idx < miss_redo_cnt_; idx++) {
        next_redo_(log_id_, log_entry);
        // Store redo id.
        int err = redos_.push_back(log_id_);
        EXPECT_EQ(OB_SUCCESS, err);
        log_id_ += 1;
        remain_log_cnt_ -= 1;
      }
      is_first = false;
    }

    if (NORMAL_TRAN == type) {
      ret = next_log_entry(log_entry);
    } else if (REDO_WITH_PREPARE_TRAN == type) {
      ret = next_log_entry_with_redo_prepare(log_entry);
    } else {
    }

    return ret;
  }

  int next_miss_log_entry(const uint64_t miss_log_id, clog::ObLogEntry &miss_log_entry)
  {
    int ret = OB_SUCCESS;

    next_redo_(miss_log_id, miss_log_entry);

    return ret;
  }

  int get_prepare_log_entry(CaseType type, clog::ObLogEntry &log_entry)
  {
    int ret = OB_SUCCESS;

    if (NORMAL_TRAN == type) {
      next_prepare_(prepare_id_, log_entry);
    } else if (REDO_WITH_PREPARE_TRAN == type) {
      next_redo_with_prepare_(prepare_id_, log_entry);
    } else {
    }

    return ret;
  }
private:
  bool is_first;
  int64_t miss_redo_cnt_;
};
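
// Usage sketch (illustrative only, not part of the original test): how the
// missing-redo generator is expected to be driven. pkey, trans_id and
// miss_log_id are placeholders for values prepared by the test case.
//
//   TransLogEntryGenerator1 gen(pkey, trans_id);
//   clog::ObLogEntry log_entry;
//   // 5 redo logs in total, the first 2 are withheld as "missing"
//   gen.next_trans_with_miss_redo(5, 2, true/*is_commit*/, NORMAL_TRAN);
//   while (OB_SUCCESS == gen.next_log_entry_missing_redo(NORMAL_TRAN, log_entry)) {
//     // deliver log_entry; the prepare entry will reveal the missing redo ids
//   }
//   // later, fetch a withheld redo entry by its log id:
//   // gen.next_miss_log_entry(miss_log_id, log_entry);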

struct TransLogInfo
{
  // redo info
  int64_t redo_log_cnt_;
  ObLogIdArray redo_log_ids_;

  // prepare info
  int64_t seq_;
  common::ObPartitionKey partition_;
  int64_t prepare_timestamp_;
  ObTransID trans_id_;
  uint64_t prepare_log_id_;
  uint64_t cluster_id_;

  // commit info
  int64_t global_trans_version_;
  PartitionLogInfoArray participants_;

  void reset()
  {
    redo_log_cnt_ = -1;
    redo_log_ids_.reset();
    seq_ = -1;
    partition_.reset();
    prepare_timestamp_ = -1;
    trans_id_.reset();
    prepare_log_id_ = -1;
    cluster_id_ = -1;
    global_trans_version_ = -1;
    participants_.reset();
  }

  void reset(int64_t redo_cnt, ObLogIdArray &redo_log_ids,
      int64_t seq, const ObPartitionKey partition, int64_t prepare_timestamp,
      ObTransID &trans_id, uint64_t prepare_log_id, uint64_t cluster_id,
      uint64_t global_trans_version, PartitionLogInfoArray &participants)
  {
    reset();

    // redo
    redo_log_cnt_ = redo_cnt;
    redo_log_ids_ = redo_log_ids;
    // prepare
    seq_ = seq;
    partition_ = partition;
    prepare_timestamp_ = prepare_timestamp;
    trans_id_ = trans_id;
    prepare_log_id_ = prepare_log_id;
    cluster_id_ = cluster_id;

    // commit
    global_trans_version_ = global_trans_version;
    participants_ = participants;
  }
};
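
// Illustrative sketch (an assumption, not part of the original test): the test
// driver fills a TransLogInfo with the values it fed into the generator and
// hands it to MockParser1 (defined below) via push_into_queue(), so that
// push() can compare the parsed PartTransTask against the expected values.
//
//   TransLogInfo expected;
//   expected.reset(redo_cnt, redo_log_ids, seq, pkey, prepare_tstamp,
//       trans_id, prepare_log_id, cluster_id, global_trans_version, participants);
//   parser.push_into_queue(&expected);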

/*
 * Mock Parser 1.
 * Read Task, revert it immediately, and count Task number.
 */
class MockParser1 : public IObLogParser
{
public:
  MockParser1() : commit_trans_cnt_(0), abort_trans_cnt_(0), info_queue_(), res_queue_() {}
  virtual ~MockParser1()
  {
    info_queue_.destroy();
    res_queue_.destroy();
  }

  int init()
  {
    int ret = OB_SUCCESS;

    if (OB_FAIL(info_queue_.init(MOD_ID))) {
    } else if (OB_FAIL(res_queue_.init(MOD_ID))) {
    } else {
    }

    return ret;
  }

  virtual int start() { return OB_SUCCESS; }
  virtual void stop() { }
  virtual void mark_stop_flag() { }
  virtual int push(PartTransTask *task, const int64_t timeout)
  {
    int ret = OB_SUCCESS;
    UNUSED(timeout);

    if (OB_ISNULL(task)) {
      ret = OB_INVALID_ARGUMENT;
    } else {
      TransLogInfo *trans_log_info = NULL;
      int tmp_ret = OB_SUCCESS;

      if (OB_SUCCESS != (tmp_ret = info_queue_.pop(trans_log_info))) {
        // pop error
      } else if (NULL == trans_log_info) {
        tmp_ret = OB_ERR_UNEXPECTED;
      } else {
        // do nothing
      }

      bool check_result = false;
      if (task->is_normal_trans()) {
        // Verify that the partition task carries the correct data
        if (OB_SUCCESS == tmp_ret) {
          check_result = check_normal_tran(*task, *trans_log_info);
        } else {
          check_result = false;
        }
        task->revert();
        commit_trans_cnt_ += 1;
      } else if (task->is_heartbeat()) {
        // Verify that the partition task carries the correct data
        if (OB_SUCCESS == tmp_ret) {
          check_result = check_abort_tran(*task, *trans_log_info);
        } else {
          check_result = false;
        }
        task->revert();
        abort_trans_cnt_ += 1;
      }

      // Save the validation result; a failed push needs no handling here, the result is checked when it is popped
      if (OB_SUCCESS != (tmp_ret = res_queue_.push(check_result))) {
      }
    }

    return ret;
  }
  virtual int get_pending_task_count(int64_t &task_count)
  {
    UNUSED(task_count);

    return OB_SUCCESS;
  }
  int64_t get_commit_trans_cnt() const { return commit_trans_cnt_; }
  int64_t get_abort_trans_cnt() const { return abort_trans_cnt_; }
  int push_into_queue(TransLogInfo *trans_log_info)
  {
    int ret = OB_SUCCESS;

    if (OB_FAIL(info_queue_.push(trans_log_info))) {
    } else {
    }

    return ret;
  }
  int get_check_result(bool &result)
  {
    int ret = OB_SUCCESS;

    if (OB_FAIL(res_queue_.pop(result))) {
    } else {
    }

    return ret;
  }
private:
  // For PartTransTask correctness validation:
  // for committed transactions, validate the redo/prepare/commit info;
  // for aborted transactions, which are converted to heartbeat information, only seq_, partition_ and prepare_timestamp_ need to be validated
  bool check_normal_tran(PartTransTask &task, TransLogInfo &trans_log_info)
  {
    bool bool_ret = true;

    // redo info
    const SortedRedoLogList &redo_list = task.get_sorted_redo_list();
    if (redo_list.log_num_ != trans_log_info.redo_log_cnt_) {
      bool_ret = false;
    } else {
      RedoLogNode *redo_node = redo_list.head_;
      for (int64_t idx = 0; true == bool_ret && idx < trans_log_info.redo_log_cnt_; ++idx) {
        if (redo_node->start_log_id_ == trans_log_info.redo_log_ids_[idx]) {
          // do nothing
        } else {
          bool_ret = false;
        }

        redo_node = redo_node->next_;
      }
    }

    // prepare info
    if (bool_ret) {
      if (trans_log_info.seq_ == task.get_seq()
          && trans_log_info.partition_ == task.get_partition()
          && trans_log_info.prepare_timestamp_ == task.get_timestamp()
          && trans_log_info.trans_id_ == task.get_trans_id()
          && trans_log_info.prepare_log_id_ == task.get_prepare_log_id()
          && trans_log_info.cluster_id_ == task.get_cluster_id()) {
      } else {
        bool_ret = false;
        OBLOG_LOG(INFO, "compare", K(trans_log_info.seq_), K(task.get_seq()));
        OBLOG_LOG(INFO, "compare", K(trans_log_info.partition_), K(task.get_partition()));
        OBLOG_LOG(INFO, "compare", K(trans_log_info.prepare_timestamp_), K(task.get_timestamp()));
        OBLOG_LOG(INFO, "compare", K(trans_log_info.trans_id_), K(task.get_trans_id()));
        OBLOG_LOG(INFO, "compare", K(trans_log_info.prepare_log_id_), K(task.get_prepare_log_id()));
        OBLOG_LOG(INFO, "compare", K(trans_log_info.cluster_id_), K(task.get_cluster_id()));
      }
    }

    // commit info
    if (bool_ret) {
      if (trans_log_info.global_trans_version_ != task.get_global_trans_version()) {
        bool_ret = false;
      } else {
        const ObPartitionLogInfo *part = task.get_participants();
        const int64_t part_cnt = task.get_participant_count();

        if (trans_log_info.participants_.count() != part_cnt) {
          bool_ret = false;
        } else {
          const ObPartitionLogInfo *pinfo1 = NULL;
          const ObPartitionLogInfo *pinfo2 = NULL;

          for (int64_t idx = 0; true == bool_ret && idx < part_cnt; ++idx) {
            pinfo1 = &trans_log_info.participants_.at(idx);
            pinfo2 = part + idx;

            if (pinfo1->get_partition() == pinfo2->get_partition()
                && pinfo1->get_log_id() == pinfo2->get_log_id()
                && pinfo1->get_log_timestamp() == pinfo2->get_log_timestamp()) {
              // do nothing
            } else {
              bool_ret = false;
            }
          }
        }
      }
    }

    return bool_ret;
  }

  bool check_abort_tran(PartTransTask &task, TransLogInfo &trans_log_info)
  {
    bool bool_ret = true;

    if (trans_log_info.seq_ == task.get_seq()
        && trans_log_info.partition_ == task.get_partition()
        && trans_log_info.prepare_timestamp_ == task.get_timestamp()) {
    } else {
      bool_ret = false;
    }

    return bool_ret;
  }
private:
  static const int64_t MOD_ID = 1;
private:
  int64_t commit_trans_cnt_;
  int64_t abort_trans_cnt_;
  // save TransLogInfo
  ObMapQueue<TransLogInfo*> info_queue_;
  // save verify result
  ObMapQueue<bool> res_queue_;
};
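
// Usage sketch (illustrative only, not part of the original test): the expected
// check sequence around MockParser1. The fetcher under test is assumed to call
// push() with the assembled PartTransTask; the test then reads the verdict back.
//
//   MockParser1 parser;
//   EXPECT_EQ(OB_SUCCESS, parser.init());
//   parser.push_into_queue(&expected_trans_log_info);  // expected values, see TransLogInfo
//   // ... run the fetcher so that it delivers the PartTransTask to parser.push() ...
//   bool passed = false;
//   EXPECT_EQ(OB_SUCCESS, parser.get_check_result(passed));
//   EXPECT_TRUE(passed);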

class MockParser2 : public IObLogParser
{
public:
  MockParser2() : commit_trans_cnt_(0) {}
  virtual ~MockParser2() { }

  virtual int start() { return OB_SUCCESS; }
  virtual void stop() { }
  virtual void mark_stop_flag() { }
  virtual int push(PartTransTask *task, const int64_t timeout)
  {
    UNUSED(timeout);
    if (OB_ISNULL(task)) {
    } else if (task->is_normal_trans()) {
      task->revert();
      commit_trans_cnt_ += 1;
    }

    return OB_SUCCESS;
  }
  virtual int get_pending_task_count(int64_t &task_count)
  {
    UNUSED(task_count);

    return OB_SUCCESS;
  }
  int64_t get_commit_trans_cnt() const { return commit_trans_cnt_; }
private:
  int64_t commit_trans_cnt_;
};


}
}