patch 4.0

This commit is contained in:
wangzelin.wzl
2022-10-24 10:34:53 +08:00
parent 4ad6e00ec3
commit 93a1074b0c
10533 changed files with 2588271 additions and 2299373 deletions

View File

@ -1,8 +1,2 @@
ob_unittest(test_merge_sort_receive)
ob_unittest(test_gi_pump test_gi_pump.cpp
ob_fake_partition_service.cpp
ob_fake_partition_service.h
ob_fake_partition_location_cache.cpp
ob_fake_partition_location_cache.h
test_gi_pump.cpp)
ob_unittest(test_random_affi)
sql_unittest(test_random_affi)
#sql_unittest(test_slice_calc)

View File

@ -1,62 +0,0 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#include "ob_fake_partition_location_cache.h"
using namespace oceanbase::common;
namespace oceanbase {
namespace share {
// Test double for ObPartitionLocationCache: eagerly creates the hash map that
// add_location()/get() were designed to use.
ObFakePartitionLocationCache::ObFakePartitionLocationCache(ObILocationFetcher& location_fetcher)
: ObPartitionLocationCache(location_fetcher)
{
// A constructor cannot return an error code, so a map-creation failure is
// only logged; subsequent map operations would then fail on their own.
int ret = OB_SUCCESS;
if (OB_SUCCESS != (ret = partition_loc_map_.create(10000, ObModIds::OB_SQL_EXECUTOR))) {
SQL_ENG_LOG(WARN, "fail to create location map", K(ret));
}
}
// Nothing to release explicitly; partition_loc_map_ tears itself down.
ObFakePartitionLocationCache::~ObFakePartitionLocationCache()
{}
// Fake location lookup. The real map lookup is intentionally commented out,
// so this always returns OB_SUCCESS without filling `location` — callers in
// unit tests must not rely on the output parameter.
int ObFakePartitionLocationCache::get(const uint64_t table_id, const int64_t partition_id,
ObPartitionLocation& location, const int64_t expire_renew_time, bool&)
{
UNUSED(expire_renew_time);
UNUSED(location);
int ret = OB_SUCCESS;
// Key is still built so re-enabling the lookup below is a one-line change.
ObFakePartitionKey key;
key.table_id_ = table_id;
key.partition_id_ = partition_id;
UNUSED(key);
// if (HASH_EXIST == (ret = partition_loc_map_.get(key, location))) {
// ret = OB_SUCCESS;
//}
return ret;
}
// Stamps `key`'s identifiers onto `location`; the actual map insertion is
// commented out, so the call always returns OB_SUCCESS.
int ObFakePartitionLocationCache::add_location(ObFakePartitionKey key, ObPartitionLocation location)
{
int ret = OB_SUCCESS;
location.set_table_id(key.table_id_);
location.set_partition_id(key.partition_id_);
// Keep partition_cnt > partition_id so the location is internally consistent.
if (location.get_partition_cnt() <= key.partition_id_) {
location.set_partition_cnt(key.partition_id_ + 1);
}
// NOTE(review): is_valid() result is discarded — presumably only kept for its
// side-effect-free sanity check; confirm it can be removed.
location.is_valid();
// if (hash::HASH_INSERT_SUCC == (ret = partition_loc_map_.set(key, location))) {
// ret = OB_SUCCESS;
//}
return ret;
}
} // namespace share
} // namespace oceanbase

View File

@ -1,53 +0,0 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#ifndef OCEANBASE_FAKE_PARTITION_LOCATION_CACHE_
#define OCEANBASE_FAKE_PARTITION_LOCATION_CACHE_
#include "share/partition_table/ob_partition_location_cache.h"
#include "lib/hash/ob_hashmap.h"
namespace oceanbase {
namespace share {
// Minimal partition identifier used by the fake location cache. It provides
// hash() and operator== so it can serve as an ObHashMap key.
class ObFakePartitionKey {
public:
  uint64_t table_id_;
  int64_t partition_id_;
  // Simple additive hash; collisions are acceptable in test code.
  inline int64_t hash() const
  {
    return partition_id_ + table_id_;
  }
  // Keys are equal only when neither identifier differs.
  inline bool operator==(const ObFakePartitionKey& other) const
  {
    return !(table_id_ != other.table_id_ || partition_id_ != other.partition_id_);
  }
};
// Location-cache test double: overrides get() and adds add_location() so unit
// tests can hand-feed partition locations instead of querying meta tables.
class ObFakePartitionLocationCache : public ObPartitionLocationCache {
public:
ObFakePartitionLocationCache(ObILocationFetcher& location_fetcher);
virtual ~ObFakePartitionLocationCache();
// get partition location of a partition
virtual int get(const uint64_t table_id, const int64_t partition_id, ObPartitionLocation& location,
const int64_t expire_renew_time, bool&);
// register a location under the given fake key
int add_location(ObFakePartitionKey key, ObPartitionLocation location);
private:
common::hash::ObHashMap<ObFakePartitionKey, ObPartitionLocation> partition_loc_map_;
DISALLOW_COPY_AND_ASSIGN(ObFakePartitionLocationCache);
};
} // namespace share
} // namespace oceanbase
#endif /* OCEANBASE_FAKE_PARTITION_LOCATION_CACHE_ */

View File

@ -1,529 +0,0 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#define USING_LOG_PREFIX SQL_EXE
#include "ob_fake_partition_service.h"
#include "storage/transaction/ob_trans_service.h"
#include "clog/ob_clog_mgr.h"
using namespace oceanbase::common;
using namespace oceanbase::sql;
namespace oceanbase {
namespace sql {
// Produces the next row from the backing row store, allocating a fresh
// ObNewRow + cell array per call. On success the caller owns `row`.
// Returns OB_ITER_END when the store is exhausted, OB_ALLOCATE_MEMORY_FAILED
// on allocation failure.
int ObFakeTableScanIterator::get_next_row(common::ObNewRow*& row)
{
  int ret = common::OB_SUCCESS;
  common::ObNewRow* cur_row = OB_NEW(ObNewRow, ObModIds::TEST);
  ObObj* objs = (ObObj*)ob_malloc(sizeof(ObObj) * col_num_, ObModIds::TEST);
  if (!cur_row || !objs) {
    ret = OB_ALLOCATE_MEMORY_FAILED;
    SQL_ENG_LOG(WARN, "no memory");
    if (cur_row) {
      OB_DELETE(ObNewRow, ObModIds::TEST, cur_row);
      cur_row = NULL;  // bug fix: clear so the pointer is never read after delete
    }
    if (objs) {
      ob_free(objs);
      objs = NULL;
    }
  } else {
    for (int64_t i = 0; i < col_num_; ++i) {
      new (objs + i) ObObj();
    }
    cur_row->count_ = col_num_;
    cur_row->cells_ = objs;
    if (OB_SUCCESS != (ret = row_store_it_.get_next_row(*cur_row))) {
      if (OB_ITER_END != ret) {
        SQL_ENG_LOG(WARN, "fail to get next row", K(ret));
      }
      // bug fix: the row is not handed to the caller on this path, so release
      // it here instead of leaking one row + cell array per failed call.
      cur_row->cells_ = NULL;
      ob_free(objs);
      OB_DELETE(ObNewRow, ObModIds::TEST, cur_row);
      cur_row = NULL;
    } else {
      row = cur_row;
    }
  }
  // bug fix: original unconditionally dereferenced cur_row here, which is a
  // NULL/freed-pointer read on the failure paths above.
  if (NULL != cur_row && NULL == cur_row->cells_) {
    SQL_ENG_LOG(WARN, "cur_row->cells_ is NULL");
  }
  return ret;
}
} // namespace sql
namespace storage {
// Fake partition service: serves rows from a pre-loaded scanner_ and stubs
// out every storage interface with a success no-op.
ObFakePartitionServiceForSQL::ObFakePartitionServiceForSQL()
: scanner_(), col_num_(), rs_rpc_proxy_(nullptr), pts_rpc_(nullptr)
{}
ObFakePartitionServiceForSQL::~ObFakePartitionServiceForSQL()
{}
// Hands back an iterator over the rows previously loaded into scanner_.
// `param` is only logged; the scan ignores ranges/columns entirely.
int ObFakePartitionServiceForSQL::table_scan(ObTableScanParam& param, ObNewRowIterator*& result)
{
  int ret = OB_SUCCESS;
  ObFakeTableScanIterator* iter = OB_NEW(ObFakeTableScanIterator, ObModIds::TEST);
  // bug fix: original dereferenced the OB_NEW result without a null check.
  if (OB_ISNULL(iter)) {
    ret = OB_ALLOCATE_MEMORY_FAILED;
    SQL_ENG_LOG(WARN, "fail to allocate scan iterator", K(ret));
  } else {
    iter->init(scanner_.begin(), col_num_);
    result = static_cast<ObNewRowIterator*>(iter);
    SQL_ENG_LOG(INFO, "table scan", K(param));
  }
  return ret;
}
// Releases an iterator previously produced by table_scan().
int ObFakePartitionServiceForSQL::revert_scan_iter(ObNewRowIterator* iter)
{
OB_DELETE(ObNewRowIterator, ObModIds::TEST, iter);
SQL_ENG_LOG(INFO, "revert_scan_iter");
return OB_SUCCESS;
}
// The fake service always claims to hold data.
bool ObFakePartitionServiceForSQL::is_empty() const
{
return false;
}
// ---------------------------------------------------------------------------
// Lifecycle and partition-management overrides.
// Every stub below ignores its arguments and reports success so the SQL
// engine unit tests can link against a partition service without storage.
// ---------------------------------------------------------------------------
int ObFakePartitionServiceForSQL::init(const blocksstable::ObStorageEnv& env, const common::ObAddr& self_addr,
ObIPartitionComponentFactory* cp_fty, share::schema::ObMultiVersionSchemaService* schema_service,
share::ObIPartitionLocationCache* location_cache, common::ObIRSCb* rs_cb, rpc::frame::ObReqTransport* req_transport)
{
UNUSED(env);
UNUSED(self_addr);
UNUSED(cp_fty);
UNUSED(schema_service);
UNUSED(location_cache);
UNUSED(rs_cb);
UNUSED(req_transport);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::start()
{
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::destroy()
{
return OB_SUCCESS;
}
void ObFakePartitionServiceForSQL::set_component_service(ObIPartitionComponentFactory& cp_fty)
{
UNUSED(cp_fty);
}
int ObFakePartitionServiceForSQL::load_partition(const char* buf, const int64_t buf_len, int64_t& pos)
{
UNUSED(buf);
UNUSED(buf_len);
UNUSED(pos);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::replay_base_storage_log(
const int64_t log_seq_num, const int64_t subcmd, const char* buf, const int64_t len, int64_t& pos)
{
UNUSED(log_seq_num);
UNUSED(subcmd);
UNUSED(buf);
UNUSED(len);
UNUSED(pos);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::create_partition(const ObPartitionKey& key, const common::ObVersion data_version,
const int64_t replica_num, const common::ObMemberList& mem_list)
{
UNUSED(key);
UNUSED(data_version);
UNUSED(replica_num);
UNUSED(mem_list);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::online_partition()
{
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::remove_partition(const common::ObPartitionKey& key)
{
UNUSED(key);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::remove_orphans()
{
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::freeze()
{
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::on_leader_takeover(const common::ObPartitionKey& partition_key)
{
UNUSED(partition_key);
return OB_SUCCESS;
}
// NOTE(review): returns OB_SUCCESS (0) as the "file id" — callers in these
// tests presumably ignore the value; confirm before reuse.
int64_t ObFakePartitionServiceForSQL::get_min_using_file_id() const
{
return OB_SUCCESS;
}
share::schema::ObMultiVersionSchemaService* ObFakePartitionServiceForSQL::get_schema_service()
{
return NULL;
}
ObIPartitionComponentFactory* ObFakePartitionServiceForSQL::get_cp_fty()
{
return NULL;
}
// ==========================================================================
// transaction service interfaces
// All transaction hooks are success no-ops; the fake runs without a real
// transaction service.
int ObFakePartitionServiceForSQL::start_trans(
const uint64_t tenant_id, const transaction::ObStartTransParam& req, transaction::ObTransDesc& trans_desc)
{
UNUSED(tenant_id);
UNUSED(req);
UNUSED(trans_desc);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::end_trans(bool is_rollback, const transaction::ObTransDesc& trans_desc)
{
UNUSED(is_rollback);
UNUSED(trans_desc);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::start_stmt(
const uint64_t tenant_id, transaction::ObTransDesc& trans_desc, const common::ObPartitionArray& participants)
{
UNUSED(tenant_id);
UNUSED(trans_desc);
UNUSED(participants);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::end_stmt(bool is_rollback, const transaction::ObTransDesc& trans_desc)
{
UNUSED(is_rollback);
UNUSED(trans_desc);
return OB_SUCCESS;
}
// ==========================================================================
// partition storage interfaces
// DML entry points: all are success no-ops that discard their arguments.
#if 0
int ObFakePartitionServiceForSQL::table_scan(ObTableScanParam &param,
common::ObNewRowIterator *&result)
{
UNUSED(param);
UNUSED(result);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::revert_scan_iter(common::ObNewRowIterator *iter)
{
UNUSED(iter);
return OB_SUCCESS;
}
#endif
int ObFakePartitionServiceForSQL::delete_rows(const transaction::ObTransDesc& trans_desc, const int64_t timeout,
const common::ObPartitionKey& pkey, const common::ObIArray<uint64_t>& column_ids,
common::ObNewRowIterator* row_iter, int64_t& affected_rows)
{
UNUSED(trans_desc);
UNUSED(timeout);
UNUSED(pkey);
UNUSED(column_ids);
UNUSED(row_iter);
UNUSED(affected_rows);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::insert_rows(const transaction::ObTransDesc& trans_desc, const int64_t timeout,
const common::ObPartitionKey& pkey, const common::ObIArray<uint64_t>& column_ids,
common::ObNewRowIterator* row_iter, int64_t& affected_rows)
{
UNUSED(trans_desc);
UNUSED(timeout);
UNUSED(pkey);
UNUSED(column_ids);
UNUSED(row_iter);
UNUSED(affected_rows);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::update_rows(const transaction::ObTransDesc& trans_desc, const int64_t timeout,
const common::ObPartitionKey& pkey, const common::ObIArray<uint64_t>& column_ids,
const common::ObIArray<uint64_t>& updated_column_ids, common::ObNewRowIterator* row_iter, int64_t& affected_rows)
{
UNUSED(trans_desc);
UNUSED(timeout);
UNUSED(pkey);
UNUSED(column_ids);
UNUSED(updated_column_ids);
UNUSED(row_iter);
UNUSED(affected_rows);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::lock_rows(const transaction::ObTransDesc& trans_desc, const int64_t timeout,
const common::ObPartitionKey& pkey, common::ObNewRowIterator* row_iter, const ObLockFlag lock_flag,
int64_t& affected_rows)
{
UNUSED(trans_desc);
UNUSED(timeout);
UNUSED(pkey);
UNUSED(row_iter);
UNUSED(lock_flag);
UNUSED(affected_rows);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::fetch_conflict_rows(const transaction::ObTransDesc& trans_desc,
const ObDMLBaseParam& dml_param, const common::ObPartitionKey& pkey,
const common::ObIArray<uint64_t>& in_column_ids, const common::ObIArray<uint64_t>& out_column_ids,
common::ObNewRowIterator& check_row_iter, common::ObIArray<common::ObNewRowIterator*>& dup_row_iters)
{
UNUSED(trans_desc);
UNUSED(dml_param);
UNUSED(pkey);
UNUSED(in_column_ids);
UNUSED(out_column_ids);
UNUSED(check_row_iter);
UNUSED(dup_row_iters);
return OB_SUCCESS;
}
// ==========================================================================
// partition manager interfaces
// Success no-ops; partition/replay queries return nothing.
int ObFakePartitionServiceForSQL::get_all_partitions(common::ObIArray<ObIPartitionGroup*>& partition_list)
{
UNUSED(partition_list);
// literal 0 == OB_SUCCESS; kept as-is
return 0;
}
int ObFakePartitionServiceForSQL::get_partition(const common::ObPartitionKey& pkey, ObIPartitionGroup*& partition)
{
UNUSED(pkey);
UNUSED(partition);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::get_partition_count(int64_t& partition_count) const
{
UNUSED(partition_count);
return OB_SUCCESS;
}
// ==========================================================================
// replay interfaces
int ObFakePartitionServiceForSQL::replay_redo_log(
const common::ObPartitionKey& pkey, const ObStoreCtx& ctx, const int64_t ts, const char* buf, const int64_t size)
{
UNUSED(pkey);
UNUSED(ctx);
UNUSED(ts), UNUSED(buf);
UNUSED(size);
return OB_SUCCESS;
}
transaction::ObTransService* ObFakePartitionServiceForSQL::get_trans_service()
{
return NULL;
}
clog::ObICLogMgr* ObFakePartitionServiceForSQL::get_clog_mgr()
{
return NULL;
}
// ==========================================================================
// major freeze
// Freeze/role/member-list callbacks: all arguments are ignored and every
// call reports success.
int ObFakePartitionServiceForSQL::prepare_major_freeze(
const obrpc::ObPartitionList& partitions, const int64_t frozen_version, const int64_t frozen_timestamp)
{
UNUSED(partitions);
UNUSED(frozen_version);
UNUSED(frozen_timestamp);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::commit_major_freeze(
const obrpc::ObPartitionList& partitions, const int64_t frozen_version, const int64_t frozen_timestamp)
{
UNUSED(partitions);
UNUSED(frozen_version);
UNUSED(frozen_timestamp);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::abort_major_freeze(
const obrpc::ObPartitionList& partitions, const int64_t frozen_version, const int64_t frozen_timestamp)
{
UNUSED(partitions);
UNUSED(frozen_version);
UNUSED(frozen_timestamp);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::set_major_freeze_status(
const common::ObPartitionKey& pkey, const int64_t frozen_version, const int64_t major_freeze_status)
{
UNUSED(pkey);
UNUSED(frozen_version);
UNUSED(major_freeze_status);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::get_major_freeze_status(
const common::ObPartitionKey& pkey, int64_t& frozen_version, int64_t& major_freeze_status)
{
UNUSED(pkey);
UNUSED(frozen_version);
UNUSED(major_freeze_status);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::replay(const ObPartitionKey& partition, const char* log, const int64_t size)
{
UNUSED(partition);
UNUSED(log);
UNUSED(size);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::finish_replay(const ObPartitionKey& partition)
{
UNUSED(partition);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::leader_freeze_success(const ObPartitionKey& pkey, const int64_t freeze_cmd)
{
UNUSED(pkey);
UNUSED(freeze_cmd);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::leader_freeze_fail(const ObPartitionKey& pkey, const int64_t freeze_cmd)
{
UNUSED(pkey);
UNUSED(freeze_cmd);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::follower_freeze_success(const ObPartitionKey& pkey, const int64_t freeze_cmd)
{
UNUSED(pkey);
UNUSED(freeze_cmd);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::follower_freeze_fail(const ObPartitionKey& pkey, const int64_t freeze_cmd)
{
UNUSED(pkey);
UNUSED(freeze_cmd);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::submit_freeze_log_success(const int64_t cmd_type, const ObPartitionKey& pkey)
{
UNUSED(cmd_type);
UNUSED(pkey);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::get_role(const common::ObPartitionKey& pkey, common::ObRole& role) const
{
UNUSED(pkey);
UNUSED(role);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::get_leader_curr_member_list(
const common::ObPartitionKey& pkey, common::ObMemberList& member_list) const
{
UNUSED(pkey);
UNUSED(member_list);
return OB_SUCCESS;
}
int ObFakePartitionServiceForSQL::get_active_memtable_version(const ObPartitionKey& pkey, int64_t& version)
{
UNUSED(pkey);
UNUSED(version);
return OB_SUCCESS;
}
}
/*
 * We cannot simulate the whole procedure of query_range_to_macros: it spans
 * too many other modules. The result of this function is therefore scripted
 * per test case (see ObGIResult in result_set_) and fixed by design.
 *
 * type == OB_GET_MACROS_COUNT_BY_QUERY_RANGE: hands out the next scripted
 *   macro count for the current case via *macros_count.
 * type == OB_GET_BLOCK_RANGE: emits *total_task_count whole-range tasks into
 *   splitted_ranges/split_index; after 5 partitions, advances to the next case.
 */
int ObFakePartitionServiceForGI::query_range_to_macros(common::ObIAllocator& allocator,
    const common::ObPartitionKey& pkey, const common::ObIArray<common::ObStoreRange>& ranges, const int64_t type,
    uint64_t* macros_count, const int64_t* total_task_count, ObIArray<common::ObStoreRange>* splitted_ranges,
    common::ObIArray<int64_t>* split_index)
{
  UNUSED(allocator);
  UNUSED(pkey);
  UNUSED(ranges);
  UNUSED(type);
  UNUSED(macros_count);
  UNUSED(total_task_count);
  UNUSED(splitted_ranges);
  UNUSED(split_index);
  int ret = OB_SUCCESS;
  if (case_idx_ < result_set_.count()) {
    if (type == OB_GET_MACROS_COUNT_BY_QUERY_RANGE) {
      int idx = result_set_.at(case_idx_).macros_count_idx_++;
      // bug fix: guard the out-pointer, mirroring the null checks the
      // OB_GET_BLOCK_RANGE branch already performs.
      if (OB_ISNULL(macros_count)) {
        ret = OB_ERR_UNEXPECTED;
        LOG_WARN("unexpected null macros_count", K(ret));
      } else if (idx >= result_set_.at(case_idx_).macros_count_.count()) {
        ret = OB_ERR_UNEXPECTED;
        LOG_WARN("out of idx",
            K(OB_ERR_UNEXPECTED),
            K(idx),
            K(case_idx_),
            K(result_set_.count()),
            K(result_set_.at(case_idx_).macros_count_.count()));
      } else {
        *macros_count = result_set_.at(case_idx_).macros_count_.at(idx);
      }
    } else if (type == OB_GET_BLOCK_RANGE) {
      if (OB_ISNULL(splitted_ranges) || OB_ISNULL(split_index) || OB_ISNULL(total_task_count)) {
        ret = OB_ERR_UNEXPECTED;
        LOG_WARN("unexpexted null", K(ret), K(splitted_ranges), K(split_index));
      } else if (pkey_idx_ < 5) {
        pkey_idx_++;
        // One whole-range task per requested task slot.
        for (int64_t i = 0; i < *total_task_count && OB_SUCC(ret); ++i) {
          ObStoreRange range;
          range.set_table_id(1);
          range.set_whole_range();
          if (OB_FAIL(splitted_ranges->push_back(range))) {
            LOG_WARN("failed to push back", K(ret));
          } else if (OB_FAIL(split_index->push_back(splitted_ranges->count() - 1))) {
            LOG_WARN("failed to push back", K(ret));
          }
        }
        // All 5 partitions of this case consumed: move on to the next case.
        if (pkey_idx_ == 5) {
          pkey_idx_ = 0;
          case_idx_++;
        }
      }
    }
  }
  // bug fix: propagate the computed result; the original always returned
  // OB_SUCCESS, silently swallowing every error set above.
  return ret;
}
} // namespace storage
} // namespace oceanbase

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,296 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#define USING_LOG_PREFIX SQL
#include <gtest/gtest.h>
#include <stdarg.h>
#include "lib/tbsys.h"
#include "sql/engine/ob_physical_plan_ctx.h"
#include "sql/engine/ob_physical_plan.h"
#include "sql/engine/px/ob_dfo_mgr.h"
#include "sql/engine/px/ob_px_coord.h"
#include "lib/utility/ob_tracepoint.h"
using namespace oceanbase;
using namespace oceanbase::common;
using namespace oceanbase::sql;
using namespace oceanbase::share;
using namespace oceanbase::share::schema;
// Test fixture for ObDFOMgr scheduling-order tests; no per-test state.
class ObDFOMgrTest : public ::testing::Test
{
public:
ObDFOMgrTest();
virtual ~ObDFOMgrTest();
virtual void SetUp();
virtual void TearDown();
private:
// disallow copy
DISALLOW_COPY_AND_ASSIGN(ObDFOMgrTest);
private:
// data members
};
// Fixture lifecycle hooks: intentionally empty, each test builds its own DFO tree.
ObDFOMgrTest::ObDFOMgrTest()
{
}
ObDFOMgrTest::~ObDFOMgrTest()
{
}
void ObDFOMgrTest::SetUp()
{
}
void ObDFOMgrTest::TearDown()
{
}
// Verifies the scheduling order produced for a left-deep DFO tree: at every
// step get_ready_dfos() must return the (child, parent) pair that becomes
// runnable, and OB_ITER_END once the whole tree has finished.
TEST_F(ObDFOMgrTest, left_deep_tree)
{
ObDFOMgr dfo_mgr;
ObDFO hash, prob, join, sort, qc;
ASSERT_EQ(OB_SUCCESS, qc.append_child_dfo(&sort));
ASSERT_EQ(OB_SUCCESS, sort.append_child_dfo(&join));
ASSERT_EQ(OB_SUCCESS, join.append_child_dfo(&hash));
ASSERT_EQ(OB_SUCCESS, join.append_child_dfo(&prob));
hash.set_parent(&join);
prob.set_parent(&join);
join.set_parent(&sort);
sort.set_parent(&qc);
/*
qc
/ <-- edge4
sort
/ <-- edge3
join
edge1--> / \ <-- edge2
hash prob
*/
ASSERT_EQ(OB_SUCCESS, ObDFOSchedOrderGenerator::generate(dfo_mgr, &qc));
ObArray<ObDFO *> dfos;
// First schedulable pair: leftmost leaf and its parent.
ASSERT_EQ(OB_SUCCESS, dfo_mgr.get_ready_dfos(dfos));
ASSERT_TRUE(dfos.count() == 2);
ASSERT_TRUE(dfos.at(0) == &hash);
ASSERT_TRUE(dfos.at(1) == &join);
// Nothing new becomes ready until a running DFO finishes.
ASSERT_EQ(OB_SUCCESS, dfo_mgr.get_ready_dfos(dfos));
ASSERT_TRUE(dfos.count() == 0);
hash.set_state(ObDFOState::FINISH);
ASSERT_EQ(OB_SUCCESS, dfo_mgr.get_ready_dfos(dfos));
ASSERT_EQ(dfos.count(), 2);
ASSERT_TRUE(dfos.at(0) == &prob);
ASSERT_TRUE(dfos.at(1) == &join);
ASSERT_EQ(OB_SUCCESS, dfo_mgr.get_ready_dfos(dfos));
ASSERT_EQ(dfos.count(), 0);
prob.set_state(ObDFOState::FINISH);
ASSERT_EQ(OB_SUCCESS, dfo_mgr.get_ready_dfos(dfos));
ASSERT_TRUE(dfos.count() == 2);
ASSERT_TRUE(dfos.at(0) == &join);
ASSERT_TRUE(dfos.at(1) == &sort);
join.set_state(ObDFOState::FINISH);
ASSERT_EQ(OB_SUCCESS, dfo_mgr.get_ready_dfos(dfos));
ASSERT_TRUE(dfos.count() == 2);
ASSERT_TRUE(dfos.at(0) == &sort);
ASSERT_TRUE(dfos.at(1) == &qc);
sort.set_state(ObDFOState::FINISH);
// Tree exhausted: the manager signals completion.
ASSERT_EQ(OB_ITER_END, dfo_mgr.get_ready_dfos(dfos));
}
// Verifies schedule-order generation on a bushy tree: the generator is
// expected to normalize child order (nlj before h2 under hj) and then emit
// ready pairs bottom-up as states flip to FINISH.
TEST_F(ObDFOMgrTest, normalizer)
{
ObDFOMgr dfo_mgr;
ObDFO h1, h2, prob, nlj, hj, sort, qc;
ASSERT_EQ(OB_SUCCESS, qc.append_child_dfo(&hj));
ASSERT_EQ(OB_SUCCESS, hj.append_child_dfo(&h2));
ASSERT_EQ(OB_SUCCESS, hj.append_child_dfo(&nlj));
ASSERT_EQ(OB_SUCCESS, nlj.append_child_dfo(&h1));
ASSERT_EQ(OB_SUCCESS, nlj.append_child_dfo(&prob));
h1.set_parent(&nlj);
prob.set_parent(&nlj);
h2.set_parent(&hj);
nlj.set_parent(&hj);
hj.set_parent(&qc);
ASSERT_EQ(OB_SUCCESS, ObDFOSchedOrderGenerator::generate(dfo_mgr, &qc));
/*
qc
/ (e5)
hj
(e4)/ \ (e3)
h2 nlj
(e1) / \ (e2)
h1 prob
*/
// Walk the edges and add them to dfo_mgr one by one.
ObDFO *c1,*c2;
ASSERT_EQ(OB_SUCCESS, nlj.get_child_dfo(0, c1));
ASSERT_EQ(OB_SUCCESS, nlj.get_child_dfo(1, c2));
ASSERT_EQ(&h1, c1);
ASSERT_EQ(&prob, c2);
// After generate(), hj's children are expected in normalized order.
ASSERT_EQ(OB_SUCCESS, hj.get_child_dfo(0, c1));
ASSERT_EQ(OB_SUCCESS, hj.get_child_dfo(1, c2));
ASSERT_EQ(&nlj, c1);
ASSERT_EQ(&h2, c2);
ASSERT_EQ(OB_SUCCESS, qc.get_child_dfo(0, c1));
ASSERT_EQ(&hj, c1);
ObArray<ObDFO *> dfos;
ASSERT_EQ(OB_SUCCESS, dfo_mgr.get_ready_dfos(dfos));
ASSERT_EQ(dfos.count(), 2);
ASSERT_EQ(dfos.at(0), &h1);
ASSERT_EQ(dfos.at(1), &nlj);
ASSERT_EQ(OB_SUCCESS, dfo_mgr.get_ready_dfos(dfos));
ASSERT_EQ(dfos.count(), 0);
h1.set_state(ObDFOState::FINISH);
ASSERT_EQ(OB_SUCCESS, dfo_mgr.get_ready_dfos(dfos));
ASSERT_EQ(dfos.count(), 2);
ASSERT_EQ(dfos.at(0), &prob);
ASSERT_EQ(dfos.at(1), &nlj);
prob.set_state(ObDFOState::FINISH);
ASSERT_EQ(OB_SUCCESS, dfo_mgr.get_ready_dfos(dfos));
ASSERT_EQ(dfos.count(), 2);
ASSERT_EQ(dfos.at(0), &nlj);
ASSERT_EQ(dfos.at(1), &hj);
ASSERT_EQ(OB_SUCCESS, dfo_mgr.get_ready_dfos(dfos));
ASSERT_EQ(dfos.count(), 2);
ASSERT_EQ(dfos.at(0), &h2);
ASSERT_EQ(dfos.at(1), &hj);
h2.set_state(ObDFOState::FINISH);
ASSERT_EQ(OB_SUCCESS, dfo_mgr.get_ready_dfos(dfos));
ASSERT_EQ(dfos.count(), 0);
nlj.set_state(ObDFOState::FINISH);
ASSERT_EQ(OB_SUCCESS, dfo_mgr.get_ready_dfos(dfos));
ASSERT_EQ(dfos.count(), 2);
ASSERT_EQ(dfos.at(0), &hj);
ASSERT_EQ(dfos.at(1), &qc);
hj.set_state(ObDFOState::FINISH);
ASSERT_EQ(OB_ITER_END, dfo_mgr.get_ready_dfos(dfos));
}
// Drives ObPXCoord::open() on a worker thread while the main thread flips DFO
// states, simulating completion messages arriving over time. Sleeps give the
// coordinator time to observe each state change.
TEST_F(ObDFOMgrTest, px_coord)
{
class MyRunnable : public share::ObThreadPool
{
public:
void run1()
{
int ret = OB_SUCCESS;
// NOTE(review): `thread` and `arg` are not declared in this snippet —
// presumably inherited from the thread-pool base class; confirm.
LOG_INFO("start thread", K(thread));
ObPXCoord *coord = reinterpret_cast<ObPXCoord *>(arg);
if (OB_FAIL(coord->open())) {
LOG_WARN("fail open coord", K(ret));
}
LOG_INFO("end thread", K(thread));
}
};
ObPXCoord coord;
ObDFO h1, h2, prob, nlj, hj, qc;
ASSERT_EQ(OB_SUCCESS, qc.append_child_dfo(&hj));
ASSERT_EQ(OB_SUCCESS, hj.append_child_dfo(&h2));
ASSERT_EQ(OB_SUCCESS, hj.append_child_dfo(&nlj));
ASSERT_EQ(OB_SUCCESS, nlj.append_child_dfo(&h1));
ASSERT_EQ(OB_SUCCESS, nlj.append_child_dfo(&prob));
h1.set_parent(&nlj);
prob.set_parent(&nlj);
h2.set_parent(&hj);
nlj.set_parent(&hj);
hj.set_parent(&qc);
h1.set_id(1);
prob.set_id(2);
h2.set_id(3);
nlj.set_id(4);
hj.set_id(5);
qc.set_id(6);
/*
qc
/ (e5)
hj
(e4)/ \ (e3)
h2 nlj
(e1) / \ (e2)
h1 prob
*/
coord.set_dfo_tree(qc);
MyRunnable myrun;
obsys::CThread px_thread;
px_thread.start(&myrun, &coord);
// Simulate completion messages arriving, leaf DFOs first.
h2.set_state(ObDFOState::FINISH);
h1.set_state(ObDFOState::FINISH);
usleep(1000 * 1000);
prob.set_state(ObDFOState::FINISH);
usleep(2000 * 1000);
nlj.set_state(ObDFOState::FINISH);
usleep(2000 * 1000);
hj.set_state(ObDFOState::FINISH);
usleep(2000 * 1000);
px_thread.join();
}
// Unit-test entry point: set log level, then run all gtest cases.
int main(int argc, char **argv)
{
OB_LOGGER.set_log_level("INFO");
// NOTE(review): this looks like the same logger level being set twice —
// confirm whether one of the two calls is redundant.
oceanbase::common::ObLogger::get_logger().set_log_level("INFO");
::testing::InitGoogleTest(&argc,argv);
return RUN_ALL_TESTS();
}

View File

@ -1,518 +0,0 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#define USING_LOG_PREFIX SQL_EXE
#include <gtest/gtest.h>
#define private public
#define protected public
#include <stdarg.h>
#include "lib/utility/ob_tracepoint.h"
#include "sql/ob_sql_init.h"
#include "ob_fake_partition_location_cache.h"
#include "ob_fake_partition_service.h"
#include "ob_fake_partition_location_cache.h"
#include "ob_fake_partition_service.h"
#include "sql/engine/px/ob_granule_util.h"
#undef protected
#undef private
using namespace oceanbase;
using namespace oceanbase::common;
using namespace oceanbase::sql;
using namespace oceanbase::storage;
using namespace oceanbase::share;
using namespace oceanbase::share::schema;
#define PUSH_ITEM(array, item, times) \
for (int64_t i = 0; i < times; ++i) \
array.push_back(item)
// Fixture for granule-iterator pump tests. SetUp() scripts 8 cases of 5
// partitions each into the fake partition service, together with the
// expected totals the assertions use.
class ObGiPumpTest : public ::testing::Test {
public:
const static int64_t TEST_PARTITION_COUNT = 5;
const static int64_t TEST_SPLIT_TASK_COUNT = 8;
ObGiPumpTest();
virtual ~ObGiPumpTest();
virtual void SetUp();
virtual void TearDown();
void TestGISplitTaskCase(int64_t case_idx);
private:
// disallow copy
ObGiPumpTest(const ObGiPumpTest& other);
ObGiPumpTest& operator=(const ObGiPumpTest& other);
public:
// data members
// Per-case partition keys (5 keys per case).
ObSEArray<ObPartitionArray, 32> pkeys_array_;
// Fake service returning the scripted macro counts below.
ObFakePartitionServiceForGI partition_service_;
// Expected total macro blocks per case.
ObSEArray<uint64_t, 32> total_macros_by_case_;
// Expected number of empty partitions per case.
ObSEArray<uint64_t, 32> empty_partition_count_by_case_;
};
// Trivial lifecycle; all case data is built in SetUp().
ObGiPumpTest::ObGiPumpTest()
{}
ObGiPumpTest::~ObGiPumpTest()
{}
void ObGiPumpTest::SetUp()
{
/*
* first type data: all partition micro-blocks empty
* case 1 partition id 1 2 3 4 5
* micro-blocks num 0 0 0 0 0
* second type data: some partition's micro-blocks are empty
* case 2 partition id 11 12 13 14 15
* micro-blocks num 0 0 300 16 0
* case 3 partition id 21 22 23 24 25
* micro-blocks num 0 16 0 16 0
* case 4 partition id 31 32 33 34 35
* micro-blocks num 0 57 16 0 16
* case 5 partition id 41 42 43 44 45
* micro-blocks num 1021 1 2 1 0
* third type data: all micro-blocks are not empty
* case 6 partition id 51 52 53 54 55
* micro-blocks num 1021 1 312 1 1021
* case 7 partition id 61 62 63 64 65
* micro-blocks num 1 1 12 1 1021
* case 8 partition id 71 72 73 74 75
* micro-blocks num 1 1 1 2044 1
* */
ObPartitionArray pkeys;
ObPartitionKey key1;
ObPartitionKey key2;
ObPartitionKey key3;
ObPartitionKey key4;
ObPartitionKey key5;
/* case result 1 */
ObFakePartitionServiceForGI::ObGIResult case_1;
case_1.macros_count_.push_back(0);
case_1.macros_count_.push_back(0);
case_1.macros_count_.push_back(0);
case_1.macros_count_.push_back(0);
case_1.macros_count_.push_back(0);
key1.init(1, 1, 10);
key2.init(1, 2, 10);
key3.init(1, 3, 10);
key4.init(1, 4, 10);
key5.init(1, 5, 10);
pkeys.push_back(key1);
pkeys.push_back(key2);
pkeys.push_back(key3);
pkeys.push_back(key4);
pkeys.push_back(key5);
pkeys_array_.push_back(pkeys);
partition_service_.result_set_.push_back(case_1);
total_macros_by_case_.push_back(0);
empty_partition_count_by_case_.push_back(5);
/* case result 2 */
ObFakePartitionServiceForGI::ObGIResult case_2;
pkeys.reset();
case_2.macros_count_.push_back(0);
case_2.macros_count_.push_back(0);
case_2.macros_count_.push_back(300);
case_2.macros_count_.push_back(16);
case_2.macros_count_.push_back(0);
key1.init(1, 11, 10);
key2.init(1, 12, 10);
key3.init(1, 13, 10);
key4.init(1, 14, 10);
key5.init(1, 15, 10);
pkeys.push_back(key1);
pkeys.push_back(key2);
pkeys.push_back(key3);
pkeys.push_back(key4);
pkeys.push_back(key5);
pkeys_array_.push_back(pkeys);
partition_service_.result_set_.push_back(case_2);
total_macros_by_case_.push_back(316);
empty_partition_count_by_case_.push_back(3);
/* case result 3 */
ObFakePartitionServiceForGI::ObGIResult case_3;
pkeys.reset();
case_3.macros_count_.push_back(0);
case_3.macros_count_.push_back(16);
case_3.macros_count_.push_back(0);
case_3.macros_count_.push_back(16);
case_3.macros_count_.push_back(0);
key1.init(1, 21, 10);
key2.init(1, 22, 10);
key3.init(1, 23, 10);
key4.init(1, 24, 10);
key5.init(1, 25, 10);
pkeys.push_back(key1);
pkeys.push_back(key2);
pkeys.push_back(key3);
pkeys.push_back(key4);
pkeys.push_back(key5);
pkeys_array_.push_back(pkeys);
partition_service_.result_set_.push_back(case_3);
total_macros_by_case_.push_back(16 + 16);
empty_partition_count_by_case_.push_back(3);
/* case result 4 */
ObFakePartitionServiceForGI::ObGIResult case_4;
pkeys.reset();
case_4.macros_count_.push_back(0);
case_4.macros_count_.push_back(57);
case_4.macros_count_.push_back(16);
case_4.macros_count_.push_back(0);
case_4.macros_count_.push_back(16);
key1.init(1, 31, 10);
key2.init(1, 32, 10);
key3.init(1, 33, 10);
key4.init(1, 34, 10);
key5.init(1, 35, 10);
pkeys.push_back(key1);
pkeys.push_back(key2);
pkeys.push_back(key3);
pkeys.push_back(key4);
pkeys.push_back(key5);
pkeys_array_.push_back(pkeys);
partition_service_.result_set_.push_back(case_4);
total_macros_by_case_.push_back(57 + 16 + 16);
empty_partition_count_by_case_.push_back(2);
/* case result 5 */
ObFakePartitionServiceForGI::ObGIResult case_5;
pkeys.reset();
case_5.macros_count_.push_back(1021);
case_5.macros_count_.push_back(1);
case_5.macros_count_.push_back(2);
case_5.macros_count_.push_back(1);
case_5.macros_count_.push_back(0);
key1.init(1, 41, 10);
key2.init(1, 42, 10);
key3.init(1, 43, 10);
key4.init(1, 44, 10);
key5.init(1, 45, 10);
pkeys.push_back(key1);
pkeys.push_back(key2);
pkeys.push_back(key3);
pkeys.push_back(key4);
pkeys.push_back(key5);
pkeys_array_.push_back(pkeys);
partition_service_.result_set_.push_back(case_5);
total_macros_by_case_.push_back(1021 + 1 + 2 + 1);
empty_partition_count_by_case_.push_back(1);
/* case result 6 */
ObFakePartitionServiceForGI::ObGIResult case_6;
pkeys.reset();
case_6.macros_count_.push_back(1021);
case_6.macros_count_.push_back(1);
case_6.macros_count_.push_back(312);
case_6.macros_count_.push_back(1);
case_6.macros_count_.push_back(1021);
key1.init(1, 51, 10);
key2.init(1, 52, 10);
key3.init(1, 53, 10);
key4.init(1, 54, 10);
key5.init(1, 55, 10);
pkeys.push_back(key1);
pkeys.push_back(key2);
pkeys.push_back(key3);
pkeys.push_back(key4);
pkeys.push_back(key5);
pkeys_array_.push_back(pkeys);
partition_service_.result_set_.push_back(case_6);
total_macros_by_case_.push_back(1021 + 1 + 312 + 1 + 1021);
empty_partition_count_by_case_.push_back(0);
/* case result 7 */
ObFakePartitionServiceForGI::ObGIResult case_7;
pkeys.reset();
case_7.macros_count_.push_back(1);
case_7.macros_count_.push_back(1);
case_7.macros_count_.push_back(12);
case_7.macros_count_.push_back(1);
case_7.macros_count_.push_back(1021);
key1.init(1, 61, 10);
key2.init(1, 62, 10);
key3.init(1, 63, 10);
key4.init(1, 64, 10);
key5.init(1, 65, 10);
pkeys.push_back(key1);
pkeys.push_back(key2);
pkeys.push_back(key3);
pkeys.push_back(key4);
pkeys.push_back(key5);
pkeys_array_.push_back(pkeys);
partition_service_.result_set_.push_back(case_7);
total_macros_by_case_.push_back(1 + 1 + 12 + 1 + 1021);
empty_partition_count_by_case_.push_back(0);
/* case result 8 */
ObFakePartitionServiceForGI::ObGIResult case_8;
pkeys.reset();
case_8.macros_count_.push_back(1);
case_8.macros_count_.push_back(1);
case_8.macros_count_.push_back(1);
case_8.macros_count_.push_back(2044);
case_8.macros_count_.push_back(1);
key1.init(1, 71, 10);
key2.init(1, 72, 10);
key3.init(1, 73, 10);
key4.init(1, 74, 10);
key5.init(1, 75, 10);
pkeys.push_back(key1);
pkeys.push_back(key2);
pkeys.push_back(key3);
pkeys.push_back(key4);
pkeys.push_back(key5);
pkeys_array_.push_back(pkeys);
partition_service_.result_set_.push_back(case_8);
total_macros_by_case_.push_back(1 + 1 + 1 + 2044 + 1);
empty_partition_count_by_case_.push_back(0);
}
// Per-test teardown hook. Nothing to release explicitly: SetUp() only fills
// value containers owned by the fixture, which clean up in their destructors.
void ObGiPumpTest::TearDown()
{}
// execute case
// Iterate over the prepared split cases; the per-case driver is currently
// disabled (commented out), so this test is a no-op placeholder.
TEST_F(ObGiPumpTest, split_task_test)
{
  const int64_t n_cases = ObGiPumpTest::TEST_SPLIT_TASK_COUNT;
  for (int64_t case_idx = 0; case_idx < n_cases; ++case_idx) {
    // TestGISplitTaskCase(case_idx);
  }
}
// Verify ObGranuleUtil::compute_task_count under the interacting knobs of
// ObParallelBlockRangeTaskParams (parallelism, expected task load, min/max
// tasks per thread, minimum data per task).  Each macro block is assumed to
// be 2M here -- TODO confirm against the storage layer constant.
TEST_F(ObGiPumpTest, task_count_test)
{
  int ret = OB_SUCCESS;
  int64_t total_macros_count = 0;
  int64_t total_task_count = 0;
  ObParallelBlockRangeTaskParams params;
  params.parallelism_ = 3;
  params.expected_task_load_ = 128;
  params.max_task_count_per_thread_ = 100;
  params.min_task_count_per_thread_ = 13;
  params.min_task_access_size_ = 2;  // (2M)
  // Helper: run the splitter and assert it succeeded.  Previously a failure
  // was only logged, so a stale total_task_count could make the following
  // ASSERT pass spuriously.  (ASSERT_EQ aborts only this lambda but still
  // records the failure, so the test fails.)
  auto compute = [&](int64_t macros_count) {
    total_macros_count = macros_count;
    if (OB_FAIL(ObGranuleUtil::compute_task_count(params, total_macros_count, total_task_count))) {
      LOG_WARN("compute task count failed", K(ret));
    }
    ASSERT_EQ(OB_SUCCESS, ret);
  };
  /* case 1: the smallest task is a single macro block */
  compute(1);
  ASSERT_TRUE(total_task_count == 1);
  compute(2);
  ASSERT_TRUE(total_task_count == 2);
  compute(3);
  ASSERT_TRUE(total_task_count == 3);
  // Each task must scan at least 30M, so 3 macro blocks collapse into one task.
  params.min_task_access_size_ = 30;
  compute(3);
  ASSERT_TRUE(total_task_count == 1);
  /* case 2: task count bounded by [13, 100] tasks per thread */
  params.min_task_access_size_ = 2;
  params.parallelism_ = 100;
  compute(1300);
  ASSERT_TRUE(total_task_count == 1300);
  params.parallelism_ = 10;
  compute(340);
  ASSERT_TRUE(total_task_count == 130);
  params.expected_task_load_ = 2;
  compute(340);
  ASSERT_TRUE(total_task_count == 340);
  /* case 3: fall back to the minimum amount of data each task must scan */
  params.min_task_access_size_ = 20;
  params.parallelism_ = 100;
  compute(1300);
  ASSERT_TRUE(total_task_count == 1300 * 2 / 20);
  params.min_task_access_size_ = 43;
  compute(1300);
  ASSERT_TRUE(total_task_count == 1300 * 2 / 43);
  // Restore the default knobs with a small DOP; the original test reused the
  // previous total_macros_count (1300) here, preserved explicitly.
  params.parallelism_ = 2;
  params.expected_task_load_ = 128;
  params.max_task_count_per_thread_ = 100;
  params.min_task_count_per_thread_ = 13;
  params.min_task_access_size_ = 2;  // (2M)
  compute(1300);
  ASSERT_TRUE(total_task_count == 26);
}
// test ObGranuleUtil::compute_total_task_count
// Verify ObGranuleUtil::compute_total_task_count clamps the task count into
// [parallelism * min_task_count_per_thread, parallelism * max_task_count_per_thread].
TEST_F(ObGiPumpTest, compute_total_task_count)
{
  int ret = OB_SUCCESS;
  int64_t total_data_size = 0;
  int64_t total_task_count = 0;
  ObParallelBlockRangeTaskParams params;
  // DOP is 3, each task is expected to load 128M, therefore:
  //   lower bound = 3 * 128 * 13   (min_task_count_per_thread_)
  //   upper bound = 3 * 128 * 100  (max_task_count_per_thread_)
  params.parallelism_ = 3;
  params.expected_task_load_ = 128;
  params.max_task_count_per_thread_ = 100;
  params.min_task_count_per_thread_ = 13;
  params.min_task_access_size_ = 2;  // (2M)
  // Helper: run the computation and assert it succeeded.  Previously a
  // failure was only logged, so a stale total_task_count could make the
  // following ASSERT pass spuriously.
  auto compute = [&](int64_t data_size) {
    total_data_size = data_size;
    if (OB_FAIL(ObGranuleUtil::compute_total_task_count(params, total_data_size, total_task_count))) {
      LOG_WARN("compute task count failed", K(ret));
    }
    ASSERT_EQ(OB_SUCCESS, ret);
  };
  /* case 1: data amount at or below the lower bound */
  compute(0);
  ASSERT_TRUE(total_task_count == 0);
  compute(128 * 20);  // 128*20 < 3*128*13
  ASSERT_TRUE(total_task_count == 39);  // 3 * 13
  compute(128 * 20 + 30);
  ASSERT_TRUE(total_task_count == 39);
  /* case 2: data amount inside [lower bound, upper bound] */
  compute(3 * 128 * 13 + 128 * 10);
  ASSERT_TRUE(total_task_count == (3 * 13 + 10));
  compute(3 * 128 * 13 + 128 * 10 + 50);
  ASSERT_TRUE(total_task_count == (3 * 13 + 10));
  /* case 3: data amount greater than the upper bound */
  compute(3 * 128 * 100 + 128 * 1);
  ASSERT_TRUE(total_task_count == (3 * 100));
  compute(3 * 128 * 100 + 128 * 13);
  ASSERT_TRUE(total_task_count == (3 * 100));
}
// test ObGranuleUtil::compute_task_count_each_partition
// Verify the per-partition task split is correct in different situations.
// NOTE: the original check loops were guarded by OB_SUCC(ret); when the
// computation failed they were skipped entirely and the test passed
// vacuously.  The result code is now asserted explicitly instead.
TEST_F(ObGiPumpTest, compute_task_count_each_partition)
{
  int ret = OB_SUCCESS;
  // case 1: zero total size / task count (degenerate input) -- every
  // partition must still be given one task.
  int64_t total_size = 0;
  int64_t total_task_cnt = 0;  // invalid argument
  ObSEArray<int64_t, 4> size_each_partition;
  ObSEArray<int64_t, 4> task_cnt_each_partition;
  size_each_partition.push_back(0);
  size_each_partition.push_back(0);
  size_each_partition.push_back(0);
  size_each_partition.push_back(0);
  if (OB_FAIL(ObGranuleUtil::compute_task_count_each_partition(
          total_size, total_task_cnt, size_each_partition, task_cnt_each_partition))) {
    LOG_WARN("compute task count each partition failed", K(ret));
  }
  ASSERT_EQ(OB_SUCCESS, ret);
  for (int i = 0; i < size_each_partition.count(); i++) {
    ASSERT_TRUE(task_cnt_each_partition.at(i) == 1);
  }
  // case 2: total task count equals the partition count -- one task each.
  size_each_partition.reset();
  task_cnt_each_partition.reset();
  total_size = 400;
  total_task_cnt = 4;
  size_each_partition.push_back(100);
  size_each_partition.push_back(100);
  size_each_partition.push_back(100);
  size_each_partition.push_back(100);
  if (OB_FAIL(ObGranuleUtil::compute_task_count_each_partition(
          total_size, total_task_cnt, size_each_partition, task_cnt_each_partition))) {
    LOG_WARN("compute task count each partition failed", K(ret));
  }
  ASSERT_EQ(OB_SUCCESS, ret);
  for (int i = 0; i < size_each_partition.count(); i++) {
    ASSERT_TRUE(task_cnt_each_partition.at(i) == 1);
  }
  // case 3: 20 tasks spread over skewed partition sizes; empty/small
  // partitions still get at least one task.
  size_each_partition.reset();
  task_cnt_each_partition.reset();
  total_size = 400;
  total_task_cnt = 20;
  size_each_partition.push_back(0);
  size_each_partition.push_back(175);
  size_each_partition.push_back(205);
  size_each_partition.push_back(20);
  ObSEArray<int64_t, 4> checks;
  checks.push_back(1);
  checks.push_back(8);
  checks.push_back(10);
  checks.push_back(1);
  if (OB_FAIL(ObGranuleUtil::compute_task_count_each_partition(
          total_size, total_task_cnt, size_each_partition, task_cnt_each_partition))) {
    LOG_WARN("compute task count each partition failed", K(ret));
  }
  ASSERT_EQ(OB_SUCCESS, ret);
  for (int i = 0; i < size_each_partition.count(); i++) {
    LOG_INFO("each task cnt for partition", K(task_cnt_each_partition.at(i)), K(checks.at(i)));
    ASSERT_TRUE(task_cnt_each_partition.at(i) == checks.at(i));
  }
}
// Entry point: run the granule-iterator pump test suite at TRACE verbosity.
int main(int argc, char **argv)
{
  OB_LOGGER.set_log_level("TRACE");
  init_sql_factories();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

View File

@ -28,26 +28,37 @@ using namespace oceanbase::sql;
using namespace oceanbase::common;
using namespace oceanbase::sql::dtl;
class ObMergeSortReceiveTest : public ::testing::Test {
class ObMergeSortReceiveTest : public ::testing::Test
{
public:
void test_sort(int64_t n_channel, bool local_order, int64_t row_count);
int mock_channel_loop(
ObPxMergeSortReceive::ObPxMergeSortReceiveCtx* recv_ctx, int64_t n_channel, int64_t row_count, bool local_order);
int init_merge_sort_input(ObExecContext& ctx, ObPxMergeSortReceive::ObPxMergeSortReceiveCtx* recv_ctx,
ObPxMergeSortReceive& merge_sort_receive, int64_t n_channel);
int mock_channel_loop(ObPxMergeSortReceive::ObPxMergeSortReceiveCtx *recv_ctx,
int64_t n_channel,
int64_t row_count,
bool local_order);
int init_merge_sort_input(
ObExecContext &ctx,
ObPxMergeSortReceive::ObPxMergeSortReceiveCtx *recv_ctx,
ObPxMergeSortReceive &merge_sort_receive,
int64_t n_channel);
private:
int init_open(ObExecContext& ctx, ObPxMergeSortReceive& merge_sort_receive, int64_t n_channel, int64_t row_count,
bool local_order);
int init_open(ObExecContext &ctx,
ObPxMergeSortReceive &merge_sort_receive,
int64_t n_channel,
int64_t row_count,
bool local_order);
};
class ObMockPxNewRow : public ObPxNewRow {
class ObMockPxNewRow : public ObPxNewRow
{
public:
ObMockPxNewRow(int64_t count, bool local_order, int64_t n_channel)
: count_(count), local_order_(local_order), cur_(0), n_channel_(n_channel)
{}
ObMockPxNewRow(int64_t count, bool local_order, int64_t n_channel) :
count_(count),
local_order_(local_order),
cur_(0),
n_channel_(n_channel) {}
~ObMockPxNewRow() = default;
virtual int get_row(ObNewRow& row);
virtual int get_row(ObNewRow &row);
private:
int64_t count_;
@ -56,32 +67,32 @@ private:
int64_t n_channel_;
};
class ObMockChannelLoop : public ObDtlChannelLoop {
class ObMockChannelLoop : public ObDtlChannelLoop
{
public:
ObMockChannelLoop(int64_t n_channel) : n_channel_(n_channel), nth_process_channel_(0)
{}
ObMockChannelLoop(int64_t n_channel) :
n_channel_(n_channel), nth_process_channel_(0) {}
~ObMockChannelLoop() = default;
virtual int process_one(int64_t& nth_channel, int64_t timeout);
virtual int process_one_if(PredFunc pred, int64_t timeout, int64_t& nth_channel);
virtual int process_one(int64_t &nth_channel, int64_t timeout);
virtual int process_one_if(PredFunc pred, int64_t timeout, int64_t &nth_channel);
private:
int64_t n_channel_;
int64_t nth_process_channel_;
};
int ObMockPxNewRow::get_row(ObNewRow& row)
int ObMockPxNewRow::get_row(ObNewRow &row)
{
int ret = OB_SUCCESS;
int64_t n_group = local_order_ ? 2 : 1;
int64_t pos = (cur_ + n_channel_) / n_channel_;
if (pos > count_ * n_group) {
if (pos > count_ * n_group) {
ret = OB_ITER_END;
cout << "cur_" << cur_ << " ,pos:" << pos << " ,count:" << count_ * n_group << endl;
}
int64_t column_count = 1;
if (OB_SUCC(ret)) {
row.cells_ = static_cast<ObObj*>(malloc(column_count * sizeof(ObObj)));
row.cells_ = static_cast<ObObj *>(malloc(column_count * sizeof(ObObj)));
row.count_ = column_count;
// fill data
int64_t val = cur_;
@ -91,41 +102,44 @@ int ObMockPxNewRow::get_row(ObNewRow& row)
}
}
// cout << "fill val:" << val << " ,cur_:" << cur_ << endl;
ObObj* obj = COL(val);
ObObj *obj = COL(val);
row.assign(obj, column_count);
}
cur_++;
return ret;
}
int ObMockChannelLoop::process_one(int64_t& nth_channel, int64_t timeout)
int ObMockChannelLoop::process_one(int64_t &nth_channel, int64_t timeout)
{
int ret = OB_SUCCESS;
UNUSED(timeout);
nth_channel = nth_process_channel_ % n_channel_;
// cout << "channel:" << nth_process_channel_ << endl;
// cout << "channel:" << nth_process_channel_ << endl;
nth_process_channel_ = (nth_process_channel_ + 1) % n_channel_;
return ret;
}
int ObMockChannelLoop::process_one_if(PredFunc pred, int64_t timeout, int64_t& nth_channel)
int ObMockChannelLoop::process_one_if(PredFunc pred, int64_t timeout, int64_t &nth_channel)
{
int ret = OB_SUCCESS;
UNUSED(pred);
UNUSED(timeout);
nth_channel = nth_process_channel_ % n_channel_;
// cout << "channel:" << nth_process_channel_ << endl;
// cout << "channel:" << nth_process_channel_ << endl;
nth_process_channel_ = (nth_process_channel_ + 1) % n_channel_;
return ret;
}
int ObMergeSortReceiveTest::init_open(ObExecContext& ctx, ObPxMergeSortReceive& merge_sort_receive, int64_t n_channel,
int64_t row_count, bool local_order)
int ObMergeSortReceiveTest::init_open(ObExecContext &ctx,
ObPxMergeSortReceive &merge_sort_receive,
int64_t n_channel,
int64_t row_count,
bool local_order)
{
int ret = OB_SUCCESS;
ObPxMergeSortReceive::ObPxMergeSortReceiveCtx* recv_ctx = nullptr;
ObPxMergeSortReceive::ObPxMergeSortReceiveCtx *recv_ctx = nullptr;
ObSQLSessionInfo my_session;
my_session.test_init(0, 0, 0, NULL);
my_session.test_init(0,0,0,NULL);
ctx.set_my_session(&my_session);
merge_sort_receive.set_id(0);
if (OB_FAIL(ctx.init_phy_op(1))) {
@ -137,14 +151,12 @@ int ObMergeSortReceiveTest::init_open(ObExecContext& ctx, ObPxMergeSortReceive&
} else if (OB_FAIL(merge_sort_receive.init_op_ctx(ctx))) {
ret = OB_ERR_UNEXPECTED;
cout << "fail to inner open" << endl;
} else if (OB_ISNULL(recv_ctx = GET_PHY_OPERATOR_CTX(
ObPxMergeSortReceive::ObPxMergeSortReceiveCtx, ctx, merge_sort_receive.get_id()))) {
} else if (OB_ISNULL(recv_ctx = GET_PHY_OPERATOR_CTX(ObPxMergeSortReceive::ObPxMergeSortReceiveCtx, ctx, merge_sort_receive.get_id()))) {
ret = OB_ERR_UNEXPECTED;
cout << "fail to create physica operator ctx" << endl;
} else if (OB_FAIL(mock_channel_loop(recv_ctx, n_channel, row_count, local_order))) {
cout << "fail to mock channel loop" << endl;
} else if (!merge_sort_receive.local_order_ &&
OB_FAIL(recv_ctx->row_heap_.init(n_channel, merge_sort_receive.sort_columns_))) {
} else if (!merge_sort_receive.local_order_ && OB_FAIL(recv_ctx->row_heap_.init(n_channel, merge_sort_receive.sort_columns_))) {
cout << "fail to init row heap" << endl;
} else if (OB_FAIL(init_merge_sort_input(ctx, recv_ctx, merge_sort_receive, n_channel))) {
// mock the number of channels is 2
@ -156,9 +168,11 @@ int ObMergeSortReceiveTest::init_open(ObExecContext& ctx, ObPxMergeSortReceive&
return ret;
}
int ObMergeSortReceiveTest::init_merge_sort_input(ObExecContext& ctx,
ObPxMergeSortReceive::ObPxMergeSortReceiveCtx* recv_ctx, ObPxMergeSortReceive& merge_sort_receive,
int64_t n_channel)
int ObMergeSortReceiveTest::init_merge_sort_input(
ObExecContext &ctx,
ObPxMergeSortReceive::ObPxMergeSortReceiveCtx *recv_ctx,
ObPxMergeSortReceive &merge_sort_receive,
int64_t n_channel)
{
int ret = OB_SUCCESS;
UNUSED(ctx);
@ -171,8 +185,8 @@ int ObMergeSortReceiveTest::init_merge_sort_input(ObExecContext& ctx,
if (0 >= n_channel) {
cout << "channels are not init" << endl;
} else {
for (int64_t idx = 0; OB_SUCC(ret) && idx < n_channel; ++idx) {
ObRARowStore* row_store = OB_NEW(ObRARowStore, ObModIds::OB_SQL_PX);
for(int64_t idx = 0; OB_SUCC(ret) && idx < n_channel; ++idx) {
ObRARowStore *row_store = OB_NEW(ObRARowStore, ObModIds::OB_SQL_PX);
int64_t mem_limit = 0;
if (OB_FAIL(row_store->init(mem_limit))) {
cout << "row store init fail" << endl;
@ -189,9 +203,8 @@ int ObMergeSortReceiveTest::init_merge_sort_input(ObExecContext& ctx,
cout << "channels are not init" << endl;
} else {
cout << "start alloc msi" << endl;
for (int64_t idx = 0; OB_SUCC(ret) && idx < n_channel; ++idx) {
ObPxMergeSortReceive::MergeSortInput* msi =
OB_NEW(ObPxMergeSortReceive::GlobalOrderInput, ObModIds::OB_SQL_PX, OB_SERVER_TENANT_ID);
for(int64_t idx = 0; OB_SUCC(ret) && idx < n_channel; ++idx) {
ObPxMergeSortReceive::MergeSortInput *msi = OB_NEW(ObPxMergeSortReceive::GlobalOrderInput, ObModIds::OB_SQL_PX, OB_SERVER_TENANT_ID);
cout << "alloc succ msi" << endl;
if (nullptr == msi) {
ret = OB_ALLOCATE_MEMORY_FAILED;
@ -210,15 +223,18 @@ int ObMergeSortReceiveTest::init_merge_sort_input(ObExecContext& ctx,
}
int ObMergeSortReceiveTest::mock_channel_loop(
ObPxMergeSortReceive::ObPxMergeSortReceiveCtx* recv_ctx, int64_t n_channel, int64_t row_count, bool local_order)
ObPxMergeSortReceive::ObPxMergeSortReceiveCtx *recv_ctx,
int64_t n_channel,
int64_t row_count,
bool local_order)
{
int ret = OB_SUCCESS;
ObMockChannelLoop* mock_channel_loop = new ObMockChannelLoop(n_channel);
ObMockChannelLoop *mock_channel_loop = new ObMockChannelLoop(n_channel);
if (OB_ISNULL(mock_channel_loop)) {
cout << "fail to alloc mock channel loop" << endl;
} else {
recv_ctx->ptr_row_msg_loop_ = mock_channel_loop;
ObMockPxNewRow* mock_px_row = new ObMockPxNewRow(row_count, local_order, n_channel);
ObMockPxNewRow *mock_px_row = new ObMockPxNewRow(row_count, local_order, n_channel);
if (OB_ISNULL(mock_px_row)) {
ret = OB_ERR_UNEXPECTED;
cout << "fail to mock px row" << endl;
@ -228,7 +244,7 @@ int ObMergeSortReceiveTest::mock_channel_loop(
}
ObAddr self;
self.set_ip_addr("127.0.0.1", 8086);
ObDtlRpcChannel* tmp_channel = new ObDtlRpcChannel(1, 1, self);
ObDtlRpcChannel *tmp_channel = new ObDtlRpcChannel(1, 1, self);
for (int idx = 0; idx < n_channel; ++idx) {
if (OB_FAIL(recv_ctx->task_channels_.push_back(tmp_channel))) {
cout << "fail to push back channel" << endl;
@ -238,8 +254,9 @@ int ObMergeSortReceiveTest::mock_channel_loop(
return ret;
}
// n_channel -- number of channels
// local_order -- whether data is having local order property
// n_channel -- number of channels
// local_order -- whether the data is locally ordered
// row_count -- rows per ordered group; with local order each channel holds two groups, otherwise one
void ObMergeSortReceiveTest::test_sort(int64_t n_channel, bool local_order, int64_t row_count)
{
int ret = OB_SUCCESS;
@ -248,12 +265,12 @@ void ObMergeSortReceiveTest::test_sort(int64_t n_channel, bool local_order, int6
ObExecContext ctx;
merge_sort_receive.init_sort_columns(1);
merge_sort_receive.add_sort_column(0, CS_TYPE_UTF8MB4_BIN, true, ObMaxType, default_asc_direction());
const ObNewRow* out_row = NULL;
const ObNewRow *out_row = NULL;
if (OB_FAIL(init_open(ctx, merge_sort_receive, n_channel, row_count, local_order))) {
ret = OB_ERR_UNEXPECTED;
cout << "fail to init open" << endl;
} else {
const ObObj* cell = NULL;
const ObObj *cell = NULL;
int64_t val;
int64_t n_group = local_order ? 2 : 1;
int64_t total_row_count = n_channel * row_count * n_group;
@ -265,15 +282,14 @@ void ObMergeSortReceiveTest::test_sort(int64_t n_channel, bool local_order, int6
ret = OB_ERR_UNEXPECTED;
cout << "fail to get next row: " << cnt << " ,total_row_count:" << total_row_count << endl;
} else {
// cout << "times:" << cnt << endl;
// cout << "times:" << cnt << endl;
cell = &out_row->cells_[0];
cell->get_int(val);
// cout << val << endl;
if (true == need_cmp) {
ASSERT_TRUE(val > last_value);
if (val <= last_value) {
cout << "compare error, data is no order:" << cnt << ", value: " << val << " ,last_value: " << last_value
<< endl;
cout << "compare error, data is no order:" << cnt << ", value: " << val << " ,last_value: " << last_value << endl;
}
if (local_order) {
need_cmp = !need_cmp;
@ -283,8 +299,7 @@ void ObMergeSortReceiveTest::test_sort(int64_t n_channel, bool local_order, int6
need_cmp = !need_cmp;
ASSERT_EQ(last_value, val);
if (last_value != val) {
cout << "compare error, there are two same data in local order:" << cnt << ", value: " << val
<< " ,last_value: " << last_value << endl;
cout << "compare error, there are two same data in local order:" << cnt << ", value: " << val << " ,last_value: " << last_value << endl;
}
}
}
@ -325,12 +340,12 @@ TEST_F(ObMergeSortReceiveTest, merge_sort_receive_1)
test_sort(19, true, 300);
}
int main(int argc, char** argv)
int main(int argc, char **argv)
{
system("rm -f test_merge_sort.log*");
OB_LOGGER.set_file_name("test_merge_sort.log", true, true);
// OB_LOGGER.set_log_level("INFO");
::testing::InitGoogleTest(&argc, argv);
//OB_LOGGER.set_log_level("INFO");
::testing::InitGoogleTest(&argc,argv);
oceanbase::common::ObLogger::get_logger().set_log_level("WARN");
return RUN_ALL_TESTS();
}

View File

@ -26,40 +26,38 @@ using namespace oceanbase::storage;
using namespace oceanbase::share;
using namespace oceanbase::share::schema;
class ObRandomAffiTaskSplitTest : public ::testing::Test {
class ObRandomAffiTaskSplitTest : public ::testing::Test
{
public:
const static int64_t TEST_PARTITION_COUNT = 5;
const static int64_t TEST_SPLIT_TASK_COUNT = 8;
ObRandomAffiTaskSplitTest() = default;
virtual ~ObRandomAffiTaskSplitTest() = default;
virtual void SetUp(){};
virtual void TearDown(){};
virtual void SetUp() {};
virtual void TearDown() {};
private:
// disallow copy
ObRandomAffiTaskSplitTest(const ObRandomAffiTaskSplitTest& other);
ObRandomAffiTaskSplitTest& operator=(const ObRandomAffiTaskSplitTest& other);
ObRandomAffiTaskSplitTest(const ObRandomAffiTaskSplitTest &other);
ObRandomAffiTaskSplitTest& operator=(const ObRandomAffiTaskSplitTest &other);
};
TEST_F(ObRandomAffiTaskSplitTest, split_task_test)
{
TEST_F(ObRandomAffiTaskSplitTest, split_task_test) {
{
int64_t parallel = 3;
int64_t tenant_id = 1;
ObPxPartitionInfo px_part_info;
ObPxTabletInfo px_part_info;
ObPxAffinityByRandom affinitize_rule;
for (int i = 0; i < 5; ++i) {
px_part_info.physical_row_count_ = (10 - i) * 100;
affinitize_rule.add_partition(i, i, parallel, tenant_id, px_part_info);
affinitize_rule.add_partition(i,i,parallel,tenant_id,px_part_info);
}
affinitize_rule.do_random(true);
const common::ObIArray<ObPxAffinityByRandom::PartitionHashValue>& result = affinitize_rule.get_result();
const common::ObIArray<ObPxAffinityByRandom::TabletHashValue>& result = affinitize_rule.get_result();
for (int i = 0; i < result.count(); ++i) {
LOG_INFO("result",
K(result.at(i).partition_id_),
K(result.at(i).worker_id_),
K(result.at(i).partition_info_.physical_row_count_));
LOG_INFO("result", K(result.at(i).tablet_id_), K(result.at(i).worker_id_), K(result.at(i).partition_info_.physical_row_count_));
}
ASSERT_EQ(1, result.at(0).worker_id_);
ASSERT_EQ(2, result.at(1).worker_id_);
@ -71,28 +69,25 @@ TEST_F(ObRandomAffiTaskSplitTest, split_task_test)
{
int64_t parallel = 16;
int64_t tenant_id = 1;
ObPxPartitionInfo px_part_info;
ObPxTabletInfo px_part_info;
ObPxAffinityByRandom affinitize_rule;
px_part_info.physical_row_count_ = 3000;
affinitize_rule.add_partition(0, 0, parallel, tenant_id, px_part_info);
affinitize_rule.add_partition(0,0,parallel,tenant_id,px_part_info);
px_part_info.physical_row_count_ = 1000;
affinitize_rule.add_partition(1, 1, parallel, tenant_id, px_part_info);
affinitize_rule.add_partition(1,1,parallel,tenant_id,px_part_info);
px_part_info.physical_row_count_ = 2500;
affinitize_rule.add_partition(2, 2, parallel, tenant_id, px_part_info);
affinitize_rule.add_partition(2,2,parallel,tenant_id,px_part_info);
px_part_info.physical_row_count_ = 3500;
affinitize_rule.add_partition(3, 3, parallel, tenant_id, px_part_info);
affinitize_rule.add_partition(3,3,parallel,tenant_id,px_part_info);
px_part_info.physical_row_count_ = 2000;
affinitize_rule.add_partition(4, 4, parallel, tenant_id, px_part_info);
affinitize_rule.add_partition(4,4,parallel,tenant_id,px_part_info);
affinitize_rule.do_random(true);
const common::ObIArray<ObPxAffinityByRandom::PartitionHashValue>& result = affinitize_rule.get_result();
const common::ObIArray<ObPxAffinityByRandom::TabletHashValue>& result = affinitize_rule.get_result();
for (int i = 0; i < 5; ++i) {
LOG_INFO("result",
K(result.at(i).partition_id_),
K(result.at(i).worker_id_),
K(result.at(i).partition_info_.physical_row_count_));
LOG_INFO("result", K(result.at(i).tablet_id_), K(result.at(i).worker_id_), K(result.at(i).partition_info_.physical_row_count_));
}
ASSERT_EQ(3, result.at(0).worker_id_);
ASSERT_EQ(0, result.at(1).worker_id_);
@ -104,28 +99,25 @@ TEST_F(ObRandomAffiTaskSplitTest, split_task_test)
{
int64_t parallel = 3;
int64_t tenant_id = 1;
ObPxPartitionInfo px_part_info;
ObPxTabletInfo px_part_info;
ObPxAffinityByRandom affinitize_rule;
px_part_info.physical_row_count_ = 3000;
affinitize_rule.add_partition(0, 0, parallel, tenant_id, px_part_info);
affinitize_rule.add_partition(0,0,parallel,tenant_id,px_part_info);
px_part_info.physical_row_count_ = 4000;
affinitize_rule.add_partition(1, 1, parallel, tenant_id, px_part_info);
affinitize_rule.add_partition(1,1,parallel,tenant_id,px_part_info);
px_part_info.physical_row_count_ = 2500;
affinitize_rule.add_partition(2, 2, parallel, tenant_id, px_part_info);
affinitize_rule.add_partition(2,2,parallel,tenant_id,px_part_info);
px_part_info.physical_row_count_ = 1500;
affinitize_rule.add_partition(3, 3, parallel, tenant_id, px_part_info);
affinitize_rule.add_partition(3,3,parallel,tenant_id,px_part_info);
px_part_info.physical_row_count_ = 2000;
affinitize_rule.add_partition(4, 4, parallel, tenant_id, px_part_info);
affinitize_rule.add_partition(4,4,parallel,tenant_id,px_part_info);
affinitize_rule.do_random(true);
const common::ObIArray<ObPxAffinityByRandom::PartitionHashValue>& result = affinitize_rule.get_result();
const common::ObIArray<ObPxAffinityByRandom::TabletHashValue>& result = affinitize_rule.get_result();
for (int i = 0; i < 5; ++i) {
LOG_INFO("result",
K(result.at(i).partition_id_),
K(result.at(i).worker_id_),
K(result.at(i).partition_info_.physical_row_count_));
LOG_INFO("result", K(result.at(i).tablet_id_), K(result.at(i).worker_id_), K(result.at(i).partition_info_.physical_row_count_));
}
ASSERT_EQ(1, result.at(0).worker_id_);
@ -133,14 +125,15 @@ TEST_F(ObRandomAffiTaskSplitTest, split_task_test)
ASSERT_EQ(2, result.at(2).worker_id_);
ASSERT_EQ(0, result.at(3).worker_id_);
ASSERT_EQ(0, result.at(4).worker_id_);
}
}
}
int main(int argc, char** argv)
int main(int argc, char **argv)
{
OB_LOGGER.set_log_level("TRACE");
// oceanbase::common::ObLogger::get_logger().set_log_level("TRACE");
//oceanbase::common::ObLogger::get_logger().set_log_level("TRACE");
init_sql_factories();
::testing::InitGoogleTest(&argc, argv);
::testing::InitGoogleTest(&argc,argv);
return RUN_ALL_TESTS();
}

View File

@ -0,0 +1,220 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#define USING_LOG_PREFIX SQL_EXE
#include "gtest/gtest.h"
#include "sql/engine/ob_exec_context.h"
#define private public
#define protected public
#include "sql/executor/ob_slice_calc.h"
#undef private
#undef protected
#include "lib/ob_define.h"
#include "common/row/ob_row.h"
#include "share/schema/ob_table_schema.h"
#include "share/schema/ob_part_mgr_util.h"
#include "sql/engine/px/ob_px_util.h"
#include "sql/engine/ob_exec_context.h"
#include "sql/engine/expr/ob_expr_calc_partition_id.h"
using namespace oceanbase::common;
using namespace oceanbase::share;
using namespace oceanbase::sql;
// Fixture for ObSlaveMapPkeyRangeIdxCalc tests: builds a fake execution
// context holding PARTITION_COUNT partitions, each cut into RANGE_COUNT
// ranges by the keys prepared in SetUp().
class TestPkeyRangeSliceCalc : public ::testing::Test
{
public:
  // ctx_ is constructed from allocator_, so allocator_ must be declared
  // before ctx_ below (member initialization order).
  TestPkeyRangeSliceCalc() : ctx_(allocator_) {}
  virtual ~TestPkeyRangeSliceCalc() = default;
  virtual void SetUp() override;
  virtual void TearDown() override;
  // Builds a dense partition->channel map: ch_count channels per partition.
  int build_channel_info(const int64_t ch_count, ObPxPartChInfo &ch_info);
public:
  static const int64_t PARTITION_COUNT = 3;
  static const int64_t RANGE_COUNT = 5;
  ObArenaAllocator allocator_;
  ObExecContext ctx_;
  // Single-column sort description shared by all tests (int key, binary
  // collation, ascending, NULLs last).
  ObArray<ObExpr *> sort_exprs_;
  // Not a real expression: points at tmp_int_ to satisfy the calc-part-id
  // parameter without being evaluated.
  ObExpr *fake_calc_part_id_expr_;
  ObArray<ObSortFieldCollation> sort_collations_;
  ObArray<ObSortCmpFunc> sort_cmp_funcs_;
  // Range cut keys (10, 20, 30, 40) reused for every partition.
  ObPxTabletRange::RangeCut range_cut_;
  int64_t tmp_int_;
  // Reusable datum whose int payload lives in tmp_int_.
  ObDatum int_datum_;
};
// Prepares the shared fixture state: one int sort column, a nullsafe
// comparator, RANGE_COUNT-1 cut keys (10..40), and PARTITION_COUNT
// partition ranges installed into ctx_.
void TestPkeyRangeSliceCalc::SetUp()
{
  int ret = OB_SUCCESS;
  // The slice calc only needs the expr slot to exist, not a real expression.
  ASSERT_EQ(OB_SUCCESS, sort_exprs_.push_back(nullptr));
  ASSERT_EQ(OB_SUCCESS, sort_collations_.push_back(ObSortFieldCollation(
      0/*field_idx*/,
      ObCollationType::CS_TYPE_BINARY,
      true/*is_ascending*/,
      ObCmpNullPos::NULL_LAST)));
  ObSortCmpFunc cmp_func;
  cmp_func.cmp_func_ = ObDatumFuncs::get_nullsafe_cmp_func(
      ObObjType::ObIntType,
      ObObjType::ObIntType,
      ObCmpNullPos::NULL_LAST,
      ObCollationType::CS_TYPE_BINARY,
      false/*is_oracle_mode*/);
  ASSERT_EQ(OB_SUCCESS, sort_cmp_funcs_.push_back(cmp_func));
  // int_datum_ and the fake expr both alias tmp_int_'s storage.
  int_datum_.int_ = &tmp_int_;
  fake_calc_part_id_expr_ = reinterpret_cast<ObExpr *>(&tmp_int_);
  ObDatum tmp_datum;
  // Build cut keys 10, 20, 30, 40; each datum gets its own allocated
  // int64 payload so the keys stay valid after the loop.
  // NOTE(review): the alloc() return value is not checked -- presumably
  // the arena cannot fail for these tiny allocations, but set_int on a
  // null payload would crash; confirm.
  for (int64_t j = 1; OB_SUCC(ret) && j < RANGE_COUNT; ++j) {
    tmp_datum.int_ = (int64_t *)allocator_.alloc(sizeof(int64_t));
    ObPxTabletRange::DatumKey tmp_key;
    ASSERT_EQ(OB_SUCCESS, tmp_key.push_back(tmp_datum));
    tmp_key.at(0).set_int(j * 10);
    ASSERT_EQ(OB_SUCCESS, range_cut_.push_back(tmp_key));
  }
  // Every partition shares the same range cut.
  ObArray<ObPxTabletRange> part_ranges;
  for (int64_t i = 0; OB_SUCC(ret) && i < PARTITION_COUNT; ++i) {
    ObPxTabletRange tmp_part_range;
    tmp_part_range.partition_id_ = i;
    ret = tmp_part_range.range_cut_.assign(range_cut_);
    ASSERT_EQ(OB_SUCCESS, ret);
    ret = part_ranges.push_back(tmp_part_range);
    ASSERT_EQ(OB_SUCCESS, ret);
  }
  ret = ctx_.set_partition_ranges(part_ranges);
  ASSERT_EQ(OB_SUCCESS, ret);
  LOG_INFO("init ctx partition ranges", K(ret), K(ctx_.get_partition_ranges()));
}
// Per-test teardown hook; fixture members (arrays, arena allocator) release
// their memory in their own destructors, so nothing to do here.
void TestPkeyRangeSliceCalc::TearDown()
{
}
int TestPkeyRangeSliceCalc::build_channel_info(const int64_t ch_count, ObPxPartChInfo &ch_info)
{
int ret = OB_SUCCESS;
ch_info.part_ch_array_.reset();
for (int64_t i = 0; OB_SUCC(ret) && i < PARTITION_COUNT; ++i) {
for (int64_t j = 0; OB_SUCC(ret) && j < ch_count; ++j) {
ObPxPartChMapItem item;
item.first_ = i;
item.second_ = ch_count * i + j;
if (OB_FAIL(ch_info.part_ch_array_.push_back(item))) {
LOG_WARN("push back channel item failed", K(ret), K(item));
}
}
}
return ret;
}
// With a single channel per partition, every row of a partition must map to
// that partition's only task slot, regardless of its sort key.
TEST_F(TestPkeyRangeSliceCalc, build_part_range_map_one_ch)
{
  int ret = OB_SUCCESS;
  ObPxPartChInfo ch_info;
  const int64_t ch_count = 1;
  ret = build_channel_info(ch_count, ch_info);
  ASSERT_EQ(OB_SUCCESS, ret);
  schema::ObTableSchema fake_table_schema;
  ObSlaveMapPkeyRangeIdxCalc range_slice_calc(
      ctx_, fake_table_schema, fake_calc_part_id_expr_, ObPQDistributeMethod::PARTITION_RANGE, ch_info,
      sort_exprs_, &sort_cmp_funcs_, &sort_collations_, OB_REPARTITION_NO_REPARTITION);
  ASSERT_EQ(OB_SUCCESS, range_slice_calc.init());
  ObPxTabletRange::DatumKey sort_key;
  ASSERT_EQ(OB_SUCCESS, sort_key.push_back(int_datum_));
  int64_t task_idx = -1;
  for (int64_t part = 0; part < PARTITION_COUNT; ++part) {
    for (int64_t row = 0; row < 1000; ++row) {
      sort_key.at(0).set_int(row);
      ASSERT_EQ(OB_SUCCESS, range_slice_calc.get_task_idx(part, sort_key, task_idx));
      ASSERT_EQ(task_idx, part * ch_count);
    }
  }
}
// Fewer channels (3) than ranges (5) per partition: consecutive ranges are
// packed onto the same task, so ranges {0,1} go to task 0, {2,3} to task 1,
// and {4} to task 2 of each partition.
TEST_F(TestPkeyRangeSliceCalc, build_part_range_map_less_ch)
{
  int ret = OB_SUCCESS;
  ObPxPartChInfo ch_info;
  const int64_t ch_count = 3;
  ret = build_channel_info(ch_count, ch_info);
  ASSERT_EQ(OB_SUCCESS, ret);
  schema::ObTableSchema fake_table_schema;
  ObSlaveMapPkeyRangeIdxCalc range_slice_calc(
      ctx_, fake_table_schema, fake_calc_part_id_expr_, ObPQDistributeMethod::PARTITION_RANGE, ch_info,
      sort_exprs_, &sort_cmp_funcs_, &sort_collations_, OB_REPARTITION_NO_REPARTITION);
  ret = range_slice_calc.init();
  ASSERT_EQ(OB_SUCCESS, ret);
  int64_t task_idx = -1;
  ObPxTabletRange::DatumKey tmp_key;
  ASSERT_EQ(OB_SUCCESS, tmp_key.push_back(int_datum_));
  for (int64_t j = 0; OB_SUCC(ret) && j < PARTITION_COUNT; ++j) {
    const int64_t partition_id = j;
    for (int64_t i = 0; OB_SUCC(ret) && i < 1000; ++i) {
      tmp_key.at(0).set_int(i);
      ret = range_slice_calc.get_task_idx(partition_id, tmp_key, task_idx);
      ASSERT_EQ(OB_SUCCESS, ret);
      // Recompute the expected range index independently, using the same
      // comparator the slice calculator was initialized with.
      const int64_t range_idx = std::lower_bound(range_cut_.begin(), range_cut_.end(), tmp_key, range_slice_calc.sort_cmp_) - range_cut_.begin();
      if (range_idx < 2) {
        ASSERT_EQ(task_idx, 0 + partition_id * ch_count) << "i: " << i << ", range_idx: " << range_idx << std::endl;
      } else if (range_idx < 4) {
        ASSERT_EQ(task_idx, 1 + partition_id * ch_count) << "i: " << i << ", range_idx: " << range_idx << std::endl;
      } else {
        ASSERT_EQ(task_idx, 2 + partition_id * ch_count) << "i: " << i << ", range_idx: " << range_idx << std::endl;
      }
    }
  }
}
// More channels (30) than sorted ranges: each range gets a dedicated channel,
// so the task index must equal range_idx + partition_id * ch_count.
TEST_F(TestPkeyRangeSliceCalc, build_part_range_map_more_ch)
{
  int ret = OB_SUCCESS;
  const int64_t ch_count = 30;
  ObPxPartChInfo part_ch_info;
  ret = build_channel_info(ch_count, part_ch_info);
  ASSERT_EQ(OB_SUCCESS, ret);
  schema::ObTableSchema fake_table_schema;
  ObSlaveMapPkeyRangeIdxCalc range_slice_calc(
      ctx_, fake_table_schema, fake_calc_part_id_expr_, ObPQDistributeMethod::PARTITION_RANGE, part_ch_info,
      sort_exprs_, &sort_cmp_funcs_, &sort_collations_, OB_REPARTITION_NO_REPARTITION);
  ASSERT_EQ(OB_SUCCESS, range_slice_calc.init());
  // Single-column sort key, mutated in place for each probe value.
  ObPxTabletRange::DatumKey sort_key;
  ASSERT_EQ(OB_SUCCESS, sort_key.push_back(int_datum_));
  int64_t calculated_idx = -1;
  for (int64_t part_id = 0; OB_SUCC(ret) && part_id < PARTITION_COUNT; ++part_id) {
    for (int64_t key_val = 0; OB_SUCC(ret) && key_val < 1000; ++key_val) {
      sort_key.at(0).set_int(key_val);
      ret = range_slice_calc.get_task_idx(part_id, sort_key, calculated_idx);
      ASSERT_EQ(OB_SUCCESS, ret);
      const int64_t range_idx = std::lower_bound(
          range_cut_.begin(), range_cut_.end(), sort_key, range_slice_calc.sort_cmp_) - range_cut_.begin();
      ASSERT_EQ(calculated_idx, range_idx + part_id * ch_count)
          << "i: " << key_val << ", range_idx: " << range_idx << std::endl;
    }
  }
}
// Test entry point: resets the log file, configures logging, then runs all
// slice-calc test cases registered above via gtest.
int main(int argc, char **argv)
{
  // Remove stale logs (including rotated ones) so each run starts clean.
  system("rm -f test_slice_calc.log*");
  OB_LOGGER.set_file_name("test_slice_calc.log", true, false);
  OB_LOGGER.set_log_level("INFO");
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

View File

@ -0,0 +1,294 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/
#define USING_LOG_PREFIX SQL_EXE
#include <gtest/gtest.h>
#include "sql/ob_sql_init.h"
#include "sql/engine/px/ob_px_util.h"
using namespace oceanbase;
using namespace oceanbase::common;
using namespace oceanbase::sql;
using namespace oceanbase::storage;
using namespace oceanbase::share;
using namespace oceanbase::share::schema;
class ObSplitSqcTaskTest : public ::testing::Test
{
public:
const static int64_t TEST_PARTITION_COUNT = 5;
const static int64_t TEST_SPLIT_TASK_COUNT = 8;
ObSplitSqcTaskTest() = default;
virtual ~ObSplitSqcTaskTest() = default;
virtual void SetUp() {};
virtual void TearDown() {};
private:
// disallow copy
ObSplitSqcTaskTest(const ObSplitSqcTaskTest &other);
ObSplitSqcTaskTest& operator=(const ObSplitSqcTaskTest &other);
};
// Exercises ObPXServerAddrUtil::split_parallel_into_task(): given a total
// parallel degree and the partition count of each SQC, the function must
// split the parallel threads across SQCs roughly proportionally to their
// partition counts, while guaranteeing at least one thread per SQC and
// keeping the total equal to `parallel` (or to the SQC count when
// parallel < sqc_count). Invalid inputs must be rejected with an error.
TEST_F(ObSplitSqcTaskTest, split_task_test) {
  int ret = OB_SUCCESS;
  {
    // Proportional split: 33/44/23 partitions over 12 threads -> 4/5/3.
    int64_t parallel = 12;
    ObArray<int64_t> sqc_part_count;
    ObArray<int64_t> results;
    int64_t total_thread = 0;
    sqc_part_count.push_back(33);
    sqc_part_count.push_back(44);
    sqc_part_count.push_back(100-33-44);
    ret = ObPXServerAddrUtil::split_parallel_into_task(parallel, sqc_part_count, results);
    ASSERT_TRUE(ret == OB_SUCCESS);
    ASSERT_EQ(3, results.count());
    ASSERT_EQ(4, results[0]);
    ASSERT_EQ(5, results[1]);
    ASSERT_EQ(3, results[2]);
    ARRAY_FOREACH(results, idx) {
      total_thread += results[idx];
    }
    ASSERT_EQ(total_thread, parallel);
  }
  {
    // Proportional split with small partition counts: 1/2/1 over 12 -> 3/6/3.
    int64_t parallel = 12;
    ObArray<int64_t> sqc_part_count;
    ObArray<int64_t> results;
    int64_t total_thread = 0;
    sqc_part_count.push_back(1);
    sqc_part_count.push_back(2);
    sqc_part_count.push_back(1);
    ret = ObPXServerAddrUtil::split_parallel_into_task(parallel, sqc_part_count, results);
    ASSERT_TRUE(ret == OB_SUCCESS);
    ASSERT_EQ(3, results.count());
    ASSERT_EQ(3, results[0]);
    ASSERT_EQ(6, results[1]);
    ASSERT_EQ(3, results[2]);
    ARRAY_FOREACH(results, idx) {
      total_thread += results[idx];
    }
    ASSERT_EQ(total_thread, parallel);
  }
  {
    // Skewed counts: the dominant SQC (11 of 13 partitions) gets most threads.
    int64_t parallel = 15;
    ObArray<int64_t> sqc_part_count;
    ObArray<int64_t> results;
    int64_t total_thread = 0;
    sqc_part_count.push_back(1);
    sqc_part_count.push_back(1);
    sqc_part_count.push_back(11);
    ret = ObPXServerAddrUtil::split_parallel_into_task(parallel, sqc_part_count, results);
    ASSERT_TRUE(ret == OB_SUCCESS);
    ASSERT_EQ(3, results.count());
    ASSERT_EQ(2, results[0]);
    ASSERT_EQ(1, results[1]);
    ASSERT_EQ(12, results[2]);
    ARRAY_FOREACH(results, idx) {
      total_thread += results[idx];
    }
    ASSERT_EQ(total_thread, parallel);
  }
  {
    // Near-even counts: 4/5/5/6 partitions over 15 threads -> 3/4/4/4.
    int64_t parallel = 15;
    ObArray<int64_t> sqc_part_count;
    ObArray<int64_t> results;
    int64_t total_thread = 0;
    sqc_part_count.push_back(4);
    sqc_part_count.push_back(5);
    sqc_part_count.push_back(5);
    sqc_part_count.push_back(6);
    ret = ObPXServerAddrUtil::split_parallel_into_task(parallel, sqc_part_count, results);
    ASSERT_TRUE(ret == OB_SUCCESS);
    ASSERT_EQ(4, results.count());
    ASSERT_EQ(3, results[0]);
    ASSERT_EQ(4, results[1]);
    ASSERT_EQ(4, results[2]);
    ASSERT_EQ(4, results[3]);
    ARRAY_FOREACH(results, idx) {
      total_thread += results[idx];
    }
    ASSERT_EQ(total_thread, parallel);
  }
  {
    // Increasing counts: 2/3/4 partitions over 12 threads -> 3/4/5.
    int64_t parallel = 12;
    ObArray<int64_t> sqc_part_count;
    ObArray<int64_t> results;
    int64_t total_thread = 0;
    sqc_part_count.push_back(2);
    sqc_part_count.push_back(3);
    sqc_part_count.push_back(4);
    ret = ObPXServerAddrUtil::split_parallel_into_task(parallel, sqc_part_count, results);
    ASSERT_TRUE(ret == OB_SUCCESS);
    ASSERT_EQ(3, results.count());
    ASSERT_EQ(3, results[0]);
    ASSERT_EQ(4, results[1]);
    ASSERT_EQ(5, results[2]);
    ARRAY_FOREACH(results, idx) {
      total_thread += results[idx];
    }
    ASSERT_EQ(total_thread, parallel);
  }
  {
    // Extreme partition skew: SQCs with very few partitions must still be
    // assigned at least one thread each.
    int64_t parallel = 15;
    ObArray<int64_t> sqc_part_count;
    ObArray<int64_t> results;
    int64_t total_thread = 0;
    sqc_part_count.push_back(100);
    sqc_part_count.push_back(100);
    sqc_part_count.push_back(1);
    sqc_part_count.push_back(1);
    ret = ObPXServerAddrUtil::split_parallel_into_task(parallel, sqc_part_count, results);
    ASSERT_TRUE(ret == OB_SUCCESS);
    ASSERT_EQ(4, results.count());
    ASSERT_EQ(7, results[0]);
    ASSERT_EQ(6, results[1]);
    ASSERT_EQ(1, results[2]);
    ASSERT_EQ(1, results[3]);
    ARRAY_FOREACH(results, idx) {
      total_thread += results[idx];
    }
    ASSERT_EQ(total_thread, parallel);
  }
  {
    // Extreme partition skew with high parallelism: small SQCs still get
    // one thread each; the remainder goes to the large SQCs.
    int64_t parallel = 203;
    ObArray<int64_t> sqc_part_count;
    ObArray<int64_t> results;
    int64_t total_thread = 0;
    sqc_part_count.push_back(100);
    sqc_part_count.push_back(100);
    sqc_part_count.push_back(1);
    sqc_part_count.push_back(1);
    ret = ObPXServerAddrUtil::split_parallel_into_task(parallel, sqc_part_count, results);
    ASSERT_TRUE(ret == OB_SUCCESS);
    ASSERT_EQ(4, results.count());
    ASSERT_EQ(101, results[0]);
    ASSERT_EQ(100, results[1]);
    ASSERT_EQ(1, results[2]);
    ASSERT_EQ(1, results[3]);
    ARRAY_FOREACH(results, idx) {
      total_thread += results[idx];
    }
    ASSERT_EQ(total_thread, parallel);
  }
  {
    // Extreme skew with parallel == sqc_count: every SQC, including the one
    // with 10000 partitions, gets exactly one thread.
    int64_t parallel = 4;
    ObArray<int64_t> sqc_part_count;
    ObArray<int64_t> results;
    int64_t total_thread = 0;
    sqc_part_count.push_back(10000);
    sqc_part_count.push_back(1);
    sqc_part_count.push_back(1);
    sqc_part_count.push_back(1);
    ret = ObPXServerAddrUtil::split_parallel_into_task(parallel, sqc_part_count, results);
    ASSERT_TRUE(ret == OB_SUCCESS);
    ASSERT_EQ(4, results.count());
    ASSERT_EQ(1, results[0]);
    ASSERT_EQ(1, results[1]);
    ASSERT_EQ(1, results[2]);
    ASSERT_EQ(1, results[3]);
    ARRAY_FOREACH(results, idx) {
      total_thread += results[idx];
    }
    ASSERT_EQ(total_thread, parallel);
  }
  {
    // Extreme skew, mostly-large SQCs, parallel == sqc_count: still one
    // thread per SQC.
    int64_t parallel = 4;
    ObArray<int64_t> sqc_part_count;
    ObArray<int64_t> results;
    int64_t total_thread = 0;
    sqc_part_count.push_back(10000);
    sqc_part_count.push_back(10000);
    sqc_part_count.push_back(10000);
    sqc_part_count.push_back(1);
    ret = ObPXServerAddrUtil::split_parallel_into_task(parallel, sqc_part_count, results);
    ASSERT_TRUE(ret == OB_SUCCESS);
    ASSERT_EQ(4, results.count());
    ASSERT_EQ(1, results[0]);
    ASSERT_EQ(1, results[1]);
    ASSERT_EQ(1, results[2]);
    ASSERT_EQ(1, results[3]);
    ARRAY_FOREACH(results, idx) {
      total_thread += results[idx];
    }
    ASSERT_EQ(total_thread, parallel);
  }
  {
    // When parallel < sqc_count, each SQC must still receive one thread,
    // so the total equals the SQC count rather than `parallel`.
    int64_t parallel = 1;
    ObArray<int64_t> sqc_part_count;
    ObArray<int64_t> results;
    int64_t total_thread = 0;
    sqc_part_count.push_back(2);
    sqc_part_count.push_back(3);
    sqc_part_count.push_back(4);
    ret = ObPXServerAddrUtil::split_parallel_into_task(parallel, sqc_part_count, results);
    ASSERT_TRUE(ret == OB_SUCCESS);
    ASSERT_EQ(3, results.count());
    ASSERT_EQ(1, results[0]);
    ASSERT_EQ(1, results[1]);
    ASSERT_EQ(1, results[2]);
    ARRAY_FOREACH(results, idx) {
      total_thread += results[idx];
    }
    ASSERT_EQ(total_thread, sqc_part_count.count());
  }
  {
    // Invalid input: a negative partition count must be reported as an error.
    int64_t parallel = 1;
    ObArray<int64_t> sqc_part_count;
    ObArray<int64_t> results;
    sqc_part_count.push_back(2);
    sqc_part_count.push_back(-1);
    sqc_part_count.push_back(4);
    ret = ObPXServerAddrUtil::split_parallel_into_task(parallel, sqc_part_count, results);
    ASSERT_TRUE(ret == OB_ERR_UNEXPECTED);
  }
  {
    // Invalid input: a non-positive parallel degree must be rejected.
    int64_t parallel = 0;
    ObArray<int64_t> sqc_part_count;
    ObArray<int64_t> results;
    sqc_part_count.push_back(2);
    sqc_part_count.push_back(1);
    sqc_part_count.push_back(4);
    ret = ObPXServerAddrUtil::split_parallel_into_task(parallel, sqc_part_count, results);
    ASSERT_TRUE(ret == OB_INVALID_ARGUMENT);
  }
}
// Test entry point: enables TRACE logging, initializes the SQL factories the
// PX utilities depend on, then runs all registered test cases via gtest.
int main(int argc, char **argv)
{
  OB_LOGGER.set_log_level("TRACE");
  //oceanbase::common::ObLogger::get_logger().set_log_level("TRACE");
  // SQL factories must be initialized before any test exercises the engine.
  init_sql_factories();
  ::testing::InitGoogleTest(&argc,argv);
  return RUN_ALL_TESTS();
}