 f8c5c2647f
			
		
	
	f8c5c2647f
	
	
	
		
			
			Co-authored-by: Charles0429 <xiezhenjiang@gmail.com> Co-authored-by: tino247 <tino247@126.com> Co-authored-by: chaser-ch <chaser.ch@antgroup.com>
		
			
				
	
	
		
			1655 lines
		
	
	
		
			57 KiB
		
	
	
	
		
			C++
		
	
	
	
	
	
			
		
		
	
	
			1655 lines
		
	
	
		
			57 KiB
		
	
	
	
		
			C++
		
	
	
	
	
	
| /**
 | |
|  * Copyright (c) 2021 OceanBase
 | |
|  * OceanBase CE is licensed under Mulan PubL v2.
 | |
|  * You can use this software according to the terms and conditions of the Mulan PubL v2.
 | |
|  * You may obtain a copy of Mulan PubL v2 at:
 | |
|  *          http://license.coscl.org.cn/MulanPubL-2.0
 | |
|  * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 | |
|  * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 | |
|  * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 | |
|  * See the Mulan PubL v2 for more details.
 | |
|  */
 | |
| 
 | |
| #define USING_LOG_PREFIX TEST
 | |
| #include <getopt.h>
 | |
| #include <unistd.h>
 | |
| #include <gtest/gtest.h>
 | |
| #define protected public
 | |
| #define private public
 | |
| #include "share/scheduler/ob_dag_scheduler.h"
 | |
| #include "share/scheduler/ob_sys_task_stat.h"
 | |
| #include "lib/atomic/ob_atomic.h"
 | |
| #include "observer/omt/ob_tenant_node_balancer.h"
 | |
| #include "share/scheduler/ob_dag_warning_history_mgr.h"
 | |
| #include "storage/compaction/ob_tenant_compaction_progress.h"
 | |
| 
 | |
// Test knobs, typically overridden by command-line options in main().
int64_t dag_cnt = 1;
int64_t stress_time= 1; // stress duration in ms (scaled to us by DagSchedulerStressTester::init)
char log_level[20] = "INFO";
uint32_t time_slice = 1000;            // passed to ObTenantDagScheduler::init()
uint32_t sleep_slice = 2 * time_slice;
const int64_t CHECK_TIMEOUT = 1 * 1000 * 1000; // 1s polling budget for CHECK_EQ_UTIL_TIMEOUT
 | |
| 
 | |
// Re-evaluate `expr` until it equals `expected` or CHECK_TIMEOUT elapses,
// then assert equality with EXPECT_EQ. Used to wait for asynchronous
// scheduler state to converge without a fixed sleep.
#define CHECK_EQ_UTIL_TIMEOUT(expected, expr) \
  { \
    int64_t start_time = oceanbase::common::ObTimeUtility::current_time(); \
    auto expr_result = (expr); \
    do { \
      if ((expected) == (expr_result)) { \
        break; \
      } else { \
        expr_result = (expr); \
      }\
    } while(oceanbase::common::ObTimeUtility::current_time() - start_time < CHECK_TIMEOUT); \
    EXPECT_EQ((expected), (expr_result)); \
  }
 | |
| 
 | |
| namespace oceanbase
 | |
| {
 | |
| using namespace common;
 | |
| using namespace lib;
 | |
| using namespace share;
 | |
| using namespace omt;
 | |
| namespace unittest
 | |
| {
 | |
| 
 | |
// Task that atomically adds `adder_` to a shared counter. It self-replicates
// through generate_next_task() until `task_cnt_` tasks have been produced.
class TestAddTask : public ObITask
{
public:
  TestAddTask()
    : ObITask(ObITaskType::TASK_TYPE_UT), counter_(NULL),
      adder_(), seq_(0), task_cnt_(0), sleep_us_(0)
  {}
  ~TestAddTask() {}
  // counter: shared accumulator; adder: amount added per task; seq: this
  // task's position in the chain; task_cnt: total tasks to generate;
  // sleep_us: delay applied in process(). Always returns OB_SUCCESS.
  int init(int64_t *counter, int64_t adder, int64_t seq, int64_t task_cnt, int sleep_us = 0)
  {
    int ret = OB_SUCCESS;
    counter_ = counter;
    adder_ = adder;
    seq_ = seq;
    task_cnt_ = task_cnt;
    sleep_us_ = sleep_us;
    return ret;
  }
  // Produce the successor task (seq_ + 1); OB_ITER_END terminates the chain.
  virtual int generate_next_task(ObITask *&next_task)
  {
    int ret = OB_SUCCESS;
    if (seq_ >= task_cnt_ - 1) {
      ret = OB_ITER_END; // chain complete
      COMMON_LOG(INFO, "generate task end", K_(seq), K_(task_cnt));
    } else {
      ObIDag *dag = get_dag();
      TestAddTask *ntask = NULL;
      if (NULL == dag) {
        ret = OB_ERR_UNEXPECTED;
        COMMON_LOG(WARN, "dag is null", K(ret));
      } else if (OB_FAIL(dag->alloc_task(ntask))) {
        COMMON_LOG(WARN, "failed to alloc task", K(ret));
      } else if (NULL == ntask) {
        ret = OB_ERR_UNEXPECTED;
        COMMON_LOG(WARN, "task is null", K(ret));
      } else {
        if (OB_FAIL(ntask->init(counter_, adder_, seq_ + 1, task_cnt_, sleep_us_))) {
          COMMON_LOG(WARN, "failed to init addtask", K(ret));
        }
        if (OB_FAIL(ret)) {
          // return the half-initialized task to its dag on failure
          dag->free_task(*ntask);
        } else {
          next_task = ntask;
        }
      }
    }
    return ret;
  }
  // Sleep, then atomically add adder_ to the shared counter.
  virtual int process()
  {
    ::usleep(sleep_us_);
    (void)ATOMIC_AAF(counter_, adder_);
    return OB_SUCCESS;
  }
  VIRTUAL_TO_STRING_KV(KP_(counter), K_(seq), K_(task_cnt));
private:
  int64_t *counter_;
  int64_t adder_;
  int64_t seq_;
  int64_t task_cnt_;
  int sleep_us_;
private:
  DISALLOW_COPY_AND_ASSIGN(TestAddTask);
};
 | |
| 
 | |
// Task that doubles the value pointed at by counter_.
class TestMulTask : public ObITask
{
public:
  TestMulTask()
    : ObITask(ObITaskType::TASK_TYPE_UT), counter_(NULL), sleep_us_(0)
  {}
  ~TestMulTask() {}

  // counter: value to double in process(); sleep_us: delay before the write.
  int init(int64_t *counter, int sleep_us = 0)
  {
    int ret = OB_SUCCESS;
    counter_ = counter;
    sleep_us_ = sleep_us;
    return ret;
  }
  // Sleep then double the counter; plain (unsynchronized) read-modify-write.
  virtual int process() { ::usleep(sleep_us_); *counter_ = *counter_ * 2; return OB_SUCCESS;}
  VIRTUAL_TO_STRING_KV(KP_(counter));
private:
  int64_t *counter_;
  int sleep_us_;
private:
  DISALLOW_COPY_AND_ASSIGN(TestMulTask);
};
 | |
| 
 | |
| class AtomicOperator
 | |
| {
 | |
| public:
 | |
|   AtomicOperator() : v_(0) {}
 | |
|   AtomicOperator(int64_t v) : v_(v) {}
 | |
|   void inc()
 | |
|   {
 | |
|     lib::ObMutexGuard guard(lock_);
 | |
|     ++v_;
 | |
|   }
 | |
| 
 | |
|   void mul(int64_t m)
 | |
|   {
 | |
|     lib::ObMutexGuard guard(lock_);
 | |
|     v_ *= m;
 | |
|   }
 | |
| 
 | |
|   int64_t value()
 | |
|   {
 | |
|     return v_;
 | |
|   }
 | |
|   void reset()
 | |
|   {
 | |
|     v_ = 0;
 | |
|   }
 | |
| private:
 | |
|   lib::ObMutex lock_;
 | |
|   int64_t v_;
 | |
| };
 | |
| 
 | |
| static const int64_t SLEEP_SLICE = 100;
 | |
| 
 | |
// Self-replicating task: each instance multiplies the shared AtomicOperator
// by 2, optionally sleeping in yielding slices first, and fails deliberately
// when its sequence number equals error_seq_.
class AtomicMulTask : public ObITask
{
public:
  AtomicMulTask() :
    ObITask(ObITask::TASK_TYPE_UT), seq_(0), cnt_(0), error_seq_(-1), op_(NULL), sleep_us_(0)
  {}
  // error_seq: sequence at which process() should fail (-1 = never fail).
  int init(int64_t seq, int64_t cnt, AtomicOperator &op, int sleep_us = 0, int64_t error_seq = -1)
  {
    seq_ = seq;
    cnt_ = cnt;
    error_seq_ = error_seq;
    op_ = &op;
    sleep_us_ = sleep_us;
    return OB_SUCCESS;
  }
  // Generate task seq_+1 until cnt_ tasks exist; OB_ITER_END stops the chain.
  virtual int generate_next_task(ObITask *&task)
  {
    int ret = OB_SUCCESS;
    ObIDag *dag = NULL;
    AtomicMulTask *ntask = NULL;
    if (seq_ >= cnt_) {
      return OB_ITER_END;
    } else if (OB_ISNULL(dag = get_dag())) {
      ret = OB_ERR_UNEXPECTED;
      COMMON_LOG(WARN, "dag is NULL", K(ret));
    } else if (OB_FAIL(dag->alloc_task(ntask))){
      COMMON_LOG(WARN, "failed to alloc task", K(ret));
    } else if (OB_ISNULL(ntask)) {
      ret = OB_ERR_UNEXPECTED;
      COMMON_LOG(WARN, "ntask is NULL", K(ret));
    } else {
      COMMON_LOG(INFO, "a task is generated", K(seq_));
      // init() always returns OB_SUCCESS, so its result is intentionally unchecked
      ntask->init(seq_ + 1, cnt_, *op_, sleep_us_, error_seq_);
      task = ntask;
    }
    return ret;
  }
  // Sleep in SLEEP_SLICE chunks (yielding between chunks), then either fail
  // (seq_ == error_seq_) or multiply the shared operator by 2.
  virtual int process()
  {
    int ret = OB_SUCCESS;
    int64_t cnt = sleep_us_ / SLEEP_SLICE;
    for (int64_t i = 0; i < cnt; ++i) {
      dag_yield();
      ::usleep(SLEEP_SLICE);
    }
    if (seq_ == error_seq_) {
      return OB_ERR_UNEXPECTED;
    } else {
      op_->mul(2);
    }
    return ret;
  }
  VIRTUAL_TO_STRING_KV("type", "AtomicMul", K(*dag_), K_(seq), K_(cnt), KP_(op), K_(error_seq), K_(sleep_us));
private:
  int64_t seq_;
  int64_t cnt_;
  int64_t error_seq_;
  AtomicOperator *op_;
  int sleep_us_;
};
 | |
| 
 | |
// Self-replicating task mirroring AtomicMulTask but incrementing the shared
// AtomicOperator by one per task instead of multiplying.
class AtomicIncTask : public ObITask
{
public:
  AtomicIncTask() :
    ObITask(ObITask::TASK_TYPE_UT), seq_(0), cnt_(0), error_seq_(-1), op_(NULL), sleep_us_(0)
  {}
  // error_seq: sequence at which process() should fail (-1 = never fail).
  int init(int64_t seq, int64_t cnt, AtomicOperator &op, int sleep_us = 0, int64_t error_seq = -1)
  {
    seq_ = seq;
    cnt_ = cnt;
    error_seq_ = error_seq;
    op_ = &op;
    sleep_us_ = sleep_us;
    return OB_SUCCESS;
  }
  // Generate task seq_+1 until cnt_ tasks exist; OB_ITER_END stops the chain.
  virtual int generate_next_task(ObITask *&task)
  {
    int ret = OB_SUCCESS;
    ObIDag *dag = NULL;
    AtomicIncTask *ntask = NULL;
    if (seq_ >= cnt_) {
      return OB_ITER_END;
    } else if (OB_ISNULL(dag = get_dag())) {
      ret = OB_ERR_UNEXPECTED;
      COMMON_LOG(WARN, "dag is NULL", K(ret));
    } else if (OB_FAIL(dag->alloc_task(ntask))){
      COMMON_LOG(WARN, "failed to alloc task", K(ret));
    } else if (OB_ISNULL(ntask)) {
      ret = OB_ERR_UNEXPECTED;
      COMMON_LOG(WARN, "ntask is NULL", K(ret));
    } else {
      COMMON_LOG(INFO, "a task is generated", K(seq_));
      // init() always returns OB_SUCCESS, so its result is intentionally unchecked
      ntask->init(seq_ + 1, cnt_, *op_, sleep_us_, error_seq_);
      task = ntask;
    }
    return ret;
  }
  // Sleep in SLEEP_SLICE chunks (yielding between chunks), then either fail
  // (seq_ == error_seq_) or increment the shared operator.
  virtual int process()
  {
    int ret = OB_SUCCESS;
    int64_t cnt = sleep_us_ / SLEEP_SLICE;
    for (int64_t i = 0; i < cnt; ++i) {
      dag_yield();
      ::usleep(SLEEP_SLICE);
    }
    if (seq_ == error_seq_) {
      return OB_ERR_UNEXPECTED;
    } else {
      op_->inc();
    }
    return ret;
  }
  VIRTUAL_TO_STRING_KV("type", "AtomicInc", K(*dag_), K_(seq), K_(cnt), KP_(op), K_(error_seq), K_(sleep_us));
private:
  int64_t seq_;
  int64_t cnt_;
  int64_t error_seq_;
  AtomicOperator *op_;
  int sleep_us_;
};
 | |
| 
 | |
| class LoopWaitTask : public ObITask
 | |
| {
 | |
| public:
 | |
|   LoopWaitTask() :
 | |
|     ObITask(ObITask::TASK_TYPE_UT), seq_(0), cnt_(0)
 | |
|   {}
 | |
|   int init(int64_t seq, int64_t cnt, bool &finish_flag)
 | |
|   {
 | |
|     seq_ = seq;
 | |
|     cnt_ = cnt;
 | |
|     finish_flag_ = &finish_flag;
 | |
|     return OB_SUCCESS;
 | |
|   }
 | |
|   virtual int generate_next_task(ObITask *&task)
 | |
|   {
 | |
|     int ret = OB_SUCCESS;
 | |
|     ObIDag *dag = NULL;
 | |
|     LoopWaitTask *ntask = NULL;
 | |
|     if (seq_ >= cnt_) {
 | |
|       return OB_ITER_END;
 | |
|     } else if (OB_ISNULL(dag = get_dag())) {
 | |
|       ret = OB_ERR_UNEXPECTED;
 | |
|       COMMON_LOG(WARN, "dag is NULL", K(ret));
 | |
|     } else if (OB_FAIL(dag->alloc_task(ntask))){
 | |
|       COMMON_LOG(WARN, "failed to alloc task", K(ret));
 | |
|     } else if (OB_ISNULL(ntask)) {
 | |
|       ret = OB_ERR_UNEXPECTED;
 | |
|       COMMON_LOG(WARN, "ntask is NULL", K(ret));
 | |
|     } else {
 | |
|       COMMON_LOG(INFO, "a task is generated", K(seq_));
 | |
|       ntask->init(seq_ + 1, cnt_, *finish_flag_);
 | |
|       task = ntask;
 | |
|     }
 | |
|     return ret;
 | |
|   }
 | |
|   virtual int process()
 | |
|   {
 | |
|     int ret = OB_SUCCESS;
 | |
|     while (true) {
 | |
|       dag_yield();
 | |
|       if (nullptr != finish_flag_) {
 | |
|         if (ATOMIC_LOAD(finish_flag_) == true) {
 | |
|           break;
 | |
|         }
 | |
|       }
 | |
|       ::usleep(SLEEP_SLICE);
 | |
|     }
 | |
|     return ret;
 | |
|   }
 | |
|   VIRTUAL_TO_STRING_KV("type", "LoopWait", K(*dag_), K_(seq), K_(cnt));
 | |
| private:
 | |
|   int64_t seq_;
 | |
|   int64_t cnt_;
 | |
|   bool *finish_flag_;
 | |
| };
 | |
| 
 | |
| template<class T>
 | |
| int alloc_task(ObIDag &dag, T *&task) {
 | |
|   int ret = OB_SUCCESS;
 | |
|   task = NULL;
 | |
|   if (OB_FAIL(dag.alloc_task(task))) {
 | |
|     COMMON_LOG(WARN, "failed to alloc task", K(ret));
 | |
|   } else if (OB_ISNULL(task)) {
 | |
|     ret = OB_ERR_UNEXPECTED;
 | |
|     COMMON_LOG(WARN, "task is NULL", K(ret));
 | |
|   }
 | |
|   return ret;
 | |
| }
 | |
| 
 | |
| void wait_scheduler() {
 | |
|   ObTenantDagScheduler *scheduler = MTL(ObTenantDagScheduler*);
 | |
|   ASSERT_TRUE(nullptr != scheduler);
 | |
|   while (!scheduler->is_empty()) {
 | |
|     ::usleep(100000);
 | |
|   }
 | |
| }
 | |
| 
 | |
// Base test dag: on destruction it verifies that the accumulated operator
// value and dag return code match the expectations set via init(); a
// mismatch aborts the whole process for post-mortem analysis.
class TestDag : public ObIDag
{
public:
  TestDag() :
      ObIDag(ObDagType::DAG_TYPE_MERGE_EXECUTE), id_(0), expect_(-1), expect_ret_(0), running_(false), tester_(NULL) { }
  explicit TestDag(const ObDagType::ObDagTypeEnum type) :
      ObIDag(type), id_(0), expect_(-1), expect_ret_(0), running_(false), tester_(NULL) { }
  // For a finished/failed dag that actually ran and has an expectation set,
  // cross-check value and return code; any mismatch other than OOM stops the
  // tester thread pool (if provided) and kills the process.
  virtual ~TestDag()
  {
    if (get_dag_status() == ObIDag::DAG_STATUS_FINISH
        || get_dag_status() == ObIDag::DAG_STATUS_NODE_FAILED) {
      if (running_ && -1 != expect_) {
        if (op_.value() != expect_
            || get_dag_ret() != expect_ret_) {
          if (OB_ALLOCATE_MEMORY_FAILED != get_dag_ret()) { // OOM outcomes are tolerated
            if (NULL != tester_) {
              tester_->stop();
            }
            COMMON_LOG_RET(ERROR, OB_ERROR, "FATAL ERROR!!!", K_(expect), K(op_.value()), K_(expect_ret),
                K(get_dag_ret()), K_(id));
            common::right_to_die_or_duty_to_live();
          }
        }
      }
    }
  }
  // id: dedup key (see hash()/operator==); expect_ret/expect: values checked
  // in the destructor; tester: thread pool to stop on mismatch.
  // Always returns OB_SUCCESS; a set_self_addr failure is only logged.
  int init(int64_t id, int expect_ret = 0, int64_t expect = -1, lib::ThreadPool *tester = NULL)
  {
    id_ = id;
    expect_ret_ = expect_ret;
    expect_ = expect;
    tester_ = tester;
    ObAddr addr(1683068975,9999);
    if (OB_SUCCESS != (ObSysTaskStatMgr::get_instance().set_self_addr(addr))) {
      COMMON_LOG_RET(WARN, OB_ERROR, "failed to add sys task", K(addr));
    }
    return OB_SUCCESS;
  }
  virtual int64_t hash() const { return murmurhash(&id_, sizeof(id_), 0);}
  // Two dags are equal when both type and id match (deduplication key).
  virtual bool operator == (const ObIDag &other) const
  {
    bool bret = false;
    if (get_type() == other.get_type()) {
      const TestDag &dag = static_cast<const TestDag &>(other);
      bret = dag.id_ == id_;
    }
    return bret;
  }
  void set_id(int64_t id) { id_ = id; }
  AtomicOperator &get_op() { return op_; }
  void set_running() { running_ = true; }
  // NOTE(review): logs when buf is NULL or size < 0 but still returns
  // OB_SUCCESS and never writes a comment — confirm this no-op is intended.
  int fill_comment(char *buf,const int64_t size) const {
    int ret = OB_SUCCESS;
    if (OB_ISNULL(buf) || size <0) {
      COMMON_LOG(INFO,"buf is NULL",K(ret),K(size));
    }
    return ret;
  }
  virtual int fill_dag_key(char *buf, const int64_t buf_len) const override
  {
    UNUSEDx(buf, buf_len);
    return OB_SUCCESS;
  }
  virtual lib::Worker::CompatMode get_compat_mode() const override
  { return lib::Worker::CompatMode::MYSQL; }
  VIRTUAL_TO_STRING_KV(K_(is_inited), K_(type), K_(id), K(task_list_.get_size()));
protected:
  int64_t id_;
  int64_t expect_;
  int expect_ret_;
  AtomicOperator op_;
  bool running_;
  lib::ThreadPool *tester_;
private:
  DISALLOW_COPY_AND_ASSIGN(TestDag);
};
 | |
| 
 | |
// Low-priority dag (DDL priority) for priority-scheduling tests.
class TestLPDag : public TestDag
{
public:
  TestLPDag() : TestDag(ObDagType::DAG_TYPE_DDL) {}
private:
  DISALLOW_COPY_AND_ASSIGN(TestLPDag);
};
 | |
| 
 | |
// Mid-priority dag (major-merge type) for priority-scheduling tests.
class TestMPDag : public TestDag
{
public:
  TestMPDag() : TestDag(ObDagType::DAG_TYPE_MAJOR_MERGE) {}
private:
  DISALLOW_COPY_AND_ASSIGN(TestMPDag);
};
 | |
| 
 | |
// High-priority dag (merge-execute type) for priority-scheduling tests.
class TestHPDag : public TestDag
{
public:
  TestHPDag() : TestDag(ObDagType::DAG_TYPE_MERGE_EXECUTE) {}
private:
  DISALLOW_COPY_AND_ASSIGN(TestHPDag);
};
 | |
| 
 | |
// HA low-priority dag (backup type).
class TestHALowDag : public TestDag
{
public:
  TestHALowDag() : TestDag(ObDagType::DAG_TYPE_BACKUP) {}
private:
  DISALLOW_COPY_AND_ASSIGN(TestHALowDag);
};
 | |
| 
 | |
// Compaction mid-priority dag (merge-execute type).
class TestCompMidDag : public TestDag
{
public:
  TestCompMidDag() : TestDag(ObDagType::DAG_TYPE_MERGE_EXECUTE) {}
private:
  DISALLOW_COPY_AND_ASSIGN(TestCompMidDag);
};
 | |
| 
 | |
// Memory-related dag (mini-merge type).
class TestMemRelatedDag : public TestDag
{
public:
  TestMemRelatedDag() : TestDag(ObDagType::DAG_TYPE_MINI_MERGE) {}
private:
  DISALLOW_COPY_AND_ASSIGN(TestMemRelatedDag);
};
 | |
| 
 | |
// Compaction low-priority dag (major-merge type).
class TestCompLowDag : public TestDag
{
public:
  TestCompLowDag() : TestDag(ObDagType::DAG_TYPE_MAJOR_MERGE) {}
private:
  DISALLOW_COPY_AND_ASSIGN(TestCompLowDag);
};
 | |
| 
 | |
// DDL dag.
class TestDDLDag : public TestDag
{
public:
  TestDDLDag() : TestDag(ObDagType::DAG_TYPE_DDL) {}
private:
  DISALLOW_COPY_AND_ASSIGN(TestDDLDag);
};
 | |
| 
 | |
| class TestPrepareTask : public ObITask
 | |
| {
 | |
|   static const int64_t inc_task_cnt = 8;
 | |
|   static const int64_t mul_task_cnt = 6;
 | |
| public:
 | |
|   TestPrepareTask() : ObITask(ObITask::TASK_TYPE_UT), dag_id_(0), is_error_(false), sleep_us_(0), op_(NULL)
 | |
|   {}
 | |
| 
 | |
|   int init(int64_t dag_id, AtomicOperator *op = NULL, bool is_error = false, int sleep_us = 0)
 | |
|   {
 | |
|     int ret = OB_SUCCESS;
 | |
|     dag_id_ = dag_id;
 | |
|     is_error_ = is_error;
 | |
|     sleep_us_ = sleep_us;
 | |
|     if (NULL != op) {
 | |
|       op_ = op;
 | |
|     } else {
 | |
|       TestDag *dag = static_cast<TestDag*>(get_dag());
 | |
|       if (OB_ISNULL(dag)) {
 | |
|         ret = OB_ERR_UNEXPECTED;
 | |
|         COMMON_LOG(WARN, "dag is null", K(ret));
 | |
|       } else {
 | |
|         op_ = &dag->get_op();
 | |
|       }
 | |
|     }
 | |
|     return OB_SUCCESS;
 | |
|   }
 | |
| 
 | |
|   int process()
 | |
|   {
 | |
|     int ret = OB_SUCCESS;
 | |
|     TestDag*dag = static_cast<TestDag*>(get_dag());
 | |
|     AtomicIncTask *inc_task = NULL;
 | |
|     AtomicMulTask *mul_task = NULL;
 | |
|     AtomicMulTask *mul_task1 = NULL;
 | |
|     if (OB_ISNULL(dag)) {
 | |
|       ret = OB_ERR_UNEXPECTED;
 | |
|       COMMON_LOG(WARN, "dag is null", K(ret));
 | |
|     } else if (OB_FAIL(alloc_task(*dag, inc_task))) {
 | |
|       COMMON_LOG(WARN, "failed to alloc inc_task", K(ret));
 | |
|     } else if (OB_FAIL(inc_task->init(1, inc_task_cnt, *op_))) {
 | |
|     } else if (OB_FAIL(alloc_task(*dag, mul_task))){
 | |
|       COMMON_LOG(WARN, "failed to alloc mul task", K(ret));
 | |
|     } else if (OB_FAIL(mul_task->init(1, mul_task_cnt, *op_, 0,
 | |
|         is_error_ ? 1 + (dag_id_ % mul_task_cnt) : -1))){
 | |
|     } else if (OB_FAIL(alloc_task(*dag, mul_task1))){
 | |
|       COMMON_LOG(WARN, "failed to alloc mul task", K(ret));
 | |
|     } else if (OB_FAIL(mul_task1->init(1, mul_task_cnt, *op_))){
 | |
|     } else if (OB_FAIL(mul_task->add_child(*inc_task))) {
 | |
|       COMMON_LOG(WARN, "failed to add child", K(ret));
 | |
|     } else if (OB_FAIL(mul_task1->add_child(*inc_task))) {
 | |
|       COMMON_LOG(WARN, "failed to add child", K(ret));
 | |
|     } else if (OB_FAIL(add_child(*mul_task))) {
 | |
|       COMMON_LOG(WARN, "failed to add child to self", K(ret));
 | |
|     } else if (OB_FAIL(add_child(*mul_task1))) {
 | |
|       COMMON_LOG(WARN, "failed to add child to self", K(ret));
 | |
|     } else if (OB_FAIL(dag->add_task(*inc_task))) {
 | |
|       COMMON_LOG(WARN, "failed to add_task", K(ret));
 | |
|     } else if (OB_FAIL(dag->add_task(*mul_task1))) {
 | |
|       COMMON_LOG(WARN, "failed to add_task", K(ret));
 | |
|     } else if (OB_FAIL(dag->add_task(*mul_task))) {
 | |
|       COMMON_LOG(WARN, "failed to add_task", K(ret));
 | |
|     } else {
 | |
|       dag->set_running();
 | |
|     }
 | |
|     if (sleep_us_ > 0) {
 | |
|       ::usleep(sleep_us_);
 | |
|       if (is_error_) {
 | |
|         ret = OB_ERR_UNEXPECTED;
 | |
|       }
 | |
|     }
 | |
|     return ret;
 | |
|   }
 | |
| private:
 | |
|   int64_t dag_id_;
 | |
|   bool is_error_;
 | |
|   int sleep_us_;
 | |
|   AtomicOperator *op_;
 | |
| };
 | |
| 
 | |
| class TestCyclePrepare : public ObITask
 | |
| {
 | |
| public:
 | |
|   TestCyclePrepare()
 | |
|     : ObITask(ObITask::TASK_TYPE_UT), op_(NULL) {}
 | |
|   int init(AtomicOperator *op = NULL)
 | |
|   {
 | |
|     int ret = OB_SUCCESS;
 | |
|     if (NULL != op) {
 | |
|       op_ = op;
 | |
|     } else {
 | |
|       TestDag *dag = static_cast<TestDag*>(get_dag());
 | |
|       if (OB_ISNULL(dag)) {
 | |
|         ret = OB_ERR_UNEXPECTED;
 | |
|         COMMON_LOG(WARN, "dag is null", K(ret));
 | |
|       } else {
 | |
|         op_ = &dag->get_op();
 | |
|       }
 | |
|     }
 | |
|     return OB_SUCCESS;
 | |
|   }
 | |
| 
 | |
|   int process()
 | |
|   {
 | |
|     int ret = OB_SUCCESS;
 | |
|     TestDag*dag = static_cast<TestDag*>(get_dag());
 | |
|     AtomicIncTask *inc_task = NULL;
 | |
|     AtomicMulTask *mul_task = NULL;
 | |
|     AtomicMulTask *mul_task1 = NULL;
 | |
|     if (OB_ISNULL(dag)) {
 | |
|       ret = OB_ERR_UNEXPECTED;
 | |
|       COMMON_LOG(WARN, "dag is null", K(ret));
 | |
|     } else if (OB_FAIL(alloc_task(*dag, inc_task))) {
 | |
|       COMMON_LOG(WARN, "failed to alloc inc_task", K(ret));
 | |
|     } else if (OB_FAIL(inc_task->init(1, 5, *op_))) {
 | |
|     } else if (OB_FAIL(alloc_task(*dag, mul_task))){
 | |
|       COMMON_LOG(WARN, "failed to alloc mul task", K(ret));
 | |
|     } else if (OB_FAIL(mul_task->init(1, 5, *op_))){
 | |
|     } else if (OB_FAIL(alloc_task(*dag, mul_task1))){
 | |
|       COMMON_LOG(WARN, "failed to alloc mul task", K(ret));
 | |
|     } else if (OB_FAIL(mul_task1->init(1, 5, *op_))){
 | |
|     } else if (OB_FAIL(mul_task->add_child(*inc_task))) {
 | |
|       COMMON_LOG(WARN, "failed to add child", K(ret));
 | |
|     } else if (OB_FAIL(mul_task1->add_child(*inc_task))) {
 | |
|       COMMON_LOG(WARN, "failed to add child", K(ret));
 | |
|     } else if (OB_FAIL(inc_task->add_child(*mul_task))) {
 | |
|       COMMON_LOG(WARN, "failed to add child", K(ret));
 | |
|     } else if (OB_FAIL(add_child(*mul_task))) {
 | |
|       COMMON_LOG(WARN, "failed to add child to self", K(ret));
 | |
|     } else if (OB_FAIL(add_child(*mul_task1))) {
 | |
|       COMMON_LOG(WARN, "failed to add child to self", K(ret));
 | |
|     } else if (OB_FAIL(dag->add_task(*inc_task))) {
 | |
|       COMMON_LOG(WARN, "failed to add_task", K(ret));
 | |
|     } else if (OB_FAIL(dag->add_task(*mul_task1))) {
 | |
|       COMMON_LOG(WARN, "failed to add_task", K(ret));
 | |
|     } else if (OB_FAIL(dag->add_task(*mul_task))) {
 | |
|       dag->free_task(*mul_task);
 | |
|       COMMON_LOG(WARN, "failed to add_task", K(ret));
 | |
|     }
 | |
|     return ret;
 | |
|   }
 | |
| private:
 | |
|   AtomicOperator *op_;
 | |
| };
 | |
| 
 | |
// Multi-threaded stress driver: STRESS_THREAD_NUM worker threads keep pushing
// dags of rotating priorities into the scheduler for test_time_ us; afterwards
// the test log is grepped for ERROR lines to decide pass/fail.
class DagSchedulerStressTester : public lib::ThreadPool
{
  static const int64_t STRESS_THREAD_NUM = 16;
public:
  DagSchedulerStressTester() : scheduler_(nullptr), test_time_(0) {}
  ~DagSchedulerStressTester() { scheduler_ = nullptr; }

  // scheduler: target scheduler (not owned); test_time: duration in ms
  // (stored internally in us).
  int init(ObTenantDagScheduler *scheduler, int64_t test_time)
  {
    int ret = OB_SUCCESS;
    if (OB_ISNULL(scheduler)) {
      ret = OB_INVALID_ARGUMENT;
      COMMON_LOG(WARN, "get invalid args", K(ret), K(scheduler));
    } else {
      scheduler_ = scheduler;
      test_time_ = test_time * 1000;
    }
    return ret;
  }

  // Run all stress threads to completion, then fail if the log contains any
  // ERROR line (`grep -q` exits 0 when a match is found).
  int do_stress()
  {
    int ret = OB_SUCCESS;
    set_thread_count(STRESS_THREAD_NUM);
    int64_t start_time = ObTimeUtility::current_time();
    start();
    wait();
    int64_t elapsed_time = ObTimeUtility::current_time() - start_time;
    COMMON_LOG(INFO, "stress test finished", K(elapsed_time / 1000));
    int ret_code = system("grep ERROR test_dag_scheduler.log -q");
    ret_code = WEXITSTATUS(ret_code);
    if (ret_code == 0)
      ret = OB_ERR_UNEXPECTED;
    return ret;
  }

  // Worker body: allocate a dag whose concrete type (and thus priority) is
  // derived from a global id counter, attach a TestPrepareTask, and hand the
  // dag to the scheduler; every 10th dag is configured to fail on purpose.
  void run1()
  {
    int64_t start_time = ObTimeUtility::current_time();
    int ret = OB_SUCCESS;
    int tmp_ret = OB_SUCCESS;
    // NOTE(review): tenant_base from OB_NEW is never released here — a leak
    // tolerated in this test process; confirm if this pattern is reused.
    ObTenantBase * tenant_base = OB_NEW(ObTenantBase, "TestBase", 1);
    tenant_base->init();
    ObTenantEnv::set_tenant(tenant_base);

    while (!has_set_stop()
           && OB_SUCC(ret)
           && (ObTimeUtility::current_time() - start_time < test_time_)) {
      const int64_t dag_id = get_dag_id();
      TestDag *dag = NULL;
      TestPrepareTask *task = NULL;
      // every 10th dag is an intentional failure with expected value 0
      int expect_ret = (dag_id % 10 == 0 ? OB_ERR_UNEXPECTED : OB_SUCCESS);
      int64_t expect_value = (dag_id % 10 == 0 ? 0 : 8);

      switch (dag_id % ObDagPrio::DAG_PRIO_MAX) {
      case ObDagPrio::DAG_PRIO_DDL: {
        TestLPDag *lp_dag = NULL;
        if (OB_SUCCESS != (tmp_ret = scheduler_->alloc_dag(lp_dag))) {
          if (OB_ALLOCATE_MEMORY_FAILED != tmp_ret) {
            ret = tmp_ret;
            COMMON_LOG(ERROR, "failed to allocate dag", K(ret));
          } else {
            // OOM is expected under stress; retry with the next id
            COMMON_LOG(WARN, "out of memory", K(scheduler_->get_cur_dag_cnt()));
          }
        } else {
          dag = lp_dag;
        }
        break;
      }
      case ObDagPrio::DAG_PRIO_COMPACTION_LOW: {
        TestMPDag *mp_dag= NULL;
        if (OB_SUCCESS != (tmp_ret = scheduler_->alloc_dag(mp_dag))) {
          if (OB_ALLOCATE_MEMORY_FAILED != tmp_ret) {
            ret = tmp_ret;
            COMMON_LOG(ERROR, "failed to allocate dag", K(ret));
          } else {
            COMMON_LOG(WARN, "out of memory", K(scheduler_->get_cur_dag_cnt()));
          }
        } else {
          dag = mp_dag;
        }
        break;
      }
      default : {
        TestHPDag *hp_dag= NULL;
        if (OB_SUCCESS != (tmp_ret = scheduler_->alloc_dag(hp_dag))) {
          if (OB_ALLOCATE_MEMORY_FAILED != tmp_ret) {
            ret = tmp_ret;
            COMMON_LOG(ERROR, "failed to allocate dag", K(ret));
          } else {
            COMMON_LOG(WARN, "out of memory", K(scheduler_->get_cur_dag_cnt()));
          }
        } else {
          dag = hp_dag;
        }
        break;
      }
      }
      if (OB_SUCCESS != tmp_ret) {
        continue; // allocation failed; try the next dag id
      }

      if (OB_FAIL(dag->init(dag_id, expect_ret, expect_value, this))) {
        COMMON_LOG(WARN, "failed to init dag", K(ret));
      } else if (OB_SUCCESS != (tmp_ret = alloc_task(*dag, task))){
        COMMON_LOG(WARN, "failed to alloc task", K(tmp_ret));
      } else if (OB_FAIL(task->init(dag_id, NULL, expect_ret != OB_SUCCESS))) {
        COMMON_LOG(WARN, "failed to init task", K(ret));
      } else if (OB_FAIL(dag->add_task(*task))) {
        COMMON_LOG(WARN, "failed to add task", K(ret));
      } else {
        if (OB_SUCCESS != (tmp_ret = scheduler_->add_dag(dag))) {
          if (OB_SIZE_OVERFLOW != tmp_ret) {
            COMMON_LOG(ERROR, "failed to add dag", K(tmp_ret), K(*dag));
          }
          // scheduler rejected the dag (e.g. queue full); reclaim it
          scheduler_->free_dag(*dag);
        }
      }
    }
  }

  // Fetch-and-add on the global counter; yields a unique id per dag.
  int64_t get_dag_id()
  {
    return ATOMIC_FAA(&counter_, 1);
  }
private:
  static int64_t counter_;
  ObTenantDagScheduler *scheduler_;
  int64_t test_time_;
};
 | |
| 
 | |
// Global monotonically-increasing dag-id source shared by all stress threads.
int64_t DagSchedulerStressTester::counter_ = 0;
 | |
| 
 | |
// gtest fixture: creates a fresh ObTenantDagScheduler bound to a mock tenant
// (id 500) before every test case and tears both down afterwards.
class TestDagScheduler : public ::testing::Test
{
public:
  TestDagScheduler()
    : tenant_id_(500),
      scheduler_(nullptr),
      tenant_base_(500),
      allocator_("DagScheduler")
  { }
  ~TestDagScheduler() {}
  void SetUp()
  {
    // NOTE(review): scheduler_ from OB_NEW is only destroy()ed in TearDown,
    // not deallocated — confirm whether tenant_base_ owns the memory.
    scheduler_ = OB_NEW(ObTenantDagScheduler, ObModIds::TEST);
    tenant_base_.set(scheduler_);

    ObTenantEnv::set_tenant(&tenant_base_);
    ASSERT_EQ(OB_SUCCESS, tenant_base_.init());
  }
  void TearDown()
  {
    scheduler_->destroy();
    tenant_base_.destroy();
    ObTenantEnv::set_tenant(nullptr);
  }
private:
  const uint64_t tenant_id_;
  ObTenantDagScheduler *scheduler_;
  ObTenantBase tenant_base_;
  ObArenaAllocator allocator_;
  DISALLOW_COPY_AND_ASSIGN(TestDagScheduler);
};
 | |
| 
 | |
| 
 | |
// Verify ObTenantDagScheduler::init() rejects invalid arguments and refuses
// double initialization.
TEST_F(TestDagScheduler, test_init)
{
  ObTenantDagScheduler *scheduler = MTL(ObTenantDagScheduler*);
  ASSERT_TRUE(nullptr != scheduler);

  // invalid thread cnt
  EXPECT_EQ(OB_INVALID_ARGUMENT, scheduler->init(MTL_ID(), time_slice, time_slice, -1));
  // invalid dag_limit
  EXPECT_EQ(OB_INVALID_ARGUMENT, scheduler->init(MTL_ID(), time_slice, time_slice, 0, 0));
  // invalid total_mem_limit
  EXPECT_EQ(OB_INVALID_ARGUMENT, scheduler->init(MTL_ID(), time_slice, time_slice, 0, 10, 0));
  // invalid hold_mem_limit
  EXPECT_EQ(OB_INVALID_ARGUMENT, scheduler->init(MTL_ID(), time_slice, time_slice, 0, 10, 1024, 0));
  // hold_mem_limit larger than total_mem_limit is also rejected
  EXPECT_EQ(OB_INVALID_ARGUMENT, scheduler->init(MTL_ID(), time_slice, time_slice, 0, 10, 1024, 2048));

  // a valid init succeeds exactly once; a second call reports OB_INIT_TWICE
  EXPECT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(),time_slice, time_slice, 100));
  EXPECT_EQ(OB_INIT_TWICE, scheduler->init(MTL_ID()));
}
 | |
| 
 | |
| 
 | |
| TEST_F(TestDagScheduler, baisc_test)
 | |
| {
 | |
|   ObTenantDagScheduler *scheduler = MTL(ObTenantDagScheduler*);
 | |
|   ASSERT_TRUE(nullptr != scheduler);
 | |
|   ASSERT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice));
 | |
| 
 | |
|   int ret = OB_SUCCESS;
 | |
|   TestDag *dag = NULL;
 | |
|   TestDag *dup_dag = NULL;
 | |
|   int64_t counter = 0;
 | |
|   //simple two-level dag
 | |
|   if (OB_FAIL(scheduler->alloc_dag(dag))) {
 | |
|     COMMON_LOG(WARN, "failed to alloc dag");
 | |
|   } else if (NULL == dag) {
 | |
|     ret = OB_ERR_UNEXPECTED;
 | |
|     COMMON_LOG(WARN, "dag is null", K(ret));
 | |
|   } else if (OB_FAIL(dag->init(1))) {
 | |
|     COMMON_LOG(WARN, "failed to init dag", K(ret));
 | |
|   } else {
 | |
|     TestAddTask *add_task = NULL;
 | |
|     TestMulTask *mul_task = NULL;
 | |
|     if (OB_FAIL(dag->alloc_task(mul_task))) {
 | |
|       COMMON_LOG(WARN, "failed to alloc task", K(ret));
 | |
|     } else if (NULL == mul_task) {
 | |
|       ret = OB_ERR_UNEXPECTED;
 | |
|       COMMON_LOG(WARN, "task is null", K(ret));
 | |
|     } else {
 | |
|       if (OB_FAIL(mul_task->init(&counter))) {
 | |
|         COMMON_LOG(WARN, "failed to init add task", K(ret));
 | |
|       } else if (OB_FAIL(dag->add_task(*mul_task))) {
 | |
|         COMMON_LOG(WARN, "failed to add task", K(ret));
 | |
|       }
 | |
|       if (OB_FAIL(ret)) {
 | |
|         dag->free_task(*mul_task);
 | |
|       } else {
 | |
|         if (OB_FAIL(dag->alloc_task(add_task))) {
 | |
|           COMMON_LOG(WARN, "failed to alloc task", K(ret));
 | |
|         } else if (NULL == add_task) {
 | |
|           ret = OB_ERR_UNEXPECTED;
 | |
|           COMMON_LOG(WARN, "task is null", K(ret));
 | |
|         } else {
 | |
|           if (OB_FAIL(add_task->init(&counter, 1, 0, 10, 200 * 1000))) {
 | |
|             COMMON_LOG(WARN, "failed to init add task", K(ret));
 | |
|           } else if (OB_FAIL(add_task->add_child(*mul_task))) {
 | |
|             COMMON_LOG(WARN, "failed to add child", K(ret));
 | |
|           } else if (OB_FAIL(dag->add_task(*add_task))) {
 | |
|             COMMON_LOG(WARN, "failed to add task");
 | |
|           }
 | |
|           if (OB_FAIL(ret)) {
 | |
|             dag->free_task(*add_task);
 | |
|           }
 | |
|         }
 | |
|       }
 | |
|     }
 | |
|   }
 | |
|   // check deduplication functionality
 | |
|   if (OB_FAIL(scheduler->alloc_dag(dup_dag))) {
 | |
|     COMMON_LOG(WARN, "failed to alloc dag");
 | |
|   } else if (NULL == dup_dag) {
 | |
|     ret = OB_ERR_UNEXPECTED;
 | |
|     COMMON_LOG(WARN, "dag is null", K(ret));
 | |
|   } else if (OB_FAIL(dup_dag->init(1))) {
 | |
|     COMMON_LOG(WARN, "failed to init dag", K(ret));
 | |
|   } else {
 | |
|     TestMulTask *mul_task = NULL;
 | |
|     if (OB_FAIL(dup_dag->alloc_task(mul_task))) {
 | |
|       COMMON_LOG(WARN, "failed to alloc task", K(ret));
 | |
|     } else if (NULL == mul_task) {
 | |
|       ret = OB_ERR_UNEXPECTED;
 | |
|       COMMON_LOG(WARN, "task is null", K(ret));
 | |
|     } else {
 | |
|       if (OB_FAIL(mul_task->init(&counter))) {
 | |
|         COMMON_LOG(WARN, "failed to init add task", K(ret));
 | |
|       } else if (OB_FAIL(dup_dag->add_task(*mul_task))) {
 | |
|         COMMON_LOG(WARN, "failed to add task", K(ret));
 | |
|       }
 | |
|       if (OB_FAIL(ret)) {
 | |
|         dup_dag->free_task(*mul_task);
 | |
|       }
 | |
|     }
 | |
|   }
 | |
|   EXPECT_EQ(OB_SUCCESS, ret);
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag));
 | |
|   EXPECT_EQ(OB_EAGAIN, scheduler->add_dag(dup_dag));
 | |
|   if (OB_NOT_NULL(dup_dag)) {
 | |
|     scheduler->free_dag(*dup_dag);
 | |
|   }
 | |
|   wait_scheduler();
 | |
|   EXPECT_EQ(counter, 20);
 | |
| 
 | |
|   // three level dag that each level would generate dynamic tasks
 | |
|   AtomicOperator op(0);
 | |
|   TestDag *dag1 = NULL;
 | |
|   AtomicIncTask *inc_task = NULL;
 | |
|   AtomicIncTask *inc_task1 = NULL;
 | |
|   AtomicMulTask *mul_task = NULL;
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag1));
 | |
| 
 | |
|   EXPECT_EQ(OB_SUCCESS, dag1->init(1));
 | |
|   EXPECT_EQ(OB_SUCCESS, alloc_task(*dag1, inc_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, inc_task->init(1, 10, op));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag1->add_task(*inc_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, alloc_task(*dag1, mul_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, mul_task->init(1, 4, op));
 | |
|   EXPECT_EQ(OB_SUCCESS, mul_task->add_child(*inc_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag1->add_task(*mul_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, alloc_task(*dag1, inc_task1));
 | |
|   EXPECT_EQ(OB_SUCCESS, inc_task1->init(1, 10, op));
 | |
|   EXPECT_EQ(OB_SUCCESS, inc_task1->add_child(*mul_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag1->add_task(*inc_task1));
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag1));
 | |
|   wait_scheduler();
 | |
|   EXPECT_EQ(170, op.value());
 | |
| 
 | |
|   // two-level dag with 2 tasks on the first level
 | |
|   op.reset();
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag1));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag1->init(1));
 | |
|   //add mul task
 | |
|   EXPECT_EQ(OB_SUCCESS, alloc_task(*dag1, mul_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, mul_task->init(1, 4, op));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag1->add_task(*mul_task));
 | |
|   // add inc task
 | |
|   EXPECT_EQ(OB_SUCCESS, alloc_task(*dag1, inc_task1));
 | |
|   EXPECT_EQ(OB_SUCCESS, inc_task1->init(1, 10, op));
 | |
|   EXPECT_EQ(OB_SUCCESS, inc_task1->add_child(*mul_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag1->add_task(*inc_task1));
 | |
|   // add another inc task
 | |
|   EXPECT_EQ(OB_SUCCESS, alloc_task(*dag1, inc_task1));
 | |
|   EXPECT_EQ(OB_SUCCESS, inc_task1->init(1, 10, op));
 | |
|   EXPECT_EQ(OB_SUCCESS, inc_task1->add_child(*mul_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag1->add_task(*inc_task1));
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag1));
 | |
|   wait_scheduler();
 | |
|   EXPECT_EQ(320, op.value());
 | |
| 
 | |
|   // a dag with single task which generate all other tasks while processing
 | |
|   TestPrepareTask *prepare_task = NULL;
 | |
|   op.reset();
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag1));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag1->init(1));
 | |
|   EXPECT_EQ(OB_SUCCESS, alloc_task(*dag1, prepare_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, prepare_task->init(1, &op));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag1->add_task(*prepare_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag1));
 | |
|   wait_scheduler();
 | |
|   EXPECT_EQ(8, op.value());
 | |
| 
 | |
|   op.reset();
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag1));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag1->init(1));
 | |
|   EXPECT_EQ(OB_SUCCESS, alloc_task(*dag1, prepare_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, prepare_task->init(1, &op, false, 1000*1000));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag1->add_task(*prepare_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag1));
 | |
|   wait_scheduler();
 | |
|   EXPECT_EQ(8, op.value());
 | |
| }
 | |
| 
 | |
// Verifies that dependency cycles in a dag are rejected:
//  - a task cannot be its own child (OB_INVALID_ARGUMENT from add_child),
//  - adding a task whose child edges would close a cycle fails in add_task,
//  - a dag whose tasks form a cycle at runtime (TestCyclePrepare) never executes
//    its payload, so the operator count stays 0.
TEST_F(TestDagScheduler, test_cycle)
{
  ObTenantDagScheduler *scheduler = MTL(ObTenantDagScheduler*);
  ASSERT_TRUE(nullptr != scheduler);
  ASSERT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice));

  int ret = OB_SUCCESS;
  TestDag *dag = NULL;
  int64_t counter = 0;
  if (OB_FAIL(scheduler->alloc_dag(dag))) {
    COMMON_LOG(WARN, "failed to alloc dag");
  } else if (NULL == dag) {
    ret = OB_ERR_UNEXPECTED;
    COMMON_LOG(WARN, "dag is null", K(ret));
  } else if (OB_FAIL(dag->init(1))) {
    COMMON_LOG(WARN, "failed to init dag", K(ret));
  } else {
    TestAddTask *add_task = NULL;
    TestMulTask *mul_task = NULL;
    if (OB_FAIL(dag->alloc_task(mul_task))) {
      COMMON_LOG(WARN, "failed to alloc task", K(ret));
    } else if (NULL == mul_task) {
      ret = OB_ERR_UNEXPECTED;
      COMMON_LOG(WARN, "task is null", K(ret));
    } else {
      if (OB_FAIL(mul_task->init(&counter))) {
        COMMON_LOG(WARN, "failed to init add task", K(ret));
      }
      // Self-edge: a task may not be its own child.
      EXPECT_EQ(OB_INVALID_ARGUMENT, mul_task->add_child(*mul_task));
      if (OB_SUCC(ret)) {
        if (OB_FAIL(dag->alloc_task(add_task))) {
          COMMON_LOG(WARN, "failed to alloc task", K(ret));
        } else if (NULL == add_task) {
          ret = OB_ERR_UNEXPECTED;
          COMMON_LOG(WARN, "task is null", K(ret));
        } else {
          // Deliberately build a 2-node cycle: add_task -> mul_task -> add_task.
          add_task->init(&counter, 1, 0, 3);
          add_task->add_child(*mul_task);
          mul_task->add_child(*add_task);
        }
        // mul_task itself is accepted; add_task would close the cycle and is rejected.
        EXPECT_EQ(OB_SUCCESS, dag->add_task(*mul_task));
        EXPECT_EQ(OB_INVALID_ARGUMENT, dag->add_task(*add_task));
        dag->free_task(*add_task);
        scheduler->free_dag(*dag);
      }
    }
  }
  // Runtime cycle: TestCyclePrepare creates a cyclic sub-graph while running;
  // the dag is expected to fail with OB_INVALID_ARGUMENT and do no work.
  TestCyclePrepare *prepare_task = NULL;
  AtomicOperator op;
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag));
  EXPECT_EQ(OB_SUCCESS, dag->init(1, OB_INVALID_ARGUMENT, 0));
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag, prepare_task));
  EXPECT_EQ(OB_SUCCESS, prepare_task->init(&op));
  EXPECT_EQ(OB_SUCCESS, dag->add_task(*prepare_task));
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag));
  wait_scheduler();
  EXPECT_EQ(0, op.value());
}
 | |
| 
 | |
| TEST_F(TestDagScheduler, test_priority)
 | |
| {
 | |
|   ObTenantDagScheduler *scheduler = MTL(ObTenantDagScheduler*);
 | |
|   ASSERT_TRUE(nullptr != scheduler);
 | |
|   ASSERT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice));
 | |
| 
 | |
|   AtomicOperator op(0);
 | |
| 
 | |
|   int32_t thread_cnt = scheduler->get_work_thread_num();
 | |
| 
 | |
|   int32_t concurrency = 60;
 | |
|   bool finish_flag[5] = {false, false, false, false, false};
 | |
| 
 | |
|   TestDDLDag *dag1 = NULL;
 | |
|   LoopWaitTask *inc_task = NULL;
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag1));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag1->init(1));
 | |
|   EXPECT_EQ(OB_SUCCESS, alloc_task(*dag1, inc_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, inc_task->init(1, concurrency, finish_flag[0]));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag1->add_task(*inc_task));
 | |
|   TestHALowDag *dag2 = NULL;
 | |
|   LoopWaitTask *mul_task = NULL;
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag2));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag2->init(2));
 | |
|   EXPECT_EQ(OB_SUCCESS, alloc_task(*dag2, mul_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, mul_task->init(1, concurrency, finish_flag[1]));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag2->add_task(*mul_task));
 | |
|   TestCompLowDag *dag3 = NULL;
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag3));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag3->init(3));
 | |
|   EXPECT_EQ(OB_SUCCESS, alloc_task(*dag3, inc_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, inc_task->init(1, concurrency, finish_flag[2]));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag3->add_task(*inc_task));
 | |
|   TestMemRelatedDag *dag4 = NULL;
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag4));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag4->init(4));
 | |
|   EXPECT_EQ(OB_SUCCESS, alloc_task(*dag4, inc_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, inc_task->init(1, concurrency, finish_flag[3]));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag4->add_task(*inc_task));
 | |
|   TestCompMidDag *dag5 = NULL;
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag5));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag5->init(5));
 | |
|   EXPECT_EQ(OB_SUCCESS, alloc_task(*dag5, inc_task));
 | |
|   EXPECT_EQ(OB_SUCCESS, inc_task->init(1, concurrency, finish_flag[4]));
 | |
|   EXPECT_EQ(OB_SUCCESS, dag5->add_task(*inc_task));
 | |
| 
 | |
|   const int64_t uplimit = 10;
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->set_thread_score(ObDagPrio::DAG_PRIO_DDL, uplimit));
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->set_thread_score(ObDagPrio::DAG_PRIO_HA_LOW, uplimit));
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->set_thread_score(ObDagPrio::DAG_PRIO_COMPACTION_MID, uplimit));
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->set_thread_score(ObDagPrio::DAG_PRIO_HA_HIGH, uplimit));
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->set_thread_score(ObDagPrio::DAG_PRIO_COMPACTION_HIGH, uplimit));
 | |
| 
 | |
|   // 13 threads in total
 | |
|   int64_t max_uplimit = uplimit;
 | |
|   int64_t threads_sum = 0;
 | |
|   int64_t work_thread_num = 0;
 | |
|   for (int64_t i = 0; i < ObDagPrio::DAG_PRIO_MAX; ++i) { // calc sum of default_low_limit
 | |
|     threads_sum += scheduler->up_limits_[i];
 | |
|   }
 | |
|   work_thread_num = threads_sum;
 | |
| 
 | |
|   EXPECT_EQ(work_thread_num, scheduler->get_work_thread_num());
 | |
|   scheduler->dump_dag_status();
 | |
| 
 | |
|   // low priority run at max speed
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag1));
 | |
|   CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_DDL));
 | |
| 
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag2));
 | |
|   CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_HA_LOW));
 | |
|   CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_DDL));
 | |
| 
 | |
|   dag3->set_priority(ObDagPrio::DAG_PRIO_HA_HIGH);
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag3));
 | |
|   CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_HA_HIGH));
 | |
|   CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_HA_LOW));
 | |
|   CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_DDL));
 | |
| 
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag4));
 | |
|   CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_COMPACTION_HIGH));
 | |
|   CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_HA_HIGH));
 | |
|   CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_HA_LOW));
 | |
|   CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_DDL));
 | |
| 
 | |
|   EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag5));
 | |
|   CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_COMPACTION_HIGH));
 | |
|   CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_HA_HIGH));
 | |
|   CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_COMPACTION_MID));
 | |
|   CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_HA_LOW));
 | |
|   CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_DDL));
 | |
| 
 | |
|   for (int i = 0; i < 5; ++i) {
 | |
|     finish_flag[i] = true;
 | |
|   }
 | |
| 
 | |
|   wait_scheduler();
 | |
| }
 | |
| 
 | |
// Verifies error propagation inside a dag: when one task fails, the whole dag is
// aborted and downstream work is skipped (or already-finished upstream work is kept).
// AtomicMulTask's last two init args (0, 8) / (0, 1) inject a failure at a given step.
TEST_F(TestDagScheduler, test_error_handling)
{
  ObTenantDagScheduler *scheduler = MTL(ObTenantDagScheduler*);
  ASSERT_TRUE(nullptr != scheduler);
  ASSERT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice));

  AtomicOperator op(0);
  TestDag *dag = NULL;
  AtomicMulTask *mul_task = NULL;
  AtomicIncTask *inc_task = NULL;
  // Failing parent (mul) -> child (inc): child never runs, op stays 0.
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag));
  EXPECT_EQ(OB_SUCCESS, dag->init(1));
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag, mul_task));
  EXPECT_EQ(OB_SUCCESS, mul_task->init(1, 10, op, 0, 8));
  EXPECT_EQ(OB_SUCCESS, dag->add_task(*mul_task));
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag, inc_task));
  EXPECT_EQ(OB_SUCCESS, inc_task->init(1, 10, op));
  EXPECT_EQ(OB_SUCCESS, mul_task->add_child(*inc_task));
  EXPECT_EQ(OB_SUCCESS, dag->add_task(*inc_task));
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag));
  wait_scheduler();
  EXPECT_EQ(0, op.value());

  // inc (+10) succeeds, then mul fails, then the second inc is skipped: op == 10.
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag));
  EXPECT_EQ(OB_SUCCESS, dag->init(1));
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag, mul_task));
  EXPECT_EQ(OB_SUCCESS, mul_task->init(1, 1, op, 0, 1));
  EXPECT_EQ(OB_SUCCESS, dag->add_task(*mul_task));
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag, inc_task));
  EXPECT_EQ(OB_SUCCESS, inc_task->init(1, 10, op));
  EXPECT_EQ(OB_SUCCESS, inc_task->add_child(*mul_task));
  EXPECT_EQ(OB_SUCCESS, dag->add_task(*inc_task));
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag, inc_task));
  EXPECT_EQ(OB_SUCCESS, inc_task->init(1, 10, op));
  EXPECT_EQ(OB_SUCCESS, mul_task->add_child(*inc_task));
  EXPECT_EQ(OB_SUCCESS, dag->add_task(*inc_task));
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag));
  wait_scheduler();
  EXPECT_EQ(10, op.value());

  // A prepare task that fails (third init arg = true) while generating the dag:
  // none of the generated work may execute.
  TestDag *dag1 = NULL;
  TestPrepareTask *prepare_task = NULL;
  op.reset();
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag1));
  EXPECT_EQ(OB_SUCCESS, dag1->init(1));
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag1, prepare_task));
  EXPECT_EQ(OB_SUCCESS, prepare_task->init(1, &op, true));
  EXPECT_EQ(OB_SUCCESS, dag1->add_task(*prepare_task));
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag1));
  wait_scheduler();
  EXPECT_EQ(0, op.value());

  // Same failing prepare task, but with a 1s delay before failing.
  op.reset();
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag1));
  EXPECT_EQ(OB_SUCCESS, dag1->init(1));
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag1, prepare_task));
  EXPECT_EQ(OB_SUCCESS, prepare_task->init(1, &op, true, 1000*1000));
  EXPECT_EQ(OB_SUCCESS, dag1->add_task(*prepare_task));
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag1));
  wait_scheduler();
  EXPECT_EQ(0, op.value());
}
 | |
| 
 | |
// Debug helper: logs the scheduler's global counters plus the per-priority limits and
// running-task count for priority slot `idx`. Uses a gtest fatal assertion, so it must
// only be called from within a TEST body (ASSERT_* in a void helper is legal gtest).
// NOTE: the K() macro stringifies each expression into the log line, so the local is
// intentionally named `scheduler` to keep the log output stable.
void print_state(int64_t idx)
{
  ObTenantDagScheduler *scheduler = MTL(ObTenantDagScheduler*);
  ASSERT_TRUE(nullptr != scheduler);
  COMMON_LOG(INFO, "scheduler state: ", K(scheduler->total_running_task_cnt_), K(scheduler->work_thread_num_),
      K(scheduler->total_worker_cnt_), K(scheduler->low_limits_[idx]),
      K(scheduler->up_limits_[idx]), K(scheduler->running_task_cnts_[idx]));
}
 | |
| 
 | |
// Verifies dynamic re-configuration of a priority's thread score while a dag with many
// slow tasks is running: set_thread_score(prio, n) must move both low_limits_ and
// up_limits_ to n, the running task count must converge to n, and score 0 must restore
// the default limit (OB_DAG_PRIOS[prio].score_).
TEST_F(TestDagScheduler, test_set_concurrency)
{
  ObTenantDagScheduler *scheduler = MTL(ObTenantDagScheduler*);
  ASSERT_TRUE(nullptr != scheduler);

  int64_t prio = ObDagPrio::DAG_PRIO_COMPACTION_MID;
  // Default score for this priority; also the expected value after reset-to-0.
  int32_t uplimit = OB_DAG_PRIOS[prio].score_;
  EXPECT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice));

  // Number of slow inc tasks — enough to keep all threads of this priority busy.
  int cnt = 30;

  scheduler->dump_dag_status();
  TestHPDag *dag = NULL;
  AtomicIncTask *inc_task = NULL;
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag));
  EXPECT_EQ(OB_SUCCESS, dag->init(1));
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag, inc_task));
  EXPECT_EQ(OB_SUCCESS, inc_task->init(1, cnt, dag->get_op(), 10 * sleep_slice));
  EXPECT_EQ(OB_SUCCESS, dag->add_task(*inc_task));
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag));

  // Before any override, limits sit at the default score and all are busy.
  CHECK_EQ_UTIL_TIMEOUT(OB_DAG_PRIOS[prio].score_, scheduler->low_limits_[prio]);
  CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->up_limits_[prio]);
  CHECK_EQ_UTIL_TIMEOUT(uplimit, scheduler->total_running_task_cnt_);
  EXPECT_TRUE(uplimit <= scheduler->work_thread_num_);
  // set max to 20
  EXPECT_EQ(OB_SUCCESS, scheduler->set_thread_score(prio, 20));
  EXPECT_EQ(20, scheduler->up_limits_[prio]);
  //EXPECT_EQ(41, scheduler->work_thread_num_);
  // set to 1
  EXPECT_EQ(OB_SUCCESS, scheduler->set_thread_score(prio, 1));
  print_state(prio);
  EXPECT_EQ(1, scheduler->low_limits_[prio]);
  EXPECT_EQ(1, scheduler->up_limits_[prio]);
  CHECK_EQ_UTIL_TIMEOUT(1, scheduler->total_running_task_cnt_);
  EXPECT_TRUE(1 <= scheduler->work_thread_num_);
  // set to 2
  EXPECT_EQ(OB_SUCCESS, scheduler->set_thread_score(prio, 2));
  print_state(prio);
  EXPECT_EQ(2, scheduler->low_limits_[prio]);
  EXPECT_EQ(2, scheduler->up_limits_[prio]);
  CHECK_EQ_UTIL_TIMEOUT(2, scheduler->total_running_task_cnt_);
  EXPECT_TRUE(2 <= scheduler->work_thread_num_);
  // set to 5
  EXPECT_EQ(OB_SUCCESS, scheduler->set_thread_score(prio, 5));
  print_state(prio);
  EXPECT_EQ(5, scheduler->low_limits_[prio]);
  EXPECT_EQ(5, scheduler->up_limits_[prio]);
  EXPECT_TRUE(5 <= scheduler->work_thread_num_);
  // set to 0 — restores the default score for this priority.
  EXPECT_EQ(OB_SUCCESS, scheduler->set_thread_score(prio, 0));
  print_state(prio);
  EXPECT_EQ(uplimit, scheduler->low_limits_[prio]);
  EXPECT_EQ(scheduler->up_limits_[prio], uplimit);
  EXPECT_TRUE(uplimit <= scheduler->work_thread_num_);
  wait_scheduler();
}
 | |
| 
 | |
| TEST_F(TestDagScheduler, stress_test)
 | |
| {
 | |
|   ObTenantDagScheduler *scheduler = MTL(ObTenantDagScheduler*);
 | |
|   ASSERT_TRUE(nullptr != scheduler);
 | |
|   ASSERT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice, 64));
 | |
| 
 | |
|   DagSchedulerStressTester tester;
 | |
|   tester.init(scheduler, stress_time);
 | |
|   EXPECT_EQ(OB_SUCCESS, tester.do_stress());
 | |
|   scheduler->destroy();
 | |
| }
 | |
| 
 | |
// Verifies get_dag_count() bookkeeping per dag type:
//  - all counts start at 0, and an out-of-range type (DAG_TYPE_MAX) returns -1;
//  - while the scheduler is running, a finished dag no longer counts (first phase);
//  - after stop(), queued dags accumulate and the count reflects them exactly.
TEST_F(TestDagScheduler, test_get_dag_count)
{
  ObTenantDagScheduler *scheduler = MTL(ObTenantDagScheduler*);
  ASSERT_TRUE(nullptr != scheduler);
  ASSERT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice));

  TestMPDag *dag = NULL;
  TestMPDag *dag2 = NULL;
  TestMulTask *mul_task = NULL;
  TestMulTask *mul_task2 = NULL;
  int64_t counter = 1;

  EXPECT_EQ(0, scheduler->get_dag_count(ObDagType::DAG_TYPE_MAJOR_MERGE));
  EXPECT_EQ(0, scheduler->get_dag_count(ObDagType::DAG_TYPE_MERGE_EXECUTE));
  EXPECT_EQ(0, scheduler->get_dag_count(ObDagType::DAG_TYPE_DDL));
  // Invalid type id is reported as -1, not 0.
  EXPECT_EQ(-1, scheduler->get_dag_count(ObDagType::DAG_TYPE_MAX));

  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag));
  EXPECT_EQ(OB_SUCCESS, dag->init(1));
  EXPECT_EQ(OB_SUCCESS, dag->alloc_task(mul_task));
  EXPECT_EQ(OB_SUCCESS, mul_task->init(&counter));
  EXPECT_EQ(OB_SUCCESS, dag->add_task(*mul_task));
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag));
  // Give the running scheduler time to finish the dag — count drops back to 0.
  sleep(1);
  EXPECT_EQ(0, scheduler->get_dag_count(ObDagType::DAG_TYPE_MERGE_EXECUTE));
  EXPECT_EQ(0, scheduler->get_dag_count(ObDagType::DAG_TYPE_MAJOR_MERGE));
  EXPECT_EQ(0, scheduler->get_dag_count(ObDagType::DAG_TYPE_DDL));
  EXPECT_EQ(-1, scheduler->get_dag_count(ObDagType::DAG_TYPE_MAX));
  // Stop the worker threads so further dags stay queued and remain countable.
  scheduler->stop();
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag));
  EXPECT_EQ(OB_SUCCESS, dag->init(1));
  EXPECT_EQ(OB_SUCCESS, dag->alloc_task(mul_task));
  EXPECT_EQ(OB_SUCCESS, mul_task->init(&counter));
  EXPECT_EQ(OB_SUCCESS, dag->add_task(*mul_task));
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag));
  EXPECT_EQ(0, scheduler->get_dag_count(ObDagType::DAG_TYPE_MERGE_EXECUTE));
  EXPECT_EQ(1, scheduler->get_dag_count(ObDagType::DAG_TYPE_MAJOR_MERGE));
  EXPECT_EQ(0, scheduler->get_dag_count(ObDagType::DAG_TYPE_DDL));
  EXPECT_EQ(-1, scheduler->get_dag_count(ObDagType::DAG_TYPE_MAX));
  // A second queued dag of the same type bumps the count to 2.
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag2));
  EXPECT_EQ(OB_SUCCESS, dag2->init(2));
  EXPECT_EQ(OB_SUCCESS, dag2->alloc_task(mul_task2));
  EXPECT_EQ(OB_SUCCESS, mul_task2->init(&counter));
  EXPECT_EQ(OB_SUCCESS, dag2->add_task(*mul_task2));
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag2));
  EXPECT_EQ(0, scheduler->get_dag_count(ObDagType::DAG_TYPE_MERGE_EXECUTE));
  EXPECT_EQ(2, scheduler->get_dag_count(ObDagType::DAG_TYPE_MAJOR_MERGE));
  EXPECT_EQ(0, scheduler->get_dag_count(ObDagType::DAG_TYPE_DDL));
  EXPECT_EQ(-1, scheduler->get_dag_count(ObDagType::DAG_TYPE_MAX));
}
 | |
| 
 | |
// Verifies the scheduler can be torn down (by the fixture) while dags are still
// running. Two long-running dags (DDL priority and COMPACTION_MID boosted to 100
// threads) are queued; the test only checks they spin up, and deliberately does NOT
// wait for completion — destruction happens with work in flight.
// The AtomicOperator is heap-allocated from the fixture allocator so it outlives
// any task still referencing it during teardown.
TEST_F(TestDagScheduler, test_destroy_when_running)
{
  ObTenantDagScheduler *scheduler = MTL(ObTenantDagScheduler*);
  ASSERT_TRUE(nullptr != scheduler);
  ASSERT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice));

  void *buf = nullptr;
  ASSERT_TRUE(nullptr != (buf = allocator_.alloc(sizeof(AtomicOperator))));
  AtomicOperator *op = new(buf) AtomicOperator();

  TestLPDag *dag1 = NULL;
  AtomicIncTask *inc_task = NULL;
  int32_t thread_cnt = 64;
  // Expected steady-state concurrency of the DDL (low) priority.
  int32_t lp_max = OB_DAG_PRIOS[ObDagPrio::DAG_PRIO_DDL].score_;
  scheduler->set_thread_score(ObDagPrio::DAG_PRIO_COMPACTION_MID, 100);
  // thread cnt is 121
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag1));
  EXPECT_EQ(OB_SUCCESS, dag1->init(1));
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag1, inc_task));
  EXPECT_EQ(OB_SUCCESS, inc_task->init(1, thread_cnt * 3, *op, 2*sleep_slice));
  EXPECT_EQ(OB_SUCCESS, dag1->add_task(*inc_task));
  TestHPDag *dag2 = NULL;
  AtomicMulTask *mul_task = NULL;
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag2));
  EXPECT_EQ(OB_SUCCESS, dag2->init(2));
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag2, mul_task));
  EXPECT_EQ(OB_SUCCESS, mul_task->init(1, thread_cnt * 10, *op, 3*sleep_slice));
  EXPECT_EQ(OB_SUCCESS, dag2->add_task(*mul_task));

  #ifndef BUILD_COVERAGE
  // not participate in coverage compilation to fix hang problem
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag1));
  CHECK_EQ_UTIL_TIMEOUT(lp_max, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_DDL));
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag2));
  // high priority preempt quotas from low priority, low priority run at min thread
  // CHECK_EQ_UTIL_TIMEOUT(hp_max, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_COMPACTION_MID));
  CHECK_EQ_UTIL_TIMEOUT(lp_max, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_DDL));
  #endif
}
 | |
| 
 | |
// Verifies up-limit enforcement across HA_LOW / HA_MID / HA_HIGH: when a higher
// priority joins, it preempts worker quota from lower priorities, which fall back
// toward their low limit; each priority's running count must track
// min(remaining threads, its up-limit).
TEST_F(TestDagScheduler, test_up_limit)
{
  ObTenantDagScheduler *scheduler = MTL(ObTenantDagScheduler*);
  ASSERT_TRUE(nullptr != scheduler);
  ASSERT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice));

  AtomicOperator op(0);

  TestLPDag *dag1 = NULL;
  AtomicIncTask *inc_task = NULL;
  // Snapshot the configured limits up front; assertions below are expressed in them.
  const int32_t lp_min = scheduler->low_limits_[ObDagPrio::DAG_PRIO_HA_LOW];
  const int32_t lp_max = scheduler->up_limits_[ObDagPrio::DAG_PRIO_HA_LOW];
  const int32_t mp_max = scheduler->up_limits_[ObDagPrio::DAG_PRIO_HA_MID];
  const int32_t hp_max = scheduler->up_limits_[ObDagPrio::DAG_PRIO_HA_HIGH];
  const int64_t cnt = 64;
  scheduler->dump_dag_status();
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag1));
  EXPECT_EQ(OB_SUCCESS, dag1->init(1));
  dag1->set_priority(ObDagPrio::DAG_PRIO_HA_LOW);
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag1, inc_task));
  EXPECT_EQ(OB_SUCCESS, inc_task->init(1, cnt, op, 2*sleep_slice));
  EXPECT_EQ(OB_SUCCESS, dag1->add_task(*inc_task));
  TestHPDag *dag2 = NULL;
  AtomicMulTask *mul_task = NULL;
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag2));
  EXPECT_EQ(OB_SUCCESS, dag2->init(2));
  dag2->set_priority(ObDagPrio::DAG_PRIO_HA_HIGH);
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag2, mul_task));
  EXPECT_EQ(OB_SUCCESS, mul_task->init(1, cnt, op, 4*sleep_slice));
  EXPECT_EQ(OB_SUCCESS, dag2->add_task(*mul_task));
  TestMPDag *dag3 = NULL;
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag3));
  EXPECT_EQ(OB_SUCCESS, dag3->init(3));
  dag3->set_priority(ObDagPrio::DAG_PRIO_HA_MID);
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag3, inc_task));
  EXPECT_EQ(OB_SUCCESS, inc_task->init(1, cnt, op, 3*sleep_slice));
  EXPECT_EQ(OB_SUCCESS, dag3->add_task(*inc_task));

  // Alone, the low priority saturates its own up-limit.
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag1));
  CHECK_EQ_UTIL_TIMEOUT(lp_max, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_HA_LOW));
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag2));
  // high priority preempt quotas from low priority, low priority run at min thread
  CHECK_EQ_UTIL_TIMEOUT(hp_max, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_HA_HIGH));
  CHECK_EQ_UTIL_TIMEOUT(MIN(scheduler->work_thread_num_ - hp_max, lp_max), scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_HA_LOW));

  // Mid priority joins: it takes what is left after high, pushing low to its minimum.
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag3));
  CHECK_EQ_UTIL_TIMEOUT(hp_max, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_HA_HIGH));
  CHECK_EQ_UTIL_TIMEOUT(MIN(scheduler->work_thread_num_ - hp_max, mp_max), scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_HA_MID));
  CHECK_EQ_UTIL_TIMEOUT(lp_min, scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_HA_LOW));

  wait_scheduler();
}
 | |
| 
 | |
// Verifies emergency scheduling: with the DDL priority squeezed to a single thread and
// two dags already queued, a third dag added with add_dag(dag3, true) (emergency flag)
// must still get executed. All three inc/mul tasks touch the same operator, so a final
// value of 3 proves every dag ran.
TEST_F(TestDagScheduler, test_emergency_task)
{
  ObTenantDagScheduler *scheduler = MTL(ObTenantDagScheduler*);
  ASSERT_TRUE(nullptr != scheduler);
  ASSERT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice, 64));

  AtomicOperator op(0);

  // Only one DDL thread: dag1 (long-running) will occupy it.
  EXPECT_EQ(OB_SUCCESS, scheduler->set_thread_score(ObDagPrio::DAG_PRIO_DDL, 1));
  EXPECT_EQ(1, scheduler->up_limits_[ObDagPrio::DAG_PRIO_DDL]);
  TestLPDag *dag1 = NULL;
  AtomicIncTask *inc_task = NULL;
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag1));
  EXPECT_EQ(OB_SUCCESS, dag1->init(1));
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag1, inc_task));
  EXPECT_EQ(OB_SUCCESS, inc_task->init(1, 1, op, 20*sleep_slice));
  EXPECT_EQ(OB_SUCCESS, dag1->add_task(*inc_task));

  TestLPDag *dag2 = NULL;
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag2));
  EXPECT_EQ(OB_SUCCESS, dag2->init(2));
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag2, inc_task));
  EXPECT_EQ(OB_SUCCESS, inc_task->init(1, 1, op, sleep_slice));
  EXPECT_EQ(OB_SUCCESS, dag2->add_task(*inc_task));

  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag1));
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag2));

  // Wait until the single DDL slot is actually busy before queuing the emergency dag.
  while (scheduler->get_running_task_cnt(ObDagPrio::DAG_PRIO_DDL) == 0) {
    usleep(10);
  }

  TestLPDag *dag3 = NULL;
  AtomicMulTask *mul_task = NULL;
  EXPECT_EQ(OB_SUCCESS, scheduler->alloc_dag(dag3));
  EXPECT_EQ(OB_SUCCESS, dag3->init(3));
  EXPECT_EQ(OB_SUCCESS, alloc_task(*dag3, mul_task));
  EXPECT_EQ(OB_SUCCESS, mul_task->init(1, 1, op, 0));
  EXPECT_EQ(OB_SUCCESS, dag3->add_task(*mul_task));
  // `true` marks this dag as emergency — it jumps ahead of the waiting dag2.
  EXPECT_EQ(OB_SUCCESS, scheduler->add_dag(dag3, true));

  wait_scheduler();

  EXPECT_EQ(3, op.value());
}
 | |
| /*
 | |
| TEST_F(TestDagScheduler, test_large_thread_cnt)
 | |
| {
 | |
|   ObTenantDagScheduler *scheduler = MTL(ObTenantDagScheduler*);
 | |
|   ASSERT_TRUE(nullptr != scheduler);
 | |
| 
 | |
|   const int64_t cpu_cnt = 128;
 | |
|   ASSERT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice));
 | |
|   scheduler->cpu_cnt_ = cpu_cnt;
 | |
|   scheduler->get_default_config(cpu_cnt);
 | |
|   scheduler->dump_dag_status();
 | |
|   const int64_t max_concurrency = 100;
 | |
|   scheduler->set_major_merge_concurrency(max_concurrency);
 | |
|   EXPECT_EQ(max_concurrency, scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(max_concurrency / 2, scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
|   const int64_t max_concurrency_2 = 10;
 | |
|   scheduler->set_major_merge_concurrency(max_concurrency_2);
 | |
|   EXPECT_EQ(max_concurrency_2, scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(max_concurrency_2 / 2, scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
|   scheduler->destroy();
 | |
| 
 | |
|   const int64_t cpu_cnt_2 = 64;
 | |
|   ASSERT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice));
 | |
|   scheduler->cpu_cnt_ = cpu_cnt_2;
 | |
|   scheduler->get_default_config(cpu_cnt_2);
 | |
|   scheduler->dump_dag_status();
 | |
|   scheduler->set_major_merge_concurrency(max_concurrency);
 | |
|   EXPECT_EQ(cpu_cnt_2, scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(max_concurrency / 2, scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
|   scheduler->set_major_merge_concurrency(max_concurrency_2);
 | |
|   EXPECT_EQ(max_concurrency_2, scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(max_concurrency_2 / 2, scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
|   scheduler->destroy();
 | |
| 
 | |
|   const int64_t cpu_cnt_3 = 512;
 | |
|   ASSERT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice));
 | |
|   scheduler->cpu_cnt_ = cpu_cnt_3;
 | |
|   scheduler->get_default_config(cpu_cnt_3);
 | |
|   scheduler->dump_dag_status();
 | |
|   scheduler->set_major_merge_concurrency(max_concurrency);
 | |
|   EXPECT_EQ(max_concurrency, scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(max_concurrency / 2, scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
|   scheduler->set_major_merge_concurrency(max_concurrency_2);
 | |
|   EXPECT_EQ(max_concurrency_2, scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(max_concurrency_2 / 2, scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
| 
 | |
|   const int64_t max_concurrency_3 = 256;
 | |
|   scheduler->set_major_merge_concurrency(max_concurrency_3);
 | |
|   EXPECT_EQ(max_concurrency_3, scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(max_concurrency_3 / 2, scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
| 
 | |
|   scheduler->set_major_merge_concurrency(0);
 | |
|   EXPECT_EQ(ObTenantDagScheduler::DEFAULT_UP_LIMIT[ObIDag::DAG_ULT_MAJOR_MERGE]
 | |
|       , scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(ObTenantDagScheduler::DEFAULT_LOW_LIMIT[ObDagPrio::DAG_PRIO_COMPACTION_LOW],
 | |
|       scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
|   scheduler->destroy();
 | |
| }
 | |
| 
 | |
| TEST_F(TestDagScheduler, test_large_thread_cnt_2)
 | |
| {
 | |
|   ObTenantDagScheduler *scheduler = MTL(ObTenantDagScheduler*);
 | |
|   ASSERT_TRUE(nullptr != scheduler);
 | |
| 
 | |
|   const int64_t cpu_cnt = 256;
 | |
|   ASSERT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice));
 | |
|   scheduler->cpu_cnt_ = cpu_cnt;
 | |
|   scheduler->get_default_config(cpu_cnt);
 | |
|   scheduler->dump_dag_status();
 | |
|   int64_t max_concurrency = 500;
 | |
|   scheduler->set_major_merge_concurrency(max_concurrency);
 | |
|   EXPECT_EQ(cpu_cnt, scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(max_concurrency / 2, scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
|   int64_t max_concurrency_2 = 10;
 | |
|   scheduler->set_major_merge_concurrency(max_concurrency_2);
 | |
|   EXPECT_EQ(max_concurrency_2, scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(max_concurrency_2 / 2, scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
|   scheduler->destroy();
 | |
| 
 | |
|   const int64_t cpu_cnt_2 = 64;
 | |
|   ASSERT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice));
 | |
|   scheduler->cpu_cnt_ = cpu_cnt_2;
 | |
|   scheduler->get_default_config(cpu_cnt_2);
 | |
|   scheduler->dump_dag_status();
 | |
|   max_concurrency = 200;
 | |
|   scheduler->set_major_merge_concurrency(max_concurrency);
 | |
|   EXPECT_EQ(cpu_cnt_2, scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(cpu_cnt_2, scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
|   scheduler->set_major_merge_concurrency(max_concurrency_2);
 | |
|   EXPECT_EQ(max_concurrency_2, scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(max_concurrency_2 / 2, scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
|   scheduler->destroy();
 | |
| 
 | |
|   const int64_t cpu_cnt_3 = 512;
 | |
|   ASSERT_EQ(OB_SUCCESS, scheduler->init(MTL_ID(), time_slice));
 | |
|   scheduler->cpu_cnt_ = cpu_cnt_3;
 | |
|   scheduler->get_default_config(cpu_cnt_3);
 | |
|   scheduler->dump_dag_status();
 | |
|   scheduler->set_major_merge_concurrency(max_concurrency);
 | |
|   EXPECT_EQ(max_concurrency, scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(max_concurrency / 2, scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
|   scheduler->set_major_merge_concurrency(max_concurrency_2);
 | |
|   EXPECT_EQ(max_concurrency_2, scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(max_concurrency_2 / 2, scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
| 
 | |
|   const int64_t max_concurrency_3 = 800;
 | |
|   scheduler->set_major_merge_concurrency(max_concurrency_3);
 | |
|   EXPECT_EQ(cpu_cnt_3, scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(max_concurrency_3 / 2, scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
| 
 | |
|   scheduler->set_major_merge_concurrency(0);
 | |
|   EXPECT_EQ(ObTenantDagScheduler::DEFAULT_UP_LIMIT[ObIDag::DAG_ULT_MAJOR_MERGE]
 | |
|       , scheduler->up_limits_[ObIDag::DAG_ULT_MAJOR_MERGE]);
 | |
|   EXPECT_EQ(ObTenantDagScheduler::DEFAULT_LOW_LIMIT[ObDagPrio::DAG_PRIO_COMPACTION_LOW],
 | |
|       scheduler->low_limits_[ObDagPrio::DAG_PRIO_COMPACTION_LOW]);
 | |
|   scheduler->destroy();
 | |
| }
 | |
| */
 | |
| }
 | |
| }
 | |
| 
 | |
| void parse_cmd_arg(int argc, char **argv)
 | |
| {
 | |
|   int opt = 0;
 | |
|   const char *opt_string = "p:s:l:";
 | |
| 
 | |
|   struct option longopts[] = {
 | |
|       {"dag cnt for performance test", 1, NULL, 'p'},
 | |
|       {"stress test time", 1, NULL, 's'},
 | |
|       {"log level", 1, NULL, 'l'},
 | |
|       {0,0,0,0} };
 | |
| 
 | |
|   while (-1 != (opt = getopt_long(argc, argv, opt_string, longopts, NULL))) {
 | |
|     switch(opt) {
 | |
|     case 'p':
 | |
|       dag_cnt = strtoll(optarg, NULL, 10);
 | |
|       break;
 | |
|     case 's':
 | |
|       stress_time = strtoll(optarg, NULL, 10);
 | |
|       break;
 | |
|     case 'l':
 | |
|       snprintf(log_level, 20, "%s", optarg);
 | |
|       break;
 | |
|     default:
 | |
|       break;
 | |
|     }
 | |
|   }
 | |
| }
 | |
| 
 | |
| int main(int argc, char **argv)
 | |
| {
 | |
|   ::testing::InitGoogleTest(&argc, argv);
 | |
|   parse_cmd_arg(argc, argv);
 | |
|   OB_LOGGER.set_log_level(log_level);
 | |
|   OB_LOGGER.set_max_file_size(256*1024*1024);
 | |
|   system("rm -f test_dag_scheduler.log*");
 | |
|   OB_LOGGER.set_file_name("test_dag_scheduler.log");
 | |
|   return RUN_ALL_TESTS();
 | |
| }
 |