Merge branch 'master' into develop
commit ccf56f2ccc

deps/oblib/src/common/object/ob_obj_compare.h (vendored): 10 changes
@@ -259,7 +259,15 @@ public:
}
const double l = obj1.get_double();
const double r = obj2.get_double();
if (l == r || fabs(l - r) < p) {
if (isnan(l) || isnan(r)) {
if (isnan(l) && isnan(r)) {
ret = 0;
} else if (isnan(l)) {
ret = 1;
} else {
ret = -1;
}
} else if (l == r || fabs(l - r) < p) {
ret = 0;
} else {
ret = (l < r ? -1 : 1);
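
The new branch above gives doubles a total order in the presence of NaN: two NaNs compare equal, and a single NaN sorts after every non-NaN value; otherwise values within the tolerance p compare equal. A minimal, self-contained restatement of that rule (the function name and the epsilon parameter p are illustrative, not the real ObObjCmpFuncs entry point):

#include <cmath>

// Hedged sketch of the ordering introduced above, not the actual comparator.
static int cmp_double_nan_aware(double l, double r, double p)
{
  int ret = 0;
  if (std::isnan(l) || std::isnan(r)) {
    if (std::isnan(l) && std::isnan(r)) {
      ret = 0;              // two NaNs are treated as equal
    } else if (std::isnan(l)) {
      ret = 1;              // NaN sorts after any non-NaN value
    } else {
      ret = -1;
    }
  } else if (l == r || std::fabs(l - r) < p) {
    ret = 0;                // equal within tolerance p
  } else {
    ret = (l < r ? -1 : 1);
  }
  return ret;
}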
deps/oblib/src/lib/lock/ob_spin_rwlock.h (vendored): 31 changes

@@ -84,6 +84,37 @@ private:
DISALLOW_COPY_AND_ASSIGN(SpinRLockGuard);
};

class SpinRLockManualGuard
{
public:
explicit SpinRLockManualGuard()
: lock_(nullptr), ret_(OB_SUCCESS)
{
}
~SpinRLockManualGuard()
{
if (OB_LIKELY(OB_SUCCESS == ret_) && OB_NOT_NULL(lock_)) {
if (OB_UNLIKELY(OB_SUCCESS != (ret_ = lock_->unlock()))) {
COMMON_LOG_RET(WARN, ret_, "Fail to unlock, ", K_(ret));
} else {
lock_ = nullptr;
}
}
}
void lock(SpinRWLock &lock) {
lock_ = &lock;
if (OB_UNLIKELY(OB_SUCCESS != (ret_ = lock_->rdlock()))) {
COMMON_LOG_RET(WARN, ret_, "Fail to read lock, ", K_(ret));
}
}
inline int get_ret() const { return ret_; }
private:
SpinRWLock *lock_;
int ret_;
private:
DISALLOW_COPY_AND_ASSIGN(SpinRLockManualGuard);
};

class SpinWLockGuard
{
public:
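
SpinRLockManualGuard differs from SpinRLockGuard in that the read lock is taken explicitly via lock() instead of in the constructor, while the destructor still releases it only when acquisition succeeded. A hedged usage sketch against the class shown above (the surrounding function and lock variable are illustrative and assume the usual oblib headers):

// Hedged sketch: acquire late, release automatically on scope exit.
int read_shared_state(SpinRWLock &rwlock)
{
  int ret = OB_SUCCESS;
  SpinRLockManualGuard guard;       // no lock taken yet
  // ... work that does not need the lock ...
  guard.lock(rwlock);               // rdlock() happens here
  if (OB_SUCCESS != (ret = guard.get_ret())) {
    // rdlock failed; the destructor will not attempt an unlock
  } else {
    // read shared state under the read lock
  }
  return ret;                       // unlock() runs in ~SpinRLockManualGuard()
}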
deps/oblib/src/lib/ob_errno.h (vendored): 1 change

@@ -376,6 +376,7 @@ constexpr int OB_ERR_TOO_MANY_PREFIX_DECLARE = -7424;
constexpr int OB_ERR_XPATH_INVALID_NODE = -7426;
constexpr int OB_ERR_XPATH_NO_NODE = -7427;
constexpr int OB_ERR_DUP_DEF_NAMESPACE = -7431;
constexpr int OB_ERR_INVALID_VECTOR_DIM = -7600;
constexpr int OB_PACKET_CLUSTER_ID_NOT_MATCH = -8004;
constexpr int OB_TENANT_ID_NOT_MATCH = -8005;
constexpr int OB_URI_ERROR = -9001;

deps/oblib/src/lib/stat/ob_latch_define.h (vendored): 4 changes

@@ -317,7 +317,6 @@ LATCH_DEF(DAS_ASYNC_RPC_LOCK, 290, "das wait remote response lock", LATCH_FIFO,
LATCH_DEF(CLOG_CKPT_RWLOCK, 291, "clog checkpoint rwlock", LATCH_READ_PREFER, 2000, 0, true)
LATCH_DEF(REWRITE_RULE_ITEM_LOCK, 292, "rewrite rule item lock", LATCH_FIFO, 2000, 0, true)

LATCH_DEF(TENANT_MGR_TENANT_BUCKET_LOCK, 290, "tenant mgr tenant bucket lock", LATCH_READ_PREFER, INT64_MAX, 0, false)
LATCH_DEF(SRS_LOCK, 293, "srs lock", LATCH_READ_PREFER, 2000, 0, false)
LATCH_DEF(DDL_EXECUTE_LOCK, 294, "ddl execute lock", LATCH_FIFO, 2000, 0, true)
LATCH_DEF(TENANT_IO_CONFIG_LOCK, 295, "tenant io config lock", LATCH_FIFO, 2000, 0, true)
@@ -369,8 +368,9 @@ LATCH_DEF(SQL_AUDIT, 335, "sql audit release second level queue lock", LATCH_FIF

LATCH_DEF(S2_PHY_BLOCK_LOCK, 336, "s2 phy block lock", LATCH_FIFO, INT64_MAX, 0, false)
LATCH_DEF(S2_MEM_BLOCK_LOCK, 337, "s2 mem block lock", LATCH_FIFO, INT64_MAX, 0, false)
LATCH_DEF(TENANT_MGR_TENANT_BUCKET_LOCK, 338, "tenant mgr tenant bucket lock", LATCH_READ_PREFER, INT64_MAX, 0, false)

LATCH_DEF(LATCH_END, 338, "latch end", LATCH_FIFO, 2000, 0, true)
LATCH_DEF(LATCH_END, 339, "latch end", LATCH_FIFO, 2000, 0, true)

#endif
@@ -21,6 +21,7 @@
* @param summary_in_session Stat recorded for user session process mark this flag true.
* @param can_visible Indicate whether this stat can be queried on gv$sysstat and gv$sesstat.
* @param enable Indicate whether this stat is enabled. Mark it false if you merely need it as a placeholder.
* NOTICE: do not reuse stat id or rename stat event!
*/

// NETWORK

deps/oblib/src/lib/wait_event/ob_wait_event.h (vendored): 24 changes

@@ -22,8 +22,9 @@
* @param class Every wait event belongs to a class of wait event on deps/oblib/src/lib/wait_event/ob_wait_class.h
* @param is_phy Indicate whether this wait event can be nested. true for most cases.
* @param enable Means whether this wait event is enabled. Mark it false if you merely need it as a placeholder.
* NOTICE: do not reuse wait event id or rename wait event!
*/
// USER_IO & SYSTEM_IO
// USER_IO & SYSTEM_IO 10001-11999
WAIT_EVENT_DEF(NULL_EVENT, 10000, "", "", "", "", OTHER, true, true)
WAIT_EVENT_DEF(DB_FILE_DATA_READ, 10001, "db file data read", "fd", "offset", "size", USER_IO, true, true)
WAIT_EVENT_DEF(DB_FILE_DATA_INDEX_READ, 10002, "db file data index read", "fd", "offset", "size", USER_IO, true, true)
@@ -44,18 +45,18 @@ WAIT_EVENT_DEF(PALF_WRITE, 11017, "palf write", "fd", "offset", "size", SYSTEM_I
WAIT_EVENT_DEF(OBJECT_STORAGE_WRITE, 11018, "object storage write", "fd", "offset", "size", SYSTEM_IO, true, false)
WAIT_EVENT_DEF(OBJECT_STORAGE_READ, 11019, "object storage read", "fd", "offset", "size", SYSTEM_IO, true, false)

// SCHEDULER
// SCHEDULER 12001-12999
WAIT_EVENT_DEF(OMT_WAIT, 12001, "sched wait", "req type", "req start timestamp", "wait start timestamp", SCHEDULER, true, false)
WAIT_EVENT_DEF(OMT_IDLE, 12002, "sched idle", "wait start timestamp", "", "", IDLE, true, true)

// NETWORK
// NETWORK 13000-13999
WAIT_EVENT_DEF(SYNC_RPC, 13000, "sync rpc", "pcode", "size", "", NETWORK, true, true)
WAIT_EVENT_DEF(MYSQL_RESPONSE_WAIT_CLIENT, 13001, "mysql response wait client", "", "", "", NETWORK, true, true)
WAIT_EVENT_DEF(DAS_ASYNC_RPC_LOCK_WAIT, 13002, "das wait remote response", "", "", "", NETWORK, true, true)
WAIT_EVENT_DEF(ASYNC_EXTERNAL_TABLE_LOCK_WAIT, 13003, "external table wait remote response", "", "", "", NETWORK, true, true)
WAIT_EVENT_DEF(NETWORK_QUEUE_WAIT, 13004, "wait for network request in queue", "pcode", "retry_times", "", NETWORK, true, true)

// APPLICATION
// APPLICATION 14001-14999
WAIT_EVENT_DEF(MT_READ_LOCK_WAIT,14001,"memstore read lock wait","lock","waiter","owner",APPLICATION,false, true)
WAIT_EVENT_DEF(MT_WRITE_LOCK_WAIT,14002,"memstore write lock wait","lock","waiter","owner",APPLICATION,false, false)
WAIT_EVENT_DEF(ROW_LOCK_WAIT,14003,"row lock wait","lock","waiter","owner",APPLICATION,false, false)
@@ -117,7 +118,6 @@ WAIT_EVENT_DEF(ASYNC_COMMITTING_WAIT, 16018, "async commiting wait", "", "", "",
WAIT_EVENT_DEF(OBCDC_PART_MGR_SCHEMA_VERSION_WAIT, 18000, "oblog part mgr schema version wait", "", "", "", CONCURRENCY, true, true)
WAIT_EVENT_DEF(SYNC_GET_GTS_WAIT, 18101, "sync get gts timestamp wait", "address", "", "", CONCURRENCY, true, true)

// sleep
WAIT_EVENT_DEF(BANDWIDTH_THROTTLE_SLEEP, 20000, "sleep: bandwidth throttle sleep wait", "sleep_interval", "", "", CONCURRENCY, true, true)
WAIT_EVENT_DEF(DTL_PROCESS_CHANNEL_SLEEP, 20001, "sleep: dtl process channel sleep wait", "sleep_interval", "", "", CONCURRENCY, true, true)
WAIT_EVENT_DEF(DTL_DESTROY_CHANNEL_SLEEP, 20002, "sleep: dtl destroy channel sleep wait", "sleep_interval", "", "", CONCURRENCY, true, true)
@@ -125,15 +125,11 @@ WAIT_EVENT_DEF(STORAGE_WRITING_THROTTLE_SLEEP, 20003, "sleep: storage writing th
WAIT_EVENT_DEF(STORAGE_AUTOINC_FETCH_RETRY_SLEEP, 20004, "sleep: tablet autoinc fetch new range retry wait", "sleep_interval", "", "", CONCURRENCY, true, true)
WAIT_EVENT_DEF(STORAGE_AUTOINC_FETCH_CONFLICT_SLEEP, 20005, "sleep: tablet autoinc fetch new range conflict wait", "sleep_interval", "", "", CONCURRENCY, true, true)
WAIT_EVENT_DEF(STORAGE_HA_FINISH_TRANSFER, 20006, "sleep: finish transfer sleep wait", "sleep_interval", "", "", CONCURRENCY, true, true)
WAIT_EVENT_DEF(GARBAGE_COLLECTOR_SLEEP, 20007, "sleep: wait log callback sleep wait", "sleep_interval", "", "", CONCURRENCY, true, true)


// logservice
WAIT_EVENT_DEF(LOG_EXTERNAL_STORAGE_IO_TASK_WAIT, 20007, "latch: log external storage io task wait", "", "", "", CONCURRENCY, true, true)
WAIT_EVENT_DEF(LOG_EXTERNAL_STORAGE_HANDLER_RW_WAIT, 20008, "latch: log external storage handler rw wait", "", "", "", CONCURRENCY, true, false)
WAIT_EVENT_DEF(LOG_EXTERNAL_STORAGE_HANDLER_WAIT, 20009, "latch: log external storage handler spin wait", "", "", "", CONCURRENCY, true, false)

// share storage
// share storage 21001-21999
WAIT_EVENT_DEF(ZONE_STORAGE_MANAGER_LOCK_WAIT, 21001, "latch: zone storage manager maintaince lock wait", "address", "number", "tries", CONCURRENCY, true, false)
WAIT_EVENT_DEF(ZONE_STORAGE_INFO_RW_LOCK_WAIT, 21002, "latch: zone storage infos rw lock wait", "address", "number", "tries", CONCURRENCY, true, false)
WAIT_EVENT_DEF(DEVICE_MANIFEST_RW_LOCK_WAIT, 21003, "latch: device_manifest rw lock wait", "address", "number", "tries", CONCURRENCY, true, false)
@@ -149,13 +145,17 @@ WAIT_EVENT_DEF(TIERED_BLOCK_WRITE_LOCAL, 21012, "tiered block write local", "add
WAIT_EVENT_DEF(TIERED_BLOCK_READ_REMOTE, 21013, "tiered block read remote", "address", "", "", CONCURRENCY, true, false)
WAIT_EVENT_DEF(TIERED_BLOCK_READ_LOCAL, 21014, "tiered block read local", "address", "", "", CONCURRENCY, true, false)

// inner sql
// inner sql 30000-30099
WAIT_EVENT_DEF(INNER_SQL_EXEC_WAIT, 30000, "exec inner sql wait", "wait inner sql class", "inner session id", "", OTHER, true, true)
WAIT_EVENT_DEF(INNER_SESSION_IDLE_WAIT, 30001, "inner session wait to be called", "inner session id", "parent session id", "", IDLE, true, false)

// CONFIGURATION
// CONFIGURATION 30100-30999
WAIT_EVENT_DEF(WAIT_REFRESH_SCHEMA, 30100, "sleep: wait refresh schema", "sleep_interval", "schema_version", "", CONFIGURATION, true, true)
WAIT_EVENT_DEF(PALF_THROTTLING, 30101, "palf throttling sleep", "sleep_interval", "", "", USER_IO, false, true)
WAIT_EVENT_DEF(SLOG_NORMAL_RETRY_SLEEP, 30102, "sleep: slog has io error and retrying", "sleep_interval", "", "", CONCURRENCY, true, true)

// sleep 31000-31999
WAIT_EVENT_DEF(GARBAGE_COLLECTOR_SLEEP, 31000, "sleep: wait log callback sleep wait", "sleep_interval", "", "", CONCURRENCY, true, true)

// END. DO NOT MODIFY.
WAIT_EVENT_DEF(WAIT_EVENT_DEF_END, 99999, "event end", "", "", "", OTHER, false, true)
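
The WAIT_EVENT_DEF entries above appear to be consumed as an X-macro table, which is why the header warns against reusing ids or renaming events: the same list is expanded several times to build the matching enums and descriptor tables. A hedged, self-contained sketch of one common variant of that pattern (the table and names below are stand-ins, not OceanBase's real ones):

// Illustrative stand-in table, not the real wait event list.
#define DEMO_WAIT_EVENT_TABLE(DEF)                                              \
  DEF(DEMO_NULL_EVENT, 10000, "", OTHER, true, true)                            \
  DEF(DEMO_DB_FILE_DATA_READ, 10001, "db file data read", USER_IO, true, true)

// First expansion: build an enum keyed by the event id.
enum DemoWaitEventId {
#define DEMO_MAKE_ENUM(def, id, name, wait_class, is_phy, enable) def = id,
  DEMO_WAIT_EVENT_TABLE(DEMO_MAKE_ENUM)
#undef DEMO_MAKE_ENUM
};

// A second expansion could emit the parallel name/description array,
// so ids must stay stable and display names must not be silently renamed.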
@@ -54,7 +54,7 @@ int ObMysqlProtocolProcessor::do_decode(ObSMConnection& conn, ObICSMemPool& pool
// go backward with MySQL packet header length
start -= header_size;
next_read_bytes = delta_len;
} else if (conn.is_in_authed_phase()) {
} else if (conn.is_in_authed_phase() || conn.is_in_auth_switch_phase()) {
if (OB_FAIL(decode_body(pool, start, pktlen, pktseq, pkt))) {
LOG_ERROR("fail to decode_body", K(sessid), K(pktseq), K(ret));
}

@@ -108,6 +108,7 @@ PCODE_DEF(OB_CHECK_SERVER_FOR_ADDING_SERVER, 0x153)
PCODE_DEF(OB_GET_SERVER_RESOURCE_INFO, 0x154)
PCODE_DEF(OB_NOTIFY_SWITCH_LEADER, 0x155)
PCODE_DEF(OB_GET_TENANT_TABLET_COUNT, 0x156)
PCODE_DEF(OB_CHECK_SERVER_MACHINE_STATUS, 0x157)
// remote interrupt call
PCODE_DEF(OB_REMOTE_INTERRUPT_CALL, 0x1EE)

@@ -1154,6 +1155,7 @@ PCODE_DEF(OB_CAL_UNIT_PHY_RESOURCE, 0x1622)
PCODE_DEF(OB_CAL_STANDBY_TENANT_PHY_RESOURCE, 0x1623)

//PCODE_DEF(OB_UPDATE_MVIEW_REFERENCE_TABLE_STATUS, 0x1624)
//PCODE_DEF(OB_DO_EVENT_DDL, 0x1625)

//**** Note: add new RPC IDs before this line ******
//
@@ -1,4 +1,6 @@
add_library(mit_env env/ob_simple_server_helper.cpp)
set(MIT_SRCS env/ob_simple_server_helper.cpp)

add_library(mit_env ${MIT_SRCS})

target_include_directories(mit_env PUBLIC
${CMAKE_SOURCE_DIR}/unittest ${CMAKE_SOURCE_DIR}/mittest)

mittest/env/ob_simple_server_helper.cpp (vendored): 56 changes

@@ -311,6 +311,57 @@ int SimpleServerHelper::freeze(uint64_t tenant_id, ObLSID ls_id, ObTabletID tabl
return ret;
}

int SimpleServerHelper::freeze_tx_data(uint64_t tenant_id, ObLSID ls_id)
{
int ret = OB_SUCCESS;
MTL_SWITCH(tenant_id) {
ObLSHandle ls_handle;
if (OB_FAIL(MTL(ObLSService*)->get_ls(ls_id, ls_handle, ObLSGetMod::STORAGE_MOD))) {
} else {
storage::checkpoint::ObCheckpointExecutor *checkpoint_executor = ls_handle.get_ls()->get_checkpoint_executor();
ObTxDataMemtableMgr *tx_data_memtable_mgr
= dynamic_cast<ObTxDataMemtableMgr *>(
dynamic_cast<ObLSTxService *>(
checkpoint_executor->handlers_[logservice::TRANS_SERVICE_LOG_BASE_TYPE])
->common_checkpoints_[storage::checkpoint::ObCommonCheckpointType::TX_DATA_MEMTABLE_TYPE]);
if (OB_ISNULL(tx_data_memtable_mgr)) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("checkpoint obj is null", KR(ret));
} else if (OB_FAIL(tx_data_memtable_mgr->flush(share::SCN::max_scn(),
checkpoint::INVALID_TRACE_ID))) {
} else {
usleep(10 * 1000 * 1000);
}
}
}
return ret;
}

int SimpleServerHelper::freeze_tx_ctx(uint64_t tenant_id, ObLSID ls_id)
{
int ret = OB_SUCCESS;
MTL_SWITCH(tenant_id) {
ObLSHandle ls_handle;
if (OB_FAIL(MTL(ObLSService*)->get_ls(ls_id, ls_handle, ObLSGetMod::STORAGE_MOD))) {
} else {
storage::checkpoint::ObCheckpointExecutor *checkpoint_executor = ls_handle.get_ls()->get_checkpoint_executor();
ObTxCtxMemtable *tx_ctx_memtable
= dynamic_cast<ObTxCtxMemtable *>(
dynamic_cast<ObLSTxService *>(
checkpoint_executor->handlers_[logservice::TRANS_SERVICE_LOG_BASE_TYPE])
->common_checkpoints_[storage::checkpoint::ObCommonCheckpointType::TX_CTX_MEMTABLE_TYPE]);
if (OB_ISNULL(tx_ctx_memtable)) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("checkpoint obj is null", KR(ret));
} else if (OB_FAIL(tx_ctx_memtable->flush(share::SCN::max_scn(), 0))) {
} else {
usleep(10 * 1000 * 1000);
}
}
}
return ret;
}

int SimpleServerHelper::wait_flush_finish(uint64_t tenant_id, ObLSID ls_id, ObTabletID tablet_id)
{
int ret = OB_SUCCESS;
@@ -695,10 +746,9 @@ int SimpleServerHelper::ls_reboot(uint64_t tenant_id, ObLSID ls_id)
LOG_INFO("ls_reboot", K(tenant_id), K(ls_id));
int ret = OB_SUCCESS;
auto print_mgr_state = [](ObLS *ls) {
auto state = ls->ls_tx_svr_.mgr_->state_;
const ObTxLSStateMgr &state_mgr = ls->ls_tx_svr_.mgr_->tx_ls_state_mgr_;
LOG_INFO("print ls ctx mgr state:", K(ls->get_ls_id()),
"ctx_mgr_state", state,
K(ObLSTxCtxMgr::State::state_str(state)));
K(state_mgr));
};
auto func = [tenant_id, ls_id, print_mgr_state] () {
int ret = OB_SUCCESS;

mittest/env/ob_simple_server_helper.h (vendored): 2 changes

@@ -70,6 +70,8 @@ public:
static int get_ls_end_scn(uint64_t tenant_id, ObLSID ls_id, SCN &end_scn);
static int wait_replay_advance(uint64_t tenant_id, ObLSID ls_id, SCN end_scn);
static int wait_checkpoint_newest(uint64_t tenant_id, ObLSID ls_id);
static int freeze_tx_ctx(uint64_t tenant_id, ObLSID ls_id);
static int freeze_tx_data(uint64_t tenant_id, ObLSID ls_id);
static int wait_tx(uint64_t tenant_id, ObLSID ls_id, ObTransID tx_id, ObTxState tx_state);
static int wait_tx_exit(uint64_t tenant_id, ObLSID ls_id, ObTransID tx_id);
static int wait_flush_finish(uint64_t tenant_id, ObLSID ls_id, ObTabletID tablet_id);
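
A hedged sketch of how a mittest case might drive the two new helpers declared above (the tenant id and log stream id are illustrative, and the snippet assumes the usual mittest headers):

// Hedged usage sketch inside a gtest body: force tx ctx and tx data checkpoints.
uint64_t tenant_id = 1002;          // illustrative tenant
share::ObLSID ls_id(1001);          // illustrative log stream
EXPECT_EQ(OB_SUCCESS, SimpleServerHelper::freeze_tx_ctx(tenant_id, ls_id));
EXPECT_EQ(OB_SUCCESS, SimpleServerHelper::freeze_tx_data(tenant_id, ls_id));
EXPECT_EQ(OB_SUCCESS, SimpleServerHelper::wait_checkpoint_newest(tenant_id, ls_id));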
@@ -1025,8 +1025,9 @@ int ObSimpleLogClusterTestEnv::raw_write(PalfHandleImplGuard &leader,
do {
usleep(10);
ret = (leader.palf_handle_impl_)->submit_group_log(opts, lsn, buf, buf_len);
if (OB_SUCC(ret)) {
if (OB_SUCC(ret) || OB_ERR_OUT_OF_LOWER_BOUND == ret) {
PALF_LOG(INFO, "raw_write success", KR(ret), K(lsn));
ret = OB_SUCCESS;
} else {
if (REACH_TIME_INTERVAL(100 * 1000)) {
PALF_LOG(WARN, "raw_write failed", KR(ret));
@@ -1167,6 +1168,10 @@ int ObSimpleLogClusterTestEnv::read_and_submit_group_log(PalfHandleImplGuard &le
PALF_LOG(WARN, "iterator next failed", K(ret), K(iterator_raw_write));
} else if (OB_FAIL(iterator_raw_write.get_entry(buffer, nbytes, scn, lsn, is_raw_write))) {
PALF_LOG(WARN, "iterator get_entry failed", K(ret), K(iterator_raw_write), K(is_raw_write));
} else if (lsn >= start_lsn && is_raw_write != true) {
ret = OB_ERR_UNEXPECTED;
PALF_LOG(ERROR, "iterator get_entry failed, is_raw_write must be true", K(ret), K(iterator_raw_write), K(is_raw_write),
K(lsn), K(start_lsn));
}
}
}

@@ -1960,6 +1960,35 @@ TEST_F(TestObSimpleLogClusterSingleReplica, test_raw_read)
}
}

TEST_F(TestObSimpleLogClusterSingleReplica, test_raw_write_concurrent_lsn)
{
SET_CASE_LOG_FILE(TEST_NAME, "test_raw_write_concurrent_lsn");
int64_t id = ATOMIC_AAF(&palf_id_, 1);
OB_LOGGER.set_log_level("TRACE");
int64_t leader_idx = 0;
PalfHandleImplGuard leader;
PalfHandleImplGuard raw_write_leader;
EXPECT_EQ(OB_SUCCESS, create_paxos_group(id, leader_idx, leader));
PalfHandleImpl *palf_handle_impl = leader.palf_handle_impl_;
const int64_t id_raw_write = ATOMIC_AAF(&palf_id_, 1);
EXPECT_EQ(OB_SUCCESS, create_paxos_group(id_raw_write, leader_idx, raw_write_leader));
EXPECT_EQ(OB_SUCCESS, change_access_mode_to_raw_write(raw_write_leader));

EXPECT_EQ(OB_SUCCESS, submit_log(leader, 100, leader_idx, MAX_LOG_BASE_TYPE));
SCN max_scn1 = leader.palf_handle_impl_->get_max_scn();
LSN end_pos_of_log1 = leader.palf_handle_impl_->get_max_lsn();
EXPECT_EQ(OB_SUCCESS, wait_until_has_committed(leader, leader.palf_handle_impl_->get_max_lsn()));

std::thread submit_log_t1([&]() {
EXPECT_EQ(OB_ITER_END, read_and_submit_group_log(leader, raw_write_leader));
});
std::thread submit_log_t2([&]() {
EXPECT_EQ(OB_ITER_END, read_and_submit_group_log(leader, raw_write_leader));
});
submit_log_t1.join();
submit_log_t2.join();
}

} // namespace unittest
} // namespace oceanbase
@@ -257,8 +257,9 @@ public:
{
ObUndoAction undo(from, to);
ObPartTransCtx *tx_ctx = store_ctx->mvcc_acc_ctx_.tx_ctx_;
EXPECT_EQ(OB_SUCCESS,
tx_ctx->ctx_tx_data_.add_undo_action(undo));
ObTxDataGuard tx_data_guard;
EXPECT_EQ(OB_SUCCESS, tx_ctx->ls_tx_ctx_mgr_->get_tx_table()->alloc_tx_data(tx_data_guard));
EXPECT_EQ(OB_SUCCESS, tx_ctx->insert_undo_action_to_tx_table_(undo, tx_data_guard, SCN::min_scn()));
ObMemtableCtx *mt_ctx = store_ctx->mvcc_acc_ctx_.mem_ctx_;
ObTxCallbackList &cb_list = mt_ctx->trans_mgr_.callback_list_;
for (ObMvccRowCallback *iter = (ObMvccRowCallback *)(cb_list.get_guard()->get_next());
@@ -3624,9 +3625,12 @@ int ObLSTxCtxMgr::init(const int64_t tenant_id,
} else {
if (OB_FAIL(ls_tx_ctx_map_.init(lib::ObMemAttr(tenant_id, "LSTxCtxMgr")))) {
TRANS_LOG(WARN, "ls_tx_ctx_map_ init fail", KR(ret));
} else if (OB_FAIL(tx_ls_state_mgr_.init(ls_id))) {
TRANS_LOG(WARN, "init tx_ls_state_mgr_ failed", KR(ret));
} else if (OB_FAIL(tx_ls_state_mgr_.switch_tx_ls_state(ObTxLSStateMgr::TxLSAction::START))) {
TRANS_LOG(WARN, "start ls_tx_ctx_mgr failed",K(ret),K(tx_ls_state_mgr_));
} else {
is_inited_ = true;
state_ = State::F_ALL_BLOCKED;
tenant_id_ = tenant_id;
ls_id_ = ls_id;
tx_table_ = tx_table;

@@ -1113,6 +1113,7 @@ TEST_F(TestMultiVersionMerge, test_merge_with_multi_trans)

for (int64_t i = 1; i <= 4; i++) {
ObTxData *tx_data = new ObTxData();
ASSERT_EQ(OB_SUCCESS, tx_data->init_tx_op());
transaction::ObTransID tx_id = i;

// fill in data
@@ -1276,6 +1277,7 @@ TEST_F(TestMultiVersionMerge, test_merge_with_multi_trans_can_compact)

for (int64_t i = 1; i <= 5; i++) {
ObTxData *tx_data = new ObTxData();
ASSERT_EQ(OB_SUCCESS, tx_data->init_tx_op());
transaction::ObTransID tx_id = i;

// fill in data
@@ -1439,6 +1441,7 @@ TEST_F(TestMultiVersionMerge, test_merge_with_multi_trans_can_not_compact)

for (int64_t i = 1; i <= 5; i++) {
ObTxData *tx_data = new ObTxData();
ASSERT_EQ(OB_SUCCESS, tx_data->init_tx_op());
transaction::ObTransID tx_id = i;

// fill in data
@@ -3338,6 +3341,7 @@ TEST_F(TestMultiVersionMerge, test_running_trans_cross_macro_with_abort_sql_seq)
ASSERT_NE(nullptr, tx_table = tx_table_guard.get_tx_table());

ObTxData *tx_data = new ObTxData();
ASSERT_EQ(OB_SUCCESS, tx_data->init_tx_op());
transaction::ObTransID tx_id = 1;

// fill in data

@@ -17,6 +17,7 @@
#define protected public
#define private public

#include "lib/alloc/memory_dump.h"
#include "storage/tablet/ob_tablet_persister.h"
#include "storage/meta_mem/ob_tenant_meta_mem_mgr.h"
#include "storage/meta_mem/ob_tablet_leak_checker.h"
@@ -1730,6 +1731,34 @@ TEST_F(TestTenantMetaMemMgr, test_show_limit)
lib::set_tenant_memory_limit(MTL_ID(), before_tenant_mem);
}

TEST_F(TestTenantMetaMemMgr, test_normal_tablet_buffer_fragment)
{
static const int64_t tablet_cnt = 155000;
ObTabletHandle *tablets = new ObTabletHandle[tablet_cnt];
const int64_t before_tenant_mem = lib::get_tenant_memory_limit(MTL_ID());
const int64_t this_case_tenant_mem = 3 * 1024 * 1024 * 1024L; /* 3GB */
lib::set_tenant_memory_limit(MTL_ID(), this_case_tenant_mem);
for (int64_t i = 0; i < tablet_cnt; ++i) {
ObTabletHandle tablet_handle;
ASSERT_EQ(OB_SUCCESS, MTL(ObTenantMetaMemMgr *)->acquire_tablet(ObTabletPoolType::TP_NORMAL, tablets[i]));
}
ObMallocAllocator::get_instance()->print_tenant_memory_usage(MTL_ID());
ObMemoryDump::get_instance().init();
auto task = ObMemoryDump::get_instance().alloc_task();
task->type_ = STAT_LABEL;
ObMemoryDump::get_instance().push(task);
usleep(1000000);
ObTenantCtxAllocatorGuard ta = ObMallocAllocator::get_instance()->get_tenant_ctx_allocator(MTL_ID(), ObCtxIds::META_OBJ_CTX_ID);
double fragment_rate = 1.0 * (ta->get_hold() - ta->get_used()) / ta->get_hold();
std::cout << "hold: " << ta->get_hold() << " used: " << ta->get_used() << " limit: " << ta->get_limit() << " fragment_rate: " << fragment_rate << std::endl;
ASSERT_TRUE(fragment_rate < 0.04);
for (int64_t i = 0; i < tablet_cnt; ++i) {
tablets[i].reset();
}
delete [] tablets;
lib::set_tenant_memory_limit(MTL_ID(), before_tenant_mem);
}

} // end namespace storage
} // end namespace oceanbase
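
As a hedged worked example of the fragmentation bound asserted in test_normal_tablet_buffer_fragment: if the META_OBJ ctx allocator holds 2.00 GB while 1.95 GB of it is used, then fragment_rate = (2.00 - 1.95) / 2.00 = 0.025, which satisfies the < 0.04 assertion; a hold of 2.00 GB with only 1.90 GB used (rate 0.05) would fail it.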
@@ -401,7 +401,7 @@ TEST_F(TestHTableLock, concurrent_shared_lock)
TEST_F(TestHTableLock, concurrent_exclusive_lock)
{
EXPECT_EQ(OB_SYS_TENANT_ID, MTL_ID());
const uint64_t thread_cnt = 1024;
const uint64_t thread_cnt = 100;
const uint64_t fake_table_id = 1;
ObString key = ObString::make_string("k1");
const int64_t start = ObTimeUtility::current_time();
@@ -416,7 +416,7 @@ TEST_F(TestHTableLock, concurrent_exclusive_lock)
TEST_F(TestHTableLock, concurrent_exclusive_shared_lock)
{
EXPECT_EQ(OB_SYS_TENANT_ID, MTL_ID());
const uint64_t xthread_cnt = 1024;
const uint64_t xthread_cnt = 100;
const uint64_t sthread_cnt = 1024;
const uint64_t fake_table_id = 1;
ObString key = ObString::make_string("k1");

@@ -250,6 +250,7 @@ void TestTxDataTable::insert_tx_data_()
tx_data_guard.reset();
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard, false));
ASSERT_NE(nullptr, tx_data = tx_data_guard.tx_data());
ASSERT_EQ(OB_SUCCESS, tx_data->init_tx_op());

// fill in data
tx_data->tx_id_ = tx_id;
@@ -287,6 +288,7 @@ void TestTxDataTable::insert_rollback_tx_data_()
ObTxData *tx_data = nullptr;
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard, false));
ASSERT_NE(nullptr, tx_data = tx_data_guard.tx_data());
ASSERT_EQ(OB_SUCCESS, tx_data->init_tx_op());

// fill in data
tx_data->tx_id_ = tx_id;
@@ -410,7 +412,9 @@ void TestTxDataTable::do_basic_test()
int64_t inserted_cnt_after_pre_process = freezing_memtable->get_tx_data_count();
ASSERT_EQ(inserted_cnt_before_pre_process + 1, inserted_cnt_after_pre_process);

ASSERT_EQ(OB_SUCCESS, freezing_memtable->get_split_ranges(nullptr, nullptr, range_cnt, range_array));
ObStoreRange input_range;
input_range.set_whole_range();
ASSERT_EQ(OB_SUCCESS, freezing_memtable->get_split_ranges(input_range, range_cnt, range_array));
int64_t pre_range_end_key = 0;
for (int i = 0; i < range_cnt; i++) {
auto &range = range_array[i];
@@ -464,23 +468,24 @@ void TestTxDataTable::do_undo_status_test()
ObTxDataGuard tx_data_guard;
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard, false));
ASSERT_NE(nullptr, tx_data = tx_data_guard.tx_data());
ASSERT_EQ(OB_SUCCESS, tx_data->init_tx_op());

tx_data->tx_id_ = rand();
for (int i = 1; i <= 1001; i++) {
transaction::ObUndoAction undo_action(ObTxSEQ(10 * (i + 1), 0), ObTxSEQ(10 * i, 0));
ASSERT_EQ(OB_SUCCESS, tx_data->add_undo_action(&tx_table_, undo_action));
}
ASSERT_EQ(1000 / TX_DATA_UNDO_ACT_MAX_NUM_PER_NODE + 1, tx_data->undo_status_list_.undo_node_cnt_);
ASSERT_EQ(1000 / TX_DATA_UNDO_ACT_MAX_NUM_PER_NODE + 1, tx_data->op_guard_->get_undo_status_list().undo_node_cnt_);

{
transaction::ObUndoAction undo_action(ObTxSEQ(10000000, 0), ObTxSEQ(10,0));
ASSERT_EQ(OB_SUCCESS, tx_data->add_undo_action(&tx_table_, undo_action));
}

STORAGETEST_LOG(INFO, "", K(tx_data->undo_status_list_));
ASSERT_EQ(1, tx_data->undo_status_list_.head_->size_);
ASSERT_EQ(nullptr, tx_data->undo_status_list_.head_->next_);
ASSERT_EQ(1, tx_data->undo_status_list_.undo_node_cnt_);
STORAGETEST_LOG(INFO, "", K(tx_data->op_guard_->get_undo_status_list()));
ASSERT_EQ(1, tx_data->op_guard_->get_undo_status_list().head_->size_);
ASSERT_EQ(nullptr, tx_data->op_guard_->get_undo_status_list().head_->next_);
ASSERT_EQ(1, tx_data->op_guard_->get_undo_status_list().undo_node_cnt_);
}

{
@@ -490,23 +495,24 @@ void TestTxDataTable::do_undo_status_test()
ObTxDataGuard tx_data_guard;
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard, false));
ASSERT_NE(nullptr, tx_data = tx_data_guard.tx_data());
ASSERT_EQ(OB_SUCCESS, tx_data->init_tx_op());
tx_data->tx_id_ = rand();

for (int i = 1; i <= 14; i++) {
transaction::ObUndoAction undo_action(ObTxSEQ(i + 1,0), ObTxSEQ(i,0));
ASSERT_EQ(OB_SUCCESS, tx_data->add_undo_action(&tx_table_, undo_action));
}
ASSERT_EQ(2, tx_data->undo_status_list_.undo_node_cnt_);
ASSERT_EQ(2, tx_data->op_guard_->get_undo_status_list().undo_node_cnt_);

{
transaction::ObUndoAction undo_action(ObTxSEQ(15, 0), ObTxSEQ(7,0));
ASSERT_EQ(OB_SUCCESS, tx_data->add_undo_action(&tx_table_, undo_action));
}

STORAGETEST_LOG(INFO, "", K(tx_data->undo_status_list_));
ASSERT_EQ(7, tx_data->undo_status_list_.head_->size_);
ASSERT_EQ(nullptr, tx_data->undo_status_list_.head_->next_);
ASSERT_EQ(1, tx_data->undo_status_list_.undo_node_cnt_);
STORAGETEST_LOG(INFO, "", K(tx_data->op_guard_->get_undo_status_list()));
ASSERT_EQ(7, tx_data->op_guard_->get_undo_status_list().head_->size_);
ASSERT_EQ(nullptr, tx_data->op_guard_->get_undo_status_list().head_->next_);
ASSERT_EQ(1, tx_data->op_guard_->get_undo_status_list().undo_node_cnt_);
}
}

@@ -516,6 +522,7 @@ void TestTxDataTable::test_serialize_with_action_cnt_(int cnt)
ObTxDataGuard tx_data_guard;
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard, false));
ASSERT_NE(nullptr, tx_data = tx_data_guard.tx_data());
ASSERT_EQ(OB_SUCCESS, tx_data->init_tx_op());
tx_data->tx_id_ = transaction::ObTransID(269381);
tx_data->commit_version_.convert_for_logservice(ObTimeUtil::current_time_ns());
tx_data->end_scn_.convert_for_logservice(ObTimeUtil::current_time_ns());
@@ -532,7 +539,7 @@ void TestTxDataTable::test_serialize_with_action_cnt_(int cnt)
} else {
node_cnt = cnt / 7 + 1;
}
ASSERT_EQ(node_cnt, tx_data->undo_status_list_.undo_node_cnt_);
ASSERT_EQ(node_cnt, tx_data->op_guard_->get_undo_status_list().undo_node_cnt_);

char *buf = nullptr;
ObArenaAllocator allocator;
@@ -652,6 +659,7 @@ void TestTxDataTable::do_repeat_insert_test() {
ObTxDataGuard tx_data_guard;
ASSERT_EQ(OB_SUCCESS, tx_data_table_.alloc_tx_data(tx_data_guard, false));
ASSERT_NE(nullptr, tx_data = tx_data_guard.tx_data());
ASSERT_EQ(OB_SUCCESS, tx_data->init_tx_op());

// fill in data
tx_data->tx_id_ = tx_id;
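
As a hedged reading of the node-count assertions in do_undo_status_test: if TX_DATA_UNDO_ACT_MAX_NUM_PER_NODE were 7 (consistent with the cnt / 7 + 1 computation in test_serialize_with_action_cnt_), the 1001 chained undo actions would occupy 1000 / 7 + 1 = 143 nodes, and the single covering action (10000000, 10) then compacts the whole list down to one node holding one action, which is exactly what the size_ == 1 and undo_node_cnt_ == 1 checks verify.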
@@ -39,3 +39,4 @@ ob_unittest_multi_replica(test_max_commit_ts_read_from_dup_table)
ob_unittest_multi_replica(test_mds_replay_from_ctx_table)
ob_unittest_multi_replica_longer_timeout(test_multi_transfer_tx)
ob_unittest_multi_replica(test_ob_direct_load_inc_log)
ob_unittest_multi_replica(test_tx_ls_state_switch)

@@ -898,7 +898,7 @@ int ObMultiReplicaTestBase::check_tenant_exist(bool &bool_ret, const char *tenan
} // namespace unittest
} // namespace oceanbase

int ::oceanbase::omt::ObWorkerProcessor::process_err_test()
OB_NOINLINE int ::oceanbase::omt::ObWorkerProcessor::process_err_test()
{
int ret = OB_SUCCESS;
mittest/multi_replica/test_tx_ls_state_switch.cpp (new file): 422 lines

@@ -0,0 +1,422 @@
/**
* Copyright (c) 2021 OceanBase
* OceanBase CE is licensed under Mulan PubL v2.
* You can use this software according to the terms and conditions of the Mulan PubL v2.
* You may obtain a copy of Mulan PubL v2 at:
* http://license.coscl.org.cn/MulanPubL-2.0
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PubL v2 for more details.
*/

#include <gtest/gtest.h>
#define USING_LOG_PREFIX SERVER
#define protected public
#define private public

#include "env/ob_fast_bootstrap.h"
#include "env/ob_multi_replica_test_base.h"
#include "env/ob_multi_replica_util.h"
#include "lib/mysqlclient/ob_mysql_result.h"
#include "storage/tx/ob_trans_ctx_mgr_v4.h"

#define CUR_TEST_CASE_NAME ObTxLsState

DEFINE_MULTI_ZONE_TEST_CASE_CLASS

MULTI_REPLICA_TEST_MAIN_FUNCTION(test_tx_ls_state_switch_);

namespace oceanbase
{

static bool errsim_switch_follower_ = false;
static bool errsim_apply_SWL_ = false;
static bool block_start_working_submitting_ = false;
static share::ObLSID errsim_dup_ls_id_;
static share::ObLSID errsim_normal_ls_id_;

namespace transaction
{

OB_NOINLINE int ObLSTxCtxMgr::errsim_switch_to_followr_gracefully()
{
int ret = OB_SUCCESS;
if (errsim_switch_follower_) {
ret = OB_TIMEOUT;
}

if (OB_FAIL(ret)) {
TRANS_LOG(INFO, "errsim in switch_to_follower_gracefully", K(ret), K(errsim_switch_follower_),
K(errsim_normal_ls_id_), K(errsim_dup_ls_id_), KPC(this));
}
return ret;
}

OB_NOINLINE int ObLSTxCtxMgr::errsim_submit_start_working_log()
{
int ret = OB_SUCCESS;

TRANS_LOG(WARN, "[ObMultiReplicaTestBase] errsim for submit_start_working_log", K(ret),
KPC(this));

while (block_start_working_submitting_) {
usleep(1000 * 1000);

TRANS_LOG(WARN, "[ObMultiReplicaTestBase] errsim for submit_start_working_log", K(ret),
K(block_start_working_submitting_), KPC(this));
}

return ret;
}

OB_NOINLINE int ObLSTxCtxMgr::errsim_apply_start_working_log()
{
int ret = OB_SUCCESS;

if (errsim_apply_SWL_) {
ret = OB_TIMEOUT;
}
if (OB_FAIL(ret)) {
TRANS_LOG(INFO, "errsim in on_start_working_log_cb_succ", K(ret), K(errsim_apply_SWL_),
K(errsim_normal_ls_id_), K(errsim_dup_ls_id_), KPC(this));
}

return ret;
}

} // namespace transaction

namespace unittest
{

using namespace oceanbase::transaction;
using namespace oceanbase::storage;

struct TableBasicArg
{
uint64_t tenant_id_;

int64_t dup_ls_id_num_;
int64_t dup_table_id_;
ObSEArray<int64_t, 10> dup_tablet_id_array_;

int64_t normal_ls_id_num_;
int64_t normal_table_id_;
ObSEArray<int64_t, 10> normal_tablet_id_array_;

TO_STRING_KV(K(tenant_id_),
K(dup_ls_id_num_),
K(dup_table_id_),
K(normal_ls_id_num_),
K(normal_table_id_),
K(dup_tablet_id_array_),
K(normal_tablet_id_array_));

OB_UNIS_VERSION(1);
};

OB_SERIALIZE_MEMBER(TableBasicArg,
tenant_id_,
dup_ls_id_num_,
dup_table_id_,
dup_tablet_id_array_,
normal_ls_id_num_,
normal_table_id_,
normal_tablet_id_array_);

static TableBasicArg static_basic_arg_;

TEST_F(GET_ZONE_TEST_CLASS_NAME(1), create_test_env)
{
int ret = OB_SUCCESS;

const std::string test_dup_table_name = "test_dup_1";
const std::string test_normal_table_name = "test_normal_1";

CREATE_TEST_TENANT(test_tenant_id);
SERVER_LOG(INFO, "[ObMultiReplicaTestBase] create test tenant success", K(test_tenant_id));

common::ObMySQLProxy &test_tenant_sql_proxy = get_curr_simple_server().get_sql_proxy2();

ACQUIRE_CONN_FROM_SQL_PROXY(test_conn, test_tenant_sql_proxy);

std::string primary_zone_sql = "ALTER TENANT " + std::string(DEFAULT_TEST_TENANT_NAME)
+ " set primary_zone='zone1; zone3; zone2';";
WRITE_SQL_BY_CONN(test_conn, primary_zone_sql.c_str());

unittest::TestEnvTool::create_table_for_test_env(
test_conn, test_dup_table_name.c_str(), 10, true /*is_dup_table*/,
static_basic_arg_.dup_ls_id_num_, static_basic_arg_.dup_table_id_,
static_basic_arg_.dup_tablet_id_array_);

unittest::TestEnvTool::create_table_for_test_env(
test_conn, test_normal_table_name.c_str(), 10, false /*is_dup_table*/,
static_basic_arg_.normal_ls_id_num_, static_basic_arg_.normal_table_id_,
static_basic_arg_.normal_tablet_id_array_);

GET_LS(test_tenant_id, static_basic_arg_.dup_ls_id_num_, ls_handle);
SERVER_LOG(INFO, "[ObMultiReplicaTestBase] -------- before wait dup tablet discover", K(ret),
K(static_basic_arg_));
RETRY_UNTIL_TIMEOUT(ls_handle.get_ls()->dup_table_ls_handler_.get_dup_tablet_count()
== static_basic_arg_.dup_tablet_id_array_.count(),
20 * 1000 * 1000, 100 * 1000);
RETRY_UNTIL_TIMEOUT(
ls_handle.get_ls()->dup_table_ls_handler_.tablets_mgr_ptr_->get_readable_tablet_set_count()
>= 1,
20 * 1000 * 1000, 100 * 1000);
RETRY_UNTIL_TIMEOUT(
ls_handle.get_ls()
->dup_table_ls_handler_.tablets_mgr_ptr_->get_need_confirm_tablet_set_count()
== 0,
20 * 1000 * 1000, 100 * 1000);
SERVER_LOG(INFO, "[ObMultiReplicaTestBase] -------- after wait dup tablet discover", K(ret),
K(static_basic_arg_),
K(ls_handle.get_ls()->dup_table_ls_handler_.get_dup_tablet_count()));
ASSERT_EQ(OB_SUCCESS, ret /*has_dup_tablet*/);

WRITE_SQL_BY_CONN(test_conn, "set autocommit = false;");
WRITE_SQL_BY_CONN(test_conn, "begin;");

const int64_t DEFAULT_LOAD_ROW_CNT = 10;
for (int i = 1; i <= DEFAULT_LOAD_ROW_CNT; i++) {
std::string insert_dup_sql_str =
"INSERT INTO " + test_dup_table_name + " VALUES(" + std::to_string(i) + ", 0 , 0)";
std::string insert_normal_sql_str =
"INSERT INTO " + test_normal_table_name + " VALUES(" + std::to_string(i) + ", 0 , 0)";
WRITE_SQL_BY_CONN(test_conn, insert_dup_sql_str.c_str());
WRITE_SQL_BY_CONN(test_conn, insert_normal_sql_str.c_str());
}
WRITE_SQL_BY_CONN(test_conn, "commit;");

errsim_apply_SWL_ = true;

static_basic_arg_.tenant_id_ = test_tenant_id;
std::string tmp_str;
ASSERT_EQ(OB_SUCCESS, EventArgSerTool<TableBasicArg>::serialize_arg(static_basic_arg_, tmp_str));
finish_event("CREATE_TEST_TABLE", tmp_str);
}

void switch_leader_and_check(sqlclient::ObISQLConnection *test_conn,
const int64_t tenant_id,
const int64_t ls_id_num,
const std::string local_ip,
const std::string target_ip,
const bool is_dup_ls = false)
{
int ret = OB_SUCCESS;

TRANS_LOG(INFO, "[ObMultiReplicaTestBase] Start switching leader to local server", K(ret),
K(tenant_id), K(ls_id_num), K(local_ip.c_str()), K(target_ip.c_str()), K(is_dup_ls));

GET_LS(tenant_id, ls_id_num, ls_handle);
if (local_ip == target_ip) {
if (is_dup_ls) {
RETRY_UNTIL_TIMEOUT(!ls_handle.get_ls()->dup_table_ls_handler_.is_master(), 20 * 1000 * 1000,
100 * 1000);
ASSERT_EQ(OB_SUCCESS, ret);
}
RETRY_UNTIL_TIMEOUT(!ls_handle.get_ls()->ls_tx_svr_.mgr_->is_master(), 20 * 1000 * 1000,
100 * 1000);
ASSERT_EQ(OB_SUCCESS, ret);
}
std::string ls_id_str = std::to_string(ls_id_num);

std::string switch_leader_sql = "alter system switch replica leader ls=" + ls_id_str + " server='"
+ target_ip + "' tenant='tt1';";

WRITE_SQL_BY_CONN(test_conn, switch_leader_sql.c_str());
if (local_ip == target_ip) {
if (is_dup_ls) {
RETRY_UNTIL_TIMEOUT(ls_handle.get_ls()->dup_table_ls_handler_.is_master(), 20 * 1000 * 1000,
100 * 1000);
ASSERT_EQ(OB_SUCCESS, ret);
}
RETRY_UNTIL_TIMEOUT(ls_handle.get_ls()->ls_tx_svr_.mgr_->is_master(), 20 * 1000 * 1000,
100 * 1000);
ASSERT_EQ(OB_SUCCESS, ret);
}

TRANS_LOG(INFO, "[ObMultiReplicaTestBase] Finish switching leader to local server", K(ret),
K(tenant_id), K(ls_id_num), K(local_ip.c_str()), K(target_ip.c_str()), K(is_dup_ls));
}

TEST_F(GET_ZONE_TEST_CLASS_NAME(2), switch_leader_to_zone2)
{
std::string tmp_event_val;
ASSERT_EQ(OB_SUCCESS, wait_event_finish("CREATE_TEST_TABLE", tmp_event_val, 30 * 60 * 1000));
ASSERT_EQ(OB_SUCCESS,
EventArgSerTool<TableBasicArg>::deserialize_arg(static_basic_arg_, tmp_event_val));

common::ObMySQLProxy &test_tenant_sql_proxy = get_curr_simple_server().get_sql_proxy();
ACQUIRE_CONN_FROM_SQL_PROXY(test_conn, test_tenant_sql_proxy);

std::string target_ip = local_ip_ + ":" + std::to_string(rpc_ports_[1]);
switch_leader_and_check(test_conn, static_basic_arg_.tenant_id_,
static_basic_arg_.normal_ls_id_num_, target_ip, target_ip, false);
switch_leader_and_check(test_conn, static_basic_arg_.tenant_id_, static_basic_arg_.dup_ls_id_num_,
target_ip, target_ip, true);

ASSERT_EQ(OB_SUCCESS, finish_event("SWITCH_LEADER_TO_ZONE2_GRACEFULLY", ""));
}

bool check_follower_with_lock(ObLS *ls)
{
bool is_follower = false;
if (ls->ls_tx_svr_.mgr_->rwlock_.try_wrlock()) {
is_follower = ls->ls_tx_svr_.mgr_->tx_ls_state_mgr_.is_follower();
ls->ls_tx_svr_.mgr_->rwlock_.wrunlock();
}

return is_follower;
}

TEST_F(GET_ZONE_TEST_CLASS_NAME(2), switch_follower_failed_from_zone2_with_start_working_on_failure)
{
int ret = OB_SUCCESS;
std::string tmp_event_val;
ASSERT_EQ(OB_SUCCESS,
wait_event_finish("SWITCH_LEADER_TO_ZONE2_GRACEFULLY", tmp_event_val, 30 * 60 * 1000));
common::ObMySQLProxy &test_tenant_sql_proxy = get_curr_simple_server().get_sql_proxy();
ACQUIRE_CONN_FROM_SQL_PROXY(test_conn, test_tenant_sql_proxy);

GET_LS(static_basic_arg_.tenant_id_, static_basic_arg_.dup_ls_id_num_, dup_ls_handle);
GET_LS(static_basic_arg_.tenant_id_, static_basic_arg_.normal_ls_id_num_, normal_ls_handle);

errsim_dup_ls_id_ = share::ObLSID(static_basic_arg_.dup_ls_id_num_);
errsim_normal_ls_id_ = share::ObLSID(static_basic_arg_.normal_ls_id_num_);

// switch to follower with timeout;
// resume leader and submit start_working;
std::string target_ip = local_ip_ + ":" + std::to_string(rpc_ports_[0]);
errsim_switch_follower_ = true;

switch_leader_and_check(test_conn, static_basic_arg_.tenant_id_,
static_basic_arg_.normal_ls_id_num_, "", target_ip, false);
switch_leader_and_check(test_conn, static_basic_arg_.tenant_id_, static_basic_arg_.dup_ls_id_num_,
"", target_ip, true);

share::SCN normal_applyied_SWL_scn =
normal_ls_handle.get_ls()->ls_tx_svr_.mgr_->tx_ls_state_mgr_.max_applied_start_working_ts_;
usleep(50 * 1000);
RETRY_UNTIL_TIMEOUT(check_follower_with_lock(normal_ls_handle.get_ls()), 10 * 1000 * 1000,
100 * 1000);
TRANS_LOG(INFO, "[ObMultiReplicaTestBase] zone2 can not become a follower", K(ret),
KPC(normal_ls_handle.get_ls()->ls_tx_svr_.mgr_));
ASSERT_EQ(ret, OB_TIMEOUT);

ret = OB_SUCCESS;
ASSERT_EQ(
normal_ls_handle.get_ls()->ls_tx_svr_.mgr_->tx_ls_state_mgr_.max_applied_start_working_ts_
> normal_applyied_SWL_scn,
true);

RETRY_UNTIL_TIMEOUT(normal_ls_handle.get_ls()->ls_tx_svr_.mgr_->tx_ls_state_mgr_.is_follower(),
30 * 1000 * 1000, 1 * 1000);
ASSERT_EQ(ret, OB_SUCCESS);
block_start_working_submitting_ = true;

// block msg with a busy start_working_cb
ATOMIC_STORE(&block_msg_, true);
TRANS_LOG(INFO, "[ObMultiReplicaTestBase] Start to block msg", K(ret),
KPC(normal_ls_handle.get_ls()->ls_tx_svr_.mgr_));

finish_event("BLOCK_ZONE2_MSG_PROCESS", "");

ASSERT_EQ(OB_SUCCESS,
wait_event_finish("BLOCK_ZONE2_MSG_PROCESS", tmp_event_val, 30 * 60 * 1000));

usleep(1 * 1000 * 1000);
block_start_working_submitting_ = false;

RETRY_UNTIL_TIMEOUT(
!normal_ls_handle.get_ls()->ls_tx_svr_.mgr_->ls_log_writer_.start_working_cbs_.is_empty(),
5 * 1000 * 1000, 10 * 1000);
TRANS_LOG(INFO, "[ObMultiReplicaTestBase] wait a pending start_working_cb", K(ret),
KPC(normal_ls_handle.get_ls()->ls_tx_svr_.mgr_));
ASSERT_EQ(ret, OB_SUCCESS);
ASSERT_EQ(normal_ls_handle.get_ls()->ls_tx_svr_.mgr_->is_r_pending_(), true);
// wait election lease expired
usleep(15 * 1000 * 1000);

finish_event("ZONE1_SUBMIT_LAST_START_WORKING", "");

ASSERT_EQ(OB_SUCCESS,
wait_event_finish("ZONE1_BECOME_LEADER_IN_BLOCK_MSG", tmp_event_val, 30 * 60 * 1000));
TRANS_LOG(INFO, "[ObMultiReplicaTestBase] The zone 1 has been a new leader", K(ret),
KPC(normal_ls_handle.get_ls()->ls_tx_svr_.mgr_));
ASSERT_EQ(ret, OB_SUCCESS);

// usleep(2 * 1000 * 1000);

ATOMIC_STORE(&block_msg_, false);
TRANS_LOG(INFO, "[ObMultiReplicaTestBase] Finish to block msg", K(ret),
KPC(normal_ls_handle.get_ls()->ls_tx_svr_.mgr_));

// TODO: start_working on failure
// RETRY_UNTIL_TIMEOUT(
//   normal_ls_handle.get_ls()->ls_tx_svr_.mgr_->tx_ls_state_mgr_.cur_state_.state_val_.state_
//       == ObTxLSStateMgr::TxLSState::R_SYNC_FAILED,
//   5 * 1000 * 1000, 10 * 1000);
// ASSERT_EQ(ret, OB_SUCCESS);

RETRY_UNTIL_TIMEOUT(
normal_ls_handle.get_ls()->ls_tx_svr_.mgr_->tx_ls_state_mgr_.cur_state_.state_val_.state_
== ObTxLSStateMgr::TxLSState::F_WORKING,
5 * 1000 * 1000, 10 * 1000);
ASSERT_EQ(ret, OB_SUCCESS);

errsim_switch_follower_ = false;

finish_event("SWITCH_FOLLOWER_FAILED_FROM_ZONE2", "");
}

TEST_F(GET_ZONE_TEST_CLASS_NAME(1), switch_leader_to_zone1_with_start_working_error)
{
int ret = OB_SUCCESS;
std::string tmp_event_val;

ASSERT_EQ(OB_SUCCESS,
wait_event_finish("BLOCK_ZONE2_MSG_PROCESS", tmp_event_val, 30 * 60 * 1000));
ATOMIC_STORE(&block_msg_, true);

finish_event("BLOCK_ZONE1_MSG_PROCESS", "");

ASSERT_EQ(OB_SUCCESS,
wait_event_finish("ZONE1_SUBMIT_LAST_START_WORKING", tmp_event_val, 30 * 60 * 1000));
ATOMIC_STORE(&block_msg_, false);

GET_LS(static_basic_arg_.tenant_id_, static_basic_arg_.normal_ls_id_num_, normal_ls_handle);
// switch to leader and submit start_working log
RETRY_UNTIL_TIMEOUT(
normal_ls_handle.get_ls()->ls_tx_svr_.mgr_->tx_ls_state_mgr_.is_start_working_apply_pending(),
20 * 1000 * 1000, 100 * 1000);
ASSERT_EQ(OB_SUCCESS, ret);
TRANS_LOG(INFO, "[ObMultiReplicaTestBase] The zone 1 has been a new leader", K(ret),
KPC(normal_ls_handle.get_ls()->ls_tx_svr_.mgr_));
finish_event("ZONE1_BECOME_LEADER_IN_BLOCK_MSG", "");

RETRY_UNTIL_TIMEOUT(normal_ls_handle.get_ls()->ls_tx_svr_.mgr_->tx_ls_state_mgr_.is_master(),
20 * 1000 * 1000, 1 * 1000 * 1000);
ASSERT_EQ(OB_TIMEOUT, ret);

finish_event("KEEP_SWL_PENDING_IN_ZONE1", "");
}

TEST_F(GET_ZONE_TEST_CLASS_NAME(1), transfer_with_block_normal)
{
// TODO
}

TEST_F(GET_ZONE_TEST_CLASS_NAME(2), offline_ls_with_retry)
{
// TODO
}

TEST_F(GET_ZONE_TEST_CLASS_NAME(2), gc_ls)
{
// TODO
}

} // namespace unittest
} // namespace oceanbase
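
The errsim_* member definitions near the top of this file act as error-injection hooks: ObLSTxCtxMgr presumably calls them at its switch/apply points, and this translation unit supplies OB_NOINLINE bodies driven by file-local flags. A hedged, self-contained sketch of one way such hooks are commonly wired up (types and names below are illustrative, not the real declarations):

// Illustrative only: production declares the hook and calls it at the injection point.
struct DemoMgr {
  int errsim_before_switch();                        // body supplied elsewhere
  int switch_to_follower() { return errsim_before_switch(); }
};

static bool g_demo_inject_failure = false;           // flipped by the test body

// The test translation unit provides the definition and consults the flag.
int DemoMgr::errsim_before_switch() { return g_demo_inject_failure ? -1 /* simulated error */ : 0; }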
@@ -35,6 +35,7 @@ function(ob_offline_observer case case_file)
EXCLUDE_FROM_ALL
${case_file}
${OBSERVER_TEST_SRCS}
../${MIT_SRCS}
)
target_include_directories(${case} PUBLIC
${CMAKE_SOURCE_DIR}/unittest ${CMAKE_SOURCE_DIR}/mittest)
@@ -53,6 +54,7 @@ endfunction()

ob_offline_observer(test_simple_ob test_ob_simple_cluster.cpp)
ob_offline_observer(test_transfer_tx test_transfer_tx.cpp)
ob_offline_observer(test_tx_data test_tx_data.cpp)

ob_unittest_observer(test_transfer_no_kill_tx test_transfer_tx.cpp)
ob_unittest_observer(test_standby_balance test_standby_balance_ls_group.cpp)

@@ -106,7 +106,8 @@ ObSimpleClusterTestBase::~ObSimpleClusterTestBase()

void ObSimpleClusterTestBase::SetUp()
{
SERVER_LOG(INFO, "SetUp");
auto case_name = ::testing::UnitTest::GetInstance()->current_test_info()->name();
SERVER_LOG(INFO, "SetUp>>>>>>>>>>>>>>", K(case_name));
int ret = OB_SUCCESS;
if (!is_started_) {
if (OB_FAIL(start())) {
@@ -126,7 +127,8 @@ void ObSimpleClusterTestBase::SetUp()

void ObSimpleClusterTestBase::TearDown()
{

auto case_name = ::testing::UnitTest::GetInstance()->current_test_info()->name();
SERVER_LOG(INFO, "TearDown>>>>>>>>>>>>>>", K(case_name));
}

void ObSimpleClusterTestBase::TearDownTestCase()

@@ -84,9 +84,10 @@ int ObTxData::add_undo_action(ObTxTable *tx_table,
// STORAGE_LOG(DEBUG, "do add_undo_action");
UNUSED(undo_node);
int ret = OB_SUCCESS;
SpinWLockGuard guard(undo_status_list_.lock_);
init_tx_op();
SpinWLockGuard guard(op_guard_->get_undo_status_list().lock_);
ObTxDataTable *tx_data_table = nullptr;
ObUndoStatusNode *node = undo_status_list_.head_;
ObUndoStatusNode *node = op_guard_->get_undo_status_list().head_;
if (OB_ISNULL(tx_table)) {
ret = OB_INVALID_ARGUMENT;
STORAGE_LOG(WARN, "tx table is nullptr.", KR(ret));
@@ -104,9 +105,9 @@ int ObTxData::add_undo_action(ObTxTable *tx_table,
STORAGE_LOG(WARN, "alloc_undo_status_node() fail", KR(ret));
} else {
new_node->next_ = node;
undo_status_list_.head_ = new_node;
op_guard_->get_undo_status_list().head_ = new_node;
node = new_node;
undo_status_list_.undo_node_cnt_++;
op_guard_->get_undo_status_list().undo_node_cnt_++;
}
for (int64_t idx = 0; idx < TX_DATA_UNDO_ACT_MAX_NUM_PER_NODE; ++idx) {
node->undo_actions_[node->size_++] = new_undo_action;
@@ -140,8 +141,8 @@ int ObTxDataMemtableScanIterator
// not exactly accurate, but enough for unittest
ATOMIC_STORE(&BIGGEST_TX_DATA_SIZE, buffer_len_);
}
if (tx_data_->undo_status_list_.undo_node_cnt_ > 0) {
std::cout << "tx_id:" << tx_data_->tx_id_.get_id() << ", undo cnt:" << tx_data_->undo_status_list_.undo_node_cnt_ << ", generate size:" << generate_size_ << std::endl;
if (tx_data_->op_guard_->get_undo_status_list().undo_node_cnt_ > 0) {
std::cout << "tx_id:" << tx_data_->tx_id_.get_id() << ", undo cnt:" << tx_data_->op_guard_->get_undo_status_list().undo_node_cnt_ << ", generate size:" << generate_size_ << std::endl;
}
ATOMIC_STORE(&DUMP_BIG_TX_DATA, true);
/**************************************************************************************************/
@@ -225,7 +226,11 @@ int ObTxDataSingleRowGetter::deserialize_tx_data_from_store_buffers_(ObTxData &t
tx_data.tx_id_.get_id(),
tx_data_buffers_.count());
ATOMIC_STORE(&LOAD_BIG_TX_DATA, true);
std::cout << "read big tx id from sstable, tx_id:" << ATOMIC_LOAD(&TEST_TX_ID) << ", undo cnt:" << tx_data.undo_status_list_.undo_node_cnt_ << ", buffer cnt:" << tx_data_buffers_.count() << std::endl;
int64_t undo_cnt = 0;
if (tx_data.op_guard_.is_valid()) {
undo_cnt = tx_data.op_guard_->get_undo_status_list().undo_node_cnt_;
}
std::cout << "read big tx id from sstable, tx_id:" << ATOMIC_LOAD(&TEST_TX_ID) << ", undo cnt:" << undo_cnt << ", buffer cnt:" << tx_data_buffers_.count() << std::endl;
}
}
/**************************************************************************************************/

@@ -64,7 +64,7 @@ class DoNothingOP : public ObITxDataCheckFunctor
{
virtual int operator()(const ObTxData &tx_data, ObTxCCCtx *tx_cc_ctx = nullptr) {
UNUSED(tx_cc_ctx);
cout << "read tx data:" << tx_data.tx_id_.get_id() << ", undo cnt:" << tx_data.undo_status_list_.undo_node_cnt_ << endl;
cout << "read tx data:" << tx_data.tx_id_.get_id() << ", undo cnt:" << tx_data.op_guard_->get_undo_status_list().undo_node_cnt_ << endl;
STORAGE_LOG_RET(INFO, 0, "read tx data", K(tx_data.tx_id_), K(lbt()));
return OB_SUCCESS;
}
@@ -80,6 +80,8 @@ int ObSimpleClusterExampleTest::do_balance_inner_(uint64_t tenant_id)
ObBalanceJob job;
if (OB_FAIL(b_svr->gather_stat_())) {
LOG_WARN("failed to gather stat", KR(ret));
} else if (OB_FAIL(b_svr->gather_ls_status_stat(tenant_id, b_svr->ls_array_))) {
LOG_WARN("failed to gather stat", KR(ret));
} else if (OB_FAIL(ObBalanceJobTableOperator::get_balance_job(
tenant_id, false, *GCTX.sql_proxy_, job, start_time, finish_time))) {
if (OB_ENTRY_NOT_EXIST == ret) {
@@ -352,6 +354,7 @@ TEST_F(ObSimpleClusterExampleTest, tx_exit)
EQ(OB_TRANS_CTX_NOT_EXIST, SSH::wait_tx_exit(R.tenant_id_, loc2, tx_id));
}

/*
TEST_F(ObSimpleClusterExampleTest, large_query)
{
TRANSFER_CASE_PREPARE;
@@ -435,7 +438,7 @@ TEST_F(ObSimpleClusterExampleTest, large_query)
LOGI("large_query: row_count:%ld", row_count);
//get_curr_simple_server().get_sql_proxy().write("alter system set syslog_level='INFO'", affected_rows);
}

*/

TEST_F(ObSimpleClusterExampleTest, epoch_recover_from_active_info)
{
@@ -1215,7 +1218,9 @@ TEST_F(ObSimpleClusterExampleTest, transfer_tx_ctx_merge)
TEST_F(ObSimpleClusterExampleTest, transfer_batch)
{
TRANSFER_CASE_PREPARE;
sql_proxy.write("alter system set _transfer_start_trans_timeout='5s'",affected_rows);

sql_proxy.write("alter system set _transfer_start_trans_timeout = '10s'", affected_rows);
std::set<sqlclient::ObISQLConnection*> jobs;
for (int i =0 ;i< 5000;i++) {
sqlclient::ObISQLConnection *conn = NULL;
@@ -1243,6 +1248,7 @@ TEST_F(ObSimpleClusterExampleTest, transfer_batch)
int64_t sum = 0;
EQ(0, SSH::select_int64(sql_proxy, "select sum(col) as val from stu2", sum));
EQ(100 * 5000, sum);
sql_proxy.write("alter system set _transfer_start_trans_timeout='1s'",affected_rows);
}

TEST_F(ObSimpleClusterExampleTest, transfer_retain_ctx)

@@ -320,7 +320,7 @@ TEST_F(ObTransferWithSmallerStartSCN, smaller_start_scn)
ASSERT_EQ(0, SSH::submit_redo(tenant_id, loc1));

ObTxLoopWorker *worker = MTL(ObTxLoopWorker *);
worker->scan_all_ls_(true, true);
worker->scan_all_ls_(true, true, false);
usleep(1 * 1000 * 1000);

// Step4: let the tx data table update upper info
@@ -397,7 +397,7 @@ TEST_F(ObTransferWithSmallerStartSCN, smaller_start_scn)
}
ASSERT_EQ(loc1, loc2);

worker->scan_all_ls_(true, true);
worker->scan_all_ls_(true, true, false);
usleep(1 * 1000 * 1000);

fprintf(stdout, "start update upper info the second time\n");

@@ -85,7 +85,7 @@ int ObTxCtxMemtableScanIterator::serialize_next_tx_ctx_(ObTxLocalBuffer &buffer,
if (OB_FAIL(ret)) {
STORAGE_LOG(INFO, "get next tx ctx table info failed", KR(ret), KPC(tx_ctx));
} else if (SLEEP_BEFORE_DUMP_TX_CTX) {
fprintf(stdout, "ready to dump tx ctx, undo status node ptr : %p\n", tx_ctx->ctx_tx_data_.tx_data_guard_.tx_data()->undo_status_list_.head_);
fprintf(stdout, "ready to dump tx ctx, undo status node ptr : %p\n", tx_ctx->ctx_tx_data_.tx_data_guard_.tx_data()->op_guard_->get_undo_status_list().head_);
fprintf(stdout, "sleep 20 seconds before dump\n");
HAS_GOT_TX_CTX = true;
SLEEP_BEFORE_DUMP_TX_CTX = false;
373
mittest/simple_server/test_tx_data.cpp
Normal file
373
mittest/simple_server/test_tx_data.cpp
Normal file
@ -0,0 +1,373 @@
|
||||
/**
|
||||
* Copyright (c) 2021 OceanBase
|
||||
* OceanBase CE is licensed under Mulan PubL v2.
|
||||
* You can use this software according to the terms and conditions of the Mulan PubL v2.
|
||||
* You may obtain a copy of Mulan PubL v2 at:
|
||||
* http://license.coscl.org.cn/MulanPubL-2.0
|
||||
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
|
||||
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
* See the Mulan PubL v2 for more details.
|
||||
*/
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#define USING_LOG_PREFIX SERVER
|
||||
#define protected public
|
||||
#define private public
|
||||
|
||||
#include "env/ob_simple_cluster_test_base.h"
|
||||
#include "mittest/env/ob_simple_server_helper.h"
|
||||
#include "storage/tx_storage/ob_ls_service.h"
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace unittest
|
||||
{
|
||||
|
||||
using namespace oceanbase::transaction;
|
||||
using namespace oceanbase::storage;
|
||||
|
||||
|
||||
#define EQ(x, y) GTEST_ASSERT_EQ(x, y);
|
||||
#define NEQ(x, y) GTEST_ASSERT_NE(x, y);
|
||||
#define LE(x, y) GTEST_ASSERT_LE(x, y);
|
||||
#define GE(x, y) GTEST_ASSERT_GE(x, y);
|
||||
|
||||
class TestRunCtx
|
||||
{
|
||||
public:
|
||||
uint64_t tenant_id_ = 0;
|
||||
int64_t time_sec_ = 0;
|
||||
};
|
||||
|
||||
TestRunCtx R;
|
||||
|
||||
class ObTxDataTest : public ObSimpleClusterTestBase
|
||||
{
|
||||
public:
|
||||
// specify the run directory prefix of this case: test_ob_simple_cluster_
|
||||
ObTxDataTest() : ObSimpleClusterTestBase("test_tx_data_", "50G", "50G") {}
|
||||
};
|
||||
|
||||
TEST_F(ObTxDataTest, observer_start)
|
||||
{
|
||||
SERVER_LOG(INFO, "observer_start succ");
|
||||
}
|
||||
|
||||
// creating a tenant is not lightweight; only do it when the scenario really needs it
|
||||
TEST_F(ObTxDataTest, add_tenant)
|
||||
{
|
||||
// create normal tenant tt1
|
||||
ASSERT_EQ(OB_SUCCESS, create_tenant("tt1", "40G", "40G", false, 10));
|
||||
// get the tenant_id of tenant tt1
|
||||
ASSERT_EQ(OB_SUCCESS, get_tenant_id(R.tenant_id_));
|
||||
ASSERT_NE(0, R.tenant_id_);
|
||||
// init the sql proxy of normal tenant tt1
|
||||
ASSERT_EQ(OB_SUCCESS, get_curr_simple_server().init_sql_proxy2());
|
||||
}
|
||||
|
||||
TEST_F(ObTxDataTest, create_new_ls)
|
||||
{
|
||||
// create a new log stream on the single-node observer; make sure it is not GCed by the RS task
|
||||
EQ(0, SSH::create_ls(R.tenant_id_, get_curr_observer().self_addr_));
|
||||
int64_t ls_count = 0;
|
||||
EQ(0, SSH::g_select_int64(R.tenant_id_, "select count(ls_id) as val from __all_ls where ls_id!=1", ls_count));
|
||||
EQ(2, ls_count);
|
||||
}
|
||||
|
||||
TEST_F(ObTxDataTest, rollback_to)
|
||||
{
|
||||
common::ObMySQLProxy &sql_proxy = get_curr_simple_server().get_sql_proxy2();
|
||||
int64_t affected_rows;
|
||||
EQ(0, sql_proxy.write("drop table if exists stu1", affected_rows));
|
||||
EQ(0, sql_proxy.write("create table stu1(col1 int)", affected_rows));
|
||||
sqlclient::ObISQLConnection *conn1 = NULL;
|
||||
EQ(0, sql_proxy.acquire(conn1));
|
||||
EQ(0, SSH::write(conn1, "set autocommit=0", affected_rows));
|
||||
EQ(0, SSH::write(conn1, "insert into stu1 values(100)", affected_rows));
|
||||
ObTransID tx_id;
|
||||
EQ(0, SSH::find_tx(conn1, tx_id));
|
||||
LOGI("find_tx:%ld", tx_id.get_id());
|
||||
EQ(0, SSH::write(conn1, "savepoint sp1"));
|
||||
EQ(0, SSH::write(conn1, "insert into stu1 values(200)", affected_rows));
|
||||
EQ(0, SSH::write(conn1, "rollback to sp1"));
|
||||
int64_t val = 0;
|
||||
EQ(0, SSH::select_int64(conn1, "select sum(col1) val from stu1", val));
|
||||
EQ(100, val);
|
||||
EQ(0, SSH::write(conn1, "commit"));
|
||||
}
|
||||
|
||||
TEST_F(ObTxDataTest, rollback_to_with_redo)
|
||||
{
|
||||
common::ObMySQLProxy &sql_proxy = get_curr_simple_server().get_sql_proxy2();
|
||||
int64_t affected_rows;
|
||||
EQ(0, sql_proxy.write("drop table if exists stu1", affected_rows));
|
||||
EQ(0, sql_proxy.write("create table stu1(col1 int)", affected_rows));
|
||||
|
||||
sqlclient::ObISQLConnection *conn1 = NULL;
|
||||
EQ(0, sql_proxy.acquire(conn1));
|
||||
EQ(0, SSH::write(conn1, "set autocommit=0", affected_rows));
|
||||
EQ(0, SSH::write(conn1, "insert into stu1 values(100)", affected_rows));
|
||||
ObTransID tx_id;
|
||||
EQ(0, SSH::find_tx(conn1, tx_id));
|
||||
LOGI("find_tx:%ld", tx_id.get_id());
|
||||
|
||||
EQ(0, SSH::write(conn1, "savepoint sp1"));
|
||||
EQ(0, SSH::write(conn1, "insert into stu1 values(200)", affected_rows));
|
||||
ObLSID loc1;
|
||||
EQ(0, SSH::select_table_loc(R.tenant_id_, "stu1", loc1));
|
||||
// when tx has redo, rollback to need write log
|
||||
EQ(0, SSH::submit_redo(R.tenant_id_, loc1));
|
||||
|
||||
EQ(0, SSH::write(conn1, "rollback to sp1"));
|
||||
int64_t val = 0;
|
||||
EQ(0, SSH::select_int64(conn1, "select sum(col1) val from stu1", val));
|
||||
EQ(100, val);
|
||||
EQ(0, SSH::write(conn1, "commit"));
|
||||
}
|
||||
|
||||
TEST_F(ObTxDataTest, rollback_to_with_read_sstable_uncommit)
|
||||
{
|
||||
common::ObMySQLProxy &sql_proxy = get_curr_simple_server().get_sql_proxy2();
|
||||
int64_t affected_rows;
|
||||
EQ(0, sql_proxy.write("drop table if exists stu1", affected_rows));
|
||||
EQ(0, sql_proxy.write("create table stu1(col1 int)", affected_rows));
|
||||
|
||||
sqlclient::ObISQLConnection *conn1 = NULL;
|
||||
EQ(0, sql_proxy.acquire(conn1));
|
||||
EQ(0, SSH::write(conn1, "set autocommit=0", affected_rows));
|
||||
EQ(0, SSH::write(conn1, "insert into stu1 values(100)", affected_rows));
|
||||
ObTransID tx_id;
|
||||
EQ(0, SSH::find_tx(conn1, tx_id));
|
||||
LOGI("find_tx:%ld", tx_id.get_id());
|
||||
|
||||
EQ(0, SSH::write(conn1, "savepoint sp1"));
|
||||
EQ(0, SSH::write(conn1, "insert into stu1 values(200)", affected_rows));
|
||||
ObLSID loc1;
|
||||
EQ(0, SSH::select_table_loc(R.tenant_id_, "stu1", loc1));
|
||||
// when tx has redo, rollback to need write log
|
||||
EQ(0, SSH::submit_redo(R.tenant_id_, loc1));
|
||||
|
||||
EQ(0, SSH::write(conn1, "rollback to sp1"));
|
||||
int64_t val = 0;
|
||||
EQ(0, SSH::select_int64(conn1, "select sum(col1) val from stu1", val));
|
||||
EQ(100, val);
|
||||
|
||||
EQ(0, sql_proxy.write("alter system minor freeze", affected_rows));
|
||||
EQ(0, SSH::wait_checkpoint_newest(R.tenant_id_, loc1));
|
||||
// read from sstable uncommit row
|
||||
EQ(0, SSH::select_int64(conn1, "select sum(col1) val from stu1", val));
|
||||
EQ(100, val);
|
||||
|
||||
EQ(0, SSH::write(conn1, "commit"));
|
||||
EQ(0, SSH::select_int64(sql_proxy, "select sum(col1) as val from stu1",val));
|
||||
EQ(100, val);
|
||||
}
|
||||
|
||||
TEST_F(ObTxDataTest, rollback_to_with_ls_replay)
|
||||
{
|
||||
common::ObMySQLProxy &sql_proxy = get_curr_simple_server().get_sql_proxy2();
|
||||
int64_t affected_rows;
|
||||
EQ(0, sql_proxy.write("drop table if exists stu1", affected_rows));
|
||||
EQ(0, sql_proxy.write("create table stu1(col1 int)", affected_rows));
|
||||
|
||||
sqlclient::ObISQLConnection *conn1 = NULL;
|
||||
EQ(0, sql_proxy.acquire(conn1));
|
||||
EQ(0, SSH::write(conn1, "set autocommit=0", affected_rows));
|
||||
EQ(0, SSH::write(conn1, "insert into stu1 values(100)", affected_rows));
|
||||
ObTransID tx_id;
|
||||
EQ(0, SSH::find_tx(conn1, tx_id));
|
||||
LOGI("find_tx:%ld", tx_id.get_id());
|
||||
|
||||
EQ(0, SSH::write(conn1, "savepoint sp1"));
|
||||
EQ(0, SSH::write(conn1, "insert into stu1 values(200)", affected_rows));
|
||||
ObLSID loc1;
|
||||
EQ(0, SSH::select_table_loc(R.tenant_id_, "stu1", loc1));
|
||||
// when tx has redo, rollback to need write log
|
||||
EQ(0, SSH::submit_redo(R.tenant_id_, loc1));
|
||||
|
||||
EQ(0, SSH::write(conn1, "rollback to sp1"));
|
||||
int64_t val = 0;
|
||||
EQ(0, SSH::select_int64(conn1, "select sum(col1) val from stu1", val));
|
||||
EQ(100, val);
|
||||
|
||||
EQ(0, sql_proxy.write("alter system minor freeze", affected_rows));
|
||||
EQ(0, SSH::wait_checkpoint_newest(R.tenant_id_, loc1));
|
||||
// read from sstable uncommit row
|
||||
EQ(0, SSH::select_int64(conn1, "select sum(col1) val from stu1", val));
|
||||
EQ(100, val);
|
||||
|
||||
LOGI("ls_reboot:%ld", loc1.id());
|
||||
// tx has not commit, tx ctx recover from tx_sstable
|
||||
EQ(0, SSH::ls_reboot(R.tenant_id_, loc1));
|
||||
|
||||
EQ(0, SSH::write(conn1, "commit"));
|
||||
EQ(0, SSH::select_int64(sql_proxy, "select sum(col1) as val from stu1",val));
|
||||
EQ(100, val);
|
||||
}
|
||||
|
||||
TEST_F(ObTxDataTest, rollback_to_with_ls_replay_from_middle)
|
||||
{
|
||||
common::ObMySQLProxy &sql_proxy = get_curr_simple_server().get_sql_proxy2();
|
||||
int64_t affected_rows;
|
||||
EQ(0, sql_proxy.write("drop table if exists stu1", affected_rows));
|
||||
EQ(0, sql_proxy.write("create table stu1(col1 int)", affected_rows));
|
||||
|
||||
sqlclient::ObISQLConnection *conn1 = NULL;
|
||||
EQ(0, sql_proxy.acquire(conn1));
|
||||
EQ(0, SSH::write(conn1, "set autocommit=0", affected_rows));
|
||||
EQ(0, SSH::write(conn1, "insert into stu1 values(100)", affected_rows));
|
||||
ObTransID tx_id;
|
||||
EQ(0, SSH::find_tx(conn1, tx_id));
|
||||
LOGI("find_tx:%ld", tx_id.get_id());
|
||||
|
||||
EQ(0, SSH::write(conn1, "savepoint sp1"));
|
||||
EQ(0, SSH::write(conn1, "insert into stu1 values(200)", affected_rows));
|
||||
ObLSID loc1;
|
||||
EQ(0, SSH::select_table_loc(R.tenant_id_, "stu1", loc1));
|
||||
// when tx has redo, rollback to need write log
|
||||
EQ(0, SSH::submit_redo(R.tenant_id_, loc1));
|
||||
|
||||
EQ(0, sql_proxy.write("alter system minor freeze", affected_rows));
|
||||
EQ(0, SSH::wait_checkpoint_newest(R.tenant_id_, loc1));
|
||||
|
||||
EQ(0, SSH::write(conn1, "rollback to sp1"));
|
||||
int64_t val = 0;
|
||||
EQ(0, SSH::select_int64(conn1, "select sum(col1) val from stu1", val));
|
||||
EQ(100, val);
|
||||
|
||||
EQ(0, SSH::write(conn1, "commit"));
|
||||
// make tx_ctx checkpoint
|
||||
EQ(0, SSH::freeze_tx_ctx(R.tenant_id_, loc1));
|
||||
|
||||
LOGI("ls_reboot:%ld", loc1.id());
|
||||
EQ(0, SSH::ls_reboot(R.tenant_id_, loc1));
|
||||
|
||||
EQ(0, SSH::select_int64(sql_proxy, "select sum(col1) as val from stu1",val));
|
||||
EQ(100, val);
|
||||
}
|
||||
|
||||
TEST_F(ObTxDataTest, retain_ctx)
|
||||
{
|
||||
common::ObMySQLProxy &sql_proxy = get_curr_simple_server().get_sql_proxy2();
|
||||
int64_t affected_rows = 0;
|
||||
ObMySQLTransaction trans;
|
||||
EQ(0, trans.start(GCTX.sql_proxy_, R.tenant_id_));
|
||||
observer::ObInnerSQLConnection *conn = static_cast<observer::ObInnerSQLConnection *>(trans.get_connection());
|
||||
char buf[10];
|
||||
ObRegisterMdsFlag flag;
|
||||
ObLSID ls_id1(1001);
|
||||
ObLSID ls_id2(1002);
|
||||
EQ(0, conn->register_multi_data_source(R.tenant_id_,
|
||||
ls_id1,
|
||||
ObTxDataSourceType::TEST3,
|
||||
buf,
|
||||
10,
|
||||
flag));
|
||||
|
||||
EQ(0, SSH::submit_redo(R.tenant_id_, ls_id1));
|
||||
EQ(0, sql_proxy.write("alter system minor freeze", affected_rows));
|
||||
EQ(0, SSH::wait_checkpoint_newest(R.tenant_id_, ls_id1));
|
||||
|
||||
EQ(0, conn->register_multi_data_source(R.tenant_id_,
|
||||
ls_id2,
|
||||
ObTxDataSourceType::TEST3,
|
||||
buf,
|
||||
10,
|
||||
flag));
|
||||
ObTransID tx_id;
|
||||
EQ(0, SSH::g_select_int64(R.tenant_id_, "select trans_id as val from __all_virtual_trans_stat where is_exiting=0 and session_id<=1 limit 1", tx_id.tx_id_));
|
||||
LOGI("find active_tx tx_id:%ld", tx_id.get_id());
|
||||
|
||||
EQ(0, trans.end(true));
|
||||
// make tx_ctx checkpoint
|
||||
EQ(0, SSH::freeze_tx_ctx(R.tenant_id_, ls_id1));
|
||||
LOGI("ls_reboot:%ld", ls_id1.id());
|
||||
EQ(0, SSH::ls_reboot(R.tenant_id_, ls_id1));
|
||||
|
||||
EQ(0, SSH::freeze_tx_ctx(R.tenant_id_, ls_id2));
|
||||
LOGI("ls_reboot:%ld", ls_id2.id());
|
||||
EQ(0, SSH::ls_reboot(R.tenant_id_, ls_id2));
|
||||
}
|
||||
|
||||
TEST_F(ObTxDataTest, retain_ctx2)
|
||||
{
|
||||
common::ObMySQLProxy &sql_proxy = get_curr_simple_server().get_sql_proxy2();
|
||||
int64_t affected_rows = 0;
|
||||
EQ(0, sql_proxy.write("drop table if exists stu1", affected_rows));
|
||||
EQ(0, sql_proxy.write("create table stu1(col1 int)", affected_rows));
|
||||
ObMySQLTransaction trans;
|
||||
EQ(0, trans.start(GCTX.sql_proxy_, R.tenant_id_));
|
||||
observer::ObInnerSQLConnection *conn = static_cast<observer::ObInnerSQLConnection *>(trans.get_connection());
|
||||
char buf[10];
|
||||
ObRegisterMdsFlag flag;
|
||||
ObLSID ls_id1(1001);
|
||||
EQ(0, conn->register_multi_data_source(R.tenant_id_,
|
||||
ls_id1,
|
||||
ObTxDataSourceType::TEST3,
|
||||
buf,
|
||||
10,
|
||||
flag));
|
||||
|
||||
EQ(0, SSH::submit_redo(R.tenant_id_, ls_id1));
|
||||
EQ(0, sql_proxy.write("alter system minor freeze", affected_rows));
|
||||
EQ(0, SSH::wait_checkpoint_newest(R.tenant_id_, ls_id1));
|
||||
|
||||
EQ(0, sql_proxy.write("insert into stu1 values(100)", affected_rows));
|
||||
|
||||
EQ(0, conn->register_multi_data_source(R.tenant_id_,
|
||||
ls_id1,
|
||||
ObTxDataSourceType::TEST3,
|
||||
buf,
|
||||
10,
|
||||
flag));
|
||||
ObTransID tx_id;
|
||||
EQ(0, SSH::g_select_int64(R.tenant_id_, "select trans_id as val from __all_virtual_trans_stat where is_exiting=0 and session_id<=1 limit 1", tx_id.tx_id_));
|
||||
LOGI("find active_tx tx_id:%ld", tx_id.get_id());
|
||||
|
||||
EQ(0, trans.end(true));
|
||||
// make tx_ctx checkpoint
|
||||
EQ(0, SSH::freeze_tx_ctx(R.tenant_id_, ls_id1));
|
||||
// make tx_data checkpoint
|
||||
EQ(0, SSH::freeze_tx_data(R.tenant_id_, ls_id1));
|
||||
LOGI("ls_reboot:%ld", ls_id1.id());
|
||||
EQ(0, SSH::ls_reboot(R.tenant_id_, ls_id1));
|
||||
}
|
||||
|
||||
TEST_F(ObTxDataTest, end)
|
||||
{
|
||||
if (R.time_sec_ > 0) {
|
||||
::sleep(R.time_sec_);
|
||||
}
|
||||
}
|
||||
|
||||
} // end unittest
|
||||
} // end oceanbase
|
||||
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int64_t c = 0;
|
||||
int64_t time_sec = 0;
|
||||
char *log_level = (char*)"INFO";
|
||||
while(EOF != (c = getopt(argc,argv,"t:l:"))) {
|
||||
switch(c) {
|
||||
case 't':
|
||||
time_sec = atoi(optarg);
|
||||
break;
|
||||
case 'l':
|
||||
log_level = optarg;
|
||||
oceanbase::unittest::ObSimpleClusterTestBase::enable_env_warn_log_ = false;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
oceanbase::unittest::init_log_and_gtest(argc, argv);
|
||||
OB_LOGGER.set_log_level(log_level);
|
||||
|
||||
LOG_INFO("main>>>");
|
||||
oceanbase::unittest::R.time_sec_ = time_sec;
|
||||
::testing::InitGoogleTest(&argc, argv);
|
||||
return RUN_ALL_TESTS();
|
||||
}
|
@ -151,7 +151,7 @@ int LSFetchCtx::init(
|
||||
const int64_t start_tstamp_ns = start_parameters.get_start_tstamp_ns();
|
||||
const palf::LSN &start_lsn = start_parameters.get_start_lsn();
|
||||
// If the start lsn is 0, the service is started from creation
|
||||
const bool start_serve_from_create = (palf::PALF_INITIAL_LSN_VAL == start_lsn.val_);
|
||||
const bool start_serve_from_create = !tls_id.is_sys_log_stream() && palf::PALF_INITIAL_LSN_VAL == start_lsn.val_;
|
||||
ObBackupDest archive_dest;
|
||||
|
||||
reset();
|
||||
|
@ -248,8 +248,10 @@ int ObLogMetaManager::get_table_meta(
|
||||
ret = OB_ERR_UNEXPECTED;
|
||||
LOG_ERROR("expect valid schema_getter", KR(ret));
|
||||
} else {
|
||||
ObTimeGuard time_guard("get_table_meta", 2 * 1000 * 1000);
|
||||
RETRY_FUNC(stop_flag, *schema_getter, get_schema_guard_and_full_table_schema, tenant_id, table_id, global_schema_version, GET_SCHEMA_TIMEOUT,
|
||||
schema_mgr, table_schema);
|
||||
schema_mgr, table_schema);
|
||||
time_guard.click("get_full_table_schema");
|
||||
|
||||
if (OB_FAIL(ret)) {
|
||||
// caller deal with error code OB_TENANT_HAS_BEEN_DROPPED
|
||||
|
@ -26,6 +26,36 @@
#include "rocksdb/table_properties.h"
#include "rocksdb/utilities/table_properties_collectors.h"

#define RETRY_FUNC_ON_IO_ERROR_WITH_USLEEP_MS(stop_flag, sleep_ms, var, func, args...) \
    do {\
      if (OB_SUCC(ret)) \
      { \
        int64_t _retry_func_on_error_last_print_time = common::ObClockGenerator::getClock();\
        int64_t _retry_func_on_error_cur_print_time = 0;\
        const int64_t _PRINT_RETRY_FUNC_INTERVAL = 10 * _SEC_;\
        s = rocksdb::Status::IOError();\
        while (s.IsIOError() && ! (stop_flag)) \
        { \
          s = (var).func(args); \
          if (s.IsIOError()) { \
            ob_usleep(sleep_ms); \
          }\
          _retry_func_on_error_cur_print_time = common::ObClockGenerator::getClock();\
          if (_retry_func_on_error_cur_print_time - _retry_func_on_error_last_print_time >= _PRINT_RETRY_FUNC_INTERVAL) {\
            LOG_DBA_WARN(OB_IO_ERROR, \
                "msg", "put value into rocksdb failed", \
                "error", s.ToString().c_str(), \
                "last_print_time", _retry_func_on_error_last_print_time); \
            _retry_func_on_error_last_print_time = _retry_func_on_error_cur_print_time; \
          }\
        } \
        if ((stop_flag)) \
        { \
          ret = OB_IN_STOP_STATE; \
        } \
      } \
    } while (0)

namespace oceanbase
{
namespace libobcdc
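// A minimal standalone sketch of the retry pattern that RETRY_FUNC_ON_IO_ERROR_WITH_USLEEP_MS
// above implements: re-invoke the operation while it reports an I/O error and no stop has been
// requested, sleep between attempts, and rate-limit the warning log. Status, do_put and the
// clock/sleep helpers below are hypothetical stand-ins, not OceanBase or RocksDB types.
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

struct Status {
  bool io_error = false;
  bool IsIOError() const { return io_error; }
};

// Fails with an I/O error a few times, then succeeds.
static Status do_put(int &attempts_left) {
  return Status{--attempts_left > 0};
}

int main() {
  std::atomic<bool> stop{false};
  int attempts_left = 5;
  const auto print_interval = std::chrono::seconds(10);
  auto last_print = std::chrono::steady_clock::now();

  Status s;
  s.io_error = true;  // enter the loop at least once, as the macro does via Status::IOError()
  while (s.IsIOError() && !stop.load()) {
    s = do_put(attempts_left);  // the retried call
    if (s.IsIOError()) {
      std::this_thread::sleep_for(std::chrono::milliseconds(10));  // back off between attempts
    }
    const auto now = std::chrono::steady_clock::now();
    if (now - last_print >= print_interval) {
      std::fprintf(stderr, "still retrying after I/O error\n");  // rate-limited warning
      last_print = now;
    }
  }
  // The real macro additionally maps a pending stop request to OB_IN_STOP_STATE at this point.
  std::fprintf(stdout, "done, stopped=%d\n", stop.load() ? 1 : 0);
  return 0;
}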
@ -176,8 +206,8 @@ int RocksDbStoreService::put(const std::string &key, const ObSlice &value)
|
||||
ret = OB_IN_STOP_STATE;
|
||||
} else {
|
||||
// find column family handle for cf
|
||||
rocksdb::Status s = m_db_->Put(
|
||||
writer_options,
|
||||
rocksdb::Status s;
|
||||
RETRY_FUNC_ON_IO_ERROR_WITH_USLEEP_MS(is_stopped(), 1 * _SEC_, (*m_db_), Put, writer_options,
|
||||
rocksdb::Slice(key.c_str(), key.size()),
|
||||
rocksdb::Slice(value.buf_, value.buf_len_));
|
||||
|
||||
@ -203,7 +233,9 @@ int RocksDbStoreService::put(void *cf_handle, const std::string &key, const ObSl
|
||||
} else if (is_stopped()) {
|
||||
ret = OB_IN_STOP_STATE;
|
||||
} else {
|
||||
rocksdb::Status s = m_db_->Put(writer_options, column_family_handle, rocksdb::Slice(key),
|
||||
rocksdb::Status s;
|
||||
RETRY_FUNC_ON_IO_ERROR_WITH_USLEEP_MS(is_stopped(), 1 * _SEC_, (*m_db_), Put, writer_options, column_family_handle,
|
||||
rocksdb::Slice(key),
|
||||
rocksdb::Slice(value.buf_, value.buf_len_));
|
||||
|
||||
if (!s.ok()) {
|
||||
@ -231,10 +263,11 @@ int RocksDbStoreService::batch_write(void *cf_handle,
|
||||
rocksdb::WriteBatch batch;
|
||||
|
||||
for (int64_t idx = 0; OB_SUCC(ret) && !is_stopped() && idx < keys.size(); ++idx) {
|
||||
rocksdb::Status s = batch.Put(
|
||||
column_family_handle,
|
||||
rocksdb::Status s;
|
||||
RETRY_FUNC_ON_IO_ERROR_WITH_USLEEP_MS(is_stopped(), 1 * _SEC_, batch, Put, column_family_handle,
|
||||
rocksdb::Slice(keys[idx]),
|
||||
rocksdb::Slice(values[idx].buf_, values[idx].buf_len_));
|
||||
|
||||
if (!s.ok()) {
|
||||
ret = OB_IO_ERROR;
|
||||
_LOG_ERROR("RocksDbStoreService build batch failed, error %s", s.ToString().c_str());
|
||||
@ -313,7 +346,9 @@ int RocksDbStoreService::del(const std::string &key)
|
||||
// find column family handle for cf
|
||||
rocksdb::WriteOptions writer_options;
|
||||
writer_options.disableWAL = true;
|
||||
rocksdb::Status s = m_db_->Delete(writer_options, key);
|
||||
rocksdb::Status s;
|
||||
RETRY_FUNC_ON_IO_ERROR_WITH_USLEEP_MS(is_stopped(), 1 * _SEC_, (*m_db_), Delete, writer_options, key);
|
||||
|
||||
if (!s.ok()) {
|
||||
ret = OB_IO_ERROR;
|
||||
_LOG_ERROR("delete %s from rocksdb failed, error %s", key.c_str(), s.ToString().c_str());
|
||||
@ -336,7 +371,9 @@ int RocksDbStoreService::del(void *cf_handle, const std::string &key)
|
||||
} else if (is_stopped()) {
|
||||
ret = OB_IN_STOP_STATE;
|
||||
} else {
|
||||
rocksdb::Status s = m_db_->Delete(writer_options, column_family_handle, key);
|
||||
rocksdb::Status s;
|
||||
RETRY_FUNC_ON_IO_ERROR_WITH_USLEEP_MS(is_stopped(), 1 * _SEC_, (*m_db_), Delete, writer_options,
|
||||
column_family_handle, key);
|
||||
|
||||
if (!s.ok()) {
|
||||
LOG_ERROR("delete %s from rocksdb failed, error %s", key.c_str(), s.ToString().c_str());
|
||||
@ -361,8 +398,9 @@ int RocksDbStoreService::del_range(void *cf_handle, const std::string &begin_key
|
||||
} else {
|
||||
rocksdb::WriteOptions writer_options;
|
||||
writer_options.disableWAL = true;
|
||||
rocksdb::Status s = m_db_->DeleteRange(writer_options, column_family_handle,
|
||||
begin_key, end_key);
|
||||
rocksdb::Status s;
|
||||
RETRY_FUNC_ON_IO_ERROR_WITH_USLEEP_MS(is_stopped(), 1 * _SEC_, (*m_db_), DeleteRange, writer_options,
|
||||
column_family_handle, begin_key, end_key);
|
||||
|
||||
if (!s.ok()) {
|
||||
LOG_ERROR("DeleteRange %s from rocksdb failed, error %s", begin_key.c_str(), s.ToString().c_str());
|
||||
|
@ -1133,34 +1133,22 @@ int FetchLogARpc::analyze_result_(RpcRequest &rpc_req,
|
||||
ERRSIM_POINT_DEF(ALLOC_FETCH_LOG_ARPC_CB_FAIL);
|
||||
rpc::frame::ObReqTransport::AsyncCB *FetchLogARpc::RpcCB::clone(const rpc::frame::SPAlloc &alloc) const
|
||||
{
|
||||
void *buf = NULL;
|
||||
RpcCB *cb = NULL;
|
||||
if (OB_SUCCESS != ALLOC_FETCH_LOG_ARPC_CB_FAIL) {
|
||||
LOG_ERROR_RET(ALLOC_FETCH_LOG_ARPC_CB_FAIL, "ALLOC_FETCH_LOG_ARPC_CB_FAIL");
|
||||
} else if (OB_ISNULL(buf = alloc(sizeof(RpcCB)))) {
|
||||
LOG_ERROR_RET(OB_ALLOCATE_MEMORY_FAILED, "clone rpc callback fail", K(buf), K(sizeof(RpcCB)));
|
||||
} else if (OB_ISNULL(cb = new(buf) RpcCB(host_))) {
|
||||
LOG_ERROR_RET(OB_ALLOCATE_MEMORY_FAILED, "construct RpcCB fail", K(buf));
|
||||
} else {
|
||||
// success
|
||||
}
|
||||
|
||||
return cb;
|
||||
return static_cast<rpc::frame::ObReqTransport::AsyncCB *>(const_cast<FetchLogARpc::RpcCB*>(this));
|
||||
}
|
||||
|
||||
int FetchLogARpc::RpcCB::process()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObCdcLSFetchLogResp &result = RpcCBBase::result_;
|
||||
ObRpcResultCode &rcode = RpcCBBase::rcode_;
|
||||
const common::ObAddr &svr = RpcCBBase::dst_;
|
||||
const ObRpcResultCode rcode = RpcCBBase::rcode_;
|
||||
const common::ObAddr svr = RpcCBBase::dst_;
|
||||
|
||||
if (OB_FAIL(do_process_(rcode, &result))) {
|
||||
LOG_ERROR("process fetch log callback fail", KR(ret), K(result), K(rcode), K(svr));
|
||||
LOG_ERROR("process fetch log callback fail", KR(ret), K(rcode), K(svr));
|
||||
}
|
||||
// Aone:
|
||||
// Note: actively destruct the response after asynchronous RPC processing
|
||||
result.reset();
|
||||
// result.reset();
|
||||
|
||||
return ret;
|
||||
}
|
||||
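// The clone() rewrite above stops allocating a copy of the callback through the rpc frame's
// allocator and instead hands back the callback itself, so cloning can no longer fail for lack
// of memory; the same hunk moves the response cleanup into the callback's destructor. A small
// self-contained sketch of the two strategies; AsyncCB and MyCB are illustrative stand-ins,
// not the real rpc types.
#include <cstdio>
#include <new>

struct AsyncCB {
  virtual ~AsyncCB() = default;
  virtual AsyncCB *clone() = 0;
};

struct MyCB : AsyncCB {
  // Old style: allocate and construct a copy; may fail and return NULL.
  AsyncCB *clone_by_copy() { return new (std::nothrow) MyCB(*this); }
  // New style (as in the hunk): reuse this instance; the caller must keep it alive until the
  // asynchronous call completes.
  AsyncCB *clone() override { return this; }
};

int main() {
  MyCB cb;
  AsyncCB *shared = cb.clone();        // same object, no allocation
  AsyncCB *copy = cb.clone_by_copy();  // independent heap copy
  std::printf("shared is the original: %d\n", shared == static_cast<AsyncCB *>(&cb));
  delete copy;  // the copy owns its own storage
  return 0;
}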
@ -1169,7 +1157,7 @@ void FetchLogARpc::RpcCB::on_timeout()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObRpcResultCode rcode;
|
||||
const common::ObAddr &svr = RpcCBBase::dst_;
|
||||
const common::ObAddr svr = RpcCBBase::dst_;
|
||||
|
||||
rcode.rcode_ = OB_TIMEOUT;
|
||||
(void)snprintf(rcode.msg_, sizeof(rcode.msg_), "fetch log rpc timeout, svr=%s",
|
||||
@ -1184,7 +1172,7 @@ void FetchLogARpc::RpcCB::on_invalid()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObRpcResultCode rcode;
|
||||
const common::ObAddr &svr = RpcCBBase::dst_;
|
||||
const common::ObAddr svr = RpcCBBase::dst_;
|
||||
|
||||
// Invalid package encountered, decode failed
|
||||
rcode.rcode_ = OB_RPC_PACKET_INVALID;
|
||||
@ -1204,7 +1192,7 @@ int FetchLogARpc::RpcCB::do_process_(const ObRpcResultCode &rcode, const ObCdcLS
|
||||
FetchLogARpc &rpc_host = rpc_req.host_;
|
||||
|
||||
if (OB_FAIL(rpc_host.handle_rpc_response(rpc_req, rcode, resp))) {
|
||||
LOG_ERROR("set fetch log response fail", KR(ret), K(resp), K(rcode));
|
||||
LOG_ERROR("set fetch log response fail", KR(ret), K(rcode));
|
||||
} else {
|
||||
// success
|
||||
}
|
||||
|
@ -297,7 +297,7 @@ private:
|
||||
{
|
||||
public:
|
||||
explicit RpcCB(RpcRequest &host) : host_(host) {}
|
||||
virtual ~RpcCB() {}
|
||||
virtual ~RpcCB() { result_.reset(); }
|
||||
|
||||
public:
|
||||
rpc::frame::ObReqTransport::AsyncCB *clone(const rpc::frame::SPAlloc &alloc) const;
|
||||
|
@ -189,7 +189,7 @@ inline bool is_paxos_member_list_change(const LogConfigChangeType type)
|
||||
return (ADD_MEMBER == type || REMOVE_MEMBER == type
|
||||
|| ADD_MEMBER_AND_NUM == type || REMOVE_MEMBER_AND_NUM == type
|
||||
|| SWITCH_LEARNER_TO_ACCEPTOR == type || SWITCH_ACCEPTOR_TO_LEARNER == type
|
||||
|| CHANGE_REPLICA_NUM == type);
|
||||
|| CHANGE_REPLICA_NUM == type || SWITCH_LEARNER_TO_ACCEPTOR_AND_NUM == type);
|
||||
}
|
||||
|
||||
inline bool is_try_lock_config_change(const LogConfigChangeType type)
|
||||
|
@ -3507,6 +3507,7 @@ int LogSlidingWindow::submit_group_log(const LSN &lsn,
|
||||
// get log_task success
|
||||
}
|
||||
if (OB_SUCC(ret)) {
|
||||
log_task->lock();
|
||||
SCN min_scn;
|
||||
if (log_task->is_valid()) {
|
||||
if (lsn != log_task->get_begin_lsn()
|
||||
@ -3530,7 +3531,6 @@ int LogSlidingWindow::submit_group_log(const LSN &lsn,
|
||||
PALF_LOG(WARN, "try_update_max_lsn_ failed", K(ret), K_(palf_id), K_(self), K(lsn), K(group_entry_header));
|
||||
} else {
|
||||
// prev_log_proposal_id match or not exist, receive this log
|
||||
log_task->lock();
|
||||
if (log_task->is_valid()) {
|
||||
// log_task may have been filled concurrently by another thread; its content is expected to match this thread's.
|
||||
if (group_entry_header.get_log_proposal_id() != log_task->get_proposal_id()) {
|
||||
@ -3547,11 +3547,11 @@ int LogSlidingWindow::submit_group_log(const LSN &lsn,
|
||||
(void) log_task->set_freezed();
|
||||
log_task->set_freeze_ts(ObTimeUtility::current_time());
|
||||
}
|
||||
log_task->unlock();
|
||||
|
||||
PALF_LOG(TRACE, "submit_group_log", K(ret), K_(palf_id), K_(self), K(group_entry_header),
|
||||
K(log_id), KPC(log_task));
|
||||
}
|
||||
log_task->unlock();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -29,7 +29,7 @@ namespace logservice
|
||||
class ObLogService;
|
||||
class ObLogRestoreDriverBase
|
||||
{
|
||||
const int64_t FETCH_LOG_AHEAD_THRESHOLD_NS = 3 * 1000 * 1000 *1000L; // 3s
|
||||
const int64_t FETCH_LOG_AHEAD_THRESHOLD_NS = 6 * 1000 * 1000 *1000L; // 6s
|
||||
public:
|
||||
ObLogRestoreDriverBase();
|
||||
virtual ~ObLogRestoreDriverBase();
|
||||
|
@ -81,6 +81,7 @@ typedef enum ObItemType
|
||||
T_JSON = 47,
|
||||
T_GEOMETRY = 48,
|
||||
T_UDT_SQL = 49,
|
||||
T_COLLECTION = 51,
|
||||
T_ROARINGBITMAP = 52,
|
||||
|
||||
T_IEEE754_NAN = 61,
|
||||
@ -853,6 +854,7 @@ typedef enum ObItemType
|
||||
T_FUN_SYS_XML_EXISTSNODE = 1734,
|
||||
T_FUN_SYS_PRIV_ST_GEOHASH = 1735,
|
||||
T_FUN_SYS_PRIV_ST_MAKEPOINT = 1736,
|
||||
T_FUN_SYS_ARRAY = 1737,
|
||||
///< @note add new oracle only function type before this line
|
||||
|
||||
T_FUN_SYS_TABLET_AUTOINC_NEXTVAL = 1801, // add only for heap table
|
||||
@ -911,6 +913,9 @@ typedef enum ObItemType
|
||||
T_FUN_SYS_RB_AND_NULL2EMPTY = 2043,
|
||||
T_FUN_SYS_RB_OR_NULL2EMPTY = 2044,
|
||||
T_FUN_SYS_RB_ANDNOT_NULL2EMPTY = 2045,
|
||||
T_FUN_SYS_RB_TO_STRING = 2046,
|
||||
T_FUN_SYS_RB_FROM_STRING = 2047,
|
||||
T_FUN_SYS_RB_ITERATE = 2048,
|
||||
T_MAX_OP = 3000,
|
||||
|
||||
//pseudo column, to mark the group iterator id
|
||||
|
@ -62,6 +62,13 @@ using namespace sql;
|
||||
using namespace pl;
|
||||
namespace observer
|
||||
{
|
||||
|
||||
#ifdef ERRSIM
|
||||
ERRSIM_POINT_DEF(COM_STMT_PREXECUTE_PREPARE_ERROR);
|
||||
ERRSIM_POINT_DEF(COM_STMT_PREXECUTE_PS_CURSOR_OPEN_ERROR);
|
||||
ERRSIM_POINT_DEF(COM_STMT_PREXECUTE_EXECUTE_ERROR);
|
||||
#endif
|
||||
|
||||
ObMPStmtPrexecute::ObMPStmtPrexecute(const ObGlobalContext &gctx)
|
||||
: ObMPStmtExecute(gctx),
|
||||
sql_(),
|
||||
@ -244,10 +251,15 @@ int ObMPStmtPrexecute::before_process()
|
||||
LOG_WARN("fail to set session active", K(ret));
|
||||
}
|
||||
if (OB_SUCC(ret)) {
|
||||
if (OB_FAIL(gctx_.sql_engine_->stmt_prepare(sql_,
|
||||
if (
|
||||
#ifdef ERRSIM
|
||||
OB_FAIL(COM_STMT_PREXECUTE_PREPARE_ERROR) ||
|
||||
#endif
|
||||
OB_FAIL(gctx_.sql_engine_->stmt_prepare(sql_,
|
||||
get_ctx(),
|
||||
result,
|
||||
false/*is_inner_sql*/))) {
|
||||
false /*is_inner_sql*/))
|
||||
) {
|
||||
set_exec_start_timestamp(ObTimeUtility::current_time());
|
||||
int cli_ret = OB_SUCCESS;
|
||||
get_retry_ctrl().test_and_save_retry_state(gctx_,
|
||||
@ -473,7 +485,12 @@ int ObMPStmtPrexecute::execute_response(ObSQLSessionInfo &session,
|
||||
ObPLExecCtx pl_ctx(cursor->get_allocator(), &result.get_exec_context(), NULL/*params*/,
|
||||
NULL/*result*/, &ret, NULL/*func*/, true);
|
||||
get_ctx().cur_sql_ = sql_;
|
||||
if (OB_FAIL(ObSPIService::dbms_dynamic_open(&pl_ctx, *cursor))) {
|
||||
if (
|
||||
#ifdef ERRSIM
|
||||
OB_FAIL(COM_STMT_PREXECUTE_PS_CURSOR_OPEN_ERROR) ||
|
||||
#endif
|
||||
OB_FAIL(ObSPIService::dbms_dynamic_open(&pl_ctx, *cursor))
|
||||
) {
|
||||
LOG_WARN("cursor open faild.", K(cursor->get_id()));
|
||||
// select do not support arraybinding
|
||||
if (!THIS_WORKER.need_retry()) {
|
||||
@ -597,12 +614,17 @@ int ObMPStmtPrexecute::execute_response(ObSQLSessionInfo &session,
|
||||
ret = tmp_ret;
|
||||
LOG_WARN("execute server cursor failed.", K(ret));
|
||||
}
|
||||
} else if (OB_FAIL(gctx_.sql_engine_->stmt_execute(stmt_id_,
|
||||
stmt_type_,
|
||||
params,
|
||||
ctx,
|
||||
result,
|
||||
false /* is_inner_sql */))) {
|
||||
} else if (
|
||||
#ifdef ERRSIM
|
||||
OB_FAIL(COM_STMT_PREXECUTE_EXECUTE_ERROR) ||
|
||||
#endif
|
||||
OB_FAIL(gctx_.sql_engine_->stmt_execute(stmt_id_,
|
||||
stmt_type_,
|
||||
params,
|
||||
ctx,
|
||||
result,
|
||||
false /* is_inner_sql */))
|
||||
) {
|
||||
set_exec_start_timestamp(ObTimeUtility::current_time());
|
||||
if (!THIS_WORKER.need_retry()) {
|
||||
int cli_ret = OB_SUCCESS;
|
||||
|
@ -93,6 +93,7 @@ static const ObMySQLTypeMap type_maps_[ObMaxType] =
|
||||
{EMySQLFieldType::MYSQL_TYPE_GEOMETRY, BLOB_FLAG | BINARY_FLAG, 0}, /* ObGeometryType */
|
||||
{EMySQLFieldType::MYSQL_TYPE_COMPLEX, 0, 0}, /* ObUserDefinedSQLType */
|
||||
{EMySQLFieldType::MYSQL_TYPE_NEWDECIMAL, 0, 0}, /* ObDecimalIntType */
|
||||
{EMySQLFieldType::MYSQL_TYPE_STRING, 0, 0}, /* ObCollectionSQLType, will cast to string */
|
||||
/* ObMaxType */
|
||||
};
|
||||
|
||||
|
@ -377,7 +377,7 @@ int ObTableQueryAsyncP::get_query_session(uint64_t sessid, ObTableQueryAsyncSess
|
||||
LOG_WARN("fail to insert session to query map", K(ret), K(sessid));
|
||||
OB_DELETE(ObTableQueryAsyncSession, ObModIds::TABLE_PROC, query_session);
|
||||
} else {}
|
||||
} else if (ObQueryOperationType::QUERY_NEXT == arg_.query_type_) {
|
||||
} else if (ObQueryOperationType::QUERY_NEXT == arg_.query_type_ || ObQueryOperationType::QUERY_END == arg_.query_type_) {
|
||||
if (OB_FAIL(ObQueryAsyncMgr::get_instance().get_query_session(sessid, query_session))) {
|
||||
LOG_WARN("fail to get query session from query sync mgr", K(ret), K(sessid));
|
||||
} else if (OB_ISNULL(query_session)) {
|
||||
@ -609,6 +609,13 @@ int ObTableQueryAsyncP::process_query_next()
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ObTableQueryAsyncP::process_query_end()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
result_.is_end_ = true;
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ObTableQueryAsyncP::try_process()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
@ -627,6 +634,8 @@ int ObTableQueryAsyncP::try_process()
|
||||
ret = process_query_start();
|
||||
} else if (ObQueryOperationType::QUERY_NEXT == arg_.query_type_) {
|
||||
ret = process_query_next();
|
||||
} else if (ObQueryOperationType::QUERY_END == arg_.query_type_) {
|
||||
ret = process_query_end();
|
||||
}
|
||||
if (OB_FAIL(ret)) {
|
||||
LOG_WARN("query execution failed, need rollback", K(ret));
|
||||
@ -689,8 +698,8 @@ int ObTableQueryAsyncP::destory_query_session(bool need_rollback_trans)
|
||||
int ObTableQueryAsyncP::check_query_type()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
if (arg_.query_type_ != table::ObQueryOperationType::QUERY_START &&
|
||||
arg_.query_type_ != table::ObQueryOperationType::QUERY_NEXT){
|
||||
if (arg_.query_type_ < table::ObQueryOperationType::QUERY_START ||
|
||||
arg_.query_type_ >= table::ObQueryOperationType::QUERY_MAX) {
|
||||
ret = OB_INVALID_ARGUMENT;
|
||||
LOG_WARN("invalid query operation type", K(ret), K(arg_.query_type_));
|
||||
}
|
||||
|
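// The check_query_type() change above replaces an explicit whitelist of operation types with a
// range check, so a newly added type such as QUERY_END is accepted without touching the check.
// A tiny illustration with a hypothetical enum:
#include <cstdio>

enum class QueryOp { START = 0, NEXT, END, MAX };

static bool is_valid_query_op(QueryOp op)
{
  // valid iff START <= op < MAX; members added before MAX stay valid automatically
  return op >= QueryOp::START && op < QueryOp::MAX;
}

int main()
{
  std::printf("END valid: %d, MAX valid: %d\n",
              is_valid_query_op(QueryOp::END), is_valid_query_op(QueryOp::MAX));
  return 0;
}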
@ -226,6 +226,7 @@ protected:
|
||||
private:
|
||||
int process_query_start();
|
||||
int process_query_next();
|
||||
int process_query_end();
|
||||
int destory_query_session(bool need_rollback_trans);
|
||||
DISALLOW_COPY_AND_ASSIGN(ObTableQueryAsyncP);
|
||||
|
||||
|
@ -230,8 +230,11 @@ public:
|
||||
} else if (OB_FAIL(ctx_->store_ctx_->check_status(ObTableLoadStatusType::INITED))) {
|
||||
LOG_WARN("fail to check status", KR(ret));
|
||||
} else if (OB_FAIL(tablet_ctx->open())) {
|
||||
LOG_WARN("fail to open tablet context", KR(ret));
|
||||
ret = OB_SUCCESS;
|
||||
LOG_WARN("fail to open tablet context", KR(ret), K(tablet_id));
|
||||
if (ret == OB_EAGAIN) {
|
||||
LOG_WARN("retry to open tablet context", K(tablet_id));
|
||||
ret = OB_SUCCESS;
|
||||
}
|
||||
} else {
|
||||
ctx_->store_ctx_->handle_open_insert_tablet_ctx_finish(is_finish);
|
||||
break;
|
||||
|
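// The tablet-open change above no longer swallows every open() failure: only the retryable
// code (OB_EAGAIN in the hunk) resets ret and loops again, while any other error is surfaced
// to the caller. A generic sketch of retrying on one specific code; the codes and try_open()
// below are hypothetical stand-ins:
#include <cstdio>

constexpr int SUCCESS = 0;
constexpr int RETRYABLE = -1;  // stand-in for OB_EAGAIN

static int try_open(int attempt) { return attempt < 2 ? RETRYABLE : SUCCESS; }

int main()
{
  int ret = SUCCESS;
  for (int attempt = 0; ; ++attempt) {
    ret = try_open(attempt);
    if (SUCCESS == ret) {
      break;  // opened, leave the loop
    } else if (RETRYABLE == ret) {
      std::printf("retrying open, attempt=%d\n", attempt);  // retryable: try again
      continue;
    } else {
      break;  // hard error: report it upward unchanged
    }
  }
  std::printf("final ret=%d\n", ret);
  return SUCCESS == ret ? 0 : 1;
}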
@ -101,6 +101,10 @@ int ObAllVirtualTxData::fill_in_row_(const VirtualTxDataRow &row_data, common::O
|
||||
cur_row_.cells_[i].set_varchar(row_data.undo_status_list_str_);
|
||||
cur_row_.cells_[i].set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset()));
|
||||
break;
|
||||
case TX_OP_COL:
|
||||
cur_row_.cells_[i].set_varchar(row_data.tx_op_str_);
|
||||
cur_row_.cells_[i].set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset()));
|
||||
break;
|
||||
default:
|
||||
ret = OB_ERR_UNEXPECTED;
|
||||
break;
|
||||
@ -207,4 +211,4 @@ int ObAllVirtualTxData::generate_virtual_tx_data_row_(VirtualTxDataRow &tx_data_
|
||||
}
|
||||
|
||||
} // namespace observer
|
||||
} // namespace oceanbase
|
||||
} // namespace oceanbase
|
||||
|
@ -35,10 +35,11 @@ struct VirtualTxDataRow {
|
||||
share::SCN end_scn_;
|
||||
share::SCN commit_version_;
|
||||
char undo_status_list_str_[common::MAX_UNDO_LIST_CHAR_LENGTH];
|
||||
char tx_op_str_[common::MAX_TX_OP_CHAR_LENGTH];
|
||||
|
||||
VirtualTxDataRow() : state_(0), start_scn_(), end_scn_(), commit_version_() {}
|
||||
|
||||
TO_STRING_KV(K(state_), K(start_scn_), K(end_scn_), K(commit_version_), K(undo_status_list_str_));
|
||||
TO_STRING_KV(K(state_), K(start_scn_), K(end_scn_), K(commit_version_), K(undo_status_list_str_), K(tx_op_str_));
|
||||
};
|
||||
|
||||
class ObAllVirtualTxData : public common::ObVirtualTableScannerIterator {
|
||||
@ -53,7 +54,8 @@ private:
|
||||
START_SCN_COL,
|
||||
END_SCN_COL,
|
||||
COMMIT_VERSION_COL,
|
||||
UNDO_STATUS_COL
|
||||
UNDO_STATUS_COL,
|
||||
TX_OP_COL
|
||||
};
|
||||
|
||||
|
||||
|
@ -2873,6 +2873,17 @@ int ObPLExecCtx::get_user_type(uint64_t type_id,
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ObPLExecCtx::calc_expr(uint64_t package_id, int64_t expr_idx, ObObjParam &result)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
if (OB_INVALID_ID == package_id) {
|
||||
OZ (ObSPIService::spi_calc_expr_at_idx(this, expr_idx, OB_INVALID_INDEX, &result));
|
||||
} else {
|
||||
OZ (ObSPIService::spi_calc_package_expr(this, package_id, expr_idx, &result));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ObPLExecState::final(int ret)
|
||||
{
|
||||
int tmp_ret = OB_SUCCESS;
|
||||
@ -3971,7 +3982,8 @@ int ObPLExecState::check_pl_execute_priv(ObSchemaGetterGuard &guard,
|
||||
OBJ_PRIV_ID_EXECUTE,
|
||||
CHECK_FLAG_NORMAL,
|
||||
obj_owner_id,
|
||||
role_id_array));
|
||||
role_id_array),
|
||||
K(obj_tenant_id), K(user_id), K(database_name), K(obj_id), K(obj_owner_id), K(role_id_array));
|
||||
}
|
||||
if (ROUTINE_SCHEMA == schema_type && ret == OB_TABLE_NOT_EXIST) {
|
||||
ret = OB_WRONG_COLUMN_NAME;
|
||||
@ -4453,11 +4465,18 @@ int ObPLFunction::is_special_pkg_invoke_right(ObSchemaGetterGuard &guard, bool &
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ObPLINS::calc_expr(uint64_t package_id, int64_t expr_idx, ObObjParam &result)
|
||||
{
|
||||
int ret = OB_NOT_SUPPORTED;
|
||||
LOG_USER_WARN(OB_NOT_SUPPORTED, "call expr on base class ObIPLNS");
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ObPLINS::init_complex_obj(ObIAllocator &allocator,
|
||||
const ObPLDataType &pl_type,
|
||||
common::ObObjParam &obj,
|
||||
bool set_allocator,
|
||||
bool set_null) const
|
||||
bool set_null)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
int64_t init_size = 0;
|
||||
@ -4489,16 +4508,36 @@ int ObPLINS::init_complex_obj(ObIAllocator &allocator,
|
||||
OX (record = reinterpret_cast<ObPLRecord*>(ptr));
|
||||
for (int64_t i = 0; OB_SUCC(ret) && i < record_type->get_member_count(); ++i) {
|
||||
CK (OB_NOT_NULL(record_type->get_member(i)));
|
||||
CK (OB_NOT_NULL(record_type->get_record_member(i)));
|
||||
OZ (record->get_element(i, member));
|
||||
CK (OB_NOT_NULL(member));
|
||||
if (record_type->get_member(i)->is_obj_type()) {
|
||||
OX (new (member) ObObj(ObNullType));
|
||||
OX (new (member) ObObj(ObNullType));
|
||||
if (OB_FAIL(ret)) {
|
||||
} else if (record_type->get_record_member(i)->get_default() != OB_INVALID_INDEX) {
|
||||
ObObjParam default_v;
|
||||
if (record_type->is_package_type()) {
|
||||
OZ (calc_expr(extract_package_id(pl_type.get_user_type_id()),
|
||||
record_type->get_record_member(i)->get_default(),
|
||||
default_v));
|
||||
} else {
|
||||
OZ (calc_expr(OB_INVALID_ID,
|
||||
record_type->get_record_member(i)->get_default(),
|
||||
default_v));
|
||||
}
|
||||
if (OB_FAIL(ret)) {
|
||||
} else if (record_type->get_member(i)->is_obj_type()) {
|
||||
OZ (deep_copy_obj(allocator, default_v, *member));
|
||||
} else {
|
||||
OZ (ObUserDefinedType::deep_copy_obj(allocator, default_v, *member));
|
||||
}
|
||||
} else {
|
||||
int64_t init_size = OB_INVALID_SIZE;
|
||||
int64_t member_ptr = 0;
|
||||
OZ (record_type->get_member(i)->get_size(PL_TYPE_INIT_SIZE, init_size));
|
||||
OZ (record_type->get_member(i)->newx(allocator, this, member_ptr));
|
||||
OX (member->set_extend(member_ptr, record_type->get_member(i)->get_type(), init_size));
|
||||
if (!record_type->get_member(i)->is_obj_type()) {
|
||||
int64_t init_size = OB_INVALID_SIZE;
|
||||
int64_t member_ptr = 0;
|
||||
OZ (record_type->get_member(i)->get_size(PL_TYPE_INIT_SIZE, init_size));
|
||||
OZ (record_type->get_member(i)->newx(allocator, this, member_ptr));
|
||||
OX (member->set_extend(member_ptr, record_type->get_member(i)->get_type(), init_size));
|
||||
}
|
||||
}
|
||||
}
|
||||
// f(self object_type, p1 out object_type), p1 will be init here, we have to set it null
|
||||
|
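// The init_complex_obj() change above evaluates a record member's declared default-value
// expression (via calc_expr) instead of always initializing the member to NULL, and only falls
// back to a fresh null or complex value when no default exists. A small generic sketch of that
// per-member decision; every name below is an illustrative stand-in:
#include <cstdio>
#include <functional>
#include <optional>
#include <string>
#include <vector>

struct MemberDef {
  std::string name;
  std::optional<std::function<int()>> default_expr;  // absent => no default declared
};

int main()
{
  const std::vector<MemberDef> layout = {
      {"a", std::nullopt},       // no default: gets the null sentinel
      {"b", [] { return 42; }},  // default expression: evaluated at init time
  };
  for (const MemberDef &m : layout) {
    const int value = m.default_expr ? (*m.default_expr)() : -1;  // -1 plays the NULL role here
    std::printf("%s = %d\n", m.name.c_str(), value);
  }
  return 0;
}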
@ -95,7 +95,9 @@ public:
|
||||
const ObPLDataType &pl_type,
|
||||
common::ObObjParam &obj,
|
||||
bool set_allocator = false,
|
||||
bool set_null = true) const;
|
||||
bool set_null = true);
|
||||
|
||||
virtual int calc_expr(uint64_t package_id, int64_t expr_idx, ObObjParam &result);
|
||||
};
|
||||
|
||||
class ObPLFunctionBase
|
||||
@ -645,8 +647,8 @@ struct ObPLExecCtx : public ObPLINS
|
||||
virtual int get_user_type(uint64_t type_id,
|
||||
const ObUserDefinedType *&user_type,
|
||||
ObIAllocator *allocator = NULL) const;
|
||||
virtual int calc_expr(uint64_t package_id, int64_t expr_idx, ObObjParam &result);
|
||||
|
||||
//Note: no virtual functions here, so llvm does not have to deal with vtables when parsing
|
||||
common::ObIAllocator *allocator_;
|
||||
sql::ObExecContext *exec_ctx_;
|
||||
ParamStore *params_; // param store, corresponds to the symbol table of the PL function
|
||||
|
@ -2635,9 +2635,11 @@ int ObPLResolver::collect_dep_info_by_schema(const ObPLResolveCtx &ctx,
|
||||
if (table_schema->is_view_table() && !table_schema->is_materialized_view()) {
|
||||
OZ (collect_dep_info_by_view_schema(ctx, table_schema, dependency_objects));
|
||||
} else {
|
||||
OZ(dependency_objects.push_back(ObSchemaObjVersion(table_schema->get_table_id(),
|
||||
table_schema->get_schema_version(),
|
||||
ObDependencyTableType::DEPENDENCY_TABLE)));
|
||||
ObSchemaObjVersion version(table_schema->get_table_id(),
|
||||
table_schema->get_schema_version(),
|
||||
ObDependencyTableType::DEPENDENCY_TABLE);
|
||||
version.is_db_explicit_ = ctx.session_info_.get_database_id() != table_schema->get_database_id();
|
||||
OZ(dependency_objects.push_back(version));
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
@ -2674,9 +2676,11 @@ int ObPLResolver::build_record_type_by_schema(
|
||||
OZ (build_record_type_by_table_schema(
|
||||
resolve_ctx.schema_guard_, resolve_ctx.allocator_, table_schema, record_type, with_rowid));
|
||||
if (OB_NOT_NULL(dependency_objects)) {
|
||||
OZ(dependency_objects->push_back(ObSchemaObjVersion(table_schema->get_table_id(),
|
||||
table_schema->get_schema_version(),
|
||||
ObDependencyTableType::DEPENDENCY_TABLE)));
|
||||
ObSchemaObjVersion version(table_schema->get_table_id(),
|
||||
table_schema->get_schema_version(),
|
||||
ObDependencyTableType::DEPENDENCY_TABLE);
|
||||
version.is_db_explicit_ = resolve_ctx.session_info_.get_database_id() != table_schema->get_database_id();
|
||||
OZ(dependency_objects->push_back(version));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2217,9 +2217,9 @@ int ObPLExternalNS::resolve_external_routine(const ObString &db_name,
|
||||
LOG_WARN("add dependency object failed", "package_id", schema_routine_info->get_package_id(), K(ret));
|
||||
} else if (synonym_checker.has_synonym()) {
|
||||
if (OB_FAIL(ObResolverUtils::add_dependency_synonym_object(&resolve_ctx_.schema_guard_,
|
||||
&resolve_ctx_.session_info_,
|
||||
synonym_checker,
|
||||
*get_dependency_table()))) {
|
||||
&resolve_ctx_.session_info_,
|
||||
synonym_checker,
|
||||
*get_dependency_table()))) {
|
||||
LOG_WARN("add dependency synonym failed", K(ret));
|
||||
}
|
||||
}
|
||||
|
@ -1208,6 +1208,8 @@ public:
|
||||
inline const ObPLBlockNS *get_parent_ns() const { return parent_ns_; }
|
||||
inline const ObPLResolveCtx &get_resolve_ctx() { return resolve_ctx_; }
|
||||
inline const ObPLDependencyTable *get_dependency_table() const { return dependency_table_; }
|
||||
|
||||
inline ObPLDependencyTable *get_dependency_table() { return dependency_table_; }
|
||||
inline void set_dependency_table(ObPLDependencyTable *dependency_table) { dependency_table_ = dependency_table; }
|
||||
int add_dependency_object(const share::schema::ObSchemaObjVersion &obj_version) const;
|
||||
|
||||
|
@ -444,9 +444,11 @@ int ObPLObjectValue::check_value_version(share::schema::ObSchemaGetterGuard *sch
|
||||
OZ (obtain_new_column_infos(*schema_guard, schema_obj2, column_infos));
|
||||
OX (is_old_version = !schema_obj1->match_columns(column_infos));
|
||||
} else {
|
||||
LOG_WARN("mismatched schema objs", K(*schema_obj1), K(schema_obj2), K(i));
|
||||
is_old_version = true;
|
||||
}
|
||||
if (OB_SUCC(ret) && is_old_version) {
|
||||
LOG_WARN("mismatched schema objs", K(*schema_obj1), K(schema_obj2), K(i));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -719,6 +721,7 @@ int ObPLObjectValue::match_dep_schema(const ObPLCacheCtx &pc_ctx,
|
||||
&& !stored_schema_objs_.at(i)->match_compare(schema_array.at(i))) {
|
||||
// check whether common table name is same as system table in oracle mode
|
||||
is_same = false;
|
||||
LOG_WARN("mismatched schema objs", K(*stored_schema_objs_.at(i)), K(stored_schema_objs_.at(i)), K(i));
|
||||
} else {
|
||||
// do nothing
|
||||
}
|
||||
|
@ -32,6 +32,7 @@
|
||||
#include "share/stat/ob_opt_stat_gather_stat.h"
|
||||
#include "sql/engine/expr/ob_expr_uuid.h"
|
||||
#include "sql/privilege_check/ob_ora_priv_check.h"
|
||||
#include "sql/ob_result_set.h"
|
||||
|
||||
namespace oceanbase
|
||||
{
|
||||
@ -76,6 +77,8 @@ int ObDbmsStats::gather_table_stats(ObExecContext &ctx, ParamStore ¶ms, ObOb
|
||||
int64_t start_time = ObTimeUtility::current_time();
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_ISNULL(ctx.get_my_session()) || OB_ISNULL(ctx.get_task_executor_ctx())) {
|
||||
ret = OB_ERR_UNEXPECTED;
|
||||
LOG_WARN("get unexpected error", K(ret), K(ctx.get_my_session()), K(ctx.get_task_executor_ctx()));
|
||||
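// Every dbms_stats entry point touched in this file gains the same new early guard: commit any
// open transaction (implicit_commit_before_gather_stats) before statistics work starts, wired
// into the usual sequential if/else-if guard chain that stops at the first failing step. A
// self-contained sketch of that chain; the step functions are hypothetical stand-ins:
#include <cstdio>

constexpr int SUCCESS = 0;

static int check_writeable() { return SUCCESS; }
static int implicit_commit() { return SUCCESS; }
static int gather()          { return SUCCESS; }

static int gather_table_stats()
{
  int ret = SUCCESS;
  if (SUCCESS != (ret = check_writeable())) {
    std::fprintf(stderr, "failed to check tenant is restore, ret=%d\n", ret);
  } else if (SUCCESS != (ret = implicit_commit())) {
    std::fprintf(stderr, "failed to implicit commit before gather stats, ret=%d\n", ret);
  } else if (SUCCESS != (ret = gather())) {
    std::fprintf(stderr, "failed to gather stats, ret=%d\n", ret);
  }
  return ret;  // the first failure short-circuits every later step
}

int main() { return gather_table_stats(); }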
@ -176,6 +179,8 @@ int ObDbmsStats::gather_schema_stats(ObExecContext &ctx, ParamStore ¶ms, ObO
|
||||
int64_t start_time = ObTimeUtility::current_time();
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_ISNULL(ctx.get_my_session()) || OB_ISNULL(ctx.get_task_executor_ctx())) {
|
||||
ret = OB_ERR_UNEXPECTED;
|
||||
LOG_WARN("get unexpected error", K(ret), K(ctx.get_my_session()), K(ctx.get_task_executor_ctx()));
|
||||
@ -309,6 +314,8 @@ int ObDbmsStats::gather_index_stats(ObExecContext &ctx, ParamStore ¶ms, ObOb
|
||||
empty_cascade.set_null();
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (lib::is_oracle_mode() && !params.at(11).is_null()) {
|
||||
ret = OB_ERR_DBMS_STATS_PL;
|
||||
LOG_WARN("table name shouldn't be specified in gather index stats", K(ret));
|
||||
@ -528,6 +535,8 @@ int ObDbmsStats::set_table_stats(ObExecContext &ctx, ParamStore ¶ms, ObObj &
|
||||
param.table_param_.allocator_ = &ctx.get_allocator();
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(parse_set_table_info(ctx,
|
||||
params.at(0),
|
||||
params.at(1),
|
||||
@ -606,6 +615,8 @@ int ObDbmsStats::set_column_stats(sql::ObExecContext &ctx,
|
||||
param.table_param_.allocator_ = &ctx.get_allocator();
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (params.at(2).is_null() && !params.at(1).is_null()) {
|
||||
//do nothing
|
||||
} else if (OB_FAIL(parse_set_column_stats(ctx,
|
||||
@ -613,6 +624,7 @@ int ObDbmsStats::set_column_stats(sql::ObExecContext &ctx,
|
||||
params.at(1),
|
||||
params.at(2),
|
||||
params.at(3),
|
||||
param.col_meta_,
|
||||
param.table_param_))) {
|
||||
LOG_WARN("failed to parse set column stats", K(ret));
|
||||
} else if (OB_FAIL(parse_set_column_stats_options(ctx,
|
||||
@ -700,6 +712,8 @@ int ObDbmsStats::set_index_stats(ObExecContext &ctx, ParamStore ¶ms, ObObj &
|
||||
number::ObNumber num_nummicroblks;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (lib::is_oracle_mode() && !params.at(22).is_null()) {
|
||||
ret = OB_ERR_DBMS_STATS_PL;
|
||||
LOG_WARN("table name shouldn't be specified in gather index stats", K(ret));
|
||||
@ -786,6 +800,8 @@ int ObDbmsStats::delete_table_stats(ObExecContext &ctx, ParamStore ¶ms, ObOb
|
||||
bool cascade_indexes = false;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(parse_table_part_info(ctx,
|
||||
params.at(0),
|
||||
params.at(1),
|
||||
@ -866,6 +882,8 @@ int ObDbmsStats::delete_column_stats(ObExecContext &ctx, ParamStore ¶ms, ObO
|
||||
bool only_histogram = false;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(parse_table_part_info(ctx,
|
||||
params.at(0),
|
||||
params.at(1),
|
||||
@ -941,6 +959,8 @@ int ObDbmsStats::delete_schema_stats(ObExecContext &ctx, ParamStore ¶ms, ObO
|
||||
ObSEArray<uint64_t, 4> table_ids;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (ctx.get_my_session()->get_is_in_retry()) {
|
||||
ret = OB_ERR_DBMS_STATS_PL;
|
||||
LOG_WARN("retry delete schema stats is not allowed", K(ret));
|
||||
@ -1029,6 +1049,8 @@ int ObDbmsStats::delete_index_stats(ObExecContext &ctx, ParamStore ¶ms, ObOb
|
||||
bool only_histogram = false;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (lib::is_oracle_mode() && !params.at(10).is_null()) {
|
||||
ret = OB_ERR_DBMS_STATS_PL;
|
||||
LOG_WARN("table name shouldn't be specified in gather index stats", K(ret));
|
||||
@ -1289,6 +1311,8 @@ int ObDbmsStats::export_table_stats(ObExecContext &ctx, ParamStore ¶ms, ObOb
|
||||
const share::schema::ObTableSchema *table_schema = NULL;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(parse_table_part_info(ctx,
|
||||
params.at(0),
|
||||
params.at(1),
|
||||
@ -1367,6 +1391,8 @@ int ObDbmsStats::export_column_stats(sql::ObExecContext &ctx,
|
||||
stat_param.cascade_ = true;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(parse_table_part_info(ctx,
|
||||
params.at(0),
|
||||
params.at(1),
|
||||
@ -1427,6 +1453,8 @@ int ObDbmsStats::export_schema_stats(ObExecContext &ctx, ParamStore ¶ms, ObO
|
||||
ObString tmp_str;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (ctx.get_my_session()->get_is_in_retry()) {
|
||||
ret = OB_ERR_DBMS_STATS_PL;
|
||||
LOG_WARN("retry export schema stats is not allowed", K(ret));
|
||||
@ -1506,6 +1534,8 @@ int ObDbmsStats::export_index_stats(ObExecContext &ctx, ParamStore ¶ms, ObOb
|
||||
const share::schema::ObTableSchema *table_schema = NULL;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (lib::is_oracle_mode() && !params.at(6).is_null()) {
|
||||
ret = OB_ERR_DBMS_STATS_PL;
|
||||
LOG_WARN("table name shouldn't be specified in gather index stats", K(ret));
|
||||
@ -1615,6 +1645,8 @@ int ObDbmsStats::import_table_stats(ObExecContext &ctx, ParamStore ¶ms, ObOb
|
||||
const share::schema::ObTableSchema *table_schema = NULL;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(parse_table_part_info(ctx,
|
||||
params.at(0),
|
||||
params.at(1),
|
||||
@ -1712,6 +1744,8 @@ int ObDbmsStats::import_column_stats(sql::ObExecContext &ctx,
|
||||
stat_param.cascade_ = true;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(parse_table_part_info(ctx,
|
||||
params.at(0),
|
||||
params.at(1),
|
||||
@ -1783,6 +1817,8 @@ int ObDbmsStats::import_schema_stats(ObExecContext &ctx, ParamStore ¶ms, ObO
|
||||
ObSEArray<uint64_t, 4> table_ids;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (ctx.get_my_session()->get_is_in_retry()) {
|
||||
ret = OB_ERR_DBMS_STATS_PL;
|
||||
LOG_WARN("retry import schema stats is not allowed", K(ret));
|
||||
@ -1885,6 +1921,8 @@ int ObDbmsStats::import_index_stats(ObExecContext &ctx, ParamStore ¶ms, ObOb
|
||||
const share::schema::ObTableSchema *table_schema = NULL;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (lib::is_oracle_mode() && !params.at(8).is_null()) {
|
||||
ret = OB_ERR_DBMS_STATS_PL;
|
||||
LOG_WARN("table name shouldn't be specified in gather index stats", K(ret));
|
||||
@ -2009,6 +2047,8 @@ int ObDbmsStats::lock_table_stats(sql::ObExecContext &ctx,
|
||||
ObString stat_type_str;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(parse_table_part_info(ctx,
|
||||
params.at(0),
|
||||
params.at(1),
|
||||
@ -2060,6 +2100,8 @@ int ObDbmsStats::lock_partition_stats(sql::ObExecContext &ctx,
|
||||
stat_param.stattype_ = StatTypeLocked::PARTITION_ALL_TYPE;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (params.at(2).is_null()) {
|
||||
ret = OB_ERR_DBMS_STATS_PL;
|
||||
LOG_WARN("partition not specified", K(ret));
|
||||
@ -2108,6 +2150,8 @@ int ObDbmsStats::lock_schema_stats(sql::ObExecContext &ctx,
|
||||
ObSEArray<uint64_t, 4> table_ids;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (ctx.get_my_session()->get_is_in_retry()) {
|
||||
ret = OB_ERR_DBMS_STATS_PL;
|
||||
LOG_WARN("retry lock schema stats is not allowed", K(ret));
|
||||
@ -2218,6 +2262,8 @@ int ObDbmsStats::unlock_table_stats(sql::ObExecContext &ctx,
|
||||
stat_param.stattype_ = StatTypeLocked::TABLE_ALL_TYPE;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(parse_table_part_info(ctx,
|
||||
params.at(0),
|
||||
params.at(1),
|
||||
@ -2270,6 +2316,8 @@ int ObDbmsStats::unlock_partition_stats(sql::ObExecContext &ctx,
|
||||
stat_param.stattype_ = StatTypeLocked::PARTITION_ALL_TYPE;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (params.at(2).is_null()) {
|
||||
ret = OB_ERR_DBMS_STATS_PL;
|
||||
LOG_WARN("partition not specified", K(ret));
|
||||
@ -2394,6 +2442,8 @@ int ObDbmsStats::restore_table_stats(sql::ObExecContext &ctx,
|
||||
int64_t specify_time = 0;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(parse_table_part_info(ctx,
|
||||
params.at(0),
|
||||
params.at(1),
|
||||
@ -2500,6 +2550,8 @@ int ObDbmsStats::restore_schema_stats(sql::ObExecContext &ctx,
|
||||
int64_t specify_time = 0;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (ctx.get_my_session()->get_is_in_retry()) {
|
||||
ret = OB_ERR_DBMS_STATS_PL;
|
||||
LOG_WARN("retry restore schema stats is not allowed", K(ret));
|
||||
@ -2585,6 +2637,8 @@ int ObDbmsStats::purge_stats(sql::ObExecContext &ctx,
|
||||
int64_t specify_time = -1;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (lib::is_oracle_mode()) {
|
||||
if (!params.at(0).is_null() && !params.at(0).is_timestamp_tz()) {
|
||||
ret = OB_INVALID_ARGUMENT;
|
||||
@ -2643,6 +2697,8 @@ int ObDbmsStats::alter_stats_history_retention(sql::ObExecContext &ctx,
|
||||
double retention_tmp = 0.0; // bugfix:
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (!params.at(0).is_null() && OB_FAIL(params.at(0).get_number(num_retention))) {
|
||||
LOG_WARN("failed to get epc", K(ret));
|
||||
} else if (!params.at(0).is_null() &&
|
||||
@ -2764,6 +2820,8 @@ int ObDbmsStats::reset_global_pref_defaults(sql::ObExecContext &ctx,
|
||||
UNUSED(result);
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsPreferences::reset_global_pref_defaults(ctx))) {
|
||||
LOG_WARN("failed to reset global pref defaults");
|
||||
} else {/*do nothing*/}
|
||||
@ -2838,6 +2896,8 @@ int ObDbmsStats::set_global_prefs(sql::ObExecContext &ctx,
|
||||
ObStatPrefs *stat_pref = NULL;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (!params.at(0).is_null() && OB_FAIL(params.at(0).get_string(opt_name))) {
|
||||
LOG_WARN("failed to get string", K(ret), K(params.at(0)));
|
||||
} else if (!params.at(0).is_null() &&
|
||||
@ -2892,6 +2952,8 @@ int ObDbmsStats::set_schema_prefs(sql::ObExecContext &ctx,
|
||||
ObStatPrefs *stat_pref = NULL;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (ctx.get_my_session()->get_is_in_retry()) {
|
||||
ret = OB_ERR_DBMS_STATS_PL;
|
||||
LOG_WARN("retry set schema stats is not allowed", K(ret));
|
||||
@ -2956,6 +3018,8 @@ int ObDbmsStats::set_table_prefs(sql::ObExecContext &ctx,
|
||||
bool use_size_auto = false;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(parse_table_part_info(ctx, params.at(0), params.at(1), dummy_param, param))) {
|
||||
LOG_WARN("failed to get string", K(ret));
|
||||
} else if (OB_FAIL(table_ids.push_back(param.table_id_))) {
|
||||
@ -3015,6 +3079,8 @@ int ObDbmsStats::delete_schema_prefs(sql::ObExecContext &ctx,
|
||||
ObStatPrefs *stat_pref = NULL;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (ctx.get_my_session()->get_is_in_retry()) {
|
||||
ret = OB_ERR_DBMS_STATS_PL;
|
||||
LOG_WARN("retry delete schema stats is not allowed", K(ret));
|
||||
@ -3067,6 +3133,8 @@ int ObDbmsStats::delete_table_prefs(sql::ObExecContext &ctx,
|
||||
ObStatPrefs *stat_pref = NULL;
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(parse_table_part_info(ctx, params.at(0), params.at(1), dummy_param, param))) {
|
||||
LOG_WARN("failed to get string", K(ret));
|
||||
} else if (OB_FAIL(table_ids.push_back(param.table_id_))) {
|
||||
@ -3583,6 +3651,7 @@ int ObDbmsStats::parse_set_column_stats(ObExecContext &ctx,
|
||||
const ObObjParam &tab_name,
|
||||
const ObObjParam &colname,
|
||||
const ObObjParam &part_name,
|
||||
ObObjMeta &col_meta,
|
||||
ObTableStatParam ¶m)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
@ -3638,6 +3707,7 @@ int ObDbmsStats::parse_set_column_stats(ObExecContext &ctx,
|
||||
} else {
|
||||
col_param.column_id_ = col->get_column_id();
|
||||
col_param.cs_type_ = col->get_collation_type();
|
||||
col_meta = col->get_meta_type();
|
||||
col_param.gather_flag_ = 0;
|
||||
col_param.bucket_num_ = -1;
|
||||
if (col->is_index_column()) {
|
||||
@ -4095,7 +4165,12 @@ int ObDbmsStats::get_default_stat_options(ObExecContext &ctx,
|
||||
}
|
||||
}
|
||||
if (OB_SUCC(ret) && stat_options & StatOptionFlags::OPT_BLOCK_SAMPLE) {
|
||||
param.sample_info_.set_is_block_sample(false);
|
||||
ObBlockSamplePrefs *tmp_pref = NULL;
|
||||
if (OB_FAIL(new_stat_prefs(*param.allocator_, ctx.get_my_session(), ObString(), tmp_pref))) {
|
||||
LOG_WARN("failed to new stat prefs", K(ret));
|
||||
} else if (OB_FAIL(stat_prefs.push_back(tmp_pref))) {
|
||||
LOG_WARN("failed to push back", K(ret));
|
||||
}
|
||||
}
|
||||
if (OB_SUCC(ret) && stat_options & StatOptionFlags::OPT_METHOD_OPT) {
|
||||
ObMethodOptPrefs *tmp_pref = NULL;
|
||||
@ -4793,10 +4868,8 @@ int ObDbmsStats::parse_set_hist_stats_options(ObExecContext &ctx,
|
||||
number::ObNumber num_eavs;
|
||||
if (!epc.is_null() && OB_FAIL(epc.get_number(num_epc))) {
|
||||
LOG_WARN("failed to get epc", K(ret));
|
||||
} else if (!minval.is_null() && OB_FAIL(minval.get_raw(hist_param.minval_))) {
|
||||
LOG_WARN("failed to get minval", K(ret));
|
||||
} else if (!maxval.is_null() && OB_FAIL(maxval.get_raw(hist_param.maxval_))) {
|
||||
LOG_WARN("failed to get maxval", K(ret));
|
||||
} else if (!minval.is_null() && FALSE_IT(hist_param.minval_ = &minval)) {
|
||||
} else if (!maxval.is_null() && FALSE_IT(hist_param.maxval_ = &maxval)) {
|
||||
} else if (OB_FAIL(parser_pl_numarray(bkvals, hist_param.bkvals_))) {
|
||||
LOG_WARN("failed to parser pl numarray", K(ret));
|
||||
} else if (OB_FAIL(parser_pl_numarray(novals, hist_param.novals_))) {
|
||||
@ -5369,6 +5442,8 @@ int ObDbmsStats::gather_database_stats_job_proc(sql::ObExecContext &ctx,
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
ret = OB_SUCCESS;
|
||||
LOG_INFO("auto gather database statistics abort because of statistic table is unwriteable");
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (!ctx.get_my_session()->is_user_session() && no_auto_gather) {
|
||||
//do nothing
|
||||
LOG_INFO("auto gather stat abort because of the trace point and not user seesion",
|
||||
@ -5424,53 +5499,28 @@ int ObDbmsStats::gather_database_table_stats(sql::ObExecContext &ctx,
|
||||
} else if (OB_FALSE_IT(tenant_id = session->get_effective_tenant_id())) {
|
||||
} else if (is_virtual_tenant_id(tenant_id)) {
|
||||
// do nothing
|
||||
} else if (OB_FAIL(ObBasicStatsEstimator::get_need_stats_table_cnt(ctx, tenant_id,
|
||||
task_info.task_table_count_))) {
|
||||
LOG_WARN("failed to get all tables count", K(ret));
|
||||
} else {
|
||||
int64_t slice_cnt = 10000; // maximum tables we can gather stats at each iteration
|
||||
int64_t tmp_succeed = 0;
|
||||
int64_t offset = 0;
|
||||
do {
|
||||
table_ids.reuse();
|
||||
tmp_succeed = succeed_cnt;
|
||||
if (OB_FAIL(THIS_WORKER.check_status())) {
|
||||
LOG_WARN("check status failed", KR(ret));
|
||||
} else if (OB_FAIL(ObBasicStatsEstimator::get_need_stats_tables(ctx, tenant_id, table_ids, slice_cnt))) {
|
||||
LOG_WARN("failed to get tables that need gather stats", K(ret));
|
||||
} else if (OB_FAIL(do_gather_tables_stats(ctx, tenant_id, table_ids,
|
||||
duration_time, succeed_cnt, task_info))) {
|
||||
LOG_WARN("failed to gather table stats", K(ret));
|
||||
}
|
||||
LOG_INFO("succeed to gather table stats", K(ret), K(table_ids.count()), K(slice_cnt),
|
||||
K(tmp_succeed), K(duration_time), K(succeed_cnt));
|
||||
// case that we can break the loop:
|
||||
// 1. #table_ids < slice_cnt, which means that we have fetched all the tables we need to gather stats
|
||||
// 2. duration_time_ = -1, and has reached the ob_query_timeout session variable limit
|
||||
// 3. duration_time is not -1, and the time we cost to gather stats has reached duration_time
|
||||
} while (OB_SUCC(ret) && table_ids.count() == slice_cnt && (succeed_cnt - tmp_succeed) != 0);
|
||||
// gather virtual table stats
|
||||
ObSEArray<uint64_t, 256> all_table_ids;
|
||||
if (OB_FAIL(ret)) {
|
||||
} else if (OB_ISNULL(ctx.get_virtual_table_ctx().schema_guard_)) {
|
||||
ret = OB_ERR_UNEXPECTED;
|
||||
LOG_WARN("get unexpected error", K(ret), K(ctx.get_virtual_table_ctx().schema_guard_));
|
||||
} else if (OB_FAIL(ctx.get_virtual_table_ctx().schema_guard_->get_table_ids_in_tenant(tenant_id, all_table_ids))) {
|
||||
LOG_WARN("failed to get virtual table ids in tenant", K(ret));
|
||||
} else {
|
||||
for (int64_t i = 0; OB_SUCC(ret) && i < all_table_ids.count(); ++i) {
|
||||
int64_t table_id = static_cast<int64_t>(all_table_ids.at(i));
|
||||
if (is_virtual_table(table_id) && !ObDbmsStatsUtils::is_no_stat_virtual_table(table_id)) {
|
||||
if (OB_FAIL(refresh_tenant_schema_guard(ctx, tenant_id))) {
|
||||
if (OB_FAIL(ObBasicStatsEstimator::get_need_stats_tables(ctx, tenant_id, offset, slice_cnt, table_ids))) {
|
||||
LOG_WARN("failed to get need stats tables", K(ret));
|
||||
} else {
|
||||
task_info.task_table_count_ += table_ids.count();
|
||||
for (int64_t i = 0; OB_SUCC(ret) && i < table_ids.count(); ++i) {
|
||||
if (OB_FAIL(THIS_WORKER.check_status())) {
|
||||
LOG_WARN("failed to check status", K(ret));
|
||||
} else if (OB_FAIL(refresh_tenant_schema_guard(ctx, tenant_id))) {
|
||||
LOG_WARN("refresh tenant schema guard failed", K(ret));
|
||||
} else if (OB_FAIL(do_gather_table_stats(ctx, table_id, tenant_id,
|
||||
} else if (OB_FAIL(do_gather_table_stats(ctx, table_ids.at(i), tenant_id,
|
||||
duration_time, succeed_cnt, task_info))) {
|
||||
LOG_WARN("failed to gather virtual table stats", K(ret));
|
||||
} else {
|
||||
++task_info.task_table_count_;
|
||||
LOG_WARN("failed to gather table stats", K(ret));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
offset += slice_cnt;
|
||||
} while (OB_SUCC(ret) && table_ids.count() == slice_cnt);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
@ -5520,6 +5570,9 @@ int ObDbmsStats::do_gather_table_stats(sql::ObExecContext &ctx,
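// A standalone sketch (illustrative only, not OceanBase code) of the batching
// pattern described by the comments above: fetch at most `slice_cnt` table ids
// per round, process them, and stop once a round returns fewer than
// `slice_cnt` ids or makes no forward progress.
#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for get_need_stats_tables(): returns the next batch of ids.
static std::vector<int64_t> fetch_batch(int64_t offset, int64_t slice_cnt, int64_t total)
{
  std::vector<int64_t> ids;
  for (int64_t i = offset; i < total && static_cast<int64_t>(ids.size()) < slice_cnt; ++i) {
    ids.push_back(i);
  }
  return ids;
}

int main()
{
  const int64_t total = 25;      // pretend 25 tables need stats
  const int64_t slice_cnt = 10;  // at most 10 per round
  int64_t offset = 0;
  int64_t succeed_cnt = 0;
  std::vector<int64_t> ids;
  do {
    const int64_t before = succeed_cnt;
    ids = fetch_batch(offset, slice_cnt, total);
    succeed_cnt += static_cast<int64_t>(ids.size());  // "gather stats" for this batch
    offset += slice_cnt;
    std::printf("round gathered %zu tables, total %ld\n", ids.size(), succeed_cnt);
    // Stop when nothing new succeeded in this round.
    if (succeed_cnt == before) {
      break;
    }
  } while (static_cast<int64_t>(ids.size()) == slice_cnt);  // short batch: everything fetched
  return 0;
}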
|
||||
} else if (OB_ISNULL(table_schema)) {
|
||||
ret = OB_ERR_UNEXPECTED;
|
||||
LOG_WARN("get unexpected null", K(ret));
|
||||
} else if (is_recyclebin_database_id(table_schema->get_database_id()) ||
|
||||
(lib::is_oracle_mode() && is_oceanbase_sys_database_id(table_schema->get_database_id()))) {
|
||||
//do nothing
|
||||
} else {
|
||||
StatTable stat_table(table_schema->get_database_id(), table_id);
|
||||
double stale_percent_threshold = OPT_DEFAULT_STALE_PERCENT;
|
||||
@ -5718,6 +5771,8 @@ int ObDbmsStats::gather_table_stats_with_default_param(ObExecContext &ctx,
|
||||
LOG_WARN("failed to use default gather stat optitions", K(ret));
|
||||
} else if (OB_FAIL(adjust_auto_gather_stat_option(stat_table.partition_stat_infos_, stat_param))) {
|
||||
LOG_WARN("failed to use default gather stat optitions", K(ret));
|
||||
} else if (!stat_param.need_gather_stats()) {
|
||||
//do nothing
|
||||
} else if (OB_FAIL(running_monitor.add_table_info(stat_param, stat_table.stale_percent_))) {
|
||||
LOG_WARN("failed to add table info", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsExecutor::gather_table_stats(ctx, stat_param, running_monitor))) {
|
||||
@ -5751,7 +5806,9 @@ int ObDbmsStats::gather_table_stats_with_default_param(ObExecContext &ctx,
|
||||
LOG_TRACE("Succeed to gather table stats", K(stat_param));
|
||||
}
|
||||
running_monitor.set_monitor_result(ret, ObTimeUtility::current_time(), stat_param.allocator_->used());
|
||||
update_optimizer_gather_stat_info(NULL, &gather_stat);
|
||||
if (stat_param.need_gather_stats()) {
|
||||
update_optimizer_gather_stat_info(NULL, &gather_stat);
|
||||
}
|
||||
ObOptStatGatherStatList::instance().remove(gather_stat);
|
||||
task_info.completed_table_count_ ++;
|
||||
return ret;
|
||||
@ -5867,13 +5924,21 @@ int ObDbmsStats::get_new_stat_pref(ObExecContext &ctx,
|
||||
} else {
|
||||
stat_pref = tmp_pref;
|
||||
}
|
||||
} else if (0 == opt_name.case_compare("BLOCK_SAMPLE")) {
|
||||
ObBlockSamplePrefs *tmp_pref = NULL;
|
||||
if (OB_FAIL(new_stat_prefs(allocator, ctx.get_my_session(), opt_value, tmp_pref))) {
|
||||
LOG_WARN("failed to new stat prefs", K(ret));
|
||||
} else {
|
||||
stat_pref = tmp_pref;
|
||||
}
|
||||
} else {
|
||||
ret = OB_ERR_DBMS_STATS_PL;
|
||||
LOG_WARN("Invalid input values for pname", K(ret), K(opt_name));
|
||||
LOG_USER_ERROR(OB_ERR_DBMS_STATS_PL, "Invalid input values for pname, Only Support CASCADE |"\
|
||||
"DEGREE | ESTIMATE_PERCENT | GRANULARITY | INCREMENTAL |"\
|
||||
"INCREMENTAL_LEVEL | METHOD_OPT | NO_INVALIDATE | OPTIONS"\
|
||||
"STALE_PERCENT | ESTIMATE_BLOCK | APPROXIMATE_NDV(global prefs unique) prefs");
|
||||
"DEGREE | ESTIMATE_PERCENT | GRANULARITY | INCREMENTAL |"\
|
||||
"INCREMENTAL_LEVEL | METHOD_OPT | NO_INVALIDATE | OPTIONS |"\
|
||||
"STALE_PERCENT | ESTIMATE_BLOCK | BLOCK_SAMPLE |"\
|
||||
"APPROXIMATE_NDV(global prefs unique) prefs");
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
@ -6341,6 +6406,8 @@ int ObDbmsStats::gather_system_stats(sql::ObExecContext &ctx,
|
||||
LOG_WARN("failed to check is unix connection", K(ret));
|
||||
} else if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(check_system_stat_table_ready(session->get_effective_tenant_id()))) {
|
||||
LOG_WARN("failed to check system stat table ready", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsExecutor::gather_system_stats(ctx, session->get_effective_tenant_id()))) {
|
||||
@ -6373,6 +6440,8 @@ int ObDbmsStats::delete_system_stats(sql::ObExecContext &ctx,
|
||||
LOG_WARN("failed to check is unix connection", K(ret));
|
||||
} else if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(check_system_stat_table_ready(session->get_effective_tenant_id()))) {
|
||||
LOG_WARN("failed to check system stat table ready", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsExecutor::delete_system_stats(ctx, session->get_effective_tenant_id()))) {
|
||||
@ -6411,6 +6480,8 @@ int ObDbmsStats::set_system_stats(sql::ObExecContext &ctx,
|
||||
LOG_WARN("failed to check is unix connection", K(ret));
|
||||
} else if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (OB_FAIL(check_system_stat_table_ready(session->get_effective_tenant_id()))) {
|
||||
LOG_WARN("failed to check system stat table ready", K(ret));
|
||||
} else if (2 != params.count()) {
|
||||
@ -6544,6 +6615,7 @@ int ObDbmsStats::check_system_stat_table_ready(int64_t tenant_id)
|
||||
* @param result
|
||||
* @return int
|
||||
*/
|
||||
|
||||
int ObDbmsStats::copy_table_stats(sql::ObExecContext &ctx,
|
||||
sql::ParamStore ¶ms,
|
||||
common::ObObj &result)
|
||||
@ -6560,6 +6632,8 @@ int ObDbmsStats::copy_table_stats(sql::ObExecContext &ctx,
|
||||
dummy_part_name.set_null();
|
||||
if (OB_FAIL(check_statistic_table_writeable(ctx))) {
|
||||
LOG_WARN("failed to check tenant is restore", K(ret));
|
||||
} else if (OB_FAIL(ObDbmsStatsUtils::implicit_commit_before_gather_stats(ctx))) {
|
||||
LOG_WARN("failed to implicit commit before gather stats", K(ret));
|
||||
} else if (GET_MIN_CLUSTER_VERSION() < CLUSTER_VERSION_4_2_2_0) {
|
||||
//do nothing
|
||||
} else if (OB_FAIL(parse_table_part_info(ctx,
|
||||
@ -6729,11 +6803,18 @@ int ObDbmsStats::adjust_auto_gather_stat_option(const ObIArray<ObPartitionStatIn
|
||||
}
|
||||
}
|
||||
}
|
||||
if (OB_SUCC(ret) &&
|
||||
param.global_stat_param_.need_modify_ &&
|
||||
param.global_stat_param_.gather_approx_ &&
|
||||
(has_part_locked || !param.part_stat_param_.need_modify_)) {
|
||||
param.global_stat_param_.gather_approx_ = false;
|
||||
if (OB_SUCC(ret) && param.global_stat_param_.need_modify_) {
|
||||
bool is_locked = false;
|
||||
if (param.global_stat_param_.gather_approx_ &&
|
||||
(has_part_locked || !param.part_stat_param_.need_modify_)) {
|
||||
param.global_stat_param_.gather_approx_ = false;
|
||||
if (is_partition_no_regather(param.global_part_id_, partition_stat_infos, is_locked)) {
|
||||
param.global_stat_param_.need_modify_ = false;
|
||||
}
|
||||
} else if (!param.global_stat_param_.gather_approx_ &&
|
||||
is_partition_no_regather(param.global_part_id_, partition_stat_infos, is_locked)) {
|
||||
param.global_stat_param_.need_modify_ = false;
|
||||
}
|
||||
}
|
||||
LOG_TRACE("succeed to adjust auto gather stat option", K(partition_stat_infos), K(param));
|
||||
return ret;
|
||||
|
@ -315,6 +315,7 @@ public:
|
||||
const ObObjParam &tab_name,
|
||||
const ObObjParam &colname,
|
||||
const ObObjParam &part_name,
|
||||
ObObjMeta &col_meta,
|
||||
ObTableStatParam ¶m);
|
||||
|
||||
static int parse_set_column_stats_options(ObExecContext &ctx,
|
||||
|
@ -135,7 +135,8 @@ ObDDLTaskSerializeField::ObDDLTaskSerializeField(const int64_t task_version,
|
||||
const bool is_abort,
|
||||
const int32_t sub_task_trace_id,
|
||||
const bool is_unique_index,
|
||||
const bool is_global_index)
|
||||
const bool is_global_index,
|
||||
const bool is_pre_split)
|
||||
{
|
||||
task_version_ = task_version;
|
||||
parallelism_ = parallelism;
|
||||
@ -145,6 +146,7 @@ ObDDLTaskSerializeField::ObDDLTaskSerializeField(const int64_t task_version,
|
||||
sub_task_trace_id_ = sub_task_trace_id;
|
||||
is_unique_index_ = is_unique_index;
|
||||
is_global_index_ = is_global_index;
|
||||
is_pre_split_ = is_pre_split;
|
||||
}
|
||||
|
||||
void ObDDLTaskSerializeField::reset()
|
||||
@ -157,6 +159,7 @@ void ObDDLTaskSerializeField::reset()
|
||||
sub_task_trace_id_ = 0;
|
||||
is_unique_index_ = false;
|
||||
is_global_index_ = false;
|
||||
is_pre_split_ = false;
|
||||
}
|
||||
|
||||
OB_SERIALIZE_MEMBER(ObDDLTaskSerializeField,
|
||||
@ -167,14 +170,15 @@ OB_SERIALIZE_MEMBER(ObDDLTaskSerializeField,
|
||||
is_abort_,
|
||||
sub_task_trace_id_,
|
||||
is_unique_index_,
|
||||
is_global_index_);
|
||||
is_global_index_,
|
||||
is_pre_split_);
|
||||
|
||||
ObCreateDDLTaskParam::ObCreateDDLTaskParam()
|
||||
: sub_task_trace_id_(0), tenant_id_(OB_INVALID_ID), object_id_(OB_INVALID_ID), schema_version_(0), parallelism_(0),
|
||||
consumer_group_id_(0), parent_task_id_(0), task_id_(0), type_(DDL_INVALID), src_table_schema_(nullptr),
|
||||
dest_table_schema_(nullptr), ddl_arg_(nullptr), allocator_(nullptr),
|
||||
aux_rowkey_doc_schema_(nullptr), aux_doc_rowkey_schema_(nullptr), aux_doc_word_schema_(nullptr),
|
||||
tenant_data_version_(0), ddl_need_retry_at_executor_(false)
|
||||
tenant_data_version_(0), ddl_need_retry_at_executor_(false), is_pre_split_(false)
|
||||
{
|
||||
}
|
||||
|
||||
@ -194,7 +198,7 @@ ObCreateDDLTaskParam::ObCreateDDLTaskParam(const uint64_t tenant_id,
|
||||
: sub_task_trace_id_(0), tenant_id_(tenant_id), object_id_(object_id), schema_version_(schema_version), parallelism_(parallelism), consumer_group_id_(consumer_group_id),
|
||||
parent_task_id_(parent_task_id), task_id_(task_id), type_(type), src_table_schema_(src_table_schema), dest_table_schema_(dest_table_schema),
|
||||
ddl_arg_(ddl_arg), allocator_(allocator), aux_rowkey_doc_schema_(nullptr), aux_doc_rowkey_schema_(nullptr),
|
||||
aux_doc_word_schema_(nullptr), ddl_need_retry_at_executor_(ddl_need_retry_at_executor)
|
||||
aux_doc_word_schema_(nullptr), ddl_need_retry_at_executor_(ddl_need_retry_at_executor), is_pre_split_(false)
|
||||
{
|
||||
}
|
||||
|
||||
@ -955,7 +959,7 @@ int ObDDLTask::set_ddl_stmt_str(const ObString &ddl_stmt_str)
|
||||
int ObDDLTask::serialize_params_to_message(char *buf, const int64_t buf_size, int64_t &pos) const
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObDDLTaskSerializeField serialize_field(task_version_, parallelism_, data_format_version_, consumer_group_id_, is_abort_, sub_task_trace_id_);
|
||||
ObDDLTaskSerializeField serialize_field(task_version_, parallelism_, data_format_version_, consumer_group_id_, is_abort_, sub_task_trace_id_, is_pre_split_);
|
||||
if (OB_UNLIKELY(nullptr == buf || buf_size <= 0)) {
|
||||
ret = OB_INVALID_ARGUMENT;
|
||||
LOG_WARN("invalid arguments", K(ret), KP(buf), K(buf_size));
|
||||
@ -983,13 +987,14 @@ int ObDDLTask::deserialize_params_from_message(const uint64_t tenant_id, const c
|
||||
consumer_group_id_ = serialize_field.consumer_group_id_;
|
||||
is_abort_ = serialize_field.is_abort_;
|
||||
sub_task_trace_id_ = serialize_field.sub_task_trace_id_;
|
||||
is_pre_split_ = serialize_field.is_pre_split_;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int64_t ObDDLTask::get_serialize_param_size() const
|
||||
{
|
||||
ObDDLTaskSerializeField serialize_field(task_version_, parallelism_, data_format_version_, consumer_group_id_, is_abort_, sub_task_trace_id_);
|
||||
ObDDLTaskSerializeField serialize_field(task_version_, parallelism_, data_format_version_, consumer_group_id_, is_abort_, sub_task_trace_id_, is_pre_split_);
|
||||
return serialize_field.get_serialize_size();
|
||||
}
|
||||
|
||||
|
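// A standalone sketch (illustrative only, not the OB_SERIALIZE_MEMBER
// machinery) of the compatibility idea behind appending is_pre_split_ at the
// end of the serialized field list above: new fields are written after the
// existing ones, and when an older payload runs out of bytes the new field
// simply keeps its default value.
#include <cstdint>
#include <cstdio>
#include <vector>

struct TaskField {
  int64_t task_version = 0;
  bool is_global_index = false;
  bool is_pre_split = false;   // appended later; defaults to false
};

static void serialize(const TaskField &f, std::vector<uint8_t> &buf, bool include_new_field)
{
  for (int i = 0; i < 8; ++i) buf.push_back(static_cast<uint8_t>(f.task_version >> (8 * i)));
  buf.push_back(f.is_global_index ? 1 : 0);
  if (include_new_field) buf.push_back(f.is_pre_split ? 1 : 0);  // appended at the end
}

static void deserialize(const std::vector<uint8_t> &buf, TaskField &f)
{
  size_t pos = 0;
  f.task_version = 0;
  for (int i = 0; i < 8; ++i) f.task_version |= static_cast<int64_t>(buf[pos++]) << (8 * i);
  f.is_global_index = buf[pos++] != 0;
  if (pos < buf.size()) f.is_pre_split = buf[pos++] != 0;  // old payload: keep the default
}

int main()
{
  TaskField sent;
  sent.task_version = 7;
  sent.is_global_index = true;
  sent.is_pre_split = true;

  std::vector<uint8_t> old_payload, new_payload;
  serialize(sent, old_payload, /*include_new_field=*/false);  // as an older binary would write it
  serialize(sent, new_payload, /*include_new_field=*/true);

  TaskField a, b;
  deserialize(old_payload, a);
  deserialize(new_payload, b);
  std::printf("old payload -> is_pre_split=%d, new payload -> is_pre_split=%d\n",
              a.is_pre_split, b.is_pre_split);  // 0 then 1
  return 0;
}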
@ -140,10 +140,10 @@ struct ObDDLTaskSerializeField final
|
||||
OB_UNIS_VERSION(1);
|
||||
public:
|
||||
TO_STRING_KV(K_(task_version), K_(parallelism), K_(data_format_version), K_(consumer_group_id),
|
||||
K_(is_abort), K_(sub_task_trace_id), K_(is_unique_index), K_(is_global_index));
|
||||
K_(is_abort), K_(sub_task_trace_id), K_(is_unique_index), K_(is_global_index) ,K_(is_pre_split));
|
||||
ObDDLTaskSerializeField() : task_version_(0), parallelism_(0), data_format_version_(0),
|
||||
consumer_group_id_(0), is_abort_(false), sub_task_trace_id_(0),
|
||||
is_unique_index_(false), is_global_index_(false) {}
|
||||
is_unique_index_(false), is_global_index_(false), is_pre_split_(false) {}
|
||||
ObDDLTaskSerializeField(const int64_t task_version,
|
||||
const int64_t parallelism,
|
||||
const uint64_t data_format_version,
|
||||
@ -151,7 +151,8 @@ public:
|
||||
const bool is_abort,
|
||||
const int32_t sub_task_trace_id,
|
||||
const bool is_unique_index = false,
|
||||
const bool is_global_index = false);
|
||||
const bool is_global_index = false,
|
||||
const bool is_pre_split = false);
|
||||
~ObDDLTaskSerializeField() = default;
|
||||
void reset();
|
||||
public:
|
||||
@ -163,6 +164,7 @@ public:
|
||||
int32_t sub_task_trace_id_;
|
||||
bool is_unique_index_;
|
||||
bool is_global_index_;
|
||||
bool is_pre_split_;
|
||||
};
|
||||
|
||||
struct ObCreateDDLTaskParam final
|
||||
@ -188,7 +190,7 @@ public:
|
||||
TO_STRING_KV(K_(tenant_id), K_(object_id), K_(schema_version), K_(parallelism), K_(consumer_group_id), K_(parent_task_id), K_(task_id),
|
||||
K_(type), KPC_(src_table_schema), KPC_(dest_table_schema), KPC_(ddl_arg), K_(tenant_data_version),
|
||||
K_(sub_task_trace_id), KPC_(aux_rowkey_doc_schema), KPC_(aux_doc_rowkey_schema), KPC_(aux_doc_word_schema),
|
||||
K_(ddl_need_retry_at_executor));
|
||||
K_(ddl_need_retry_at_executor), K_(is_pre_split));
|
||||
public:
|
||||
int32_t sub_task_trace_id_;
|
||||
uint64_t tenant_id_;
|
||||
@ -208,6 +210,7 @@ public:
|
||||
const ObTableSchema *aux_doc_word_schema_;
|
||||
uint64_t tenant_data_version_;
|
||||
bool ddl_need_retry_at_executor_;
|
||||
bool is_pre_split_;
|
||||
};
|
||||
|
||||
class ObDDLTaskRecordOperator final
|
||||
@ -525,7 +528,7 @@ public:
|
||||
parent_task_id_(0), parent_task_key_(), task_version_(0), parallelism_(0),
|
||||
allocator_(lib::ObLabel("DdlTask")), compat_mode_(lib::Worker::CompatMode::INVALID), err_code_occurence_cnt_(0),
|
||||
longops_stat_(nullptr), gmt_create_(0), stat_info_(), delay_schedule_time_(0), next_schedule_ts_(0),
|
||||
execution_id_(-1), sql_exec_addr_(), start_time_(0), data_format_version_(0)
|
||||
execution_id_(-1), sql_exec_addr_(), start_time_(0), data_format_version_(0), is_pre_split_(false)
|
||||
{}
|
||||
virtual ~ObDDLTask() {}
|
||||
virtual int process() = 0;
|
||||
@ -625,7 +628,7 @@ public:
|
||||
K_(task_version), K_(parallelism), K_(ddl_stmt_str), K_(compat_mode),
|
||||
K_(sys_task_id), K_(err_code_occurence_cnt), K_(stat_info),
|
||||
K_(next_schedule_ts), K_(delay_schedule_time), K(execution_id_), K(sql_exec_addr_), K_(data_format_version), K(consumer_group_id_),
|
||||
K_(dst_tenant_id), K_(dst_schema_version));
|
||||
K_(dst_tenant_id), K_(dst_schema_version), K_(is_pre_split));
|
||||
static const int64_t MAX_ERR_TOLERANCE_CNT = 3L; // Max torlerance count for error code.
|
||||
static const int64_t DEFAULT_TASK_IDLE_TIME_US = 10L * 1000L; // 10ms
|
||||
protected:
|
||||
@ -700,6 +703,7 @@ protected:
|
||||
int64_t start_time_;
|
||||
uint64_t data_format_version_;
|
||||
int64_t consumer_group_id_;
|
||||
bool is_pre_split_;
|
||||
};
|
||||
|
||||
enum ColChecksumStat
|
||||
|
@ -82,36 +82,6 @@ int64_t ObMergeProgress::to_string(char *buf, const int64_t buf_len) const
|
||||
}
|
||||
return pos;
|
||||
}
|
||||
/**
|
||||
* -------------------------------------------------------------------ObRSCompactionTimeGuard-------------------------------------------------------------------
|
||||
*/
|
||||
const char *ObRSCompactionTimeGuard::CompactionEventStr[] = {
|
||||
"PREPARE_UNFINISH_TABLE_IDS",
|
||||
"GET_TABLET_LS_PAIRS",
|
||||
"GET_TABLET_META_TABLE",
|
||||
"CKM_VERIFICATION"
|
||||
};
|
||||
|
||||
const char *ObRSCompactionTimeGuard::get_comp_event_str(enum CompactionEvent event)
|
||||
{
|
||||
STATIC_ASSERT(static_cast<int64_t>(COMPACTION_EVENT_MAX) == ARRAYSIZEOF(CompactionEventStr), "events str len is mismatch");
|
||||
const char *str = "";
|
||||
if (event >= COMPACTION_EVENT_MAX || event < PREPARE_UNFINISH_TABLE_IDS) {
|
||||
str = "invalid_type";
|
||||
} else {
|
||||
str = CompactionEventStr[event];
|
||||
}
|
||||
return str;
|
||||
}
|
||||
|
||||
int64_t ObRSCompactionTimeGuard::to_string(char *buf, const int64_t buf_len) const
|
||||
{
|
||||
int64_t pos = 0;
|
||||
for (int64_t idx = 0; idx < idx_; ++idx) {
|
||||
fmt_ts_to_meaningful_str(buf, buf_len, pos, get_comp_event_str((CompactionEvent)line_array_[idx]), click_poinsts_[idx]);
|
||||
}
|
||||
return pos;
|
||||
}
|
||||
|
||||
/**
|
||||
* -------------------------------------------------------------------ObTabletLSPairCache-------------------------------------------------------------------
|
||||
|
@ -209,26 +209,6 @@ typedef hash::ObHashMap<ObTabletID, ObTabletCompactionStatus> ObTabletStatusMap;
|
||||
typedef common::ObArray<share::ObTabletLSPair> ObTabletLSPairArray;
|
||||
typedef hash::ObHashMap<uint64_t, ObTableCompactionInfo> ObTableCompactionInfoMap;
|
||||
|
||||
struct ObRSCompactionTimeGuard : public ObCompactionTimeGuard
|
||||
{
|
||||
public:
|
||||
ObRSCompactionTimeGuard()
|
||||
: ObCompactionTimeGuard(UINT64_MAX, "[RS] ")
|
||||
{}
|
||||
virtual ~ObRSCompactionTimeGuard() {}
|
||||
enum CompactionEvent : uint16_t {
|
||||
PREPARE_UNFINISH_TABLE_IDS = 0,
|
||||
GET_TABLET_LS_PAIRS,
|
||||
GET_TABLET_META_TABLE,
|
||||
CKM_VERIFICATION,
|
||||
COMPACTION_EVENT_MAX,
|
||||
};
|
||||
virtual int64_t to_string(char *buf, const int64_t buf_len) const override;
|
||||
private:
|
||||
const static char *CompactionEventStr[];
|
||||
static const char *get_comp_event_str(enum CompactionEvent event);
|
||||
};
|
||||
|
||||
struct ObCkmValidatorStatistics
|
||||
{
|
||||
ObCkmValidatorStatistics() { reset(); }
|
||||
|
@ -835,9 +835,23 @@ void ObMajorMergeScheduler::check_merge_interval_time(const bool is_merging)
(GCONF.enable_major_freeze) &&
(!tenant_config->major_freeze_duty_time.disable())) {
if (TC_REACH_TIME_INTERVAL(30 * 60 * 1000 * 1000)) {
LOG_ERROR("long time no major freeze, please check it", KR(ret),
K(global_last_merged_time), K(global_merge_start_time), K(max_merge_time),
K(now), K_(tenant_id), K(is_merging), K(start_service_time), K(total_service_time));
// standby tenant cannot launch major freeze itself, it performs major freeze according
// to freeze info synchronized from primary tenant. therefore, standby tenants that
// stop syncing from the primary tenant will not perform major freeze any more. do not print
// error log for this case. issue-id: 56800988
ObAllTenantInfo tenant_info;
if (OB_FAIL(ObAllTenantInfoProxy::load_tenant_info(tenant_id_, sql_proxy_,
false, tenant_info))) {
LOG_WARN("fail to load tenant info", KR(ret), K_(tenant_id));
} else if (tenant_info.is_standby()
&& (tenant_info.get_standby_scn() >= tenant_info.get_recovery_until_scn())) {
LOG_INFO("standby tenant do not sync from primary tenant any more, and do not"
" major freeze any more");
} else {
LOG_ERROR("long time no major freeze, please check it", KR(ret),
K(global_last_merged_time), K(global_merge_start_time), K(max_merge_time),
K(now), K_(tenant_id), K(is_merging), K(start_service_time), K(total_service_time));
}
}
}
}
|
||||
|
@ -16672,6 +16672,10 @@ int ObDDLService::prepare_hidden_table_schema(const ObTableSchema &orig_table_sc
|
||||
hidden_table_schema.set_association_table_id(orig_table_schema.get_table_id());
|
||||
// set the hidden attributes of the table
|
||||
hidden_table_schema.set_table_state_flag(ObTableStateFlag::TABLE_STATE_HIDDEN_OFFLINE_DDL);
|
||||
if (orig_table_schema.get_tenant_id() != hidden_table_schema.get_tenant_id()) {
|
||||
// recover restore table, do not sync log to cdc.
|
||||
hidden_table_schema.set_ddl_ignore_sync_cdc_flag(ObDDLIgnoreSyncCdcFlag::DONT_SYNC_LOG_FOR_CDC);
|
||||
}
|
||||
// in oracle mode, need to add primary key constraints
|
||||
if (is_oracle_mode && !hidden_table_schema.is_heap_table()) {
|
||||
uint64_t new_cst_id = OB_INVALID_ID;
|
||||
@ -19514,6 +19518,7 @@ int ObDDLService::make_recover_restore_tables_visible(obrpc::ObAlterTableArg &al
|
||||
tmp_schema.set_association_table_id(OB_INVALID_ID);
|
||||
tmp_schema.set_table_state_flag(ObTableStateFlag::TABLE_STATE_NORMAL);
|
||||
tmp_schema.set_in_offline_ddl_white_list(true);
|
||||
tmp_schema.set_ddl_ignore_sync_cdc_flag(ObDDLIgnoreSyncCdcFlag::DO_SYNC_LOG_FOR_CDC); // reset.
|
||||
ObArray<ObSchemaType> conflict_schema_types;
|
||||
uint64_t synonym_id = OB_INVALID_ID;
|
||||
bool object_exist = false;
|
||||
|
@ -353,6 +353,8 @@ int ObLSRecoveryStatHandler::try_reload_and_fix_config_version_(const palf::LogC
|
||||
share::SCN readable_scn;
|
||||
const uint64_t meta_tenant_id = gen_meta_tenant_id(tenant_id_);
|
||||
uint64_t tenant_data_version = 0;
|
||||
ObLSRecoveryStatOperator op;
|
||||
ObLSID ls_id;
|
||||
if (OB_FAIL(check_inner_stat_())) {
|
||||
LOG_WARN("inner stat error", KR(ret));
|
||||
} else if (OB_UNLIKELY(!current_version.is_valid())) {
|
||||
@ -361,9 +363,14 @@ int ObLSRecoveryStatHandler::try_reload_and_fix_config_version_(const palf::LogC
|
||||
} else if (OB_ISNULL(GCTX.sql_proxy_)) {
|
||||
ret = OB_ERR_UNEXPECTED;
|
||||
LOG_WARN("sql proxy is null", KR(ret));
|
||||
} else if (FALSE_IT(ls_id = ls_->get_ls_id())) {
|
||||
//can not be there
|
||||
} else if (OB_FAIL(GET_MIN_DATA_VERSION(meta_tenant_id, tenant_data_version))) {
|
||||
LOG_WARN("failed to get min data version", KR(ret), K(tenant_id_), K(meta_tenant_id));
|
||||
} else if (tenant_data_version < DATA_VERSION_4_3_0_0) {
|
||||
} else if (tenant_data_version < MOCK_DATA_VERSION_4_2_4_0) {
|
||||
//内部表config_version的汇报最开始是在4300版本上提交的
|
||||
//后面patch到424版本上,由于是在4300分支的第一个版本号提交
|
||||
//所以版本号判断直接小于等于424即可
|
||||
need_update = false;
|
||||
LOG_INFO("not ready to load and update config version", KR(ret), K(tenant_data_version));
|
||||
} else {
|
||||
@ -371,13 +378,10 @@ int ObLSRecoveryStatHandler::try_reload_and_fix_config_version_(const palf::LogC
|
||||
if (current_version != config_version_in_inner_) {
|
||||
need_update = true;
|
||||
FLOG_INFO("config version not match, need update",
|
||||
K(config_version_in_inner_), K(current_version), "ls_id",
|
||||
ls_->get_ls_id());
|
||||
K(config_version_in_inner_), K(current_version), K(ls_id));
|
||||
}
|
||||
}
|
||||
if (OB_SUCC(ret) && need_update) {
|
||||
ObLSRecoveryStatOperator op;
|
||||
ObLSID ls_id = ls_->get_ls_id();
|
||||
if (OB_FAIL(op.update_ls_config_version(tenant_id_, ls_id, current_version,
|
||||
*GCTX.sql_proxy_, readable_scn))) {
|
||||
LOG_WARN("failed to update ls config version", KR(ret), K(tenant_id_), K(ls_id), K(current_version));
|
||||
|
@ -474,7 +474,7 @@ int ObRecoveryLSService::process_ls_tx_log_(ObTxLogBlock &tx_log_block, const SC
|
||||
const ObTxBufferNodeArray &source_data =
|
||||
commit_log.get_multi_source_data();
|
||||
const uint64_t exec_tenant_id = gen_meta_tenant_id(tenant_id_);
|
||||
START_TRANSACTION(proxy_, exec_tenant_id)
|
||||
ObMySQLTransaction trans;
|
||||
for (int64_t i = 0; OB_SUCC(ret) && i < source_data.count(); ++i) {
|
||||
const ObTxBufferNode &node = source_data.at(i);
|
||||
if (ObTxDataSourceType::STANDBY_UPGRADE == node.get_data_source_type()) {
|
||||
@ -484,6 +484,8 @@ int ObRecoveryLSService::process_ls_tx_log_(ObTxLogBlock &tx_log_block, const SC
|
||||
} else if (ObTxDataSourceType::LS_TABLE != node.get_data_source_type()
|
||||
&& ObTxDataSourceType::TRANSFER_TASK != node.get_data_source_type()) {
|
||||
// nothing
|
||||
} else if (! trans.is_started() && OB_FAIL(trans.start(proxy_, exec_tenant_id))) {
|
||||
LOG_WARN("failed to start trans", KR(ret), K(exec_tenant_id));
|
||||
} else if (FALSE_IT(has_operation = true)) {
|
||||
//can not be there;
|
||||
} else if (OB_FAIL(check_valid_to_operator_ls_(sync_scn))) {
|
||||
|
@ -44,6 +44,8 @@ public:
|
||||
SHARE_LOG(ERROR, "init memstore allocator failed", KR(ret));
|
||||
} else if (OB_FAIL(mds_allocator_.init())) {
|
||||
SHARE_LOG(ERROR, "init mds allocator failed", KR(ret));
|
||||
} else if (OB_FAIL(tx_data_op_allocator_.init())) {
|
||||
SHARE_LOG(ERROR, "init tx data op allocator failed", KR(ret));
|
||||
} else if (OB_FAIL(
|
||||
share_resource_throttle_tool_.init(&memstore_allocator_, &tx_data_allocator_, &mds_allocator_))) {
|
||||
SHARE_LOG(ERROR, "init share resource throttle tool failed", KR(ret));
|
||||
@ -65,6 +67,7 @@ public:
|
||||
ObTenantTxDataAllocator &tx_data_allocator() { return tx_data_allocator_; }
|
||||
ObTenantMdsAllocator &mds_allocator() { return mds_allocator_; }
|
||||
TxShareThrottleTool &share_resource_throttle_tool() { return share_resource_throttle_tool_; }
|
||||
ObTenantTxDataOpAllocator &tx_data_op_allocator() { return tx_data_op_allocator_; }
|
||||
|
||||
private:
|
||||
void update_share_throttle_config_(const int64_t total_memory, omt::ObTenantConfigGuard &config);
|
||||
@ -78,6 +81,7 @@ private:
|
||||
ObMemstoreAllocator memstore_allocator_;
|
||||
ObTenantTxDataAllocator tx_data_allocator_;
|
||||
ObTenantMdsAllocator mds_allocator_;
|
||||
ObTenantTxDataOpAllocator tx_data_op_allocator_;
|
||||
};
|
||||
|
||||
class TxShareMemThrottleUtil
|
||||
@ -156,4 +160,4 @@ public:
|
||||
} // namespace share
|
||||
} // namespace oceanbase
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
@ -23,6 +23,8 @@ namespace oceanbase {
|
||||
|
||||
namespace share {
|
||||
|
||||
thread_local int64_t ObTenantTxDataOpAllocator::local_alloc_size_ = 0;
|
||||
|
||||
int64_t ObTenantTxDataAllocator::resource_unit_size()
|
||||
{
|
||||
static const int64_t TX_DATA_RESOURCE_UNIT_SIZE = OB_MALLOC_NORMAL_BLOCK_SIZE; /* 8KB */
|
||||
@ -149,5 +151,59 @@ ObTxDataThrottleGuard::~ObTxDataThrottleGuard()
|
||||
}
|
||||
}
|
||||
|
||||
int ObTenantTxDataOpAllocator::init()
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObMemAttr mem_attr;
|
||||
mem_attr.tenant_id_ = MTL_ID();
|
||||
mem_attr.ctx_id_ = ObCtxIds::MDS_DATA_ID;
|
||||
mem_attr.label_ = "TX_OP";
|
||||
ObSharedMemAllocMgr *share_mem_alloc_mgr = MTL(ObSharedMemAllocMgr *);
|
||||
throttle_tool_ = &(share_mem_alloc_mgr->share_resource_throttle_tool());
|
||||
if (IS_INIT){
|
||||
ret = OB_INIT_TWICE;
|
||||
SHARE_LOG(WARN, "init tenant mds allocator twice", KR(ret), KPC(this));
|
||||
} else if (OB_ISNULL(throttle_tool_)) {
|
||||
ret = OB_ERR_UNEXPECTED;
|
||||
SHARE_LOG(WARN, "throttle tool is unexpected null", KP(throttle_tool_), KP(share_mem_alloc_mgr));
|
||||
} else if (OB_FAIL(allocator_.init(OB_MALLOC_NORMAL_BLOCK_SIZE, block_alloc_, mem_attr))) {
|
||||
MDS_LOG(WARN, "init vslice allocator failed", K(ret), K(OB_MALLOC_NORMAL_BLOCK_SIZE), KP(this), K(mem_attr));
|
||||
} else {
|
||||
allocator_.set_nway(MDS_ALLOC_CONCURRENCY);
|
||||
is_inited_ = true;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
void *ObTenantTxDataOpAllocator::alloc(const int64_t size)
|
||||
{
|
||||
int64_t abs_expire_time = THIS_WORKER.get_timeout_ts();
|
||||
void * buf = alloc(size, abs_expire_time);
|
||||
if (OB_NOT_NULL(buf)) {
|
||||
local_alloc_size_ += size;
|
||||
}
|
||||
return buf;
|
||||
}
|
||||
|
||||
void *ObTenantTxDataOpAllocator::alloc(const int64_t size, const ObMemAttr &attr)
|
||||
{
|
||||
UNUSED(attr);
|
||||
void *obj = alloc(size);
|
||||
return obj;
|
||||
}
|
||||
|
||||
void *ObTenantTxDataOpAllocator::alloc(const int64_t size, const int64_t abs_expire_time)
|
||||
{
|
||||
void *obj = allocator_.alloc(size);
|
||||
return obj;
|
||||
}
|
||||
|
||||
void ObTenantTxDataOpAllocator::free(void *ptr)
|
||||
{
|
||||
allocator_.free(ptr);
|
||||
}
|
||||
|
||||
void ObTenantTxDataOpAllocator::set_attr(const ObMemAttr &attr) { allocator_.set_attr(attr); }
|
||||
|
||||
} // namespace share
|
||||
} // namespace oceanbase
|
||||
} // namespace oceanbase
|
||||
|
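// A standalone sketch (illustrative only) of the pattern used by the allocator
// above: a wrapper keeps a thread_local running total of the bytes it has
// handed out on the current thread, so each worker can inspect and reset its
// own allocation volume without any synchronization.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

class CountingAllocator {
public:
  void *alloc(size_t size) {
    void *buf = std::malloc(size);
    if (buf != nullptr) {
      local_alloc_size_ += size;   // per-thread accounting only
    }
    return buf;
  }
  void free(void *ptr) { std::free(ptr); }
  static size_t local_alloc_size() { return local_alloc_size_; }
  static void reset_local_alloc_size() { local_alloc_size_ = 0; }
private:
  static thread_local size_t local_alloc_size_;
};

thread_local size_t CountingAllocator::local_alloc_size_ = 0;

int main()
{
  CountingAllocator allocator;
  void *a = allocator.alloc(128);
  void *b = allocator.alloc(64);
  std::printf("this thread allocated %zu bytes\n", CountingAllocator::local_alloc_size());
  allocator.free(a);
  allocator.free(b);
  CountingAllocator::reset_local_alloc_size();
  return 0;
}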
@ -16,6 +16,7 @@
|
||||
#include "lib/allocator/ob_slice_alloc.h"
|
||||
#include "share/ob_delegate.h"
|
||||
#include "share/throttle/ob_share_throttle_define.h"
|
||||
#include "lib/allocator/ob_vslice_alloc.h"
|
||||
|
||||
namespace oceanbase {
|
||||
namespace share {
|
||||
@ -75,7 +76,35 @@ private:
|
||||
share::TxShareThrottleTool *throttle_tool_;
|
||||
};
|
||||
|
||||
class ObTenantTxDataOpAllocator : public ObIAllocator {
|
||||
private:
|
||||
static const int64_t MDS_ALLOC_CONCURRENCY = 32;
|
||||
public:
|
||||
DEFINE_CUSTOM_FUNC_FOR_THROTTLE(Mds);
|
||||
|
||||
public:
|
||||
ObTenantTxDataOpAllocator() : is_inited_(false), throttle_tool_(nullptr), block_alloc_(), allocator_() {}
|
||||
|
||||
int init();
|
||||
void destroy() { is_inited_ = false; }
|
||||
void *alloc(const int64_t size, const int64_t expire_ts);
|
||||
virtual void *alloc(const int64_t size) override;
|
||||
virtual void *alloc(const int64_t size, const ObMemAttr &attr) override;
|
||||
virtual void free(void *ptr) override;
|
||||
virtual void set_attr(const ObMemAttr &attr) override;
|
||||
int64_t hold() { return allocator_.hold(); }
|
||||
int64_t get_local_alloc_size() { return local_alloc_size_; }
|
||||
void reset_local_alloc_size() { local_alloc_size_ = 0; }
|
||||
TO_STRING_KV(K(is_inited_), KP(this), KP(throttle_tool_), KP(&block_alloc_), KP(&allocator_));
|
||||
|
||||
private:
|
||||
bool is_inited_;
|
||||
share::TxShareThrottleTool *throttle_tool_;
|
||||
common::ObBlockAllocMgr block_alloc_;
|
||||
common::ObVSliceAlloc allocator_;
|
||||
static thread_local int64_t local_alloc_size_;
|
||||
};
|
||||
} // namespace share
|
||||
} // namespace oceanbase
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
@ -14,45 +14,263 @@ namespace oceanbase
|
||||
namespace compaction
|
||||
{
|
||||
|
||||
/**
|
||||
* -------------------------------------------------------------------ObCompactionTimeGuard-------------------------------------------------------------------
|
||||
*/
|
||||
ObCompactionTimeGuard::~ObCompactionTimeGuard()
|
||||
{
|
||||
int64_t total_cost = 0;
|
||||
for (int64_t idx = 0; idx < size_; ++idx) {
|
||||
total_cost += event_times_[idx];
|
||||
}
|
||||
total_cost += common::ObTimeUtility::current_time() - last_click_ts_;
|
||||
if (OB_UNLIKELY(total_cost >= warn_threshold_)) {
|
||||
::oceanbase::common::OB_PRINT(log_mod_, OB_LOG_LEVEL_DIRECT_NO_ERRCODE(WARN), OB_SUCCESS, "cost too much time", LOG_KVS(K(*this)));
|
||||
}
|
||||
}
|
||||
|
||||
int64_t ObCompactionTimeGuard::to_string(char *buf, const int64_t buf_len) const
|
||||
{
|
||||
int64_t pos = 0;
|
||||
fmt_ts_to_meaningful_str(buf, buf_len, pos, "|threshold", warn_threshold_);
|
||||
common::databuff_printf(buf, buf_len, pos, "start at %s|", common::ObTime2Str::ob_timestamp_str(add_time_));
|
||||
int64_t total_cost = 0;
|
||||
for (int64_t idx = 0; idx < size_; ++idx) {
|
||||
const uint64_t ts = event_times_[idx];
|
||||
if (ts < 1_ms) {
|
||||
common::databuff_printf(buf, buf_len, pos, "%ldus|", ts);
|
||||
} else if (ts < 1_s) {
|
||||
common::databuff_printf(buf, buf_len, pos, "%.2lfms|", double(ts) / 1_ms);
|
||||
} else {
|
||||
common::databuff_printf(buf, buf_len, pos, "%.2lfs|", double(ts) / 1_s);
|
||||
}
|
||||
total_cost += event_times_[idx];
|
||||
}
|
||||
total_cost += common::ObTimeUtility::current_time() - last_click_ts_;
|
||||
fmt_ts_to_meaningful_str(buf, buf_len, pos, "total", total_cost);
|
||||
if (pos != 0 && pos < buf_len) {
|
||||
pos -= 1;
|
||||
}
|
||||
return pos;
|
||||
}
|
||||
|
||||
void ObCompactionTimeGuard::reuse()
|
||||
{
|
||||
size_ = 0;
|
||||
last_click_ts_ = common::ObTimeUtility::current_time();
|
||||
add_time_ = common::ObTimeUtility::current_time();
|
||||
for (uint16_t i = 0; i < capacity_; ++i) {
|
||||
event_times_[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
bool ObCompactionTimeGuard::click(const uint16_t event)
|
||||
{
|
||||
if (OB_LIKELY(event < CAPACITY)) {
|
||||
if (OB_LIKELY(size_ <= event)) {
|
||||
size_ = event + 1;
|
||||
}
|
||||
const int64_t now = common::ObTimeUtility::current_time();
|
||||
event_times_[event] += now - last_click_ts_;
|
||||
last_click_ts_ = now;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void ObCompactionTimeGuard::fmt_ts_to_meaningful_str(
|
||||
char *buf,
|
||||
const int64_t buf_len,
|
||||
int64_t &pos,
|
||||
const char *lvalue,
|
||||
const int64_t ts) const
|
||||
{
|
||||
common::databuff_printf(buf, buf_len, pos, "%s", lvalue);
|
||||
if (ts < 1_ms) {
|
||||
common::databuff_printf(buf, buf_len, pos, "=%ldus|", ts);
|
||||
} else if (ts < 1_s) {
|
||||
common::databuff_printf(buf, buf_len, pos, "=%.2lfms|", double(ts) / 1_ms);
|
||||
} else {
|
||||
common::databuff_printf(buf, buf_len, pos, "=%.2lfs|", double(ts) / 1_s);
|
||||
}
|
||||
}
|
||||
void ObCompactionTimeGuard::add_time_guard(const ObCompactionTimeGuard &other)
|
||||
{
|
||||
// last_click_ts_ is not useful here
|
||||
ObCompactionTimeGuard time_guard;
|
||||
int i = 0;
|
||||
int j = 0;
|
||||
while (i < idx_ && j < other.idx_) {
|
||||
if (line_array_[i] == other.line_array_[j]) {
|
||||
time_guard.line_array_[time_guard.idx_] = line_array_[i];
|
||||
time_guard.click_poinsts_[time_guard.idx_++] = click_poinsts_[i++] + other.click_poinsts_[j++];
|
||||
} else if (line_array_[i] < other.line_array_[j]) {
|
||||
time_guard.line_array_[time_guard.idx_] = line_array_[i];
|
||||
time_guard.click_poinsts_[time_guard.idx_++] = click_poinsts_[i++];
|
||||
} else {
|
||||
time_guard.line_array_[time_guard.idx_] = other.line_array_[j];
|
||||
time_guard.click_poinsts_[time_guard.idx_++] = other.click_poinsts_[j++];
|
||||
if (OB_LIKELY(guard_type_ == other.guard_type_ && CAPACITY == other.capacity_)) {
|
||||
size_ = std::max(size_, other.size_);
|
||||
for (uint16_t i = 0; i < size_; i++) {
|
||||
event_times_[i] += other.event_times_[i];
|
||||
}
|
||||
}
|
||||
while (i < idx_) {
|
||||
time_guard.line_array_[time_guard.idx_] = line_array_[i];
|
||||
time_guard.click_poinsts_[time_guard.idx_++] = click_poinsts_[i++];
|
||||
}
|
||||
while (j < other.idx_) {
|
||||
time_guard.line_array_[time_guard.idx_] = other.line_array_[j];
|
||||
time_guard.click_poinsts_[time_guard.idx_++] = other.click_poinsts_[j++];
|
||||
}
|
||||
*this = time_guard;
|
||||
}
|
||||
|
||||
ObCompactionTimeGuard & ObCompactionTimeGuard::operator=(const ObCompactionTimeGuard &other)
|
||||
{
|
||||
guard_type_ = other.guard_type_;
|
||||
capacity_ = other.capacity_;
|
||||
size_ = other.size_;
|
||||
last_click_ts_ = other.last_click_ts_;
|
||||
idx_ = other.idx_;
|
||||
for (int i = 0; i < other.idx_; ++i) {
|
||||
line_array_[i] = other.line_array_[i];
|
||||
click_poinsts_[i] = other.click_poinsts_[i];
|
||||
add_time_ = other.add_time_;
|
||||
for (uint16_t i = 0; i < other.size_; ++i) {
|
||||
event_times_[i] = other.event_times_[i];
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
uint16_t ObCompactionTimeGuard::get_max_event_count(const ObCompactionTimeGuardType guard_type)
|
||||
{
|
||||
uint16_t max_event_count = CAPACITY;
|
||||
if (RS_COMPACT_TIME_GUARD == guard_type) {
|
||||
max_event_count = ObRSCompactionTimeGuard::COMPACTION_EVENT_MAX;
|
||||
} else if (SCHEDULE_COMPACT_TIME_GUARD == guard_type) {
|
||||
max_event_count = ObCompactionScheduleTimeGuard::COMPACTION_EVENT_MAX;
|
||||
} else if (STORAGE_COMPACT_TIME_GUARD == guard_type) {
|
||||
max_event_count = ObStorageCompactionTimeGuard::COMPACTION_EVENT_MAX;
|
||||
}
|
||||
return max_event_count;
|
||||
}
|
||||
|
||||
/**
|
||||
* -------------------------------------------------------------------ObRSCompactionTimeGuard-------------------------------------------------------------------
|
||||
*/
|
||||
const char *ObRSCompactionTimeGuard::CompactionEventStr[] = {
|
||||
"PREPARE_UNFINISH_TABLE_IDS",
|
||||
"GET_TABLET_LS_PAIRS",
|
||||
"GET_TABLET_META_TABLE",
|
||||
"CKM_VERIFICATION"
|
||||
};
|
||||
|
||||
const char *ObRSCompactionTimeGuard::get_comp_event_str(enum CompactionEvent event)
|
||||
{
|
||||
STATIC_ASSERT(static_cast<int64_t>(COMPACTION_EVENT_MAX) == ARRAYSIZEOF(CompactionEventStr), "events str len is mismatch");
|
||||
STATIC_ASSERT(static_cast<int64_t>(COMPACTION_EVENT_MAX) <= static_cast<int64_t>(CAPACITY), "too many events, need update CAPACITY");
|
||||
const char *str = "";
|
||||
if (event >= COMPACTION_EVENT_MAX || event < PREPARE_UNFINISH_TABLE_IDS) {
|
||||
str = "invalid_type";
|
||||
} else {
|
||||
str = CompactionEventStr[event];
|
||||
}
|
||||
return str;
|
||||
}
|
||||
|
||||
int64_t ObRSCompactionTimeGuard::to_string(char *buf, const int64_t buf_len) const
|
||||
{
|
||||
int64_t pos = 0;
|
||||
for (uint16_t idx = 0; idx < size_; ++idx) {
|
||||
if (event_times_[idx] > 0) {
|
||||
fmt_ts_to_meaningful_str(buf, buf_len, pos, get_comp_event_str(static_cast<CompactionEvent>(idx)), event_times_[idx]);
|
||||
}
|
||||
}
|
||||
return pos;
|
||||
}
|
||||
|
||||
/**
|
||||
* ObCompactionScheduleTimeGuard Impl
|
||||
*/
|
||||
const char *ObCompactionScheduleTimeGuard::CompactionEventStr[] = {
|
||||
"GET_TABLET",
|
||||
"UPDATE_TABLET_REPORT_STATUS",
|
||||
"READ_MEDIUM_INFO",
|
||||
"SCHEDULE_NEXT_MEDIUM",
|
||||
"SCHEDULE_TABLET_MEDIUM",
|
||||
"FAST_FREEZE",
|
||||
"SEARCH_META_TABLE",
|
||||
"CHECK_META_TABLE",
|
||||
"SEARCH_CHECKSUM",
|
||||
"CHECK_CHECKSUM",
|
||||
"SCHEDULER_NEXT_ROUND"
|
||||
};
|
||||
|
||||
const char *ObCompactionScheduleTimeGuard::get_comp_event_str(enum CompactionEvent event)
|
||||
{
|
||||
STATIC_ASSERT(static_cast<int64_t>(COMPACTION_EVENT_MAX) == ARRAYSIZEOF(CompactionEventStr), "events str len is mismatch");
|
||||
STATIC_ASSERT(static_cast<int64_t>(COMPACTION_EVENT_MAX) <= static_cast<int64_t>(CAPACITY), "too many events, need update CAPACITY");
|
||||
const char *str = "";
|
||||
if (event >= COMPACTION_EVENT_MAX || event < GET_TABLET) {
|
||||
str = "invalid_type";
|
||||
} else {
|
||||
str = CompactionEventStr[event];
|
||||
}
|
||||
return str;
|
||||
}
|
||||
|
||||
int64_t ObCompactionScheduleTimeGuard::to_string(char *buf, const int64_t buf_len) const
|
||||
{
|
||||
int64_t pos = 0;
|
||||
for (int16_t idx = 0; idx < size_; ++idx) {
|
||||
if (event_times_[idx] > 0) {
|
||||
fmt_ts_to_meaningful_str(buf, buf_len, pos, get_comp_event_str(static_cast<CompactionEvent>(idx)), event_times_[idx]);
|
||||
}
|
||||
}
|
||||
return pos;
|
||||
}
|
||||
|
||||
/*
|
||||
* ----------------------------------------------ObCompactionTimeGuard--------------------------------------------------
|
||||
*/
|
||||
constexpr float ObStorageCompactionTimeGuard::COMPACTION_SHOW_PERCENT_THRESHOLD;
|
||||
const char *ObStorageCompactionTimeGuard::CompactionEventStr[] = {
|
||||
"WAIT_TO_SCHEDULE",
|
||||
"COMPACTION_POLICY",
|
||||
"PRE_PROCESS_TX_TABLE",
|
||||
"GET_PARALLEL_RANGE",
|
||||
"EXECUTE",
|
||||
"CREATE_SSTABLE",
|
||||
"UPDATE_TABLET",
|
||||
"RELEASE_MEMTABLE",
|
||||
"SCHEDULE_OTHER_COMPACTION",
|
||||
"DAG_FINISH"
|
||||
};
|
||||
|
||||
const char *ObStorageCompactionTimeGuard::get_comp_event_str(const enum CompactionEvent event)
|
||||
{
|
||||
STATIC_ASSERT(static_cast<int64_t>(COMPACTION_EVENT_MAX) == ARRAYSIZEOF(CompactionEventStr), "events str len is mismatch");
|
||||
STATIC_ASSERT(static_cast<int64_t>(COMPACTION_EVENT_MAX) <= static_cast<int64_t>(CAPACITY), "too many events, need update CAPACITY");
|
||||
const char *str = "";
|
||||
if (event >= COMPACTION_EVENT_MAX || event < DAG_WAIT_TO_SCHEDULE) {
|
||||
str = "invalid_type";
|
||||
} else {
|
||||
str = CompactionEventStr[event];
|
||||
}
|
||||
return str;
|
||||
}
|
||||
|
||||
int64_t ObStorageCompactionTimeGuard::to_string(char *buf, const int64_t buf_len) const
|
||||
{
|
||||
int64_t pos = 0;
|
||||
int64_t total_cost = 0;
|
||||
J_KV(K_(add_time));
|
||||
common::databuff_printf(buf, buf_len, pos, "|");
|
||||
if (size_ > DAG_WAIT_TO_SCHEDULE && event_times_[DAG_WAIT_TO_SCHEDULE] > COMPACTION_SHOW_TIME_THRESHOLD) {
|
||||
fmt_ts_to_meaningful_str(buf, buf_len, pos, "wait_schedule_time", event_times_[DAG_WAIT_TO_SCHEDULE]);
|
||||
}
|
||||
for (int64_t idx = COMPACTION_POLICY; idx < size_; ++idx) {
|
||||
total_cost += event_times_[idx];
|
||||
}
|
||||
if (total_cost > COMPACTION_SHOW_TIME_THRESHOLD) {
|
||||
float ratio = 0;
|
||||
for (int64_t idx = COMPACTION_POLICY; idx < size_; ++idx) {
|
||||
const uint32_t time_interval = event_times_[idx]; // include the retry time since previous event
|
||||
ratio = (float)(time_interval)/ total_cost;
|
||||
if (ratio >= COMPACTION_SHOW_PERCENT_THRESHOLD || time_interval >= COMPACTION_SHOW_TIME_THRESHOLD) {
|
||||
fmt_ts_to_meaningful_str(buf, buf_len, pos, get_comp_event_str(static_cast<CompactionEvent>(idx)), event_times_[idx]);
|
||||
if (ratio > 0.01) {
|
||||
common::databuff_printf(buf, buf_len, pos, "(%.2f)", ratio);
|
||||
}
|
||||
common::databuff_printf(buf, buf_len, pos, "|");
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt_ts_to_meaningful_str(buf, buf_len, pos, "total", total_cost);
|
||||
if (pos != 0 && pos < buf_len) {
|
||||
buf[pos - 1] = ';';
|
||||
}
|
||||
|
||||
if (pos != 0 && pos < buf_len) {
|
||||
pos -= 1;
|
||||
}
|
||||
return pos;
|
||||
}
|
||||
|
||||
|
||||
} // namespace compaction
|
||||
} // namespace oceanbase
|
||||
|
@ -10,48 +10,169 @@
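// A standalone sketch (illustrative only) of the pattern used by the
// get_comp_event_str() helpers above: a string table kept in lock-step with an
// enum, a static_assert that fails to compile when the two drift apart, and a
// range check that maps unknown values to a sentinel string.
#include <cstdio>

enum CompactionEvent : unsigned { PREPARE = 0, EXECUTE, FINISH, EVENT_MAX };

static const char *kEventStr[] = {"PREPARE", "EXECUTE", "FINISH"};
static_assert(sizeof(kEventStr) / sizeof(kEventStr[0]) == EVENT_MAX,
              "event string table out of sync with enum");

static const char *event_str(CompactionEvent e)
{
  return (e < EVENT_MAX) ? kEventStr[e] : "invalid_type";
}

int main()
{
  std::printf("%s %s\n", event_str(EXECUTE), event_str(static_cast<CompactionEvent>(42)));
  return 0;
}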
|
||||
#ifndef OB_SHARE_COMPACTION_COMPACTION_TIME_GUARD_H_
|
||||
#define OB_SHARE_COMPACTION_COMPACTION_TIME_GUARD_H_
|
||||
#include "share/ob_occam_time_guard.h"
|
||||
#include "lib/container/ob_se_array.h"
|
||||
namespace oceanbase
|
||||
{
|
||||
namespace compaction
|
||||
{
|
||||
|
||||
class ObCompactionTimeGuard : public common::occam::ObOccamTimeGuard
|
||||
/*
|
||||
* ObCompactionTimeGuard refers to the implementation of from ObOccamTimeGuard.
|
||||
* For example, you have 3 enum events {e0, e1, e2}
|
||||
* If you want to record the time cost of event e1, you can use click(e1), then event_times_[1] will accumulate the time cost of e1.
|
||||
*
|
||||
* ObCompactionTimeGuard
|
||||
* -- ObRSCompactionTimeGuard
|
||||
* -- ObScheduleCompactionTimeGuard
|
||||
* -- ObStorageCompactionTimeGuard
|
||||
*/
|
||||
class ObCompactionTimeGuard
|
||||
{
|
||||
public:
|
||||
const static uint64_t WARN_THRESHOLD = 30L * 1000 * 1000; // 30s
|
||||
ObCompactionTimeGuard(const uint64_t warn_threshold = WARN_THRESHOLD, const char *mod = "")
|
||||
: ObOccamTimeGuard(warn_threshold, nullptr, nullptr, mod),
|
||||
add_time_(0)
|
||||
{}
|
||||
virtual ~ObCompactionTimeGuard() {}
|
||||
virtual int64_t to_string(char *buf, const int64_t buf_len) const
|
||||
enum ObCompactionTimeGuardType : uint8_t
|
||||
{
|
||||
UNUSEDx(buf, buf_len);
|
||||
return 0;
|
||||
BASE_COMPACT_TIME_GUARD = 0,
|
||||
RS_COMPACT_TIME_GUARD = 1,
|
||||
SCHEDULE_COMPACT_TIME_GUARD = 2,
|
||||
STORAGE_COMPACT_TIME_GUARD = 3,
|
||||
MAX_COMPACT_TIME_GUARD
|
||||
};
|
||||
public:
|
||||
const static uint64_t WARN_THRESHOLD = 30L * 1000 * 1000; // 30s
|
||||
const static uint16_t CAPACITY = 16;
|
||||
ObCompactionTimeGuard(const ObCompactionTimeGuardType gurad_type = BASE_COMPACT_TIME_GUARD,
|
||||
const uint64_t warn_threshold = WARN_THRESHOLD,
|
||||
const char *mod = "")
|
||||
: guard_type_(gurad_type),
|
||||
warn_threshold_(warn_threshold),
|
||||
log_mod_(mod),
|
||||
capacity_(get_max_event_count(gurad_type)),
|
||||
size_(0),
|
||||
last_click_ts_(common::ObTimeUtility::current_time()),
|
||||
add_time_(common::ObTimeUtility::current_time())
|
||||
{
|
||||
reuse();
|
||||
}
|
||||
virtual ~ObCompactionTimeGuard();
|
||||
virtual int64_t to_string(char *buf, const int64_t buf_len) const;
|
||||
void reuse();
|
||||
void add_time_guard(const ObCompactionTimeGuard &other);
|
||||
ObCompactionTimeGuard & operator=(const ObCompactionTimeGuard &other);
|
||||
OB_INLINE bool is_empty() const { return 0 == idx_; }
|
||||
OB_INLINE bool is_empty() const { return 0 == size_; }
|
||||
// set the dag add_time as the first click time
|
||||
OB_INLINE void set_last_click_ts(const int64_t time)
|
||||
{
|
||||
last_click_ts_ = time;
|
||||
add_time_ = time;
|
||||
}
|
||||
OB_INLINE uint32_t get_specified_cost_time(const int64_t line) const {
|
||||
OB_INLINE uint32_t get_specified_cost_time(const int64_t event) const {
|
||||
uint32_t ret_val = 0;
|
||||
for (int64_t idx = 0; idx < idx_; ++idx) {
|
||||
if (line_array_[idx] == line) {
|
||||
ret_val = click_poinsts_[idx];
|
||||
break;
|
||||
}
|
||||
if (OB_LIKELY(event < size_)) {
|
||||
ret_val = event_times_[event];
|
||||
}
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
bool click(const uint16_t event);
|
||||
// copy from ObOccamTimeGuard
|
||||
void fmt_ts_to_meaningful_str(
|
||||
char *buf,
|
||||
const int64_t buf_len,
|
||||
int64_t &pos,
|
||||
const char *lvalue,
|
||||
const int64_t ts) const;
|
||||
public:
|
||||
template <typename E>
|
||||
static constexpr uint16_t event_idx(E e) { return static_cast<uint16_t>(e); }
|
||||
static uint16_t get_max_event_count(const ObCompactionTimeGuardType guard_type);
|
||||
public:
|
||||
ObCompactionTimeGuardType guard_type_;
|
||||
const uint64_t warn_threshold_;
|
||||
const char * log_mod_;
|
||||
uint16_t capacity_; // equal with CAPACITY, used for child class to check the array boundary
|
||||
uint16_t size_;
|
||||
int64_t last_click_ts_;
|
||||
int64_t add_time_;
|
||||
uint64_t event_times_[CAPACITY];
|
||||
};
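For illustration, a minimal usage sketch of the pattern described in the comment above; MyTimeGuard and the call site are hypothetical names invented for this sketch and are not part of the patch. The derived guards below (ObRSCompactionTimeGuard, ObCompactionScheduleTimeGuard, ObStorageCompactionTimeGuard) follow the same pattern with their own CompactionEvent enums.

// Hypothetical usage sketch (not part of this patch): declare an event enum,
// click() after each phase, and the guard accumulates per-event elapsed time.
struct MyTimeGuard : public ObCompactionTimeGuard
{
  enum MyEvent : uint16_t { E0 = 0, E1, E2, EVENT_MAX };
};

void run_phases_with_guard()
{
  MyTimeGuard guard;
  // ... run phase 0 (placeholder for real work) ...
  guard.click(MyTimeGuard::event_idx(MyTimeGuard::E0));
  // ... run phase 1 (placeholder for real work) ...
  guard.click(MyTimeGuard::event_idx(MyTimeGuard::E1));
  // time spent in phase 1 alone:
  const uint32_t e1_cost_us = guard.get_specified_cost_time(MyTimeGuard::event_idx(MyTimeGuard::E1));
  UNUSED(e1_cost_us);
}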
|
||||
|
||||
struct ObRSCompactionTimeGuard : public ObCompactionTimeGuard
|
||||
{
|
||||
public:
|
||||
ObRSCompactionTimeGuard()
|
||||
: ObCompactionTimeGuard(RS_COMPACT_TIME_GUARD, UINT64_MAX, "[RS] ")
|
||||
{}
|
||||
virtual ~ObRSCompactionTimeGuard() {}
|
||||
enum CompactionEvent : uint16_t {
|
||||
PREPARE_UNFINISH_TABLE_IDS = 0,
|
||||
GET_TABLET_LS_PAIRS,
|
||||
GET_TABLET_META_TABLE,
|
||||
CKM_VERIFICATION,
|
||||
COMPACTION_EVENT_MAX,
|
||||
};
|
||||
virtual int64_t to_string(char *buf, const int64_t buf_len) const override;
|
||||
private:
|
||||
const static char *CompactionEventStr[];
|
||||
static const char *get_comp_event_str(const enum CompactionEvent event);
|
||||
};
|
||||
struct ObCompactionScheduleTimeGuard : public ObCompactionTimeGuard
|
||||
{
|
||||
public:
|
||||
ObCompactionScheduleTimeGuard()
|
||||
: ObCompactionTimeGuard(SCHEDULE_COMPACT_TIME_GUARD, UINT64_MAX, "[STORAGE] ")
|
||||
{}
|
||||
virtual ~ObCompactionScheduleTimeGuard() {}
|
||||
enum CompactionEvent : uint16_t {
|
||||
// medium scheduler
|
||||
GET_TABLET = 0,
|
||||
UPDATE_TABLET_REPORT_STATUS,
|
||||
READ_MEDIUM_INFO,
|
||||
SCHEDULE_NEXT_MEDIUM,
|
||||
SCHEDULE_TABLET_MEDIUM,
|
||||
FAST_FREEZE,
|
||||
// medium checker
|
||||
SEARCH_META_TABLE,
|
||||
CHECK_META_TABLE,
|
||||
SEARCH_CHECKSUM,
|
||||
CHECK_CHECKSUM,
|
||||
SCHEDULER_NEXT_ROUND,
|
||||
COMPACTION_EVENT_MAX
|
||||
};
|
||||
virtual int64_t to_string(char *buf, const int64_t buf_len) const override;
|
||||
private:
|
||||
const static char *CompactionEventStr[];
|
||||
static const char *get_comp_event_str(const enum CompactionEvent event);
|
||||
};
|
||||
|
||||
struct ObStorageCompactionTimeGuard : public ObCompactionTimeGuard
|
||||
{
|
||||
public:
|
||||
ObStorageCompactionTimeGuard()
|
||||
: ObCompactionTimeGuard(STORAGE_COMPACT_TIME_GUARD, COMPACTION_WARN_THRESHOLD_RATIO, "[STORAGE] ")
|
||||
{}
|
||||
virtual ~ObStorageCompactionTimeGuard() {}
|
||||
enum CompactionEvent : uint16_t {
|
||||
DAG_WAIT_TO_SCHEDULE = 0,
|
||||
COMPACTION_POLICY,
|
||||
PRE_PROCESS_TX_TABLE,
|
||||
GET_PARALLEL_RANGE,
|
||||
EXECUTE,
|
||||
CREATE_SSTABLE,
|
||||
UPDATE_TABLET,
|
||||
RELEASE_MEMTABLE,
|
||||
SCHEDULE_OTHER_COMPACTION,
|
||||
DAG_FINISH,
|
||||
COMPACTION_EVENT_MAX
|
||||
};
|
||||
virtual int64_t to_string(char *buf, const int64_t buf_len) const override;
|
||||
private:
|
||||
const static char *CompactionEventStr[];
|
||||
static const char *get_comp_event_str(const enum CompactionEvent event);
|
||||
static const int64_t COMPACTION_WARN_THRESHOLD_RATIO = 60 * 1000L * 1000L; // 1 min
|
||||
static constexpr float COMPACTION_SHOW_PERCENT_THRESHOLD = 0.1;
|
||||
static const int64_t COMPACTION_SHOW_TIME_THRESHOLD = 1 * 1000L * 1000L; // 1s
|
||||
};
|
||||
|
||||
|
||||
} // namespace compaction
|
||||
} // namespace oceanbase
|
||||
|
||||
|
@ -147,7 +147,17 @@ struct ObFixedDoubleCmp: public ObDefined<>
|
||||
cmp_ret = 0;
|
||||
const double l = l_datum.get_double();
|
||||
const double r = r_datum.get_double();
|
||||
if (l == r || fabs(l - r) < P) {
|
||||
if (isnan(l) || isnan(r)) {
|
||||
if (isnan(l) && isnan(r)) {
|
||||
cmp_ret = 0;
|
||||
} else if (isnan(l)) {
|
||||
// l is nan, r is not nan:left always bigger than right
|
||||
cmp_ret = 1;
|
||||
} else {
|
||||
// l is not nan, r is nan, left always less than right
|
||||
cmp_ret = -1;
|
||||
}
|
||||
} else if (l == r || fabs(l - r) < P) {
|
||||
cmp_ret = 0;
|
||||
} else {
|
||||
cmp_ret = (l < r ? -1 : 1);
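The same ordering rule, restated as a self-contained sketch (nothing assumed beyond <cmath>; P is the fixed comparison tolerance used above): two NaNs compare equal, a NaN sorts above any non-NaN value, and non-NaN values within the tolerance compare equal.

#include <cmath>

// Standalone restatement of the fixed-double comparison rule in the hunk above.
static int fixed_double_cmp(const double l, const double r, const double p)
{
  int cmp_ret = 0;
  if (std::isnan(l) || std::isnan(r)) {
    cmp_ret = (std::isnan(l) && std::isnan(r)) ? 0 : (std::isnan(l) ? 1 : -1);
  } else if (l == r || std::fabs(l - r) < p) {
    cmp_ret = 0;
  } else {
    cmp_ret = (l < r ? -1 : 1);
  }
  return cmp_ret;
}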
|
||||
|
@ -60,7 +60,7 @@ int ObInnerTableSchema::gv_ob_sstables_schema(ObTableSchema &table_schema)
|
||||
table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset()));
|
||||
|
||||
if (OB_SUCC(ret)) {
|
||||
if (OB_FAIL(table_schema.set_view_definition(R"__( SELECT M.SVR_IP, M.SVR_PORT, (case M.TABLE_TYPE when 0 then 'MEMTABLE' when 1 then 'TX_DATA_MEMTABLE' when 2 then 'TX_CTX_MEMTABLE' when 3 then 'LOCK_MEMTABLE' when 4 then 'DIRECT_LOAD_MEMTABLE' when 10 then 'MAJOR' when 11 then 'MINOR' when 12 then 'MINI' when 13 then 'META' when 14 then 'DDL_DUMP' when 15 then 'REMOTE_LOGICAL_MINOR' when 16 then 'DDL_MEM' when 17 then 'CO_MAJOR' when 18 then 'NORMAL_CG' when 19 then 'ROWKEY_CG' when 20 then 'DDL_MERGE' else 'INVALID' end) as TABLE_TYPE, M.TENANT_ID, M.LS_ID, M.TABLET_ID, M.CG_IDX, M.START_LOG_SCN, M.END_LOG_SCN, M.DATA_CHECKSUM, M.SIZE, M.REF, M.UPPER_TRANS_VERSION, M.IS_ACTIVE, M.CONTAIN_UNCOMMITTED_ROW FROM oceanbase.__all_virtual_table_mgr M )__"))) {
|
||||
if (OB_FAIL(table_schema.set_view_definition(R"__( SELECT M.SVR_IP, M.SVR_PORT, (case M.TABLE_TYPE when 0 then 'MEMTABLE' when 1 then 'TX_DATA_MEMTABLE' when 2 then 'TX_CTX_MEMTABLE' when 3 then 'LOCK_MEMTABLE' when 4 then 'DIRECT_LOAD_MEMTABLE' when 10 then 'MAJOR' when 11 then 'MINOR' when 12 then 'MINI' when 13 then 'META' when 14 then 'DDL_DUMP' when 15 then 'REMOTE_LOGICAL_MINOR' when 16 then 'DDL_MEM' when 17 then 'CO_MAJOR' when 18 then 'NORMAL_CG' when 19 then 'ROWKEY_CG' when 20 then 'COL_ORIENTED_META' when 21 then 'DDL_MERGE_CO' when 22 then 'DDL_MERGE_CG' when 23 then 'DDL_MEM_CO' when 24 then 'DDL_MEM_CG' when 25 then 'DDL_MEM_MINI_SSTABLE' when 26 then 'MDS_MINI' when 27 then 'MDS_MINOR' else 'INVALID' end) as TABLE_TYPE, M.TENANT_ID, M.LS_ID, M.TABLET_ID, M.CG_IDX, M.START_LOG_SCN, M.END_LOG_SCN, M.DATA_CHECKSUM, M.SIZE, M.REF, M.UPPER_TRANS_VERSION, M.IS_ACTIVE, M.CONTAIN_UNCOMMITTED_ROW FROM oceanbase.__all_virtual_table_mgr M )__"))) {
|
||||
LOG_ERROR("fail to set view_definition", K(ret));
|
||||
}
|
||||
}
|
||||
|
@ -310,7 +310,7 @@ int ObInnerTableSchema::gv_ob_sstables_ora_schema(ObTableSchema &table_schema)
|
||||
table_schema.set_collation_type(ObCharset::get_default_collation(ObCharset::get_default_charset()));
|
||||
|
||||
if (OB_SUCC(ret)) {
|
||||
if (OB_FAIL(table_schema.set_view_definition(R"__( SELECT M.SVR_IP, M.SVR_PORT, (case M.TABLE_TYPE when 0 then 'MEMTABLE' when 1 then 'TX_DATA_MEMTABLE' when 2 then 'TX_CTX_MEMTABLE' when 3 then 'LOCK_MEMTABLE' when 4 then 'DIRECT_LOAD_MEMTABLE' when 10 then 'MAJOR' when 11 then 'MINOR' when 12 then 'MINI' when 13 then 'META' when 14 then 'DDL_DUMP' when 15 then 'REMOTE_LOGICAL_MINOR' when 16 then 'IMC_SEGMENT' when 20 then 'DDL_MERGE' else 'INVALID' end) as TABLE_TYPE, M.LS_ID, M.TABLET_ID, M.START_LOG_SCN, M.END_LOG_SCN, M."SIZE", M.REF, M.UPPER_TRANS_VERSION, M.IS_ACTIVE, M.CONTAIN_UNCOMMITTED_ROW FROM SYS.ALL_VIRTUAL_TABLE_MGR M )__"))) {
|
||||
if (OB_FAIL(table_schema.set_view_definition(R"__( SELECT M.SVR_IP, M.SVR_PORT, (case M.TABLE_TYPE when 0 then 'MEMTABLE' when 1 then 'TX_DATA_MEMTABLE' when 2 then 'TX_CTX_MEMTABLE' when 3 then 'LOCK_MEMTABLE' when 4 then 'DIRECT_LOAD_MEMTABLE' when 10 then 'MAJOR' when 11 then 'MINOR' when 12 then 'MINI' when 13 then 'META' when 14 then 'DDL_DUMP' when 15 then 'REMOTE_LOGICAL_MINOR' when 16 then 'DDL_MEM' when 17 then 'CO_MAJOR' when 18 then 'NORMAL_CG' when 19 then 'ROWKEY_CG' when 20 then 'COL_ORIENTED_META' when 21 then 'DDL_MERGE_CO' when 22 then 'DDL_MERGE_CG' when 23 then 'DDL_MEM_CO' when 24 then 'DDL_MEM_CG' when 25 then 'DDL_MEM_MINI_SSTABLE' when 26 then 'MDS_MINI' when 27 then 'MDS_MINOR' else 'INVALID' end) as TABLE_TYPE, M.LS_ID, M.TABLET_ID, M.START_LOG_SCN, M.END_LOG_SCN, M."SIZE", M.REF, M.UPPER_TRANS_VERSION, M.IS_ACTIVE, M.CONTAIN_UNCOMMITTED_ROW FROM SYS.ALL_VIRTUAL_TABLE_MGR M )__"))) {
|
||||
LOG_ERROR("fail to set view_definition", K(ret));
|
||||
}
|
||||
}
|
||||
|
@ -14382,6 +14382,8 @@ def_table_schema(
|
||||
)
|
||||
|
||||
# 12488: __all_virtual_scheduler_job_run_detail_v2
|
||||
# 12489: __all_virtual_deadlock_detector_stat
|
||||
# 12490: __all_virtual_spatial_reference_systems
|
||||
|
||||
# Reserved slot (place new entries before this line)
# Placeholder convention for this section: use the real table name as the placeholder
|
||||
@ -14861,6 +14863,7 @@ def_table_schema(**no_direct_access(gen_oracle_mapping_virtual_table_def('15451'
|
||||
def_table_schema(**no_direct_access(gen_oracle_mapping_virtual_table_def('15456', all_def_keywords['__all_virtual_nic_info'])))
|
||||
# 15457: __all_virtual_query_response_time
|
||||
# 15458: __all_scheduler_job_run_detail_v2
|
||||
# 15459: __all_spatial_reference_systems
|
||||
#
|
||||
# Reserved slot (place new entries before this line)
# Oracle table names defined in this section are relatively complex and are usually defined via gen_xxx_table_def(); use the base table name as the placeholder
|
||||
@ -17846,7 +17849,10 @@ SELECT
|
||||
when 3 then 'LOCK_MEMTABLE' when 4 then 'DIRECT_LOAD_MEMTABLE' when 10 then 'MAJOR' when 11 then 'MINOR'
|
||||
when 12 then 'MINI' when 13 then 'META'
|
||||
when 14 then 'DDL_DUMP' when 15 then 'REMOTE_LOGICAL_MINOR' when 16 then 'DDL_MEM'
|
||||
when 17 then 'CO_MAJOR' when 18 then 'NORMAL_CG' when 19 then 'ROWKEY_CG' when 20 then 'DDL_MERGE'
|
||||
when 17 then 'CO_MAJOR' when 18 then 'NORMAL_CG' when 19 then 'ROWKEY_CG' when 20 then 'COL_ORIENTED_META'
|
||||
when 21 then 'DDL_MERGE_CO' when 22 then 'DDL_MERGE_CG' when 23 then 'DDL_MEM_CO'
|
||||
when 24 then 'DDL_MEM_CG' when 25 then 'DDL_MEM_MINI_SSTABLE'
|
||||
when 26 then 'MDS_MINI' when 27 then 'MDS_MINOR'
|
||||
else 'INVALID'
|
||||
end) as TABLE_TYPE,
|
||||
M.TENANT_ID,
|
||||
@ -56036,7 +56042,11 @@ SELECT
|
||||
when 0 then 'MEMTABLE' when 1 then 'TX_DATA_MEMTABLE' when 2 then 'TX_CTX_MEMTABLE'
|
||||
when 3 then 'LOCK_MEMTABLE' when 4 then 'DIRECT_LOAD_MEMTABLE' when 10 then 'MAJOR' when 11 then 'MINOR'
|
||||
when 12 then 'MINI' when 13 then 'META'
|
||||
when 14 then 'DDL_DUMP' when 15 then 'REMOTE_LOGICAL_MINOR' when 16 then 'IMC_SEGMENT' when 20 then 'DDL_MERGE'
|
||||
when 14 then 'DDL_DUMP' when 15 then 'REMOTE_LOGICAL_MINOR' when 16 then 'DDL_MEM'
|
||||
when 17 then 'CO_MAJOR' when 18 then 'NORMAL_CG' when 19 then 'ROWKEY_CG' when 20 then 'COL_ORIENTED_META'
|
||||
when 21 then 'DDL_MERGE_CO' when 22 then 'DDL_MERGE_CG' when 23 then 'DDL_MEM_CO'
|
||||
when 24 then 'DDL_MEM_CG' when 25 then 'DDL_MEM_MINI_SSTABLE'
|
||||
when 26 then 'MDS_MINI' when 27 then 'MDS_MINOR'
|
||||
else 'INVALID'
|
||||
end) as TABLE_TYPE,
|
||||
M.LS_ID,
|
||||
@ -61940,6 +61950,7 @@ def_table_schema(
|
||||
)
|
||||
# 28232: GV$OB_QUERY_RESPONSE_TIME_HISTOGRAM
|
||||
# 28233: V$OB_QUERY_RESPONSE_TIME_HISTOGRAM
|
||||
# 28234: DBA_OB_SPATIAL_COLUMNS
|
||||
#
|
||||
# Reserved slot (place new entries before this line)
# Placeholder convention for this section: use the real view name as the placeholder
|
||||
|
@ -7,7 +7,7 @@ CREATE OR REPLACE PACKAGE BODY dbms_stats
|
||||
tabname VARCHAR(65535),
|
||||
partname VARCHAR(65535) DEFAULT NULL,
|
||||
estimate_percent DECIMAL DEFAULT AUTO_SAMPLE_SIZE,
|
||||
block_sample BOOLEAN DEFAULT FALSE,
|
||||
block_sample BOOLEAN DEFAULT NULL,
|
||||
method_opt VARCHAR(65535) DEFAULT DEFAULT_METHOD_OPT,
|
||||
degree DECIMAL DEFAULT NULL,
|
||||
granularity VARCHAR(65535) DEFAULT DEFAULT_GRANULARITY,
|
||||
@ -24,7 +24,7 @@ CREATE OR REPLACE PACKAGE BODY dbms_stats
|
||||
PROCEDURE gather_schema_stats (
|
||||
ownname VARCHAR(65535),
|
||||
estimate_percent DECIMAL DEFAULT AUTO_SAMPLE_SIZE,
|
||||
block_sample BOOLEAN DEFAULT FALSE,
|
||||
block_sample BOOLEAN DEFAULT NULL,
|
||||
method_opt VARCHAR(65535) DEFAULT DEFAULT_METHOD_OPT,
|
||||
degree DECIMAL DEFAULT NULL,
|
||||
granularity VARCHAR(65535) DEFAULT DEFAULT_GRANULARITY,
|
||||
|
@ -13,7 +13,7 @@ create or replace PACKAGE dbms_stats AUTHID CURRENT_USER
|
||||
tabname VARCHAR(65535),
|
||||
partname VARCHAR(65535) DEFAULT NULL,
|
||||
estimate_percent DECIMAL DEFAULT AUTO_SAMPLE_SIZE,
|
||||
block_sample BOOLEAN DEFAULT FALSE,
|
||||
block_sample BOOLEAN DEFAULT NULL,
|
||||
method_opt VARCHAR(65535) DEFAULT DEFAULT_METHOD_OPT,
|
||||
degree DECIMAL DEFAULT NULL,
|
||||
granularity VARCHAR(65535) DEFAULT DEFAULT_GRANULARITY,
|
||||
@ -29,7 +29,7 @@ create or replace PACKAGE dbms_stats AUTHID CURRENT_USER
|
||||
PROCEDURE gather_schema_stats (
|
||||
ownname VARCHAR(65535),
|
||||
estimate_percent DECIMAL DEFAULT AUTO_SAMPLE_SIZE,
|
||||
block_sample BOOLEAN DEFAULT FALSE,
|
||||
block_sample BOOLEAN DEFAULT NULL,
|
||||
method_opt VARCHAR(65535) DEFAULT DEFAULT_METHOD_OPT,
|
||||
degree DECIMAL DEFAULT NULL,
|
||||
granularity VARCHAR(65535) DEFAULT DEFAULT_GRANULARITY,
|
||||
|
@ -878,7 +878,7 @@ int ObTenantIOManager::inner_aio(const ObIOInfo &info, ObIOHandle &handle)
|
||||
} else if (OB_UNLIKELY(!is_working())) {
|
||||
ret = OB_STATE_NOT_MATCH;
|
||||
LOG_WARN("tenant not working", K(ret), K(tenant_id_));
|
||||
} else if (NULL != detector && detector->is_data_disk_has_fatal_error()) {
|
||||
} else if (SLOG_IO != info.flag_.get_sys_module_id() && NULL != detector && detector->is_data_disk_has_fatal_error()) {
|
||||
ret = OB_DISK_HUNG;
|
||||
// to help locate a transient issue, record the lbt (backtrace) of log replay
|
||||
LOG_DBA_ERROR(OB_DISK_HUNG, "msg", "disk has fatal error");
|
||||
|
@ -174,6 +174,7 @@ cal_version(const uint64_t major, const uint64_t minor, const uint64_t major_pat
|
||||
#define CLUSTER_VERSION_4_2_1_2 (oceanbase::common::cal_version(4, 2, 1, 2))
|
||||
#define MOCK_CLUSTER_VERSION_4_2_1_3 (oceanbase::common::cal_version(4, 2, 1, 3))
|
||||
#define MOCK_CLUSTER_VERSION_4_2_1_4 (oceanbase::common::cal_version(4, 2, 1, 4))
|
||||
#define MOCK_CLUSTER_VERSION_4_2_1_7 (oceanbase::common::cal_version(4, 2, 1, 7))
|
||||
#define CLUSTER_VERSION_4_2_2_0 (oceanbase::common::cal_version(4, 2, 2, 0))
|
||||
#define MOCK_CLUSTER_VERSION_4_2_2_1 (oceanbase::common::cal_version(4, 2, 2, 1))
|
||||
#define MOCK_CLUSTER_VERSION_4_2_3_0 (oceanbase::common::cal_version(4, 2, 3, 0))
|
||||
|
@ -108,6 +108,10 @@ enum ObDDLType
|
||||
DDL_CHANGE_COLUMN_NAME = 10003,
|
||||
DDL_DROP_COLUMN_INSTANT = 10004,
|
||||
DDL_ALTER_PARTITION_AUTO_SPLIT_ATTRIBUTE = 10005, // auto table auto partition // online
|
||||
DDL_ADD_COLUMN_INSTANT = 10006, // add after/before column
|
||||
DDL_MODIFY_COLUMN_ONLINE = 10007,
|
||||
DDL_COMPOUND_ONLINE = 10008,
|
||||
DDL_COMPOUND_INSTANT = 10009,
|
||||
///< @note add new normal ddl type before this line
|
||||
DDL_MAX
|
||||
};
|
||||
@ -819,4 +823,3 @@ public:
|
||||
} // end namespace oceanbase
|
||||
|
||||
#endif // OCEANBASE_SHARE_OB_DDL_COMMON_H
|
||||
|
||||
|
@ -605,6 +605,7 @@ class ObString;
|
||||
ACT(BEFORE_START_TRANSFER_GET_TABLET_META,)\
|
||||
ACT(BEFORE_ADD_REFRESH_SCHEMA_TASK,)\
|
||||
ACT(BEFORE_ADD_ASYNC_REFRESH_SCHEMA_TASK,)\
|
||||
ACT(AFTER_MEMBERLIST_CHANGED,)\
|
||||
ACT(MAX_DEBUG_SYNC_POINT,)
|
||||
|
||||
DECLARE_ENUM(ObDebugSyncPoint, debug_sync_point, OB_DEBUG_SYNC_POINT_DEF);
|
||||
|
@ -1343,5 +1343,21 @@ int ObDMLExecHelper::check_row_exist(const char *table_name,
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ObDMLSqlSplicer::add_long_double_column(const char *col_name, const double value)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
const bool is_primary_key = false;
|
||||
const bool is_null = false;
|
||||
if (OB_ISNULL(col_name)) {
|
||||
ret = OB_INVALID_ARGUMENT;
|
||||
LOG_WARN("invalid column name", K(ret), KP(col_name));
|
||||
} else if (OB_FAIL(values_.append_fmt("%.17g", value))) {
|
||||
LOG_WARN("append value failed", K(ret));
|
||||
} else if (OB_FAIL(add_column(is_primary_key, is_null, col_name))) {
|
||||
LOG_WARN("add column failed", K(ret), K(is_primary_key), K(is_null), K(col_name));
|
||||
}
|
||||
return ret;
|
||||
}
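The "%.17g" format above is used because 17 significant decimal digits are enough to round-trip any IEEE-754 double, so the text written into the DML statement parses back to the identical value. A minimal standalone check of that property (illustrative only, not part of the patch):

#include <cassert>
#include <cstdio>
#include <cstdlib>

int main()
{
  const double value = 0.1 + 0.2;             // not exactly representable in binary64
  char buf[64] = {0};
  snprintf(buf, sizeof(buf), "%.17g", value); // same format as add_long_double_column
  const double parsed = strtod(buf, nullptr);
  assert(parsed == value);                    // bit-exact round trip
  return 0;
}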
|
||||
|
||||
} // end namespace share
|
||||
} // end namespace oceanbase
|
||||
|
@ -100,6 +100,7 @@ public:
|
||||
int add_uint64_column(const char *col_name, const uint64_t value);
|
||||
int add_time_column(const char *col_name, const int64_t now, bool is_pk = false);
|
||||
int add_raw_time_column(const char *col_name, const int64_t now);
|
||||
int add_long_double_column(const char *col_name, const double value);
|
||||
// mark end of one row
|
||||
int finish_row();
|
||||
|
||||
|
File diff suppressed because one or more lines are too long
@ -9,6 +9,7 @@
|
||||
// Load balancing (Transfer) error code range [-7100, -7200)
// GIS error code range [-7201, -7400)
// XML error code range [-7400, -7600)
// ARRAY error code range [-7600, -7700)
// Fatal errors [-8000, -9000); when a client receives an 8xxx error it must close the SQL connection
// Storage error code range [-9000, -9500)
// PL/SQL error code range [-9500, -10000)
|
||||
@ -2136,6 +2137,26 @@ DEFINE_ORACLE_ERROR(OB_ERR_XML_PARENT_ALREADY_CONTAINS_CHILD, -7433, -1, "42000"
|
||||
// 4. After modifying this file, run gen_errno.pl to regenerate ob_errno.h
|
||||
////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// ARRAY error code range [-7600, -7700)
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DEFINE_ORACLE_ERROR_EXT_DEP(OB_ERR_INVALID_VECTOR_DIM, -7600, -1, "22000", "Invalid dimension for vector.", "inconsistent dimension: expected %u got %u", 932, "inconsistent dimension", "inconsistent dimension: expected %u got %u");
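A hypothetical call site for the new error code (illustrative only, not part of this patch); the two %u arguments of the user error message are the expected and the actual vector dimension, and actual_dim/expected_dim below are placeholder names:

if (OB_UNLIKELY(actual_dim != expected_dim)) {   // actual_dim / expected_dim are placeholders
  ret = OB_ERR_INVALID_VECTOR_DIM;
  LOG_USER_ERROR(OB_ERR_INVALID_VECTOR_DIM, expected_dim, actual_dim);
  LOG_WARN("invalid vector dimension", K(ret), K(expected_dim), K(actual_dim));
}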
// Reserved slot
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// ARRAY error code range [-7600, -7700)
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////// Notes on adding (reserving) error codes ////////////////////////////////////
// 1. New error codes must be committed on the master branch first, so that master stays a superset of all versions and error code conflicts are avoided
// 2. "Reserving" an error code with a comment is not supported; to add an error code, commit the complete definition directly
// 3. Different error code ranges have different meanings; define new codes sequentially within the appropriate range (see the file header for the range definitions).
//    Within each range, define new error codes before the "Reserved slot" line.
//    If no suitable range exists, contact @修铭 to define a new one.
// 4. After modifying this file, run gen_errno.pl to regenerate ob_errno.h
|
||||
////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// Fatal errors [-8000, -9000); when a client receives an 8xxx error it must close the SQL connection
|
||||
////////////////////////////////////////////////////////////////
|
||||
|
@ -3720,6 +3720,7 @@ constexpr int OB_ERR_INVALID_DATE_MSG_FMT_V2 = -4219;
|
||||
#define OB_ERR_DUP_DEF_NAMESPACE__USER_ERROR_MSG "XQST0066 - duplicate default namespace definition - %s."
|
||||
#define OB_ERR_COMPARE_VARRAY_LOB_ATTR__USER_ERROR_MSG "cannot compare VARRAY or LOB attributes of an object type"
|
||||
#define OB_ERR_XML_PARENT_ALREADY_CONTAINS_CHILD__USER_ERROR_MSG "Parent %.*s already contains child entry %s%.*s"
|
||||
#define OB_ERR_INVALID_VECTOR_DIM__USER_ERROR_MSG "inconsistent dimension: expected %u got %u"
|
||||
#define OB_SERVER_IS_INIT__USER_ERROR_MSG "Server is initializing"
|
||||
#define OB_SERVER_IS_STOPPING__USER_ERROR_MSG "Server is stopping"
|
||||
#define OB_PACKET_CHECKSUM_ERROR__USER_ERROR_MSG "Packet checksum error"
|
||||
@ -5987,6 +5988,7 @@ constexpr int OB_ERR_INVALID_DATE_MSG_FMT_V2 = -4219;
|
||||
#define OB_ERR_DUP_DEF_NAMESPACE__ORA_USER_ERROR_MSG "ORA-19118: XQST0066 - duplicate default namespace definition - %s."
|
||||
#define OB_ERR_COMPARE_VARRAY_LOB_ATTR__ORA_USER_ERROR_MSG "ORA-22901: cannot compare VARRAY or LOB attributes of an object type"
|
||||
#define OB_ERR_XML_PARENT_ALREADY_CONTAINS_CHILD__ORA_USER_ERROR_MSG "ORA-31003: Parent %.*s already contains child entry %s%.*s"
|
||||
#define OB_ERR_INVALID_VECTOR_DIM__ORA_USER_ERROR_MSG "ORA-00932: inconsistent dimension: expected %u got %u"
|
||||
#define OB_SERVER_IS_INIT__ORA_USER_ERROR_MSG "ORA-00600: internal error code, arguments: -8001, Server is initializing"
|
||||
#define OB_SERVER_IS_STOPPING__ORA_USER_ERROR_MSG "ORA-00600: internal error code, arguments: -8002, Server is stopping"
|
||||
#define OB_PACKET_CHECKSUM_ERROR__ORA_USER_ERROR_MSG "ORA-00600: internal error code, arguments: -8003, Packet checksum error"
|
||||
@ -6425,7 +6427,7 @@ constexpr int OB_ERR_INVALID_DATE_MSG_FMT_V2 = -4219;
|
||||
#define OB_ERR_DATA_TOO_LONG_MSG_FMT_V2__ORA_USER_ERROR_MSG "ORA-12899: value too large for column %.*s (actual: %ld, maximum: %ld)"
|
||||
#define OB_ERR_INVALID_DATE_MSG_FMT_V2__ORA_USER_ERROR_MSG "ORA-01861: Incorrect datetime value for column '%.*s' at row %ld"
|
||||
|
||||
extern int g_all_ob_errnos[2263];
|
||||
extern int g_all_ob_errnos[2264];
|
||||
|
||||
const char *ob_error_name(const int oberr);
|
||||
const char* ob_error_cause(const int oberr);
|
||||
|
@ -1511,6 +1511,20 @@ int ObFtsIndexBuilderUtil::get_doc_id_col(
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ObFtsIndexBuilderUtil::check_fts_or_multivalue_index_allowed(
|
||||
ObTableSchema &data_schema)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
if (OB_UNLIKELY(!data_schema.is_valid())) {
|
||||
ret = OB_INVALID_ARGUMENT;
|
||||
LOG_WARN("invalid argument", K(ret), K(data_schema));
|
||||
} else if (data_schema.is_partitioned_table() && data_schema.is_heap_table()) {
|
||||
ret = OB_NOT_SUPPORTED;
|
||||
LOG_USER_ERROR(OB_NOT_SUPPORTED, "create full-text or multi-value index on partition table without primary key");
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ObFtsIndexBuilderUtil::get_word_segment_col(
|
||||
const ObTableSchema &data_schema,
|
||||
const obrpc::ObCreateIndexArg *index_arg,
|
||||
|
@ -68,6 +68,8 @@ public:
|
||||
static int get_doc_id_col(
|
||||
const ObTableSchema &data_schema,
|
||||
const ObColumnSchemaV2 *&doc_id_col);
|
||||
static int check_fts_or_multivalue_index_allowed(
|
||||
ObTableSchema &data_schema);
|
||||
private:
|
||||
static int check_ft_cols(
|
||||
const obrpc::ObCreateIndexArg *index_arg,
|
||||
|
@ -660,11 +660,15 @@ int ObIndexBuilderUtil::adjust_expr_index_args(
|
||||
LOG_WARN("push back mbr column to gen columns failed", K(ret));
|
||||
}
|
||||
} else if (is_fts_index(arg.index_type_)) {
|
||||
if (OB_FAIL(ObFtsIndexBuilderUtil::adjust_fts_args(arg, data_schema, gen_columns))) {
|
||||
if (OB_FAIL(ObFtsIndexBuilderUtil::check_fts_or_multivalue_index_allowed(data_schema))) {
|
||||
LOG_WARN("fail to check fts index allowed", K(ret));
|
||||
} else if (OB_FAIL(ObFtsIndexBuilderUtil::adjust_fts_args(arg, data_schema, gen_columns))) {
|
||||
LOG_WARN("failed to adjust fts args", K(ret));
|
||||
}
|
||||
} else if (is_multivalue_index(arg.index_type_)) {
|
||||
if (OB_FAIL(ObMulValueIndexBuilderUtil::adjust_mulvalue_index_args(arg, data_schema, gen_columns))) {
|
||||
if (OB_FAIL(ObFtsIndexBuilderUtil::check_fts_or_multivalue_index_allowed(data_schema))) {
|
||||
LOG_WARN("fail to check multivalue index allowed", K(ret));
|
||||
} else if (OB_FAIL(ObMulValueIndexBuilderUtil::adjust_mulvalue_index_args(arg, data_schema, gen_columns))) {
|
||||
LOG_WARN("failed to adjust multivalue args", K(ret));
|
||||
}
|
||||
} else if (OB_FAIL(adjust_ordinary_index_column_args(arg, data_schema, allocator, gen_columns))) {
|
||||
|
@ -4,13 +4,32 @@
|
||||
"comment" : "for workloads like trade, payment core system, internet high throughput application, etc. no restrictions like foreign key, no stored procedure, no long transaction, no large transaction, no complex join, no complex subquery",
|
||||
"parameters": {
|
||||
"cluster": [
|
||||
|
||||
{
|
||||
"name": "_enable_defensive_check",
|
||||
"value": 0,
|
||||
"comment": "disabling the defensive check feature in production environment can result in a 10% performance improvement for DML operations"
|
||||
},
|
||||
{
|
||||
"name": "enable_syslog_recycle",
|
||||
"value": 1,
|
||||
"comment": "enable syslog auto recycle can prevent log files from filling up disk space"
|
||||
},
|
||||
{
|
||||
"name": "max_syslog_file_count",
|
||||
"value": 300,
|
||||
"comment": "when enable_syslog_recycle is enabled, should set this value to a proper value. 300 is an empirical value."
|
||||
}
|
||||
],
|
||||
"tenant": [
|
||||
{
|
||||
"name":"_rowsets_max_rows",
|
||||
"name": "_rowsets_max_rows",
|
||||
"value": 1,
|
||||
"comment":"for simple OLTP workloads, rowset = 1 is most effective."
|
||||
"comment": "for simple OLTP workloads, rowset = 1 is most effective."
|
||||
},
|
||||
{
|
||||
"name": "log_transport_compress_all",
|
||||
"value": 1,
|
||||
"comment": "In scenarios with limited bandwidth, network bandwidth can be saved with a small amount of CPU overhead through RPC compression"
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -21,16 +40,36 @@
|
||||
"parameters": {
|
||||
"cluster": [
|
||||
{
|
||||
"name":"large_query_threshold",
|
||||
"value":"600s",
|
||||
"comment":"for complex OLTP scenario, some query will run for very long time."
|
||||
"name": "large_query_threshold",
|
||||
"value": "600s",
|
||||
"comment": "for complex OLTP scenario, some query will run for very long time."
|
||||
},
|
||||
{
|
||||
"name": "_enable_defensive_check",
|
||||
"value": 0,
|
||||
"comment": "disabling the defensive check feature in production environment can result in a 3% performance improvement"
|
||||
},
|
||||
{
|
||||
"name": "enable_syslog_recycle",
|
||||
"value": 1,
|
||||
"comment": "enable syslog auto recycle can prevent log files from filling up disk space"
|
||||
},
|
||||
{
|
||||
"name": "max_syslog_file_count",
|
||||
"value": 300,
|
||||
"comment": "when enable_syslog_recycle is enabled, should set this value to a proper value. 300 is an empirical value."
|
||||
}
|
||||
],
|
||||
"tenant": [
|
||||
{
|
||||
"name":"_rowsets_max_rows",
|
||||
"name": "_rowsets_max_rows",
|
||||
"value": 4,
|
||||
"comment":"for complex OLTP workloads, rowset = 4 is most effective."
|
||||
"comment": "for complex OLTP workloads, rowset = 4 is most effective."
|
||||
},
|
||||
{
|
||||
"name": "log_transport_compress_all",
|
||||
"value": 1,
|
||||
"comment": "In scenarios with limited bandwidth, network bandwidth can be saved with a small amount of CPU overhead through RPC compression"
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -46,26 +85,26 @@
|
||||
"comment": "disable trace log for better AP performance"
|
||||
},
|
||||
{
|
||||
"name":"trace_log_slow_query_watermark",
|
||||
"value":"7d",
|
||||
"comment":"7 days. no 'slow query' concept for AP query"
|
||||
"name": "trace_log_slow_query_watermark",
|
||||
"value": "7d",
|
||||
"comment": "7 days. no 'slow query' concept for AP query"
|
||||
},
|
||||
{
|
||||
"name":"large_query_threshold",
|
||||
"value":"0ms",
|
||||
"comment":"disable large query detection for AP query"
|
||||
"name": "large_query_threshold",
|
||||
"value": "0ms",
|
||||
"comment": "disable large query detection for AP query"
|
||||
}
|
||||
],
|
||||
"tenant": [
|
||||
{
|
||||
"name":"default_table_store_format",
|
||||
"value":"column",
|
||||
"comment":"default to column format for AP"
|
||||
"name": "default_table_store_format",
|
||||
"value": "column",
|
||||
"comment": "default to column format for AP"
|
||||
},
|
||||
{
|
||||
"name":"_rowsets_max_rows",
|
||||
"name": "_rowsets_max_rows",
|
||||
"value": 256,
|
||||
"comment":"for classic OLAP workloads, rowset 256 is adequate"
|
||||
"comment": "for classic OLAP workloads, rowset 256 is adequate"
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -76,9 +115,9 @@
|
||||
"parameters": {
|
||||
"cluster": [
|
||||
{
|
||||
"name":"large_query_threshold",
|
||||
"value":"0ms",
|
||||
"comment":"disable large query detection for KV mode"
|
||||
"name": "large_query_threshold",
|
||||
"value": "0ms",
|
||||
"comment": "disable large query detection for KV mode"
|
||||
}
|
||||
],
|
||||
"tenant": [
|
||||
@ -91,16 +130,31 @@
|
||||
"parameters": {
|
||||
"cluster": [
|
||||
{
|
||||
"name":"large_query_threshold",
|
||||
"value":"600s",
|
||||
"comment":"AP query exist in HTAP workload, we need it running fast too."
|
||||
"name": "large_query_threshold",
|
||||
"value": "600s",
|
||||
"comment": "AP query exist in HTAP workload, we need it running fast too."
|
||||
},
|
||||
{
|
||||
"name": "enable_syslog_recycle",
|
||||
"value": 1,
|
||||
"comment": "enable syslog auto recycle can prevent log files from filling up disk space"
|
||||
},
|
||||
{
|
||||
"name": "max_syslog_file_count",
|
||||
"value": 300,
|
||||
"comment": "when enable_syslog_recycle is enabled, should set this value to a proper value. 300 is an empirical value."
|
||||
}
|
||||
],
|
||||
"tenant": [
|
||||
{
|
||||
"name":"_rowsets_max_rows",
|
||||
"name": "_rowsets_max_rows",
|
||||
"value": 32,
|
||||
"comment":"for classic HTAP workloads, rowset 32 is tradeoff"
|
||||
"comment": "for classic HTAP workloads, rowset 32 is tradeoff"
|
||||
},
|
||||
{
|
||||
"name": "log_transport_compress_all",
|
||||
"value": 1,
|
||||
"comment": "In scenarios with limited bandwidth, network bandwidth can be saved with a small amount of CPU overhead through RPC compression"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
@ -307,7 +307,7 @@ int ObLogFileHandler::unlink(const char* file_path)
|
||||
while (OB_SUCC(ret)) {
|
||||
if (OB_FAIL(THE_IO_DEVICE->unlink(file_path)) && OB_NO_SUCH_FILE_OR_DIRECTORY != ret) {
|
||||
LOG_WARN("unlink failed", K(ret), K(file_path));
|
||||
ob_usleep(static_cast<uint32_t>(UNLINK_RETRY_INTERVAL_US));
|
||||
ob_usleep<ObWaitEventIds::SLOG_NORMAL_RETRY_SLEEP>(UNLINK_RETRY_INTERVAL_US);
|
||||
ret = OB_SUCCESS;
|
||||
} else if (OB_NO_SUCH_FILE_OR_DIRECTORY == ret) {
|
||||
ret = OB_SUCCESS;
|
||||
@ -358,7 +358,7 @@ int ObLogFileHandler::normal_retry_write(void *buf, int64_t size, int64_t offset
|
||||
if (REACH_TIME_INTERVAL(LOG_INTERVAL_US)) {
|
||||
LOG_WARN("fail to aio_write", K(ret), K(io_info), K(retry_cnt));
|
||||
} else {
|
||||
ob_usleep(static_cast<uint32_t>(SLEEP_TIME_US));
|
||||
ob_usleep<ObWaitEventIds::SLOG_NORMAL_RETRY_SLEEP>(SLEEP_TIME_US);
|
||||
}
|
||||
}
|
||||
} while (OB_FAIL(ret));
|
||||
@ -381,7 +381,7 @@ int ObLogFileHandler::open(const char *file_path, const int flags, const mode_t
|
||||
LOG_WARN("failed to open file", K(ret), K(file_path), K(errno), KERRMSG);
|
||||
if (OB_TIMEOUT == ret || OB_EAGAIN == ret) {
|
||||
ret = OB_SUCCESS;
|
||||
ob_usleep(static_cast<uint32_t>(ObLogDefinition::RETRY_SLEEP_TIME_IN_US));
|
||||
ob_usleep<ObWaitEventIds::SLOG_NORMAL_RETRY_SLEEP>(ObLogDefinition::RETRY_SLEEP_TIME_IN_US);
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
|
@ -356,7 +356,7 @@ public:
|
||||
bool is_dag_failed() const { return ObIDag::DAG_STATUS_NODE_FAILED == dag_status_; }
|
||||
void set_add_time() { add_time_ = ObTimeUtility::fast_current_time(); }
|
||||
int64_t get_add_time() const { return add_time_; }
|
||||
int64_t get_priority() const { return priority_; }
|
||||
ObDagPrio::ObDagPrioEnum get_priority() const { return priority_; }
|
||||
void set_priority(ObDagPrio::ObDagPrioEnum prio) { priority_ = prio; }
|
||||
const ObDagId &get_dag_id() const { return id_; }
|
||||
int set_dag_id(const ObDagId &dag_id);
|
||||
@ -1062,6 +1062,8 @@ public:
|
||||
template<typename T>
|
||||
int alloc_dag(T *&dag);
|
||||
template<typename T>
|
||||
int alloc_dag_with_priority(const ObDagPrio::ObDagPrioEnum &prio, T *&dag);
|
||||
template<typename T>
|
||||
int create_and_add_dag_net(const ObIDagInitParam *param);
|
||||
void free_dag(ObIDag &dag);
|
||||
void inner_free_dag(ObIDag &dag);
|
||||
@ -1297,6 +1299,27 @@ int ObTenantDagScheduler::alloc_dag(T *&dag)
|
||||
return ret;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
int ObTenantDagScheduler::alloc_dag_with_priority(
|
||||
const ObDagPrio::ObDagPrioEnum &prio, T *&dag)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
dag = NULL;
|
||||
if (prio < ObDagPrio::DAG_PRIO_COMPACTION_HIGH
|
||||
|| prio >= ObDagPrio::DAG_PRIO_MAX) {
|
||||
ret = OB_INVALID_ARGUMENT;
|
||||
COMMON_LOG(WARN, "get invalid arg", K(ret), K(prio));
|
||||
} else if (OB_FAIL(alloc_dag(dag))) {
|
||||
COMMON_LOG(WARN, "failed to alloc dag", K(ret));
|
||||
} else if (OB_ISNULL(dag)) {
|
||||
ret = OB_ERR_UNEXPECTED;
|
||||
COMMON_LOG(WARN, "dag should not be null", K(ret), KP(dag));
|
||||
} else {
|
||||
dag->set_priority(prio);
|
||||
}
|
||||
return ret;
|
||||
}
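A hypothetical usage sketch of the new helper (the wrapper name and the priority chosen below are placeholders, not taken from this patch): it allocates a dag and stamps the explicit priority in one step instead of requiring a separate set_priority() call.

template <typename T>
int alloc_mid_prio_dag(ObTenantDagScheduler &scheduler, T *&dag)
{
  // T stands for any concrete ObIDag subclass; DAG_PRIO_COMPACTION_MID is a placeholder choice.
  int ret = OB_SUCCESS;
  dag = nullptr;
  if (OB_FAIL(scheduler.alloc_dag_with_priority(ObDagPrio::DAG_PRIO_COMPACTION_MID, dag))) {
    COMMON_LOG(WARN, "failed to alloc dag with priority", K(ret));
  } else {
    // init the dag and hand it to the scheduler here; call scheduler.free_dag(*dag) on failure
  }
  return ret;
}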
|
||||
|
||||
template<typename T>
|
||||
void ObTenantDagScheduler::free_dag_net(T *&dag_net)
|
||||
{
|
||||
|
@ -1849,7 +1849,8 @@ int ObSchemaPrinter::print_table_definition_table_options(const ObTableSchema &t
|
||||
if (OB_SUCC(ret)
|
||||
&& !strict_compat_
|
||||
&& ObDuplicateScopeChecker::is_valid_replicate_scope(table_schema.get_duplicate_scope())
|
||||
&& !is_no_table_options(sql_mode)) {
|
||||
&& !is_no_table_options(sql_mode)
|
||||
&& table_schema.is_user_table()) {
|
||||
// currently only CLUSTER scope is supported
|
||||
if (table_schema.get_duplicate_scope() == ObDuplicateScope::DUPLICATE_SCOPE_CLUSTER) {
|
||||
if (OB_FAIL(databuff_printf(buf, buf_len, pos, "DUPLICATE_SCOPE = 'CLUSTER' "))) {
|
||||
|
@ -1231,6 +1231,17 @@ int ObSchemaUtils::is_drop_column_only(const AlterTableSchema &alter_table_schem
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool ObSchemaUtils::can_add_column_group(const ObTableSchema &table_schema)
|
||||
{
|
||||
bool can_add_cg = false;
|
||||
if (table_schema.is_user_table()
|
||||
|| table_schema.is_tmp_table()
|
||||
|| table_schema.is_index_table()) {
|
||||
can_add_cg = true;
|
||||
}
|
||||
return can_add_cg;
|
||||
}
|
||||
|
||||
} // end schema
|
||||
} // end share
|
||||
} // end oceanbase
|
||||
|
@ -156,6 +156,7 @@ public:
|
||||
static int mock_default_cg(
|
||||
const uint64_t tenant_id,
|
||||
share::schema::ObTableSchema &new_table_schema);
|
||||
static bool can_add_column_group(const ObTableSchema &table_schema);
|
||||
|
||||
// Optimized method to batch get latest table schemas from cache or inner_table automatically.
|
||||
//
|
||||
|
@ -8690,6 +8690,10 @@ int ObTableSchema::add_column_group(const ObColumnGroupSchema &other)
|
||||
if (!other.is_valid()) {
|
||||
ret = OB_INVALID_ARGUMENT;
|
||||
LOG_WARN("invalid argument", KR(ret), K(other));
|
||||
} else if (other.get_column_group_type() != ObColumnGroupType::DEFAULT_COLUMN_GROUP
|
||||
&& !ObSchemaUtils::can_add_column_group(*this)) {
|
||||
ret = OB_NOT_SUPPORTED;
|
||||
LOG_WARN("only default column group is allowded to add to not user/tmp table", K(ret), K(other), KPC(this));
|
||||
} else if (OB_FAIL(do_add_column_group(other))) {
|
||||
LOG_WARN("fail to do add column group", KR(ret), K(other));
|
||||
}
|
||||
|
@ -259,6 +259,12 @@ enum ObMVOnQueryComputationFlag
|
||||
IS_MV_ON_QUERY_COMPUTATION = 1,
|
||||
};
|
||||
|
||||
enum ObDDLIgnoreSyncCdcFlag
|
||||
{
|
||||
DO_SYNC_LOG_FOR_CDC = 0,
|
||||
DONT_SYNC_LOG_FOR_CDC = 1,
|
||||
};
|
||||
|
||||
struct ObTableMode {
|
||||
OB_UNIS_VERSION_V(1);
|
||||
private:
|
||||
@ -288,7 +294,9 @@ private:
|
||||
static const int32_t TM_MV_ENABLE_QUERY_REWRITE_BITS = 1;
|
||||
static const int32_t TM_MV_ON_QUERY_COMPUTATION_OFFSET = 28;
|
||||
static const int32_t TM_MV_ON_QUERY_COMPUTATION_BITS = 1;
|
||||
static const int32_t TM_RESERVED = 3;
|
||||
static const int32_t TM_DDL_IGNORE_SYNC_CDC_OFFSET = 29;
|
||||
static const int32_t TM_DDL_IGNORE_SYNC_CDC_BITS = 1;
|
||||
static const int32_t TM_RESERVED = 2;
|
||||
|
||||
static const uint32_t MODE_FLAG_MASK = (1U << TM_MODE_FLAG_BITS) - 1;
|
||||
static const uint32_t PK_MODE_MASK = (1U << TM_PK_MODE_BITS) - 1;
|
||||
@ -303,6 +311,7 @@ private:
|
||||
static const uint32_t TABLE_REFERENCED_BY_MV_MASK = (1U << TM_TABLE_REFERENCED_BY_MV_BITS) - 1;
|
||||
static const uint32_t MV_ENABLE_QUERY_REWRITE_MASK = (1U << TM_MV_ENABLE_QUERY_REWRITE_BITS) - 1;
|
||||
static const uint32_t MV_ON_QUERY_COMPUTATION_MASK = (1U << TM_MV_ON_QUERY_COMPUTATION_BITS) - 1;
|
||||
static const uint32_t DDL_IGNORE_SYNC_CDC_MASK = (1U << TM_DDL_IGNORE_SYNC_CDC_BITS) - 1;
|
||||
public:
|
||||
ObTableMode() { reset(); }
|
||||
virtual ~ObTableMode() { reset(); }
|
||||
@ -381,7 +390,8 @@ public:
|
||||
"mv_available_flag", mv_available_flag_,
|
||||
"table_referenced_by_mv_flag", table_referenced_by_mv_flag_,
|
||||
"mv_enable_query_rewrite_flag", mv_enable_query_rewrite_flag_,
|
||||
"mv_on_query_computation_flag", mv_on_query_computation_flag_);
|
||||
"mv_on_query_computation_flag", mv_on_query_computation_flag_,
|
||||
"ddl_table_ignore_sync_cdc_flag", ddl_table_ignore_sync_cdc_flag_);
|
||||
union {
|
||||
int32_t mode_;
|
||||
struct {
|
||||
@ -398,6 +408,7 @@ public:
|
||||
uint32_t table_referenced_by_mv_flag_ : TM_TABLE_REFERENCED_BY_MV_BITS;
|
||||
uint32_t mv_enable_query_rewrite_flag_ : TM_MV_ENABLE_QUERY_REWRITE_BITS;
|
||||
uint32_t mv_on_query_computation_flag_ : TM_MV_ON_QUERY_COMPUTATION_BITS;
|
||||
uint32_t ddl_table_ignore_sync_cdc_flag_ : TM_DDL_IGNORE_SYNC_CDC_BITS;
|
||||
uint32_t reserved_ :TM_RESERVED;
|
||||
};
|
||||
};
|
||||
@ -723,6 +734,10 @@ public:
|
||||
{ return IS_MV_ON_QUERY_COMPUTATION == (enum ObMVOnQueryComputationFlag)table_mode_.mv_on_query_computation_flag_; }
|
||||
inline void set_mv_on_query_computation(const ObMVOnQueryComputationFlag flag)
|
||||
{ table_mode_.mv_on_query_computation_flag_ = flag; }
|
||||
inline void set_ddl_ignore_sync_cdc_flag(const ObDDLIgnoreSyncCdcFlag flag)
|
||||
{ table_mode_.ddl_table_ignore_sync_cdc_flag_ = flag; }
|
||||
inline bool is_ddl_table_ignored_to_sync_cdc() const
|
||||
{ return DONT_SYNC_LOG_FOR_CDC == table_mode_.ddl_table_ignore_sync_cdc_flag_; }
|
||||
|
||||
inline void set_session_id(const uint64_t id) { session_id_ = id; }
|
||||
inline uint64_t get_session_id() const { return session_id_; }
|
||||
|
@ -93,6 +93,9 @@ int ObBasicStatsEstimator::estimate(const ObOptStatGatherParam ¶m,
|
||||
LOG_WARN("failed to add group by info", K(ret));
|
||||
} else if (OB_FAIL(add_stat_item(ObPartitionId(src_tab_stat, calc_part_id_str, -1)))) {
|
||||
LOG_WARN("failed to add partition id", K(ret));
|
||||
} else if (param.is_specify_partition_ &&
|
||||
OB_FAIL(fill_partition_info(allocator, param.partition_infos_))) {
|
||||
LOG_WARN("failed to add partition info", K(ret));
|
||||
}
|
||||
} else if (OB_UNLIKELY(param.partition_infos_.count() > 1) ||
|
||||
OB_ISNULL(dst_opt_stats.at(0).table_stat_)) {
|
||||
@ -200,6 +203,8 @@ int ObBasicStatsEstimator::estimate_block_count(ObExecContext &ctx,
|
||||
block_num_stat = new (buf) BlockNumStat();
|
||||
block_num_stat->tab_macro_cnt_ = estimate_result.at(i).macro_block_count_;
|
||||
block_num_stat->tab_micro_cnt_ = estimate_result.at(i).micro_block_count_;
|
||||
block_num_stat->sstable_row_cnt_ = estimate_result.at(i).sstable_row_count_;
|
||||
block_num_stat->memtable_row_cnt_ = estimate_result.at(i).memtable_row_count_;
|
||||
total_sstable_row_cnt += estimate_result.at(i).sstable_row_count_;
|
||||
total_memtable_row_cnt += estimate_result.at(i).memtable_row_count_;
|
||||
int64_t partition_id = static_cast<int64_t>(estimate_result.at(i).part_id_);
|
||||
@ -213,7 +218,9 @@ int ObBasicStatsEstimator::estimate_block_count(ObExecContext &ctx,
|
||||
block_num_stat->tab_macro_cnt_,
|
||||
block_num_stat->tab_micro_cnt_,
|
||||
block_num_stat->cg_macro_cnt_arr_,
|
||||
block_num_stat->cg_micro_cnt_arr_))) {
|
||||
block_num_stat->cg_micro_cnt_arr_,
|
||||
block_num_stat->sstable_row_cnt_,
|
||||
block_num_stat->memtable_row_cnt_))) {
|
||||
LOG_WARN("faild to add", K(ret));
|
||||
}
|
||||
} else if (param.part_level_ == share::schema::PARTITION_LEVEL_TWO) {
|
||||
@ -226,7 +233,9 @@ int ObBasicStatsEstimator::estimate_block_count(ObExecContext &ctx,
|
||||
block_num_stat->tab_macro_cnt_,
|
||||
block_num_stat->tab_micro_cnt_,
|
||||
block_num_stat->cg_macro_cnt_arr_,
|
||||
block_num_stat->cg_micro_cnt_arr_))) {
|
||||
block_num_stat->cg_micro_cnt_arr_,
|
||||
block_num_stat->sstable_row_cnt_,
|
||||
block_num_stat->memtable_row_cnt_))) {
|
||||
LOG_WARN("faild to add", K(ret));
|
||||
} else {
|
||||
int64_t idx = 0;
|
||||
@ -240,7 +249,9 @@ int ObBasicStatsEstimator::estimate_block_count(ObExecContext &ctx,
|
||||
block_num_stat->tab_macro_cnt_,
|
||||
block_num_stat->tab_micro_cnt_,
|
||||
block_num_stat->cg_macro_cnt_arr_,
|
||||
block_num_stat->cg_micro_cnt_arr_))) {
|
||||
block_num_stat->cg_micro_cnt_arr_,
|
||||
block_num_stat->sstable_row_cnt_,
|
||||
block_num_stat->memtable_row_cnt_))) {
|
||||
LOG_WARN("faild to add", K(ret));
|
||||
}
|
||||
}
|
||||
@ -271,6 +282,8 @@ int ObBasicStatsEstimator::estimate_block_count(ObExecContext &ctx,
|
||||
block_num_stat = new (buf) BlockNumStat();
|
||||
block_num_stat->tab_macro_cnt_ = global_tab_stat.get_macro_block_count();
|
||||
block_num_stat->tab_micro_cnt_ = global_tab_stat.get_micro_block_count();
|
||||
block_num_stat->sstable_row_cnt_ = global_tab_stat.get_sstable_row_cnt();
|
||||
block_num_stat->memtable_row_cnt_ = global_tab_stat.get_memtable_row_cnt();
|
||||
if (OB_FAIL(block_num_stat->cg_macro_cnt_arr_.assign(global_tab_stat.get_cg_macro_arr())) ||
|
||||
OB_FAIL(block_num_stat->cg_micro_cnt_arr_.assign(global_tab_stat.get_cg_micro_arr()))) {
|
||||
LOG_WARN("failed to assign", K(ret));
|
||||
@ -289,6 +302,8 @@ int ObBasicStatsEstimator::estimate_block_count(ObExecContext &ctx,
|
||||
block_num_stat = new (buf) BlockNumStat();
|
||||
block_num_stat->tab_macro_cnt_ = first_part_tab_stats.at(i).get_macro_block_count();
|
||||
block_num_stat->tab_micro_cnt_ = first_part_tab_stats.at(i).get_micro_block_count();
|
||||
block_num_stat->sstable_row_cnt_ = first_part_tab_stats.at(i).get_sstable_row_cnt();
|
||||
block_num_stat->memtable_row_cnt_ = first_part_tab_stats.at(i).get_memtable_row_cnt();
|
||||
if (OB_FAIL(block_num_stat->cg_macro_cnt_arr_.assign(first_part_tab_stats.at(i).get_cg_macro_arr())) ||
|
||||
OB_FAIL(block_num_stat->cg_micro_cnt_arr_.assign(first_part_tab_stats.at(i).get_cg_micro_arr()))) {
|
||||
LOG_WARN("failed to assign", K(ret));
|
||||
@ -783,7 +798,7 @@ int ObBasicStatsEstimator::estimate_stale_partition(ObExecContext &ctx,
|
||||
cur_part_id = dst_part_id;
|
||||
cur_inc_mod_count = inc_mod_count;
|
||||
} else if (OB_FAIL(check_partition_stat_state(cur_part_id,
|
||||
has_subpart_invalid_inc ? 0 : cur_inc_mod_count,
|
||||
has_subpart_invalid_inc ? -1 : cur_inc_mod_count,
|
||||
stale_percent_threshold,
|
||||
partition_stat_infos))) {
|
||||
LOG_WARN("failed to check partition stat state", K(ret));
|
||||
@ -804,13 +819,13 @@ int ObBasicStatsEstimator::estimate_stale_partition(ObExecContext &ctx,
|
||||
ret = OB_SUCCESS;
|
||||
if (cur_part_id != -1 &&
|
||||
OB_FAIL(check_partition_stat_state(cur_part_id,
|
||||
has_subpart_invalid_inc ? 0 : cur_inc_mod_count,
|
||||
has_subpart_invalid_inc ? -1 : cur_inc_mod_count,
|
||||
stale_percent_threshold,
|
||||
partition_stat_infos))) {
|
||||
LOG_WARN("failed to check partition stat state", K(ret));
|
||||
} else if (is_check_global &&
|
||||
OB_FAIL(check_partition_stat_state(global_part_id,
|
||||
has_part_invalid_inc ? 0 : table_inc_modified,
|
||||
has_part_invalid_inc ? -1 : table_inc_modified,
|
||||
stale_percent_threshold,
|
||||
partition_stat_infos))) {
|
||||
LOG_WARN("failed to check partition stat state", K(ret));
|
||||
@ -826,6 +841,7 @@ int ObBasicStatsEstimator::estimate_stale_partition(ObExecContext &ctx,
|
||||
}
|
||||
}
|
||||
}
|
||||
ObSEArray<int64_t, 4> record_first_part_ids;
|
||||
for (int64_t i = 0; OB_SUCC(ret) && i < partition_infos.count(); ++i) {
|
||||
int64_t partition_id = partition_infos.at(i).part_id_;
|
||||
int64_t first_part_id = partition_infos.at(i).first_part_id_;
|
||||
@ -836,16 +852,22 @@ int ObBasicStatsEstimator::estimate_stale_partition(ObExecContext &ctx,
|
||||
LOG_WARN("failed to push back", K(ret));
|
||||
} else {/*do nothing*/}
|
||||
}
|
||||
if (first_part_id != OB_INVALID_ID && !is_contain(monitor_modified_part_ids, first_part_id)) {
|
||||
if (OB_SUCC(ret) &&
|
||||
first_part_id != OB_INVALID_ID &&
|
||||
!is_contain(monitor_modified_part_ids, first_part_id) &&
|
||||
!is_contain(record_first_part_ids, first_part_id)) {
|
||||
ObPartitionStatInfo partition_stat_info(first_part_id, 0, false, true);
|
||||
ret = partition_stat_infos.push_back(partition_stat_info);
|
||||
if (OB_FAIL(partition_stat_infos.push_back(partition_stat_info)) ||
|
||||
OB_FAIL(record_first_part_ids.push_back(first_part_id))) {
|
||||
LOG_WARN("failed to push back", K(ret));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
LOG_INFO("succeed to estimate stale partition", K(stale_percent_threshold),
|
||||
K(partition_stat_infos),
|
||||
K(partition_infos),
|
||||
K(monitor_modified_part_ids));
|
||||
LOG_TRACE("succeed to estimate stale partition", K(stale_percent_threshold),
|
||||
K(partition_stat_infos),
|
||||
K(partition_infos),
|
||||
K(monitor_modified_part_ids));
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -881,6 +903,10 @@ int ObBasicStatsEstimator::update_last_modified_count(sqlclient::ObISQLConnectio
|
||||
ObSqlString tablet_list;
|
||||
int64_t affected_rows = 0;
|
||||
bool is_valid = true;
|
||||
bool is_all_update = false;
|
||||
//if this is virtual table real agent, we need update the real table id modifed count
|
||||
uint64_t table_id = share::is_oracle_mapping_real_virtual_table(param.table_id_) ?
|
||||
share::get_real_table_mappings_tid(param.table_id_) : param.table_id_;
|
||||
if (OB_ISNULL(conn)) {
|
||||
ret = OB_ERR_UNEXPECTED;
|
||||
LOG_WARN("get unexpected null", K(ret), K(conn));
|
||||
@ -888,17 +914,18 @@ int ObBasicStatsEstimator::update_last_modified_count(sqlclient::ObISQLConnectio
|
||||
LOG_WARN("failed to check table read write valid", K(ret));
|
||||
} else if (!is_valid) {
|
||||
// do nothing
|
||||
} else if (OB_FAIL(gen_tablet_list(param, tablet_list))) {
|
||||
} else if (OB_FAIL(gen_tablet_list(param, tablet_list, is_all_update))) {
|
||||
LOG_WARN("failed to gen partition list", K(ret));
|
||||
} else if (tablet_list.empty()) {
|
||||
} else if (tablet_list.empty() && !is_all_update) {
|
||||
/*do nothing*/
|
||||
} else if (OB_FAIL(udpate_sql.append_fmt(
|
||||
"update %s set last_inserts = inserts, last_updates = updates, last_deletes = deletes " \
|
||||
"where tenant_id = %lu and table_id = %lu and tablet_id in %s;",
|
||||
"where tenant_id = %lu and table_id = %lu %s %s;",
|
||||
share::OB_ALL_MONITOR_MODIFIED_TNAME,
|
||||
share::schema::ObSchemaUtils::get_extract_tenant_id(param.tenant_id_, param.tenant_id_),
|
||||
share::schema::ObSchemaUtils::get_extract_schema_id(param.tenant_id_, param.table_id_),
|
||||
tablet_list.ptr()))) {
|
||||
share::schema::ObSchemaUtils::get_extract_schema_id(param.tenant_id_, table_id),
|
||||
!tablet_list.empty() ? "and tablet_id in" : " ",
|
||||
!tablet_list.empty() ? tablet_list.ptr() : " "))) {
|
||||
LOG_WARN("failed to append fmt", K(ret));
|
||||
} else if (OB_FAIL(conn->execute_write(param.tenant_id_, udpate_sql.ptr(), affected_rows))) {
|
||||
LOG_WARN("failed to execute sql", K(ret), K(udpate_sql));
|
||||
@ -1001,49 +1028,57 @@ int ObBasicStatsEstimator::check_partition_stat_state(const int64_t partition_id
|
||||
for (int64_t i = 0; !find_it && i < partition_stat_infos.count(); ++i) {
|
||||
if (partition_stat_infos.at(i).partition_id_ == partition_id) {
|
||||
//locked partition id or no arrived stale percent threshold no need regather stats.
|
||||
double stale_percent = partition_stat_infos.at(i).row_cnt_ <= 0 ? 1.0 :
|
||||
1.0 * inc_mod_count / partition_stat_infos.at(i).row_cnt_;
|
||||
double stale_percent = 0.0;
|
||||
if (inc_mod_count < 0 || partition_stat_infos.at(i).row_cnt_ <= 0) {
|
||||
stale_percent = inc_mod_count == 0 ? 0.0 : 1.0;
|
||||
} else {
|
||||
stale_percent = 1.0 * inc_mod_count / partition_stat_infos.at(i).row_cnt_;
|
||||
}
|
||||
partition_stat_infos.at(i).is_no_stale_ = stale_percent <= stale_percent_threshold;
|
||||
find_it = true;
|
||||
}
|
||||
}
|
||||
if (!find_it) {
|
||||
ObPartitionStatInfo partition_stat_info(partition_id, 0, false, false);
|
||||
partition_stat_info.is_no_stale_ = true;
|
||||
partition_stat_info.is_no_stale_ = false;
|
||||
ret = partition_stat_infos.push_back(partition_stat_info);
|
||||
}
|
||||
return ret;
|
||||
}
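Restated as a standalone sketch (names are illustrative, not part of the patch): a negative inc_mod_count now means the modification count is unknown, which forces the partition to be treated as stale, while a zero count is treated as not stale even when no row count is recorded.

static double calc_stale_percent(const int64_t inc_mod_count, const int64_t row_cnt)
{
  double stale_percent = 0.0;
  if (inc_mod_count < 0 || row_cnt <= 0) {
    stale_percent = (inc_mod_count == 0) ? 0.0 : 1.0;
  } else {
    stale_percent = 1.0 * inc_mod_count / row_cnt;
  }
  return stale_percent;   // the caller compares this against stale_percent_threshold
}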
|
||||
|
||||
int ObBasicStatsEstimator::gen_tablet_list(const ObTableStatParam ¶m,
|
||||
ObSqlString &tablet_list)
|
||||
ObSqlString &tablet_list,
|
||||
bool &is_all_update)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObSEArray<uint64_t, 4> tablet_ids;
|
||||
is_all_update = false;
|
||||
if (param.global_stat_param_.need_modify_) {
|
||||
if (param.part_level_ == share::schema::ObPartitionLevel::PARTITION_LEVEL_ZERO) {
|
||||
if (OB_UNLIKELY(param.global_tablet_id_ == ObTabletID::INVALID_TABLET_ID)) {
|
||||
ret = OB_ERR_UNEXPECTED;
|
||||
LOG_WARN("get unexpected error", K(ret), K(param));
|
||||
} else if (OB_FAIL(tablet_ids.push_back(param.global_tablet_id_))) {
|
||||
LOG_WARN("failed to push back", K(ret));
|
||||
if (param.part_level_ == share::schema::ObPartitionLevel::PARTITION_LEVEL_ZERO ||
|
||||
!param.global_stat_param_.gather_approx_) {
|
||||
is_all_update = true;
|
||||
}
|
||||
}
|
||||
if (OB_SUCC(ret) && !is_all_update && param.part_stat_param_.need_modify_) {
|
||||
if (param.part_level_ == share::schema::ObPartitionLevel::PARTITION_LEVEL_ONE) {
|
||||
for (int64_t i = 0; OB_SUCC(ret) && i < param.part_infos_.count(); ++i) {
|
||||
if (OB_FAIL(tablet_ids.push_back(param.part_infos_.at(i).tablet_id_.id()))) {
|
||||
LOG_WARN("failed to push back", K(ret));
|
||||
}
|
||||
}
|
||||
} else if (param.part_level_ == share::schema::ObPartitionLevel::PARTITION_LEVEL_TWO) {
|
||||
for (int64_t i = 0; OB_SUCC(ret) && i < param.part_infos_.count(); ++i) {
|
||||
for (int64_t j = 0; OB_SUCC(ret) && j < param.subpart_infos_.count(); ++j) {
|
||||
if (param.part_infos_.at(i).part_id_ == param.subpart_infos_.at(j).first_part_id_) {
|
||||
if (OB_FAIL(tablet_ids.push_back(param.subpart_infos_.at(j).tablet_id_.id()))) {
|
||||
LOG_WARN("failed to push back", K(ret));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (OB_SUCC(ret) && param.part_stat_param_.need_modify_ &&
|
||||
param.part_level_ != share::schema::ObPartitionLevel::PARTITION_LEVEL_TWO) {
|
||||
for (int64_t i = 0; OB_SUCC(ret) && i < param.part_infos_.count(); ++i) {
|
||||
if (OB_FAIL(tablet_ids.push_back(param.part_infos_.at(i).tablet_id_.id()))) {
|
||||
LOG_WARN("failed to push back", K(ret));
|
||||
}
|
||||
}
|
||||
for (int64_t i = 0; OB_SUCC(ret) && i < param.approx_part_infos_.count(); ++i) {
|
||||
if (OB_FAIL(tablet_ids.push_back(param.approx_part_infos_.at(i).tablet_id_.id()))) {
|
||||
LOG_WARN("failed to push back", K(ret));
|
||||
}
|
||||
}
|
||||
}
|
||||
if (OB_SUCC(ret) && param.subpart_stat_param_.need_modify_) {
|
||||
if (OB_SUCC(ret) && !is_all_update && param.subpart_stat_param_.need_modify_) {
|
||||
for (int64_t i = 0; OB_SUCC(ret) && i < param.subpart_infos_.count(); ++i) {
|
||||
if (OB_FAIL(tablet_ids.push_back(param.subpart_infos_.at(i).tablet_id_.id()))) {
|
||||
LOG_WARN("failed to push back", K(ret));
|
||||
@ -1062,7 +1097,6 @@ int ObBasicStatsEstimator::gen_tablet_list(const ObTableStatParam ¶m,
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int ObBasicStatsEstimator::get_all_tablet_id_and_object_id(const ObTableStatParam ¶m,
|
||||
ObIArray<ObTabletID> &tablet_ids,
|
||||
ObIArray<ObObjectID> &partition_ids)
|
||||
@ -1095,81 +1129,39 @@ int ObBasicStatsEstimator::get_all_tablet_id_and_object_id(const ObTableStatPara
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ObBasicStatsEstimator::get_need_stats_table_cnt(ObExecContext &ctx,
|
||||
const int64_t tenant_id,
|
||||
int64_t &task_table_count)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObSqlString select_sql;
|
||||
if (OB_FAIL(select_sql.append_fmt(
|
||||
"select count(1) as cnt from (select distinct m.table_id from " \
|
||||
"%s m left join %s up on m.table_id = up.table_id and up.pname = 'STALE_PERCENT' join %s gp on gp.sname = 'STALE_PERCENT' " \
|
||||
"where (case when (m.inserts+m.updates+m.deletes) = 0 then 0 "
|
||||
"else ((m.inserts+m.updates+m.deletes) - (m.last_inserts+m.last_updates+m.last_deletes)) * 1.0 / (m.inserts+m.updates+m.deletes) > " \
|
||||
"(CASE WHEN up.valchar IS NOT NULL THEN cast(up.valchar as signed) * 1.0 / 100 ELSE Cast(gp.spare4 AS signed) * 1.0 / 100 end) end) " \
|
||||
"UNION select distinct table_id from %s where table_id not in (select table_id from %s)) ",
|
||||
share::OB_ALL_MONITOR_MODIFIED_TNAME,
|
||||
share::OB_ALL_OPTSTAT_USER_PREFS_TNAME,
|
||||
share::OB_ALL_OPTSTAT_GLOBAL_PREFS_TNAME,
|
||||
share::OB_ALL_MONITOR_MODIFIED_TNAME,
|
||||
share::OB_ALL_TABLE_STAT_TNAME))) {
|
||||
LOG_WARN("failed to append fmt", K(ret));
|
||||
} else {
|
||||
ObCommonSqlProxy *sql_proxy = ctx.get_sql_proxy();
|
||||
SMART_VAR(ObMySQLProxy::MySQLResult, proxy_result) {
|
||||
sqlclient::ObMySQLResult *client_result = NULL;
|
||||
ObSQLClientRetryWeak sql_client_retry_weak(sql_proxy);
|
||||
if (OB_FAIL(sql_client_retry_weak.read(proxy_result, tenant_id, select_sql.ptr()))) {
|
||||
LOG_WARN("failed to execute sql", K(ret), K(select_sql));
|
||||
} else if (OB_ISNULL(client_result = proxy_result.get_result())) {
|
||||
ret = OB_ERR_UNEXPECTED;
|
||||
LOG_WARN("failed to execute sql", K(ret));
|
||||
} else {
|
||||
while (OB_SUCC(ret) && OB_SUCC(client_result->next())) {
|
||||
int64_t idx = 0;
|
||||
ObObj obj;
|
||||
if (OB_FAIL(client_result->get_obj(idx, obj))) {
|
||||
LOG_WARN("failed to get object", K(ret));
|
||||
} else if (OB_FAIL(obj.get_int(task_table_count))) {
|
||||
LOG_WARN("failed to get int", K(ret), K(obj));
|
||||
}
|
||||
}
|
||||
ret = OB_ITER_END == ret ? OB_SUCCESS : ret;
|
||||
}
|
||||
int tmp_ret = OB_SUCCESS;
|
||||
if (NULL != client_result) {
|
||||
if (OB_SUCCESS != (tmp_ret = client_result->close())) {
|
||||
LOG_WARN("close result set failed", K(ret), K(tmp_ret));
|
||||
ret = COVER_SUCC(tmp_ret);
|
||||
}
|
||||
}
|
||||
}
|
||||
LOG_TRACE("succeed to get table count that need gathering table stats", K(ret), K(task_table_count));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ObBasicStatsEstimator::get_need_stats_tables(ObExecContext &ctx,
|
||||
const int64_t tenant_id,
|
||||
ObIArray<int64_t> &table_ids,
|
||||
int64_t &slice_cnt)
|
||||
const int64_t offset,
|
||||
const int64_t slice_cnt,
|
||||
ObIArray<int64_t> &table_ids)
|
||||
{
|
||||
int ret = OB_SUCCESS;
|
||||
ObSqlString gather_table_type_list;
|
||||
ObSqlString select_sql;
|
||||
if (OB_FAIL(select_sql.append_fmt(
|
||||
"select distinct table_id from (select m.table_id from " \
|
||||
"%s m left join %s up on m.table_id = up.table_id and up.pname = 'STALE_PERCENT' join %s gp on gp.sname = 'STALE_PERCENT' " \
|
||||
"where (case when (m.inserts+m.updates+m.deletes) = 0 then 0 "\
|
||||
"else ((m.inserts+m.updates+m.deletes) - (m.last_inserts+m.last_updates+m.last_deletes)) * 1.0 / (m.inserts+m.updates+m.deletes) > " \
|
||||
"(CASE WHEN up.valchar IS NOT NULL THEN cast(up.valchar as signed) * 1.0 / 100 ELSE Cast(gp.spare4 AS signed) * 1.0 / 100 end) end) "\
|
||||
" UNION ALL select table_id from %s where table_id not in (select table_id from %s)) "
|
||||
"ORDER BY table_id DESC limit %ld",
|
||||
share::OB_ALL_MONITOR_MODIFIED_TNAME,
|
||||
share::OB_ALL_OPTSTAT_USER_PREFS_TNAME,
|
||||
share::OB_ALL_OPTSTAT_GLOBAL_PREFS_TNAME,
|
||||
share::OB_ALL_MONITOR_MODIFIED_TNAME,
|
||||
share::OB_ALL_TABLE_STAT_TNAME,
|
||||
slice_cnt))) {
|
||||
if (OB_FAIL(get_gather_table_type_list(gather_table_type_list))) {
|
||||
LOG_WARN("failed to get gather table type list", K(ret));
|
||||
} else if (OB_FAIL(select_sql.append_fmt("SELECT /*+no_rewrite*/table_id "\
|
||||
"FROM (SELECT tenant_id,"\
|
||||
" table_id,"\
|
||||
" table_type"\
|
||||
" FROM %s"\
|
||||
" WHERE table_type IN %s"\
|
||||
" ORDER BY tenant_id,"\
|
||||
" table_id"\
|
||||
" LIMIT %ld, %ld) t "\
|
||||
"WHERE table_type = %u "\
|
||||
" OR EXISTS(SELECT 1 "\
|
||||
" FROM %s m"\
|
||||
" WHERE t.table_id = m.table_id"\
|
||||
" AND t.tenant_id = m.tenant_id"\
|
||||
" AND inserts + deletes + updates > 0"\
|
||||
" limit 1); ",
|
||||
share::OB_ALL_TABLE_TNAME,
|
||||
gather_table_type_list.ptr(),
|
||||
offset,
|
||||
slice_cnt,
|
||||
share::schema::ObTableType::VIRTUAL_TABLE,
|
||||
share::OB_ALL_MONITOR_MODIFIED_TNAME))) {
|
||||
LOG_WARN("failed to append fmt", K(ret));
|
||||
} else {
|
||||
ObCommonSqlProxy *sql_proxy = ctx.get_sql_proxy();
|
||||
@ -1205,7 +1197,7 @@ int ObBasicStatsEstimator::get_need_stats_tables(ObExecContext &ctx,
|
||||
}
|
||||
}
|
||||
LOG_TRACE("succeed to get table ids that need gathering table stats",
|
||||
K(ret), K(slice_cnt), K(tenant_id), K(table_ids.count()), K(table_ids));
|
||||
K(select_sql), K(offset), K(slice_cnt), K(table_ids));
|
||||
}
|
||||
return ret;
|
||||
}
@ -1400,5 +1392,23 @@ int ObBasicStatsEstimator::check_can_use_column_store_and_split_part_gather(cons
return ret;
}

int ObBasicStatsEstimator::get_gather_table_type_list(ObSqlString &gather_table_type_list)
{
int ret = OB_SUCCESS;
int64_t table_type_arr[] = {share::schema::ObTableType::SYSTEM_TABLE,
share::schema::ObTableType::VIRTUAL_TABLE,
share::schema::ObTableType::USER_TABLE,
share::schema::ObTableType::EXTERNAL_TABLE};
int64_t table_type_cnt = sizeof(table_type_arr)/sizeof(table_type_arr[0]);
for (int64_t i = 0; OB_SUCC(ret) && i < table_type_cnt; ++i) {
char prefix = (i == 0 ? '(' : ' ');
char suffix = (i == table_type_cnt - 1 ? ')' : ',');
if (OB_FAIL(gather_table_type_list.append_fmt("%c%lu%c", prefix, table_type_arr[i], suffix))) {
LOG_WARN("failed to append sql", K(ret));
} else {/*do nothing*/}
}
return ret;
}
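get_gather_table_type_list assembles a parenthesized, comma-separated list of table-type codes that is spliced into the IN clause of the query above. A standalone sketch of the same prefix/suffix formatting, using placeholder numeric codes rather than the real ObTableType values:

// Build "(a, b, c)" from an array, mirroring the prefix/suffix logic above.
// The values below are placeholders, not the actual ObTableType enum codes.
#include <cstdio>
#include <string>

static std::string build_in_list(const long *vals, size_t cnt)
{
  std::string out;
  char buf[32];
  for (size_t i = 0; i < cnt; ++i) {
    const char prefix = (i == 0 ? '(' : ' ');
    const char suffix = (i == cnt - 1 ? ')' : ',');
    std::snprintf(buf, sizeof(buf), "%c%ld%c", prefix, vals[i], suffix);
    out += buf;
  }
  return out;
}

int main()
{
  const long placeholder_types[] = {0, 2, 3, 14};  // hypothetical type codes
  std::printf("%s\n", build_in_list(placeholder_types, 4).c_str());  // prints "(0, 2, 3, 14)"
  return 0;
}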
} // end of common
} // end of oceanbase

@ -102,7 +102,8 @@ public:
ObIArray<ObPartitionStatInfo> &partition_stat_infos);

static int gen_tablet_list(const ObTableStatParam &param,
ObSqlString &tablet_list);
ObSqlString &tablet_list,
bool &is_all_update);

static int do_estimate_block_count(ObExecContext &ctx,
const uint64_t tenant_id,
@ -137,12 +138,9 @@ public:

static int get_need_stats_tables(ObExecContext &ctx,
const int64_t tenant_id,
ObIArray<int64_t> &table_ids,
int64_t &slice_cnt);

static int get_need_stats_table_cnt(ObExecContext &ctx,
const int64_t tenant_id,
int64_t &task_table_count);
const int64_t offset,
const int64_t slice_cnt,
ObIArray<int64_t> &table_ids);

int estimate(const ObOptStatGatherParam &param,
ObIArray<ObOptStat> &dst_opt_stats);
@ -180,6 +178,8 @@ private:
const int64_t degree,
bool &use_column_store,
bool &use_split_part);

static int get_gather_table_type_list(ObSqlString &gather_table_type_list);
};
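With the declaration change above, get_need_stats_tables now takes an offset and a slice_cnt and returns one page of candidate table ids, so the caller is expected to page through the candidates. A schematic driver loop under that assumption; fetch_slice is a hypothetical stand-in, not the real interface:

// Schematic pagination loop; fetch_slice stands in for get_need_stats_tables.
#include <cstdint>
#include <cstdio>
#include <vector>

// Pretend source of 10 candidate table ids.
static int fetch_slice(int64_t offset, int64_t slice_cnt, std::vector<int64_t> &table_ids)
{
  table_ids.clear();
  for (int64_t id = offset; id < offset + slice_cnt && id < 10; ++id) {
    table_ids.push_back(id);
  }
  return 0;
}

int main()
{
  const int64_t slice_cnt = 4;
  std::vector<int64_t> table_ids;
  for (int64_t offset = 0; ; offset += slice_cnt) {
    if (0 != fetch_slice(offset, slice_cnt, table_ids)) {
      return 1;
    }
    if (table_ids.empty()) {
      break;  // no more candidates
    }
    std::printf("slice at offset %ld: %zu tables\n", (long)offset, table_ids.size());
    if (static_cast<int64_t>(table_ids.size()) < slice_cnt) {
      break;  // a short slice means the end was reached
    }
  }
  return 0;
}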
}

@ -16,6 +16,7 @@
#include "share/stat/ob_opt_column_stat.h"
#include "share/stat/ob_dbms_stats_utils.h"
#include "share/stat/ob_dbms_stats_copy_table_stats.h"
#include "share/stat/ob_dbms_stats_history_manager.h"

int CopyTableStatHelper::copy_part_stat(ObIArray<ObOptTableStat *> &table_stats)
{
@ -419,8 +420,28 @@ int ObDbmsStatsCopyTableStats::copy_tab_col_stats(sql::ObExecContext &ctx,
LOG_WARN("src table stat is not analyzed", K(table_stat_param.part_infos_.at(0).part_id_));
} else if (OB_FAIL(copy_stat_helper.copy_part_col_stat(table_stat_param.is_subpart_name_, col_handles, table_stats, column_stats))) {
LOG_WARN("failed to copy table column stat", K(ret), KPC(copy_stat_helper.src_part_stat_));
} else if (OB_FAIL(ObDbmsStatsUtils::split_batch_write(ctx, table_stats, column_stats))) {
LOG_WARN("failed to split batch write stat", K(ret));
}
if (OB_SUCC(ret)) {
ObMySQLTransaction trans;
//begin trans
if (OB_FAIL(trans.start(ctx.get_sql_proxy(), table_stat_param.tenant_id_))) {
LOG_WARN("fail to start transaction", K(ret));
} else if (OB_FAIL(ObDbmsStatsHistoryManager::backup_opt_stats(ctx, trans, table_stat_param, ObTimeUtility::current_time()))) {
LOG_WARN("failed to backup opt stats", K(ret));
} else if (OB_FAIL(ObDbmsStatsUtils::split_batch_write(ctx, trans.get_connection(), table_stats, column_stats))) {
LOG_WARN("failed to split batch write", K(ret));
} else {/*do nothing*/}
//end trans
if (OB_SUCC(ret)) {
if (OB_FAIL(trans.end(true))) {
LOG_WARN("fail to commit transaction", K(ret));
}
} else {
int tmp_ret = OB_SUCCESS;
if (OB_SUCCESS != (tmp_ret = trans.end(false))) {
LOG_WARN("fail to roll back transaction", K(tmp_ret));
}
}
}
return ret;
}
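The added block wraps the stats copy in a transaction: start, back up the existing stats, write the new ones, then commit on success or roll back while preserving the first error code. A schematic sketch of that control flow with stand-in types and return codes (the real code uses ObMySQLTransaction and OB_* error codes):

// Schematic commit/rollback skeleton; Transaction and the rc values are stand-ins.
#include <cstdio>

struct Transaction {
  int start() { return 0; }
  int end(bool commit) { std::printf(commit ? "commit\n" : "rollback\n"); return 0; }
};

static int backup_old_stats(Transaction &) { return 0; }  // placeholder for backup_opt_stats
static int write_new_stats(Transaction &) { return 0; }   // placeholder for split_batch_write

static int copy_stats_in_txn()
{
  int ret = 0;
  Transaction trans;
  if (0 != (ret = trans.start())) {
    std::fprintf(stderr, "fail to start transaction\n");
  } else if (0 != (ret = backup_old_stats(trans))) {
    std::fprintf(stderr, "failed to backup opt stats\n");
  } else if (0 != (ret = write_new_stats(trans))) {
    std::fprintf(stderr, "failed to write stats\n");
  }
  if (0 == ret) {
    if (0 != (ret = trans.end(true))) {  // commit; a commit failure becomes the result
      std::fprintf(stderr, "fail to commit transaction\n");
    }
  } else {
    int tmp_ret = trans.end(false);      // roll back, but keep the first error code
    if (0 != tmp_ret) {
      std::fprintf(stderr, "fail to roll back transaction\n");
    }
  }
  return ret;
}

int main() { return copy_stats_in_txn(); }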
@ -341,7 +341,7 @@ int ObDbmsStatsExecutor::split_gather_partition_stats(ObExecContext &ctx,
}
}
if (OB_SUCC(ret)) {
if (gather_helper.maximum_gather_col_cnt_ >= param.column_params_.count()) {
if (gather_helper.maximum_gather_col_cnt_ >= param.get_need_gather_column()) {
ObSEArray<ObOptTableStat *, 4> all_tstats;
ObSEArray<ObOptColumnStat *, 4> all_cstats;
ObSEArray<ObOptStat, 4> opt_stats;
@ -677,7 +677,7 @@ int ObDbmsStatsExecutor::check_need_split_gather(const ObTableStatParam &param,
GatherHelper &gather_helper)
{
int ret = OB_SUCCESS;
int64_t column_cnt = param.column_params_.count();
int64_t column_cnt = param.get_need_gather_column();
int64_t partition_cnt = param.subpart_stat_param_.need_modify_ ? param.subpart_infos_.count() :
(param.part_stat_param_.need_modify_ ? param.part_infos_.count() + param.approx_part_infos_.count() : 1);
bool need_histgoram = param.subpart_stat_param_.need_modify_ ? param.subpart_stat_param_.gather_histogram_ :
@ -921,7 +921,7 @@ int ObDbmsStatsExecutor::set_column_stats(ObExecContext &ctx,
col_stat->set_column_id(key.column_id_);
col_stat->set_collation_type(param.table_param_.column_params_.at(0).cs_type_);
col_stat->set_last_analyzed(0);
if (OB_FAIL(do_set_column_stats(param, col_stat))) {
if (OB_FAIL(do_set_column_stats(*alloc, ctx.get_my_session()->get_dtc_params(), param, col_stat))) {
LOG_WARN("failed to do set table stats", K(ret));
} else if (OB_FAIL(column_stats.push_back(col_stat))) {
LOG_WARN("failed to push back column stat", K(ret));
@ -994,7 +994,9 @@ int ObDbmsStatsExecutor::do_set_table_stats(const ObSetTableStatParam &param,
return ret;
}

int ObDbmsStatsExecutor::do_set_column_stats(const ObSetColumnStatParam &param,
int ObDbmsStatsExecutor::do_set_column_stats(ObIAllocator &allocator,
const ObDataTypeCastParams &dtc_params,
const ObSetColumnStatParam &param,
ObOptColumnStat *&column_stat)
{
int ret = OB_SUCCESS;
@ -1025,9 +1027,21 @@ int ObDbmsStatsExecutor::do_set_column_stats(const ObSetColumnStatParam &param,
if (param.avgclen_ > 0) {
column_stat->set_avg_len(param.avgclen_);
}
//5.set hist_param TODO @jiangxiu.wt
//5.set max/min value
if (param.hist_param_.minval_ != NULL || param.hist_param_.maxval_ != NULL) {
ObCastCtx cast_ctx(&allocator, &dtc_params, CM_NONE, param.col_meta_.get_collation_type());
if ((param.hist_param_.minval_ != NULL &&
OB_FAIL(ObObjCaster::to_type(param.col_meta_.get_type(), cast_ctx, *param.hist_param_.minval_, column_stat->get_min_value()))) ||
(param.hist_param_.maxval_ != NULL &&
OB_FAIL(ObObjCaster::to_type(param.col_meta_.get_type(), cast_ctx, *param.hist_param_.maxval_, column_stat->get_max_value())))) {
ret = OB_ERR_DBMS_STATS_PL;
LOG_WARN("Invalid or inconsistent input values", K(ret), K(param));
LOG_USER_ERROR(OB_ERR_DBMS_STATS_PL,"Invalid or inconsistent input values");
}
}
//6.set hist_param TODO @jiangxiu.wt
//other options support later.
LOG_TRACE("succeed to do set column stats", K(*column_stat));
LOG_TRACE("succeed to do set column stats", K(param), K(*column_stat));
}
return ret;
}
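The reworked do_set_column_stats casts the caller-supplied min/max values to the column's own type before storing them, and reports invalid input instead of silently accepting it. A loose standalone sketch of that idea, assuming a plain numeric column and C strings instead of ObObj, ObCastCtx and the OB_* codes:

// Minimal sketch (not OceanBase code): parse user-supplied min/max text into the
// column's numeric type before accepting it, rejecting garbage the way the hunk
// above raises OB_ERR_DBMS_STATS_PL on a failed cast.
#include <cerrno>
#include <cstdio>
#include <cstdlib>

enum { RC_SUCCESS = 0, RC_INVALID_INPUT = -1 };  // hypothetical return codes

static int cast_to_double(const char *text, double &out)
{
  if (text == nullptr || *text == '\0') {
    return RC_INVALID_INPUT;
  }
  errno = 0;
  char *end = nullptr;
  out = std::strtod(text, &end);
  // Trailing garbage or a range error stands in for a failed ObObjCaster::to_type().
  return (errno != 0 || end == nullptr || *end != '\0') ? RC_INVALID_INPUT : RC_SUCCESS;
}

struct FakeColumnStat { double min_value; double max_value; };

static int set_min_max(const char *minval, const char *maxval, FakeColumnStat &stat)
{
  int rc = RC_SUCCESS;
  if (minval != nullptr && RC_SUCCESS != (rc = cast_to_double(minval, stat.min_value))) {
    std::fprintf(stderr, "Invalid or inconsistent input values: %s\n", minval);
  } else if (maxval != nullptr && RC_SUCCESS != (rc = cast_to_double(maxval, stat.max_value))) {
    std::fprintf(stderr, "Invalid or inconsistent input values: %s\n", maxval);
  }
  return rc;
}

int main()
{
  FakeColumnStat stat = {0.0, 0.0};
  int rc = set_min_max("1", "100", stat);
  std::printf("rc=%d min=%g max=%g\n", rc, stat.min_value, stat.max_value);
  return 0;
}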
@ -142,7 +142,9 @@ private:
static int do_set_table_stats(const ObSetTableStatParam &param,
ObOptTableStat *table_stat);

static int do_set_column_stats(const ObSetColumnStatParam &param,
static int do_set_column_stats(ObIAllocator &allocator,
const ObDataTypeCastParams &dtc_params,
const ObSetColumnStatParam &param,
ObOptColumnStat *&column_stat);

static int reset_table_locked_state(ObExecContext &ctx,

@ -1030,7 +1030,7 @@ int ObDbmsStatsExportImport::get_opt_stat(ObExecContext &ctx,
} else if (OB_FAIL(num_val.extract_valid_int64_with_trunc(int_val))) {
LOG_WARN("extract_valid_int64_with_trunc failed", K(ret), K(num_val));
} else if (int_val > 0) {
if (OB_UNLIKELY(col_stat->get_histogram().get_density() <= 0.0)) {
if (OB_UNLIKELY(col_stat->get_histogram().get_density() < 0.0)) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("get unexpected error", K(result_objs), K(ret), KPC(col_stat));
} else if (col_stat->get_histogram().get_buckets().empty()) {

@ -95,12 +95,15 @@ int ObDbmsStatsGather::classfy_column_histogram(const ObOptStatGatherParam &para
LOG_WARN("get unexpected error", K(ret), KPC(dst_col_stat), K(col_param));
} else if (col_param.need_basic_stat() &&
col_param.bucket_num_ > 1 &&
dst_col_stat->get_num_distinct() > 0) {
dst_col_stat->get_num_distinct() > 0 &&
dst_col_stat->get_num_not_null() > 0) {
int64_t max_disuse_cnt = std::ceil(dst_col_stat->get_num_not_null() * 1.0 / col_param.bucket_num_);
//After testing, the error of using hyperloglog to estimate ndv is within 5%.
const double MAX_LLC_NDV_ERR_RATE = !param.need_approx_ndv_ ? 0.0 : 0.05;
const int64_t fault_tolerance_cnt = std::ceil(dst_col_stat->get_num_distinct() * MAX_LLC_NDV_ERR_RATE);
if (dst_col_stat->get_num_distinct() >= col_param.bucket_num_ + max_disuse_cnt + fault_tolerance_cnt) {
double sample_val = dst_col_stat->get_histogram().get_sample_size() * 100.0 / dst_col_stat->get_num_not_null();
if (dst_col_stat->get_num_distinct() >= col_param.bucket_num_ + max_disuse_cnt + fault_tolerance_cnt ||
sample_val < 100.0 * (1.0 - 1.0 / col_param.bucket_num_)) {
//directly gather hybrid histogram
dst_col_stat->get_histogram().set_type(ObHistType::HYBIRD);
} else {
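The reshaped condition builds a hybrid histogram directly when the (possibly HyperLogLog-estimated) NDV is clearly larger than the requested bucket count, allowing one bucket's worth of rounding slack plus the roughly 5% estimation error, and now also when the sample covered less than a (1 - 1/bucket_num) share of the non-null rows; otherwise it takes the branch outside this hunk. A small numeric sketch of that decision with made-up counts:

// Numeric sketch of the histogram-type decision above (all inputs are made up).
#include <cmath>
#include <cstdint>
#include <cstdio>

static bool gather_hybrid_directly(int64_t num_distinct, int64_t num_not_null,
                                   int64_t bucket_num, double sample_size,
                                   bool need_approx_ndv)
{
  const int64_t max_disuse_cnt = static_cast<int64_t>(std::ceil(num_not_null * 1.0 / bucket_num));
  const double max_llc_ndv_err_rate = need_approx_ndv ? 0.05 : 0.0;
  const int64_t fault_tolerance_cnt = static_cast<int64_t>(std::ceil(num_distinct * max_llc_ndv_err_rate));
  const double sample_val = sample_size * 100.0 / num_not_null;  // percentage of non-null rows sampled
  return num_distinct >= bucket_num + max_disuse_cnt + fault_tolerance_cnt ||
         sample_val < 100.0 * (1.0 - 1.0 / bucket_num);
}

int main()
{
  // 10000 non-null rows, 400 distinct values, 254 buckets, 9000 rows sampled, approx NDV on:
  // max_disuse_cnt = ceil(10000/254) = 40, fault_tolerance_cnt = ceil(400*0.05) = 20,
  // threshold = 254 + 40 + 20 = 314 <= 400, so a hybrid histogram is gathered directly.
  std::printf("%d\n", gather_hybrid_directly(400, 10000, 254, 9000.0, true) ? 1 : 0);
  return 0;
}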
@ -179,6 +182,8 @@ int ObDbmsStatsGather::init_opt_stat(ObIAllocator &allocator,
} else {
tab_stat->set_macro_block_num(block_num_stat->tab_macro_cnt_);
tab_stat->set_micro_block_num(block_num_stat->tab_micro_cnt_);
tab_stat->set_sstable_row_count(block_num_stat->sstable_row_cnt_);
tab_stat->set_memtable_row_count(block_num_stat->memtable_row_cnt_);
}
}
for (int64_t i = 0; OB_SUCC(ret) && i < param.column_params_.count(); ++i) {

@ -374,7 +374,7 @@ int ObDbmsStatsMaintenanceWindow::is_stats_maintenance_window_attr(const sql::Ob
if (0 == attr_name.case_compare("job_action")) {
if (0 == job_name.case_compare(opt_stats_history_manager)) {
const char *job_action_name = "DBMS_STATS.PURGE_STATS(";
if (0 == strncasecmp(val_name.ptr(), job_action_name, strlen(job_action_name))) {
if (!val_name.empty() && 0 == strncasecmp(val_name.ptr(), job_action_name, strlen(job_action_name))) {
if (OB_FAIL(dml.add_column("job_action", ObHexEscapeSqlStr(val_name)))) {
LOG_WARN("failed to add column", K(ret));
} else if (OB_FAIL(dml.add_column("what", ObHexEscapeSqlStr(val_name)))) {
@ -385,7 +385,7 @@ int ObDbmsStatsMaintenanceWindow::is_stats_maintenance_window_attr(const sql::Ob
} else {/*do nothing*/}
} else {
const char *job_action_name = "DBMS_STATS.GATHER_DATABASE_STATS_JOB_PROC(";
if (0 == strncasecmp(val_name.ptr(), job_action_name, strlen(job_action_name))) {
if (!val_name.empty() && 0 == strncasecmp(val_name.ptr(), job_action_name, strlen(job_action_name))) {
if (OB_FAIL(dml.add_column("job_action", ObHexEscapeSqlStr(val_name)))) {
LOG_WARN("failed to add column", K(ret));
} else if (OB_FAIL(dml.add_column("what", ObHexEscapeSqlStr(val_name)))) {