fix some bugs

Handora authored 2024-07-24 05:24:13 +00:00, committed by ob-robot
parent eacf70f5fb
commit 4e6883cf58
2 changed files with 17 additions and 15 deletions


@@ -107,16 +107,6 @@ int check_sequence_set_violation(const concurrent_control::ObWriteFlag write_flag
   // and so fail to pass the check. We must bypass the case.
   } else if (write_flag.is_table_api()) {
     // bypass the case
-  // Case 6: When rows are deleted while a unique index is being built
-  // concurrently, two rows of the main table may point to the same row
-  // of the newly created index, which means the unique index will abort
-  // itself during the consistency check. However, because of how online
-  // DDL works, the concurrent deletes also operate on the newly created
-  // index, so these two delete operations fail to pass the check. We
-  // need to bypass this case.
-  } else if (blocksstable::ObDmlFlag::DF_DELETE == writer_dml_flag
-             && blocksstable::ObDmlFlag::DF_DELETE == locker_dml_flag) {
-    // bypass the case
   // Case 7: For the case of batch dml operation, it may operate the same row
   // concurrently if the first operation has no effects. (SQL layer will check
   // the modification of the row before the second operation, and report the
@@ -134,6 +124,16 @@ int check_sequence_set_violation(const concurrent_control::ObWriteFlag write_flag
   // So we need to bypass this case.
   } else if (write_flag.is_insert_up()) {
     // bypass the case
+  // Case 6: When rows are deleted while a unique index is being built
+  // concurrently, two rows of the main table may point to the same row
+  // of the newly created index, which means the unique index will abort
+  // itself during the consistency check. However, because of how online
+  // DDL works, the concurrent deletes also operate on the newly created
+  // index, so these two delete operations fail to pass the check. We
+  // need to bypass this case.
+  } else if (blocksstable::ObDmlFlag::DF_DELETE == writer_dml_flag
+             && blocksstable::ObDmlFlag::DF_DELETE == locker_dml_flag) {
+    // bypass the case
   // Case 9: For the case of the write-only index, it may operate the same
   // row more than once under the case that main table has two rows pointing
   // to the same index which is being built during the first stage (in which


@@ -786,13 +786,14 @@ class ObTxCommitInfoLogTempRef
 {
 public:
   ObTxCommitInfoLogTempRef()
-    : scheduler_(), participants_(), app_trace_id_str_(), app_trace_info_(),
-      incremental_participants_(), prev_record_lsn_(), redo_lsns_(), xid_()
+    : scheduler_(), participants_(), commit_parts_(), app_trace_id_str_(), app_trace_info_(),
+      incremental_participants_(), prev_record_lsn_(), redo_lsns_(), xid_()
   {}
 public:
   common::ObAddr scheduler_;
   share::ObLSArray participants_;
+  ObTxCommitParts commit_parts_;
   common::ObString app_trace_id_str_;
   common::ObString app_trace_info_;
   share::ObLSArray incremental_participants_;
@@ -812,7 +813,7 @@ public:
       incremental_participants_(temp_ref.incremental_participants_), cluster_version_(0),
       app_trace_id_str_(temp_ref.app_trace_id_str_), app_trace_info_(temp_ref.app_trace_info_),
       prev_record_lsn_(temp_ref.prev_record_lsn_), redo_lsns_(temp_ref.redo_lsns_),
-      xid_(temp_ref.xid_), commit_parts_(), epoch_(0)
+      xid_(temp_ref.xid_), commit_parts_(temp_ref.commit_parts_), epoch_(0)
   {
     before_serialize();
   }
@@ -829,7 +830,7 @@ public:
                     share::ObLSArray &incremental_participants,
                     uint64_t cluster_version,
                     const ObXATransID &xid,
-                    const ObTxCommitParts &commit_parts,
+                    ObTxCommitParts &commit_parts,
                     int64_t epoch)
     : scheduler_(scheduler), participants_(participants), upstream_(upstream),
       is_sub2pc_(is_sub2pc), is_dup_tx_(is_dup_tx), can_elr_(is_elr),
@@ -895,7 +896,8 @@ private:
   ObRedoLSNArray &redo_lsns_;
   // for xa
   ObXATransID xid_;
-  ObTxCommitParts commit_parts_;
+  // for transfer
+  ObTxCommitParts &commit_parts_;
   int64_t epoch_;
 };
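All four hunks in this file serve one change: commit_parts_ in ObTxCommitInfoLog turns from a value member into a non-const reference (matching the other reference members such as redo_lsns_), ObTxCommitInfoLogTempRef gains a backing ObTxCommitParts instance for the default-constructed case, and the constructor parameter drops its const qualifier so the reference can bind. A stripped-down sketch of the pattern follows, with std::vector standing in for ObTxCommitParts; all names here are illustrative, not the real definitions.

#include <vector>

using CommitParts = std::vector<int>;  // stand-in for ObTxCommitParts

// Owns a default instance that the log can reference when no caller
// data exists, mirroring the member added to ObTxCommitInfoLogTempRef.
struct CommitInfoLogTempRef {
  CommitParts commit_parts_;
};

class CommitInfoLog {
public:
  // Binds the reference member to the temp ref's instance instead of
  // default-constructing a private copy, as the fixed constructor does.
  explicit CommitInfoLog(CommitInfoLogTempRef &temp_ref)
      : commit_parts_(temp_ref.commit_parts_) {}
  // The parameter must be a non-const lvalue reference so it can bind
  // to the non-const reference member (hence the dropped const).
  explicit CommitInfoLog(CommitParts &commit_parts)
      : commit_parts_(commit_parts) {}
  const CommitParts &commit_parts() const { return commit_parts_; }
private:
  // for transfer: a reference, so updates made by the owner remain
  // visible through the log instead of being frozen in a copy
  CommitParts &commit_parts_;
};

With a reference member the class is no longer copy-assignable, which is consistent with how the other reference members of ObTxCommitInfoLog (for example redo_lsns_) are already handled.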