maintain 3 scns for each piece

This commit is contained in:
wxhwang
2023-01-28 19:02:35 +08:00
committed by ob-robot
parent c15d389bef
commit 924eb925b9
4 changed files with 42 additions and 21 deletions

View File

@ -765,6 +765,9 @@ int ObArchiveHandler::notify_(const ObTenantArchiveRoundAttr &round)
int ObArchiveHandler::get_max_checkpoint_scn_(const uint64_t tenant_id, SCN &max_checkpoint_scn) const
{
// For standby tenant, archive progress is limited only by the max replayable scn for each log stream.
// That would lead to some logs, such as those that create a log stream, being archived before being
// replayed. In this case, we should limit tenant archive progress to not exceed the GTS.
int ret = OB_SUCCESS;
ObAllTenantInfo tenant_info;
const bool for_update = false;

View File

@ -345,6 +345,7 @@ int ObDestRoundCheckpointer::generate_one_piece_(const ObTenantArchiveRoundAttr
// stat data amount and checkpoint ts for current piece.
const ObArray<ObLSDestRoundSummary> &ls_round_list = summary.ls_round_list_;
piece.piece_info_.checkpoint_scn_ = SCN::max_scn();
piece.piece_info_.max_scn_ = SCN::min_scn();
for (int64_t i = 0; OB_SUCC(ret) && i < ls_round_list.count(); i++) {
const ObLSDestRoundSummary &ls_round = ls_round_list.at(i);
@ -368,6 +369,7 @@ int ObDestRoundCheckpointer::generate_one_piece_(const ObTenantArchiveRoundAttr
// fill piece
piece.piece_info_.checkpoint_scn_ = MIN(piece.piece_info_.checkpoint_scn_, ls_piece.checkpoint_scn_);
piece.piece_info_.max_scn_ = MAX(piece.piece_info_.max_scn_, ls_piece.checkpoint_scn_);
piece.piece_info_.input_bytes_ += ls_piece.input_bytes_;
piece.piece_info_.output_bytes_ += ls_piece.output_bytes_;
@ -383,11 +385,10 @@ int ObDestRoundCheckpointer::generate_one_piece_(const ObTenantArchiveRoundAttr
if (OB_FAIL(ret)) {
} else if (piece_id < max_active_piece_id) {
piece.piece_info_.status_.set_frozen();
piece.piece_info_.checkpoint_scn_ = piece.piece_info_.end_scn_;
piece.piece_info_.file_status_ = ObBackupFileStatus::STATUS::BACKUP_FILE_AVAILABLE;
} else if (piece_id == max_active_piece_id) {
piece.piece_info_.checkpoint_scn_ = MIN(new_round_info.checkpoint_scn_, piece.piece_info_.checkpoint_scn_);
piece.piece_info_.status_.set_active();
piece.piece_info_.checkpoint_scn_ = new_round_info.checkpoint_scn_;
piece.piece_info_.file_status_ = ObBackupFileStatus::STATUS::BACKUP_FILE_AVAILABLE;
} else {
// piece_id > max_active_piece_id

View File

@ -1235,20 +1235,25 @@ int ObArchiveStore::get_piece_paths_in_range(const SCN &start_scn, const SCN &en
} else {
const int64_t dest_id = piece_keys.at(0).dest_id_;
int64_t last_piece_idx = -1;
for (int64_t i = 0; OB_SUCC(ret) && i < piece_whole_info.his_frozen_pieces_.count(); i++) {
int64_t i = 0;
int64_t pieces_cnt = piece_whole_info.his_frozen_pieces_.count();
while (OB_SUCC(ret) && i < pieces_cnt) {
const ObTenantArchivePieceAttr &cur = piece_whole_info.his_frozen_pieces_.at(i);
ObBackupPath piece_path;
if (cur.key_.dest_id_ != dest_id) {
// Filter pieces archived at other path.
++i;
continue;
}
if (cur.start_scn_ >= cur.checkpoint_scn_) {
// Filter empty piece
if (cur.file_status_ != ObBackupFileStatus::STATUS::BACKUP_FILE_AVAILABLE) {
// Filter unavailable piece
++i;
continue;
}
if (cur.checkpoint_scn_ <= start_scn) {
if (cur.end_scn_ <= start_scn) {
++i;
continue;
}
@ -1257,7 +1262,7 @@ int ObArchiveStore::get_piece_paths_in_range(const SCN &start_scn, const SCN &en
}
if (pieces.empty()) {
// this piece can be used to restore.
// this piece may be used to restore.
if (cur.start_scn_ <= start_scn) {
if (OB_FAIL(ObArchivePathUtil::get_piece_dir_path(dest, cur.key_.dest_id_, cur.key_.round_id_, cur.key_.piece_id_, piece_path))) {
LOG_WARN("failed to get piece path", K(ret), K(dest), K(cur));
@ -1265,6 +1270,7 @@ int ObArchiveStore::get_piece_paths_in_range(const SCN &start_scn, const SCN &en
LOG_WARN("fail to push back path", K(ret), K(piece_path));
} else {
last_piece_idx = i;
++i;
}
} else {
ret = OB_ENTRY_NOT_EXIST;
@ -1273,17 +1279,28 @@ int ObArchiveStore::get_piece_paths_in_range(const SCN &start_scn, const SCN &en
}
} else {
const ObTenantArchivePieceAttr &prev = piece_whole_info.his_frozen_pieces_.at(last_piece_idx);
if (prev.checkpoint_scn_ != cur.start_scn_) {
// piece not continuous
ret = OB_ENTRY_NOT_EXIST;
LOG_WARN("pieces are not continous", K(ret), K(prev), K(cur), K(start_scn), K(end_scn));
LOG_USER_ERROR(OB_ENTRY_NOT_EXIST, "No enough log for restore");
if (prev.end_scn_ != cur.start_scn_) {
// The <start_scn, checkpoint_scn, end_scn> of pieces are as following:
// Piece#1 : <2022-06-01 00:00:00, 2022-06-01 06:00:00, 2022-06-02 00:00:00>
// Piece#2 : <2022-06-01 08:00:00, 2022-06-02 07:59:00, 2022-06-02 08:00:00>
// Piece#3 : <2022-06-02 08:00:00, 2022-06-03 06:00:00, 2022-06-03 08:00:00>
// And the input [start_scn, end_scn] pair is [2022-06-01 12:00:00, 2022-06-03 04:00:00].
// Previously, Piece#1 is required, and pushed into 'pieces'. However, when i = 1,
// we find that Piece#2 is not continuous with Piece#1, so Piece#1 is not actually required.
// Then Piece#1 is abandoned, and the required pieces are recomputed.
pieces.reset();
last_piece_idx = -1;
// Do not do ++i; recompute whether the current piece can be used to restore.
LOG_INFO("pieces are not continous", K(prev), K(cur), K(start_scn), K(end_scn));
} else if (OB_FAIL(ObArchivePathUtil::get_piece_dir_path(dest, cur.key_.dest_id_, cur.key_.round_id_, cur.key_.piece_id_, piece_path))) {
LOG_WARN("failed to get piece path", K(ret), K(dest), K(cur));
} else if (OB_FAIL(pieces.push_back(piece_path))) {
LOG_WARN("fail to push back path", K(ret), K(piece_path));
} else {
last_piece_idx = i;
++i;
}
}
}

View File

@ -1113,10 +1113,10 @@ TEST_F(ArchiveCheckpointerTest, in_doing_02)
fill_archive_ls_piece(
1001,
false,
1,
1/* piece id */,
ObArchiveRoundState::doing(),
"2022-01-01 00:00:00",
"2022-01-01 00:01:00",
"2022-01-01 00:00:00"/* start time */,
"2022-01-01 00:01:00"/* checkpoint time*/,
0,
2000,
200,
@ -1130,10 +1130,10 @@ TEST_F(ArchiveCheckpointerTest, in_doing_02)
fill_archive_ls_piece(
1002,
false,
1,
1/* piece id */,
ObArchiveRoundState::doing(),
"2022-01-01 00:00:00",
"2022-01-01 00:01:00",
"2022-01-01 00:00:00"/* start time */,
"2022-01-01 00:01:00"/* checkpoint time*/,
0,
1000,
100,
@ -1144,10 +1144,10 @@ TEST_F(ArchiveCheckpointerTest, in_doing_02)
fill_archive_ls_piece(
1002,
false,
2,
2/* piece id */,
ObArchiveRoundState::doing(),
"2022-01-01 00:01:00",
"2022-01-01 00:01:30",
"2022-01-01 00:01:00"/* start time */,
"2022-01-01 00:01:30"/* checkpoint time*/,
1000,
2000,
100,