[feature-wip](unique-key-merge-on-write) opt lock and only save valid delete_bitmap (#11953)

1. use rlock in most logic instead of wrlock (locking pattern sketched after this list)
2. filter stale rowsets' delete bitmaps when saving meta (see the filtering sketch after the diff)
3. add a delete_bitmap lock to handle compaction and publish_txn conflicts
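
A minimal sketch of the locking pattern points 1 and 3 describe. The class and member names below are illustrative placeholders, not the actual Doris types; in the real code the shared mutex is the tablet header lock returned by `get_header_lock()`:

```cpp
#include <mutex>
#include <shared_mutex>

// Illustrative stand-in for the tablet locking described above; not Doris code.
class TabletLockingSketch {
public:
    // Read-only paths such as calculating a delete bitmap can take the
    // header lock in shared mode, so concurrent readers no longer serialize.
    void calc_delete_bitmap_path() {
        std::shared_lock rlock(_header_lock);
        // ... read rowset metas, compute bitmaps ...
    }

    // Paths that actually mutate tablet meta still take it exclusively.
    void modify_meta_path() {
        std::lock_guard<std::shared_mutex> wrlock(_header_lock);
        // ... add/remove rowsets, save meta ...
    }

    // A dedicated lock serializes delete-bitmap updates so that a concurrent
    // compaction and publish_txn do not overwrite each other's changes.
    void update_delete_bitmap_path() {
        std::lock_guard<std::mutex> dblock(_delete_bitmap_lock);
        // ... merge the new delete bitmap into tablet meta ...
    }

private:
    std::shared_mutex _header_lock;
    std::mutex _delete_bitmap_lock;
};
```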

Co-authored-by: yixiutt <yixiu@selectdb.com>
yixiutt committed 2022-08-23 14:43:40 +08:00 (committed via GitHub)
commit 60fddd56e7, parent 30a13c8141
10 changed files with 73 additions and 51 deletions

@@ -400,20 +400,21 @@ bool MemTable::need_to_agg() {
 Status MemTable::_generate_delete_bitmap() {
     // generate delete bitmap, build a tmp rowset and load recent segment
-    if (_tablet->enable_unique_key_merge_on_write()) {
-        auto rowset = _rowset_writer->build_tmp();
-        auto beta_rowset = reinterpret_cast<BetaRowset*>(rowset.get());
-        std::vector<segment_v2::SegmentSharedPtr> segments;
-        segment_v2::SegmentSharedPtr segment;
-        if (beta_rowset->num_segments() == 0) {
-            return Status::OK();
-        }
-        RETURN_IF_ERROR(beta_rowset->load_segment(beta_rowset->num_segments() - 1, &segment));
-        segments.push_back(segment);
-        std::lock_guard<std::shared_mutex> meta_wrlock(_tablet->get_header_lock());
-        RETURN_IF_ERROR(_tablet->calc_delete_bitmap(beta_rowset->rowset_id(), segments,
-                                                    &_rowset_ids, _delete_bitmap));
-    }
+    if (!_tablet->enable_unique_key_merge_on_write()) {
+        return Status::OK();
+    }
+    auto rowset = _rowset_writer->build_tmp();
+    auto beta_rowset = reinterpret_cast<BetaRowset*>(rowset.get());
+    std::vector<segment_v2::SegmentSharedPtr> segments;
+    segment_v2::SegmentSharedPtr segment;
+    if (beta_rowset->num_segments() == 0) {
+        return Status::OK();
+    }
+    RETURN_IF_ERROR(beta_rowset->load_segment(beta_rowset->num_segments() - 1, &segment));
+    segments.push_back(segment);
+    std::shared_lock meta_rlock(_tablet->get_header_lock());
+    RETURN_IF_ERROR(_tablet->calc_delete_bitmap(beta_rowset->rowset_id(), segments, &_rowset_ids,
+                                                _delete_bitmap));
     return Status::OK();
 }
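
For point 2, the idea is that saving tablet meta should only persist delete-bitmap entries whose rowset still belongs to the tablet; entries left behind by stale rowsets (e.g. ones replaced by compaction) are dropped at save time. A self-contained sketch under assumed, simplified types — the real DeleteBitmap structure and key layout in Doris differ:

```cpp
#include <cstdint>
#include <map>
#include <set>
#include <string>
#include <utility>

// Simplified stand-ins for illustration only; not the actual Doris types.
using RowsetId = std::string;
using SegmentId = uint32_t;
using RowBitmap = std::set<uint32_t>; // placeholder for a roaring bitmap

using DeleteBitmapMap = std::map<std::pair<RowsetId, SegmentId>, RowBitmap>;

// Keep only entries whose rowset is still part of the tablet, so stale
// rowsets' delete bitmaps are not written out when meta is saved.
DeleteBitmapMap filter_valid_delete_bitmap(const DeleteBitmapMap& all,
                                           const std::set<RowsetId>& valid_rowset_ids) {
    DeleteBitmapMap kept;
    for (const auto& [key, bitmap] : all) {
        if (valid_rowset_ids.count(key.first) > 0) {
            kept.emplace(key, bitmap);
        }
    }
    return kept;
}
```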