[improvement](exception-safe) create and prepare node/sink support exception safe (#20551)

This commit is contained in:
Xinyi Zou
2023-06-09 21:06:59 +08:00
committed by GitHub
parent abb2048d5d
commit 93b53cf2f4
12 changed files with 70 additions and 62 deletions

View File

@@ -828,8 +828,8 @@ Status Block::serialize(int be_exec_version, PBlock* pblock,
RETURN_IF_ERROR(get_block_compression_codec(compression_type, &codec));
faststring buf_compressed;
RETURN_IF_CATCH_EXCEPTION(RETURN_IF_ERROR(codec->compress(
Slice(column_values.data(), content_uncompressed_size), &buf_compressed)));
RETURN_IF_ERROR_OR_CATCH_EXCEPTION(codec->compress(
Slice(column_values.data(), content_uncompressed_size), &buf_compressed));
size_t compressed_size = buf_compressed.size();
if (LIKELY(compressed_size < content_uncompressed_size)) {
pblock->set_column_values(buf_compressed.data(), buf_compressed.size());

View File

@@ -164,13 +164,16 @@ bool DataTypeDecimal<T>::parse_from_string(const std::string& str, T* res) const
DataTypePtr create_decimal(UInt64 precision_value, UInt64 scale_value, bool use_v2) {
if (precision_value < min_decimal_precision() ||
precision_value > max_decimal_precision<Decimal128>()) {
LOG(WARNING) << "Wrong precision " << precision_value;
return nullptr;
throw doris::Exception(doris::ErrorCode::NOT_IMPLEMENTED_ERROR,
"Wrong precision {}, min: {}, max: {}", precision_value,
min_decimal_precision(), max_decimal_precision<Decimal128>());
}
if (static_cast<UInt64>(scale_value) > precision_value) {
LOG(WARNING) << "Negative scales and scales larger than precision are not supported";
return nullptr;
throw doris::Exception(doris::ErrorCode::NOT_IMPLEMENTED_ERROR,
"Negative scales and scales larger than precision are not "
"supported, scale_value: {}, precision_value: {}",
scale_value, precision_value);
}
if (use_v2) {

View File

@@ -202,8 +202,8 @@ void DataTypeDateTimeV2::cast_to_date_v2(const UInt64 from, UInt32& to) {
/// Factory for the DATETIMEV2 data type.
///
/// @param scale_value fractional-second precision; valid range is 0..6
///        (microsecond resolution is the maximum DATETIMEV2 supports).
/// @return shared DataTypeDateTimeV2 instance carrying the requested scale.
/// @throws doris::Exception(NOT_IMPLEMENTED_ERROR) when scale_value > 6 —
///         per this commit, invalid arguments now throw instead of
///         returning nullptr, so callers no longer need a null check.
DataTypePtr create_datetimev2(UInt64 scale_value) {
    if (scale_value > 6) {
        // Old code logged a warning and returned nullptr; the stripped diff
        // left both versions in place, making the throw unreachable dead code.
        // Keep only the exception path, matching the commit's intent.
        throw doris::Exception(doris::ErrorCode::NOT_IMPLEMENTED_ERROR, "scale_value > 6 {}",
                               scale_value);
    }
    return std::make_shared<DataTypeDateTimeV2>(scale_value);
}

View File

@@ -1302,8 +1302,8 @@ Status OrcReader::filter(orc::ColumnVectorBatch& data, uint16_t* sel, uint16_t s
for (auto& conjunct : _lazy_read_ctx.conjuncts) {
filter_conjuncts.push_back(conjunct);
}
RETURN_IF_CATCH_EXCEPTION(RETURN_IF_ERROR(VExprContext::execute_conjuncts(
filter_conjuncts, nullptr, block, _filter.get(), &can_filter_all)));
RETURN_IF_ERROR_OR_CATCH_EXCEPTION(VExprContext::execute_conjuncts(
filter_conjuncts, nullptr, block, _filter.get(), &can_filter_all));
if (_lazy_read_ctx.resize_first_column) {
block->get_by_position(0).column->assume_mutable()->clear();

View File

@@ -785,8 +785,8 @@ Status RowGroupReader::_rewrite_dict_predicates() {
// The following process may be tricky and time-consuming, but we have no other way.
temp_block.get_by_position(0).column->assume_mutable()->resize(dict_value_column_size);
}
RETURN_IF_CATCH_EXCEPTION(RETURN_IF_ERROR(VExprContext::execute_conjuncts_and_filter_block(
ctxs, nullptr, &temp_block, columns_to_filter, column_to_keep)));
RETURN_IF_ERROR_OR_CATCH_EXCEPTION(VExprContext::execute_conjuncts_and_filter_block(
ctxs, nullptr, &temp_block, columns_to_filter, column_to_keep));
if (dict_pos != 0) {
// We have to clean the first column to insert right data.
temp_block.get_by_position(0).column->assume_mutable()->clear();

View File

@@ -309,16 +309,11 @@ Status VFileScanner::_init_src_block(Block* block) {
auto it = _name_to_col_type.find(slot->col_name());
if (it == _name_to_col_type.end() || _is_dynamic_schema) {
// not exist in file, using type from _input_tuple_desc
data_type =
DataTypeFactory::instance().create_data_type(slot->type(), slot->is_nullable());
RETURN_IF_CATCH_EXCEPTION(data_type = DataTypeFactory::instance().create_data_type(
slot->type(), slot->is_nullable()));
} else {
data_type = DataTypeFactory::instance().create_data_type(it->second, true);
}
if (data_type == nullptr) {
return Status::NotSupported("Not support data type {} for column {}",
it == _name_to_col_type.end() ? slot->type().debug_string()
: it->second.debug_string(),
slot->col_name());
RETURN_IF_CATCH_EXCEPTION(
data_type = DataTypeFactory::instance().create_data_type(it->second, true));
}
MutableColumnPtr data_column = data_type->create_column();
_src_block.insert(

View File

@@ -179,7 +179,7 @@ Status VSortNode::open(RuntimeState* state) {
ExecNode::get_next,
_children[0], std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3)));
RETURN_IF_CATCH_EXCEPTION(RETURN_IF_ERROR(sink(state, upstream_block.get(), eos)));
RETURN_IF_ERROR_OR_CATCH_EXCEPTION(sink(state, upstream_block.get(), eos));
} while (!eos);
child(0)->close(state);
@@ -191,7 +191,7 @@ Status VSortNode::open(RuntimeState* state) {
}
Status VSortNode::pull(doris::RuntimeState* state, vectorized::Block* output_block, bool* eos) {
RETURN_IF_CATCH_EXCEPTION(RETURN_IF_ERROR(_sorter->get_next(state, output_block, eos)));
RETURN_IF_ERROR_OR_CATCH_EXCEPTION(_sorter->get_next(state, output_block, eos));
reached_limit(output_block, eos);
if (*eos) {
_runtime_profile->add_info_string("Spilled", _sorter->is_spilled() ? "true" : "false");