diff --git a/be/src/agent/task_worker_pool.cpp b/be/src/agent/task_worker_pool.cpp
index 9f9590ef84..a90bbec7d8 100644
--- a/be/src/agent/task_worker_pool.cpp
+++ b/be/src/agent/task_worker_pool.cpp
@@ -273,8 +273,8 @@ Status check_migrate_request(StorageEngine& engine, const TStorageMediumMigrateR
     // check local disk capacity
     int64_t tablet_size = tablet->tablet_local_size();
     if ((*dest_store)->reach_capacity_limit(tablet_size)) {
-        return Status::InternalError("reach the capacity limit of path {}, tablet_size={}",
-                                     (*dest_store)->path(), tablet_size);
+        return Status::Error<ErrorCode::EXCEEDED_LIMIT>(
+                "reach the capacity limit of path {}, tablet_size={}", (*dest_store)->path(), tablet_size);
     }
     return Status::OK();
 }
diff --git a/be/src/common/config.cpp b/be/src/common/config.cpp
index 09e084fbb4..8153e5aca5 100644
--- a/be/src/common/config.cpp
+++ b/be/src/common/config.cpp
@@ -42,8 +42,7 @@
 #include "io/fs/file_writer.h"
 #include "io/fs/local_file_system.h"
 
-namespace doris {
-namespace config {
+namespace doris::config {
 
 // Dir of custom config file
 DEFINE_String(custom_config_dir, "${DORIS_HOME}/conf");
@@ -109,10 +108,6 @@ DEFINE_mInt32(hash_table_double_grow_degree, "31");
 DEFINE_mInt32(max_fill_rate, "2");
 DEFINE_mInt32(double_resize_threshold, "23");
 
-// Expand the hash table before inserting data, the maximum expansion size.
-// There are fewer duplicate keys, reducing the number of resize hash tables
-// There are many duplicate keys, and the hash table filled bucket is far less than the hash table build bucket.
-DEFINE_mInt64(hash_table_pre_expanse_max_rows, "65535");
 
 // The maximum low water mark of the system `/proc/meminfo/MemAvailable`, Unit byte, default 1.6G,
 // actual low water mark=min(1.6G, MemTotal * 10%), avoid wasting too much memory on machines
@@ -845,16 +840,6 @@ DEFINE_String(function_service_protocol, "h2:grpc");
 // use which load balancer to select server to connect
 DEFINE_String(rpc_load_balancer, "rr");
 
-// The maximum buffer/queue size to collect span. After the size is reached, spans are dropped.
-// An export will be triggered when the number of spans in the queue reaches half of the maximum.
-DEFINE_Int32(max_span_queue_size, "2048");
-
-// The maximum batch size of every export spans. It must be smaller or equal to max_queue_size.
-DEFINE_Int32(max_span_export_batch_size, "512");
-
-// The time interval between two consecutive export spans.
-DEFINE_Int32(export_span_schedule_delay_millis, "500");
-
 // a soft limit of string type length, the hard limit is 2GB - 4, but if too long will cause very low performance,
 // so we set a soft limit, default is 1MB
 DEFINE_mInt32(string_type_length_soft_limit_bytes, "1048576");
@@ -867,10 +852,6 @@ DEFINE_mInt32(jsonb_type_length_soft_limit_bytes, "1048576");
 DEFINE_Validator(jsonb_type_length_soft_limit_bytes,
                  [](const int config) -> bool { return config > 0 && config <= 2147483643; });
 
-// used for olap scanner to save memory, when the size of unused_object_pool
-// is greater than object_pool_buffer_size, release the object in the unused_object_pool.
-DEFINE_Int32(object_pool_buffer_size, "100");
-
 // Threshold of reading a small file into memory
 DEFINE_mInt32(in_memory_file_size, "1048576"); // 1MB
 
@@ -906,7 +887,7 @@ DEFINE_Int32(concurrency_per_dir, "2");
 // "whole_file_cache": the whole file.
 DEFINE_mString(file_cache_type, "file_block_cache");
 DEFINE_Validator(file_cache_type, [](std::string_view config) -> bool {
-    return config == "" || config == "file_block_cache";
+    return config.empty() || config == "file_block_cache";
 });
 
 DEFINE_Int32(s3_transfer_executor_pool_size, "2");
@@ -970,8 +951,8 @@ DEFINE_Bool(enable_fuzzy_mode, "false");
 DEFINE_Bool(enable_debug_points, "false");
 
 DEFINE_Int32(pipeline_executor_size, "0");
-// 128 MB
-DEFINE_mInt64(local_exchange_buffer_mem_limit, "134217728");
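+// If true, scan tasks are dispatched through workload-group scheduling; the
+// mutable timeout below caps how long a queued scan task waits, in milliseconds.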
+DEFINE_Bool(enable_workload_group_for_scan, "false");
+DEFINE_mInt64(workload_group_scan_task_wait_timeout_ms, "10000");
 
 // Temp config. True to use optimization for bitmap_index apply predicate except leaf node of the and node.
 // Will remove after fully test.
@@ -1152,6 +1133,9 @@ DEFINE_Bool(enable_snapshot_action, "false");
 
 DEFINE_mInt32(variant_max_merged_tablet_schema_size, "2048");
 
+// 128 MB
+DEFINE_mInt64(local_exchange_buffer_mem_limit, "134217728");
+
 // clang-format off
 #ifdef BE_TEST
 // test s3
@@ -1204,7 +1188,7 @@ bool replaceenv(std::string& s) {
     std::size_t pos = 0;
     std::size_t start = 0;
     while ((start = s.find("${", pos)) != std::string::npos) {
-        std::size_t end = s.find("}", start + 2);
+        std::size_t end = s.find('}', start + 2);
         if (end == std::string::npos) {
             return false;
         }
@@ -1242,9 +1226,9 @@ bool strtox(const std::string& valstr, std::vector<T>& retval) {
 }
 
 bool strtox(const std::string& valstr, bool& retval) {
-    if (valstr.compare("true") == 0) {
+    if (valstr == "true") {
         retval = true;
-    } else if (valstr.compare("false") == 0) {
+    } else if (valstr == "false") {
         retval = false;
     } else {
         return false;
@@ -1604,18 +1588,17 @@ std::vector<std::vector<std::string>> get_config_info() {
         std::vector<std::string> _config;
         _config.push_back(it.first);
 
-        _config.push_back(field_it->second.type);
+        _config.emplace_back(field_it->second.type);
         if (0 == strcmp(field_it->second.type, "bool")) {
-            _config.push_back(it.second == "1" ? "true" : "false");
+            _config.emplace_back(it.second == "1" ? "true" : "false");
         } else {
             _config.push_back(it.second);
         }
-        _config.push_back(field_it->second.valmutable ? "true" : "false");
+        _config.emplace_back(field_it->second.valmutable ? "true" : "false");
 
         configs.push_back(_config);
     }
     return configs;
 }
 
-} // namespace config
-} // namespace doris
+} // namespace doris::config
diff --git a/be/src/common/config.h b/be/src/common/config.h
index 5820da682f..abdf1c67a2 100644
--- a/be/src/common/config.h
+++ b/be/src/common/config.h
@@ -152,11 +152,6 @@ DECLARE_mInt32(max_fill_rate);
 DECLARE_mInt32(double_resize_threshold);
 
-// Expand the hash table before inserting data, the maximum expansion size.
-// There are fewer duplicate keys, reducing the number of resize hash tables
-// There are many duplicate keys, and the hash table filled bucket is far less than the hash table build bucket.
-DECLARE_mInt64(hash_table_pre_expanse_max_rows);
-
 // The maximum low water mark of the system `/proc/meminfo/MemAvailable`, Unit byte, default 1.6G,
 // actual low water mark=min(1.6G, MemTotal * 10%), avoid wasting too much memory on machines
 // with large memory larger than 16G.
 
@@ -907,26 +902,12 @@ DECLARE_String(function_service_protocol);
 
 // use which load balancer to select server to connect
 DECLARE_String(rpc_load_balancer);
 
-// The maximum buffer/queue size to collect span. After the size is reached, spans are dropped.
-// An export will be triggered when the number of spans in the queue reaches half of the maximum.
-DECLARE_Int32(max_span_queue_size);
-
-// The maximum batch size of every export spans. It must be smaller or equal to max_queue_size.
-DECLARE_Int32(max_span_export_batch_size);
-
-// The time interval between two consecutive export spans.
-DECLARE_Int32(export_span_schedule_delay_millis);
-
 // a soft limit of string type length, the hard limit is 2GB - 4, but if too long will cause very low performance,
 // so we set a soft limit, default is 1MB
 DECLARE_mInt32(string_type_length_soft_limit_bytes);
 
 DECLARE_mInt32(jsonb_type_length_soft_limit_bytes);
 
-// used for olap scanner to save memory, when the size of unused_object_pool
-// is greater than object_pool_buffer_size, release the object in the unused_object_pool.
-DECLARE_Int32(object_pool_buffer_size);
-
 // Threshold of reading a small file into memory
 DECLARE_mInt32(in_memory_file_size);
diff --git a/be/src/common/status.h b/be/src/common/status.h
index e854cda360..14aec46cc2 100644
--- a/be/src/common/status.h
+++ b/be/src/common/status.h
@@ -8,8 +8,8 @@
 #include // for TStatus
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -93,7 +93,8 @@ namespace ErrorCode {
     E(VERSION_NOT_EXIST, -214, false);          \
     E(TABLE_NOT_FOUND, -215, true);             \
     E(TRY_LOCK_FAILED, -216, false);            \
-    E(OUT_OF_BOUND, -218, true);                \
+    E(EXCEEDED_LIMIT, -217, false);             \
+    E(OUT_OF_BOUND, -218, false);               \
     E(INVALID_ROOT_PATH, -222, true);           \
     E(NO_AVAILABLE_ROOT_PATH, -223, true);      \
     E(CHECK_LINES_ERROR, -224, true);           \
diff --git a/be/src/http/action/http_stream.cpp b/be/src/http/action/http_stream.cpp
index ef4decb038..8c75f6be74 100644
--- a/be/src/http/action/http_stream.cpp
+++ b/be/src/http/action/http_stream.cpp
@@ -194,7 +194,7 @@ int HttpStreamAction::on_header(HttpRequest* req) {
                 << " Bytes) exceeds the WAL (Write-Ahead Log) limit ("
                 << config::wal_max_disk_size * 0.8
                 << " Bytes). Please set this load to \"group commit\"=false.";
-            st = Status::InternalError("Http load size too large.");
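+            // Reject the load up front: its declared size exceeds 80% of
+            // wal_max_disk_size (details are in the warning logged above).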
+            st = Status::Error<ErrorCode::EXCEEDED_LIMIT>("Http load size too large.");
         }
     }
 }
diff --git a/be/src/http/action/stream_load.cpp b/be/src/http/action/stream_load.cpp
index 7d336a932b..04c29a5302 100644
--- a/be/src/http/action/stream_load.cpp
+++ b/be/src/http/action/stream_load.cpp
@@ -219,7 +219,7 @@ int StreamLoadAction::on_header(HttpRequest* req) {
                 << " Bytes) exceeds the WAL (Write-Ahead Log) limit ("
                 << config::wal_max_disk_size * 0.8
                 << " Bytes). Please set this load to \"group commit\"=false.";
-            st = Status::InternalError("Stream load size too large.");
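+            // Same WAL capacity guard as http_stream.cpp: oversized group-commit
+            // loads are rejected instead of being written to the WAL.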
+            st = Status::Error<ErrorCode::EXCEEDED_LIMIT>("Stream load size too large.");
         }
     }
 }
diff --git a/be/src/http/action/tablet_migration_action.cpp b/be/src/http/action/tablet_migration_action.cpp
index 3ccdde7e7f..df75403497 100644
--- a/be/src/http/action/tablet_migration_action.cpp
+++ b/be/src/http/action/tablet_migration_action.cpp
@@ -23,6 +23,7 @@
 #include
 
 #include "common/config.h"
+#include "common/status.h"
 #include "http/http_channel.h"
 #include "http/http_headers.h"
 #include "http/http_request.h"
@@ -209,7 +210,9 @@ Status TabletMigrationAction::_check_migrate_request(int64_t tablet_id, int32_t
     if ((*dest_store)->reach_capacity_limit(tablet_size)) {
         LOG(WARNING) << "reach the capacity limit of path: " << (*dest_store)->path()
                      << ", tablet size: " << tablet_size;
-        return Status::InternalError("Insufficient disk capacity");
+        return Status::Error<ErrorCode::EXCEEDED_LIMIT>(
+                "reach the capacity limit of path {}, tablet_size={}", (*dest_store)->path(),
+                tablet_size);
     }
 
     return Status::OK();
diff --git a/be/src/olap/rowset/segment_v2/inverted_index_cache.cpp b/be/src/olap/rowset/segment_v2/inverted_index_cache.cpp
index 806d7473ca..0440f6865a 100644
--- a/be/src/olap/rowset/segment_v2/inverted_index_cache.cpp
+++ b/be/src/olap/rowset/segment_v2/inverted_index_cache.cpp
@@ -21,12 +21,14 @@
 #include
 #include
 // IWYU pragma: no_include
-#include // IWYU pragma: keep
-#include
 #include
+
+#include // IWYU pragma: keep
+#include
 // IWYU pragma: no_include
 #include // IWYU pragma: keep
 #include
+#include
 
 #include "common/logging.h"
 #include "olap/olap_common.h"
@@ -38,8 +40,7 @@
 #include "util/defer_op.h"
 #include "util/runtime_profile.h"
 
-namespace doris {
-namespace segment_v2 {
+namespace doris::segment_v2 {
 
 Status FulltextIndexSearcherBuilder::build(DorisCompoundReader* directory,
                                            OptionalIndexSearcherPtr& output_searcher) {
@@ -109,8 +110,7 @@ InvertedIndexSearcherCache::InvertedIndexSearcherCache(size_t capacity, uint32_t
 
     if (config::enable_inverted_index_cache_check_timestamp) {
         auto get_last_visit_time = [](const void* value) -> int64_t {
-            InvertedIndexSearcherCache::CacheValue* cache_value =
-                    (InvertedIndexSearcherCache::CacheValue*)value;
+            auto* cache_value = (InvertedIndexSearcherCache::CacheValue*)value;
            return cache_value->last_visit_time;
         };
         _cache = std::unique_ptr(
@@ -146,8 +146,7 @@ Status InvertedIndexSearcherCache::get_index_searcher(
     cache_handle->owned = !use_cache;
     IndexSearcherPtr index_searcher;
     std::unique_ptr index_builder = nullptr;
-    auto mem_tracker =
-            std::unique_ptr<MemTracker>(new MemTracker("InvertedIndexSearcherCacheWithRead"));
+    auto mem_tracker = std::make_unique<MemTracker>("InvertedIndexSearcherCacheWithRead");
 #ifndef BE_TEST
     {
         bool exists = false;
@@ -280,7 +279,7 @@ Status InvertedIndexSearcherCache::insert(const io::FileSystemSPtr& fs,
     cache_value->index_searcher = std::move(index_searcher);
     cache_value->size = mem_tracker->consumption();
     cache_value->last_visit_time = UnixMillis();
-    auto lru_handle = _insert(cache_key, cache_value.release());
+    auto* lru_handle = _insert(cache_key, cache_value.release());
     _cache->release(lru_handle);
     return Status::OK();
 }
@@ -300,7 +299,7 @@ int64_t InvertedIndexSearcherCache::mem_consumption() {
 
 bool InvertedIndexSearcherCache::_lookup(const InvertedIndexSearcherCache::CacheKey& key,
                                          InvertedIndexCacheHandle* handle) {
-    auto lru_handle = _cache->lookup(key.index_file_path);
+    auto* lru_handle = _cache->lookup(key.index_file_path);
     if (lru_handle == nullptr) {
         return false;
     }
@@ -311,8 +310,7 @@ bool InvertedIndexSearcherCache::_lookup(const InvertedIndexSearcherCache::Cache
 
 Cache::Handle* InvertedIndexSearcherCache::_insert(const InvertedIndexSearcherCache::CacheKey& key,
                                                    CacheValue* value) {
     auto deleter = [](const doris::CacheKey& key, void* value) {
-        InvertedIndexSearcherCache::CacheValue* cache_value =
-                (InvertedIndexSearcherCache::CacheValue*)value;
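+        // The LRU cache hands back a type-erased pointer; cast it back before deleting.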
+        auto* cache_value = (InvertedIndexSearcherCache::CacheValue*)value;
         delete cache_value;
     };
 
@@ -325,7 +323,7 @@ bool InvertedIndexQueryCache::lookup(const CacheKey& key, InvertedIndexQueryCach
     if (key.encode().empty()) {
         return false;
     }
-    auto lru_handle = _cache->lookup(key.encode());
+    auto* lru_handle = _cache->lookup(key.encode());
     if (lru_handle == nullptr) {
         return false;
     }
@@ -348,8 +346,8 @@ void InvertedIndexQueryCache::insert(const CacheKey& key, std::shared_ptr
 
-    auto lru_handle = _cache->insert(key.encode(), (void*)cache_value_ptr.release(),
-                                     bitmap->getSizeInBytes(), deleter, CachePriority::NORMAL);
+    auto* lru_handle = _cache->insert(key.encode(), (void*)cache_value_ptr.release(),
+                                      bitmap->getSizeInBytes(), deleter, CachePriority::NORMAL);
     *handle = InvertedIndexQueryCacheHandle(_cache.get(), lru_handle);
 }
 
@@ -360,5 +358,4 @@ int64_t InvertedIndexQueryCache::mem_consumption() {
     return 0L;
 }
 
-} // namespace segment_v2
-} // namespace doris
+} // namespace doris::segment_v2
diff --git a/be/src/olap/schema_change.cpp b/be/src/olap/schema_change.cpp
index c9a1283ede..fd4bf312c9 100644
--- a/be/src/olap/schema_change.cpp
+++ b/be/src/olap/schema_change.cpp
@@ -1339,10 +1339,10 @@ Status SchemaChangeHandler::_parse_request(const SchemaChangeParams& sc_params,
 Status SchemaChangeHandler::_init_column_mapping(ColumnMapping* column_mapping,
                                                  const TabletColumn& column_schema,
                                                  const std::string& value) {
-    column_mapping->default_value = WrapperField::create(column_schema);
-
-    if (column_mapping->default_value == nullptr) {
-        return Status::Error("column_mapping->default_value is nullptr");
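+    // On success take ownership of the new field; otherwise forward the typed
+    // Status carried by the Result.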
+    if (auto field = WrapperField::create(column_schema); field.has_value()) {
+        column_mapping->default_value = field.value();
+    } else {
+        return field.error();
     }
 
     if (column_schema.is_nullable() && value.length() == 0) {
diff --git a/be/src/olap/single_replica_compaction.cpp b/be/src/olap/single_replica_compaction.cpp
index d7f4e54f10..a5e060147d 100644
--- a/be/src/olap/single_replica_compaction.cpp
+++ b/be/src/olap/single_replica_compaction.cpp
@@ -437,7 +437,9 @@ Status SingleReplicaCompaction::_download_files(DataDir* data_dir,
                 HttpClient::execute_with_retry(DOWNLOAD_FILE_MAX_RETRY, 1, get_file_size_cb));
         // check disk capacity
         if (data_dir->reach_capacity_limit(file_size)) {
-            return Status::InternalError("Disk reach capacity limit");
+            return Status::Error<ErrorCode::EXCEEDED_LIMIT>(
+                    "reach the capacity limit of path {}, file_size={}", data_dir->path(),
+                    file_size);
         }
 
         total_file_size += file_size;
diff --git a/be/src/olap/storage_engine.cpp b/be/src/olap/storage_engine.cpp
index 4fb248d24e..4e0fb2eddf 100644
--- a/be/src/olap/storage_engine.cpp
+++ b/be/src/olap/storage_engine.cpp
@@ -402,7 +402,9 @@ Status StorageEngine::_check_file_descriptor_number() {
         LOG(ERROR) << "File descriptor number is less than " << config::min_file_descriptor_number
                    << ". Please use (ulimit -n) to set a value equal or greater than "
                    << config::min_file_descriptor_number;
-        return Status::InternalError("file descriptors limit is too small");
+        return Status::Error<ErrorCode::EXCEEDED_LIMIT>(
+                "file descriptor limit {} is smaller than {}", l.rlim_cur,
+                config::min_file_descriptor_number);
     }
     return Status::OK();
 }
diff --git a/be/src/olap/task/engine_batch_load_task.cpp b/be/src/olap/task/engine_batch_load_task.cpp
index 756d8012b5..6f4ff53e77 100644
--- a/be/src/olap/task/engine_batch_load_task.cpp
+++ b/be/src/olap/task/engine_batch_load_task.cpp
@@ -110,7 +110,7 @@ Status EngineBatchLoadTask::_init() {
 
     // check disk capacity
     if (_push_req.push_type == TPushType::LOAD_V2) {
-        if (tablet->data_dir()->reach_capacity_limit(_push_req.__isset.http_file_size)) {
+        if (tablet->data_dir()->reach_capacity_limit(_push_req.http_file_size)) {
             return Status::IOError("Disk does not have enough capacity");
         }
     }
diff --git a/be/src/olap/task/engine_clone_task.cpp b/be/src/olap/task/engine_clone_task.cpp
index 997a5228c8..581cd3515c 100644
--- a/be/src/olap/task/engine_clone_task.cpp
+++ b/be/src/olap/task/engine_clone_task.cpp
@@ -515,7 +515,9 @@ Status EngineCloneTask::_download_files(DataDir* data_dir, const std::string& re
                 HttpClient::execute_with_retry(DOWNLOAD_FILE_MAX_RETRY, 1, get_file_size_cb));
         // check disk capacity
         if (data_dir->reach_capacity_limit(file_size)) {
-            return Status::InternalError("Disk reach capacity limit");
+            return Status::Error<ErrorCode::EXCEEDED_LIMIT>(
+                    "reach the capacity limit of path {}, file_size={}", data_dir->path(),
+                    file_size);
         }
 
         total_file_size += file_size;
diff --git a/be/src/olap/wrapper_field.cpp b/be/src/olap/wrapper_field.cpp
index cc4cad38ef..71386900e4 100644
--- a/be/src/olap/wrapper_field.cpp
+++ b/be/src/olap/wrapper_field.cpp
@@ -18,21 +18,23 @@
 #include "olap/wrapper_field.h"
 
 #include
-#include
 #include
+#include
 
 #include
 
 #include "common/config.h"
+#include "common/status.h"
 #include "olap/olap_common.h"
 #include "olap/olap_define.h"
 #include "olap/row_cursor.h"
+#include "util/expected.hpp"
 
 namespace doris {
 
 const size_t DEFAULT_STRING_LENGTH = 50;
 
-WrapperField* WrapperField::create(const TabletColumn& column, uint32_t len) {
+Result<WrapperField*> WrapperField::create(const TabletColumn& column, uint32_t len) {
     bool is_string_type = (column.type() == FieldType::OLAP_FIELD_TYPE_CHAR ||
                            column.type() == FieldType::OLAP_FIELD_TYPE_VARCHAR ||
                            column.type() == FieldType::OLAP_FIELD_TYPE_HLL ||
@@ -44,12 +46,13 @@ WrapperField* WrapperField::create(const TabletColumn& column, uint32_t len) {
     if (is_string_type && len > max_length) {
         LOG(WARNING) << "length of string parameter is too long[len=" << len
                      << ", max_len=" << max_length << "].";
-        return nullptr;
+        return unexpected {Status::Error<ErrorCode::EXCEEDED_LIMIT>(
+                "length of string parameter is too long[len={}, max_len={}].", len, max_length)};
     }
 
     Field* rep = FieldFactory::create(column);
     if (rep == nullptr) {
-        return nullptr;
+        return unexpected {Status::Uninitialized("Unsupported field creation of {}", column.name())};
     }
 
     size_t variable_len = 0;
@@ -67,9 +70,7 @@ WrapperField* WrapperField::create(const TabletColumn& column, uint32_t
     } else {
         variable_len = column.length();
     }
-
-    WrapperField* wrapper = new WrapperField(rep, variable_len, is_string_type);
-    return wrapper;
+    return new WrapperField(rep, variable_len, is_string_type);
 }
 
 WrapperField* WrapperField::create_by_type(const FieldType& type, int32_t var_length) {
@@ -83,8 +84,7 @@ WrapperField* WrapperField::create_by_type(const FieldType& type, int32_t var_le
             type == FieldType::OLAP_FIELD_TYPE_OBJECT ||
             type == FieldType::OLAP_FIELD_TYPE_STRING ||
             type == FieldType::OLAP_FIELD_TYPE_QUANTILE_STATE);
-    auto wrapper = new WrapperField(rep, var_length, is_string_type);
-    return wrapper;
+    return new WrapperField(rep, var_length, is_string_type);
 }
 
 WrapperField::WrapperField(Field* rep, size_t variable_len, bool is_string_type)
@@ -98,7 +98,7 @@ WrapperField::WrapperField(Field* rep, size_t variable_len, bool is_string_type)
     if (_is_string_type) {
         _var_length = variable_len > DEFAULT_STRING_LENGTH ? DEFAULT_STRING_LENGTH : variable_len;
 
-        Slice* slice = reinterpret_cast<Slice*>(buf);
+        auto* slice = reinterpret_cast<Slice*>(buf);
         slice->size = _var_length;
         _string_content.reset(new char[slice->size]);
         slice->data = _string_content.get();
diff --git a/be/src/olap/wrapper_field.h b/be/src/olap/wrapper_field.h
index 978c9d7378..53ce8e1d3e 100644
--- a/be/src/olap/wrapper_field.h
+++ b/be/src/olap/wrapper_field.h
@@ -34,7 +34,7 @@ enum class FieldType;
 
 class WrapperField {
 public:
-    static WrapperField* create(const TabletColumn& column, uint32_t len = 0);
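+    // Returns the new field, or an error Status (over-long string value or
+    // unsupported column type) carried in the Result.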
+    static Result<WrapperField*> create(const TabletColumn& column, uint32_t len = 0);
     static WrapperField* create_by_type(const FieldType& type) { return create_by_type(type, 0); }
     static WrapperField* create_by_type(const FieldType& type, int32_t var_length);
diff --git a/be/src/runtime/snapshot_loader.cpp b/be/src/runtime/snapshot_loader.cpp
index afd4190728..34d26764ea 100644
--- a/be/src/runtime/snapshot_loader.cpp
+++ b/be/src/runtime/snapshot_loader.cpp
@@ -312,7 +312,9 @@ Status SnapshotLoader::download(const std::map& src_to
 
             // check disk capacity
             if (data_dir->reach_capacity_limit(file_len)) {
-                return Status::InternalError("capacity limit reached");
+                return Status::Error<ErrorCode::EXCEEDED_LIMIT>(
+                        "reach the capacity limit of path {}, file_size={}", data_dir->path(),
+                        file_len);
             }
             // remove file which will be downloaded now.
             // this file will be added to local_files if it is downloaded successfully.
@@ -545,7 +547,9 @@ Status SnapshotLoader::remote_http_download(
 
             // check disk capacity
             if (data_dir->reach_capacity_limit(file_size)) {
-                return Status::InternalError("Disk reach capacity limit");
+                return Status::Error<ErrorCode::EXCEEDED_LIMIT>(
+                        "reach the capacity limit of path {}, file_size={}", data_dir->path(),
+                        file_size);
             }
 
             total_file_size += file_size;
diff --git a/be/src/vec/exprs/vexpr.cpp b/be/src/vec/exprs/vexpr.cpp
index 97611867e7..3f06dbb7f5 100644
--- a/be/src/vec/exprs/vexpr.cpp
+++ b/be/src/vec/exprs/vexpr.cpp
@@ -182,9 +182,9 @@ VExpr::VExpr(const TExprNode& node)
 
 VExpr::VExpr(const VExpr& vexpr) = default;
 
-VExpr::VExpr(const TypeDescriptor& type, bool is_slotref, bool is_nullable)
+VExpr::VExpr(TypeDescriptor type, bool is_slotref, bool is_nullable)
         : _opcode(TExprOpcode::INVALID_OPCODE),
-          _type(type),
+          _type(std::move(type)),
           _fn_context_index(-1),
           _prepared(false) {
     if (is_slotref) {
@@ -197,13 +197,13 @@ VExpr::VExpr(const TypeDescriptor& type, bool is_slotref, bool is_nullable)
 Status VExpr::prepare(RuntimeState* state, const RowDescriptor& row_desc, VExprContext* context) {
     ++context->_depth_num;
     if (context->_depth_num > config::max_depth_of_expr_tree) {
-        return Status::InternalError(
+        return Status::Error<ErrorCode::EXCEEDED_LIMIT>(
                 "The depth of the expression tree is too big, make it less than {}",
                 config::max_depth_of_expr_tree);
     }
 
-    for (int i = 0; i < _children.size(); ++i) {
-        RETURN_IF_ERROR(_children[i]->prepare(state, row_desc, context));
+    for (auto& child : _children) {
+        RETURN_IF_ERROR(child->prepare(state, row_desc, context));
     }
     --context->_depth_num;
     return Status::OK();
@@ -211,8 +211,8 @@ Status VExpr::prepare(RuntimeState* state, const RowDescriptor& row_desc, VExprC
 
 Status VExpr::open(RuntimeState* state, VExprContext* context,
                    FunctionContext::FunctionStateScope scope) {
-    for (int i = 0; i < _children.size(); ++i) {
-        RETURN_IF_ERROR(_children[i]->open(state, context, scope));
+    for (auto& child : _children) {
+        RETURN_IF_ERROR(child->open(state, context, scope));
     }
     if (scope == FunctionContext::FRAGMENT_LOCAL) {
         RETURN_IF_ERROR(VExpr::get_const_col(context, nullptr));
diff --git a/be/src/vec/exprs/vexpr.h b/be/src/vec/exprs/vexpr.h
index e9f011a2c3..708b57ab63 100644
--- a/be/src/vec/exprs/vexpr.h
+++ b/be/src/vec/exprs/vexpr.h
@@ -80,7 +80,7 @@ public:
     VExpr(const TExprNode& node);
     VExpr(const VExpr& vexpr);
 
-    VExpr(const TypeDescriptor& type, bool is_slotref, bool is_nullable);
+    VExpr(TypeDescriptor type, bool is_slotref, bool is_nullable);
     // only used for test
     VExpr() = default;
     virtual ~VExpr() = default;
diff --git a/regression-test/suites/load_p0/stream_load/test_group_commit_wal_limit.groovy b/regression-test/suites/load_p0/stream_load/test_group_commit_wal_limit.groovy
index 68ccc3f853..24b66de04b 100644
--- a/regression-test/suites/load_p0/stream_load/test_group_commit_wal_limit.groovy
+++ b/regression-test/suites/load_p0/stream_load/test_group_commit_wal_limit.groovy
@@ -71,7 +71,7 @@ suite("test_group_commit_wal_limit") {
     assertEquals(code, 0)
     out = process.text
     logger.info("out is " + out )
-    assertTrue(out.contains('[INTERNAL_ERROR]Stream load size too large'))
+    assertTrue(out.contains('Stream load size too large'))
 
     // too large data case 1TB
     strBuilder = new StringBuilder()
@@ -89,7 +89,7 @@ suite("test_group_commit_wal_limit") {
     assertEquals(code, 0)
     out = process.text
     logger.info("out is " + out )
-    assertTrue(out.contains('[INTERNAL_ERROR]Stream load size too large'))
+    assertTrue(out.contains('Stream load size too large'))
 
     // httpload
     // normal case
@@ -126,7 +126,7 @@ suite("test_group_commit_wal_limit") {
     assertEquals(code, 0)
     out = process.text
     logger.info("out is " + out )
-    assertTrue(out.contains('[INTERNAL_ERROR]Http load size too large'))
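+    // Assert on the message text only, not on the bracketed error-code prefix.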
suite("test_group_commit_wal_limit") { assertEquals(code, 0) out = process.text logger.info("out is " + out ) - assertTrue(out.contains('[INTERNAL_ERROR]Http load size too large')) + assertTrue(out.contains('Http load size too large')) // too lagre data case 1TB strBuilder = new StringBuilder() @@ -144,5 +144,5 @@ suite("test_group_commit_wal_limit") { assertEquals(code, 0) out = process.text logger.info("out is " + out ) - assertTrue(out.contains('[INTERNAL_ERROR]Http load size too large')) + assertTrue(out.contains('Http load size too large')) }