diff --git a/LICENSE.txt b/LICENSE.txt index c27528916d..92f632ce8d 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -359,7 +359,7 @@ Parts of be/src/runtime/string_search.hpp: Python Software License V2 -------------------------------------------------------------------------------- -be/src/util/coding.*: 3-clause BSD +be/src/util/coding.*, be/src/util/status.*: 3-clause BSD Copyright (c) 2011 The LevelDB Authors. All rights reserved. diff --git a/be/src/agent/heartbeat_server.cpp b/be/src/agent/heartbeat_server.cpp index af7a0b72f8..35f67f8dde 100644 --- a/be/src/agent/heartbeat_server.cpp +++ b/be/src/agent/heartbeat_server.cpp @@ -82,7 +82,7 @@ Status HeartbeatServer::_heartbeat( << master_info.backend_ip << " vs. " << BackendOptions::get_localhost(); std::stringstream ss; ss << "actual backend local ip: " << BackendOptions::get_localhost(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } } @@ -93,7 +93,7 @@ Status HeartbeatServer::_heartbeat( auto st = _olap_engine->set_cluster_id(master_info.cluster_id); if (!st.ok()) { LOG(WARNING) << "fail to set cluster id. status=" << st.get_error_msg(); - return Status("fail to set cluster id."); + return Status::InternalError("fail to set cluster id."); } else { _master_info->cluster_id = master_info.cluster_id; LOG(INFO) << "record cluster id. host: " << master_info.network_address.hostname @@ -102,7 +102,7 @@ Status HeartbeatServer::_heartbeat( } else { if (_master_info->cluster_id != master_info.cluster_id) { OLAP_LOG_WARNING("invalid cluster id: %d. ignore.", master_info.cluster_id); - return Status("invalid cluster id. ignore."); + return Status::InternalError("invalid cluster id. ignore."); } } @@ -121,7 +121,7 @@ Status HeartbeatServer::_heartbeat( << _master_info->network_address.hostname << " port: " << _master_info->network_address.port << " local epoch: " << _epoch << " received epoch: " << master_info.epoch; - return Status("epoch is not greater than local. 
ignore heartbeat."); + return Status::InternalError("epoch is not greater than local. ignore heartbeat."); } } else { // when Master FE restarted, host and port remains the same, but epoch will be increased. @@ -139,7 +139,7 @@ Status HeartbeatServer::_heartbeat( } else if (_master_info->token != master_info.token) { LOG(WARNING) << "invalid token. local_token:" << _master_info->token << ". token:" << master_info.token; - return Status("invalid token."); + return Status::InternalError("invalid token."); } } @@ -152,7 +152,7 @@ Status HeartbeatServer::_heartbeat( _olap_engine->report_notify(true); } - return Status::OK; + return Status::OK(); } AgentStatus create_heartbeat_server( diff --git a/be/src/agent/pusher.cpp b/be/src/agent/pusher.cpp index cbf2f70e63..fa90b6391b 100644 --- a/be/src/agent/pusher.cpp +++ b/be/src/agent/pusher.cpp @@ -138,7 +138,7 @@ AgentStatus Pusher::process(vector* tablet_infos) { VLOG(3) << "check time out. time_out:" << _push_req.timeout << ", now:" << now; is_timeout = true; - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(client->init(_remote_file_path)); @@ -160,12 +160,12 @@ AgentStatus Pusher::process(vector* tablet_infos) { if (file_size != local_file_size) { LOG(WARNING) << "download_file size error. 
file_size=" << file_size << ", local_file_size=" << local_file_size; - return Status("downloaded file's size isn't right"); + return Status::InternalError("downloaded file's size isn't right"); } } // NOTE: change http_file_path is not good design _push_req.http_file_path = _local_file_path; - return Status::OK; + return Status::OK(); }; MonotonicStopWatch stopwatch; diff --git a/be/src/agent/task_worker_pool.cpp b/be/src/agent/task_worker_pool.cpp index 15accf9eff..819d763bee 100644 --- a/be/src/agent/task_worker_pool.cpp +++ b/be/src/agent/task_worker_pool.cpp @@ -1402,7 +1402,7 @@ AgentStatus TaskWorkerPool::_clone_copy( RETURN_IF_ERROR(client->init(remote_file_path)); client->set_timeout_ms(LIST_REMOTE_FILE_TIMEOUT * 1000); RETURN_IF_ERROR(client->execute(&file_list_str)); - return Status::OK; + return Status::OK(); }; Status download_status = HttpClient::execute_with_retry( @@ -1464,7 +1464,7 @@ AgentStatus TaskWorkerPool::_clone_copy( client->set_timeout_ms(GET_LENGTH_TIMEOUT * 1000); RETURN_IF_ERROR(client->head()); file_size = client->get_content_length(); - return Status::OK; + return Status::OK(); }; download_status = HttpClient::execute_with_retry( DOWNLOAD_FILE_MAX_RETRY, 1, get_file_size_cb); @@ -1499,10 +1499,10 @@ AgentStatus TaskWorkerPool::_clone_copy( << ", remote_path=" << remote_file_path << ", file_size=" << file_size << ", local_file_size=" << local_file_size; - return Status("downloaded file size is not equal"); + return Status::InternalError("downloaded file size is not equal"); } chmod(local_file_path.c_str(), S_IRUSR | S_IWUSR); - return Status::OK; + return Status::OK(); }; download_status = HttpClient::execute_with_retry( DOWNLOAD_FILE_MAX_RETRY, 1, download_cb); diff --git a/be/src/codegen/llvm_codegen.cpp b/be/src/codegen/llvm_codegen.cpp index d20637304a..05b02d33d8 100644 --- a/be/src/codegen/llvm_codegen.cpp +++ b/be/src/codegen/llvm_codegen.cpp @@ -123,7 +123,7 @@ Status LlvmCodeGen::load_from_file( if (err.value() != 0) { 
std::stringstream ss; ss << "Could not load module " << file << ": " << err.message(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } COUNTER_UPDATE((*codegen)->_module_file_size, file_buffer->getBufferSize()); @@ -135,7 +135,7 @@ Status LlvmCodeGen::load_from_file( if (loaded_module == NULL) { std::stringstream ss; ss << "Could not parse module " << file << ": " << error; - return Status(ss.str()); + return Status::InternalError(ss.str()); } (*codegen)->_module = loaded_module; @@ -167,9 +167,9 @@ Status LlvmCodeGen::load_module_from_memory( if (*module == NULL) { std::stringstream ss; ss << "Could not parse module " << module_name << ": " << error; - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } Status LlvmCodeGen::load_doris_ir( @@ -213,7 +213,7 @@ Status LlvmCodeGen::load_doris_ir( if (layout->getSizeInBytes() != sizeof(StringValue)) { DCHECK_EQ(layout->getSizeInBytes(), sizeof(StringValue)); - return Status("Could not create llvm struct type for StringVal"); + return Status::InternalError("Could not create llvm struct type for StringVal"); } // Parse functions from module @@ -231,7 +231,7 @@ Status LlvmCodeGen::load_doris_ir( // undoing the mangling is no fun either. 
if (fn_name.find(FN_MAPPINGS[j].fn_name) != std::string::npos) { if (codegen->_loaded_functions[FN_MAPPINGS[j].fn] != NULL) { - return Status("Duplicate definition found for function: " + fn_name); + return Status::InternalError("Duplicate definition found for function: " + fn_name); } codegen->_loaded_functions[FN_MAPPINGS[j].fn] = functions[i]; @@ -256,10 +256,10 @@ Status LlvmCodeGen::load_doris_ir( } } - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } Status LlvmCodeGen::init() { @@ -286,7 +286,7 @@ Status LlvmCodeGen::init() { delete _module; std::stringstream ss; ss << "Could not create ExecutionEngine: " << _error_string; - return Status(ss.str()); + return Status::InternalError(ss.str()); } _void_type = llvm::Type::getVoidTy(context()); _ptr_type = llvm::PointerType::get(get_type(TYPE_TINYINT), 0); @@ -295,7 +295,7 @@ Status LlvmCodeGen::init() { RETURN_IF_ERROR(load_intrinsics()); - return Status::OK; + return Status::OK(); } LlvmCodeGen::~LlvmCodeGen() { @@ -681,7 +681,7 @@ Status LlvmCodeGen::finalize_module() { #endif if (_is_corrupt) { - return Status("Module is corrupt."); + return Status::InternalError("Module is corrupt."); } SCOPED_TIMER(_profile.total_time_counter()); @@ -710,7 +710,7 @@ Status LlvmCodeGen::finalize_module() { } #endif - return Status::OK; + return Status::OK(); } void LlvmCodeGen::optimize_module() { @@ -995,7 +995,7 @@ Status LlvmCodeGen::load_intrinsics() { module(), llvm::Intrinsic::memcpy, types); if (fn == NULL) { - return Status("Could not find memcpy intrinsic."); + return Status::InternalError("Could not find memcpy intrinsic."); } _llvm_intrinsics[llvm::Intrinsic::memcpy] = fn; @@ -1021,13 +1021,13 @@ Status LlvmCodeGen::load_intrinsics() { if (fn == NULL) { std::stringstream ss; ss << "Could not find " << non_overloaded_intrinsics[i].error << " intrinsic"; - return Status(ss.str()); + return Status::InternalError(ss.str()); } _llvm_intrinsics[id] = fn; } 
- return Status::OK; + return Status::OK(); } void LlvmCodeGen::codegen_memcpy(LlvmBuilder* builder, llvm::Value* dst, llvm::Value* src, int size) { diff --git a/be/src/common/status.cpp b/be/src/common/status.cpp index 64a9b865c0..15f3b0d4a2 100644 --- a/be/src/common/status.cpp +++ b/be/src/common/status.cpp @@ -15,170 +15,200 @@ // specific language governing permissions and limitations // under the License. +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + #include "common/status.h" -#include - -#include "common/logging.h" +#include "gutil/strings/fastmem.h" // for memcpy_inlined namespace doris { -// NOTE: this is statically initialized and we must be very careful what -// functions these constructors call. In particular, we cannot call -// glog functions which also rely on static initializations. -// TODO: is there a more controlled way to do this. 
-const Status Status::OK; -const Status Status::CANCELLED(TStatusCode::CANCELLED, "Cancelled", true); -const Status Status::MEM_LIMIT_EXCEEDED( - TStatusCode::MEM_LIMIT_EXCEEDED, "Memory limit exceeded", true); -const Status Status::THRIFT_RPC_ERROR( - TStatusCode::THRIFT_RPC_ERROR, "Thrift RPC failed", true); +inline const char* assemble_state( + TStatusCode::type code, const Slice& msg, int16_t precise_code, const Slice& msg2) { + DCHECK(code != TStatusCode::OK); -const Status Status::TIMEOUT( - TStatusCode::TIMEOUT, "timeout", true); - -Status::ErrorDetail::ErrorDetail(const TStatus& status) : - error_code(status.status_code), - error_msgs(status.error_msgs) { - DCHECK_NE(error_code, TStatusCode::OK); -} - -Status::ErrorDetail::ErrorDetail(const PStatus& pstatus) - : error_code((TStatusCode::type)pstatus.status_code()) { - DCHECK_NE(error_code, TStatusCode::OK); - for (auto& msg : pstatus.error_msgs()) { - error_msgs.push_back(msg); + const uint32_t len1 = msg.size; + const uint32_t len2 = msg2.size; + const uint32_t size = len1 + ((len2 > 0) ? 
(2 + len2) : 0); + auto result = new char[size + 7]; + memcpy(result, &size, sizeof(size)); + result[4] = static_cast(code); + memcpy(result + 5, &precise_code, sizeof(precise_code)); + memcpy(result + 7, msg.data, len1); + if (len2 > 0) { + result[7 + len1] = ':'; + result[8 + len1] = ' '; + memcpy(result + 9 + len1, msg2.data, len2); } + return result; } -Status::Status(const std::string& error_msg) : - _error_detail(new ErrorDetail(TStatusCode::INTERNAL_ERROR, error_msg)) { - LOG(INFO) << error_msg << std::endl << get_stack_trace(); +const char* Status::copy_state(const char* state) { + uint32_t size; + strings::memcpy_inlined(&size, state, sizeof(size)); + auto result = new char[size + 7]; + strings::memcpy_inlined(result, state, size + 7); + return result; } -Status::Status(TStatusCode::type code, const std::string& error_msg) - : _error_detail(new ErrorDetail(code, error_msg)) { -} - -Status::Status(const std::string& error_msg, bool quiet) : - _error_detail(new ErrorDetail(TStatusCode::INTERNAL_ERROR, error_msg)) { - if (!quiet) { - LOG(INFO) << error_msg << std::endl << get_stack_trace(); - } -} - -Status::Status(const TStatus& status) : - _error_detail(status.status_code == TStatusCode::OK ? NULL : new ErrorDetail(status)) { -} - -Status& Status::operator=(const TStatus& status) { - delete _error_detail; - - if (status.status_code == TStatusCode::OK) { - _error_detail = NULL; - } else { - _error_detail = new ErrorDetail(status); - } - - return *this; -} - -Status::Status(const PStatus& pstatus) : - _error_detail((TStatusCode::type)pstatus.status_code() == TStatusCode::OK - ? 
nullptr : new ErrorDetail(pstatus)) { -} - -Status& Status::operator=(const PStatus& status) { - delete _error_detail; - if (status.status_code() == (TStatusCode::type)TStatusCode::OK) { - _error_detail = nullptr; - } else { - _error_detail = new ErrorDetail(status); - } - return *this; -} - -void Status::add_error_msg(TStatusCode::type code, const std::string& msg) { - if (_error_detail == NULL) { - _error_detail = new ErrorDetail(code, msg); - } else { - _error_detail->error_msgs.push_back(msg); - } - - VLOG(2) << msg; -} - -void Status::add_error_msg(const std::string& msg) { - add_error_msg(TStatusCode::INTERNAL_ERROR, msg); -} - -void Status::add_error(const Status& status) { - if (status.ok()) { - return; - } - - add_error_msg(status.code(), status.get_error_msg()); -} - -void Status::get_error_msgs(std::vector* msgs) const { - msgs->clear(); - - if (_error_detail != NULL) { - *msgs = _error_detail->error_msgs; - } -} - -void Status::get_error_msg(std::string* msg) const { - msg->clear(); - - if (_error_detail != NULL) { - *msg = boost::join(_error_detail->error_msgs, "\n"); - } -} - -std::string Status::get_error_msg() const { - std::string msg; - get_error_msg(&msg); - return msg; -} - -void Status::to_thrift(TStatus* status) const { - status->error_msgs.clear(); - - if (_error_detail == NULL) { - status->status_code = TStatusCode::OK; - } else { - status->status_code = _error_detail->error_code; - - for (int i = 0; i < _error_detail->error_msgs.size(); ++i) { - status->error_msgs.push_back(_error_detail->error_msgs[i]); - } - - status->__isset.error_msgs = !_error_detail->error_msgs.empty(); - } -} - -void Status::to_protobuf(PStatus* pstatus) const { - pstatus->clear_error_msgs(); - if (_error_detail == nullptr) { - pstatus->set_status_code((int)TStatusCode::OK); - } else { - pstatus->set_status_code(_error_detail->error_code); - pstatus->mutable_error_msgs()->Reserve(_error_detail->error_msgs.size()); - for (auto& err_msg : _error_detail->error_msgs) { - 
pstatus->add_error_msgs(err_msg); +Status::Status(const TStatus& s) : _state(nullptr) { + if (s.status_code != TStatusCode::OK) { + if (s.error_msgs.empty()) { + _state = assemble_state(s.status_code, Slice(), 1, Slice()); + } else { + _state = assemble_state(s.status_code, s.error_msgs[0], 1, Slice()); } } } -void Status::MergeStatus(const Status& status) { - if (status.ok()) return; - if (_error_detail == NULL) { - _error_detail = new ErrorDetail(status.code()); - } else { - std::vector msgs_vector; - status.get_error_msgs(&msgs_vector); - for (const std::string& s: msgs_vector) add_error_msg(s); - } +Status::Status(const PStatus& s) : _state(nullptr) { + TStatusCode::type code = (TStatusCode::type)s.status_code(); + if (code != TStatusCode::OK) { + if (s.error_msgs_size() == 0) { + _state = assemble_state(code, Slice(), 1, Slice()); + } else { + _state = assemble_state(code, s.error_msgs(0), 1, Slice()); + } + } } + +Status::Status(TStatusCode::type code, const Slice& msg, int16_t precise_code, const Slice& msg2) + : _state(assemble_state(code, msg, precise_code, msg2)) { +} + +void Status::to_thrift(TStatus* s) const { + s->error_msgs.clear(); + if (_state == nullptr) { + s->status_code = TStatusCode::OK; + } else { + s->status_code = code(); + auto msg = message(); + s->error_msgs.emplace_back(msg.data, msg.size); + s->__isset.error_msgs = true; + } +} + +void Status::to_protobuf(PStatus* s) const { + s->clear_error_msgs(); + if (_state == nullptr) { + s->set_status_code((int)TStatusCode::OK); + } else { + s->set_status_code(code()); + auto msg = message(); + s->add_error_msgs(msg.data, msg.size); + } +} + +std::string Status::code_as_string() const { + if (_state == nullptr) { + return "OK"; + } + switch (code()) { + case TStatusCode::OK: + return "OK"; + case TStatusCode::CANCELLED: + return "Cancelled"; + case TStatusCode::NOT_IMPLEMENTED_ERROR: + return "Not implemented"; + case TStatusCode::RUNTIME_ERROR: + return "Runtime error"; + case 
TStatusCode::MEM_LIMIT_EXCEEDED: + return "Memory limit exceeded"; + case TStatusCode::INTERNAL_ERROR: + return "Internal error"; + case TStatusCode::THRIFT_RPC_ERROR: + return "Thrift rpc error"; + case TStatusCode::TIMEOUT: + return "Timeout"; + case TStatusCode::MEM_ALLOC_FAILED: + return "Memory alloc failed"; + case TStatusCode::BUFFER_ALLOCATION_FAILED: + return "Buffer alloc failed"; + case TStatusCode::MINIMUM_RESERVATION_UNAVAILABLE: + return "Minimum reservation unavailable"; + case TStatusCode::PUBLISH_TIMEOUT: + return "Publish timeout"; + case TStatusCode::LABEL_ALREADY_EXISTS: + return "Label already exist"; + case TStatusCode::END_OF_FILE: + return "End of file"; + case TStatusCode::NOT_FOUND: + return "Not found"; + case TStatusCode::CORRUPTION: + return "Corruption"; + case TStatusCode::INVALID_ARGUMENT: + return "Invalid argument"; + case TStatusCode::IO_ERROR: + return "IO error"; + case TStatusCode::ALREADY_EXIST: + return "Already exist"; + case TStatusCode::NETWORK_ERROR: + return "Network error"; + case TStatusCode::ILLEGAL_STATE: + return "Illegal state"; + case TStatusCode::NOT_AUTHORIZED: + return "Not authorized"; + case TStatusCode::REMOTE_ERROR: + return "Remote error"; + case TStatusCode::SERVICE_UNAVAILABLE: + return "Service unavailable"; + case TStatusCode::UNINITIALIZED: + return "Uninitialized"; + case TStatusCode::CONFIGURATION_ERROR: + return "Configuration error"; + case TStatusCode::INCOMPLETE: + return "Incomplete"; + default: { + char tmp[30]; + snprintf(tmp, sizeof(tmp), "Unknown code(%d): ", static_cast(code())); + return tmp; + } + } + return std::string(); +} + +std::string Status::to_string() const { + std::string result(code_as_string()); + if (_state == nullptr) { + return result; + } + + result.append(": "); + Slice msg = message(); + result.append(reinterpret_cast(msg.data), msg.size); + int16_t posix = precise_code(); + if (posix != 1) { + char buf[64]; + snprintf(buf, sizeof(buf), " (error %d)", posix); + 
result.append(buf); + } + return result; +} + +Slice Status::message() const { + if (_state == nullptr) { + return Slice(); + } + + uint32_t length; + memcpy(&length, _state, sizeof(length)); + return Slice(_state + 7, length); +} + +Status Status::clone_and_prepend(const Slice& msg) const { + if (ok()) { + return *this; + } + return Status(code(), msg, precise_code(), message()); +} + +Status Status::clone_and_append(const Slice& msg) const { + if (ok()) { + return *this; + } + return Status(code(), message(), precise_code(), msg); +} + } diff --git a/be/src/common/status.h b/be/src/common/status.h index 958e30f5e2..54fdbd0891 100644 --- a/be/src/common/status.h +++ b/be/src/common/status.h @@ -15,8 +15,11 @@ // specific language governing permissions and limitations // under the License. -#ifndef DORIS_BE_SRC_COMMON_COMMON_STATUS_H -#define DORIS_BE_SRC_COMMON_COMMON_STATUS_H +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once #include #include @@ -25,132 +28,109 @@ #include "common/compiler_util.h" #include "gen_cpp/Status_types.h" // for TStatus #include "gen_cpp/status.pb.h" // for PStatus -#include "util/stack_util.h" // for PStatus +#include "util/slice.h" // for Slice namespace doris { -// Status is used as a function return type to indicate success, failure or cancellation -// of the function. In case of successful completion, it only occupies sizeof(void*) -// statically allocated memory. In the error case, it records a stack of error messages. 
-// -// example: -// Status fnB(int x) { -// Status status = fnA(x); -// if (!status.ok()) { -// status.AddErrorMsg("fnA(x) went wrong"); -// return status; -// } -// } -// -// TODO: macros: -// RETURN_IF_ERROR(status) << "msg" -// MAKE_ERROR() << "msg" - class Status { public: - Status(): _error_detail(NULL) {} - - static const Status OK; - static const Status CANCELLED; - static const Status MEM_LIMIT_EXCEEDED; - static const Status THRIFT_RPC_ERROR; - static const Status TIMEOUT; + Status(): _state(nullptr) {} + ~Status() noexcept { delete[] _state; } // copy c'tor makes copy of error detail so Status can be returned by value - Status(const Status& status) : _error_detail( - status._error_detail != NULL - ? new ErrorDetail(*status._error_detail) - : NULL) { - } - - // c'tor for error case - is this useful for anything other than CANCELLED? - Status(TStatusCode::type code) : _error_detail(new ErrorDetail(code)) { - } - - // c'tor for error case - Status(TStatusCode::type code, const std::string& error_msg, bool quiet) : - _error_detail(new ErrorDetail(code, error_msg)) { - if (!quiet) { - VLOG(2) << error_msg; - } - } - - Status(TStatusCode::type code, const std::string& error_msg); - - // c'tor for internal error - Status(const std::string& error_msg); - - Status(const std::string& error_msg, bool quiet); - - ~Status() { - if (_error_detail != NULL) { - delete _error_detail; - } + Status(const Status& s) + : _state(s._state == nullptr ? nullptr : copy_state(s._state)) { } // same as copy c'tor - Status& operator=(const Status& status) { - delete _error_detail; - - if (LIKELY(status._error_detail == NULL)) { - _error_detail = NULL; - } else { - _error_detail = new ErrorDetail(*status._error_detail); + Status& operator=(const Status& s) { + // The following condition catches both aliasing (when this == &s), + // and the common case where both s and *this are OK. + if (_state != s._state) { + delete[] _state; + _state = (s._state == nullptr) ? 
nullptr : copy_state(s._state);
+        }
         return *this;
     }
 
+    // move c'tor
+    Status(Status&& s) noexcept : _state(s._state) {
+        s._state = nullptr;
+    }
+
+    // move assign
+    Status& operator=(Status&& s) noexcept {
+        std::swap(_state, s._state);
+        return *this;
+    }
+
     // "Copy" c'tor from TStatus.
     Status(const TStatus& status);
 
-    // same as previous c'tor
-    Status& operator=(const TStatus& status);
-
     Status(const PStatus& pstatus);
-    Status& operator=(const PStatus& pstatus);
 
-    // assign from stringstream
-    Status& operator=(const std::stringstream& stream);
+    static Status OK() { return Status(); }
 
-    bool ok() const {
-        return _error_detail == NULL;
+    static Status PublishTimeout(const Slice& msg, int16_t sub_code = 1, const Slice& msg2 = Slice()) {
+        return Status(TStatusCode::PUBLISH_TIMEOUT, msg, sub_code, msg2);
+    }
+    static Status MemoryAllocFailed(const Slice& msg, int16_t sub_code = 1, const Slice& msg2 = Slice()) {
+        return Status(TStatusCode::MEM_ALLOC_FAILED, msg, sub_code, msg2);
+    }
+    static Status BufferAllocFailed(const Slice& msg, int16_t sub_code = 1, const Slice& msg2 = Slice()) {
+        return Status(TStatusCode::BUFFER_ALLOCATION_FAILED, msg, sub_code, msg2);
+    }
+    static Status InvalidArgument(const Slice& msg, int16_t sub_code = 1, const Slice& msg2 = Slice()) {
+        return Status(TStatusCode::INVALID_ARGUMENT, msg, sub_code, msg2);
+    }
+    // NOTE(review): originally constructed with TStatusCode::INVALID_ARGUMENT —
+    // a copy-paste error; this factory must use MINIMUM_RESERVATION_UNAVAILABLE
+    // (cf. the matching case in code_as_string()).
+    static Status MinimumReservationUnavailable(const Slice& msg, int16_t sub_code = 1, const Slice& msg2 = Slice()) {
+        return Status(TStatusCode::MINIMUM_RESERVATION_UNAVAILABLE, msg, sub_code, msg2);
+    }
+    static Status IoError(const Slice& msg,
+                          int16_t sub_code = 1,
+                          const Slice& msg2 = Slice()) {
+        return Status(TStatusCode::IO_ERROR, msg, sub_code, msg2);
+    }
+    static Status EndOfFile(const Slice& msg,
+                            int16_t sub_code = 1,
+                            const Slice& msg2 = Slice()) {
+        return Status(TStatusCode::END_OF_FILE, msg, sub_code, msg2);
+    }
+    static Status InternalError(const Slice& msg,
+                                int16_t sub_code = 1,
+                                const Slice& msg2 = Slice()) {
+        return
Status(TStatusCode::INTERNAL_ERROR, msg, sub_code, msg2); + } + static Status RuntimeError(const Slice& msg, + int16_t sub_code = 1, + const Slice& msg2 = Slice()) { + return Status(TStatusCode::RUNTIME_ERROR, msg, sub_code, msg2); + } + static Status Cancelled(const Slice& msg, int16_t sub_code = 1, const Slice& msg2 = Slice()) { + return Status(TStatusCode::CANCELLED, msg, sub_code, msg2); } - bool is_cancelled() const { - return _error_detail != NULL - && _error_detail->error_code == TStatusCode::CANCELLED; + static Status MemoryLimitExceeded(const Slice& msg, int16_t sub_code = 1, const Slice& msg2 = Slice()) { + return Status(TStatusCode::MEM_LIMIT_EXCEEDED, msg, sub_code, msg2); } - bool is_mem_limit_exceeded() const { - return _error_detail != NULL - && _error_detail->error_code == TStatusCode::MEM_LIMIT_EXCEEDED; + static Status ThriftRpcError(const Slice& msg, int16_t sub_code = 1, const Slice& msg2 = Slice()) { + return Status(TStatusCode::THRIFT_RPC_ERROR, msg, sub_code, msg2); } - bool is_thrift_rpc_error() const { - return _error_detail != NULL - && _error_detail->error_code == TStatusCode::MEM_LIMIT_EXCEEDED; + static Status TimedOut(const Slice& msg, int16_t sub_code = 1, const Slice& msg2 = Slice()) { + return Status(TStatusCode::TIMEOUT, msg, sub_code, msg2); } - // Add an error message and set the code if no code has been set yet. - // If a code has already been set, 'code' is ignored. - void add_error_msg(TStatusCode::type code, const std::string& msg); - - // Add an error message and set the code to INTERNAL_ERROR if no code has been - // set yet. If a code has already been set, it is left unchanged. - void add_error_msg(const std::string& msg); - - // Does nothing if status.ok(). - // Otherwise: if 'this' is an error status, adds the error msg from 'status; - // otherwise assigns 'status'. - void add_error(const Status& status); - - // Return all accumulated error msgs. 
- void get_error_msgs(std::vector* msgs) const; + bool ok() const { return _state == nullptr; } + bool is_cancelled() const { return code() == TStatusCode::CANCELLED; } + bool is_mem_limit_exceeded() const { return code() == TStatusCode::MEM_LIMIT_EXCEEDED; } + bool is_thrift_rpc_error() const { return code() == TStatusCode::THRIFT_RPC_ERROR; } // Convert into TStatus. Call this if 'status_container' contains an optional // TStatus field named 'status'. This also sets __isset.status. - template void set_t_status(T* status_container) const { + template + void set_t_status(T* status_container) const { to_thrift(&status_container->status); status_container->__isset.status = true; } @@ -159,34 +139,75 @@ public: void to_thrift(TStatus* status) const; void to_protobuf(PStatus* status) const; - // Return all accumulated error msgs in a single string. - void get_error_msg(std::string* msg) const; - - std::string get_error_msg() const; - - TStatusCode::type code() const { - return _error_detail == NULL ? TStatusCode::OK : _error_detail->error_code; + std::string get_error_msg() const { + auto msg = message(); + return std::string(msg.data, msg.size); } - /// Does nothing if status.ok(). - /// Otherwise: if 'this' is an error status, adds the error msg from 'status'; - /// otherwise assigns 'status'. - void MergeStatus(const Status& status); + /// @return A string representation of this status suitable for printing. + /// Returns the string "OK" for success. + std::string to_string() const; + + /// @return A string representation of the status code, without the message + /// text or sub code information. + std::string code_as_string() const; + + // This is similar to to_string, except that it does not include + // the stringified error code or sub code. + // + // @note The returned Slice is only valid as long as this Status object + // remains live and unchanged. + // + // @return The message portion of the Status. For @c OK statuses, + // this returns an empty string. 
+    Slice message() const;
+
+    TStatusCode::type code() const {
+        return _state == nullptr ? TStatusCode::OK
+                                 : static_cast<TStatusCode::type>(_state[4]);
+    }
+
+    int16_t precise_code() const {
+        if (_state == nullptr) {
+            return 0;
+        }
+        int16_t precise_code;
+        memcpy(&precise_code, _state + 5, sizeof(precise_code));
+        return precise_code;
+    }
+
+    /// Clone this status and add the specified prefix to the message.
+    ///
+    /// If this status is OK, then an OK status will be returned.
+    ///
+    /// @param [in] msg
+    ///   The message to prepend.
+    /// @return A new Status object with the same state plus an additional
+    ///   leading message.
+    Status clone_and_prepend(const Slice& msg) const;
+
+    /// Clone this status and add the specified suffix to the message.
+    ///
+    /// If this status is OK, then an OK status will be returned.
+    ///
+    /// @param [in] msg
+    ///   The message to append.
+    /// @return A new Status object with the same state plus an additional
+    ///   trailing message.
+    Status clone_and_append(const Slice& msg) const;
 
 private:
-    struct ErrorDetail {
-        TStatusCode::type error_code; // anything other than OK
-        std::vector<std::string> error_msgs;
+    const char* copy_state(const char* state);
 
-        ErrorDetail(const TStatus& status);
-        ErrorDetail(const PStatus& status);
-        ErrorDetail(TStatusCode::type code)
-            : error_code(code) {}
-        ErrorDetail(TStatusCode::type code, const std::string& msg)
-            : error_code(code), error_msgs(1, msg) {}
-    };
+    Status(TStatusCode::type code, const Slice& msg, int16_t sub_code, const Slice& msg2);
 
-    ErrorDetail* _error_detail;
+private:
+    // OK status has a nullptr _state.  Otherwise, _state is a new[] array
+    // of the following form:
+    //    _state[0..3] == length of message
+    //    _state[4]    == code
+    //    _state[5..6] == precise_code
+    //    _state[7..]
== message + const char* _state; }; // some generally useful macros @@ -210,8 +231,7 @@ private: do { \ Status _status_ = (stmt); \ if (UNLIKELY(!_status_.ok())) { \ - string msg; \ - _status_.get_error_msg(&msg); \ + string msg = _status_.get_error_msg(); \ LOG(ERROR) << msg; \ exit(1); \ } \ @@ -220,5 +240,3 @@ private: } #define WARN_UNUSED_RESULT __attribute__((warn_unused_result)) - -#endif diff --git a/be/src/exec/aggregation_node.cpp b/be/src/exec/aggregation_node.cpp index 1a0e96ddb1..d5c211dc7c 100644 --- a/be/src/exec/aggregation_node.cpp +++ b/be/src/exec/aggregation_node.cpp @@ -89,7 +89,7 @@ Status AggregationNode::init(const TPlanNode& tnode, RuntimeState* state) { _pool, tnode.agg_node.aggregate_functions[i], &evaluator); _aggregate_evaluators.push_back(evaluator); } - return Status::OK; + return Status::OK(); } Status AggregationNode::prepare(RuntimeState* state) { @@ -172,7 +172,7 @@ Status AggregationNode::prepare(RuntimeState* state) { } } - return Status::OK; + return Status::OK(); } Status AggregationNode::open(RuntimeState* state) { @@ -255,7 +255,7 @@ Status AggregationNode::open(RuntimeState* state) { VLOG_ROW << "id=" << id() << " aggregated " << num_input_rows << " input rows into " << num_agg_rows << " output rows"; _output_iterator = _hash_tbl->begin(); - return Status::OK; + return Status::OK(); } Status AggregationNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { @@ -267,7 +267,7 @@ Status AggregationNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* if (reached_limit()) { *eos = true; - return Status::OK; + return Status::OK(); } ExprContext** ctxs = &_conjunct_ctxs[0]; @@ -312,12 +312,12 @@ Status AggregationNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* } } COUNTER_SET(_rows_returned_counter, _num_rows_returned); - return Status::OK; + return Status::OK(); } Status AggregationNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } // Iterate 
through the remaining rows in the hash table and call Serialize/Finalize on diff --git a/be/src/exec/analytic_eval_node.cpp b/be/src/exec/analytic_eval_node.cpp index c0b3e4d2e4..0ce2f50ffc 100644 --- a/be/src/exec/analytic_eval_node.cpp +++ b/be/src/exec/analytic_eval_node.cpp @@ -140,7 +140,7 @@ Status AnalyticEvalNode::init(const TPlanNode& tnode, RuntimeState* state) { _pool, analytic_node.order_by_eq, &_order_by_eq_expr_ctx)); } - return Status::OK; + return Status::OK(); } Status AnalyticEvalNode::prepare(RuntimeState* state) { @@ -186,7 +186,7 @@ Status AnalyticEvalNode::prepare(RuntimeState* state) { _child_tuple_cmp_row = reinterpret_cast( _mem_pool->allocate(sizeof(Tuple*) * 2)); - return Status::OK; + return Status::OK(); } Status AnalyticEvalNode::open(RuntimeState* state) { @@ -250,7 +250,7 @@ Status AnalyticEvalNode::open(RuntimeState* state) { _curr_child_batch.reset(); } - return Status::OK; + return Status::OK(); } string debug_window_bound_string(const TAnalyticWindowBoundary& b) { @@ -612,7 +612,7 @@ Status AnalyticEvalNode::process_child_batches(RuntimeState* state) { RETURN_IF_ERROR(child(0)->get_next(state, _curr_child_batch.get(), &_input_eos)); } - return Status::OK; + return Status::OK(); } Status AnalyticEvalNode::process_child_batch(RuntimeState* state) { @@ -711,7 +711,7 @@ Status AnalyticEvalNode::process_child_batch(RuntimeState* state) { << _prev_pool_last_window_idx; } - return Status::OK; + return Status::OK(); } Status AnalyticEvalNode::get_next_output_batch(RuntimeState* state, RowBatch* output_batch, @@ -722,7 +722,7 @@ Status AnalyticEvalNode::get_next_output_batch(RuntimeState* state, RowBatch* ou if (_input_stream->rows_returned() == _input_stream->num_rows()) { *eos = true; - return Status::OK; + return Status::OK(); } const int num_child_tuples = child(0)->row_desc().tuple_descriptors().size(); @@ -770,7 +770,7 @@ Status AnalyticEvalNode::get_next_output_batch(RuntimeState* state, RowBatch* ou *eos = true; } - return 
Status::OK; + return Status::OK(); } inline int64_t AnalyticEvalNode::num_output_rows_ready() const { @@ -805,7 +805,7 @@ Status AnalyticEvalNode::get_next(RuntimeState* state, RowBatch* row_batch, bool if (reached_limit()) { *eos = true; - return Status::OK; + return Status::OK(); } else { *eos = false; } @@ -833,12 +833,12 @@ Status AnalyticEvalNode::get_next(RuntimeState* state, RowBatch* row_batch, bool } COUNTER_SET(_rows_returned_counter, _num_rows_returned); - return Status::OK; + return Status::OK(); } Status AnalyticEvalNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } if (_input_stream.get() != NULL) { @@ -888,7 +888,7 @@ Status AnalyticEvalNode::close(RuntimeState* state) { _mem_pool->free_all(); } ExecNode::close(state); - return Status::OK; + return Status::OK(); } void AnalyticEvalNode::debug_string(int indentation_level, stringstream* out) const { diff --git a/be/src/exec/blocking_join_node.cpp b/be/src/exec/blocking_join_node.cpp index af305d42dd..ed626966ce 100644 --- a/be/src/exec/blocking_join_node.cpp +++ b/be/src/exec/blocking_join_node.cpp @@ -77,15 +77,15 @@ Status BlockingJoinNode::prepare(RuntimeState* state) { _build_tuple_row_size = num_build_tuples * sizeof(Tuple*); _left_batch.reset(new RowBatch(child(0)->row_desc(), state->batch_size(), mem_tracker())); - return Status::OK; + return Status::OK(); } Status BlockingJoinNode::close(RuntimeState* state) { // TODO(zhaochun): avoid double close - // if (is_closed()) return Status::OK; + // if (is_closed()) return Status::OK(); _left_batch.reset(); ExecNode::close(state); - return Status::OK; + return Status::OK(); } void BlockingJoinNode::build_side_thread(RuntimeState* state, boost::promise* status) { @@ -163,7 +163,7 @@ Status BlockingJoinNode::open(RuntimeState* state) { } } - return Status::OK; + return Status::OK(); } void BlockingJoinNode::debug_string(int indentation_level, std::stringstream* out) const { diff --git 
a/be/src/exec/broker_reader.cpp b/be/src/exec/broker_reader.cpp index f79c6cb3a9..d4e1975741 100644 --- a/be/src/exec/broker_reader.cpp +++ b/be/src/exec/broker_reader.cpp @@ -102,7 +102,7 @@ Status BrokerReader::open() { std::stringstream ss; ss << "Open broker reader failed, broker:" << broker_addr << " failed:" << e.what(); LOG(WARNING) << ss.str(); - return Status(TStatusCode::THRIFT_RPC_ERROR, ss.str(), false); + return Status::ThriftRpcError(ss.str()); } if (response.opStatus.statusCode != TBrokerOperationStatusCode::OK) { @@ -110,18 +110,18 @@ Status BrokerReader::open() { ss << "Open broker reader failed, broker:" << broker_addr << " failed:" << response.opStatus.message; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } _fd = response.fd; _is_fd_valid = true; - return Status::OK; + return Status::OK(); } Status BrokerReader::read(uint8_t* buf, size_t* buf_len, bool* eof) { if (_eof) { *eof = true; - return Status::OK; + return Status::OK(); } const TNetworkAddress& broker_addr = _addresses[_addr_idx]; @@ -153,19 +153,19 @@ Status BrokerReader::read(uint8_t* buf, size_t* buf_len, bool* eof) { std::stringstream ss; ss << "Read from broker failed, broker:" << broker_addr << " failed:" << e.what(); LOG(WARNING) << ss.str(); - return Status(TStatusCode::THRIFT_RPC_ERROR, ss.str(), false); + return Status::ThriftRpcError(ss.str()); } if (response.opStatus.statusCode == TBrokerOperationStatusCode::END_OF_FILE) { // read the end of broker's file *eof = _eof = true; - return Status::OK; + return Status::OK(); } else if (response.opStatus.statusCode != TBrokerOperationStatusCode::OK) { std::stringstream ss; ss << "Read from broker failed, broker:" << broker_addr << " failed:" << response.opStatus.message; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } *buf_len = response.data.size(); @@ -173,7 +173,7 @@ Status BrokerReader::read(uint8_t* buf, size_t* buf_len, bool* eof) 
{ _cur_offset += *buf_len; *eof = false; - return Status::OK; + return Status::OK(); } void BrokerReader::close() { diff --git a/be/src/exec/broker_scan_node.cpp b/be/src/exec/broker_scan_node.cpp index ddd0247b91..3cf999a31c 100644 --- a/be/src/exec/broker_scan_node.cpp +++ b/be/src/exec/broker_scan_node.cpp @@ -68,7 +68,7 @@ Status BrokerScanNode::init(const TPlanNode& tnode, RuntimeState* state) { _partition_infos.end(), compare_part_use_range); } - return Status::OK; + return Status::OK(); } Status BrokerScanNode::prepare(RuntimeState* state) { @@ -80,7 +80,7 @@ Status BrokerScanNode::prepare(RuntimeState* state) { if (_tuple_desc == nullptr) { std::stringstream ss; ss << "Failed to get tuple descriptor, _tuple_id=" << _tuple_id; - return Status(ss.str()); + return Status::InternalError(ss.str()); } // Initialize slots map @@ -89,7 +89,7 @@ Status BrokerScanNode::prepare(RuntimeState* state) { if (!pair.second) { std::stringstream ss; ss << "Failed to insert slot, col_name=" << slot->col_name(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } } @@ -105,7 +105,7 @@ Status BrokerScanNode::prepare(RuntimeState* state) { // Profile _wait_scanner_timer = ADD_TIMER(runtime_profile(), "WaitScannerTime"); - return Status::OK; + return Status::OK(); } Status BrokerScanNode::open(RuntimeState* state) { @@ -124,7 +124,7 @@ Status BrokerScanNode::open(RuntimeState* state) { RETURN_IF_ERROR(start_scanners()); - return Status::OK; + return Status::OK(); } Status BrokerScanNode::start_scanners() { @@ -133,7 +133,7 @@ Status BrokerScanNode::start_scanners() { _num_running_scanners = 1; } _scanner_threads.emplace_back(&BrokerScanNode::scanner_worker, this, 0, _scan_ranges.size()); - return Status::OK; + return Status::OK(); } Status BrokerScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { @@ -141,7 +141,7 @@ Status BrokerScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* // check if CANCELLED. 
if (state->is_cancelled()) { std::unique_lock l(_batch_queue_lock); - if (update_status(Status::CANCELLED)) { + if (update_status(Status::Cancelled("Cancelled"))) { // Notify all scanners _queue_writer_cond.notify_all(); } @@ -149,7 +149,7 @@ Status BrokerScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* if (_scan_finished.load()) { *eos = true; - return Status::OK; + return Status::OK(); } std::shared_ptr scanner_batch; @@ -167,7 +167,7 @@ Status BrokerScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* return _process_status; } if (_runtime_state->is_cancelled()) { - if (update_status(Status::CANCELLED)) { + if (update_status(Status::Cancelled("Cancelled"))) { _queue_writer_cond.notify_all(); } return _process_status; @@ -182,7 +182,7 @@ Status BrokerScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* if (scanner_batch == nullptr) { _scan_finished.store(true); *eos = true; - return Status::OK; + return Status::OK(); } // notify one scanner @@ -216,12 +216,12 @@ Status BrokerScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* } } - return Status::OK; + return Status::OK(); } Status BrokerScanNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::CLOSE)); SCOPED_TIMER(_runtime_profile->total_time_counter()); @@ -260,7 +260,7 @@ Status BrokerScanNode::set_scan_ranges(const std::vector& scan } } - return Status::OK; + return Status::OK(); } void BrokerScanNode::debug_string(int ident_level, std::stringstream* out) const { @@ -292,7 +292,7 @@ Status BrokerScanNode::scanner_scan( int tuple_buffer_size = row_batch->capacity() * _tuple_desc->byte_size(); void* tuple_buffer = tuple_pool->allocate(tuple_buffer_size); if (tuple_buffer == nullptr) { - return Status("Allocate memory for row batch failed."); + return Status::InternalError("Allocate memory for row batch failed."); } Tuple* tuple = 
reinterpret_cast(tuple_buffer); @@ -300,7 +300,7 @@ Status BrokerScanNode::scanner_scan( RETURN_IF_CANCELLED(_runtime_state); // If we have finished all works if (_scan_finished.load()) { - return Status::OK; + return Status::OK(); } // This row batch has been filled up, and break this @@ -359,15 +359,15 @@ Status BrokerScanNode::scanner_scan( } // Process already set failed, so we just return OK if (!_process_status.ok()) { - return Status::OK; + return Status::OK(); } // Scan already finished, just return if (_scan_finished.load()) { - return Status::OK; + return Status::OK(); } // Runtime state is canceled, just return cancel if (_runtime_state->is_cancelled()) { - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } // Queue size Must be samller than _max_buffered_batches _batch_queue.push_back(row_batch); @@ -377,7 +377,7 @@ Status BrokerScanNode::scanner_scan( } } - return Status::OK; + return Status::OK(); } void BrokerScanNode::scanner_worker(int start_idx, int length) { diff --git a/be/src/exec/broker_scanner.cpp b/be/src/exec/broker_scanner.cpp index a9f9698ed5..3f63fe43db 100644 --- a/be/src/exec/broker_scanner.cpp +++ b/be/src/exec/broker_scanner.cpp @@ -86,7 +86,7 @@ Status BrokerScanner::init_expr_ctxes() { if (src_tuple_desc == nullptr) { std::stringstream ss; ss << "Unknown source tuple descriptor, tuple_id=" << _params.src_tuple_id; - return Status(ss.str()); + return Status::InternalError(ss.str()); } std::map src_slot_desc_map; @@ -98,7 +98,7 @@ Status BrokerScanner::init_expr_ctxes() { if (it == std::end(src_slot_desc_map)) { std::stringstream ss; ss << "Unknown source slot descriptor, slot_id=" << slot_id; - return Status(ss.str()); + return Status::InternalError(ss.str()); } _src_slot_descs.emplace_back(it->second); } @@ -115,7 +115,7 @@ Status BrokerScanner::init_expr_ctxes() { if (_dest_tuple_desc == nullptr) { std::stringstream ss; ss << "Unknown dest tuple descriptor, tuple_id=" << _params.dest_tuple_id; - return 
Status(ss.str()); + return Status::InternalError(ss.str()); } bool has_slot_id_map = _params.__isset.dest_sid_to_src_sid_without_trans; @@ -128,7 +128,7 @@ Status BrokerScanner::init_expr_ctxes() { std::stringstream ss; ss << "No expr for dest slot, id=" << slot_desc->id() << ", name=" << slot_desc->col_name(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } ExprContext* ctx = nullptr; RETURN_IF_ERROR(Expr::create_expr_tree(_state->obj_pool(), it->second, &ctx)); @@ -144,21 +144,21 @@ Status BrokerScanner::init_expr_ctxes() { if (_src_slot_it == std::end(src_slot_desc_map)) { std::stringstream ss; ss << "No src slot " << it->second << " in src slot descs"; - return Status(ss.str()); + return Status::InternalError(ss.str()); } _src_slot_descs_order_by_dest.emplace_back(_src_slot_it->second); } } } - return Status::OK; + return Status::OK(); } Status BrokerScanner::open() { RETURN_IF_ERROR(init_expr_ctxes()); _text_converter.reset(new(std::nothrow) TextConverter('\\')); if (_text_converter == nullptr) { - return Status("No memory error."); + return Status::InternalError("No memory error."); } _rows_read_counter = ADD_COUNTER(_profile, "RowsRead", TUnit::UNIT); @@ -168,10 +168,10 @@ Status BrokerScanner::open() { _strict_mode = _params.strict_mode; } if (_strict_mode && !_params.__isset.dest_sid_to_src_sid_without_trans) { - return Status("Slot map of dest to src must be set in strict mode"); + return Status::InternalError("Slot map of dest to src must be set in strict mode"); } - return Status::OK; + return Status::OK(); } Status BrokerScanner::get_next(Tuple* tuple, MemPool* tuple_pool, bool* eof) { @@ -211,20 +211,20 @@ Status BrokerScanner::get_next(Tuple* tuple, MemPool* tuple_pool, bool* eof) { } else { *eof = false; } - return Status::OK; + return Status::OK(); } Status BrokerScanner::open_next_reader() { if (_next_range >= _ranges.size()) { _scanner_eof = true; - return Status::OK; + return Status::OK(); } 
RETURN_IF_ERROR(open_file_reader()); RETURN_IF_ERROR(open_line_reader()); _next_range++; - return Status::OK; + return Status::OK(); } Status BrokerScanner::open_file_reader() { @@ -261,7 +261,7 @@ Status BrokerScanner::open_file_reader() { _stream_load_pipe = _state->exec_env()->load_stream_mgr()->get(range.load_id); if (_stream_load_pipe == nullptr) { VLOG(3) << "unknown stream load id: " << UniqueId(range.load_id); - return Status("unknown stream load id"); + return Status::InternalError("unknown stream load id"); } _cur_file_reader = _stream_load_pipe.get(); break; @@ -269,10 +269,10 @@ Status BrokerScanner::open_file_reader() { default: { std::stringstream ss; ss << "Unknown file type, type=" << range.file_type; - return Status(ss.str()); + return Status::InternalError(ss.str()); } } - return Status::OK; + return Status::OK(); } Status BrokerScanner::create_decompressor(TFileFormatType::type type) { @@ -301,13 +301,13 @@ Status BrokerScanner::create_decompressor(TFileFormatType::type type) { default: { std::stringstream ss; ss << "Unknown format type, type=" << type; - return Status(ss.str()); + return Status::InternalError(ss.str()); } } RETURN_IF_ERROR(Decompressor::create_decompressor( compress_type, &_cur_decompressor)); - return Status::OK; + return Status::OK(); } Status BrokerScanner::open_line_reader() { @@ -327,7 +327,7 @@ Status BrokerScanner::open_line_reader() { if (range.format_type != TFileFormatType::FORMAT_CSV_PLAIN) { std::stringstream ss; ss << "For now we do not support split compressed file"; - return Status(ss.str()); + return Status::InternalError(ss.str()); } size += 1; _skip_next_line = true; @@ -354,13 +354,13 @@ Status BrokerScanner::open_line_reader() { default: { std::stringstream ss; ss << "Unknown format type, type=" << range.format_type; - return Status(ss.str()); + return Status::InternalError(ss.str()); } } _cur_line_reader_eof = false; - return Status::OK; + return Status::OK(); } void BrokerScanner::close() { diff --git 
a/be/src/exec/broker_writer.cpp b/be/src/exec/broker_writer.cpp index bc44655dcc..f894b44c93 100644 --- a/be/src/exec/broker_writer.cpp +++ b/be/src/exec/broker_writer.cpp @@ -102,7 +102,7 @@ Status BrokerWriter::open() { std::stringstream ss; ss << "Open broker writer failed, broker:" << broker_addr << " failed:" << e.what(); LOG(WARNING) << ss.str(); - return Status(TStatusCode::THRIFT_RPC_ERROR, ss.str(), false); + return Status::ThriftRpcError(ss.str()); } VLOG_ROW << "debug: send broker open writer response: " @@ -113,17 +113,17 @@ Status BrokerWriter::open() { ss << "Open broker writer failed, broker:" << broker_addr << " failed:" << response.opStatus.message; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } _fd = response.fd; - return Status::OK; + return Status::OK(); } Status BrokerWriter::write(const uint8_t* buf, size_t buf_len, size_t* written_len) { if (buf_len == 0) { *written_len = 0; - return Status::OK; + return Status::OK(); } const TNetworkAddress& broker_addr = _addresses[_addr_idx]; @@ -156,13 +156,13 @@ Status BrokerWriter::write(const uint8_t* buf, size_t buf_len, size_t* written_l std::stringstream ss; ss << "Fail to write to broker, broker:" << broker_addr << " failed:" << e.what(); LOG(WARNING) << ss.str(); - return Status(TStatusCode::THRIFT_RPC_ERROR, ss.str()); + return Status::ThriftRpcError(ss.str()); } } catch (apache::thrift::TException& e) { std::stringstream ss; ss << "Fail to write to broker, broker:" << broker_addr << " failed:" << e.what(); LOG(WARNING) << ss.str(); - return Status(TStatusCode::THRIFT_RPC_ERROR, ss.str()); + return Status::ThriftRpcError(ss.str()); } VLOG_ROW << "debug: send broker pwrite response: " @@ -173,13 +173,13 @@ Status BrokerWriter::write(const uint8_t* buf, size_t buf_len, size_t* written_l ss << "Fail to write to broker, broker:" << broker_addr << " msg:" << response.message; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return 
Status::InternalError(ss.str()); } *written_len = buf_len; _cur_offset += buf_len; - return Status::OK; + return Status::OK(); } void BrokerWriter::close() { diff --git a/be/src/exec/cross_join_node.cpp b/be/src/exec/cross_join_node.cpp index 1fc72688e5..0c50da35a2 100644 --- a/be/src/exec/cross_join_node.cpp +++ b/be/src/exec/cross_join_node.cpp @@ -37,18 +37,18 @@ Status CrossJoinNode::prepare(RuntimeState* state) { DCHECK(_join_op == TJoinOp::CROSS_JOIN); RETURN_IF_ERROR(BlockingJoinNode::prepare(state)); _build_batch_pool.reset(new ObjectPool()); - return Status::OK; + return Status::OK(); } Status CrossJoinNode::close(RuntimeState* state) { // avoid double close if (is_closed()) { - return Status::OK; + return Status::OK(); } _build_batches.reset(); _build_batch_pool.reset(); BlockingJoinNode::close(state); - return Status::OK; + return Status::OK(); } Status CrossJoinNode::construct_build_side(RuntimeState* state) { @@ -79,7 +79,7 @@ Status CrossJoinNode::construct_build_side(RuntimeState* state) { } } - return Status::OK; + return Status::OK(); } void CrossJoinNode::init_get_next(TupleRow* first_left_row) { @@ -96,7 +96,7 @@ Status CrossJoinNode::get_next(RuntimeState* state, RowBatch* output_batch, bool if (reached_limit() || _eos) { *eos = true; - return Status::OK; + return Status::OK(); } ScopedTimer timer(_left_child_timer); @@ -140,7 +140,7 @@ Status CrossJoinNode::get_next(RuntimeState* state, RowBatch* output_batch, bool } } - return Status::OK; + return Status::OK(); } std::string CrossJoinNode::build_list_debug_string() { diff --git a/be/src/exec/csv_scan_node.cpp b/be/src/exec/csv_scan_node.cpp index 996bb943c0..0b287c341d 100644 --- a/be/src/exec/csv_scan_node.cpp +++ b/be/src/exec/csv_scan_node.cpp @@ -137,11 +137,11 @@ Status CsvScanNode::prepare(RuntimeState* state) { VLOG(1) << "CsvScanNode::Prepare"; if (_is_init) { - return Status::OK; + return Status::OK(); } if (nullptr == state) { - return Status("input runtime_state pointer is 
nullptr."); + return Status::InternalError("input runtime_state pointer is nullptr."); } RETURN_IF_ERROR(ScanNode::prepare(state)); @@ -152,14 +152,14 @@ Status CsvScanNode::prepare(RuntimeState* state) { _tuple_desc = state->desc_tbl().get_tuple_descriptor(_tuple_id); if (nullptr == _tuple_desc) { - return Status("Failed to get tuple descriptor."); + return Status::InternalError("Failed to get tuple descriptor."); } _slot_num = _tuple_desc->slots().size(); const OlapTableDescriptor* csv_table = static_cast(_tuple_desc->table_desc()); if (nullptr == csv_table) { - return Status("csv table pointer is nullptr."); + return Status::InternalError("csv table pointer is nullptr."); } // @@ -170,7 +170,7 @@ Status CsvScanNode::prepare(RuntimeState* state) { if (slot->type().type == TYPE_HLL) { TMiniLoadEtlFunction& function = _column_function_map[column_name]; if (check_hll_function(function) == false) { - return Status("Function name or param error."); + return Status::InternalError("Function name or param error."); } _hll_column_num++; } @@ -211,21 +211,21 @@ Status CsvScanNode::prepare(RuntimeState* state) { // new one scanner _csv_scanner.reset(new(std::nothrow) CsvScanner(_file_paths)); if (_csv_scanner.get() == nullptr) { - return Status("new a csv scanner failed."); + return Status::InternalError("new a csv scanner failed."); } _tuple_pool.reset(new(std::nothrow) MemPool(state->instance_mem_tracker())); if (_tuple_pool.get() == nullptr) { - return Status("new a mem pool failed."); + return Status::InternalError("new a mem pool failed."); } _text_converter.reset(new(std::nothrow) TextConverter('\\')); if (_text_converter.get() == nullptr) { - return Status("new a text convertor failed."); + return Status::InternalError("new a text convertor failed."); } _is_init = true; - return Status::OK; + return Status::OK(); } Status CsvScanNode::open(RuntimeState* state) { @@ -233,11 +233,11 @@ Status CsvScanNode::open(RuntimeState* state) { VLOG(1) << "CsvScanNode::Open"; if 
(nullptr == state) { - return Status("input pointer is nullptr."); + return Status::InternalError("input pointer is nullptr."); } if (!_is_init) { - return Status("used before initialize."); + return Status::InternalError("used before initialize."); } _runtime_state = state; @@ -247,17 +247,17 @@ Status CsvScanNode::open(RuntimeState* state) { SCOPED_TIMER(_runtime_profile->total_time_counter()); RETURN_IF_ERROR(_csv_scanner->open()); - return Status::OK; + return Status::OK(); } Status CsvScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { VLOG(1) << "CsvScanNode::GetNext"; if (nullptr == state || nullptr == row_batch || nullptr == eos) { - return Status("input is nullptr pointer"); + return Status::InternalError("input is nullptr pointer"); } if (!_is_init) { - return Status("used before initialize."); + return Status::InternalError("used before initialize."); } RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::GETNEXT)); @@ -267,7 +267,7 @@ Status CsvScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos if (reached_limit()) { *eos = true; - return Status::OK; + return Status::OK(); } // create new tuple buffer for row_batch @@ -275,7 +275,7 @@ Status CsvScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos void* tuple_buffer = _tuple_pool->allocate(tuple_buffer_size); if (nullptr == tuple_buffer) { - return Status("Allocate memory failed."); + return Status::InternalError("Allocate memory failed."); } _tuple = reinterpret_cast(tuple_buffer); @@ -293,7 +293,7 @@ Status CsvScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos // next get_next() call row_batch->tuple_data_pool()->acquire_data(_tuple_pool.get(), !reached_limit()); *eos = reached_limit(); - return Status::OK; + return Status::OK(); } // read csv @@ -333,12 +333,12 @@ Status CsvScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos row_batch->tuple_data_pool()->acquire_data(_tuple_pool.get(), false); *eos = 
csv_eos; - return Status::OK; + return Status::OK(); } Status CsvScanNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } VLOG(1) << "CsvScanNode::Close"; RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::CLOSE)); @@ -356,7 +356,7 @@ Status CsvScanNode::close(RuntimeState* state) { error_msg << "Read zero normal line file. "; state->append_error_msg_to_file("", error_msg.str(), true); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } // only write summary line if there are error lines @@ -368,7 +368,7 @@ Status CsvScanNode::close(RuntimeState* state) { state->append_error_msg_to_file("", summary_msg.str(), true); } - return Status::OK; + return Status::OK(); } void CsvScanNode::debug_string(int indentation_level, stringstream* out) const { @@ -382,7 +382,7 @@ void CsvScanNode::debug_string(int indentation_level, stringstream* out) const { } Status CsvScanNode::set_scan_ranges(const vector& scan_ranges) { - return Status::OK; + return Status::OK(); } void CsvScanNode::fill_fix_length_string( diff --git a/be/src/exec/csv_scanner.cpp b/be/src/exec/csv_scanner.cpp index c9de571d66..e21bef76ba 100644 --- a/be/src/exec/csv_scanner.cpp +++ b/be/src/exec/csv_scanner.cpp @@ -44,22 +44,22 @@ namespace doris { if (_is_open) { LOG(INFO) << "this scanner already opened"; - return Status::OK; + return Status::OK(); } if (_file_paths.empty()) { - return Status("no file specified."); + return Status::InternalError("no file specified."); } _is_open = true; - return Status::OK; + return Status::OK(); } // TODO(lingbin): read more than one line at a time to reduce IO comsumption Status CsvScanner::get_next_row(std::string* line_str, bool* eos) { if (_current_file == nullptr && _current_file_idx == _file_paths.size()) { *eos = true; - return Status::OK; + return Status::OK(); } if (_current_file == nullptr && _current_file_idx < _file_paths.size()) { @@ -68,7 +68,7 @@ namespace doris { _current_file = 
new std::ifstream(file_path, std::ifstream::in); if (!_current_file->is_open()) { - return Status("Fail to read csv file: " + file_path); + return Status::InternalError("Fail to read csv file: " + file_path); } ++_current_file_idx; } @@ -81,12 +81,12 @@ namespace doris { if (_current_file_idx == _file_paths.size()) { *eos = true; - return Status::OK; + return Status::OK(); } } *eos = false; - return Status::OK; + return Status::OK(); } } // end namespace doris diff --git a/be/src/exec/data_sink.cpp b/be/src/exec/data_sink.cpp index 228f5afc4a..026f7382f8 100644 --- a/be/src/exec/data_sink.cpp +++ b/be/src/exec/data_sink.cpp @@ -47,7 +47,7 @@ Status DataSink::create_data_sink( switch (thrift_sink.type) { case TDataSinkType::DATA_STREAM_SINK: { if (!thrift_sink.__isset.stream_sink) { - return Status("Missing data stream sink."); + return Status::InternalError("Missing data stream sink."); } bool send_query_statistics_with_every_batch = params.__isset.send_query_statistics_with_every_batch ? 
params.send_query_statistics_with_every_batch : false; @@ -62,7 +62,7 @@ Status DataSink::create_data_sink( } case TDataSinkType::RESULT_SINK: if (!thrift_sink.__isset.result_sink) { - return Status("Missing data buffer sink."); + return Status::InternalError("Missing data buffer sink."); } // TODO: figure out good buffer size based on size of output row @@ -73,7 +73,7 @@ Status DataSink::create_data_sink( case TDataSinkType::MYSQL_TABLE_SINK: { #ifdef DORIS_WITH_MYSQL if (!thrift_sink.__isset.mysql_table_sink) { - return Status("Missing data buffer sink."); + return Status::InternalError("Missing data buffer sink."); } // TODO: figure out good buffer size based on size of output row @@ -82,13 +82,13 @@ Status DataSink::create_data_sink( sink->reset(mysql_tbl_sink); break; #else - return Status("Don't support MySQL table, you should rebuild Doris with WITH_MYSQL option ON"); + return Status::InternalError("Don't support MySQL table, you should rebuild Doris with WITH_MYSQL option ON"); #endif } case TDataSinkType::DATA_SPLIT_SINK: { if (!thrift_sink.__isset.split_sink) { - return Status("Missing data split buffer sink."); + return Status::InternalError("Missing data split buffer sink."); } // TODO: figure out good buffer size based on size of output row @@ -102,7 +102,7 @@ Status DataSink::create_data_sink( case TDataSinkType::EXPORT_SINK: { if (!thrift_sink.__isset.export_sink) { - return Status("Missing export sink sink."); + return Status::InternalError("Missing export sink sink."); } std::unique_ptr export_sink(new ExportSink(pool, row_desc, output_exprs)); @@ -128,23 +128,23 @@ Status DataSink::create_data_sink( } error_msg << str << " not implemented."; - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } if (sink->get() != NULL) { RETURN_IF_ERROR((*sink)->init(thrift_sink)); } - return Status::OK; + return Status::OK(); } Status DataSink::init(const TDataSink& thrift_sink) { - return Status::OK; + return Status::OK(); } Status 
DataSink::prepare(RuntimeState* state) { _expr_mem_tracker.reset(new MemTracker(-1, "Data sink", state->instance_mem_tracker())); - return Status::OK; + return Status::OK(); } } // namespace doris diff --git a/be/src/exec/data_sink.h b/be/src/exec/data_sink.h index 942e33bdaa..bee9b39030 100644 --- a/be/src/exec/data_sink.h +++ b/be/src/exec/data_sink.h @@ -65,7 +65,7 @@ public: virtual Status close(RuntimeState* state, Status exec_status) { _expr_mem_tracker->close(); _closed = true; - return Status::OK; + return Status::OK(); } // Creates a new data sink from thrift_sink. A pointer to the diff --git a/be/src/exec/decompressor.cpp b/be/src/exec/decompressor.cpp index 0104285417..d0de707bd6 100644 --- a/be/src/exec/decompressor.cpp +++ b/be/src/exec/decompressor.cpp @@ -45,10 +45,10 @@ Status Decompressor::create_decompressor(CompressType type, default: std::stringstream ss; ss << "Unknown compress type: " << type; - return Status(ss.str()); + return Status::InternalError(ss.str()); } - Status st = Status::OK; + Status st = Status::OK(); if (*decompressor != nullptr) { st = (*decompressor)->init(); } @@ -84,10 +84,10 @@ Status GzipDecompressor::init() { if (ret < 0) { std::stringstream ss; ss << "Failed to init inflate. status code: " << ret; - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } Status GzipDecompressor::decompress( @@ -124,7 +124,7 @@ Status GzipDecompressor::decompress( // produce more output. inflate() can be called again with more output space // or more available input // ATTN: even if ret == Z_OK, decompressed_len may also be zero - return Status::OK; + return Status::OK(); } else if (ret == Z_STREAM_END) { *stream_end = true; // reset _z_strm to continue decoding a subsequent gzip stream @@ -132,12 +132,12 @@ Status GzipDecompressor::decompress( if (ret != Z_OK) { std::stringstream ss; ss << "Failed to inflateRset. 
return code: " << ret; - return Status(ss.str()); + return Status::InternalError(ss.str()); } } else if (ret != Z_OK) { std::stringstream ss; ss << "Failed to inflate. return code: " << ret; - return Status(ss.str()); + return Status::InternalError(ss.str()); } else { // here ret must be Z_OK. // we continue if avail_out and avail_in > 0. @@ -145,7 +145,7 @@ Status GzipDecompressor::decompress( } } - return Status::OK; + return Status::OK(); } std::string GzipDecompressor::debug_info() { @@ -165,10 +165,10 @@ Status Bzip2Decompressor::init() { if (ret != BZ_OK) { std::stringstream ss; ss << "Failed to init bz2. status code: " << ret; - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } Status Bzip2Decompressor::decompress( @@ -195,32 +195,32 @@ Status Bzip2Decompressor::decompress( << " decompressed_len: " << *decompressed_len; std::stringstream ss; ss << "Failed to bz2 decompress. status code: " << ret; - return Status(ss.str()); + return Status::InternalError(ss.str()); } else if (ret == BZ_STREAM_END) { *stream_end = true; ret = BZ2_bzDecompressEnd(&_bz_strm); if (ret != BZ_OK) { std::stringstream ss; ss << "Failed to end bz2 after meet BZ_STREAM_END. status code: " << ret; - return Status(ss.str()); + return Status::InternalError(ss.str()); } ret = BZ2_bzDecompressInit(&_bz_strm, 0, 0); if (ret != BZ_OK) { std::stringstream ss; ss << "Failed to init bz2 after meet BZ_STREAM_END. status code: " << ret; - return Status(ss.str()); + return Status::InternalError(ss.str()); } } else if (ret != BZ_OK) { std::stringstream ss; ss << "Failed to bz2 decompress. 
status code: " << ret; - return Status(ss.str()); + return Status::InternalError(ss.str()); } else { // continue } } - return Status::OK; + return Status::OK(); } std::string Bzip2Decompressor::debug_info() { @@ -243,13 +243,13 @@ Status Lz4FrameDecompressor::init() { if (LZ4F_isError(ret)) { std::stringstream ss; ss << "LZ4F_dctx creation error: " << std::string(LZ4F_getErrorName(ret)); - return Status(ss.str()); + return Status::InternalError(ss.str()); } // init as -1 _expect_dec_buf_size = -1; - return Status::OK; + return Status::OK(); } Status Lz4FrameDecompressor::decompress( @@ -273,7 +273,7 @@ Status Lz4FrameDecompressor::decompress( std::stringstream ss; ss << "Lz4 header size is between 7 and 15 bytes. " << "but input size is only: " << input_len; - return Status(ss.str()); + return Status::InternalError(ss.str()); } LZ4F_frameInfo_t info; @@ -281,7 +281,7 @@ Status Lz4FrameDecompressor::decompress( if (LZ4F_isError(ret)) { std::stringstream ss; ss << "LZ4F_getFrameInfo error: " << std::string(LZ4F_getErrorName(ret)); - return Status(ss.str()); + return Status::InternalError(ss.str()); } _expect_dec_buf_size = get_block_size(&info); @@ -289,7 +289,7 @@ Status Lz4FrameDecompressor::decompress( std::stringstream ss; ss << "Impossible lz4 block size unless more block sizes are allowed" << std::string(LZ4F_getErrorName(ret)); - return Status(ss.str()); + return Status::InternalError(ss.str()); } *input_bytes_read = src_size; @@ -307,7 +307,7 @@ Status Lz4FrameDecompressor::decompress( if (LZ4F_isError(ret)) { std::stringstream ss; ss << "Decompression error: " << std::string(LZ4F_getErrorName(ret)); - return Status(ss.str()); + return Status::InternalError(ss.str()); } // update @@ -319,7 +319,7 @@ Status Lz4FrameDecompressor::decompress( *stream_end = false; } - return Status::OK; + return Status::OK(); } std::string Lz4FrameDecompressor::debug_info() { diff --git a/be/src/exec/empty_set_node.cpp b/be/src/exec/empty_set_node.cpp index c751a00c37..4284d63e32 
100644 --- a/be/src/exec/empty_set_node.cpp +++ b/be/src/exec/empty_set_node.cpp @@ -26,7 +26,7 @@ EmptySetNode::EmptySetNode(ObjectPool* pool, const TPlanNode& tnode, Status EmptySetNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { *eos = true; - return Status::OK; + return Status::OK(); } } diff --git a/be/src/exec/es/es_predicate.cpp b/be/src/exec/es/es_predicate.cpp index a7f0bc1f45..b1d2364d3c 100644 --- a/be/src/exec/es/es_predicate.cpp +++ b/be/src/exec/es/es_predicate.cpp @@ -172,7 +172,7 @@ EsPredicate::EsPredicate(ExprContext* context, _context(context), _disjuncts_num(0), _tuple_desc(tuple_desc), - _es_query_status(Status::OK) { + _es_query_status(Status::OK()) { } EsPredicate::~EsPredicate() { @@ -219,7 +219,7 @@ static bool is_literal_node(const Expr* expr) { Status EsPredicate::build_disjuncts_list(const Expr* conjunct) { if (TExprNodeType::BINARY_PRED == conjunct->node_type()) { if (conjunct->children().size() != 2) { - return Status("build disjuncts failed: number of childs is not 2"); + return Status::InternalError("build disjuncts failed: number of childs is not 2"); } SlotRef* slot_ref = nullptr; @@ -234,16 +234,16 @@ Status EsPredicate::build_disjuncts_list(const Expr* conjunct) { slot_ref = (SlotRef*)(conjunct->get_child(1)); op = conjunct->op(); } else { - return Status("build disjuncts failed: no SLOT_REF child"); + return Status::InternalError("build disjuncts failed: no SLOT_REF child"); } const SlotDescriptor* slot_desc = get_slot_desc(slot_ref); if (slot_desc == nullptr) { - return Status("build disjuncts failed: slot_desc is null"); + return Status::InternalError("build disjuncts failed: slot_desc is null"); } if (!is_literal_node(expr)) { - return Status("build disjuncts failed: expr is not literal type"); + return Status::InternalError("build disjuncts failed: expr is not literal type"); } ExtLiteral literal(expr->type().type, _context->get_value(expr, NULL)); @@ -255,7 +255,7 @@ Status 
EsPredicate::build_disjuncts_list(const Expr* conjunct) { literal); _disjuncts.push_back(predicate); - return Status::OK; + return Status::OK(); } if (is_match_func(conjunct)) { @@ -279,13 +279,13 @@ Status EsPredicate::build_disjuncts_list(const Expr* conjunct) { } _disjuncts.push_back(predicate); - return Status::OK; + return Status::OK(); } if (TExprNodeType::FUNCTION_CALL == conjunct->node_type()) { std::string fname = conjunct->fn().name.function_name; if (fname != "like") { - return Status("build disjuncts failed: function name is not like"); + return Status::InternalError("build disjuncts failed: function name is not like"); } SlotRef* slot_ref = nullptr; @@ -297,17 +297,17 @@ Status EsPredicate::build_disjuncts_list(const Expr* conjunct) { expr = conjunct->get_child(0); slot_ref = (SlotRef*)(conjunct->get_child(1)); } else { - return Status("build disjuncts failed: no SLOT_REF child"); + return Status::InternalError("build disjuncts failed: no SLOT_REF child"); } const SlotDescriptor* slot_desc = get_slot_desc(slot_ref); if (slot_desc == nullptr) { - return Status("build disjuncts failed: slot_desc is null"); + return Status::InternalError("build disjuncts failed: slot_desc is null"); } PrimitiveType type = expr->type().type; if (type != TYPE_VARCHAR && type != TYPE_CHAR) { - return Status("build disjuncts failed: like value is not a string"); + return Status::InternalError("build disjuncts failed: like value is not a string"); } ExtLiteral literal(type, _context->get_value(expr, NULL)); @@ -318,7 +318,7 @@ Status EsPredicate::build_disjuncts_list(const Expr* conjunct) { literal); _disjuncts.push_back(predicate); - return Status::OK; + return Status::OK(); } if (TExprNodeType::IN_PRED == conjunct->node_type()) { @@ -326,7 +326,7 @@ Status EsPredicate::build_disjuncts_list(const Expr* conjunct) { // like col_a in (abs(1)) if (TExprOpcode::FILTER_IN != conjunct->op() && TExprOpcode::FILTER_NOT_IN != conjunct->op()) { - return Status("build disjuncts failed: " 
+ return Status::InternalError("build disjuncts failed: " "opcode in IN_PRED is neither FILTER_IN nor FILTER_NOT_IN"); } @@ -334,24 +334,24 @@ Status EsPredicate::build_disjuncts_list(const Expr* conjunct) { const InPredicate* pred = dynamic_cast(conjunct); const Expr* expr = Expr::expr_without_cast(pred->get_child(0)); if (expr->node_type() != TExprNodeType::SLOT_REF) { - return Status("build disjuncts failed: node type is not slot ref"); + return Status::InternalError("build disjuncts failed: node type is not slot ref"); } const SlotDescriptor* slot_desc = get_slot_desc((const SlotRef *)expr); if (slot_desc == nullptr) { - return Status("build disjuncts failed: slot_desc is null"); + return Status::InternalError("build disjuncts failed: slot_desc is null"); } if (pred->get_child(0)->type().type != slot_desc->type().type) { if (!ignore_cast(slot_desc, pred->get_child(0))) { - return Status("build disjuncts failed"); + return Status::InternalError("build disjuncts failed"); } } HybirdSetBase::IteratorBase* iter = pred->hybird_set()->begin(); while (iter->has_next()) { if (nullptr == iter->get_value()) { - return Status("build disjuncts failed: hybird set has a null value"); + return Status::InternalError("build disjuncts failed: hybird set has a null value"); } ExtLiteral literal(slot_desc->type().type, const_cast(iter->get_value())); @@ -367,12 +367,12 @@ Status EsPredicate::build_disjuncts_list(const Expr* conjunct) { in_pred_values); _disjuncts.push_back(predicate); - return Status::OK; + return Status::OK(); } if (TExprNodeType::COMPOUND_PRED == conjunct->node_type()) { if (TExprOpcode::COMPOUND_OR != conjunct->op()) { - return Status("build disjuncts failed: op is not COMPOUND_OR"); + return Status::InternalError("build disjuncts failed: op is not COMPOUND_OR"); } Status status = build_disjuncts_list(conjunct->get_child(0)); if (!status.ok()) { @@ -383,13 +383,13 @@ Status EsPredicate::build_disjuncts_list(const Expr* conjunct) { return status; } - return 
Status::OK; + return Status::OK(); } // if go to here, report error std::stringstream ss; ss << "build disjuncts failed: node type " << conjunct->node_type() << " is not supported"; - return Status(ss.str()); + return Status::InternalError(ss.str()); } bool EsPredicate::is_match_func(const Expr* conjunct) { diff --git a/be/src/exec/es/es_query_builder.cpp b/be/src/exec/es/es_query_builder.cpp index 8fc260c671..bf6e7207c5 100644 --- a/be/src/exec/es/es_query_builder.cpp +++ b/be/src/exec/es/es_query_builder.cpp @@ -293,23 +293,23 @@ Status BooleanQueryBuilder::check_es_query(const ExtFunction& extFunction) { // { "term": { "dv": "2" } } if (!scratch_document.HasParseError()) { if (!scratch_document.IsObject()) { - return Status(TStatusCode::ES_REQUEST_ERROR, "esquery must be a object"); + return Status::InvalidArgument("esquery must be a object"); } rapidjson::SizeType object_count = scratch_document.MemberCount(); if (object_count != 1) { - return Status(TStatusCode::ES_REQUEST_ERROR, "esquery must only one root"); + return Status::InvalidArgument("esquery must only one root"); } // deep copy, reference http://rapidjson.org/md_doc_tutorial.html#DeepCopyValue rapidjson::Value::ConstMemberIterator first = scratch_document.MemberBegin(); query_key.CopyFrom(first->name, allocator); if (!query_key.IsString()) { // if we found one key, then end loop as QueryDSL only support one `query` root - return Status(TStatusCode::ES_REQUEST_ERROR, "esquery root key must be string"); + return Status::InvalidArgument("esquery root key must be string"); } } else { - return Status(TStatusCode::ES_REQUEST_ERROR, "malformed esquery json"); + return Status::InvalidArgument("malformed esquery json"); } - return Status::OK; + return Status::OK(); } void BooleanQueryBuilder::validate(const std::vector& espredicates, std::vector* result) { diff --git a/be/src/exec/es/es_scan_reader.cpp b/be/src/exec/es/es_scan_reader.cpp index e2f17a0908..27e5762cab 100644 --- 
a/be/src/exec/es/es_scan_reader.cpp +++ b/be/src/exec/es/es_scan_reader.cpp @@ -70,10 +70,10 @@ Status ESScanReader::open() { std::stringstream ss; ss << "Failed to connect to ES server, errmsg is: " << status.get_error_msg(); LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } VLOG(1) << "open _cached response: " << _cached_response; - return Status::OK; + return Status::OK(); } Status ESScanReader::get_next(bool* scan_eos, std::unique_ptr& scroll_parser) { @@ -81,7 +81,7 @@ Status ESScanReader::get_next(bool* scan_eos, std::unique_ptr& scr // if is first scroll request, should return the cached response *scan_eos = true; if (_eos) { - return Status::OK; + return Status::OK(); } if (_is_first) { @@ -98,16 +98,16 @@ Status ESScanReader::get_next(bool* scan_eos, std::unique_ptr& scr if (status == 404) { LOG(WARNING) << "request scroll search failure 404[" << ", response: " << (response.empty() ? "empty response" : response); - return Status("No search context found for " + _scroll_id); + return Status::InternalError("No search context found for " + _scroll_id); } if (status != 200) { LOG(WARNING) << "request scroll search failure[" << "http status" << status << ", response: " << (response.empty() ? "empty response" : response); if (status == 404) { - return Status("No search context found for " + _scroll_id); + return Status::InternalError("No search context found for " + _scroll_id); } - return Status("request scroll search failure: " + (response.empty() ? "empty response" : response)); + return Status::InternalError("request scroll search failure: " + (response.empty() ? 
"empty response" : response)); } } @@ -122,7 +122,7 @@ Status ESScanReader::get_next(bool* scan_eos, std::unique_ptr& scr _scroll_id = scroll_parser->get_scroll_id(); if (scroll_parser->get_total() == 0) { _eos = true; - return Status::OK; + return Status::OK(); } if (scroll_parser->get_size() < _batch_size) { @@ -132,12 +132,12 @@ Status ESScanReader::get_next(bool* scan_eos, std::unique_ptr& scr } *scan_eos = false; - return Status::OK; + return Status::OK(); } Status ESScanReader::close() { if (_scroll_id.empty()) { - return Status::OK; + return Status::OK(); } std::string scratch_target = _target + REQUEST_SEARCH_SCROLL_PATH; @@ -149,9 +149,9 @@ Status ESScanReader::close() { std::string response; RETURN_IF_ERROR(_network_client.execute_delete_request(ESScrollQueryBuilder::build_clear_scroll_body(_scroll_id), &response)); if (_network_client.get_http_status() == 200) { - return Status::OK; + return Status::OK(); } else { - return Status("es_scan_reader delete scroll context failure"); + return Status::InternalError("es_scan_reader delete scroll context failure"); } } } diff --git a/be/src/exec/es/es_scroll_parser.cpp b/be/src/exec/es/es_scroll_parser.cpp index e1ee317f77..d07c63bf7d 100644 --- a/be/src/exec/es/es_scroll_parser.cpp +++ b/be/src/exec/es/es_scroll_parser.cpp @@ -46,7 +46,7 @@ static const string ERROR_COL_DATA_IS_ARRAY = "Data source returned an array for #define RETURN_ERROR_IF_COL_IS_ARRAY(col, type) \ do { \ if (col.IsArray()) { \ - return Status(strings::Substitute(ERROR_COL_DATA_IS_ARRAY, type_to_string(type))); \ + return Status::InternalError(strings::Substitute(ERROR_COL_DATA_IS_ARRAY, type_to_string(type))); \ } \ } while (false) @@ -54,7 +54,7 @@ static const string ERROR_COL_DATA_IS_ARRAY = "Data source returned an array for #define RETURN_ERROR_IF_COL_IS_NOT_STRING(col, type) \ do { \ if (!col.IsString()) { \ - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, type_to_string(type))); \ + return 
Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, type_to_string(type))); \ } \ } while (false) @@ -62,7 +62,7 @@ static const string ERROR_COL_DATA_IS_ARRAY = "Data source returned an array for #define RETURN_ERROR_IF_PARSING_FAILED(result, type) \ do { \ if (result != StringParser::PARSE_SUCCESS) { \ - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, type_to_string(type))); \ + return Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, type_to_string(type))); \ } \ } while (false) @@ -70,7 +70,7 @@ template static Status get_int_value(const rapidjson::Value &col, PrimitiveType type, void* slot) { if (col.IsNumber()) { *reinterpret_cast(slot) = (T)(sizeof(T) < 8 ? col.GetInt() : col.GetInt64()); - return Status::OK; + return Status::OK(); } RETURN_ERROR_IF_COL_IS_ARRAY(col, type); @@ -89,7 +89,7 @@ static Status get_int_value(const rapidjson::Value &col, PrimitiveType type, voi memcpy(slot, &v, sizeof(v)); } - return Status::OK; + return Status::OK(); } template @@ -97,7 +97,7 @@ static Status get_float_value(const rapidjson::Value &col, PrimitiveType type, v DCHECK(sizeof(T) == 4 || sizeof(T) == 8); if (col.IsNumber()) { *reinterpret_cast(slot) = (T)(sizeof(T) == 4 ? 
col.GetFloat() : col.GetDouble()); - return Status::OK; + return Status::OK(); } RETURN_ERROR_IF_COL_IS_ARRAY(col, type); @@ -110,7 +110,7 @@ static Status get_float_value(const rapidjson::Value &col, PrimitiveType type, v RETURN_ERROR_IF_PARSING_FAILED(result, type); *reinterpret_cast(slot) = v; - return Status::OK; + return Status::OK(); } ScrollParser::ScrollParser() : @@ -128,11 +128,11 @@ Status ScrollParser::parse(const std::string& scroll_result) { if (_document_node.HasParseError()) { std::stringstream ss; ss << "Parsing json error, json is: " << scroll_result; - return Status(ss.str()); + return Status::InternalError(ss.str()); } if (!_document_node.HasMember(FIELD_SCROLL_ID)) { - return Status("Document has not a scroll id field"); + return Status::InternalError("Document has not a scroll id field"); } const rapidjson::Value &scroll_node = _document_node[FIELD_SCROLL_ID]; @@ -142,20 +142,20 @@ Status ScrollParser::parse(const std::string& scroll_result) { const rapidjson::Value &field_total = outer_hits_node[FIELD_TOTAL]; _total = field_total.GetInt(); if (_total == 0) { - return Status::OK; + return Status::OK(); } VLOG(1) << "es_scan_reader total hits: " << _total << " documents"; const rapidjson::Value &inner_hits_node = outer_hits_node[FIELD_INNER_HITS]; if (!inner_hits_node.IsArray()) { - return Status("inner hits node is not an array"); + return Status::InternalError("inner hits node is not an array"); } rapidjson::Document::AllocatorType& a = _document_node.GetAllocator(); _inner_hits_node.CopyFrom(inner_hits_node, a); _size = _inner_hits_node.Size(); - return Status::OK; + return Status::OK(); } int ScrollParser::get_size() { @@ -174,13 +174,13 @@ Status ScrollParser::fill_tuple(const TupleDescriptor* tuple_desc, Tuple* tuple, MemPool* tuple_pool, bool* line_eof) { *line_eof = true; if (_size <= 0 || _line_index >= _size) { - return Status::OK; + return Status::OK(); } const rapidjson::Value& obj = _inner_hits_node[_line_index++]; const 
rapidjson::Value& line = obj[FIELD_SOURCE]; if (!line.IsObject()) { - return Status("Parse inner hits failed"); + return Status::InternalError("Parse inner hits failed"); } tuple->init(tuple_desc->byte_size()); @@ -307,7 +307,7 @@ Status ScrollParser::fill_tuple(const TupleDescriptor* tuple_desc, case TYPE_DATETIME: { if (col.IsNumber()) { if (!reinterpret_cast(slot)->from_unixtime(col.GetInt64())) { - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, type_to_string(type))); + return Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, type_to_string(type))); } if (type == TYPE_DATE) { @@ -325,11 +325,11 @@ Status ScrollParser::fill_tuple(const TupleDescriptor* tuple_desc, const std::string& val = col.GetString(); size_t val_size = col.GetStringLength(); if (!ts_slot->from_date_str(val.c_str(), val_size)) { - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, type_to_string(type))); + return Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, type_to_string(type))); } if (ts_slot->year() < 1900) { - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, type_to_string(type))); + return Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, type_to_string(type))); } if (type == TYPE_DATE) { @@ -348,6 +348,6 @@ Status ScrollParser::fill_tuple(const TupleDescriptor* tuple_desc, } *line_eof = false; - return Status::OK; + return Status::OK(); } } diff --git a/be/src/exec/es_http_scan_node.cpp b/be/src/exec/es_http_scan_node.cpp index abfbeaea92..6c7de68ab3 100644 --- a/be/src/exec/es_http_scan_node.cpp +++ b/be/src/exec/es_http_scan_node.cpp @@ -55,7 +55,7 @@ Status EsHttpScanNode::init(const TPlanNode& tnode, RuntimeState* state) { // use TEsScanNode _properties = tnode.es_scan_node.properties; - return Status::OK; + return Status::OK(); } Status EsHttpScanNode::prepare(RuntimeState* state) { @@ -67,7 +67,7 @@ Status EsHttpScanNode::prepare(RuntimeState* state) { if (_tuple_desc == nullptr) { std::stringstream 
ss; ss << "Failed to get tuple descriptor, _tuple_id=" << _tuple_id; - return Status(ss.str()); + return Status::InternalError(ss.str()); } // set up column name vector for ESScrollQueryBuilder @@ -80,12 +80,12 @@ Status EsHttpScanNode::prepare(RuntimeState* state) { _wait_scanner_timer = ADD_TIMER(runtime_profile(), "WaitScannerTime"); - return Status::OK; + return Status::OK(); } // build predicate Status EsHttpScanNode::build_conjuncts_list() { - Status status = Status::OK; + Status status = Status::OK(); for (int i = 0; i < _conjunct_ctxs.size(); ++i) { EsPredicate* predicate = _pool->add( new EsPredicate(_conjunct_ctxs[i], _tuple_desc)); @@ -103,7 +103,7 @@ Status EsHttpScanNode::build_conjuncts_list() { } } - return Status::OK; + return Status::OK(); } Status EsHttpScanNode::open(RuntimeState* state) { @@ -144,7 +144,7 @@ Status EsHttpScanNode::open(RuntimeState* state) { RETURN_IF_ERROR(start_scanners()); - return Status::OK; + return Status::OK(); } Status EsHttpScanNode::start_scanners() { @@ -161,7 +161,7 @@ Status EsHttpScanNode::start_scanners() { Status status = f.get(); if (!status.ok()) return status; } - return Status::OK; + return Status::OK(); } Status EsHttpScanNode::get_next(RuntimeState* state, RowBatch* row_batch, @@ -169,19 +169,19 @@ Status EsHttpScanNode::get_next(RuntimeState* state, RowBatch* row_batch, SCOPED_TIMER(_runtime_profile->total_time_counter()); if (state->is_cancelled()) { std::unique_lock l(_batch_queue_lock); - if (update_status(Status::CANCELLED)) { + if (update_status(Status::Cancelled("Cancelled"))) { _queue_writer_cond.notify_all(); } } if (_eos) { *eos = true; - return Status::OK; + return Status::OK(); } if (_scan_finished.load()) { *eos = true; - return Status::OK; + return Status::OK(); } std::shared_ptr scanner_batch; @@ -199,7 +199,7 @@ Status EsHttpScanNode::get_next(RuntimeState* state, RowBatch* row_batch, return _process_status; } if (_runtime_state->is_cancelled()) { - if (update_status(Status::CANCELLED)) { + 
if (update_status(Status::Cancelled("Cancelled"))) { _queue_writer_cond.notify_all(); } return _process_status; @@ -214,7 +214,7 @@ Status EsHttpScanNode::get_next(RuntimeState* state, RowBatch* row_batch, if (scanner_batch == nullptr) { _scan_finished.store(true); *eos = true; - return Status::OK; + return Status::OK(); } // notify one scanner @@ -248,12 +248,12 @@ Status EsHttpScanNode::get_next(RuntimeState* state, RowBatch* row_batch, } } - return Status::OK; + return Status::OK(); } Status EsHttpScanNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::CLOSE)); SCOPED_TIMER(_runtime_profile->total_time_counter()); @@ -272,7 +272,7 @@ Status EsHttpScanNode::close(RuntimeState* state) { // This function is called after plan node has been prepared. Status EsHttpScanNode::set_scan_ranges(const std::vector& scan_ranges) { _scan_ranges = scan_ranges; - return Status::OK; + return Status::OK(); } void EsHttpScanNode::debug_string(int ident_level, std::stringstream* out) const { @@ -296,7 +296,7 @@ Status EsHttpScanNode::scanner_scan( int tuple_buffer_size = row_batch->capacity() * _tuple_desc->byte_size(); void* tuple_buffer = tuple_pool->allocate(tuple_buffer_size); if (tuple_buffer == nullptr) { - return Status("Allocate memory for row batch failed."); + return Status::InternalError("Allocate memory for row batch failed."); } Tuple* tuple = reinterpret_cast(tuple_buffer); @@ -304,7 +304,7 @@ Status EsHttpScanNode::scanner_scan( RETURN_IF_CANCELLED(_runtime_state); // If we have finished all works if (_scan_finished.load()) { - return Status::OK; + return Status::OK(); } // This row batch has been filled up, and break this @@ -347,15 +347,15 @@ Status EsHttpScanNode::scanner_scan( } // Process already set failed, so we just return OK if (!_process_status.ok()) { - return Status::OK; + return Status::OK(); } // Scan already finished, just return if (_scan_finished.load()) 
{ - return Status::OK; + return Status::OK(); } // Runtime state is canceled, just return cancel if (_runtime_state->is_cancelled()) { - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } // Queue size Must be samller than _max_buffered_batches _batch_queue.push_back(row_batch); @@ -365,7 +365,7 @@ Status EsHttpScanNode::scanner_scan( } } - return Status::OK; + return Status::OK(); } // Prefer to the local host diff --git a/be/src/exec/es_http_scanner.cpp b/be/src/exec/es_http_scanner.cpp index 331ede8902..f142770c94 100644 --- a/be/src/exec/es_http_scanner.cpp +++ b/be/src/exec/es_http_scanner.cpp @@ -70,13 +70,13 @@ Status EsHttpScanner::open() { if (_tuple_desc == nullptr) { std::stringstream ss; ss << "Unknown tuple descriptor, tuple_id=" << _tuple_id; - return Status(ss.str()); + return Status::InternalError(ss.str()); } const std::string& host = _properties.at(ESScanReader::KEY_HOST_PORT); _es_reader.reset(new ESScanReader(host, _properties)); if (_es_reader == nullptr) { - return Status("Es reader construct failed."); + return Status::InternalError("Es reader construct failed."); } RETURN_IF_ERROR(_es_reader->open()); @@ -85,14 +85,14 @@ Status EsHttpScanner::open() { _read_timer = ADD_TIMER(_profile, "TotalRawReadTime(*)"); _materialize_timer = ADD_TIMER(_profile, "MaterializeTupleTime(*)"); - return Status::OK; + return Status::OK(); } Status EsHttpScanner::get_next(Tuple* tuple, MemPool* tuple_pool, bool* eof) { SCOPED_TIMER(_read_timer); if (_line_eof && _batch_eof) { *eof = true; - return Status::OK; + return Status::OK(); } while (!_batch_eof) { @@ -100,7 +100,7 @@ Status EsHttpScanner::get_next(Tuple* tuple, MemPool* tuple_pool, bool* eof) { RETURN_IF_ERROR(_es_reader->get_next(&_batch_eof, _es_scroll_parser)); if (_batch_eof) { *eof = true; - return Status::OK; + return Status::OK(); } } @@ -113,7 +113,7 @@ Status EsHttpScanner::get_next(Tuple* tuple, MemPool* tuple_pool, bool* eof) { } } - return Status::OK; + return Status::OK(); 
} void EsHttpScanner::close() { diff --git a/be/src/exec/es_scan_node.cpp b/be/src/exec/es_scan_node.cpp index 08426968cc..1e5741f6bb 100644 --- a/be/src/exec/es_scan_node.cpp +++ b/be/src/exec/es_scan_node.cpp @@ -71,11 +71,11 @@ Status EsScanNode::prepare(RuntimeState* state) { std::stringstream ss; ss << "es tuple descriptor is null, _tuple_id=" << _tuple_id; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } _env = state->exec_env(); - return Status::OK; + return Status::OK(); } Status EsScanNode::open(RuntimeState* state) { @@ -119,7 +119,7 @@ Status EsScanNode::open(RuntimeState* state) { std::stringstream ss; ss << "es fail to open: hosts empty"; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } @@ -178,7 +178,7 @@ Status EsScanNode::open(RuntimeState* state) { std::stringstream ss; ss << "es open error: scan_range_idx=" << i << ", can't find shard on any node"; - return Status(ss.str()); + return Status::InternalError(ss.str()); } } @@ -193,11 +193,11 @@ Status EsScanNode::open(RuntimeState* state) { for (int i = 0; i < _conjunct_ctxs.size(); ++i) { if (!check_left_conjuncts(_conjunct_ctxs[i]->root())) { - return Status("esquery could only be executed on es, but could not push down to es"); + return Status::InternalError("esquery could only be executed on es, but could not push down to es"); } } - return Status::OK; + return Status::OK(); } Status EsScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { @@ -252,11 +252,11 @@ Status EsScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) *eos = true; } - return Status::OK; + return Status::OK(); } Status EsScanNode::close(RuntimeState* state) { - if (is_closed()) return Status::OK; + if (is_closed()) return Status::OK(); VLOG(1) << "EsScanNode::Close"; RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::CLOSE)); SCOPED_TIMER(_runtime_profile->total_time_counter()); @@ -293,7 +293,7 
@@ Status EsScanNode::close(RuntimeState* state) { ss << "es close error: scan_range_idx=" << i << ", msg=" << e.what(); LOG(WARNING) << ss.str(); - return Status(TStatusCode::THRIFT_RPC_ERROR, ss.str(), false); + return Status::ThriftRpcError(ss.str()); } VLOG(1) << "es close result=" << apache::thrift::ThriftDebugString(result); @@ -309,7 +309,7 @@ Status EsScanNode::close(RuntimeState* state) { #endif } - return Status::OK; + return Status::OK(); } void EsScanNode::debug_string(int indentation_level, stringstream* out) const { @@ -331,7 +331,7 @@ Status EsScanNode::set_scan_ranges(const vector& scan_ranges) } _offsets.resize(scan_ranges.size(), 0); - return Status::OK; + return Status::OK(); } Status EsScanNode::open_es(TNetworkAddress& address, TExtOpenResult& result, TExtOpenParams& params) { @@ -346,7 +346,7 @@ Status EsScanNode::open_es(TNetworkAddress& address, TExtOpenResult& result, TEx std::stringstream ss; ss << "es create client error: address=" << address << ", msg=" << status.get_error_msg(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } try { @@ -361,7 +361,7 @@ Status EsScanNode::open_es(TNetworkAddress& address, TExtOpenResult& result, TEx } catch (apache::thrift::TException &e) { std::stringstream ss; ss << "es open error: address=" << address << ", msg=" << e.what(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } #else TStatus status; @@ -704,14 +704,14 @@ Status EsScanNode::get_next_from_es(TExtGetNextResult& result) { << ", msg=" << e.what(); LOG(WARNING) << ss.str(); RETURN_IF_ERROR(client.reopen()); - return Status(TStatusCode::THRIFT_RPC_ERROR, ss.str(), false); + return Status::ThriftRpcError(ss.str()); } } catch (apache::thrift::TException &e) { std::stringstream ss; ss << "es get_next error: scan_range_idx=" << _scan_range_idx << ", msg=" << e.what(); LOG(WARNING) << ss.str(); - return Status(TStatusCode::THRIFT_RPC_ERROR, ss.str(), false); + return Status::ThriftRpcError(ss.str()); } 
#else TStatus status; @@ -748,10 +748,10 @@ Status EsScanNode::get_next_from_es(TExtGetNextResult& result) { ss << "es get_next error: scan_range_idx=" << _scan_range_idx << ", msg=rows or num_rows not in result"; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } Status EsScanNode::materialize_row(MemPool* tuple_pool, Tuple* tuple, @@ -781,7 +781,7 @@ Status EsScanNode::materialize_row(MemPool* tuple_pool, Tuple* tuple, case TYPE_CHAR: case TYPE_VARCHAR: { if (val_idx >= col.string_vals.size()) { - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, "STRING")); + return Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, "STRING")); } const string& val = col.string_vals[val_idx]; size_t val_size = val.size(); @@ -798,70 +798,70 @@ Status EsScanNode::materialize_row(MemPool* tuple_pool, Tuple* tuple, } case TYPE_TINYINT: if (val_idx >= col.byte_vals.size()) { - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, "TINYINT")); + return Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, "TINYINT")); } *reinterpret_cast(slot) = col.byte_vals[val_idx]; break; case TYPE_SMALLINT: if (val_idx >= col.short_vals.size()) { - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, "SMALLINT")); + return Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, "SMALLINT")); } *reinterpret_cast(slot) = col.short_vals[val_idx]; break; case TYPE_INT: if (val_idx >= col.int_vals.size()) { - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, "INT")); + return Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, "INT")); } *reinterpret_cast(slot) = col.int_vals[val_idx]; break; case TYPE_BIGINT: if (val_idx >= col.long_vals.size()) { - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, "BIGINT")); + return Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, "BIGINT")); } *reinterpret_cast(slot) = 
col.long_vals[val_idx]; break; case TYPE_LARGEINT: if (val_idx >= col.long_vals.size()) { - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, "LARGEINT")); + return Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, "LARGEINT")); } *reinterpret_cast(slot) = col.long_vals[val_idx]; break; case TYPE_DOUBLE: if (val_idx >= col.double_vals.size()) { - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, "DOUBLE")); + return Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, "DOUBLE")); } *reinterpret_cast(slot) = col.double_vals[val_idx]; break; case TYPE_FLOAT: if (val_idx >= col.double_vals.size()) { - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, "FLOAT")); + return Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, "FLOAT")); } *reinterpret_cast(slot) = col.double_vals[val_idx]; break; case TYPE_BOOLEAN: if (val_idx >= col.bool_vals.size()) { - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, "BOOLEAN")); + return Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, "BOOLEAN")); } *reinterpret_cast(slot) = col.bool_vals[val_idx]; break; case TYPE_DATE: if (val_idx >= col.long_vals.size() || !reinterpret_cast(slot)->from_unixtime(col.long_vals[val_idx])) { - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, "TYPE_DATE")); + return Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, "TYPE_DATE")); } reinterpret_cast(slot)->cast_to_date(); break; case TYPE_DATETIME: { if (val_idx >= col.long_vals.size() || !reinterpret_cast(slot)->from_unixtime(col.long_vals[val_idx])) { - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, "TYPE_DATETIME")); + return Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, "TYPE_DATETIME")); } reinterpret_cast(slot)->set_type(TIME_DATETIME); break; } case TYPE_DECIMAL: { if (val_idx >= col.binary_vals.size()) { - return Status(strings::Substitute(ERROR_INVALID_COL_DATA, "DECIMAL")); + return 
Status::InternalError(strings::Substitute(ERROR_INVALID_COL_DATA, "DECIMAL")); } const string& val = col.binary_vals[val_idx]; *reinterpret_cast(slot) = *reinterpret_cast(&val); @@ -871,7 +871,7 @@ Status EsScanNode::materialize_row(MemPool* tuple_pool, Tuple* tuple, DCHECK(false); } } - return Status::OK; + return Status::OK(); } } diff --git a/be/src/exec/exchange_node.cpp b/be/src/exec/exchange_node.cpp index 9dcf2fe2b1..bad1f24917 100644 --- a/be/src/exec/exchange_node.cpp +++ b/be/src/exec/exchange_node.cpp @@ -51,13 +51,13 @@ ExchangeNode::ExchangeNode( Status ExchangeNode::init(const TPlanNode& tnode, RuntimeState* state) { RETURN_IF_ERROR(ExecNode::init(tnode, state)); if (!_is_merging) { - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(_sort_exec_exprs.init(tnode.exchange_node.sort_info, _pool)); _is_asc_order = tnode.exchange_node.sort_info.is_asc_order; _nulls_first = tnode.exchange_node.sort_info.nulls_first; - return Status::OK; + return Status::OK(); } Status ExchangeNode::prepare(RuntimeState* state) { @@ -76,7 +76,7 @@ Status ExchangeNode::prepare(RuntimeState* state) { state, _row_descriptor, _row_descriptor, expr_mem_tracker())); // AddExprCtxsToFree(_sort_exec_exprs); } - return Status::OK; + return Status::OK(); } Status ExchangeNode::open(RuntimeState* state) { @@ -91,18 +91,18 @@ Status ExchangeNode::open(RuntimeState* state) { } else { RETURN_IF_ERROR(fill_input_row_batch(state)); } - return Status::OK; + return Status::OK(); } Status ExchangeNode::collect_query_statistics(QueryStatistics* statistics) { RETURN_IF_ERROR(ExecNode::collect_query_statistics(statistics)); statistics->merge(_sub_plan_query_statistics_recvr.get()); - return Status::OK; + return Status::OK(); } Status ExchangeNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } if (_is_merging) { _sort_exec_exprs.close(state); @@ -135,7 +135,7 @@ Status ExchangeNode::get_next(RuntimeState* state, RowBatch* output_batch, 
bool* if (reached_limit()) { _stream_recvr->transfer_all_resources(output_batch); *eos = true; - return Status::OK; + return Status::OK(); } else { *eos = false; } @@ -183,12 +183,12 @@ Status ExchangeNode::get_next(RuntimeState* state, RowBatch* output_batch, bool* if (reached_limit()) { _stream_recvr->transfer_all_resources(output_batch); *eos = true; - return Status::OK; + return Status::OK(); } if (output_batch->at_capacity()) { *eos = false; - return Status::OK; + return Status::OK(); } } @@ -200,7 +200,7 @@ Status ExchangeNode::get_next(RuntimeState* state, RowBatch* output_batch, bool* RETURN_IF_ERROR(fill_input_row_batch(state)); *eos = (_input_batch == NULL); if (*eos) { - return Status::OK; + return Status::OK(); } _next_row_idx = 0; @@ -244,7 +244,7 @@ Status ExchangeNode::get_next_merging(RuntimeState* state, RowBatch* output_batc } COUNTER_SET(_rows_returned_counter, _num_rows_returned); - return Status::OK; + return Status::OK(); } void ExchangeNode::debug_string(int indentation_level, std::stringstream* out) const { diff --git a/be/src/exec/exec_node.cpp b/be/src/exec/exec_node.cpp index ce7533aa49..c8d6dca784 100644 --- a/be/src/exec/exec_node.cpp +++ b/be/src/exec/exec_node.cpp @@ -171,7 +171,7 @@ void ExecNode::push_down_predicate( Status ExecNode::init(const TPlanNode& tnode, RuntimeState* state) { RETURN_IF_ERROR( Expr::create_expr_trees(_pool, tnode.conjuncts, &_conjunct_ctxs)); - return Status::OK; + return Status::OK(); } Status ExecNode::prepare(RuntimeState* state) { @@ -199,7 +199,7 @@ Status ExecNode::prepare(RuntimeState* state) { RETURN_IF_ERROR(_children[i]->prepare(state)); } - return Status::OK; + return Status::OK(); } Status ExecNode::open(RuntimeState* state) { @@ -213,7 +213,7 @@ Status ExecNode::reset(RuntimeState* state) { for (int i = 0; i < _children.size(); ++i) { RETURN_IF_ERROR(_children[i]->reset(state)); } - return Status::OK; + return Status::OK(); } Status ExecNode::collect_query_statistics(QueryStatistics* statistics) 
{ @@ -221,12 +221,12 @@ Status ExecNode::collect_query_statistics(QueryStatistics* statistics) { for (auto child_node : _children) { child_node->collect_query_statistics(statistics); } - return Status::OK; + return Status::OK(); } Status ExecNode::close(RuntimeState* state) { if (_is_closed) { - return Status::OK; + return Status::OK(); } _is_closed = true; RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::CLOSE)); @@ -236,10 +236,13 @@ Status ExecNode::close(RuntimeState* state) { } Status result; for (int i = 0; i < _children.size(); ++i) { - result.add_error(_children[i]->close(state)); + auto st = _children[i]->close(state); + if (result.ok() && !st.ok()) { + result = st; + } } + Expr::close(_conjunct_ctxs, state); if (expr_mem_pool() != nullptr) { @@ -281,7 +284,7 @@ Status ExecNode::create_tree(RuntimeState* state, ObjectPool* pool, const TPlan& const DescriptorTbl& descs, ExecNode** root) { if (plan.nodes.size() == 0) { *root = NULL; - return Status::OK; + return Status::OK(); } int node_idx = 0; @@ -289,11 +292,11 @@ Status ExecNode::create_tree(RuntimeState* state, ObjectPool* pool, const TPlan& if (node_idx + 1 != plan.nodes.size()) { // TODO: print thrift msg for diagnostic purposes. - return Status( + return Status::InternalError( "Plan tree only partially reconstructed. 
Not all thrift nodes were used."); } - return Status::OK; + return Status::OK(); } Status ExecNode::create_tree_helper( @@ -307,7 +310,7 @@ Status ExecNode::create_tree_helper( // propagate error case if (*node_idx >= tnodes.size()) { // TODO: print thrift msg - return Status("Failed to reconstruct plan tree from thrift."); + return Status::InternalError("Failed to reconstruct plan tree from thrift."); } const TPlanNode& tnode = tnodes[*node_idx]; @@ -330,7 +333,7 @@ Status ExecNode::create_tree_helper( // this means we have been given a bad tree and must fail if (*node_idx >= tnodes.size()) { // TODO: print thrift msg - return Status("Failed to reconstruct plan tree from thrift."); + return Status::InternalError("Failed to reconstruct plan tree from thrift."); } } @@ -346,7 +349,7 @@ Status ExecNode::create_tree_helper( node->runtime_profile()->add_child(node->_children[0]->runtime_profile(), false, NULL); } - return Status::OK; + return Status::OK(); } Status ExecNode::create_node(RuntimeState* state, ObjectPool* pool, const TPlanNode& tnode, @@ -357,31 +360,31 @@ Status ExecNode::create_node(RuntimeState* state, ObjectPool* pool, const TPlanN switch (tnode.node_type) { case TPlanNodeType::CSV_SCAN_NODE: *node = pool->add(new CsvScanNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); case TPlanNodeType::MYSQL_SCAN_NODE: #ifdef DORIS_WITH_MYSQL *node = pool->add(new MysqlScanNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); #else - return Status("Don't support MySQL table, you should rebuild Doris with WITH_MYSQL option ON"); + return Status::InternalError("Don't support MySQL table, you should rebuild Doris with WITH_MYSQL option ON"); #endif case TPlanNodeType::ES_SCAN_NODE: *node = pool->add(new EsScanNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); case TPlanNodeType::ES_HTTP_SCAN_NODE: *node = pool->add(new EsHttpScanNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); case 
TPlanNodeType::SCHEMA_SCAN_NODE: *node = pool->add(new SchemaScanNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); case TPlanNodeType::OLAP_SCAN_NODE: *node = pool->add(new OlapScanNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); case TPlanNodeType::AGGREGATION_NODE: if (config::enable_partitioned_aggregation) { @@ -391,38 +394,38 @@ Status ExecNode::create_node(RuntimeState* state, ObjectPool* pool, const TPlanN } else { *node = pool->add(new AggregationNode(pool, tnode, descs)); } - return Status::OK; + return Status::OK(); /*case TPlanNodeType::PRE_AGGREGATION_NODE: *node = pool->add(new PreAggregationNode(pool, tnode, descs)); - return Status::OK;*/ + return Status::OK();*/ case TPlanNodeType::HASH_JOIN_NODE: *node = pool->add(new HashJoinNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); case TPlanNodeType::CROSS_JOIN_NODE: *node = pool->add(new CrossJoinNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); case TPlanNodeType::MERGE_JOIN_NODE: *node = pool->add(new MergeJoinNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); case TPlanNodeType::EMPTY_SET_NODE: *node = pool->add(new EmptySetNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); case TPlanNodeType::EXCHANGE_NODE: *node = pool->add(new ExchangeNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); case TPlanNodeType::SELECT_NODE: *node = pool->add(new SelectNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); case TPlanNodeType::OLAP_REWRITE_NODE: *node = pool->add(new OlapRewriteNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); case TPlanNodeType::SORT_NODE: if (tnode.sort_node.use_top_n) { @@ -431,22 +434,22 @@ Status ExecNode::create_node(RuntimeState* state, ObjectPool* pool, const TPlanN *node = pool->add(new SpillSortNode(pool, tnode, descs)); } - return Status::OK; + return Status::OK(); case 
TPlanNodeType::ANALYTIC_EVAL_NODE: *node = pool->add(new AnalyticEvalNode(pool, tnode, descs)); break; case TPlanNodeType::MERGE_NODE: *node = pool->add(new MergeNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); case TPlanNodeType::UNION_NODE: *node = pool->add(new UnionNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); case TPlanNodeType::BROKER_SCAN_NODE: *node = pool->add(new BrokerScanNode(pool, tnode, descs)); - return Status::OK; + return Status::OK(); default: map::const_iterator i = @@ -458,10 +461,10 @@ Status ExecNode::create_node(RuntimeState* state, ObjectPool* pool, const TPlanN } error_msg << str << " not implemented"; - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } - return Status::OK; + return Status::OK(); } void ExecNode::set_debug_options( @@ -538,11 +541,11 @@ Status ExecNode::exec_debug_action(TExecNodePhase::type phase) { DCHECK(phase != TExecNodePhase::INVALID); if (_debug_phase != phase) { - return Status::OK; + return Status::OK(); } if (_debug_action == TDebugAction::FAIL) { - return Status(TStatusCode::INTERNAL_ERROR, "Debug Action: FAIL"); + return Status::InternalError("Debug Action: FAIL"); } if (_debug_action == TDebugAction::WAIT) { @@ -551,7 +554,7 @@ Status ExecNode::exec_debug_action(TExecNodePhase::type phase) { } } - return Status::OK; + return Status::OK(); } // Codegen for EvalConjuncts. 
The generated signature is @@ -677,7 +680,7 @@ Status ExecNode::claim_buffer_reservation(RuntimeState* state) { ss << "Spillable buffer size for node " << _id << " of " << _resource_profile.spillable_buffer_size << "bytes is less than the minimum buffer pool buffer size of " << buffer_pool->min_buffer_len() << "bytes"; - return Status(ss.str()); + return Status::InternalError(ss.str()); } ss << print_plan_node_type(_type) << " id=" << _id << " ptr=" << this; @@ -697,7 +700,7 @@ Status ExecNode::claim_buffer_reservation(RuntimeState* state) { RETURN_IF_ERROR(EnableDenyReservationDebugAction()); } */ - return Status::OK; + return Status::OK(); } Status ExecNode::release_unused_reservation() { @@ -713,11 +716,11 @@ Status ExecNode::enable_deny_reservation_debug_action() { debug_action_param_.c_str(), debug_action_param_.size(), &parse_result); if (parse_result != StringParser::PARSE_SUCCESS || probability < 0.0 || probability > 1.0) { - return Status(Substitute( + return Status::InternalError(Substitute( "Invalid SET_DENY_RESERVATION_PROBABILITY param: '$0'", debug_action_param_)); } _buffer_pool_client.SetDebugDenyIncreaseReservation(probability); - return Status::OK(); + return Status::OK(); } */ diff --git a/be/src/exec/exec_node.h b/be/src/exec/exec_node.h index 0e9e265baa..e6d4487d13 100644 --- a/be/src/exec/exec_node.h +++ b/be/src/exec/exec_node.h @@ -397,7 +397,7 @@ private: do { \ /* if (UNLIKELY(MemTracker::limit_exceeded(*(state)->mem_trackers()))) { */ \ if (UNLIKELY(state->instance_mem_tracker()->any_limit_exceeded())) { \ - return Status::MEM_LIMIT_EXCEEDED; \ + return Status::MemoryLimitExceeded("Memory limit exceeded"); \ } \ } while (false) } diff --git a/be/src/exec/hash_join_node.cpp b/be/src/exec/hash_join_node.cpp index e24842af8a..a758b16220 100644 --- a/be/src/exec/hash_join_node.cpp +++ b/be/src/exec/hash_join_node.cpp @@ -77,7 +77,7 @@ Status HashJoinNode::init(const TPlanNode& tnode, RuntimeState* state) { Expr::create_expr_trees(_pool, 
tnode.hash_join_node.other_join_conjuncts, &_other_join_conjunct_ctxs)); - return Status::OK; + return Status::OK(); } Status HashJoinNode::prepare(RuntimeState* state) { @@ -143,7 +143,7 @@ Status HashJoinNode::prepare(RuntimeState* state) { if (state->codegen_level() > 0) { if (_join_op == TJoinOp::LEFT_ANTI_JOIN) { - return Status::OK; + return Status::OK(); } LlvmCodeGen* codegen = NULL; RETURN_IF_ERROR(state->get_codegen(&codegen)); @@ -151,7 +151,7 @@ Status HashJoinNode::prepare(RuntimeState* state) { // Codegen for hashing rows Function* hash_fn = _hash_tbl->codegen_hash_current_row(state); if (hash_fn == NULL) { - return Status::OK; + return Status::OK(); } // Codegen for build path @@ -174,12 +174,12 @@ Status HashJoinNode::prepare(RuntimeState* state) { } } - return Status::OK; + return Status::OK(); } Status HashJoinNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::CLOSE)); @@ -253,7 +253,7 @@ Status HashJoinNode::construct_hash_table(RuntimeState* state) { } } - return Status::OK; + return Status::OK(); } Status HashJoinNode::open(RuntimeState* state) { @@ -299,7 +299,7 @@ Status HashJoinNode::open(RuntimeState* state) { _probe_batch_pos = 0; _hash_tbl_iterator = _hash_tbl->begin(); _eos = true; - return Status::OK; + return Status::OK(); } if (_hash_tbl->size() > 1024) { @@ -401,7 +401,7 @@ Status HashJoinNode::open(RuntimeState* state) { } } - return Status::OK; + return Status::OK(); } Status HashJoinNode::get_next(RuntimeState* state, RowBatch* out_batch, bool* eos) { @@ -411,7 +411,7 @@ Status HashJoinNode::get_next(RuntimeState* state, RowBatch* out_batch, bool* eo if (reached_limit()) { *eos = true; - return Status::OK; + return Status::OK(); } // These cases are simpler and use a more efficient processing loop @@ -419,7 +419,7 @@ Status HashJoinNode::get_next(RuntimeState* state, RowBatch* out_batch, bool* eo || _join_op == 
TJoinOp::RIGHT_ANTI_JOIN)) { if (_eos) { *eos = true; - return Status::OK; + return Status::OK(); } return left_join_get_next(state, out_batch, eos); @@ -513,7 +513,7 @@ Status HashJoinNode::get_next(RuntimeState* state, RowBatch* out_batch, bool* eo if (out_batch->is_full() || reached_limit()) { *eos = reached_limit(); - return Status::OK; + return Status::OK(); } } } @@ -534,7 +534,7 @@ Status HashJoinNode::get_next(RuntimeState* state, RowBatch* out_batch, bool* eo if (out_batch->is_full() || reached_limit()) { *eos = reached_limit(); - return Status::OK; + return Status::OK(); } } } @@ -545,7 +545,7 @@ Status HashJoinNode::get_next(RuntimeState* state, RowBatch* out_batch, bool* eo _probe_batch_pos = 0; if (out_batch->is_full() || out_batch->at_resource_limit()) { - return Status::OK; + return Status::OK(); } // get new probe batch @@ -566,7 +566,7 @@ Status HashJoinNode::get_next(RuntimeState* state, RowBatch* out_batch, bool* eo } if (out_batch->is_full() || out_batch->at_resource_limit()) { - return Status::OK; + return Status::OK(); } continue; @@ -633,7 +633,7 @@ Status HashJoinNode::get_next(RuntimeState* state, RowBatch* out_batch, bool* eo if (reached_limit()) { *eos = true; - return Status::OK; + return Status::OK(); } } _hash_tbl_iterator.next(); @@ -645,7 +645,7 @@ Status HashJoinNode::get_next(RuntimeState* state, RowBatch* out_batch, bool* eo *eos = !_hash_tbl_iterator.has_next(); } - return Status::OK; + return Status::OK(); } Status HashJoinNode::left_join_get_next(RuntimeState* state, @@ -700,7 +700,7 @@ Status HashJoinNode::left_join_get_next(RuntimeState* state, } } - return Status::OK; + return Status::OK(); } string HashJoinNode::get_probe_row_output_string(TupleRow* probe_row) { diff --git a/be/src/exec/kudu_scan_node.cpp b/be/src/exec/kudu_scan_node.cpp index 9ea2e7ee7a..f3e84cd8b7 100644 --- a/be/src/exec/kudu_scan_node.cpp +++ b/be/src/exec/kudu_scan_node.cpp @@ -95,7 +95,7 @@ Status KuduScanNode::prepare(RuntimeState* state) { 
_tuple_desc = state->desc_tbl().get_tuple_descriptor(_tuple_id); - return Status::OK; + return Status::OK(); } @@ -107,7 +107,7 @@ Status KuduScanNode::set_scan_ranges(const std::vector& scan_r _scan_tokens.push_back(params.scan_range.kudu_scan_token); } // COUNTER_SET(kudu_remote_tokens_, num_remote_tokens); - return Status::OK; + return Status::OK(); } Status KuduScanNode::open(RuntimeState* state) { @@ -142,7 +142,7 @@ Status KuduScanNode::open(RuntimeState* state) { // thread_avail_cb_id_ = state->resource_pool()->AddThreadAvailableCb( // bind(mem_fn(&KuduScanNode::thread_available_cb), this, _1)); thread_available_cb(state->resource_pool()); - return Status::OK; + return Status::OK(); } Status KuduScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { @@ -155,7 +155,7 @@ Status KuduScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eo if (reached_limit() || _scan_tokens.empty()) { *eos = true; - return Status::OK; + return Status::OK(); } *eos = false; @@ -191,7 +191,7 @@ Status KuduScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eo Status KuduScanNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } SCOPED_TIMER(_runtime_profile->total_time_counter()); // PeriodicCounterUpdater::StopRateCounter(total_throughput_counter()); @@ -212,7 +212,7 @@ Status KuduScanNode::close(RuntimeState* state) { _materialized_row_batches->Cleanup(); ExecNode::close(state); - return Status::OK; + return Status::OK(); } void KuduScanNode::debug_string(int indentation_level, stringstream* out) const { @@ -277,7 +277,7 @@ Status KuduScanNode::process_scan_token(KuduScanner* scanner, const string& scan } } if (eos) scan_ranges_complete_counter()->update(1); - return Status::OK; + return Status::OK(); } void KuduScanNode::run_scanner_thread(KuduScanNode *scanNode, const string& name, const string* initial_token) { diff --git a/be/src/exec/kudu_scanner.cpp b/be/src/exec/kudu_scanner.cpp 
index 57accc4ff4..9865cbfe7f 100644 --- a/be/src/exec/kudu_scanner.cpp +++ b/be/src/exec/kudu_scanner.cpp @@ -111,7 +111,7 @@ Status KuduScanner::get_next(RowBatch* row_batch, bool* eos) { close_current_client_scanner(); *eos = true; } - return Status::OK; + return Status::OK(); } void KuduScanner::close() { @@ -145,7 +145,7 @@ Status KuduScanner::open_next_scan_token(const string& scan_token) { // SCOPED_TIMER(_state->total_storage_wait_timer()); KUDU_RETURN_IF_ERROR(_scanner->Open(), "Unable to open scanner"); } - return Status::OK; + return Status::OK(); } void KuduScanner::close_current_client_scanner() { @@ -164,7 +164,7 @@ Status KuduScanner::handle_empty_projection(RowBatch* row_batch, bool* batch_don if (row_batch->at_capacity() || _scan_node->reached_limit()) { *batch_done = true; } - return Status::OK; + return Status::OK(); } Status KuduScanner::decode_rows_into_row_batch(RowBatch* row_batch, Tuple** tuple_mem, @@ -209,7 +209,7 @@ Status KuduScanner::decode_rows_into_row_batch(RowBatch* row_batch, Tuple** tupl // Check the status in case an error status was set during conjunct evaluation. 
//return _state->get_query_status(); - return Status::OK; + return Status::OK(); } Status KuduScanner::get_next_scanner_batch() { @@ -222,7 +222,7 @@ Status KuduScanner::get_next_scanner_batch() { _cur_kudu_batch_num_read = 0; COUNTER_UPDATE(_scan_node->rows_read_counter(), _cur_kudu_batch.NumRows()); _last_alive_time_micros = now; - return Status::OK; + return Status::OK(); } } // namespace impala diff --git a/be/src/exec/kudu_util.cpp b/be/src/exec/kudu_util.cpp index 0f4bcb6ad1..5a65b15e8c 100644 --- a/be/src/exec/kudu_util.cpp +++ b/be/src/exec/kudu_util.cpp @@ -46,12 +46,12 @@ bool KuduIsAvailable() { return CheckKuduAvailability().ok(); } Status CheckKuduAvailability() { if (KuduClientIsSupported()) { if (config::disable_kudu) { - return Status(TStatusCode::KUDU_NOT_ENABLED); + return Status::InternalError("Kudu is not enabled"); } else{ - return Status::OK; + return Status::OK(); } } - return Status(TStatusCode::KUDU_NOT_SUPPORTED_ON_OS); + return Status::InternalError("Kudu is not supported on this operating system"); } Status CreateKuduClient(const std::vector& master_addrs, @@ -61,7 +61,7 @@ Status CreateKuduClient(const std::vector& master_addrs, b.add_master_server_addr(address); } KUDU_RETURN_IF_ERROR(b.Build(client), "Unable to create Kudu client"); - return Status::OK; + return Status::OK(); } std::string KuduSchemaDebugString(const KuduSchema& schema) { diff --git a/be/src/exec/kudu_util.h b/be/src/exec/kudu_util.h index 3118c25d38..095d7c6976 100644 --- a/be/src/exec/kudu_util.h +++ b/be/src/exec/kudu_util.h @@ -60,12 +60,12 @@ void LogKuduMessage(kudu::client::KuduLogSeverity severity, const char* filename do { \ kudu::Status _s = (expr); \ if (UNLIKELY(!_s.ok())) { \ - return Status(_s.ToString()); \ + return Status::InternalError(_s.ToString()); \ } \ } while (0) - // 63: return Status(strings::Substitute("$0: $1", prepend, _s.ToString())); + // 63: return Status::InternalError(strings::Substitute("$0: $1", prepend, _s.ToString())); } /// 
namespace impala #endif diff --git a/be/src/exec/local_file_reader.cpp b/be/src/exec/local_file_reader.cpp index d4b379af08..331ce6104d 100644 --- a/be/src/exec/local_file_reader.cpp +++ b/be/src/exec/local_file_reader.cpp @@ -34,7 +34,7 @@ Status LocalFileReader::open() { std::stringstream ss; ss << "Open file failed. path=" << _path << ", error=" << strerror_r(errno, err_buf, 64); - return Status(ss.str()); + return Status::InternalError(ss.str()); } if (_start_offset != 0) { @@ -44,18 +44,18 @@ Status LocalFileReader::open() { std::stringstream ss; ss << "Seek to start_offset failed. offset=" << _start_offset << ", error=" << strerror_r(errno, err_buf, 64); - return Status(ss.str()); + return Status::InternalError(ss.str()); } } - return Status::OK; + return Status::OK(); } Status LocalFileReader::read(uint8_t* buf, size_t* buf_len, bool* eof) { if (_eof) { *buf_len = 0; *eof = true; - return Status::OK; + return Status::OK(); } size_t read_len = fread(buf, 1, *buf_len, _fp); if (read_len < *buf_len) { @@ -64,7 +64,7 @@ Status LocalFileReader::read(uint8_t* buf, size_t* buf_len, bool* eof) { std::stringstream ss; ss << "Read file failed. path=" << _path << ", error=" << strerror_r(errno, err_buf, 64); - return Status(ss.str()); + return Status::InternalError(ss.str()); } else if (feof(_fp)) { *buf_len = read_len; _eof = true; @@ -72,10 +72,10 @@ Status LocalFileReader::read(uint8_t* buf, size_t* buf_len, bool* eof) { *eof = true; } } else { - return Status("Unknown read failed."); + return Status::InternalError("Unknown read failed."); } } - return Status::OK; + return Status::OK(); } void LocalFileReader::close() { diff --git a/be/src/exec/local_file_writer.cpp b/be/src/exec/local_file_writer.cpp index dfef24d38a..dcb103ae78 100644 --- a/be/src/exec/local_file_writer.cpp +++ b/be/src/exec/local_file_writer.cpp @@ -35,7 +35,7 @@ Status LocalFileWriter::open() { ss << "Open file failed. 
path=" << _path << ", errno= " << errno << ", description=" << get_str_err_msg(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } if (_start_offset != 0) { @@ -45,11 +45,11 @@ Status LocalFileWriter::open() { ss << "Seek to start_offset failed. offset=" << _start_offset << ", errno= " << errno << ", description=" << get_str_err_msg(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } } - return Status::OK; + return Status::OK(); } Status LocalFileWriter::write(const uint8_t* buf, size_t buf_len, size_t* written_len) { @@ -61,11 +61,11 @@ Status LocalFileWriter::write(const uint8_t* buf, size_t buf_len, size_t* writte << ", path=" << _path << ", failed with errno=" << errno << ", description=" << get_str_err_msg(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } *written_len = bytes_written; - return Status::OK; + return Status::OK(); } void LocalFileWriter::close() { diff --git a/be/src/exec/lzo_decompressor.cpp b/be/src/exec/lzo_decompressor.cpp index c82d0644c0..c017c87711 100644 --- a/be/src/exec/lzo_decompressor.cpp +++ b/be/src/exec/lzo_decompressor.cpp @@ -52,7 +52,7 @@ LzopDecompressor::~LzopDecompressor() { } Status LzopDecompressor::init() { - return Status::OK; + return Status::OK(); } Status LzopDecompressor::decompress( @@ -65,7 +65,7 @@ Status LzopDecompressor::decompress( // this is the first time to call lzo decompress, parse the header info first RETURN_IF_ERROR(parse_header_info(input, input_len, input_bytes_read, more_input_bytes)); if (*more_input_bytes > 0) { - return Status::OK; + return Status::OK(); } } @@ -82,7 +82,7 @@ Status LzopDecompressor::decompress( if (left_input_len < sizeof(uint32_t)) { // block is at least have uncompressed_size *more_input_bytes = sizeof(uint32_t) - left_input_len; - return Status::OK; + return Status::OK(); } uint8_t* block_start = input + *input_bytes_read; @@ -93,13 +93,13 @@ Status LzopDecompressor::decompress( left_input_len -= 
sizeof(uint32_t); if (uncompressed_size == 0) { *stream_end = true; - return Status::OK; + return Status::OK(); } // 2. compressed size if (left_input_len < sizeof(uint32_t)) { *more_input_bytes = sizeof(uint32_t) - left_input_len; - return Status::OK; + return Status::OK(); } uint32_t compressed_size; @@ -109,7 +109,7 @@ Status LzopDecompressor::decompress( std::stringstream ss; ss << "lzo block size: " << compressed_size << " is greater than LZO_MAX_BLOCK_SIZE: " << LZO_MAX_BLOCK_SIZE; - return Status(ss.str()); + return Status::InternalError(ss.str()); } // 3. out checksum @@ -117,7 +117,7 @@ Status LzopDecompressor::decompress( if (_header_info.output_checksum_type != CHECK_NONE) { if (left_input_len < sizeof(uint32_t)) { *more_input_bytes = sizeof(uint32_t) - left_input_len; - return Status::OK; + return Status::OK(); } ptr = get_uint32(ptr, &out_checksum); @@ -129,7 +129,7 @@ Status LzopDecompressor::decompress( if (compressed_size < uncompressed_size && _header_info.input_checksum_type != CHECK_NONE) { if (left_input_len < sizeof(uint32_t)) { *more_input_bytes = sizeof(uint32_t) - left_input_len; - return Status::OK; + return Status::OK(); } ptr = get_uint32(ptr, &out_checksum); @@ -143,7 +143,7 @@ Status LzopDecompressor::decompress( // 5. checksum compressed data if (left_input_len < compressed_size) { *more_input_bytes = compressed_size - left_input_len; - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(checksum(_header_info.input_checksum_type, "compressed", in_checksum, ptr, compressed_size)); @@ -151,7 +151,7 @@ Status LzopDecompressor::decompress( // 6. 
decompress if (output_max_len < uncompressed_size) { *more_output_bytes = uncompressed_size - output_max_len; - return Status::OK; + return Status::OK(); } if (compressed_size == uncompressed_size) { // the data is uncompressed, just copy to the output buf @@ -167,7 +167,7 @@ Status LzopDecompressor::decompress( ss << "Lzo decompression failed with ret: " << ret << " decompressed len: " << uncompressed_size << " expected: " << *decompressed_len; - return Status(ss.str()); + return Status::InternalError(ss.str()); } RETURN_IF_ERROR(checksum(_header_info.output_checksum_type, "decompressed", @@ -195,7 +195,7 @@ Status LzopDecompressor::decompress( << " input_bytes_read: " << *input_bytes_read << " next_uncompressed_size: " << next_uncompressed_size; - return Status::OK; + return Status::OK(); } // file-header ::= -- most of this information is not used. @@ -219,7 +219,7 @@ Status LzopDecompressor::parse_header_info(uint8_t* input, size_t input_len, << ", or parsing header info may failed." << " only given: " << input_len; *more_input_bytes = MIN_HEADER_SIZE - input_len; - return Status::OK; + return Status::OK(); } uint8_t* ptr = input; @@ -227,7 +227,7 @@ Status LzopDecompressor::parse_header_info(uint8_t* input, size_t input_len, if (memcmp(ptr, LZOP_MAGIC, sizeof(LZOP_MAGIC))) { std::stringstream ss; ss << "invalid lzo magic number"; - return Status(ss.str()); + return Status::InternalError(ss.str()); } ptr += sizeof(LZOP_MAGIC); uint8_t* header = ptr; @@ -238,7 +238,7 @@ Status LzopDecompressor::parse_header_info(uint8_t* input, size_t input_len, std::stringstream ss; ss << "compressed with later version of lzop: " << &_header_info.version << " must be less than: " << LZOP_VERSION; - return Status(ss.str()); + return Status::InternalError(ss.str()); } // 3. 
lib version @@ -247,7 +247,7 @@ Status LzopDecompressor::parse_header_info(uint8_t* input, size_t input_len, std::stringstream ss; ss << "compressed with incompatible lzo version: " << &_header_info.lib_version << "must be at least: " << MIN_LZO_VERSION; - return Status(ss.str()); + return Status::InternalError(ss.str()); } // 4. version needed @@ -256,7 +256,7 @@ Status LzopDecompressor::parse_header_info(uint8_t* input, size_t input_len, std::stringstream ss; ss << "compressed with imp incompatible lzo version: " << &_header_info.version << " must be at no more than: " << LZOP_VERSION; - return Status(ss.str()); + return Status::InternalError(ss.str()); } // 5. method @@ -264,7 +264,7 @@ Status LzopDecompressor::parse_header_info(uint8_t* input, size_t input_len, if (_header_info.method < 1 || _header_info.method > 3) { std::stringstream ss; ss << "invalid compression method: " << _header_info.method; - return Status(ss.str()); + return Status::InternalError(ss.str()); } // 6. skip level @@ -276,7 +276,7 @@ Status LzopDecompressor::parse_header_info(uint8_t* input, size_t input_len, if (flags & (F_RESERVED | F_MULTIPART | F_H_FILTER)) { std::stringstream ss; ss << "unsupported lzo flags: " << flags; - return Status(ss.str()); + return Status::InternalError(ss.str()); } _header_info.header_checksum_type = header_type(flags); _header_info.input_checksum_type = input_type(flags); @@ -294,7 +294,7 @@ Status LzopDecompressor::parse_header_info(uint8_t* input, size_t input_len, size_t left = input_len - (ptr - input); if (left < filename_len) { *more_input_bytes = filename_len - left; - return Status::OK; + return Status::OK(); } _header_info.filename = std::string((char*) ptr, (size_t) filename_len); @@ -304,7 +304,7 @@ Status LzopDecompressor::parse_header_info(uint8_t* input, size_t input_len, // 10. 
checksum if (left < sizeof(uint32_t)) { *more_input_bytes = sizeof(uint32_t) - left; - return Status::OK; + return Status::OK(); } uint32_t expected_checksum; uint8_t* cur = ptr; @@ -322,7 +322,7 @@ Status LzopDecompressor::parse_header_info(uint8_t* input, size_t input_len, std::stringstream ss; ss << "invalid header checksum: " << computed_checksum << " expected: " << expected_checksum; - return Status(ss.str()); + return Status::InternalError(ss.str()); } left -= sizeof(uint32_t); @@ -330,7 +330,7 @@ Status LzopDecompressor::parse_header_info(uint8_t* input, size_t input_len, if (flags & F_H_EXTRA_FIELD) { if (left < sizeof(uint32_t)) { *more_input_bytes = sizeof(uint32_t) - left; - return Status::OK; + return Status::OK(); } uint32_t extra_len; ptr = get_uint32(ptr, &extra_len); @@ -339,7 +339,7 @@ Status LzopDecompressor::parse_header_info(uint8_t* input, size_t input_len, // add the checksum and the len to the total ptr size. if (left < sizeof(int32_t) + extra_len) { *more_input_bytes = sizeof(int32_t) + extra_len - left; - return Status::OK; + return Status::OK(); } left -= sizeof(int32_t) + extra_len; ptr += sizeof(int32_t) + extra_len; @@ -351,7 +351,7 @@ Status LzopDecompressor::parse_header_info(uint8_t* input, size_t input_len, _is_header_loaded = true; LOG(INFO) << debug_info(); - return Status::OK; + return Status::OK(); } Status LzopDecompressor::checksum(LzoChecksum type, const std::string& source, @@ -360,7 +360,7 @@ Status LzopDecompressor::checksum(LzoChecksum type, const std::string& source, uint32_t computed_checksum; switch (type) { case CHECK_NONE: - return Status::OK; + return Status::OK(); case CHECK_CRC32: computed_checksum = lzo_crc32(CRC32_INIT_VALUE, ptr, len); break; @@ -370,7 +370,7 @@ Status LzopDecompressor::checksum(LzoChecksum type, const std::string& source, default: std::stringstream ss; ss << "Invalid checksum type: " << type; - return Status(ss.str()); + return Status::InternalError(ss.str()); } if (computed_checksum != 
expected) { @@ -378,10 +378,10 @@ Status LzopDecompressor::checksum(LzoChecksum type, const std::string& source, ss << "checksum of " << source << " block failed." << " computed checksum: " << computed_checksum << " expected: " << expected; - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } std::string LzopDecompressor::debug_info() { diff --git a/be/src/exec/merge_join_node.cpp b/be/src/exec/merge_join_node.cpp index bc90e1f7e1..ebda7e389e 100644 --- a/be/src/exec/merge_join_node.cpp +++ b/be/src/exec/merge_join_node.cpp @@ -71,7 +71,7 @@ Status MergeJoinNode::init(const TPlanNode& tnode, RuntimeState* state) { RETURN_IF_ERROR(Expr::create_expr_trees( _pool, tnode.merge_join_node.other_join_conjuncts, &_other_join_conjunct_ctxs)); - return Status::OK; + return Status::OK(); } Status MergeJoinNode::prepare(RuntimeState* state) { @@ -112,7 +112,7 @@ Status MergeJoinNode::prepare(RuntimeState* state) { break; default: - return Status("unspport compare type."); + return Status::InternalError("unspport compare type."); break; } } @@ -138,12 +138,12 @@ Status MergeJoinNode::prepare(RuntimeState* state) { _right_child_ctx.reset( new ChildReaderContext(row_desc(), state->batch_size(), state->instance_mem_tracker())); - return Status::OK; + return Status::OK(); } Status MergeJoinNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::CLOSE)); Expr::close(_left_expr_ctxs, state); @@ -170,7 +170,7 @@ Status MergeJoinNode::open(RuntimeState* state) { RETURN_IF_ERROR(get_input_row(state, 0)); RETURN_IF_ERROR(get_input_row(state, 1)); - return Status::OK; + return Status::OK(); } Status MergeJoinNode::get_next(RuntimeState* state, RowBatch* out_batch, bool* eos) { @@ -180,7 +180,7 @@ Status MergeJoinNode::get_next(RuntimeState* state, RowBatch* out_batch, bool* e if (reached_limit() || _eos) { *eos = true; - return 
Status::OK; + return Status::OK(); } while (true) { @@ -193,7 +193,7 @@ Status MergeJoinNode::get_next(RuntimeState* state, RowBatch* out_batch, bool* e if (*eos) { _eos = true; - return Status::OK; + return Status::OK(); } if (eval_conjuncts(&_other_join_conjunct_ctxs[0], _other_join_conjunct_ctxs.size(), row)) { @@ -207,7 +207,7 @@ Status MergeJoinNode::get_next(RuntimeState* state, RowBatch* out_batch, bool* e } } - return Status::OK; + return Status::OK(); } void MergeJoinNode::create_output_row(TupleRow* out, TupleRow* left, TupleRow* right) { @@ -231,10 +231,10 @@ void MergeJoinNode::create_output_row(TupleRow* out, TupleRow* left, TupleRow* r Status MergeJoinNode::compare_row(TupleRow* left_row, TupleRow* right_row, bool* is_lt) { if (left_row == NULL) { *is_lt = false; - return Status::OK; + return Status::OK(); } else if (right_row == NULL) { *is_lt = true; - return Status::OK; + return Status::OK(); } for (int i = 0; i < _left_expr_ctxs.size(); ++i) { @@ -244,19 +244,19 @@ Status MergeJoinNode::compare_row(TupleRow* left_row, TupleRow* right_row, bool* if (cmp_val < 0) { *is_lt = true; - return Status::OK; + return Status::OK(); } else if (cmp_val == 0) { // do nothing } else { *is_lt = false; - return Status::OK; + return Status::OK(); } } // equal *is_lt = false; - return Status::OK; + return Status::OK(); } Status MergeJoinNode::get_next_row(RuntimeState* state, TupleRow* out_row, bool* eos) { @@ -265,7 +265,7 @@ Status MergeJoinNode::get_next_row(RuntimeState* state, TupleRow* out_row, bool* if (left_row == NULL && right_row == NULL) { *eos = true; - return Status::OK; + return Status::OK(); } bool is_lt = true; @@ -279,7 +279,7 @@ Status MergeJoinNode::get_next_row(RuntimeState* state, TupleRow* out_row, bool* RETURN_IF_ERROR(get_input_row(state, 1)); } - return Status::OK; + return Status::OK(); } Status MergeJoinNode::get_input_row(RuntimeState* state, int child_idx) { @@ -319,11 +319,11 @@ Status MergeJoinNode::get_input_row(RuntimeState* state, 
int child_idx) { if (ctx->row_idx >= ctx->batch.num_rows()) { ctx->current_row = NULL; - return Status::OK; + return Status::OK(); } ctx->current_row = ctx->batch.get_row(ctx->row_idx++); - return Status::OK; + return Status::OK(); } void MergeJoinNode::debug_string(int indentation_level, stringstream* out) const { diff --git a/be/src/exec/merge_node.cpp b/be/src/exec/merge_node.cpp index 82c29a6678..9d393b4947 100644 --- a/be/src/exec/merge_node.cpp +++ b/be/src/exec/merge_node.cpp @@ -56,7 +56,7 @@ Status MergeNode::init(const TPlanNode& tnode, RuntimeState* state) { _result_expr_ctx_lists.push_back(ctxs); } - return Status::OK; + return Status::OK(); } Status MergeNode::prepare(RuntimeState* state) { @@ -87,7 +87,7 @@ Status MergeNode::prepare(RuntimeState* state) { DCHECK_EQ(_result_expr_ctx_lists[i].size(), _materialized_slots.size()); } - return Status::OK; + return Status::OK(); } Status MergeNode::open(RuntimeState* state) { @@ -102,7 +102,7 @@ Status MergeNode::open(RuntimeState* state) { RETURN_IF_ERROR(Expr::open(_result_expr_ctx_lists[i], state)); } - return Status::OK; + return Status::OK(); } Status MergeNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { @@ -124,7 +124,7 @@ Status MergeNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) *eos = reached_limit(); if (*eos || row_batch->is_full()) { - return Status::OK; + return Status::OK(); } } @@ -157,7 +157,7 @@ Status MergeNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) _child_idx = INVALID_CHILD_IDX; } - return Status::OK; + return Status::OK(); } // Fetch new batch if one is available, otherwise move on to next child. 
@@ -179,12 +179,12 @@ Status MergeNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) _child_idx = INVALID_CHILD_IDX; *eos = true; - return Status::OK; + return Status::OK(); } Status MergeNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } // don't call ExecNode::close(), it always closes all children _child_row_batch.reset(NULL); diff --git a/be/src/exec/mysql_scan_node.cpp b/be/src/exec/mysql_scan_node.cpp index 3262e91ed8..b7393e62f3 100644 --- a/be/src/exec/mysql_scan_node.cpp +++ b/be/src/exec/mysql_scan_node.cpp @@ -47,11 +47,11 @@ Status MysqlScanNode::prepare(RuntimeState* state) { VLOG(1) << "MysqlScanNode::Prepare"; if (_is_init) { - return Status::OK; + return Status::OK(); } if (NULL == state) { - return Status("input pointer is NULL."); + return Status::InternalError("input pointer is NULL."); } RETURN_IF_ERROR(ScanNode::prepare(state)); @@ -59,7 +59,7 @@ Status MysqlScanNode::prepare(RuntimeState* state) { _tuple_desc = state->desc_tbl().get_tuple_descriptor(_tuple_id); if (NULL == _tuple_desc) { - return Status("Failed to get tuple descriptor."); + return Status::InternalError("Failed to get tuple descriptor."); } _slot_num = _tuple_desc->slots().size(); @@ -68,7 +68,7 @@ Status MysqlScanNode::prepare(RuntimeState* state) { static_cast(_tuple_desc->table_desc()); if (NULL == mysql_table) { - return Status("mysql table pointer is NULL."); + return Status::InternalError("mysql table pointer is NULL."); } _my_param.host = mysql_table->host(); @@ -80,24 +80,24 @@ Status MysqlScanNode::prepare(RuntimeState* state) { _mysql_scanner.reset(new(std::nothrow) MysqlScanner(_my_param)); if (_mysql_scanner.get() == NULL) { - return Status("new a mysql scanner failed."); + return Status::InternalError("new a mysql scanner failed."); } _tuple_pool.reset(new(std::nothrow) MemPool(mem_tracker())); if (_tuple_pool.get() == NULL) { - return Status("new a mem pool failed."); + return 
Status::InternalError("new a mem pool failed."); } _text_converter.reset(new(std::nothrow) TextConverter('\\')); if (_text_converter.get() == NULL) { - return Status("new a text convertor failed."); + return Status::InternalError("new a text convertor failed."); } _is_init = true; - return Status::OK; + return Status::OK(); } Status MysqlScanNode::open(RuntimeState* state) { @@ -105,11 +105,11 @@ Status MysqlScanNode::open(RuntimeState* state) { VLOG(1) << "MysqlScanNode::Open"; if (NULL == state) { - return Status("input pointer is NULL."); + return Status::InternalError("input pointer is NULL."); } if (!_is_init) { - return Status("used before initialize."); + return Status::InternalError("used before initialize."); } RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::OPEN)); @@ -127,10 +127,10 @@ Status MysqlScanNode::open(RuntimeState* state) { } if (_mysql_scanner->field_num() != materialize_num) { - return Status("input and output not equal."); + return Status::InternalError("input and output not equal."); } - return Status::OK; + return Status::OK(); } Status MysqlScanNode::write_text_slot(char* value, int value_length, @@ -139,21 +139,21 @@ Status MysqlScanNode::write_text_slot(char* value, int value_length, true, false, _tuple_pool.get())) { std::stringstream ss; ss << "fail to convert mysql value '" << value << "' TO " << slot->type(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } Status MysqlScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { VLOG(1) << "MysqlScanNode::GetNext"; if (NULL == state || NULL == row_batch || NULL == eos) { - return Status("input is NULL pointer"); + return Status::InternalError("input is NULL pointer"); } if (!_is_init) { - return Status("used before initialize."); + return Status::InternalError("used before initialize."); } RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::GETNEXT)); @@ -163,7 +163,7 @@ Status 
MysqlScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* e if (reached_limit()) { *eos = true; - return Status::OK; + return Status::OK(); } // create new tuple buffer for row_batch @@ -171,7 +171,7 @@ Status MysqlScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* e void* tuple_buffer = _tuple_pool->allocate(tuple_buffer_size); if (NULL == tuple_buffer) { - return Status("Allocate memory failed."); + return Status::InternalError("Allocate memory failed."); } _tuple = reinterpret_cast(tuple_buffer); @@ -186,7 +186,7 @@ Status MysqlScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* e // next get_next() call row_batch->tuple_data_pool()->acquire_data(_tuple_pool.get(), !reached_limit()); *eos = reached_limit(); - return Status::OK; + return Status::OK(); } // read mysql @@ -197,7 +197,7 @@ Status MysqlScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* e if (mysql_eos) { row_batch->tuple_data_pool()->acquire_data(_tuple_pool.get(), false); *eos = true; - return Status::OK; + return Status::OK(); } int row_idx = row_batch->add_row(); @@ -221,7 +221,7 @@ Status MysqlScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* e std::stringstream ss; ss << "nonnull column contains NULL. 
table=" << _table_name << ", column=" << slot_desc->col_name(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } } else { RETURN_IF_ERROR(write_text_slot(data[j], length[j], slot_desc, state)); @@ -241,12 +241,12 @@ Status MysqlScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* e } } - return Status::OK; + return Status::OK(); } Status MysqlScanNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::CLOSE)); SCOPED_TIMER(_runtime_profile->total_time_counter()); @@ -270,7 +270,7 @@ void MysqlScanNode::debug_string(int indentation_level, stringstream* out) const } Status MysqlScanNode::set_scan_ranges(const vector& scan_ranges) { - return Status::OK; + return Status::OK(); } } diff --git a/be/src/exec/mysql_scanner.cpp b/be/src/exec/mysql_scanner.cpp index 4aaf65af84..274650b711 100644 --- a/be/src/exec/mysql_scanner.cpp +++ b/be/src/exec/mysql_scanner.cpp @@ -49,13 +49,13 @@ MysqlScanner::~MysqlScanner() { Status MysqlScanner::open() { if (_is_open) { LOG(INFO) << "this scanner already opened"; - return Status::OK; + return Status::OK(); } _my_conn = mysql_init(NULL); if (NULL == _my_conn) { - return Status("mysql init failed."); + return Status::InternalError("mysql init failed."); } VLOG(1) << "MysqlScanner::Connect"; @@ -71,17 +71,17 @@ Status MysqlScanner::open() { } if (mysql_set_character_set(_my_conn, "utf8")) { - return Status("mysql set character set failed."); + return Status::InternalError("mysql set character set failed."); } _is_open = true; - return Status::OK; + return Status::OK(); } Status MysqlScanner::query(const std::string& query) { if (!_is_open) { - return Status("Query before open."); + return Status::InternalError("Query before open."); } int sql_result = mysql_query(_my_conn, query.c_str()); @@ -107,13 +107,13 @@ Status MysqlScanner::query(const std::string& query) { _field_num = mysql_num_fields(_my_result); - 
return Status::OK; + return Status::OK(); } Status MysqlScanner::query(const std::string& table, const std::vector& fields, const std::vector& filters) { if (!_is_open) { - return Status("Query before open."); + return Status::InternalError("Query before open."); } _sql_str = "SELECT"; @@ -145,22 +145,22 @@ Status MysqlScanner::query(const std::string& table, const std::vectorSupportsSerialize(); } - return Status::OK; + return Status::OK(); } Status NewPartitionedAggregationNode::prepare(RuntimeState* state) { @@ -234,7 +234,7 @@ Status NewPartitionedAggregationNode::prepare(RuntimeState* state) { expr_results_pool_.get(), expr_mem_tracker(), build_row_desc, row_desc, &ht_ctx_)); } // AddCodegenDisabledMessage(state); - return Status::OK; + return Status::OK(); } //void NewPartitionedAggregationNode::Codegen(RuntimeState* state) { @@ -298,7 +298,7 @@ Status NewPartitionedAggregationNode::open(RuntimeState* state) { } // Streaming preaggregations do all processing in GetNext(). - if (is_streaming_preagg_) return Status::OK; + if (is_streaming_preagg_) return Status::OK(); RowBatch batch(child(0)->row_desc(), state->batch_size(), mem_tracker()); // Read all the rows from the child and process them. 
@@ -345,7 +345,7 @@ Status NewPartitionedAggregationNode::open(RuntimeState* state) { if (!grouping_exprs_.empty()) { RETURN_IF_ERROR(MoveHashPartitions(child(0)->rows_returned())); } - return Status::OK; + return Status::OK(); } Status NewPartitionedAggregationNode::get_next(RuntimeState* state, RowBatch* row_batch, @@ -353,12 +353,12 @@ Status NewPartitionedAggregationNode::get_next(RuntimeState* state, RowBatch* ro int first_row_idx = row_batch->num_rows(); RETURN_IF_ERROR(GetNextInternal(state, row_batch, eos)); RETURN_IF_ERROR(HandleOutputStrings(row_batch, first_row_idx)); - return Status::OK; + return Status::OK(); } Status NewPartitionedAggregationNode::HandleOutputStrings(RowBatch* row_batch, int first_row_idx) { - if (!needs_finalize_ && !needs_serialize_) return Status::OK; + if (!needs_finalize_ && !needs_serialize_) return Status::OK(); // String data returned by Serialize() or Finalize() is from local expr allocations in // the agg function contexts, and will be freed on the next GetNext() call by // FreeLocalAllocations(). 
The data either needs to be copied out now or sent up the @@ -378,7 +378,7 @@ Status NewPartitionedAggregationNode::HandleOutputStrings(RowBatch* row_batch, break; } } - return Status::OK; + return Status::OK(); } Status NewPartitionedAggregationNode::CopyStringData(const SlotDescriptor& slot_desc, @@ -399,7 +399,7 @@ Status NewPartitionedAggregationNode::CopyStringData(const SlotDescriptor& slot_ memcpy(new_ptr, sv->ptr, sv->len); sv->ptr = new_ptr; } - return Status::OK; + return Status::OK(); } Status NewPartitionedAggregationNode::GetNextInternal(RuntimeState* state, @@ -413,7 +413,7 @@ Status NewPartitionedAggregationNode::GetNextInternal(RuntimeState* state, if (reached_limit()) { *eos = true; - return Status::OK; + return Status::OK(); } if (grouping_exprs_.empty()) { @@ -422,7 +422,7 @@ Status NewPartitionedAggregationNode::GetNextInternal(RuntimeState* state, if (!singleton_output_tuple_returned_) GetSingletonOutput(row_batch); singleton_output_tuple_returned_ = true; *eos = true; - return Status::OK; + return Status::OK(); } if (!child_eos_) { @@ -435,7 +435,7 @@ Status NewPartitionedAggregationNode::GetNextInternal(RuntimeState* state, *eos = partition_eos_ && child_eos_; COUNTER_SET(_rows_returned_counter, _num_rows_returned); - return Status::OK; + return Status::OK(); } void NewPartitionedAggregationNode::GetSingletonOutput(RowBatch* row_batch) { @@ -470,7 +470,7 @@ Status NewPartitionedAggregationNode::GetRowsFromPartition(RuntimeState* state, if (aggregated_partitions_.empty() && spilled_partitions_.empty()) { // No more partitions, all done. partition_eos_ = true; - return Status::OK; + return Status::OK(); } // Process next partition. 
RETURN_IF_ERROR(NextPartition()); @@ -511,7 +511,7 @@ Status NewPartitionedAggregationNode::GetRowsFromPartition(RuntimeState* state, partition_eos_ = reached_limit(); if (output_iterator_.AtEnd()) row_batch->mark_needs_deep_copy(); - return Status::OK; + return Status::OK(); } Status NewPartitionedAggregationNode::GetRowsStreaming(RuntimeState* state, @@ -579,7 +579,7 @@ Status NewPartitionedAggregationNode::GetRowsStreaming(RuntimeState* state, _num_rows_returned += out_batch->num_rows(); COUNTER_SET(num_passthrough_rows_, _num_rows_returned); - return Status::OK; + return Status::OK(); } bool NewPartitionedAggregationNode::ShouldExpandPreaggHashTables() const { @@ -670,7 +670,7 @@ Status NewPartitionedAggregationNode::reset(RuntimeState* state) { } Status NewPartitionedAggregationNode::close(RuntimeState* state) { - if (is_closed()) return Status::OK; + if (is_closed()) return Status::OK(); if (!singleton_output_tuple_returned_) { GetOutputTuple(agg_fn_evals_, singleton_output_tuple_, mem_pool_.get()); @@ -749,7 +749,7 @@ Status NewPartitionedAggregationNode::Partition::InitStreams() { // unaggregated row stream. DCHECK(!unaggregated_row_stream->has_write_iterator()); } - return Status::OK; + return Status::OK(); } Status NewPartitionedAggregationNode::Partition::InitHashTable(bool* got_memory) { @@ -784,7 +784,7 @@ Status NewPartitionedAggregationNode::Partition::SerializeStreamForSpilling() { DCHECK(!parent->serialize_stream_->is_pinned()); // Serialize and copy the spilled partition's stream into the new stream. 
- Status status = Status::OK; + Status status = Status::OK(); BufferedTupleStream3* new_stream = parent->serialize_stream_.get(); NewPartitionedHashTable::Iterator it = hash_tbl->Begin(parent->ht_ctx_.get()); while (!it.AtEnd()) { @@ -827,7 +827,7 @@ Status NewPartitionedAggregationNode::Partition::SerializeStreamForSpilling() { } DCHECK(parent->serialize_stream_->has_write_iterator()); } - return Status::OK; + return Status::OK(); } Status NewPartitionedAggregationNode::Partition::Spill(bool more_aggregate_rows) { @@ -868,7 +868,7 @@ Status NewPartitionedAggregationNode::Partition::Spill(bool more_aggregate_rows) if (parent->num_spilled_partitions_->value() == 1) { parent->add_runtime_exec_option("Spilled"); } - return Status::OK; + return Status::OK(); } void NewPartitionedAggregationNode::Partition::Close(bool finalize_rows) { @@ -1071,14 +1071,14 @@ Status NewPartitionedAggregationNode::AppendSpilledRow( partition->unaggregated_row_stream.get(); DCHECK(!stream->is_pinned()); Status status; - if (LIKELY(stream->AddRow(row, &status))) return Status::OK; + if (LIKELY(stream->AddRow(row, &status))) return Status::OK(); RETURN_IF_ERROR(status); // Keep trying to free memory by spilling until we succeed or hit an error. // Running out of partitions to spill is treated as an error by SpillPartition(). 
while (true) { RETURN_IF_ERROR(SpillPartition(AGGREGATED_ROWS)); - if (stream->AddRow(row, &status)) return Status::OK; + if (stream->AddRow(row, &status)) return Status::OK(); RETURN_IF_ERROR(status); } } @@ -1168,7 +1168,7 @@ Status NewPartitionedAggregationNode::CreateHashPartitions( if (!is_streaming_preagg_) { COUNTER_SET(max_partition_level_, level); } - return Status::OK; + return Status::OK(); } Status NewPartitionedAggregationNode::CheckAndResizeHashPartitions( @@ -1187,7 +1187,7 @@ Status NewPartitionedAggregationNode::CheckAndResizeHashPartitions( RETURN_IF_ERROR(SpillPartition(partitioning_aggregated_rows)); } } - return Status::OK; + return Status::OK(); } Status NewPartitionedAggregationNode::NextPartition() { @@ -1236,7 +1236,7 @@ Status NewPartitionedAggregationNode::NextPartition() { output_partition_ = partition; output_iterator_ = output_partition_->hash_tbl->Begin(ht_ctx_.get()); COUNTER_UPDATE(num_hash_buckets_, output_partition_->hash_tbl->num_buckets()); - return Status::OK; + return Status::OK(); } Status NewPartitionedAggregationNode::BuildSpilledPartition(Partition** built_partition) { @@ -1277,7 +1277,7 @@ Status NewPartitionedAggregationNode::BuildSpilledPartition(Partition** built_pa } else { *built_partition = dst_partition; } - return Status::OK; + return Status::OK(); } Status NewPartitionedAggregationNode::RepartitionSpilledPartition() { @@ -1322,7 +1322,7 @@ Status NewPartitionedAggregationNode::RepartitionSpilledPartition() { int64_t num_input_rows = partition->aggregated_row_stream->num_rows() + partition->unaggregated_row_stream->num_rows(); RETURN_IF_ERROR(MoveHashPartitions(num_input_rows)); - return Status::OK; + return Status::OK(); } template @@ -1350,7 +1350,7 @@ Status NewPartitionedAggregationNode::ProcessStream(BufferedTupleStream3* input_ } while (!eos); } input_stream->Close(NULL, RowBatch::FlushMode::NO_FLUSH_RESOURCES); - return Status::OK; + return Status::OK(); } Status 
NewPartitionedAggregationNode::SpillPartition(bool more_aggregate_rows) { @@ -1425,7 +1425,7 @@ Status NewPartitionedAggregationNode::MoveHashPartitions(int64_t num_input_rows) } VLOG(2) << ss.str(); hash_partitions_.clear(); - return Status::OK; + return Status::OK(); } void NewPartitionedAggregationNode::PushSpilledPartition(Partition* partition) { @@ -1585,7 +1585,7 @@ void NewPartitionedAggregationNode::ClosePartitions() { // codegen->GetPtrPtrType(codegen->GetType(ExprContext::LLVM_CLASS_NAME)); // StructType* tuple_struct = intermediate_tuple_desc_->GetLlvmStruct(codegen); // if (tuple_struct == NULL) { -// return Status("NewPartitionedAggregationNode::CodegenUpdateSlot(): failed to generate " +// return Status::InternalError("NewPartitionedAggregationNode::CodegenUpdateSlot(): failed to generate " // "intermediate tuple desc"); // } // PointerType* tuple_ptr_type = codegen->GetPtrType(tuple_struct); @@ -1732,10 +1732,10 @@ void NewPartitionedAggregationNode::ClosePartitions() { // // *fn = codegen->FinalizeFunction(*fn); // if (*fn == NULL) { -// return Status("NewPartitionedAggregationNode::CodegenUpdateSlot(): codegen'd " +// return Status::InternalError("NewPartitionedAggregationNode::CodegenUpdateSlot(): codegen'd " // "UpdateSlot() function failed verification, see log"); // } -// return Status::OK; +// return Status::OK(); //} // //Status NewPartitionedAggregationNode::CodegenCallUda(LlvmCodeGen* codegen, @@ -1774,7 +1774,7 @@ void NewPartitionedAggregationNode::ClosePartitions() { // Value* anyval_result = builder->CreateLoad(dst_lowered_ptr, "anyval_result"); // // *updated_dst_val = CodegenAnyVal(codegen, builder, dst_type, anyval_result); -// return Status::OK; +// return Status::OK(); //} // IR codegen for the UpdateTuple loop. 
This loop is query specific and based on the @@ -1824,13 +1824,13 @@ void NewPartitionedAggregationNode::ClosePartitions() { // // for (const SlotDescriptor* slot_desc : intermediate_tuple_desc_->slots()) { // if (slot_desc->type().type == TYPE_CHAR) { -// return Status("NewPartitionedAggregationNode::CodegenUpdateTuple(): cannot codegen" +// return Status::InternalError("NewPartitionedAggregationNode::CodegenUpdateTuple(): cannot codegen" // "CHAR in aggregations"); // } // } // // if (intermediate_tuple_desc_->GetLlvmStruct(codegen) == NULL) { -// return Status("NewPartitionedAggregationNode::CodegenUpdateTuple(): failed to generate " +// return Status::InternalError("NewPartitionedAggregationNode::CodegenUpdateTuple(): failed to generate " // "intermediate tuple desc"); // } // @@ -1910,10 +1910,10 @@ void NewPartitionedAggregationNode::ClosePartitions() { // // CodegenProcessBatch() does the final optimizations. // *fn = codegen->FinalizeFunction(*fn); // if (*fn == NULL) { -// return Status("NewPartitionedAggregationNode::CodegenUpdateTuple(): codegen'd " +// return Status::InternalError("NewPartitionedAggregationNode::CodegenUpdateTuple(): codegen'd " // "UpdateTuple() function failed verification, see log"); // } -// return Status::OK; +// return Status::OK(); //} // //Status NewPartitionedAggregationNode::CodegenProcessBatch(LlvmCodeGen* codegen, @@ -1978,7 +1978,7 @@ void NewPartitionedAggregationNode::ClosePartitions() { // DCHECK_GE(replaced, 1); // process_batch_fn = codegen->FinalizeFunction(process_batch_fn); // if (process_batch_fn == NULL) { -// return Status("NewPartitionedAggregationNode::CodegenProcessBatch(): codegen'd " +// return Status::InternalError("NewPartitionedAggregationNode::CodegenProcessBatch(): codegen'd " // "ProcessBatch() function failed verification, see log"); // } // @@ -1986,7 +1986,7 @@ void NewPartitionedAggregationNode::ClosePartitions() { // reinterpret_cast(&process_batch_no_grouping_fn_) : // 
reinterpret_cast(&process_batch_fn_); // codegen->AddFunctionToJit(process_batch_fn, codegened_fn_ptr); -// return Status::OK; +// return Status::OK(); //} // //Status NewPartitionedAggregationNode::CodegenProcessBatchStreaming( @@ -2051,13 +2051,13 @@ void NewPartitionedAggregationNode::ClosePartitions() { // DCHECK(process_batch_streaming_fn != NULL); // process_batch_streaming_fn = codegen->FinalizeFunction(process_batch_streaming_fn); // if (process_batch_streaming_fn == NULL) { -// return Status("NewPartitionedAggregationNode::CodegenProcessBatchStreaming(): codegen'd " +// return Status::InternalError("NewPartitionedAggregationNode::CodegenProcessBatchStreaming(): codegen'd " // "ProcessBatchStreaming() function failed verification, see log"); // } // // codegen->AddFunctionToJit(process_batch_streaming_fn, // reinterpret_cast(&process_batch_streaming_fn_)); -// return Status::OK; +// return Status::OK(); //} #endif diff --git a/be/src/exec/new_partitioned_aggregation_node_ir.cc b/be/src/exec/new_partitioned_aggregation_node_ir.cc index 547810e027..6674bbdd2f 100644 --- a/be/src/exec/new_partitioned_aggregation_node_ir.cc +++ b/be/src/exec/new_partitioned_aggregation_node_ir.cc @@ -32,7 +32,7 @@ Status NewPartitionedAggregationNode::ProcessBatchNoGrouping(RowBatch* batch) { FOREACH_ROW(batch, 0, batch_iter) { UpdateTuple(agg_fn_evals_.data(), output_tuple, batch_iter.get()); } - return Status::OK; + return Status::OK(); } template @@ -60,7 +60,7 @@ Status NewPartitionedAggregationNode::ProcessBatch(RowBatch* batch, } DCHECK(expr_vals_cache->AtEnd()); } - return Status::OK; + return Status::OK(); } template @@ -101,7 +101,7 @@ Status NewPartitionedAggregationNode::ProcessRow(TupleRow* row, // Hoist lookups out of non-null branch to speed up non-null case. 
const uint32_t hash = expr_vals_cache->CurExprValuesHash(); const uint32_t partition_idx = hash >> (32 - NUM_PARTITIONING_BITS); - if (expr_vals_cache->IsRowNull()) return Status::OK; + if (expr_vals_cache->IsRowNull()) return Status::OK(); // To process this row, we first see if it can be aggregated or inserted into this // partition's hash table. If we need to insert it and that fails, due to OOM, we // spill the partition. The partition to spill is not necessarily dst_partition, @@ -129,7 +129,7 @@ Status NewPartitionedAggregationNode::ProcessRow(TupleRow* row, } else if (found) { // Row is already in hash table. Do the aggregation and we're done. UpdateTuple(dst_partition->agg_fn_evals.data(), it.GetTuple(), row); - return Status::OK; + return Status::OK(); } // If we are seeing this result row for the first time, we need to construct the @@ -149,7 +149,7 @@ Status NewPartitionedAggregationNode::AddIntermediateTuple(Partition* partition, UpdateTuple(partition->agg_fn_evals.data(), intermediate_tuple, row, AGGREGATED_ROWS); // After copying and initializing the tuple, insert it into the hash table. 
insert_it.SetTuple(intermediate_tuple, hash); - return Status::OK; + return Status::OK(); } else if (!process_batch_status_.ok()) { return std::move(process_batch_status_); } @@ -209,7 +209,7 @@ Status NewPartitionedAggregationNode::ProcessBatchStreaming(bool needs_serialize } } - return Status::OK; + return Status::OK(); } bool NewPartitionedAggregationNode::TryAddToHashTable( diff --git a/be/src/exec/new_partitioned_hash_table.cc b/be/src/exec/new_partitioned_hash_table.cc index 948cefa732..c1234d6164 100644 --- a/be/src/exec/new_partitioned_hash_table.cc +++ b/be/src/exec/new_partitioned_hash_table.cc @@ -118,7 +118,7 @@ Status NewPartitionedHashTableCtx::Init(ObjectPool* pool, RuntimeState* state, i int scratch_row_size = sizeof(Tuple*) * num_build_tuples; scratch_row_ = reinterpret_cast(malloc(scratch_row_size)); if (UNLIKELY(scratch_row_ == NULL)) { - return Status(Substitute("Failed to allocate $0 bytes for scratch row of " + return Status::InternalError(Substitute("Failed to allocate $0 bytes for scratch row of " "NewPartitionedHashTableCtx.", scratch_row_size)); } @@ -127,7 +127,7 @@ Status NewPartitionedHashTableCtx::Init(ObjectPool* pool, RuntimeState* state, i ExprContext* context = pool->add(new ExprContext(build_exprs_[i])); context->prepare(state, row_desc, tracker); if (context == nullptr) { - return Status("Hashtable init error."); + return Status::InternalError("Hashtable init error."); } build_expr_evals_.push_back(context); } @@ -137,7 +137,7 @@ Status NewPartitionedHashTableCtx::Init(ObjectPool* pool, RuntimeState* state, i ExprContext* context = pool->add(new ExprContext(probe_exprs_[i])); context->prepare(state, row_desc_probe, tracker); if (context == nullptr) { - return Status("Hashtable init error."); + return Status::InternalError("Hashtable init error."); } probe_expr_evals_.push_back(context); } @@ -166,7 +166,7 @@ Status NewPartitionedHashTableCtx::Open(RuntimeState* state) { for (int i = 0; i < probe_expr_evals_.size(); i++) { 
RETURN_IF_ERROR(probe_expr_evals_[i]->open(state)); } - return Status::OK; + return Status::OK(); } void NewPartitionedHashTableCtx::Close(RuntimeState* state) { @@ -322,7 +322,7 @@ Status NewPartitionedHashTableCtx::ExprValuesCache::Init(RuntimeState* state, &expr_values_offsets_, &var_result_offset_); if (expr_values_bytes_per_row_ == 0) { DCHECK_EQ(num_exprs_, 0); - return Status::OK; + return Status::OK(); } DCHECK_GT(expr_values_bytes_per_row_, 0); // Compute the maximum number of cached rows which can fit in the memory budget. @@ -355,7 +355,7 @@ Status NewPartitionedHashTableCtx::ExprValuesCache::Init(RuntimeState* state, memset(cur_expr_values_hash_, 0, sizeof(uint32) * capacity_); null_bitmap_.Reset(capacity_); - return Status::OK; + return Status::OK(); } void NewPartitionedHashTableCtx::ExprValuesCache::Close(MemTracker* tracker) { @@ -453,12 +453,12 @@ Status NewPartitionedHashTable::Init(bool* got_memory) { if (bucket_allocation_ == nullptr) { num_buckets_ = 0; *got_memory = false; - return Status::OK; + return Status::OK(); } buckets_ = reinterpret_cast(bucket_allocation_->data()); memset(buckets_, 0, buckets_byte_size); *got_memory = true; - return Status::OK; + return Status::OK(); } void NewPartitionedHashTable::Close() { @@ -485,7 +485,7 @@ Status NewPartitionedHashTable::CheckAndResize( } if (shift > 0) return ResizeBuckets(num_buckets_ << shift, ht_ctx, got_memory); *got_memory = true; - return Status::OK; + return Status::OK(); } Status NewPartitionedHashTable::ResizeBuckets( @@ -499,7 +499,7 @@ Status NewPartitionedHashTable::ResizeBuckets( << " buckets."; if (max_num_buckets_ != -1 && num_buckets > max_num_buckets_) { *got_memory = false; - return Status::OK; + return Status::OK(); } ++num_resizes_; @@ -515,7 +515,7 @@ Status NewPartitionedHashTable::ResizeBuckets( RETURN_IF_ERROR(allocator_->Allocate(new_size, &new_allocation)); if (new_allocation == NULL) { *got_memory = false; - return Status::OK; + return Status::OK(); } Bucket* 
new_buckets = reinterpret_cast(new_allocation->data()); memset(new_buckets, 0, new_size); @@ -541,7 +541,7 @@ Status NewPartitionedHashTable::ResizeBuckets( bucket_allocation_ = std::move(new_allocation); buckets_ = reinterpret_cast(bucket_allocation_->data()); *got_memory = true; - return Status::OK; + return Status::OK(); } bool NewPartitionedHashTable::GrowNodeArray(Status* status) { @@ -760,7 +760,7 @@ Status NewPartitionedHashTableCtx::CodegenEvalRow(LlvmCodeGen* codegen, bool bui for (int i = 0; i < ctxs.size(); ++i) { // Disable codegen for CHAR if (ctxs[i]->root()->type().type == TYPE_CHAR) { - return Status("NewPartitionedHashTableCtx::CodegenEvalRow(): CHAR NYI"); + return Status::InternalError("NewPartitionedHashTableCtx::CodegenEvalRow(): CHAR NYI"); } } @@ -816,7 +816,7 @@ Status NewPartitionedHashTableCtx::CodegenEvalRow(LlvmCodeGen* codegen, bool bui if (!status.ok()) { (*fn)->eraseFromParent(); // deletes function *fn = NULL; - return Status(Substitute( + return Status::InternalError(Substitute( "Problem with NewPartitionedHashTableCtx::CodegenEvalRow(): $0", status.GetDetail())); } @@ -871,10 +871,10 @@ Status NewPartitionedHashTableCtx::CodegenEvalRow(LlvmCodeGen* codegen, bool bui *fn = codegen->FinalizeFunction(*fn); if (*fn == NULL) { - return Status("Codegen'd NewPartitionedHashTableCtx::EvalRow() function failed verification, " + return Status::InternalError("Codegen'd NewPartitionedHashTableCtx::EvalRow() function failed verification, " "see log"); } - return Status::OK; + return Status::OK(); } // Codegen for hashing the current row. 
In the case with both string and non-string data @@ -915,7 +915,7 @@ Status NewPartitionedHashTableCtx::CodegenHashRow(LlvmCodeGen* codegen, bool use for (int i = 0; i < build_expr_ctxs_.size(); ++i) { // Disable codegen for CHAR if (build_expr_ctxs_[i]->root()->type().type == TYPE_CHAR) { - return Status("NewPartitionedHashTableCtx::CodegenHashRow(): CHAR NYI"); + return Status::InternalError("NewPartitionedHashTableCtx::CodegenHashRow(): CHAR NYI"); } } @@ -1046,10 +1046,10 @@ Status NewPartitionedHashTableCtx::CodegenHashRow(LlvmCodeGen* codegen, bool use } *fn = codegen->FinalizeFunction(*fn); if (*fn == NULL) { - return Status( + return Status::InternalError( "Codegen'd NewPartitionedHashTableCtx::HashRow() function failed verification, see log"); } - return Status::OK; + return Status::OK(); } // Codegen for NewPartitionedHashTableCtx::Equals. For a group by with (bigint, string), @@ -1123,7 +1123,7 @@ Status NewPartitionedHashTableCtx::CodegenEquals(LlvmCodeGen* codegen, bool forc for (int i = 0; i < build_expr_ctxs_.size(); ++i) { // Disable codegen for CHAR if (build_expr_ctxs_[i]->root()->type().type == TYPE_CHAR) { - return Status("NewPartitionedHashTableCtx::CodegenEquals(): CHAR NYI"); + return Status::InternalError("NewPartitionedHashTableCtx::CodegenEquals(): CHAR NYI"); } } @@ -1167,7 +1167,7 @@ Status NewPartitionedHashTableCtx::CodegenEquals(LlvmCodeGen* codegen, bool forc if (!status.ok()) { (*fn)->eraseFromParent(); // deletes function *fn = NULL; - return Status( + return Status::InternalError( Substitute("Problem with NewPartitionedHashTableCtx::CodegenEquals: $0", status.GetDetail())); } if (build_expr_ctxs_.size() > LlvmCodeGen::CODEGEN_INLINE_EXPRS_THRESHOLD) { @@ -1236,10 +1236,10 @@ Status NewPartitionedHashTableCtx::CodegenEquals(LlvmCodeGen* codegen, bool forc } *fn = codegen->FinalizeFunction(*fn); if (*fn == NULL) { - return Status("Codegen'd NewPartitionedHashTableCtx::Equals() function failed verification, " + return 
Status::InternalError("Codegen'd NewPartitionedHashTableCtx::Equals() function failed verification, " "see log"); } - return Status::OK; + return Status::OK(); } Status NewPartitionedHashTableCtx::ReplaceHashTableConstants(LlvmCodeGen* codegen, @@ -1256,7 +1256,7 @@ Status NewPartitionedHashTableCtx::ReplaceHashTableConstants(LlvmCodeGen* codege fn, stores_duplicates, "stores_duplicates"); replacement_counts->quadratic_probing = codegen->ReplaceCallSitesWithBoolConst( fn, FLAGS_enable_quadratic_probing, "quadratic_probing"); - return Status::OK; + return Status::OK(); } #endif diff --git a/be/src/exec/new_partitioned_hash_table.h b/be/src/exec/new_partitioned_hash_table.h index 69c000cbdf..fda89dbff6 100644 --- a/be/src/exec/new_partitioned_hash_table.h +++ b/be/src/exec/new_partitioned_hash_table.h @@ -690,7 +690,7 @@ class NewPartitionedHashTable { /// max fill factor. /// If 'got_memory' is true, then it is guaranteed at least 'rows_to_add' rows can be /// inserted without need to resize. If there is not enough memory available to - /// resize the hash table, Status::OK() is returned and 'got_memory' is false. If a + /// resize the hash table, Status::OK() is returned and 'got_memory' is false. If a /// another error occurs, an error status may be returned.
Status CheckAndResize(uint64_t buckets_to_fill, const NewPartitionedHashTableCtx* ht_ctx, bool* got_memory); diff --git a/be/src/exec/olap_common.cpp b/be/src/exec/olap_common.cpp index a3827fb5f5..14310d5d8e 100644 --- a/be/src/exec/olap_common.cpp +++ b/be/src/exec/olap_common.cpp @@ -67,12 +67,12 @@ Status OlapScanKeys::get_key_range(std::vector* key_range) { key_range->push_back(range); } - return Status::OK; + return Status::OK(); } Status DorisScanRange::init() { if (!_scan_range.__isset.partition_column_ranges) { - return Status::OK; + return Status::OK(); } const std::vector& partition_column_ranges @@ -166,7 +166,7 @@ Status DorisScanRange::init() { break; } - return Status::OK; + return Status::OK(); } int DorisScanRange::has_intersection(const std::string column_name, diff --git a/be/src/exec/olap_common.h b/be/src/exec/olap_common.h index bb8304f0b8..46e00991fd 100644 --- a/be/src/exec/olap_common.h +++ b/be/src/exec/olap_common.h @@ -318,11 +318,11 @@ ColumnValueRange::ColumnValueRange(std::string col_name, PrimitiveType type, template Status ColumnValueRange::add_fixed_value(T value) { if (INVALID_TYPE == _column_type) { - return Status("AddFixedValue failed, Invalid type"); + return Status::InternalError("AddFixedValue failed, Invalid type"); } _fixed_values.insert(value); - return Status::OK; + return Status::OK(); } template @@ -434,7 +434,7 @@ void ColumnValueRange::convert_to_range_value() { template Status ColumnValueRange::add_range(SQLFilterOp op, T value) { if (INVALID_TYPE == _column_type) { - return Status("AddRange failed, Invalid type"); + return Status::InternalError("AddRange failed, Invalid type"); } if (is_fixed_value_range()) { @@ -468,7 +468,7 @@ Status ColumnValueRange::add_range(SQLFilterOp op, T value) { } default: { - return Status("AddRangefail! Unsupport SQLFilterOp."); + return Status::InternalError("AddRangefail! 
Unsupport SQLFilterOp."); } } @@ -515,7 +515,7 @@ Status ColumnValueRange::add_range(SQLFilterOp op, T value) { } default: { - return Status("AddRangefail! Unsupport SQLFilterOp."); + return Status::InternalError("AddRangefail! Unsupport SQLFilterOp."); } } } @@ -529,7 +529,7 @@ Status ColumnValueRange::add_range(SQLFilterOp op, T value) { } } - return Status::OK; + return Status::OK(); } template @@ -662,12 +662,12 @@ Status OlapScanKeys::extend_scan_key(ColumnValueRange& range) { if (range.is_empty_value_range()) { _begin_scan_keys.clear(); _end_scan_keys.clear(); - return Status::OK; + return Status::OK(); } // 2. stop extend ScanKey when it's already extend a range value if (_has_range_value) { - return Status::OK; + return Status::OK(); } //if a column doesn't have any predicate, we will try converting the range to fixed values @@ -680,7 +680,7 @@ Status OlapScanKeys::extend_scan_key(ColumnValueRange& range) { if (range.is_range_value_convertible()) { range.convert_to_range_value(); } else { - return Status::OK; + return Status::OK(); } } } else { @@ -791,7 +791,7 @@ Status OlapScanKeys::extend_scan_key(ColumnValueRange& range) { _end_include = range.is_end_include(); } - return Status::OK; + return Status::OK(); } } // namespace doris diff --git a/be/src/exec/olap_meta_reader.cpp b/be/src/exec/olap_meta_reader.cpp index fb5b65cbae..191a2d29ee 100644 --- a/be/src/exec/olap_meta_reader.cpp +++ b/be/src/exec/olap_meta_reader.cpp @@ -50,7 +50,7 @@ Status EngineMetaReader::get_hints( ss << "failed to get tablet: " << tablet_id << "with schema hash: " << schema_hash << ", reason: " << err; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } RuntimeProfile::Counter* show_hints_timer = profile->get_counter("ShowHintsTime"); @@ -70,7 +70,7 @@ Status EngineMetaReader::get_hints( block_row_count, &range); if (res != OLAP_SUCCESS) { OLAP_LOG_WARNING("fail to show hints by split range. 
[res=%d]", res); - return Status("fail to show hints"); + return Status::InternalError("fail to show hints"); } ranges.emplace_back(std::move(range)); have_valid_range = true; @@ -81,7 +81,7 @@ Status EngineMetaReader::get_hints( auto res = table->split_range({}, {}, block_row_count, &range); if (res != OLAP_SUCCESS) { OLAP_LOG_WARNING("fail to show hints by split range. [res=%d]", res); - return Status("fail to show hints"); + return Status::InternalError("fail to show hints"); } ranges.emplace_back(std::move(range)); } @@ -110,7 +110,7 @@ Status EngineMetaReader::get_hints( } } - return Status::OK; + return Status::OK(); } } // namespace doris diff --git a/be/src/exec/olap_rewrite_node.cpp b/be/src/exec/olap_rewrite_node.cpp index 32676811d3..a0e9139c02 100644 --- a/be/src/exec/olap_rewrite_node.cpp +++ b/be/src/exec/olap_rewrite_node.cpp @@ -45,7 +45,7 @@ Status OlapRewriteNode::init(const TPlanNode& tnode, RuntimeState* state) { _pool, tnode.olap_rewrite_node.columns, &_columns)); _column_types = tnode.olap_rewrite_node.column_types; _output_tuple_id = tnode.olap_rewrite_node.output_tuple_id; - return Status::OK; + return Status::OK(); } Status OlapRewriteNode::prepare(RuntimeState* state) { @@ -68,14 +68,14 @@ Status OlapRewriteNode::prepare(RuntimeState* state) { _column_types[i].precision, _column_types[i].scale); } } - return Status::OK; + return Status::OK(); } Status OlapRewriteNode::open(RuntimeState* state) { RETURN_IF_ERROR(ExecNode::open(state)); RETURN_IF_ERROR(Expr::open(_columns, state)); RETURN_IF_ERROR(child(0)->open(state)); - return Status::OK; + return Status::OK(); } Status OlapRewriteNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { @@ -86,7 +86,7 @@ Status OlapRewriteNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* // we're already done or we exhausted the last child batch and there won't be any // new ones *eos = true; - return Status::OK; + return Status::OK(); } // start (or continue) consuming row 
batches from child @@ -103,17 +103,17 @@ Status OlapRewriteNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* if (copy_rows(state, row_batch)) { *eos = reached_limit() || (_child_row_idx == _child_row_batch->num_rows() && _child_eos); - return Status::OK; + return Status::OK(); } if (_child_eos) { // finished w/ last child row batch, and child eos is true *eos = true; - return Status::OK; + return Status::OK(); } } - return Status::OK; + return Status::OK(); } bool OlapRewriteNode::copy_one_row(TupleRow* src_row, Tuple* tuple, @@ -261,7 +261,7 @@ bool OlapRewriteNode::copy_rows(RuntimeState* state, RowBatch* output_batch) { Status OlapRewriteNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } _child_row_batch.reset(); // RETURN_IF_ERROR(child(0)->close(state)); diff --git a/be/src/exec/olap_scan_node.cpp b/be/src/exec/olap_scan_node.cpp index 3547ccfedf..0177c6cd23 100644 --- a/be/src/exec/olap_scan_node.cpp +++ b/be/src/exec/olap_scan_node.cpp @@ -62,7 +62,7 @@ OlapScanNode::OlapScanNode(ObjectPool* pool, const TPlanNode& tnode, const Descr _scanner_done(false), _transfer_done(false), _wait_duration(0, 0, 1, 0), - _status(Status::OK), + _status(Status::OK()), _resource_info(nullptr), _buffered_bytes(0), _running_thread(0), @@ -86,7 +86,7 @@ Status OlapScanNode::init(const TPlanNode& tnode, RuntimeState* state) { // Now, we drop this functional DCHECK(!_is_result_order) << "ordered result don't support any more"; - return Status::OK; + return Status::OK(); } void OlapScanNode::_init_counter(RuntimeState* state) { @@ -154,7 +154,7 @@ Status OlapScanNode::prepare(RuntimeState* state) { _tuple_desc = state->desc_tbl().get_tuple_descriptor(_tuple_id); if (_tuple_desc == NULL) { // TODO: make sure we print all available diagnostic output to our error log - return Status("Failed to get tuple descriptor."); + return Status::InternalError("Failed to get tuple descriptor."); } const std::vector& slots = 
_tuple_desc->slots(); @@ -183,7 +183,7 @@ Status OlapScanNode::prepare(RuntimeState* state) { } _runtime_state = state; - return Status::OK; + return Status::OK(); } Status OlapScanNode::open(RuntimeState* state) { @@ -204,7 +204,7 @@ Status OlapScanNode::open(RuntimeState* state) { _resource_info = ResourceTls::get_resource_tls(); - return Status::OK; + return Status::OK(); } Status OlapScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { @@ -217,14 +217,14 @@ Status OlapScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eo _transfer_done = true; boost::lock_guard guard(_status_mutex); if (LIKELY(_status.ok())) { - _status = Status::CANCELLED; + _status = Status::Cancelled("Cancelled"); } return _status; } if (_eos) { *eos = true; - return Status::OK; + return Status::OK(); } // check if started. @@ -298,7 +298,7 @@ Status OlapScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eo row_batch->tuple_data_pool()->total_reserved_bytes()); delete materialized_batch; - return Status::OK; + return Status::OK(); } // all scanner done, change *eos to true @@ -311,12 +311,12 @@ Status OlapScanNode::collect_query_statistics(QueryStatistics* statistics) { RETURN_IF_ERROR(ExecNode::collect_query_statistics(statistics)); statistics->add_scan_bytes(_read_compressed_counter->value()); statistics->add_scan_rows(rows_returned()); - return Status::OK; + return Status::OK(); } Status OlapScanNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::CLOSE)); @@ -368,7 +368,7 @@ Status OlapScanNode::set_scan_ranges(const std::vector& scan_r COUNTER_UPDATE(_tablet_counter, 1); } - return Status::OK; + return Status::OK(); } Status OlapScanNode::start_scan(RuntimeState* state) { @@ -398,7 +398,7 @@ Status OlapScanNode::start_scan(RuntimeState* state) { // 6. 
Start multi thread to read serval `Sub Sub ScanRange` RETURN_IF_ERROR(start_scan_thread(state)); - return Status::OK; + return Status::OK(); } Status OlapScanNode::normalize_conjuncts() { @@ -510,7 +510,7 @@ Status OlapScanNode::normalize_conjuncts() { } } - return Status::OK; + return Status::OK(); } Status OlapScanNode::build_olap_filters() { @@ -531,7 +531,7 @@ Status OlapScanNode::build_olap_filters() { } } - return Status::OK; + return Status::OK(); } Status OlapScanNode::select_scan_ranges() { @@ -555,7 +555,7 @@ Status OlapScanNode::select_scan_ranges() { } } - return Status::OK; + return Status::OK(); } Status OlapScanNode::build_scan_key() { @@ -581,7 +581,7 @@ Status OlapScanNode::build_scan_key() { _scan_keys.debug(); - return Status::OK; + return Status::OK(); } Status OlapScanNode::split_scan_range() { @@ -604,7 +604,7 @@ Status OlapScanNode::split_scan_range() { DCHECK(_query_key_ranges.size() == _query_scan_ranges.size()); - return Status::OK; + return Status::OK(); } Status OlapScanNode::start_scan_thread(RuntimeState* state) { @@ -613,7 +613,7 @@ Status OlapScanNode::start_scan_thread(RuntimeState* state) { // thread num if (0 == _query_scan_ranges.size()) { _transfer_done = true; - return Status::OK; + return Status::OK(); } int key_range_size = _query_key_ranges.size(); @@ -658,7 +658,7 @@ Status OlapScanNode::start_scan_thread(RuntimeState* state) { new boost::thread( &OlapScanNode::transfer_thread, this, state)); - return Status::OK; + return Status::OK(); } template @@ -672,7 +672,7 @@ Status OlapScanNode::normalize_predicate(ColumnValueRange& range, SlotDescrip // 3. Add range to Column->ColumnValueRange map _column_value_ranges[slot->col_name()] = range; - return Status::OK; + return Status::OK(); } static bool ignore_cast(SlotDescriptor* slot, Expr* expr) { @@ -834,7 +834,7 @@ Status OlapScanNode::normalize_in_predicate(SlotDescriptor* slot, ColumnValueRan default: { LOG(WARNING) << "Normalize filter fail, Unsupport Primitive type. 
[type=" << expr->type() << "]"; - return Status("Normalize filter fail, Unsupport Primitive type"); + return Status::InternalError("Normalize filter fail, Unsupport Primitive type"); } } } @@ -842,7 +842,7 @@ Status OlapScanNode::normalize_in_predicate(SlotDescriptor* slot, ColumnValueRan } } - return Status::OK; + return Status::OK(); } void OlapScanNode::construct_is_null_pred_in_where_pred(Expr* expr, SlotDescriptor* slot, std::string is_null_str) { @@ -949,7 +949,7 @@ Status OlapScanNode::normalize_binary_predicate(SlotDescriptor* slot, ColumnValu default: { LOG(WARNING) << "Normalize filter fail, Unsupport Primitive type. [type=" << expr->type() << "]"; - return Status("Normalize filter fail, Unsupport Primitive type"); + return Status::InternalError("Normalize filter fail, Unsupport Primitive type"); } } @@ -960,7 +960,7 @@ Status OlapScanNode::normalize_binary_predicate(SlotDescriptor* slot, ColumnValu } } - return Status::OK; + return Status::OK(); } bool OlapScanNode::select_scan_range(boost::shared_ptr scan_range) { @@ -1010,12 +1010,12 @@ Status OlapScanNode::get_sub_scan_range( } } - return Status::OK; + return Status::OK(); } void OlapScanNode::transfer_thread(RuntimeState* state) { // scanner open pushdown to scanThread - Status status = Status::OK; + Status status = Status::OK(); for (auto scanner : _olap_scanners) { status = Expr::clone_if_not_exists(_conjunct_ctxs, state, scanner->conjunct_ctxs()); if (!status.ok()) { @@ -1155,7 +1155,7 @@ void OlapScanNode::transfer_thread(RuntimeState* state) { } void OlapScanNode::scanner_thread(OlapScanner* scanner) { - Status status = Status::OK; + Status status = Status::OK(); bool eos = false; RuntimeState* state = scanner->runtime_state(); DCHECK(NULL != state); @@ -1187,7 +1187,7 @@ void OlapScanNode::scanner_thread(OlapScanner* scanner) { while (!eos && raw_rows_read < raw_rows_threshold) { if (UNLIKELY(_transfer_done)) { eos = true; - status = Status::CANCELLED; + status = Status::Cancelled("Cancelled"); 
LOG(INFO) << "Scan thread cancelled, cause query done, maybe reach limit."; break; } @@ -1268,7 +1268,7 @@ Status OlapScanNode::add_one_batch(RowBatchInterface* row_batch) { } // remove one batch, notify main thread _row_batch_added_cv.notify_one(); - return Status::OK; + return Status::OK(); } void OlapScanNode::debug_string( diff --git a/be/src/exec/olap_scanner.cpp b/be/src/exec/olap_scanner.cpp index 068c6b7487..9af7735001 100644 --- a/be/src/exec/olap_scanner.cpp +++ b/be/src/exec/olap_scanner.cpp @@ -86,7 +86,7 @@ Status OlapScanner::_prepare( ss << "failed to get tablet: " << tablet_id << " with schema hash: " << schema_hash << ", reason: " << err; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } { ReadLock rdlock(_olap_table->get_header_lock_ptr()); @@ -95,7 +95,7 @@ Status OlapScanner::_prepare( std::stringstream ss; ss << "fail to get latest version of tablet: " << tablet_id; OLAP_LOG_WARNING(ss.str().c_str()); - return Status(ss.str()); + return Status::InternalError(ss.str()); } if (delta->end_version() == _version @@ -106,7 +106,7 @@ Status OlapScanner::_prepare( std::stringstream ss; ss << "fail to check version hash of tablet: " << tablet_id; - return Status(ss.str()); + return Status::InternalError(ss.str()); } } } @@ -116,7 +116,7 @@ Status OlapScanner::_prepare( RETURN_IF_ERROR(_init_params(key_ranges, filters, is_nulls)); } - return Status::OK; + return Status::OK(); } Status OlapScanner::open() { @@ -133,9 +133,9 @@ Status OlapScanner::open() { std::stringstream ss; ss << "failed to initialize storage reader. 
tablet=" << _params.olap_table->full_name() << ", res=" << res << ", backend=" << BackendOptions::get_localhost(); - return Status(ss.str().c_str()); + return Status::InternalError(ss.str().c_str()); } - return Status::OK; + return Status::OK(); } Status OlapScanner::_init_params( @@ -193,14 +193,14 @@ Status OlapScanner::_init_params( OLAPStatus res = _read_row_cursor.init(_olap_table->tablet_schema(), _params.return_columns); if (res != OLAP_SUCCESS) { OLAP_LOG_WARNING("fail to init row cursor.[res=%d]", res); - return Status("failed to initialize storage read row cursor"); + return Status::InternalError("failed to initialize storage read row cursor"); } _read_row_cursor.allocate_memory_for_string_type(_olap_table->tablet_schema()); for (auto cid : _return_columns) { _query_fields.push_back(_read_row_cursor.get_field_by_index(cid)); } - return Status::OK; + return Status::OK(); } Status OlapScanner::_init_return_columns() { @@ -213,7 +213,7 @@ Status OlapScanner::_init_return_columns() { std::stringstream ss; ss << "field name is invalied. 
field=" << slot->col_name(); LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } _return_columns.push_back(index); if (_olap_table->tablet_schema()[index].type == OLAP_FIELD_TYPE_VARCHAR || @@ -226,9 +226,9 @@ Status OlapScanner::_init_return_columns() { _query_slots.push_back(slot); } if (_return_columns.empty()) { - return Status("failed to build storage scanner, no materialized slot!"); + return Status::InternalError("failed to build storage scanner, no materialized slot!"); } - return Status::OK; + return Status::OK(); } Status OlapScanner::get_batch( @@ -251,7 +251,7 @@ Status OlapScanner::get_batch( // Read one row from reader auto res = _reader->next_row_with_aggregation(&_read_row_cursor, eof); if (res != OLAP_SUCCESS) { - return Status("Internal Error: read storage fail."); + return Status::InternalError("Internal Error: read storage fail."); } // If we reach end of this scanner, break if (UNLIKELY(*eof)) { @@ -347,7 +347,7 @@ Status OlapScanner::get_batch( } } - return Status::OK; + return Status::OK(); } void OlapScanner::_convert_row_to_tuple(Tuple* tuple) { @@ -470,13 +470,13 @@ void OlapScanner::_update_realtime_counter() { Status OlapScanner::close(RuntimeState* state) { if (_is_closed) { - return Status::OK; + return Status::OK(); } update_counter(); _reader.reset(); Expr::close(_conjunct_ctxs, state); _is_closed = true; - return Status::OK; + return Status::OK(); } } // namespace doris diff --git a/be/src/exec/olap_table_info.cpp b/be/src/exec/olap_table_info.cpp index 04e8f9f8f0..d9334398b8 100644 --- a/be/src/exec/olap_table_info.cpp +++ b/be/src/exec/olap_table_info.cpp @@ -53,7 +53,7 @@ Status OlapTableSchemaParam::init(const POlapTableSchemaParam& pschema) { if (it == std::end(slots_map)) { std::stringstream ss; ss << "unknown index column, column=" << col; - return Status(ss.str()); + return Status::InternalError(ss.str()); } index->slots.emplace_back(it->second); } @@ -64,7 +64,7 @@ Status 
OlapTableSchemaParam::init(const POlapTableSchemaParam& pschema) { [] (const OlapTableIndexSchema* lhs, const OlapTableIndexSchema* rhs) { return lhs->index_id < rhs->index_id; }); - return Status::OK; + return Status::OK(); } Status OlapTableSchemaParam::init(const TOlapTableSchemaParam& tschema) { @@ -87,7 +87,7 @@ Status OlapTableSchemaParam::init(const TOlapTableSchemaParam& tschema) { if (it == std::end(slots_map)) { std::stringstream ss; ss << "unknown index column, column=" << col; - return Status(ss.str()); + return Status::InternalError(ss.str()); } index->slots.emplace_back(it->second); } @@ -98,7 +98,7 @@ Status OlapTableSchemaParam::init(const TOlapTableSchemaParam& tschema) { [] (const OlapTableIndexSchema* lhs, const OlapTableIndexSchema* rhs) { return lhs->index_id < rhs->index_id; }); - return Status::OK; + return Status::OK(); } void OlapTableSchemaParam::to_protobuf(POlapTableSchemaParam* pschema) const { @@ -168,7 +168,7 @@ Status OlapTablePartitionParam::init() { if (it == std::end(slots_map)) { std::stringstream ss; ss << "partition column not found, column=" << _t_param.partition_column; - return Status(ss.str()); + return Status::InternalError(ss.str()); } _partition_slot_desc = it->second; } @@ -181,7 +181,7 @@ Status OlapTablePartitionParam::init() { if (it == std::end(slots_map)) { std::stringstream ss; ss << "distributed column not found, columns=" << col; - return Status(ss.str()); + return Status::InternalError(ss.str()); } _distributed_slot_descs.emplace_back(it->second); } @@ -204,7 +204,7 @@ Status OlapTablePartitionParam::init() { ss << "number of partition's index is not equal with schema's" << ", num_part_indexes=" << t_part.indexes.size() << ", num_schema_indexes=" << num_indexes; - return Status(ss.str()); + return Status::InternalError(ss.str()); } part->indexes = t_part.indexes; std::sort(part->indexes.begin(), part->indexes.end(), @@ -218,13 +218,13 @@ Status OlapTablePartitionParam::init() { ss << "partition's index is not 
equal with schema's" << ", part_index=" << part->indexes[j].index_id << ", schema_index=" << _schema->indexes()[j]->index_id; - return Status(ss.str()); + return Status::InternalError(ss.str()); } } _partitions.emplace_back(part); _partitions_map->emplace(part->end_key, part); } - return Status::OK; + return Status::OK(); } bool OlapTablePartitionParam::find_tablet(Tuple* tuple, @@ -252,7 +252,7 @@ Status OlapTablePartitionParam::_create_partition_key(const TExprNode& t_expr, T t_expr.date_literal.value.c_str(), t_expr.date_literal.value.size())) { std::stringstream ss; ss << "invalid date literal in partition column, date=" << t_expr.date_literal; - return Status(ss.str()); + return Status::InternalError(ss.str()); } break; } @@ -290,11 +290,11 @@ Status OlapTablePartitionParam::_create_partition_key(const TExprNode& t_expr, T default: { std::stringstream ss; ss << "unsupported partition column node type, type=" << t_expr.node_type; - return Status(ss.str()); + return Status::InternalError(ss.str()); } } *part_key = tuple; - return Status::OK; + return Status::OK(); } std::string OlapTablePartitionParam::debug_string() const { diff --git a/be/src/exec/olap_table_sink.cpp b/be/src/exec/olap_table_sink.cpp index 6e4a7b3aac..1d4033a613 100644 --- a/be/src/exec/olap_table_sink.cpp +++ b/be/src/exec/olap_table_sink.cpp @@ -60,7 +60,7 @@ Status NodeChannel::init(RuntimeState* state) { if (_node_info == nullptr) { std::stringstream ss; ss << "unknown node id, id=" << _node_id; - return Status(ss.str()); + return Status::InternalError(ss.str()); } RowDescriptor row_desc(_tuple_desc, false); _batch.reset(new RowBatch(row_desc, state->batch_size(), _parent->_mem_tracker)); @@ -70,7 +70,7 @@ Status NodeChannel::init(RuntimeState* state) { if (_stub == nullptr) { LOG(WARNING) << "Get rpc stub failed, host=" << _node_info->host << ", port=" << _node_info->brpc_port; - return Status("get rpc stub failed"); + return Status::InternalError("get rpc stub failed"); } // Initialize 
_add_batch_request @@ -78,7 +78,7 @@ Status NodeChannel::init(RuntimeState* state) { _add_batch_request.set_index_id(_index_id); _add_batch_request.set_sender_id(_parent->_sender_id); - return Status::OK; + return Status::OK(); } void NodeChannel::open() { @@ -115,7 +115,7 @@ Status NodeChannel::open_wait() { LOG(WARNING) << "failed to open tablet writer, error=" << berror(_open_closure->cntl.ErrorCode()) << ", error_text=" << _open_closure->cntl.ErrorText(); - return Status("failed to open tablet writer"); + return Status::InternalError("failed to open tablet writer"); } Status status(_open_closure->result.status()); if (_open_closure->unref()) { @@ -141,7 +141,7 @@ Status NodeChannel::add_row(Tuple* input_tuple, int64_t tablet_id) { _batch->get_row(row_no)->set_tuple(0, tuple); _batch->commit_last_row(); _add_batch_request.add_tablet_ids(tablet_id); - return Status::OK; + return Status::OK(); } Status NodeChannel::close(RuntimeState* state) { @@ -194,7 +194,7 @@ void NodeChannel::cancel() { Status NodeChannel::_wait_in_flight_packet() { if (!_has_in_flight_packet) { - return Status::OK; + return Status::OK(); } _add_batch_closure->join(); _has_in_flight_packet = false; @@ -202,7 +202,7 @@ Status NodeChannel::_wait_in_flight_packet() { LOG(WARNING) << "failed to send batch, error=" << berror(_add_batch_closure->cntl.ErrorCode()) << ", error_text=" << _add_batch_closure->cntl.ErrorText(); - return Status("failed to send batch"); + return Status::InternalError("failed to send batch"); } return {_add_batch_closure->result.status()}; } @@ -239,7 +239,7 @@ Status NodeChannel::_send_cur_batch(bool eos) { _next_packet_seq++; _batch->reset(); - return Status::OK; + return Status::OK(); } IndexChannel::~IndexChannel() { @@ -253,7 +253,7 @@ Status IndexChannel::init(RuntimeState* state, auto location = _parent->_location->find_tablet(tablet.tablet_id); if (location == nullptr) { LOG(WARNING) << "unknow tablet, tablet_id=" << tablet.tablet_id; - return Status("unknown 
tablet"); + return Status::InternalError("unknown tablet"); } std::vector channels; for (auto& node_id : location->node_ids) { @@ -274,7 +274,7 @@ Status IndexChannel::init(RuntimeState* state, for (auto& it : _node_channels) { RETURN_IF_ERROR(it.second->init(state)); } - return Status::OK; + return Status::OK(); } Status IndexChannel::open() { @@ -295,7 +295,7 @@ Status IndexChannel::open() { } } } - return Status::OK; + return Status::OK(); } Status IndexChannel::add_row(Tuple* tuple, int64_t tablet_id) { @@ -318,7 +318,7 @@ Status IndexChannel::add_row(Tuple* tuple, int64_t tablet_id) { } } } - return Status::OK; + return Status::OK(); } Status IndexChannel::close(RuntimeState* state) { @@ -411,7 +411,7 @@ Status OlapTableSink::init(const TDataSink& t_sink) { _location = _pool->add(new OlapTableLocationParam(table_sink.location)); _nodes_info = _pool->add(new DorisNodesInfo(table_sink.nodes_info)); - return Status::OK; + return Status::OK(); } Status OlapTableSink::prepare(RuntimeState* state) { @@ -435,14 +435,14 @@ Status OlapTableSink::prepare(RuntimeState* state) { _output_tuple_desc = state->desc_tbl().get_tuple_descriptor(_tuple_desc_id); if (_output_tuple_desc == nullptr) { LOG(WARNING) << "unknown destination tuple descriptor, id=" << _tuple_desc_id; - return Status("unknown destination tuple descriptor"); + return Status::InternalError("unknown destination tuple descriptor"); } if (!_output_expr_ctxs.empty()) { if (_output_expr_ctxs.size() != _output_tuple_desc->slots().size()) { LOG(WARNING) << "number of exprs is not same with slots, num_exprs=" << _output_expr_ctxs.size() << ", num_slots=" << _output_tuple_desc->slots().size(); - return Status("number of exprs is not same with slots"); + return Status::InternalError("number of exprs is not same with slots"); } for (int i = 0; i < _output_expr_ctxs.size(); ++i) { if (!is_type_compatible(_output_expr_ctxs[i]->root()->type().type, @@ -451,7 +451,7 @@ Status OlapTableSink::prepare(RuntimeState* state) { 
<< _output_expr_ctxs[i]->root()->type().type << ", slot_type=" << _output_tuple_desc->slots()[i]->type().type << ", slot_name=" << _output_tuple_desc->slots()[i]->col_name(); - return Status("expr's type is not same with slot's"); + return Status::InternalError("expr's type is not same with slot's"); } } } @@ -518,7 +518,7 @@ Status OlapTableSink::prepare(RuntimeState* state) { _channels.emplace_back(channel); } - return Status::OK; + return Status::OK(); } Status OlapTableSink::open(RuntimeState* state) { @@ -530,7 +530,7 @@ Status OlapTableSink::open(RuntimeState* state) { for (auto channel : _channels) { RETURN_IF_ERROR(channel->open()); } - return Status::OK; + return Status::OK(); } Status OlapTableSink::send(RuntimeState* state, RowBatch* input_batch) { @@ -579,7 +579,7 @@ Status OlapTableSink::send(RuntimeState* state, RowBatch* input_batch) { _number_output_rows++; } } - return Status::OK; + return Status::OK(); } Status OlapTableSink::close(RuntimeState* state, Status close_status) { diff --git a/be/src/exec/partitioned_aggregation_node.cc b/be/src/exec/partitioned_aggregation_node.cc index d39f88ccaf..fbd4638f7b 100644 --- a/be/src/exec/partitioned_aggregation_node.cc +++ b/be/src/exec/partitioned_aggregation_node.cc @@ -38,6 +38,7 @@ #include "runtime/tuple_row.h" #include "udf/udf_internal.h" #include "util/runtime_profile.h" +#include "util/stack_util.h" #include "gen_cpp/Exprs_types.h" #include "gen_cpp/PlanNodes_types.h" @@ -86,7 +87,7 @@ Status PartitionedAggregationNode::init(const TPlanNode& tnode, RuntimeState* st _pool, tnode.agg_node.aggregate_functions[i], &evaluator)); _aggregate_evaluators.push_back(evaluator); } - return Status::OK; + return Status::OK(); } Status PartitionedAggregationNode::prepare(RuntimeState* state) { @@ -210,7 +211,7 @@ Status PartitionedAggregationNode::prepare(RuntimeState* state) { // add_runtime_exec_option("Codegen Enabled"); // } // } - return Status::OK; + return Status::OK(); } Status 
PartitionedAggregationNode::open(RuntimeState* state) { @@ -269,7 +270,7 @@ Status PartitionedAggregationNode::open(RuntimeState* state) { if (!_probe_expr_ctxs.empty()) { RETURN_IF_ERROR(move_hash_partitions(child(0)->rows_returned())); } - return Status::OK; + return Status::OK(); } Status PartitionedAggregationNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { @@ -281,7 +282,7 @@ Status PartitionedAggregationNode::get_next(RuntimeState* state, RowBatch* row_b if (reached_limit()) { *eos = true; - return Status::OK; + return Status::OK(); } ExprContext** ctxs = &_conjunct_ctxs[0]; @@ -306,7 +307,7 @@ Status PartitionedAggregationNode::get_next(RuntimeState* state, RowBatch* row_b row_batch->tuple_data_pool()->acquire_data(_mem_pool.get(), true); *eos = true; COUNTER_SET(_rows_returned_counter, _num_rows_returned); - return Status::OK; + return Status::OK(); } if (_output_iterator.at_end()) { @@ -318,7 +319,7 @@ Status PartitionedAggregationNode::get_next(RuntimeState* state, RowBatch* row_b if (_aggregated_partitions.empty() && _spilled_partitions.empty()) { // No more partitions, all done. *eos = true; - return Status::OK; + return Status::OK(); } // Process next partition. 
RETURN_IF_ERROR(next_partition()); @@ -358,7 +359,7 @@ Status PartitionedAggregationNode::get_next(RuntimeState* state, RowBatch* row_b if (_output_iterator.at_end()) { row_batch->mark_need_to_return(); } - return Status::OK; + return Status::OK(); } void PartitionedAggregationNode::cleanup_hash_tbl( @@ -403,12 +404,12 @@ Status PartitionedAggregationNode::reset(RuntimeState* state) { create_hash_partitions(0); } // return ExecNode::reset(state); - return Status::OK; + return Status::OK(); } Status PartitionedAggregationNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } if (!_singleton_output_tuple_returned) { @@ -474,7 +475,7 @@ Status PartitionedAggregationNode::Partition::init_streams() { // This stream is only used to spill, no need to ever have this pinned. RETURN_IF_ERROR(unaggregated_row_stream->init(parent->id(), parent->runtime_profile(), false)); DCHECK(unaggregated_row_stream->has_write_block()); - return Status::OK; + return Status::OK(); } bool PartitionedAggregationNode::Partition::init_hash_table() { @@ -510,7 +511,7 @@ Status PartitionedAggregationNode::Partition::clean_up() { const vector& evaluators = parent->_aggregate_evaluators; // serialize and copy the spilled partition's stream into the new stream. 
- Status status = Status::OK; + Status status = Status::OK(); bool failed_to_add = false; BufferedTupleStream2* new_stream = parent->_serialize_stream.get(); PartitionedHashTable::Iterator it = hash_tbl->begin(parent->_ht_ctx.get()); @@ -556,7 +557,7 @@ Status PartitionedAggregationNode::Partition::clean_up() { } DCHECK(parent->_serialize_stream->has_write_block()); } - return Status::OK; + return Status::OK(); } Status PartitionedAggregationNode::Partition::spill() { @@ -607,7 +608,7 @@ Status PartitionedAggregationNode::Partition::spill() { if (parent->_num_spilled_partitions->value() == 1) { parent->add_runtime_exec_option("Spilled"); } - return Status::OK; + return Status::OK(); } void PartitionedAggregationNode::Partition::close(bool finalize_rows) { @@ -797,7 +798,7 @@ Status PartitionedAggregationNode::append_spilled_row(BufferedTupleStream2* stre DCHECK(!stream->is_pinned()); DCHECK(stream->has_write_block()); if (LIKELY(stream->add_row(row, &_process_batch_status))) { - return Status::OK; + return Status::OK(); } // Adding fails iff either we hit an error or haven't switched to I/O buffers. @@ -813,7 +814,7 @@ Status PartitionedAggregationNode::append_spilled_row(BufferedTupleStream2* stre // Adding the row should succeed after the I/O buffer switch. 
if (stream->add_row(row, &_process_batch_status)) { - return Status::OK; + return Status::OK(); } DCHECK(!_process_batch_status.ok()); return _process_batch_status; @@ -862,7 +863,7 @@ Status PartitionedAggregationNode::create_hash_partitions(int level) { } COUNTER_UPDATE(_partitions_created, PARTITION_FANOUT); // COUNTER_SET(_max_partition_level, level); - return Status::OK; + return Status::OK(); } Status PartitionedAggregationNode::check_and_resize_hash_partitions(int num_rows, @@ -880,7 +881,7 @@ Status PartitionedAggregationNode::check_and_resize_hash_partitions(int num_rows RETURN_IF_ERROR(spill_partition()); } } - return Status::OK; + return Status::OK(); } int64_t PartitionedAggregationNode::largest_spilled_partition() const { @@ -960,15 +961,12 @@ Status PartitionedAggregationNode::next_partition() { // "Repartitioning level $1. Number of rows $2.", // _id, partition->level + 1, num_input_rows)); // _state->SetMemTrackerExceeded(); - - Status status = Status::MEM_LIMIT_EXCEEDED; stringstream error_msg; error_msg << "Cannot perform aggregation at node with id " << _id << ". " << "Repartitioning did not reduce the size of a spilled partition. " << "Repartitioning level " << partition->level + 1 << ". 
Number of rows " << num_input_rows << " ."; - status.add_error_msg(error_msg.str()); - return status; + return Status::MemoryLimitExceeded(error_msg.str()); } RETURN_IF_ERROR(move_hash_partitions(num_input_rows)); } @@ -980,7 +978,7 @@ Status PartitionedAggregationNode::next_partition() { _output_partition = partition; _output_iterator = _output_partition->hash_tbl->begin(_ht_ctx.get()); COUNTER_UPDATE(_num_hash_buckets, _output_partition->hash_tbl->num_buckets()); - return Status::OK; + return Status::OK(); } template @@ -1008,7 +1006,7 @@ Status PartitionedAggregationNode::process_stream(BufferedTupleStream2* input_st } while (!eos); } input_stream->close(); - return Status::OK; + return Status::OK(); } Status PartitionedAggregationNode::spill_partition() { @@ -1084,7 +1082,7 @@ Status PartitionedAggregationNode::move_hash_partitions(int64_t num_input_rows) } VLOG(2) << ss.str(); _hash_partitions.clear(); - return Status::OK; + return Status::OK(); } void PartitionedAggregationNode::close_partitions() { diff --git a/be/src/exec/partitioned_aggregation_node_ir.cc b/be/src/exec/partitioned_aggregation_node_ir.cc index 10683b43b8..e9837d538f 100644 --- a/be/src/exec/partitioned_aggregation_node_ir.cc +++ b/be/src/exec/partitioned_aggregation_node_ir.cc @@ -29,7 +29,7 @@ Status PartitionedAggregationNode::process_batch_no_grouping( for (int i = 0; i < batch->num_rows(); ++i) { update_tuple(&_agg_fn_ctxs[0], _singleton_output_tuple, batch->get_row(i)); } - return Status::OK; + return Status::OK(); } template @@ -48,7 +48,7 @@ Status PartitionedAggregationNode::process_batch(RowBatch* batch, PartitionedHas RETURN_IF_ERROR(process_row(batch->get_row(i), ht_ctx)); } - return Status::OK; + return Status::OK(); } template @@ -56,11 +56,11 @@ Status PartitionedAggregationNode::process_row(TupleRow* row, PartitionedHashTab uint32_t hash = 0; if (AGGREGATED_ROWS) { if (!ht_ctx->eval_and_hash_build(row, &hash)) { - return Status::OK; + return Status::OK(); } } else { if 
(!ht_ctx->eval_and_hash_probe(row, &hash)) { - return Status::OK; + return Status::OK(); } } @@ -90,7 +90,7 @@ Status PartitionedAggregationNode::process_row(TupleRow* row, PartitionedHashTab } else if (found) { // Row is already in hash table. Do the aggregation and we're done. update_tuple(&dst_partition->agg_fn_ctxs[0], it.get_tuple(), row); - return Status::OK; + return Status::OK(); } // If we are seeing this result row for the first time, we need to construct the @@ -114,7 +114,7 @@ Status PartitionedAggregationNode::add_intermediate_tuple( update_tuple(&partition->agg_fn_ctxs[0], intermediate_tuple, row, AGGREGATED_ROWS); // After copying and initializing the tuple, insert it into the hash table. insert_it.set_tuple(intermediate_tuple, hash); - return Status::OK; + return Status::OK(); } else if (!_process_batch_status.ok()) { return _process_batch_status; } diff --git a/be/src/exec/pl_task_root.cpp b/be/src/exec/pl_task_root.cpp index a60f905460..6867c8fc50 100644 --- a/be/src/exec/pl_task_root.cpp +++ b/be/src/exec/pl_task_root.cpp @@ -45,18 +45,18 @@ Status ExchangeNode::prepare(RuntimeState* state) { DCHECK_GT(_num_senders, 0); _stream_recvr = state->create_recvr(_row_descriptor, _id, _num_senders, config::exchg_node_buffer_size_bytes, runtime_profile()); - return Status::OK; + return Status::OK(); } Status ExchangeNode::open(RuntimeState* state) { SCOPED_TIMER(_runtime_profile->total_time_counter()); RETURN_IF_ERROR(ExecNode::open(state)); - return Status::OK; + return Status::OK(); } Status ExchangeNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } return ExecNode::close(state); } @@ -67,7 +67,7 @@ Status ExchangeNode::get_next(RuntimeState* state, RowBatch* output_batch, bool* if (reached_limit()) { *eos = true; - return Status::OK; + return Status::OK(); } ExprContext* const* ctxs = &_conjunct_ctxs[0]; @@ -102,12 +102,12 @@ Status ExchangeNode::get_next(RuntimeState* state, RowBatch* output_batch, 
bool* if (reached_limit()) { *eos = true; - return Status::OK; + return Status::OK(); } if (output_batch->is_full()) { *eos = false; - return Status::OK; + return Status::OK(); } } @@ -124,13 +124,13 @@ Status ExchangeNode::get_next(RuntimeState* state, RowBatch* output_batch, bool* << " instance_id=" << state->fragment_instance_id(); if (is_cancelled) { - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } *eos = (_input_batch.get() == NULL); if (*eos) { - return Status::OK; + return Status::OK(); } _next_row_idx = 0; diff --git a/be/src/exec/plain_text_line_reader.cpp b/be/src/exec/plain_text_line_reader.cpp index 2ab75768ec..6f7198ca1e 100644 --- a/be/src/exec/plain_text_line_reader.cpp +++ b/be/src/exec/plain_text_line_reader.cpp @@ -186,7 +186,7 @@ Status PlainTextLineReader::read_line(const uint8_t** ptr, size_t* size, bool* e if (_eof || update_eof()) { *size = 0; *eof = true; - return Status::OK; + return Status::OK(); } int found_line_delimiter = 0; size_t offset = 0; @@ -243,7 +243,7 @@ Status PlainTextLineReader::read_line(const uint8_t** ptr, size_t* size, bool* e if (!_stream_end) { std::stringstream ss; ss << "Compressed file has been truncated, which is not allowed"; - return Status(ss.str()); + return Status::InternalError(ss.str()); } else { // last loop we meet stream end, // and now we finished reading file, so we are finished @@ -314,7 +314,7 @@ Status PlainTextLineReader::read_line(const uint8_t** ptr, size_t* size, bool* e << " input_read_bytes: " << input_read_bytes << " decompressed_len: " << decompressed_len; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } if (_more_input_bytes > 0) { @@ -344,7 +344,7 @@ Status PlainTextLineReader::read_line(const uint8_t** ptr, size_t* size, bool* e // update total read bytes _total_read_bytes += *size + found_line_delimiter; - return Status::OK; + return Status::OK(); } } // end of namespace diff --git a/be/src/exec/pre_aggregation_node.cpp 
b/be/src/exec/pre_aggregation_node.cpp index 46305dfe27..3456af20f0 100644 --- a/be/src/exec/pre_aggregation_node.cpp +++ b/be/src/exec/pre_aggregation_node.cpp @@ -107,15 +107,15 @@ PreAggregationNode::~PreAggregationNode() { Status PreAggregationNode::prepare(RuntimeState* state) { if (_construct_fail) { - return Status("construct failed."); + return Status::InternalError("construct failed."); } if (_is_init) { - return Status::OK; + return Status::OK(); } if (NULL == state || _children.size() != 1) { - return Status("input parameter is not OK."); + return Status::InternalError("input parameter is not OK."); } RETURN_IF_ERROR(ExecNode::prepare(state)); @@ -129,7 +129,7 @@ Status PreAggregationNode::prepare(RuntimeState* state) { if (NULL == _build_timer || NULL == _get_results_timer || NULL == _hash_table_buckets_counter || NULL == _hash_table_load_factor_counter) { - return Status("construct timer and counter failed."); + return Status::InternalError("construct timer and counter failed."); } SCOPED_TIMER(_runtime_profile->total_time_counter()); @@ -146,7 +146,7 @@ Status PreAggregationNode::prepare(RuntimeState* state) { true, id(), *state->mem_trackers(), 16384)); if (NULL == _hash_tbl.get()) { - return Status("new one hash table failed."); + return Status::InternalError("new one hash table failed."); } // Determine the number of string slots in the output @@ -182,7 +182,7 @@ Status PreAggregationNode::prepare(RuntimeState* state) { } if (0 == _children_tuple.size()) { - return Status("children tuple size is zero."); + return Status::InternalError("children tuple size is zero."); } _tuple_row_size = _row_descriptor.tuple_descriptors().size() * sizeof(Tuple*); @@ -191,7 +191,7 @@ Status PreAggregationNode::prepare(RuntimeState* state) { _tuple_pool.reset(new(std::nothrow) MemPool()); if (NULL == _tuple_pool.get()) { - return Status("no memory for Mempool."); + return Status::InternalError("no memory for Mempool."); } 
_tuple_pool->set_limits(*state->mem_trackers()); @@ -202,7 +202,7 @@ Status PreAggregationNode::prepare(RuntimeState* state) { } _is_init = true; - return Status::OK; + return Status::OK(); } // Open Function, used to build hash table, Do not read in this Operation. @@ -213,7 +213,7 @@ Status PreAggregationNode::open(RuntimeState* state) { // open child ready to read data, only open, do nothing. RETURN_IF_ERROR(_children[0]->open(state)); - return Status::OK; + return Status::OK(); } // GetNext interface, read data from _hash_tbl @@ -226,7 +226,7 @@ Status PreAggregationNode::get_next(RuntimeState* state, RowBatch* row_batch, bo if ((!_output_iterator.has_next() && _child_eos) || reached_limit()) { *eos = true; - return Status::OK; + return Status::OK(); } // if hash table hash data not read, read hash table first. @@ -331,12 +331,12 @@ read_from_hash: *eos = (!_output_iterator.has_next() && _child_eos) || reached_limit(); COUNTER_SET(_rows_returned_counter, _num_rows_returned); - return Status::OK; + return Status::OK(); } Status PreAggregationNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::CLOSE)); @@ -359,7 +359,7 @@ Status PreAggregationNode::construct_single_row() { _singleton_agg_row = reinterpret_cast(_tuple_pool->allocate(_tuple_row_size)); if (NULL == _singleton_agg_row) { - return Status("new memory failed."); + return Status::InternalError("new memory failed."); } for (int i = 0; i < _row_descriptor.tuple_descriptors().size(); ++i) { @@ -367,7 +367,7 @@ Status PreAggregationNode::construct_single_row() { _row_descriptor.tuple_descriptors()[i]->byte_size())); if (NULL == tuple) { - return Status("new one tuple failed."); + return Status::InternalError("new one tuple failed."); } _singleton_agg_row->set_tuple(i, tuple); @@ -409,7 +409,7 @@ Status PreAggregationNode::construct_single_row() { RawValue::write(default_value_ptr, value, 
agg_expr->get_child(0)->type(), NULL); } - return Status::OK; + return Status::OK(); } /** @@ -453,7 +453,7 @@ Status PreAggregationNode::process_row_batch_no_grouping(RowBatch* batch) { RETURN_IF_ERROR(update_agg_row(_singleton_agg_row, batch->get_row(i))); } - return Status::OK; + return Status::OK(); } Status PreAggregationNode::process_row_batch_with_grouping(RowBatch* batch) { @@ -466,7 +466,7 @@ Status PreAggregationNode::process_row_batch_with_grouping(RowBatch* batch) { agg_row = construct_row(probe_row); if (NULL == agg_row) { - return Status("Construct row failed."); + return Status::InternalError("Construct row failed."); } _hash_tbl->insert(agg_row); @@ -476,13 +476,13 @@ Status PreAggregationNode::process_row_batch_with_grouping(RowBatch* batch) { } } - return Status::OK; + return Status::OK(); } // Function used to update Aggregate Row in hash table Status PreAggregationNode::update_agg_row(TupleRow* agg_row, TupleRow* probe_row) { if (NULL == agg_row) { - return Status("internal error: input pointer is NULL"); + return Status::InternalError("internal error: input pointer is NULL"); } // compute all aggregate expr @@ -503,7 +503,7 @@ Status PreAggregationNode::update_agg_row(TupleRow* agg_row, TupleRow* probe_row DCHECK(slot != NULL); if (NULL == slot) { - return Status("get slot pointer is NULL."); + return Status::InternalError("get slot pointer is NULL."); } // switch opcode @@ -553,7 +553,7 @@ Status PreAggregationNode::update_agg_row(TupleRow* agg_row, TupleRow* probe_row default: LOG(WARNING) << "invalid type: " << type_to_string(agg_expr->type()); - return Status("unknown type"); + return Status::InternalError("unknown type"); } break; @@ -603,7 +603,7 @@ Status PreAggregationNode::update_agg_row(TupleRow* agg_row, TupleRow* probe_row default: LOG(WARNING) << "invalid type: " << type_to_string(agg_expr->type()); - return Status("unknown type"); + return Status::InternalError("unknown type"); } break; @@ -628,18 +628,18 @@ Status 
PreAggregationNode::update_agg_row(TupleRow* agg_row, TupleRow* probe_row default: LOG(WARNING) << "invalid type: " << type_to_string(agg_expr->type()); - return Status("Aggsum not valid."); + return Status::InternalError("Aggsum not valid."); } break; default: LOG(WARNING) << "bad aggregate operator: " << agg_expr->agg_op(); - return Status("unknown agg op."); + return Status::InternalError("unknown agg op."); } } - return Status::OK; + return Status::OK(); } void PreAggregationNode::debug_string(int indentation_level, stringstream* out) const { diff --git a/be/src/exec/read_write_util.h b/be/src/exec/read_write_util.h index 72681771f2..09d0f0e277 100644 --- a/be/src/exec/read_write_util.h +++ b/be/src/exec/read_write_util.h @@ -172,7 +172,7 @@ inline bool ReadWriteUtil::read_zlong(uint8_t** buf, int* buf_len, int64_t* val, DCHECK_LE(shift, 64); if (UNLIKELY(*buf_len < 1)) { - *status = Status("Insufficient buffer length"); + *status = Status::InternalError("Insufficient buffer length"); return false; } @@ -194,7 +194,7 @@ inline bool ReadWriteUtil::read(uint8_t** buf, int* buf_len, T* val, Status* sta if (UNLIKELY(val_len > *buf_len)) { std::stringstream ss; ss << "Cannot read " << val_len << " bytes, buffer length is " << *buf_len; - *status = Status(ss.str()); + *status = Status::InternalError(ss.str()); return false; } @@ -211,7 +211,7 @@ inline bool ReadWriteUtil::skip_bytes(uint8_t** buf, int* buf_len, int num_bytes if (UNLIKELY(num_bytes > *buf_len)) { std::stringstream ss; ss << "Cannot skip " << num_bytes << " bytes, buffer length is " << *buf_len; - *status = Status(ss.str()); + *status = Status::InternalError(ss.str()); return false; } diff --git a/be/src/exec/scan_node.cpp b/be/src/exec/scan_node.cpp index 1782550cbf..be49d5d17c 100644 --- a/be/src/exec/scan_node.cpp +++ b/be/src/exec/scan_node.cpp @@ -63,7 +63,7 @@ Status ScanNode::prepare(RuntimeState* state) { _num_disks_accessed_counter = ADD_COUNTER(runtime_profile(), 
_s_num_disks_accessed_counter, TUnit::UNIT); - return Status::OK; + return Status::OK(); } } diff --git a/be/src/exec/schema_scan_node.cpp b/be/src/exec/schema_scan_node.cpp index c99b19c9ad..24228749c0 100644 --- a/be/src/exec/schema_scan_node.cpp +++ b/be/src/exec/schema_scan_node.cpp @@ -85,16 +85,16 @@ Status SchemaScanNode::init(const TPlanNode& tnode, RuntimeState* state) { if (tnode.schema_scan_node.__isset.thread_id) { _scanner_param.thread_id = tnode.schema_scan_node.thread_id; } - return Status::OK; + return Status::OK(); } Status SchemaScanNode::prepare(RuntimeState* state) { if (_is_init) { - return Status::OK; + return Status::OK(); } if (NULL == state) { - return Status("input pointer is NULL."); + return Status::InternalError("input pointer is NULL."); } RETURN_IF_ERROR(ScanNode::prepare(state)); @@ -103,14 +103,14 @@ Status SchemaScanNode::prepare(RuntimeState* state) { _tuple_pool.reset(new(std::nothrow) MemPool(mem_tracker())); if (NULL == _tuple_pool.get()) { - return Status("Allocate MemPool failed."); + return Status::InternalError("Allocate MemPool failed."); } // get dest tuple desc _dest_tuple_desc = state->desc_tbl().get_tuple_descriptor(_tuple_id); if (NULL == _dest_tuple_desc) { - return Status("Failed to get tuple descriptor."); + return Status::InternalError("Failed to get tuple descriptor."); } _slot_num = _dest_tuple_desc->slots().size(); @@ -119,14 +119,14 @@ Status SchemaScanNode::prepare(RuntimeState* state) { static_cast(_dest_tuple_desc->table_desc()); if (NULL == schema_table) { - return Status("Failed to get schema table descriptor."); + return Status::InternalError("Failed to get schema table descriptor."); } // new one scanner _schema_scanner.reset(SchemaScanner::create(schema_table->schema_table_type())); if (NULL == _schema_scanner.get()) { - return Status("schema scanner get NULL pointer."); + return Status::InternalError("schema scanner get NULL pointer."); } RETURN_IF_ERROR(_schema_scanner->init(&_scanner_param, _pool)); 
@@ -134,13 +134,13 @@ Status SchemaScanNode::prepare(RuntimeState* state) { _src_tuple_desc = _schema_scanner->tuple_desc(); if (NULL == _src_tuple_desc) { - return Status("failed to get src schema tuple desc."); + return Status::InternalError("failed to get src schema tuple desc."); } _src_tuple = reinterpret_cast(new(std::nothrow) char[_src_tuple_desc->byte_size()]); if (NULL == _src_tuple) { - return Status("new src tuple failed."); + return Status::InternalError("new src tuple failed."); } // if src tuple desc slots is zero, it's the dummy slots. @@ -165,13 +165,13 @@ Status SchemaScanNode::prepare(RuntimeState* state) { if (j >= _src_tuple_desc->slots().size()) { LOG(WARNING) << "no match column for this column(" << _dest_tuple_desc->slots()[i]->col_name() << ")"; - return Status("no match column for this column."); + return Status::InternalError("no match column for this column."); } if (_src_tuple_desc->slots()[j]->type().type != _dest_tuple_desc->slots()[i]->type().type) { LOG(WARNING) << "schema not match. 
input is " << _src_tuple_desc->slots()[j]->type() << " and output is " << _dest_tuple_desc->slots()[i]->type(); - return Status("schema not match."); + return Status::InternalError("schema not match."); } _index_map[i] = j; } @@ -180,16 +180,16 @@ Status SchemaScanNode::prepare(RuntimeState* state) { _tuple_idx = 0; _is_init = true; - return Status::OK; + return Status::OK(); } Status SchemaScanNode::open(RuntimeState* state) { if (!_is_init) { - return Status("Open before Init."); + return Status::InternalError("Open before Init."); } if (NULL == state) { - return Status("input pointer is NULL."); + return Status::InternalError("input pointer is NULL."); } SCOPED_TIMER(_runtime_profile->total_time_counter()); @@ -231,11 +231,11 @@ void SchemaScanNode::copy_one_row() { Status SchemaScanNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { if (!_is_init) { - return Status("GetNext before Init."); + return Status::InternalError("GetNext before Init."); } if (NULL == state || NULL == row_batch || NULL == eos) { - return Status("input pointer is NULL."); + return Status::InternalError("input pointer is NULL."); } RETURN_IF_CANCELLED(state); @@ -244,7 +244,7 @@ Status SchemaScanNode::get_next(RuntimeState* state, RowBatch* row_batch, if (reached_limit()) { *eos = true; - return Status::OK; + return Status::OK(); } // create new tuple buffer for row_batch @@ -252,7 +252,7 @@ Status SchemaScanNode::get_next(RuntimeState* state, RowBatch* row_batch, void* tuple_buffer = _tuple_pool->allocate(tuple_buffer_size); if (NULL == tuple_buffer) { - return Status("Allocate tuple buffer failed."); + return Status::InternalError("Allocate tuple buffer failed."); } // no use to clear, because CopyOneRow can clear @@ -268,7 +268,7 @@ Status SchemaScanNode::get_next(RuntimeState* state, RowBatch* row_batch, // next get_next() call row_batch->tuple_data_pool()->acquire_data(_tuple_pool.get(), !reached_limit()); *eos = reached_limit(); - return Status::OK; + return 
Status::OK(); } RETURN_IF_ERROR(_schema_scanner->get_next_row(_src_tuple, @@ -277,7 +277,7 @@ Status SchemaScanNode::get_next(RuntimeState* state, RowBatch* row_batch, if (scanner_eos) { row_batch->tuple_data_pool()->acquire_data(_tuple_pool.get(), false); *eos = true; - return Status::OK; + return Status::OK(); } int row_idx = row_batch->add_row(); @@ -297,12 +297,12 @@ Status SchemaScanNode::get_next(RuntimeState* state, RowBatch* row_batch, } } - return Status::OK; + return Status::OK(); } Status SchemaScanNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::CLOSE)); SCOPED_TIMER(_runtime_profile->total_time_counter()); @@ -326,7 +326,7 @@ void SchemaScanNode::debug_string(int indentation_level, stringstream* out) cons } Status SchemaScanNode::set_scan_ranges(const vector& scan_ranges) { - return Status::OK; + return Status::OK(); } } diff --git a/be/src/exec/schema_scanner.cpp b/be/src/exec/schema_scanner.cpp index 574a3c5523..c2b99da6b1 100644 --- a/be/src/exec/schema_scanner.cpp +++ b/be/src/exec/schema_scanner.cpp @@ -41,39 +41,39 @@ SchemaScanner::~SchemaScanner() { Status SchemaScanner::start(RuntimeState* state) { if (!_is_init) { - return Status("call Start before Init."); + return Status::InternalError("call Start before Init."); } - return Status::OK; + return Status::OK(); } Status SchemaScanner::get_next_row(Tuple* tuple, MemPool* pool, bool* eos) { if (!_is_init) { - return Status("used before initialized."); + return Status::InternalError("used before initialized."); } if (NULL == tuple || NULL == pool || NULL == eos) { - return Status("input pointer is NULL."); + return Status::InternalError("input pointer is NULL."); } *eos = true; - return Status::OK; + return Status::OK(); } Status SchemaScanner::init(SchemaScannerParam* param, ObjectPool* pool) { if (_is_init) { - return Status::OK; + return Status::OK(); } if (NULL == param || NULL == pool || NULL 
== _columns) { - return Status("invalid parameter"); + return Status::InternalError("invalid parameter"); } RETURN_IF_ERROR(create_tuple_desc(pool)); _param = param; _is_init = true; - return Status::OK; + return Status::OK(); } SchemaScanner* SchemaScanner::create(TSchemaTableType::type type) { @@ -139,7 +139,7 @@ Status SchemaScanner::create_tuple_desc(ObjectPool* pool) { SlotDescriptor* slot = pool->add(new(std::nothrow) SlotDescriptor(t_slot_desc)); if (NULL == slot) { - return Status("no memory for _tuple_desc."); + return Status::InternalError("no memory for _tuple_desc."); } slots.push_back(slot); @@ -152,14 +152,14 @@ Status SchemaScanner::create_tuple_desc(ObjectPool* pool) { _tuple_desc = pool->add(new(std::nothrow) TupleDescriptor(t_tuple_desc)); if (NULL == _tuple_desc) { - return Status("no memory for _tuple_desc."); + return Status::InternalError("no memory for _tuple_desc."); } for (int i = 0; i < slots.size(); ++i) { _tuple_desc->add_slot(slots[i]); } - return Status::OK; + return Status::OK(); } } diff --git a/be/src/exec/schema_scanner/schema_charsets_scanner.cpp b/be/src/exec/schema_scanner/schema_charsets_scanner.cpp index 1930f5a69f..ed015273c3 100644 --- a/be/src/exec/schema_scanner/schema_charsets_scanner.cpp +++ b/be/src/exec/schema_scanner/schema_charsets_scanner.cpp @@ -50,7 +50,7 @@ Status SchemaCharsetsScanner::fill_one_row(Tuple *tuple, MemPool *pool) { int len = strlen(_s_charsets[_index].charset); str_slot->ptr = (char *)pool->allocate(len + 1); if (NULL == str_slot->ptr) { - return Status("No Memory."); + return Status::InternalError("No Memory."); } memcpy(str_slot->ptr, _s_charsets[_index].charset, len + 1); str_slot->len = len; @@ -62,7 +62,7 @@ Status SchemaCharsetsScanner::fill_one_row(Tuple *tuple, MemPool *pool) { int len = strlen(_s_charsets[_index].default_collation); str_slot->ptr = (char *)pool->allocate(len + 1); if (NULL == str_slot->ptr) { - return Status("No Memory."); + return Status::InternalError("No Memory."); } 
memcpy(str_slot->ptr, _s_charsets[_index].default_collation, len + 1); str_slot->len = len; @@ -74,7 +74,7 @@ Status SchemaCharsetsScanner::fill_one_row(Tuple *tuple, MemPool *pool) { int len = strlen(_s_charsets[_index].description); str_slot->ptr = (char *)pool->allocate(len + 1); if (NULL == str_slot->ptr) { - return Status("No Memory."); + return Status::InternalError("No Memory."); } memcpy(str_slot->ptr, _s_charsets[_index].description, len + 1); str_slot->len = len; @@ -85,19 +85,19 @@ Status SchemaCharsetsScanner::fill_one_row(Tuple *tuple, MemPool *pool) { *(int64_t*)slot = _s_charsets[_index].maxlen; } _index++; - return Status::OK; + return Status::OK(); } Status SchemaCharsetsScanner::get_next_row(Tuple *tuple, MemPool *pool, bool *eos) { if (!_is_init) { - return Status("call this before initial."); + return Status::InternalError("call this before initial."); } if (NULL == _s_charsets[_index].charset) { *eos = true; - return Status::OK; + return Status::OK(); } if (NULL == tuple || NULL == pool || NULL == eos) { - return Status("invalid parameter."); + return Status::InternalError("invalid parameter."); } *eos = false; return fill_one_row(tuple, pool); diff --git a/be/src/exec/schema_scanner/schema_collations_scanner.cpp b/be/src/exec/schema_scanner/schema_collations_scanner.cpp index deb4ce02ed..5418edfc84 100644 --- a/be/src/exec/schema_scanner/schema_collations_scanner.cpp +++ b/be/src/exec/schema_scanner/schema_collations_scanner.cpp @@ -53,7 +53,7 @@ Status SchemaCollationsScanner::fill_one_row(Tuple *tuple, MemPool *pool) { int len = strlen(_s_collations[_index].name); str_slot->ptr = (char *)pool->allocate(len + 1); if (NULL == str_slot->ptr) { - return Status("No Memory."); + return Status::InternalError("No Memory."); } memcpy(str_slot->ptr, _s_collations[_index].name, len + 1); str_slot->len = len; @@ -65,7 +65,7 @@ Status SchemaCollationsScanner::fill_one_row(Tuple *tuple, MemPool *pool) { int len = strlen(_s_collations[_index].charset); 
str_slot->ptr = (char *)pool->allocate(len + 1); if (NULL == str_slot->ptr) { - return Status("No Memory."); + return Status::InternalError("No Memory."); } memcpy(str_slot->ptr, _s_collations[_index].charset, len + 1); str_slot->len = len; @@ -82,7 +82,7 @@ Status SchemaCollationsScanner::fill_one_row(Tuple *tuple, MemPool *pool) { int len = strlen(_s_collations[_index].is_default); str_slot->ptr = (char *)pool->allocate(len + 1); if (NULL == str_slot->ptr) { - return Status("No Memory."); + return Status::InternalError("No Memory."); } memcpy(str_slot->ptr, _s_collations[_index].is_default, len + 1); str_slot->len = len; @@ -94,7 +94,7 @@ Status SchemaCollationsScanner::fill_one_row(Tuple *tuple, MemPool *pool) { int len = strlen(_s_collations[_index].is_compile); str_slot->ptr = (char *)pool->allocate(len + 1); if (NULL == str_slot->ptr) { - return Status("No Memory."); + return Status::InternalError("No Memory."); } memcpy(str_slot->ptr, _s_collations[_index].is_compile, len + 1); str_slot->len = len; @@ -105,19 +105,19 @@ Status SchemaCollationsScanner::fill_one_row(Tuple *tuple, MemPool *pool) { *(int64_t*)slot = _s_collations[_index].sortlen; } _index++; - return Status::OK; + return Status::OK(); } Status SchemaCollationsScanner::get_next_row(Tuple *tuple, MemPool *pool, bool *eos) { if (!_is_init) { - return Status("call this before initial."); + return Status::InternalError("call this before initial."); } if (NULL == _s_collations[_index].name) { *eos = true; - return Status::OK; + return Status::OK(); } if (NULL == tuple || NULL == pool || NULL == eos) { - return Status("invalid parameter."); + return Status::InternalError("invalid parameter."); } *eos = false; return fill_one_row(tuple, pool); diff --git a/be/src/exec/schema_scanner/schema_columns_scanner.cpp b/be/src/exec/schema_scanner/schema_columns_scanner.cpp index 793b4a1672..cea4f0c7dd 100644 --- a/be/src/exec/schema_scanner/schema_columns_scanner.cpp +++ 
b/be/src/exec/schema_scanner/schema_columns_scanner.cpp @@ -62,7 +62,7 @@ SchemaColumnsScanner::~SchemaColumnsScanner() { Status SchemaColumnsScanner::start(RuntimeState *state) { if (!_is_init) { - return Status("schema columns scanner not inited."); + return Status::InternalError("schema columns scanner not inited."); } // get all database TGetDbsParams db_params; @@ -77,10 +77,10 @@ Status SchemaColumnsScanner::start(RuntimeState *state) { RETURN_IF_ERROR(SchemaHelper::get_db_names(*(_param->ip), _param->port, db_params, &_db_result)); } else { - return Status("IP or port dosn't exists"); + return Status::InternalError("IP or port dosn't exists"); } - return Status::OK; + return Status::OK(); } std::string SchemaColumnsScanner::type_to_string(TColumnDesc &desc) { @@ -313,7 +313,7 @@ Status SchemaColumnsScanner::fill_one_row(Tuple *tuple, MemPool *pool) { } } _column_index++; - return Status::OK; + return Status::OK(); } Status SchemaColumnsScanner::get_new_desc() { @@ -331,11 +331,11 @@ Status SchemaColumnsScanner::get_new_desc() { RETURN_IF_ERROR(SchemaHelper::describe_table(*(_param->ip), _param->port, desc_params, &_desc_result)); } else { - return Status("IP or port dosn't exists"); + return Status::InternalError("IP or port dosn't exists"); } _column_index = 0; - return Status::OK; + return Status::OK(); } Status SchemaColumnsScanner::get_new_table() { @@ -355,18 +355,18 @@ Status SchemaColumnsScanner::get_new_table() { RETURN_IF_ERROR(SchemaHelper::get_table_names(*(_param->ip), _param->port, table_params, &_table_result)); } else { - return Status("IP or port dosn't exists"); + return Status::InternalError("IP or port dosn't exists"); } _table_index = 0; - return Status::OK; + return Status::OK(); } Status SchemaColumnsScanner::get_next_row(Tuple *tuple, MemPool *pool, bool *eos) { if (!_is_init) { - return Status("use this class before inited."); + return Status::InternalError("use this class before inited."); } if (NULL == tuple || NULL == pool || NULL 
== eos) { - return Status("input parameter is NULL."); + return Status::InternalError("input parameter is NULL."); } while (_column_index >= _desc_result.columns.size()) { if (_table_index >= _table_result.tables.size()) { @@ -374,7 +374,7 @@ Status SchemaColumnsScanner::get_next_row(Tuple *tuple, MemPool *pool, bool *eos RETURN_IF_ERROR(get_new_table()); } else { *eos = true; - return Status::OK; + return Status::OK(); } } else { RETURN_IF_ERROR(get_new_desc()); diff --git a/be/src/exec/schema_scanner/schema_dummy_scanner.cpp b/be/src/exec/schema_scanner/schema_dummy_scanner.cpp index 6733757b75..957b581a3e 100644 --- a/be/src/exec/schema_scanner/schema_dummy_scanner.cpp +++ b/be/src/exec/schema_scanner/schema_dummy_scanner.cpp @@ -35,12 +35,12 @@ SchemaDummyScanner::~SchemaDummyScanner() { } Status SchemaDummyScanner::start() { - return Status::OK; + return Status::OK(); } Status SchemaDummyScanner::get_next_row(Tuple *tuple, MemPool *pool, bool *eos) { *eos = true; - return Status::OK; + return Status::OK(); } } diff --git a/be/src/exec/schema_scanner/schema_schemata_scanner.cpp b/be/src/exec/schema_scanner/schema_schemata_scanner.cpp index f1794e5058..2048f55f07 100644 --- a/be/src/exec/schema_scanner/schema_schemata_scanner.cpp +++ b/be/src/exec/schema_scanner/schema_schemata_scanner.cpp @@ -41,7 +41,7 @@ SchemaSchemataScanner::~SchemaSchemataScanner() { Status SchemaSchemataScanner::start(RuntimeState *state) { if (!_is_init) { - return Status("used before initial."); + return Status::InternalError("used before initial."); } TGetDbsParams db_params; if (NULL != _param->wild) { @@ -57,10 +57,10 @@ Status SchemaSchemataScanner::start(RuntimeState *state) { RETURN_IF_ERROR(SchemaHelper::get_db_names(*(_param->ip), _param->port, db_params, &_db_result)); } else { - return Status("IP or port dosn't exists"); + return Status::InternalError("IP or port dosn't exists"); } - return Status::OK; + return Status::OK(); } Status SchemaSchemataScanner::fill_one_row(Tuple 
*tuple, MemPool *pool) { @@ -87,7 +87,7 @@ Status SchemaSchemataScanner::fill_one_row(Tuple *tuple, MemPool *pool) { str_slot->len = strlen("utf8") + 1; str_slot->ptr = (char *)pool->allocate(str_slot->len); if (NULL == str_slot->ptr) { - return Status("Allocate memory failed."); + return Status::InternalError("Allocate memory failed."); } memcpy(str_slot->ptr, "utf8", str_slot->len); } @@ -98,7 +98,7 @@ Status SchemaSchemataScanner::fill_one_row(Tuple *tuple, MemPool *pool) { str_slot->len = strlen("utf8_general_ci") + 1; str_slot->ptr = (char *)pool->allocate(str_slot->len); if (NULL == str_slot->ptr) { - return Status("Allocate memory failed."); + return Status::InternalError("Allocate memory failed."); } memcpy(str_slot->ptr, "utf8_general_ci", str_slot->len); } @@ -107,19 +107,19 @@ Status SchemaSchemataScanner::fill_one_row(Tuple *tuple, MemPool *pool) { tuple->set_null(_tuple_desc->slots()[4]->null_indicator_offset()); } _db_index++; - return Status::OK; + return Status::OK(); } Status SchemaSchemataScanner::get_next_row(Tuple *tuple, MemPool *pool, bool *eos) { if (!_is_init) { - return Status("Used before Initialized."); + return Status::InternalError("Used before Initialized."); } if (NULL == tuple || NULL == pool || NULL == eos) { - return Status("input pointer is NULL."); + return Status::InternalError("input pointer is NULL."); } if (_db_index >= _db_result.dbs.size()) { *eos = true; - return Status::OK; + return Status::OK(); } *eos = false; return fill_one_row(tuple, pool); diff --git a/be/src/exec/schema_scanner/schema_tables_scanner.cpp b/be/src/exec/schema_scanner/schema_tables_scanner.cpp index 9afbf11cb7..de9340a973 100644 --- a/be/src/exec/schema_scanner/schema_tables_scanner.cpp +++ b/be/src/exec/schema_scanner/schema_tables_scanner.cpp @@ -61,7 +61,7 @@ SchemaTablesScanner::~SchemaTablesScanner() { Status SchemaTablesScanner::start(RuntimeState *state) { if (!_is_init) { - return Status("used before initialized."); + return 
Status::InternalError("used before initialized."); } TGetDbsParams db_params; if (NULL != _param->db) { @@ -78,9 +78,9 @@ Status SchemaTablesScanner::start(RuntimeState *state) { RETURN_IF_ERROR(SchemaHelper::get_db_names(*(_param->ip), _param->port, db_params, &_db_result)); } else { - return Status("IP or port dosn't exists"); + return Status::InternalError("IP or port dosn't exists"); } - return Status::OK; + return Status::OK(); } Status SchemaTablesScanner::fill_one_row(Tuple *tuple, MemPool *pool) { @@ -108,7 +108,7 @@ Status SchemaTablesScanner::fill_one_row(Tuple *tuple, MemPool *pool) { str_slot->len = src->length(); str_slot->ptr = (char *)pool->allocate(str_slot->len); if (NULL == str_slot->ptr) { - return Status("Allocate memcpy failed."); + return Status::InternalError("Allocate memcpy failed."); } memcpy(str_slot->ptr, src->c_str(), str_slot->len); } @@ -120,7 +120,7 @@ Status SchemaTablesScanner::fill_one_row(Tuple *tuple, MemPool *pool) { str_slot->len = src->length(); str_slot->ptr = (char *)pool->allocate(str_slot->len); if (NULL == str_slot->ptr) { - return Status("Allocate memcpy failed."); + return Status::InternalError("Allocate memcpy failed."); } memcpy(str_slot->ptr, src->c_str(), str_slot->len); } @@ -132,7 +132,7 @@ Status SchemaTablesScanner::fill_one_row(Tuple *tuple, MemPool *pool) { str_slot->len = src->length(); str_slot->ptr = (char *)pool->allocate(str_slot->len); if (NULL == str_slot->ptr) { - return Status("Allocate memcpy failed."); + return Status::InternalError("Allocate memcpy failed."); } memcpy(str_slot->ptr, src->c_str(), str_slot->len); } else { @@ -209,13 +209,13 @@ Status SchemaTablesScanner::fill_one_row(Tuple *tuple, MemPool *pool) { } else { str_slot->ptr = (char *)pool->allocate(str_slot->len); if (NULL == str_slot->ptr) { - return Status("Allocate memcpy failed."); + return Status::InternalError("Allocate memcpy failed."); } memcpy(str_slot->ptr, src->c_str(), str_slot->len); } } _table_index++; - return 
Status::OK; + return Status::OK(); } Status SchemaTablesScanner::get_new_table() { @@ -235,25 +235,25 @@ Status SchemaTablesScanner::get_new_table() { RETURN_IF_ERROR(SchemaHelper::list_table_status(*(_param->ip), _param->port, table_params, &_table_result)); } else { - return Status("IP or port dosn't exists"); + return Status::InternalError("IP or port dosn't exists"); } _table_index = 0; - return Status::OK; + return Status::OK(); } Status SchemaTablesScanner::get_next_row(Tuple *tuple, MemPool *pool, bool *eos) { if (!_is_init) { - return Status("Used before initialized."); + return Status::InternalError("Used before initialized."); } if (NULL == tuple || NULL == pool || NULL == eos) { - return Status("input pointer is NULL."); + return Status::InternalError("input pointer is NULL."); } while (_table_index >= _table_result.tables.size()) { if (_db_index < _db_result.dbs.size()) { RETURN_IF_ERROR(get_new_table()); } else { *eos = true; - return Status::OK; + return Status::OK(); } } *eos = false; diff --git a/be/src/exec/schema_scanner/schema_variables_scanner.cpp b/be/src/exec/schema_scanner/schema_variables_scanner.cpp index 8604b712a7..13e24e1cb7 100644 --- a/be/src/exec/schema_scanner/schema_variables_scanner.cpp +++ b/be/src/exec/schema_scanner/schema_variables_scanner.cpp @@ -56,10 +56,10 @@ Status SchemaVariablesScanner::start(RuntimeState *state) { RETURN_IF_ERROR(SchemaHelper::show_varialbes(*(_param->ip), _param->port, var_params, &_var_result)); } else { - return Status("IP or port dosn't exists"); + return Status::InternalError("IP or port dosn't exists"); } _begin = _var_result.variables.begin(); - return Status::OK; + return Status::OK(); } Status SchemaVariablesScanner::fill_one_row(Tuple *tuple, MemPool *pool) { @@ -70,7 +70,7 @@ Status SchemaVariablesScanner::fill_one_row(Tuple *tuple, MemPool *pool) { int len = strlen(_begin->first.c_str()); str_slot->ptr = (char *)pool->allocate(len + 1); if (NULL == str_slot->ptr) { - return Status("No 
Memory."); + return Status::InternalError("No Memory."); } memcpy(str_slot->ptr, _begin->first.c_str(), len + 1); str_slot->len = len; @@ -82,25 +82,25 @@ Status SchemaVariablesScanner::fill_one_row(Tuple *tuple, MemPool *pool) { int len = strlen(_begin->second.c_str()); str_slot->ptr = (char *)pool->allocate(len + 1); if (NULL == str_slot->ptr) { - return Status("No Memory."); + return Status::InternalError("No Memory."); } memcpy(str_slot->ptr, _begin->second.c_str(), len + 1); str_slot->len = len; } ++_begin; - return Status::OK; + return Status::OK(); } Status SchemaVariablesScanner::get_next_row(Tuple *tuple, MemPool *pool, bool *eos) { if (!_is_init) { - return Status("call this before initial."); + return Status::InternalError("call this before initial."); } if (_begin == _var_result.variables.end()) { *eos = true; - return Status::OK; + return Status::OK(); } if (NULL == tuple || NULL == pool || NULL == eos) { - return Status("invalid parameter."); + return Status::InternalError("invalid parameter."); } *eos = false; return fill_one_row(tuple, pool); diff --git a/be/src/exec/select_node.cpp b/be/src/exec/select_node.cpp index b962e5e3d3..933f97064f 100644 --- a/be/src/exec/select_node.cpp +++ b/be/src/exec/select_node.cpp @@ -36,14 +36,14 @@ Status SelectNode::prepare(RuntimeState* state) { RETURN_IF_ERROR(ExecNode::prepare(state)); _child_row_batch.reset( new RowBatch(child(0)->row_desc(), state->batch_size(), mem_tracker())); - return Status::OK; + return Status::OK(); } Status SelectNode::open(RuntimeState* state) { RETURN_IF_ERROR(exec_debug_action(TExecNodePhase::OPEN)); RETURN_IF_ERROR(ExecNode::open(state)); RETURN_IF_ERROR(child(0)->open(state)); - return Status::OK; + return Status::OK(); } Status SelectNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { @@ -56,7 +56,7 @@ Status SelectNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) // new ones _child_row_batch->transfer_resource_ownership(row_batch); *eos = 
true; - return Status::OK; + return Status::OK(); } *eos = false; @@ -69,7 +69,7 @@ Status SelectNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) _child_row_batch->transfer_resource_ownership(row_batch); _child_row_batch->reset(); if (row_batch->at_capacity()) { - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(child(0)->get_next(state, _child_row_batch.get(), &_child_eos)); } @@ -80,18 +80,18 @@ Status SelectNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) if (*eos) { _child_row_batch->transfer_resource_ownership(row_batch); } - return Status::OK; + return Status::OK(); } if (_child_eos) { // finished w/ last child row batch, and child eos is true _child_row_batch->transfer_resource_ownership(row_batch); *eos = true; - return Status::OK; + return Status::OK(); } } - return Status::OK; + return Status::OK(); } bool SelectNode::copy_rows(RowBatch* output_batch) { @@ -133,7 +133,7 @@ bool SelectNode::copy_rows(RowBatch* output_batch) { Status SelectNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } _child_row_batch.reset(); return ExecNode::close(state); diff --git a/be/src/exec/sort_exec_exprs.cpp b/be/src/exec/sort_exec_exprs.cpp index 3e57824e33..5a3d62c064 100644 --- a/be/src/exec/sort_exec_exprs.cpp +++ b/be/src/exec/sort_exec_exprs.cpp @@ -38,14 +38,14 @@ Status SortExecExprs::init( } else { _materialize_tuple = false; } - return Status::OK; + return Status::OK(); } Status SortExecExprs::init(const std::vector& lhs_ordering_expr_ctxs, const std::vector& rhs_ordering_expr_ctxs) { _lhs_ordering_expr_ctxs = lhs_ordering_expr_ctxs; _rhs_ordering_expr_ctxs = rhs_ordering_expr_ctxs; - return Status::OK; + return Status::OK(); } Status SortExecExprs::prepare(RuntimeState* state, const RowDescriptor& child_row_desc, @@ -57,7 +57,7 @@ Status SortExecExprs::prepare(RuntimeState* state, const RowDescriptor& child_ro } RETURN_IF_ERROR(Expr::prepare( _lhs_ordering_expr_ctxs, 
state, output_row_desc, expr_mem_tracker)); - return Status::OK; + return Status::OK(); } Status SortExecExprs::open(RuntimeState* state) { @@ -67,7 +67,7 @@ Status SortExecExprs::open(RuntimeState* state) { RETURN_IF_ERROR(Expr::open(_lhs_ordering_expr_ctxs, state)); RETURN_IF_ERROR(Expr::clone_if_not_exists( _lhs_ordering_expr_ctxs, state, &_rhs_ordering_expr_ctxs)); - return Status::OK; + return Status::OK(); } void SortExecExprs::close(RuntimeState* state) { diff --git a/be/src/exec/sort_node.cpp b/be/src/exec/sort_node.cpp index fa84a47927..2a11e0c599 100644 --- a/be/src/exec/sort_node.cpp +++ b/be/src/exec/sort_node.cpp @@ -41,7 +41,7 @@ Status SortNode::init(const TPlanNode& tnode, RuntimeState* state) { sort_tuple_slot_exprs, _pool)); _is_asc_order = tnode.sort_node.is_asc_order; _nulls_first = tnode.sort_node.nulls_first; - return Status::OK; + return Status::OK(); } Status SortNode::prepare(RuntimeState* state) { @@ -49,7 +49,7 @@ Status SortNode::prepare(RuntimeState* state) { RETURN_IF_ERROR(ExecNode::prepare(state)); RETURN_IF_ERROR(_sort_exec_exprs.prepare( state, child(0)->row_desc(), _row_descriptor, expr_mem_tracker())); - return Status::OK; + return Status::OK(); } Status SortNode::open(RuntimeState* state) { @@ -72,7 +72,7 @@ Status SortNode::open(RuntimeState* state) { // The child can be closed at this point. 
child(0)->close(state); - return Status::OK; + return Status::OK(); } Status SortNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { @@ -83,7 +83,7 @@ Status SortNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { if (reached_limit()) { *eos = true; - return Status::OK; + return Status::OK(); } else { *eos = false; } @@ -113,12 +113,12 @@ Status SortNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { } COUNTER_SET(_rows_returned_counter, _num_rows_returned); - return Status::OK; + return Status::OK(); } Status SortNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } _sort_exec_exprs.close(state); _sorter.reset(); @@ -150,7 +150,7 @@ Status SortNode::sort_input(RuntimeState* state) { // RETURN_IF_ERROR(QueryMaintenance(state)); } while (!eos); RETURN_IF_ERROR(_sorter->input_done()); - return Status::OK; + return Status::OK(); } } diff --git a/be/src/exec/sort_node.h b/be/src/exec/sort_node.h index 6ccff0978d..2d5b1d88fc 100644 --- a/be/src/exec/sort_node.h +++ b/be/src/exec/sort_node.h @@ -53,7 +53,7 @@ private: Status sort_input(RuntimeState* state); // Create a block manager object and set it in block_mgr_. - // Returns and sets the query status to Status::MEM_LIMIT_EXCEEDED if there is not + // Returns and sets the query status to Status::MemoryLimitExceeded("Memory limit exceeded") if there is not // enough memory for the sort. 
Status create_block_mgr(RuntimeState* state); diff --git a/be/src/exec/spill_sort_node.cc b/be/src/exec/spill_sort_node.cc index 00df4cd764..e2bb07ca9f 100644 --- a/be/src/exec/spill_sort_node.cc +++ b/be/src/exec/spill_sort_node.cc @@ -39,7 +39,7 @@ Status SpillSortNode::init(const TPlanNode& tnode, RuntimeState* state) { RETURN_IF_ERROR(_sort_exec_exprs.init(tnode.sort_node.sort_info, _pool)); _is_asc_order = tnode.sort_node.sort_info.is_asc_order; _nulls_first = tnode.sort_node.sort_info.nulls_first; - return Status::OK; + return Status::OK(); } Status SpillSortNode::prepare(RuntimeState* state) { @@ -48,7 +48,7 @@ Status SpillSortNode::prepare(RuntimeState* state) { RETURN_IF_ERROR(_sort_exec_exprs.prepare( state, child(0)->row_desc(), _row_descriptor, expr_mem_tracker())); // AddExprCtxsToFree(_sort_exec_exprs); - return Status::OK; + return Status::OK(); } Status SpillSortNode::open(RuntimeState* state) { @@ -80,7 +80,7 @@ Status SpillSortNode::open(RuntimeState* state) { // if (!IsInSubplan()) { child(0)->close(state); // } - return Status::OK; + return Status::OK(); } Status SpillSortNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { @@ -93,7 +93,7 @@ Status SpillSortNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* e if (reached_limit()) { *eos = true; - return Status::OK; + return Status::OK(); } else { *eos = false; } @@ -123,7 +123,7 @@ Status SpillSortNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* e } COUNTER_SET(_rows_returned_counter, _num_rows_returned); - return Status::OK; + return Status::OK(); } Status SpillSortNode::reset(RuntimeState* state) { @@ -132,17 +132,17 @@ Status SpillSortNode::reset(RuntimeState* state) { _sorter->reset(); } // return ExecNode::reset(state); - return Status::OK; + return Status::OK(); } Status SpillSortNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } _sort_exec_exprs.close(state); _sorter.reset(); 
ExecNode::close(state); - return Status::OK; + return Status::OK(); } void SpillSortNode::debug_string(int indentation_level, stringstream* out) const { @@ -171,7 +171,7 @@ Status SpillSortNode::sort_input(RuntimeState* state) { } while (!eos); RETURN_IF_ERROR(_sorter->input_done()); - return Status::OK; + return Status::OK(); } } // end namespace doris diff --git a/be/src/exec/topn_node.cpp b/be/src/exec/topn_node.cpp index 4f24a8d706..0d6ca6ddcf 100644 --- a/be/src/exec/topn_node.cpp +++ b/be/src/exec/topn_node.cpp @@ -56,7 +56,7 @@ Status TopNNode::init(const TPlanNode& tnode, RuntimeState* state) { DCHECK_EQ(_conjuncts.size(), 0) << "TopNNode should never have predicates to evaluate."; _abort_on_default_limit_exceeded = tnode.sort_node.is_default_limit; - return Status::OK; + return Status::OK(); } Status TopNNode::prepare(RuntimeState* state) { @@ -80,7 +80,7 @@ Status TopNNode::prepare(RuntimeState* state) { _abort_on_default_limit_exceeded = _abort_on_default_limit_exceeded && state->abort_on_default_limit_exceeded(); _materialized_tuple_desc = _row_descriptor.tuple_descriptors()[0]; - return Status::OK; + return Status::OK(); } Status TopNNode::open(RuntimeState* state) { @@ -115,7 +115,7 @@ Status TopNNode::open(RuntimeState* state) { RETURN_IF_ERROR(child(0)->get_next(state, &batch, &eos)); if (_abort_on_default_limit_exceeded && child(0)->rows_returned() > _limit) { - return Status("DEFAULT_ORDER_BY_LIMIT has been exceeded."); + return Status::InternalError("DEFAULT_ORDER_BY_LIMIT has been exceeded."); } for (int i = 0; i < batch.num_rows(); ++i) { @@ -135,7 +135,7 @@ Status TopNNode::open(RuntimeState* state) { // if (!is_in_subplan()) { child(0)->close(state); // } - return Status::OK; + return Status::OK(); } Status TopNNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { @@ -177,12 +177,12 @@ Status TopNNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { COUNTER_UPDATE(memory_used_counter(), 
_tuple_pool->peak_allocated_bytes()); } } - return Status::OK; + return Status::OK(); } Status TopNNode::close(RuntimeState* state) { if (is_closed()) { - return Status::OK; + return Status::OK(); } if (_tuple_pool.get() != NULL) { _tuple_pool->free_all(); diff --git a/be/src/exec/union_node.cpp b/be/src/exec/union_node.cpp index b8714bec81..2b57e8c0da 100644 --- a/be/src/exec/union_node.cpp +++ b/be/src/exec/union_node.cpp @@ -68,7 +68,7 @@ Status UnionNode::init(const TPlanNode& tnode, RuntimeState* state) { RETURN_IF_ERROR(Expr::create_expr_trees(_pool, texprs, &ctxs)); _child_expr_lists.push_back(ctxs); } - return Status::OK; + return Status::OK(); } Status UnionNode::prepare(RuntimeState* state) { @@ -93,7 +93,7 @@ Status UnionNode::prepare(RuntimeState* state) { // AddExprCtxsToFree(_child_expr_lists[i]); DCHECK_EQ(_child_expr_lists[i].size(), _tuple_desc->slots().size()); } - return Status::OK; + return Status::OK(); } void UnionNode::codegen(RuntimeState* state) { @@ -158,7 +158,7 @@ Status UnionNode::open(RuntimeState* state) { // succeeded. 
if (!_children.empty()) RETURN_IF_ERROR(child(_child_idx)->open(state)); - return Status::OK; + return Status::OK(); } Status UnionNode::get_next_pass_through(RuntimeState* state, RowBatch* row_batch) { @@ -180,7 +180,7 @@ Status UnionNode::get_next_pass_through(RuntimeState* state, RowBatch* row_batch _to_close_child_idx = _child_idx; ++_child_idx; } - return Status::OK; + return Status::OK(); } Status UnionNode::get_next_materialized(RuntimeState* state, RowBatch* row_batch) { @@ -252,7 +252,7 @@ Status UnionNode::get_next_materialized(RuntimeState* state, RowBatch* row_batch } DCHECK_LE(_child_idx, _children.size()); - return Status::OK; + return Status::OK(); } Status UnionNode::get_next_const(RuntimeState* state, RowBatch* row_batch) { @@ -272,7 +272,7 @@ Status UnionNode::get_next_const(RuntimeState* state, RowBatch* row_batch) { ++_const_expr_list_idx; } - return Status::OK; + return Status::OK(); } Status UnionNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) { @@ -317,7 +317,7 @@ Status UnionNode::get_next(RuntimeState* state, RowBatch* row_batch, bool* eos) (!has_more_passthrough() && !has_more_materialized() && !has_more_const(state)); COUNTER_SET(_rows_returned_counter, _num_rows_returned); - return Status::OK; + return Status::OK(); } #if 0 @@ -335,7 +335,7 @@ Status UnionNode::reset(RuntimeState* state) { #endif Status UnionNode::close(RuntimeState* state) { - if (is_closed()) return Status::OK; + if (is_closed()) return Status::OK(); _child_batch.reset(); for (auto& exprs : _const_expr_lists) { Expr::close(exprs, state); diff --git a/be/src/exprs/agg_fn.cc b/be/src/exprs/agg_fn.cc index 8b448bb729..8d194745ce 100644 --- a/be/src/exprs/agg_fn.cc +++ b/be/src/exprs/agg_fn.cc @@ -88,7 +88,7 @@ Status AggFn::Init(const RowDescriptor& row_desc, RuntimeState* state) { DCHECK_EQ(_fn.binary_type, TFunctionBinaryType::BUILTIN); stringstream ss; ss << "Function " << _fn.name.function_name << " is not implemented."; - return Status(ss.str()); 
+ return Status::InternalError(ss.str()); } RETURN_IF_ERROR(UserFunctionCache::instance()->get_function_ptr( @@ -128,7 +128,7 @@ Status AggFn::Init(const RowDescriptor& row_desc, RuntimeState* state) { _fn.hdfs_location, _fn.checksum, &finalize_fn_, &_cache_entry)); } - return Status::OK; + return Status::OK(); } Status AggFn::Create(const TExpr& texpr, const RowDescriptor& row_desc, @@ -140,7 +140,7 @@ Status AggFn::Create(const TExpr& texpr, const RowDescriptor& row_desc, //TODO chenhao DCHECK_EQ(texpr_node.node_type, TExprNodeType::AGG_EXPR); if (!texpr_node.__isset.fn) { - return Status("Function not set in thrift AGGREGATE_EXPR node"); + return Status::InternalError("Function not set in thrift AGGREGATE_EXPR node"); } AggFn* new_agg_fn = pool->add(new AggFn(texpr_node, intermediate_slot_desc, output_slot_desc)); @@ -155,7 +155,7 @@ Status AggFn::Create(const TExpr& texpr, const RowDescriptor& row_desc, input_expr->assign_fn_ctx_idx(&fn_ctx_idx); } *agg_fn = new_agg_fn; - return Status::OK; + return Status::OK(); } FunctionContext::TypeDesc AggFn::GetIntermediateTypeDesc() const { @@ -185,10 +185,10 @@ FunctionContext::TypeDesc AggFn::GetOutputTypeDesc() const { // codegen->InlineConstFnAttrs(GetOutputTypeDesc(), arg_type_descs_, *uda_fn); // *uda_fn = codegen->FinalizeFunction(*uda_fn); // if (*uda_fn == nullptr) { -// return Status(TErrorCode::UDF_VERIFY_FAILED, symbol, fn_.hdfs_location); +// return Status::InternalError(TErrorCode::UDF_VERIFY_FAILED, symbol, fn_.hdfs_location); // } // } -// return Status::OK; +// return Status::OK(); //} void AggFn::Close() { diff --git a/be/src/exprs/agg_fn.h b/be/src/exprs/agg_fn.h index 92705117cb..602728ec09 100644 --- a/be/src/exprs/agg_fn.h +++ b/be/src/exprs/agg_fn.h @@ -136,7 +136,7 @@ class AggFn : public Expr { WARN_UNUSED_RESULT; Status get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { - return Status::OK; + return Status::OK(); } /// Releases all cache entries to libCache for all nodes in the 
expr tree. diff --git a/be/src/exprs/agg_fn_evaluator.cpp b/be/src/exprs/agg_fn_evaluator.cpp index f46b0ae6ce..aee822eb29 100755 --- a/be/src/exprs/agg_fn_evaluator.cpp +++ b/be/src/exprs/agg_fn_evaluator.cpp @@ -96,7 +96,7 @@ Status AggFnEvaluator::create( pool, desc.nodes, NULL, &node_idx, &expr, &ctx)); (*result)->_input_exprs_ctxs.push_back(ctx); } - return Status::OK; + return Status::OK(); } AggFnEvaluator::AggFnEvaluator(const TExprNode& desc, bool is_analytic_fn) : @@ -204,7 +204,7 @@ Status AggFnEvaluator::prepare( DCHECK_EQ(_fn.binary_type, TFunctionBinaryType::BUILTIN); stringstream ss; ss << "Function " << _fn.name.function_name << " is not implemented."; - return Status(ss.str()); + return Status::InternalError(ss.str()); } // Load the function pointers. @@ -261,7 +261,7 @@ Status AggFnEvaluator::prepare( *agg_fn_ctx = FunctionContextImpl::create_context(state, pool, intermediate_type, output_type, arg_types, 0, false); - return Status::OK; + return Status::OK(); } Status AggFnEvaluator::open(RuntimeState* state, FunctionContext* agg_fn_ctx) { @@ -274,7 +274,7 @@ Status AggFnEvaluator::open(RuntimeState* state, FunctionContext* agg_fn_ctx) { constant_args[i] = _input_exprs_ctxs[i]->root()->get_const_val(_input_exprs_ctxs[i]); } agg_fn_ctx->impl()->set_constant_args(constant_args); - return Status::OK; + return Status::OK(); } void AggFnEvaluator::close(RuntimeState* state) { diff --git a/be/src/exprs/anyval_util.cpp b/be/src/exprs/anyval_util.cpp index 19428fe9c6..541f2472b6 100755 --- a/be/src/exprs/anyval_util.cpp +++ b/be/src/exprs/anyval_util.cpp @@ -47,7 +47,7 @@ Status allocate_any_val(RuntimeState* state, MemPool* pool, const TypeDescriptor state, mem_limit_exceeded_msg, anyval_size); } memset(*result, 0, anyval_size); - return Status::OK; + return Status::OK(); } AnyVal* create_any_val(ObjectPool* pool, const TypeDescriptor& type) { diff --git a/be/src/exprs/arithmetic_expr.cpp b/be/src/exprs/arithmetic_expr.cpp index 7fef93cb64..4562dc2380 
100644 --- a/be/src/exprs/arithmetic_expr.cpp +++ b/be/src/exprs/arithmetic_expr.cpp @@ -288,7 +288,7 @@ Status ArithmeticExpr::codegen_binary_op( val = builder.CreateFAdd(lhs_val.get_val(), rhs_val.get_val(), "val"); break; default: - return Status("Unk"); + return Status::InternalError("Unk"); } break; } @@ -306,7 +306,7 @@ Status ArithmeticExpr::codegen_binary_op( val = builder.CreateFSub(lhs_val.get_val(), rhs_val.get_val(), "val"); break; default: - return Status("Unk"); + return Status::InternalError("Unk"); } break; } @@ -324,7 +324,7 @@ Status ArithmeticExpr::codegen_binary_op( val = builder.CreateFMul(lhs_val.get_val(), rhs_val.get_val(), "val"); break; default: - return Status("Unk"); + return Status::InternalError("Unk"); } break; } @@ -342,7 +342,7 @@ Status ArithmeticExpr::codegen_binary_op( val = builder.CreateFDiv(lhs_val.get_val(), rhs_val.get_val(), "val"); break; default: - return Status("Unk"); + return Status::InternalError("Unk"); } break; } @@ -360,7 +360,7 @@ Status ArithmeticExpr::codegen_binary_op( val = builder.CreateFRem(lhs_val.get_val(), rhs_val.get_val(), "val"); break; default: - return Status("Unk"); + return Status::InternalError("Unk"); } break; } @@ -374,7 +374,7 @@ Status ArithmeticExpr::codegen_binary_op( val = builder.CreateAnd(lhs_val.get_val(), rhs_val.get_val(), "val"); break; default: - return Status("Unk"); + return Status::InternalError("Unk"); } break; } @@ -388,7 +388,7 @@ Status ArithmeticExpr::codegen_binary_op( val = builder.CreateOr(lhs_val.get_val(), rhs_val.get_val(), "val"); break; default: - return Status("Unk"); + return Status::InternalError("Unk"); } break; } @@ -402,12 +402,12 @@ Status ArithmeticExpr::codegen_binary_op( val = builder.CreateXor(lhs_val.get_val(), rhs_val.get_val(), "val"); break; default: - return Status("Unk"); + return Status::InternalError("Unk"); } break; } default: - return Status("Unknow operato"); + return Status::InternalError("Unknow operato"); } builder.CreateBr(ret_block); @@ 
-432,87 +432,87 @@ Status ArithmeticExpr::codegen_binary_op( builder.CreateRet(result.value()); *fn = codegen->finalize_function(*fn); - return Status::OK; + return Status::OK(); } Status AddExpr::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(codegen_binary_op(state, fn , BinaryOpType::ADD)); _ir_compute_fn = *fn; - return Status::OK; + return Status::OK(); } Status SubExpr::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(codegen_binary_op(state, fn , BinaryOpType::SUB)); _ir_compute_fn = *fn; - return Status::OK; + return Status::OK(); } Status MulExpr::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(codegen_binary_op(state, fn , BinaryOpType::MUL)); _ir_compute_fn = *fn; - return Status::OK; + return Status::OK(); } Status DivExpr::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(codegen_binary_op(state, fn , BinaryOpType::DIV)); _ir_compute_fn = *fn; - return Status::OK; + return Status::OK(); } Status ModExpr::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(codegen_binary_op(state, fn , BinaryOpType::MOD)); _ir_compute_fn = *fn; - return Status::OK; + return Status::OK(); } Status BitAndExpr::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(codegen_binary_op(state, fn , 
BinaryOpType::BIT_AND)); _ir_compute_fn = *fn; - return Status::OK; + return Status::OK(); } Status BitOrExpr::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(codegen_binary_op(state, fn , BinaryOpType::BIT_OR)); _ir_compute_fn = *fn; - return Status::OK; + return Status::OK(); } Status BitXorExpr::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(codegen_binary_op(state, fn , BinaryOpType::BIT_XOR)); _ir_compute_fn = *fn; - return Status::OK; + return Status::OK(); } // IR codegen for compound add predicates. Compound predicate has non trivial @@ -538,7 +538,7 @@ Status BitXorExpr::get_codegend_compute_fn(RuntimeState* state, llvm::Function** Status BitNotExpr::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } LlvmCodeGen* codegen = NULL; RETURN_IF_ERROR(state->get_codegen(&codegen)); @@ -590,7 +590,7 @@ Status BitNotExpr::get_codegend_compute_fn(RuntimeState* state, llvm::Function** *fn = codegen->finalize_function(*fn); _ir_compute_fn = *fn; - return Status::OK; + return Status::OK(); } } diff --git a/be/src/exprs/binary_predicate.cpp b/be/src/exprs/binary_predicate.cpp index f01c6b64e6..1b08dd665c 100644 --- a/be/src/exprs/binary_predicate.cpp +++ b/be/src/exprs/binary_predicate.cpp @@ -351,7 +351,7 @@ Status BinaryPredicate::codegen_compare_fn( builder.CreateRet(result.value()); *fn = codegen->finalize_function(*fn); - return Status::OK; + return Status::OK(); } #define BINARY_PRED_FN(CLASS, TYPE, FN, OP, LLVM_PRED) \ @@ -369,11 +369,11 @@ Status BinaryPredicate::codegen_compare_fn( Status CLASS::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { \ if (_ir_compute_fn 
!= NULL) { \ *fn = _ir_compute_fn; \ - return Status::OK; \ + return Status::OK(); \ } \ RETURN_IF_ERROR(codegen_compare_fn(state, fn, LLVM_PRED)); \ _ir_compute_fn = *fn; \ - return Status::OK; \ + return Status::OK(); \ } \ // add '/**/' to pass codestyle check of cooder @@ -448,11 +448,11 @@ COMPLICATE_BINARY_PRED_FNS(DecimalV2Val, get_decimalv2_val, DecimalV2Value, from Status CLASS::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { \ if (_ir_compute_fn != NULL) { \ *fn = _ir_compute_fn; \ - return Status::OK; \ + return Status::OK(); \ } \ RETURN_IF_ERROR(codegen_compare_fn(state, fn , LLVM_PRED)); \ _ir_compute_fn = *fn; \ - return Status::OK; \ + return Status::OK(); \ } \ #define DATETIME_BINARY_PRED_FNS() \ @@ -600,7 +600,7 @@ Status EqStringValPred::get_codegend_compute_fn(RuntimeState* state, llvm::Funct builder.CreateRet(result.value()); *fn = codegen->finalize_function(*fn); - return Status::OK; + return Status::OK(); } #endif diff --git a/be/src/exprs/case_expr.cpp b/be/src/exprs/case_expr.cpp index c52c2b2f75..70967d6725 100644 --- a/be/src/exprs/case_expr.cpp +++ b/be/src/exprs/case_expr.cpp @@ -55,7 +55,7 @@ Status CaseExpr::prepare( RuntimeState* state, const RowDescriptor& desc, ExprContext* ctx) { RETURN_IF_ERROR(Expr::prepare(state, desc, ctx)); register_function_context(ctx, state, 0); - return Status::OK; + return Status::OK(); } Status CaseExpr::open( @@ -73,7 +73,7 @@ Status CaseExpr::open( case_state->case_val = create_any_val(state->obj_pool(), TypeDescriptor(TYPE_BOOLEAN)); case_state->when_val = create_any_val(state->obj_pool(), _children[0]->type()); } - return Status::OK; + return Status::OK(); } void CaseExpr::close( @@ -189,7 +189,7 @@ std::string CaseExpr::debug_string() const { Status CaseExpr::get_codegend_compute_fn(RuntimeState* state, Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } const int num_children = get_num_children(); @@ -294,7 +294,7 
@@ Status CaseExpr::get_codegend_compute_fn(RuntimeState* state, Function** fn) { *fn = codegen->finalize_function(function); DCHECK(*fn != NULL); _ir_compute_fn = *fn; - return Status::OK; + return Status::OK(); } void CaseExpr::get_child_val(int child_idx, ExprContext* ctx, TupleRow* row, AnyVal* dst) { diff --git a/be/src/exprs/cast_expr.cpp b/be/src/exprs/cast_expr.cpp index 7e55612ddb..834ffea9dc 100644 --- a/be/src/exprs/cast_expr.cpp +++ b/be/src/exprs/cast_expr.cpp @@ -222,7 +222,7 @@ Status CastExpr::codegen_cast_fn(RuntimeState* state, llvm::Function** fn) { Value* val = NULL; Instruction::CastOps cast_op = codegen->get_cast_op(_children[0]->type(), _type); if (cast_op == Instruction::CastOps::CastOpsEnd) { - return Status("Unknow type"); + return Status::InternalError("Unknow type"); } val = builder.CreateCast(cast_op, child_val.get_val(), codegen->get_type(_type), "val"); builder.CreateBr(ret_block); @@ -244,18 +244,18 @@ Status CastExpr::codegen_cast_fn(RuntimeState* state, llvm::Function** fn) { builder.CreateRet(result.value()); *fn = codegen->finalize_function(*fn); - return Status::OK; + return Status::OK(); } #define CODEGEN_DEFINE(CLASS) \ Status CLASS::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { \ if (_ir_compute_fn != NULL) { \ *fn = _ir_compute_fn; \ - return Status::OK; \ + return Status::OK(); \ } \ RETURN_IF_ERROR(codegen_cast_fn(state, fn)); \ _ir_compute_fn = *fn; \ - return Status::OK; \ + return Status::OK(); \ } CODEGEN_DEFINE(CastBooleanExpr); diff --git a/be/src/exprs/compound_predicate.cpp b/be/src/exprs/compound_predicate.cpp index de42469fe4..24783b809f 100644 --- a/be/src/exprs/compound_predicate.cpp +++ b/be/src/exprs/compound_predicate.cpp @@ -154,7 +154,7 @@ Status CompoundPredicate::codegen_compute_fn( bool and_fn, RuntimeState* state, Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } DCHECK_EQ(get_num_children(), 2); @@ -273,7 +273,7 @@ 
Status CompoundPredicate::codegen_compute_fn( *fn = codegen->finalize_function(function); DCHECK(*fn != NULL); _ir_compute_fn = *fn; - return Status::OK; + return Status::OK(); } } diff --git a/be/src/exprs/expr.cpp b/be/src/exprs/expr.cpp index 9ca09ca9e5..ee850da5be 100644 --- a/be/src/exprs/expr.cpp +++ b/be/src/exprs/expr.cpp @@ -258,13 +258,13 @@ Status Expr::create_expr_tree(ObjectPool* pool, const TExpr& texpr, ExprContext* // input is empty if (texpr.nodes.size() == 0) { *ctx = NULL; - return Status::OK; + return Status::OK(); } int node_idx = 0; Expr* e = NULL; Status status = create_tree_from_thrift(pool, texpr.nodes, NULL, &node_idx, &e, ctx); if (status.ok() && node_idx + 1 != texpr.nodes.size()) { - status = Status( + status = Status::InternalError( "Expression tree only partially reconstructed. Not all thrift nodes were used."); } if (!status.ok()) { @@ -284,7 +284,7 @@ Status Expr::create_expr_trees( RETURN_IF_ERROR(create_expr_tree(pool, texprs[i], &ctx)); ctxs->push_back(ctx); } - return Status::OK; + return Status::OK(); } Status Expr::create_tree_from_thrift( @@ -296,7 +296,7 @@ Status Expr::create_tree_from_thrift( ExprContext** ctx) { // propagate error case if (*node_idx >= nodes.size()) { - return Status("Failed to reconstruct expression tree from thrift."); + return Status::InternalError("Failed to reconstruct expression tree from thrift."); } int num_children = nodes[*node_idx].num_children; Expr* expr = NULL; @@ -316,10 +316,10 @@ Status Expr::create_tree_from_thrift( // we are expecting a child, but have used all nodes // this means we have been given a bad tree and must fail if (*node_idx >= nodes.size()) { - return Status("Failed to reconstruct expression tree from thrift."); + return Status::InternalError("Failed to reconstruct expression tree from thrift."); } } - return Status::OK; + return Status::OK(); } Status Expr::create_expr(ObjectPool* pool, const TExprNode& texpr_node, Expr** expr) { @@ -332,7 +332,7 @@ Status 
Expr::create_expr(ObjectPool* pool, const TExprNode& texpr_node, Expr** e case TExprNodeType::DATE_LITERAL: case TExprNodeType::STRING_LITERAL: *expr = pool->add(new Literal(texpr_node)); - return Status::OK; + return Status::OK(); case TExprNodeType::COMPOUND_PRED: switch (texpr_node.opcode) { case TExprOpcode::COMPOUND_AND: @@ -345,22 +345,22 @@ Status Expr::create_expr(ObjectPool* pool, const TExprNode& texpr_node, Expr** e *expr = pool->add(new NotPredicate(texpr_node)); break; } - return Status::OK; + return Status::OK(); case TExprNodeType::BINARY_PRED: *expr = pool->add(BinaryPredicate::from_thrift(texpr_node)); - return Status::OK; + return Status::OK(); case TExprNodeType::NULL_LITERAL: *expr = pool->add(new NullLiteral(texpr_node)); - return Status::OK; + return Status::OK(); case TExprNodeType::ARITHMETIC_EXPR: if (texpr_node.opcode != TExprOpcode::INVALID_OPCODE) { *expr = pool->add(ArithmeticExpr::from_thrift(texpr_node)); - return Status::OK; + return Status::OK(); } case TExprNodeType::CAST_EXPR: if (texpr_node.__isset.child_type) { *expr = pool->add(CastExpr::from_thrift(texpr_node)); - return Status::OK; + return Status::OK(); } case TExprNodeType::COMPUTE_FUNCTION_CALL: case TExprNodeType::FUNCTION_CALL: @@ -376,22 +376,22 @@ Status Expr::create_expr(ObjectPool* pool, const TExprNode& texpr_node, Expr** e } else { *expr = pool->add(new ScalarFnCall(texpr_node)); } - return Status::OK; + return Status::OK(); //case TExprNodeType::AGG_EXPR: { // if (!texpr_node.__isset.agg_expr) { - // return Status("Aggregation expression not set in thrift node"); + // return Status::InternalError("Aggregation expression not set in thrift node"); // } // *expr = pool->add(new AggregateExpr(texpr_node)); - // return Status::OK; + // return Status::OK(); //} case TExprNodeType::CASE_EXPR: { if (!texpr_node.__isset.case_expr) { - return Status("Case expression not set in thrift node"); + return Status::InternalError("Case expression not set in thrift node"); } *expr = 
pool->add(new CaseExpr(texpr_node)); - return Status::OK; + return Status::OK(); } case TExprNodeType::IN_PRED: { @@ -404,30 +404,30 @@ Status Expr::create_expr(ObjectPool* pool, const TExprNode& texpr_node, Expr** e *expr = pool->add(new ScalarFnCall(texpr_node)); break; } - return Status::OK; + return Status::OK(); } case TExprNodeType::SLOT_REF: { if (!texpr_node.__isset.slot_ref) { - return Status("Slot reference not set in thrift node"); + return Status::InternalError("Slot reference not set in thrift node"); } *expr = pool->add(new SlotRef(texpr_node)); - return Status::OK; + return Status::OK(); } case TExprNodeType::TUPLE_IS_NULL_PRED: { *expr = pool->add(new TupleIsNullPredicate(texpr_node)); - return Status::OK; + return Status::OK(); } case TExprNodeType::INFO_FUNC: { *expr = pool->add(new InfoFunc(texpr_node)); - return Status::OK; + return Status::OK(); } #if 0 case TExprNodeType::FUNCTION_CALL: { if (!texpr_node.__isset.fn_call_expr) { - return Status("Udf call not set in thrift node"); + return Status::InternalError("Udf call not set in thrift node"); } if (texpr_node.fn_call_expr.fn.binary_type == TFunctionBinaryType::HIVE) { @@ -437,14 +437,14 @@ Status Expr::create_expr(ObjectPool* pool, const TExprNode& texpr_node, Expr** e *expr = pool->add(new NativeUdfExpr(texpr_node)); } - return Status::OK; + return Status::OK(); } #endif default: std::stringstream os; os << "Unknown expr node type: " << texpr_node.node_type; - return Status(os.str()); + return Status::InternalError(os.str()); } } @@ -560,7 +560,7 @@ Status Expr::prepare( for (int i = 0; i < ctxs.size(); ++i) { RETURN_IF_ERROR(ctxs[i]->prepare(state, row_desc, tracker)); } - return Status::OK; + return Status::OK(); } Status Expr::prepare(RuntimeState* state, const RowDescriptor& row_desc, @@ -569,14 +569,14 @@ Status Expr::prepare(RuntimeState* state, const RowDescriptor& row_desc, for (int i = 0; i < _children.size(); ++i) { RETURN_IF_ERROR(_children[i]->prepare(state, row_desc, context)); 
} - return Status::OK; + return Status::OK(); } Status Expr::open(const std::vector& ctxs, RuntimeState* state) { for (int i = 0; i < ctxs.size(); ++i) { RETURN_IF_ERROR(ctxs[i]->open(state)); } - return Status::OK; + return Status::OK(); } Status Expr::open( @@ -587,7 +587,7 @@ Status Expr::open( for (int i = 0; i < _children.size(); ++i) { RETURN_IF_ERROR(_children[i]->open(state, context, scope)); } - return Status::OK; + return Status::OK(); } void Expr::close(const std::vector& ctxs, RuntimeState* state) { @@ -626,13 +626,13 @@ Status Expr::clone_if_not_exists( for (int i = 0; i < new_ctxs->size(); ++i) { DCHECK((*new_ctxs)[i]->_is_clone); } - return Status::OK; + return Status::OK(); } new_ctxs->resize(ctxs.size()); for (int i = 0; i < ctxs.size(); ++i) { RETURN_IF_ERROR(ctxs[i]->clone(state, &(*new_ctxs)[i])); } - return Status::OK; + return Status::OK(); } std::string Expr::debug_string() const { @@ -858,10 +858,10 @@ Status Expr::get_fn_context_error(ExprContext* ctx) { if (_fn_context_index != -1) { FunctionContext* fn_ctx = ctx->fn_context(_fn_context_index); if (fn_ctx->has_error()) { - return Status(fn_ctx->error_msg()); + return Status::InternalError(fn_ctx->error_msg()); } } - return Status::OK; + return Status::OK(); } llvm::Function* Expr::create_ir_function_prototype( @@ -966,7 +966,7 @@ int Expr::inline_constants(LlvmCodeGen* codegen, Function* fn) { Status Expr::get_codegend_compute_fn_wrapper(RuntimeState* state, llvm::Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } LlvmCodeGen* codegen = NULL; RETURN_IF_ERROR(state->get_codegen(&codegen)); @@ -986,7 +986,7 @@ Status Expr::get_codegend_compute_fn_wrapper(RuntimeState* state, llvm::Function builder.CreateRet(ret); _ir_compute_fn = codegen->finalize_function(_ir_compute_fn); *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } Expr* Expr::copy(ObjectPool* pool, Expr* old_expr) { @@ -1019,7 +1019,7 @@ Status 
Expr::create(const TExpr& texpr, const RowDescriptor& row_desc, // TODO pengyubing replace by Init() ExprContext* ctx = pool->add(new ExprContext(root)); // TODO chenhao check node type in ScalarExpr Init() - Status status = Status::OK; + Status status = Status::OK(); if (texpr.nodes[0].node_type != TExprNodeType::CASE_EXPR) { status = root->prepare(state, row_desc, ctx); } @@ -1030,7 +1030,7 @@ Status Expr::create(const TExpr& texpr, const RowDescriptor& row_desc, int fn_ctx_idx = 0; root->assign_fn_ctx_idx(&fn_ctx_idx); *scalar_expr = root; - return Status::OK; + return Status::OK(); } Status Expr::create(const vector& texprs, const RowDescriptor& row_desc, @@ -1042,7 +1042,7 @@ Status Expr::create(const vector& texprs, const RowDescriptor& row_desc, DCHECK(expr != nullptr); exprs->push_back(expr); } - return Status::OK; + return Status::OK(); } Status Expr::create(const TExpr& texpr, const RowDescriptor& row_desc, @@ -1071,17 +1071,17 @@ Status Expr::create_tree(const TExpr& texpr, ObjectPool* pool, Expr* root) { } } if (UNLIKELY(child_node_idx + 1 != texpr.nodes.size())) { - return Status("Expression tree only partially reconstructed. Not all thrift " \ + return Status::InternalError("Expression tree only partially reconstructed. 
Not all thrift " \ "nodes were used."); } - return Status::OK; + return Status::OK(); } Status Expr::create_tree_internal(const vector& nodes, ObjectPool* pool, Expr* root, int* child_node_idx) { // propagate error case if (*child_node_idx >= nodes.size()) { - return Status("Failed to reconstruct expression tree from thrift."); + return Status::InternalError("Failed to reconstruct expression tree from thrift."); } const TExprNode& texpr_node = nodes[*child_node_idx]; @@ -1096,7 +1096,7 @@ Status Expr::create_tree_internal(const vector& nodes, ObjectPool* po RETURN_IF_ERROR(create_tree_internal(nodes, pool, child_expr, child_node_idx)); DCHECK(child_expr->get_child(i) != nullptr); } - return Status::OK; + return Status::OK(); } // TODO chenhao diff --git a/be/src/exprs/expr_context.cpp b/be/src/exprs/expr_context.cpp index 77151f1b61..fb5e50a110 100644 --- a/be/src/exprs/expr_context.cpp +++ b/be/src/exprs/expr_context.cpp @@ -28,6 +28,7 @@ #include "runtime/raw_value.h" #include "udf/udf_internal.h" #include "util/debug_util.h" +#include "util/stack_util.h" #include "exprs/anyval_util.h" namespace doris { @@ -65,7 +66,7 @@ Status ExprContext::prepare(RuntimeState* state, const RowDescriptor& row_desc, Status ExprContext::open(RuntimeState* state) { DCHECK(_prepared); if (_opened) { - return Status::OK; + return Status::OK(); } _opened = true; // Fragment-local state is only initialized for original contexts. Clones inherit the @@ -80,7 +81,7 @@ Status ExprContext::open(std::vector evals, RuntimeState* state) { for (int i = 0; i < evals.size(); ++i) { RETURN_IF_ERROR(evals[i]->open(state)); } - return Status::OK; + return Status::OK(); } void ExprContext::close(RuntimeState* state) { @@ -468,7 +469,7 @@ Status ExprContext::get_const_value(RuntimeState* state, Expr& expr, DCHECK(_opened); if (!expr.is_constant()) { *const_val = nullptr; - return Status::OK; + return Status::OK(); } // A constant expression shouldn't have any SlotRefs expr in it. 
@@ -478,7 +479,7 @@ Status ExprContext::get_const_value(RuntimeState* state, Expr& expr, ObjectPool* obj_pool = state->obj_pool(); *const_val = create_any_val(obj_pool, result_type); if (*const_val == NULL) { - return Status("Could not create any val"); + return Status::InternalError("Could not create any val"); } const void* result = ExprContext::get_value(&expr, nullptr); @@ -508,8 +509,8 @@ Status ExprContext::get_error(int start_idx, int end_idx) const { for (int idx = start_idx; idx < end_idx; ++idx) { DCHECK_LT(idx, _fn_contexts.size()); FunctionContext* fn_ctx = _fn_contexts[idx]; - if (fn_ctx->has_error()) return Status(fn_ctx->error_msg()); + if (fn_ctx->has_error()) return Status::InternalError(fn_ctx->error_msg()); } - return Status::OK; + return Status::OK(); } } diff --git a/be/src/exprs/in_predicate.cpp b/be/src/exprs/in_predicate.cpp index 4a9ede32b2..dc3b821b3b 100644 --- a/be/src/exprs/in_predicate.cpp +++ b/be/src/exprs/in_predicate.cpp @@ -41,15 +41,15 @@ InPredicate::~InPredicate() { Status InPredicate::prepare(RuntimeState* state, const TypeDescriptor& type) { if (_is_prepare) { - return Status::OK; + return Status::OK(); } _hybird_set.reset(HybirdSetBase::create_set(type.type)); if (NULL == _hybird_set.get()) { - return Status("Unknown column type."); + return Status::InternalError("Unknown column type."); } _is_prepare = true; - return Status::OK; + return Status::OK(); } Status InPredicate::open( @@ -61,11 +61,11 @@ Status InPredicate::open( for (int i = 1; i < _children.size(); ++i) { if (_children[0]->type().is_string_type()) { if (!_children[i]->type().is_string_type()) { - return Status("InPredicate type not same"); + return Status::InternalError("InPredicate type not same"); } } else { if (_children[i]->type().type != _children[0]->type().type) { - return Status("InPredicate type not same"); + return Status::InternalError("InPredicate type not same"); } } @@ -76,7 +76,7 @@ Status InPredicate::open( } _hybird_set->insert(value); } - 
return Status::OK; + return Status::OK(); } Status InPredicate::prepare( @@ -85,19 +85,19 @@ Status InPredicate::prepare( RETURN_IF_ERROR(_children[i]->prepare(state, row_desc, context)); } if (_is_prepare) { - return Status::OK; + return Status::OK(); } if (_children.size() < 1) { - return Status("no Function operator in."); + return Status::InternalError("no Function operator in."); } _hybird_set.reset(HybirdSetBase::create_set(_children[0]->type().type)); if (NULL == _hybird_set.get()) { - return Status("Unknown column type."); + return Status::InternalError("Unknown column type."); } _is_prepare = true; - return Status::OK; + return Status::OK(); } void InPredicate::insert(void* value) { diff --git a/be/src/exprs/literal.cpp b/be/src/exprs/literal.cpp index 9a6b626252..bdd00f7dc9 100644 --- a/be/src/exprs/literal.cpp +++ b/be/src/exprs/literal.cpp @@ -190,7 +190,7 @@ StringVal Literal::get_string_val(ExprContext* context, TupleRow* row) { Status Literal::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } DCHECK_EQ(get_num_children(), 0); @@ -249,13 +249,13 @@ Status Literal::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn std::stringstream ss; ss << "Invalid type: " << _type; DCHECK(false) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } builder.CreateRet(v.value()); *fn = codegen->finalize_function(*fn); _ir_compute_fn = *fn; - return Status::OK; + return Status::OK(); } } diff --git a/be/src/exprs/new_agg_fn_evaluator.cc b/be/src/exprs/new_agg_fn_evaluator.cc index b37582c329..bfc7a2fa80 100644 --- a/be/src/exprs/new_agg_fn_evaluator.cc +++ b/be/src/exprs/new_agg_fn_evaluator.cc @@ -143,7 +143,7 @@ Status NewAggFnEvaluator::Create(const AggFn& agg_fn, RuntimeState* state, Objec } *result = agg_fn_eval; - return Status::OK; + return Status::OK(); cleanup: DCHECK(!status.ok()); @@ -160,11 +160,11 
@@ Status NewAggFnEvaluator::Create(const vector& agg_fns, RuntimeState* st &agg_fn_eval, tracker, row_desc)); evals->push_back(agg_fn_eval); } - return Status::OK; + return Status::OK(); } Status NewAggFnEvaluator::Open(RuntimeState* state) { - if (opened_) return Status::OK; + if (opened_) return Status::OK(); opened_ = true; // TODO chenhao, ScalarFnEvaluator different from ExprContext RETURN_IF_ERROR(ExprContext::open(input_evals_, state)); @@ -178,13 +178,13 @@ Status NewAggFnEvaluator::Open(RuntimeState* state) { &constant_args[i])); } agg_fn_ctx_->impl()->set_constant_args(move(constant_args)); - return Status::OK; + return Status::OK(); } Status NewAggFnEvaluator::Open( const vector& evals, RuntimeState* state) { for (NewAggFnEvaluator* eval : evals) RETURN_IF_ERROR(eval->Open(state)); - return Status::OK; + return Status::OK(); } void NewAggFnEvaluator::Close(RuntimeState* state) { diff --git a/be/src/exprs/null_literal.cpp b/be/src/exprs/null_literal.cpp index 1917dee12b..691a01e472 100644 --- a/be/src/exprs/null_literal.cpp +++ b/be/src/exprs/null_literal.cpp @@ -87,7 +87,7 @@ DecimalV2Val NullLiteral::get_decimalv2_val(ExprContext*, TupleRow*) { Status NullLiteral::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } DCHECK_EQ(get_num_children(), 0); @@ -102,7 +102,7 @@ Status NullLiteral::get_codegend_compute_fn(RuntimeState* state, llvm::Function* builder.CreateRet(v); *fn = codegen->finalize_function(*fn); _ir_compute_fn = *fn; - return Status::OK; + return Status::OK(); } } diff --git a/be/src/exprs/scalar_fn_call.cpp b/be/src/exprs/scalar_fn_call.cpp index b14daf7fd1..9e67d793e2 100644 --- a/be/src/exprs/scalar_fn_call.cpp +++ b/be/src/exprs/scalar_fn_call.cpp @@ -63,7 +63,7 @@ Status ScalarFnCall::prepare( DCHECK_EQ(_fn.binary_type, TFunctionBinaryType::BUILTIN); std::stringstream ss; ss << "Function " << _fn.name.function_name << " is 
not implemented."; - return Status(ss.str()); + return Status::InternalError(ss.str()); } FunctionContext::TypeDesc return_type = AnyValUtil::column_type_to_type_desc(_type); @@ -86,7 +86,7 @@ Status ScalarFnCall::prepare( _fn_context_index = context->register_func( state, return_type, arg_types, varargs_buffer_size); // _scalar_fn = OpcodeRegistry::instance()->get_function_ptr(_opcode); - Status status = Status::OK; + Status status = Status::OK(); if (_scalar_fn == NULL) { if (SymbolsUtil::is_mangled(_fn.scalar_fn.symbol)) { status = UserFunctionCache::instance()->get_function_ptr( @@ -129,7 +129,7 @@ Status ScalarFnCall::prepare( return status; } else { DCHECK_EQ(_fn.binary_type, TFunctionBinaryType::NATIVE); - return Status(Substitute("Problem loading UDF '$0':\n$1", + return Status::InternalError(Substitute("Problem loading UDF '$0':\n$1", _fn.name.function_name, status.GetDetail())); return status; } @@ -201,12 +201,12 @@ Status ScalarFnCall::open( if (scope == FunctionContext::FRAGMENT_LOCAL) { _prepare_fn(fn_ctx, FunctionContext::FRAGMENT_LOCAL); if (fn_ctx->has_error()) { - return Status(fn_ctx->error_msg()); + return Status::InternalError(fn_ctx->error_msg()); } } _prepare_fn(fn_ctx, FunctionContext::THREAD_LOCAL); if (fn_ctx->has_error()) { - return Status(fn_ctx->error_msg()); + return Status::InternalError(fn_ctx->error_msg()); } } @@ -222,7 +222,7 @@ Status ScalarFnCall::open( } } - return Status::OK; + return Status::OK(); } void ScalarFnCall::close( @@ -274,12 +274,12 @@ bool ScalarFnCall::is_constant() const { Status ScalarFnCall::get_codegend_compute_fn(RuntimeState* state, Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } for (int i = 0; i < get_num_children(); ++i) { if (_children[i]->type().type == TYPE_CHAR) { *fn = NULL; - return Status("ScalarFnCall Codegen not supported for CHAR"); + return Status::InternalError("ScalarFnCall Codegen not supported for CHAR"); } } @@ -403,7 
+403,7 @@ Status ScalarFnCall::get_codegend_compute_fn(RuntimeState* state, Function** fn) *fn = codegen->finalize_function(*fn); DCHECK(*fn != NULL); _ir_compute_fn = *fn; - return Status::OK; + return Status::OK(); } Status ScalarFnCall::get_udf(RuntimeState* state, Function** udf) { @@ -488,7 +488,7 @@ Status ScalarFnCall::get_udf(RuntimeState* state, Function** udf) { ss << "Builtin '" << _fn.name.function_name << "' with symbol '" << _fn.scalar_fn.symbol << "' does not exist. " << "Verify that all your impalads are the same version."; - return Status(ss.str()); + return Status::InternalError(ss.str()); } #else if (!SymbolsUtil::is_mangled(symbol)) { @@ -508,7 +508,7 @@ Status ScalarFnCall::get_udf(RuntimeState* state, Function** udf) { ss << "Builtin '" << _fn.name.function_name << "' with symbol '" << symbol << "' does not exist. " << "Verify that all your impalads are the same version."; - return Status(ss.str()); + return Status::InternalError(ss.str()); } // Builtin functions may use Expr::GetConstant(). Clone the function in case we need // to use it again, and rename it to something more manageable than the mangled name. 
@@ -526,16 +526,16 @@ Status ScalarFnCall::get_udf(RuntimeState* state, Function** udf) { std::stringstream ss; ss << "Unable to locate function " << _fn.scalar_fn.symbol << " from LLVM module " << _fn.hdfs_location; - return Status(ss.str()); + return Status::InternalError(ss.str()); } *udf = codegen->finalize_function(*udf); if (*udf == NULL) { - return Status("udf verify failed"); + return Status::InternalError("udf verify failed"); // TODO(zc) // TErrorCode::UDF_VERIFY_FAILED, _fn.scalar_fn.symbol, _fn.hdfs_location); } } - return Status::OK; + return Status::OK(); } Status ScalarFnCall::get_function(RuntimeState* state, const std::string& symbol, void** fn) { @@ -553,13 +553,13 @@ Status ScalarFnCall::get_function(RuntimeState* state, const std::string& symbol std::stringstream ss; ss << "Unable to locate function " << symbol << " from LLVM module " << _fn.hdfs_location; - return Status(ss.str()); + return Status::InternalError(ss.str()); } codegen->AddFunctionToJit(ir_fn, fn); - return Status::OK; + return Status::OK(); #endif } - return Status::OK; + return Status::OK(); } void ScalarFnCall::evaluate_children( diff --git a/be/src/exprs/slot_ref.cpp b/be/src/exprs/slot_ref.cpp index b3e91fa4a7..dbc0386d35 100644 --- a/be/src/exprs/slot_ref.cpp +++ b/be/src/exprs/slot_ref.cpp @@ -73,7 +73,7 @@ Status SlotRef::prepare( RuntimeState* state, const RowDescriptor& row_desc, ExprContext* ctx) { DCHECK_EQ(_children.size(), 0); if (_slot_id == -1) { - return Status::OK; + return Status::OK(); } const SlotDescriptor* slot_desc = state->desc_tbl().get_slot_descriptor(_slot_id); @@ -81,26 +81,26 @@ Status SlotRef::prepare( // TODO: create macro MAKE_ERROR() that returns a stream std::stringstream error; error << "couldn't resolve slot descriptor " << _slot_id; - return Status(error.str()); + return Status::InternalError(error.str()); } if (!slot_desc->is_materialized()) { std::stringstream error; error << "reference to non-materialized slot. 
slot_id: " << _slot_id; - return Status(error.str()); + return Status::InternalError(error.str()); } // TODO(marcel): get from runtime state _tuple_idx = row_desc.get_tuple_idx(slot_desc->parent()); if (_tuple_idx == RowDescriptor::INVALID_IDX) { - return Status("can't support"); + return Status::InternalError("can't support"); } DCHECK(_tuple_idx != RowDescriptor::INVALID_IDX); _tuple_is_nullable = row_desc.tuple_is_nullable(_tuple_idx); _slot_offset = slot_desc->tuple_offset(); _null_indicator_offset = slot_desc->null_indicator_offset(); _is_nullable = slot_desc->is_nullable(); - return Status::OK; + return Status::OK(); } int SlotRef::get_slot_ids(std::vector* slot_ids) const { @@ -171,7 +171,7 @@ std::string SlotRef::debug_string() const { Status SlotRef::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn) { if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } DCHECK_EQ(get_num_children(), 0); @@ -193,7 +193,7 @@ Status SlotRef::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn Function* _ir_compute_fn = codegen->get_registered_expr_fn(unique_slot_id); if (_ir_compute_fn != NULL) { *fn = _ir_compute_fn; - return Status::OK; + return Status::OK(); } LLVMContext& context = codegen->context(); @@ -410,7 +410,7 @@ Status SlotRef::get_codegend_compute_fn(RuntimeState* state, llvm::Function** fn *fn = codegen->finalize_function(*fn); codegen->register_expr_fn(unique_slot_id, *fn); _ir_compute_fn = *fn; - return Status::OK; + return Status::OK(); } BooleanVal SlotRef::get_boolean_val(ExprContext* context, TupleRow* row) { diff --git a/be/src/exprs/tuple_is_null_predicate.cpp b/be/src/exprs/tuple_is_null_predicate.cpp index 4c1f1370a7..a8b7ab03a8 100644 --- a/be/src/exprs/tuple_is_null_predicate.cpp +++ b/be/src/exprs/tuple_is_null_predicate.cpp @@ -44,7 +44,7 @@ Status TupleIsNullPredicate::prepare( } } - return Status::OK; + return Status::OK(); } BooleanVal 
TupleIsNullPredicate::get_boolean_val(ExprContext* ctx, TupleRow* row) { diff --git a/be/src/http/action/meta_action.cpp b/be/src/http/action/meta_action.cpp index c4d7ad7b0a..7f32c60fce 100644 --- a/be/src/http/action/meta_action.cpp +++ b/be/src/http/action/meta_action.cpp @@ -45,22 +45,22 @@ Status MetaAction::_handle_header(HttpRequest *req, std::string* json_header) { if (req_tablet_id == "" || req_schema_hash == "") { LOG(WARNING) << "invalid argument.tablet_id:" << req_tablet_id << ", schema_hash:" << req_schema_hash; - return Status("invalid arguments"); + return Status::InternalError("invalid arguments"); } uint64_t tablet_id = std::stoull(req_tablet_id); uint32_t schema_hash = std::stoul(req_schema_hash); OLAPTablePtr olap_table = OLAPEngine::get_instance()->get_table(tablet_id, schema_hash); if (olap_table == nullptr) { LOG(WARNING) << "no tablet for tablet_id:" << tablet_id << " schema hash:" << schema_hash; - return Status("no tablet exist"); + return Status::InternalError("no tablet exist"); } OLAPStatus s = OlapHeaderManager::get_json_header(olap_table->store(), tablet_id, schema_hash, json_header); if (s == OLAP_ERR_META_KEY_NOT_FOUND) { - return Status("no header exist"); + return Status::InternalError("no header exist"); } else if (s != OLAP_SUCCESS) { - return Status("backend error"); + return Status::InternalError("backend error"); } - return Status::OK; + return Status::OK(); } void MetaAction::handle(HttpRequest *req) { diff --git a/be/src/http/action/mini_load.cpp b/be/src/http/action/mini_load.cpp index e9f71b740e..b804740c77 100644 --- a/be/src/http/action/mini_load.cpp +++ b/be/src/http/action/mini_load.cpp @@ -108,16 +108,16 @@ static Status check_request(HttpRequest* req) { // check params if (!is_name_valid(params[DB_KEY])) { - return Status("Database name is not valid."); + return Status::InternalError("Database name is not valid."); } if (!is_name_valid(params[TABLE_KEY])) { - return Status("Table name is not valid."); + return 
Status::InternalError("Table name is not valid."); } if (!is_name_valid(params[LABEL_KEY])) { - return Status("Label name is not valid."); + return Status::InternalError("Label name is not valid."); } - return Status::OK; + return Status::OK(); } Status MiniLoadAction::data_saved_dir(const LoadHandle& desc, @@ -137,7 +137,7 @@ Status MiniLoadAction::data_saved_dir(const LoadHandle& desc, ss << prefix << "/" << table << "." << desc.sub_label << "." << buf << "." << tv.tv_usec; *file_path = ss.str(); - return Status::OK; + return Status::OK(); } Status MiniLoadAction::_load( @@ -220,7 +220,7 @@ Status MiniLoadAction::_load( << master_address.hostname << ":" << master_address.port << ") because: " << e.what(); LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } return Status(res.status); @@ -300,7 +300,7 @@ Status MiniLoadAction::check_auth( << master_address.hostname << ":" << master_address.port << ") because: " << e.what(); LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } return Status(res.status); @@ -334,7 +334,7 @@ Status MiniLoadAction::_on_header(HttpRequest* req) { if (body_bytes > max_body_bytes) { std::stringstream ss; ss << "file size exceed max body size, max_body_bytes=" << max_body_bytes; - return Status(ss.str()); + return Status::InternalError(ss.str()); } } else { evhttp_connection_set_max_body_size( @@ -356,7 +356,7 @@ Status MiniLoadAction::_on_header(HttpRequest* req) { { std::lock_guard l(_lock); if (_current_load.find(ctx->load_handle) != _current_load.end()) { - return Status("Duplicate mini load request."); + return Status::InternalError("Duplicate mini load request."); } _current_load.insert(ctx->load_handle); ctx->need_remove_handle = true; @@ -376,11 +376,11 @@ Status MiniLoadAction::_on_header(HttpRequest* req) { char buf[64]; LOG(WARNING) << "open file failed, path=" << ctx->file_path << ", errno=" << errno << ", errmsg=" << strerror_r(errno, buf, 
sizeof(buf)); - return Status("open file failed"); + return Status::InternalError("open file failed"); } req->set_handler_ctx(ctx.release()); - return Status::OK; + return Status::OK(); } void MiniLoadAction::on_chunk_data(HttpRequest* http_req) { @@ -464,7 +464,7 @@ Status MiniLoadAction::generate_check_load_req( const char k_basic[] = "Basic "; const std::string& auth = http_req->header(HttpHeaders::AUTHORIZATION); if (auth.compare(0, sizeof(k_basic) - 1, k_basic, sizeof(k_basic) - 1) != 0) { - return Status("Not support Basic authorization."); + return Status::InternalError("Not support Basic authorization."); } check_load_req->protocolVersion = FrontendServiceVersion::V1; @@ -473,7 +473,7 @@ Status MiniLoadAction::generate_check_load_req( std::string cluster; if (!parse_auth(str, &(check_load_req->user), &(check_load_req->passwd), &cluster)) { LOG(WARNING) << "parse auth string failed." << auth << " and str " << str; - return Status("Parse authorization failed."); + return Status::InternalError("Parse authorization failed."); } if (!cluster.empty()) { check_load_req->__set_cluster(cluster); @@ -490,7 +490,7 @@ Status MiniLoadAction::generate_check_load_req( check_load_req->__set_user_ip(user_ip); } - return Status::OK; + return Status::OK(); } bool LoadHandleCmp::operator() (const LoadHandle& lhs, const LoadHandle& rhs) const { diff --git a/be/src/http/action/pprof_actions.cpp b/be/src/http/action/pprof_actions.cpp index fb42699125..15077a3756 100644 --- a/be/src/http/action/pprof_actions.cpp +++ b/be/src/http/action/pprof_actions.cpp @@ -259,7 +259,7 @@ Status PprofActions::setup(ExecEnv* exec_env, EvHttpServer* http_server) { http_server->register_handler(HttpMethod::GET, "/pprof/symbol", action); http_server->register_handler(HttpMethod::HEAD, "/pprof/symbol", action); http_server->register_handler(HttpMethod::POST, "/pprof/symbol", action); - return Status::OK; + return Status::OK(); } } diff --git a/be/src/http/action/restore_tablet_action.cpp 
b/be/src/http/action/restore_tablet_action.cpp index e2fbb3f0ba..adbb02fe3f 100644 --- a/be/src/http/action/restore_tablet_action.cpp +++ b/be/src/http/action/restore_tablet_action.cpp @@ -68,7 +68,7 @@ Status RestoreTabletAction::_handle(HttpRequest *req) { if (tablet_id_str.empty()) { std::string error_msg = std::string( "parameter " + TABLET_ID + " not specified in url."); - return Status(error_msg); + return Status::InternalError(error_msg); } // Get schema hash @@ -76,7 +76,7 @@ Status RestoreTabletAction::_handle(HttpRequest *req) { if (schema_hash_str.empty()) { std::string error_msg = std::string( "parameter " + SCHEMA_HASH + " not specified in url."); - return Status(error_msg); + return Status::InternalError(error_msg); } // valid str format @@ -88,7 +88,7 @@ Status RestoreTabletAction::_handle(HttpRequest *req) { OLAPEngine::get_instance()->get_table(tablet_id, schema_hash); if (tablet.get() != nullptr) { LOG(WARNING) << "find tablet. tablet_id=" << tablet_id << " schema_hash=" << schema_hash; - return Status("tablet already exists, can not restore."); + return Status::InternalError("tablet already exists, can not restore."); } std::string key = std::to_string(tablet_id) + "_" + std::to_string(schema_hash); { @@ -96,7 +96,7 @@ Status RestoreTabletAction::_handle(HttpRequest *req) { std::lock_guard l(_tablet_restore_lock); if (_tablet_path_map.find(key) != _tablet_path_map.end()) { LOG(INFO) << "tablet_id:" << tablet_id << " schema_hash:" << schema_hash << " is restoring."; - return Status("tablet is already restoring"); + return Status::InternalError("tablet is already restoring"); } else { // set key in map and initialize value as "" _tablet_path_map[key] = ""; @@ -127,7 +127,7 @@ Status RestoreTabletAction::_reload_tablet( if (!s.ok()) { LOG(WARNING) << "remove invalid tablet schema hash path:" << tablet_path << " failed"; } - return Status("command executor load header failed"); + return Status::InternalError("command executor load header failed"); } 
else { LOG(INFO) << "load header success. status: " << res << ", signature: " << tablet_id; @@ -148,7 +148,7 @@ Status RestoreTabletAction::_reload_tablet( if (!s.ok()) { LOG(WARNING) << "remove time label path:" << time_label_path.string() << " failed"; } - return Status::OK; + return Status::OK(); } } @@ -159,7 +159,7 @@ Status RestoreTabletAction::_restore(const std::string& key, int64_t tablet_id, if (!ret) { LOG(WARNING) << "can not find tablet:" << tablet_id << ", schema hash:" << schema_hash; - return Status("can find tablet path in trash"); + return Status::InternalError("can find tablet path in trash"); } LOG(INFO) << "tablet path in trash:" << latest_tablet_path; std::string original_header_path = latest_tablet_path + "/" + std::to_string(tablet_id) +".hdr"; @@ -167,7 +167,7 @@ Status RestoreTabletAction::_restore(const std::string& key, int64_t tablet_id, OLAPStatus load_status = header.load_and_init(); if (load_status != OLAP_SUCCESS) { LOG(WARNING) << "header load and init error, header path:" << original_header_path; - return Status("load header failed"); + return Status::InternalError("load header failed"); } // latest_tablet_path: /root_path/trash/time_label/tablet_id/schema_hash { @@ -204,7 +204,7 @@ Status RestoreTabletAction::_restore(const std::string& key, int64_t tablet_id, if (!s.ok()) { LOG(WARNING) << "remove invalid tablet path:" << restore_tablet_path << " failed"; } - return Status("create link path failed"); + return Status::InternalError("create link path failed"); } } std::string restore_shard_path = store->get_shard_path_from_header(std::to_string(header.shard())); diff --git a/be/src/http/action/stream_load.cpp b/be/src/http/action/stream_load.cpp index d303589df9..83e6ec8f48 100644 --- a/be/src/http/action/stream_load.cpp +++ b/be/src/http/action/stream_load.cpp @@ -139,7 +139,7 @@ Status StreamLoadAction::_handle(StreamLoadContext* ctx) { LOG(WARNING) << "recevie body don't equal with body bytes, body_bytes=" << ctx->body_bytes << 
", receive_bytes=" << ctx->receive_bytes << ", id=" << ctx->id; - return Status("receive body dont't equal with body bytes"); + return Status::InternalError("receive body dont't equal with body bytes"); } if (!ctx->use_streaming) { // if we use non-streaming, we need to close file first, @@ -157,7 +157,7 @@ Status StreamLoadAction::_handle(StreamLoadContext* ctx) { // If put file succeess we need commit this load RETURN_IF_ERROR(_exec_env->stream_load_executor()->commit_txn(ctx)); - return Status::OK; + return Status::OK(); } int StreamLoadAction::on_header(HttpRequest* req) { @@ -202,7 +202,7 @@ Status StreamLoadAction::_on_header(HttpRequest* http_req, StreamLoadContext* ct // auth information if (!parse_basic_auth(*http_req, &ctx->auth)) { LOG(WARNING) << "parse basic authorization failed." << ctx->brief(); - return Status("no valid Basic authorization"); + return Status::InternalError("no valid Basic authorization"); } // check content length ctx->body_bytes = 0; @@ -214,7 +214,7 @@ Status StreamLoadAction::_on_header(HttpRequest* http_req, StreamLoadContext* ct std::stringstream ss; ss << "body exceed max size, max_body_bytes=" << max_body_bytes; - return Status(ss.str()); + return Status::InternalError(ss.str()); } } else { #ifndef BE_TEST @@ -232,7 +232,7 @@ Status StreamLoadAction::_on_header(HttpRequest* http_req, StreamLoadContext* ct LOG(WARNING) << "unknown data format." 
<< ctx->brief(); std::stringstream ss; ss << "unknown data format, format=" << http_req->header(HTTP_FORMAT_KEY); - return Status(ss.str()); + return Status::InternalError(ss.str()); } } @@ -353,7 +353,7 @@ Status StreamLoadAction::_process_put(HttpRequest* http_req, StreamLoadContext* // if we not use streaming, we must download total content before we begin // to process this load if (!ctx->use_streaming) { - return Status::OK; + return Status::OK(); } return _exec_env->stream_load_executor()->execute_plan_fragment(ctx); } @@ -371,7 +371,7 @@ Status StreamLoadAction::_data_saved_path(HttpRequest* req, std::string* file_pa std::stringstream ss; ss << prefix << "/" << req->param(HTTP_TABLE_KEY) << "." << buf << "." << tv.tv_usec; *file_path = ss.str(); - return Status::OK; + return Status::OK(); } } diff --git a/be/src/http/download_action.cpp b/be/src/http/download_action.cpp index 3e4176db01..1d4db19573 100644 --- a/be/src/http/download_action.cpp +++ b/be/src/http/download_action.cpp @@ -234,14 +234,14 @@ std::string DownloadAction::get_content_type(const std::string& file_name) { Status DownloadAction::check_token(HttpRequest *req) { const std::string& token_str = req->param(TOKEN_PARAMETER); if (token_str.empty()) { - return Status("token is not specified."); + return Status::InternalError("token is not specified."); } if (token_str != _exec_env->token()) { - return Status("invalid token."); + return Status::InternalError("invalid token."); } - return Status::OK; + return Status::OK(); } Status DownloadAction::check_path_is_allowed(const std::string& file_path) { @@ -249,17 +249,17 @@ Status DownloadAction::check_path_is_allowed(const std::string& file_path) { boost::system::error_code errcode; boost::filesystem::path path = canonical(file_path, errcode); if (errcode.value() != boost::system::errc::success) { - return Status("file path is invalid: " + file_path); + return Status::InternalError("file path is invalid: " + file_path); } std::string 
canonical_file_path = path.string(); for (auto& allow_path : _allow_paths) { if (FileSystemUtil::contain_path(allow_path, canonical_file_path)) { - return Status::OK; + return Status::OK(); } } - return Status("file path is not allowed: " + canonical_file_path); + return Status::InternalError("file path is not allowed: " + canonical_file_path); } Status DownloadAction::check_log_path_is_allowed(const std::string& file_path) { @@ -267,15 +267,15 @@ Status DownloadAction::check_log_path_is_allowed(const std::string& file_path) { boost::system::error_code errcode; boost::filesystem::path path = canonical(file_path, errcode); if (errcode.value() != boost::system::errc::success) { - return Status("file path is invalid: " + file_path); + return Status::InternalError("file path is invalid: " + file_path); } std::string canonical_file_path = path.string(); if (FileSystemUtil::contain_path(_error_log_root_dir, canonical_file_path)) { - return Status::OK; + return Status::OK(); } - return Status("file path is not allowed: " + file_path); + return Status::InternalError("file path is not allowed: " + file_path); } } // end namespace doris diff --git a/be/src/http/ev_http_server.cpp b/be/src/http/ev_http_server.cpp index aa564c4cde..cf8e71c5dd 100644 --- a/be/src/http/ev_http_server.cpp +++ b/be/src/http/ev_http_server.cpp @@ -120,7 +120,7 @@ Status EvHttpServer::start() { _workers.emplace_back(worker); _workers[i].detach(); } - return Status::OK; + return Status::OK(); } void EvHttpServer::stop() { @@ -135,7 +135,7 @@ Status EvHttpServer::_bind() { if (res < 0) { std::stringstream ss; ss << "convert address failed, host=" << _host << ", port=" << _port; - return Status(ss.str()); + return Status::InternalError(ss.str()); } _server_fd = butil::tcp_listen(point, true); if (_server_fd < 0) { @@ -143,7 +143,7 @@ Status EvHttpServer::_bind() { std::stringstream ss; ss << "tcp listen failed, errno=" << errno << ", errmsg=" << strerror_r(errno, buf, sizeof(buf)); - return 
Status(ss.str()); + return Status::InternalError(ss.str()); } res = butil::make_non_blocking(_server_fd); if (res < 0) { @@ -151,9 +151,9 @@ Status EvHttpServer::_bind() { std::stringstream ss; ss << "make socket to non_blocking failed, errno=" << errno << ", errmsg=" << strerror_r(errno, buf, sizeof(buf)); - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } bool EvHttpServer::register_handler( diff --git a/be/src/http/http_client.cpp b/be/src/http/http_client.cpp index f56592125d..b1fdb66eff 100644 --- a/be/src/http/http_client.cpp +++ b/be/src/http/http_client.cpp @@ -37,7 +37,7 @@ Status HttpClient::init(const std::string& url) { if (_curl == nullptr) { _curl = curl_easy_init(); if (_curl == nullptr) { - return Status("fail to initalize curl"); + return Status::InternalError("fail to initalize curl"); } } else { curl_easy_reset(_curl); @@ -52,30 +52,30 @@ Status HttpClient::init(const std::string& url) { auto code = curl_easy_setopt(_curl, CURLOPT_ERRORBUFFER, _error_buf); if (code != CURLE_OK) { LOG(WARNING) << "fail to set CURLOPT_ERRORBUFFER, msg=" << _to_errmsg(code); - return Status("fail to set error buffer"); + return Status::InternalError("fail to set error buffer"); } // forbid signals code = curl_easy_setopt(_curl, CURLOPT_NOSIGNAL, 1L); if (code != CURLE_OK) { LOG(WARNING) << "fail to set CURLOPT_NOSIGNAL, msg=" << _to_errmsg(code); - return Status("fail to set CURLOPT_NOSIGNAL"); + return Status::InternalError("fail to set CURLOPT_NOSIGNAL"); } // set fail on error code = curl_easy_setopt(_curl, CURLOPT_FAILONERROR, 1L); if (code != CURLE_OK) { LOG(WARNING) << "fail to set CURLOPT_FAILONERROR, msg=" << _to_errmsg(code); - return Status("fail to set CURLOPT_FAILONERROR"); + return Status::InternalError("fail to set CURLOPT_FAILONERROR"); } // set redirect code = curl_easy_setopt(_curl, CURLOPT_FOLLOWLOCATION, 1L); if (code != CURLE_OK) { LOG(WARNING) << "fail to set 
CURLOPT_FOLLOWLOCATION, msg=" << _to_errmsg(code); - return Status("fail to set CURLOPT_FOLLOWLOCATION"); + return Status::InternalError("fail to set CURLOPT_FOLLOWLOCATION"); } code = curl_easy_setopt(_curl, CURLOPT_MAXREDIRS, 20); if (code != CURLE_OK) { LOG(WARNING) << "fail to set CURLOPT_MAXREDIRS, msg=" << _to_errmsg(code); - return Status("fail to set CURLOPT_MAXREDIRS"); + return Status::InternalError("fail to set CURLOPT_MAXREDIRS"); } curl_write_callback callback = [] (char* buffer, size_t size, size_t nmemb, void* param) { @@ -87,21 +87,21 @@ Status HttpClient::init(const std::string& url) { code = curl_easy_setopt(_curl, CURLOPT_WRITEFUNCTION, callback); if (code != CURLE_OK) { LOG(WARNING) << "fail to set CURLOPT_WRITEFUNCTION, msg=" << _to_errmsg(code); - return Status("fail to set CURLOPT_WRITEFUNCTION"); + return Status::InternalError("fail to set CURLOPT_WRITEFUNCTION"); } code = curl_easy_setopt(_curl, CURLOPT_WRITEDATA, (void*) this); if (code != CURLE_OK) { LOG(WARNING) << "fail to set CURLOPT_WRITEDATA, msg=" << _to_errmsg(code); - return Status("fail to set CURLOPT_WRITEDATA"); + return Status::InternalError("fail to set CURLOPT_WRITEDATA"); } // set url code = curl_easy_setopt(_curl, CURLOPT_URL, url.c_str()); if (code != CURLE_OK) { LOG(WARNING) << "failed to set CURLOPT_URL, errmsg=" << _to_errmsg(code); - return Status("fail to set CURLOPT_URL"); + return Status::InternalError("fail to set CURLOPT_URL"); } - return Status::OK; + return Status::OK(); } void HttpClient::set_method(HttpMethod method) { @@ -162,9 +162,9 @@ Status HttpClient::execute(const std::function& result) const = 0; + + // Return the size of this file + virtual Status size(uint64_t* size) const = 0; + + // Return name of this file + virtual std::string file_name() const = 0; +}; + +} diff --git a/be/src/olap/env/io_posix.cpp b/be/src/olap/env/io_posix.cpp new file mode 100644 index 0000000000..4a4ffb0b2f --- /dev/null +++ b/be/src/olap/env/io_posix.cpp @@ -0,0 +1,117 @@ 
+// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+ +#include "olap/env/io_posix.h" + +#include <sys/stat.h> +#include <sys/uio.h> +#include <unistd.h> +#include <algorithm> + +#include "common/logging.h" +#include "gutil/macros.h" +#include "gutil/port.h" +#include "util/slice.h" + +namespace doris { + +static Status do_readv_at(int fd, const std::string& filename, uint64_t offset, + const Slice* res, size_t res_cnt) { + // Convert the results into the iovec vector to request + // and calculate the total bytes requested + size_t bytes_req = 0; + struct iovec iov[res_cnt]; + for (size_t i = 0; i < res_cnt; i++) { + Slice& result = const_cast<Slice&>(res[i]); + bytes_req += result.size(); + iov[i] = { result.mutable_data(), result.size() }; + } + + uint64_t cur_offset = offset; + size_t completed_iov = 0; + size_t rem = bytes_req; + while (rem > 0) { + // Never request more than IOV_MAX in one request + size_t iov_count = std::min(res_cnt - completed_iov, static_cast<size_t>(IOV_MAX)); + ssize_t r; + RETRY_ON_EINTR(r, preadv(fd, iov + completed_iov, iov_count, cur_offset)); + + if (PREDICT_FALSE(r < 0)) { + // An error: return a non-ok status. + // TODO(zc): return IOError(filename, errno); + return Status::InternalError("IOError"); + } + + if (PREDICT_FALSE(r == 0)) { + // EOF. + // return Status::EndOfFile( + // Substitute("EOF trying to read $0 bytes at offset $1", bytes_req, offset)); + return Status::InternalError("EOF"); + } + + if (PREDICT_TRUE(r == rem)) { + // All requested bytes were read. This is almost always the case. + return Status::OK(); + } + DCHECK_LE(r, rem); + // Adjust iovec vector based on bytes read for the next request + ssize_t bytes_rem = r; + for (size_t i = completed_iov; i < res_cnt; i++) { + if (bytes_rem >= iov[i].iov_len) { + // The full length of this iovec was read + completed_iov++; + bytes_rem -= iov[i].iov_len; + } else { + // Partially read this result. + // Adjust the iov_len and iov_base to request only the missing data. 
+ iov[i].iov_base = static_cast<uint8_t*>(iov[i].iov_base) + bytes_rem; + iov[i].iov_len -= bytes_rem; + break; // Don't need to adjust remaining iovec's + } + } + cur_offset += r; + rem -= r; + } + DCHECK_EQ(0, rem); + return Status::OK(); +} + +Status PosixRandomAccessFile::readv_at(uint64_t offset, const Slice* result, size_t count) { + return do_readv_at(_fd, _filename, offset, result, count); +} + +PosixRandomAccessFile::~PosixRandomAccessFile() { + int res; + RETRY_ON_EINTR(res, close(_fd)); + if (res != 0) { + char buf[64]; + LOG(WARNING) << "close file failed, name=" << _filename + << ", errno=" << errno << ", msg=" << strerror_r(errno, buf, 64); + } +} + +Status PosixRandomAccessFile::size(uint64_t* size) const { + struct stat st; + auto res = fstat(_fd, &st); + if (res != 0) { + return Status::InternalError("failed to get file stat"); + } + *size = st.st_size; + return Status::OK(); +} + +} diff --git a/be/src/olap/env/io_posix.h b/be/src/olap/env/io_posix.h new file mode 100644 index 0000000000..7ff70b7076 --- /dev/null +++ b/be/src/olap/env/io_posix.h @@ -0,0 +1,46 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include <string> + +#include "olap/env/env.h" + +namespace doris { + +class PosixRandomAccessFile : public RandomAccessFile { +public: + PosixRandomAccessFile(std::string filename, int fd) : _filename(std::move(filename)), _fd(fd) { + } + ~PosixRandomAccessFile() override; + + Status read_at(uint64_t offset, const Slice& result) override { + return readv_at(offset, &result, 1); + } + + Status readv_at(uint64_t offset, const Slice* result, size_t count) override; + + Status size(uint64_t* size) const override; + + std::string file_name() const override { return _filename; } +private: + std::string _filename; + int _fd; +}; + +} diff --git a/be/src/olap/olap_engine.cpp b/be/src/olap/olap_engine.cpp index e3303dd83e..656fd437bb 100644 --- a/be/src/olap/olap_engine.cpp +++ b/be/src/olap/olap_engine.cpp @@ -83,9 +83,9 @@ bool _sort_table_by_create_time(const OLAPTablePtr& a, const OLAPTablePtr& b) { static Status _validate_options(const EngineOptions& options) { if (options.store_paths.empty()) { - return Status("store paths is empty");; + return Status::InternalError("store paths is empty");; } - return Status::OK; + return Status::OK(); } Status OLAPEngine::open(const EngineOptions& options, OLAPEngine** engine_ptr) { @@ -94,15 +94,15 @@ Status OLAPEngine::open(const EngineOptions& options, OLAPEngine** engine_ptr) { auto st = engine->open(); if (st != OLAP_SUCCESS) { LOG(WARNING) << "engine open failed, res=" << st; - return Status("open engine failed"); + return Status::InternalError("open engine failed"); } st = engine->_start_bg_worker(); if (st != OLAP_SUCCESS) { LOG(WARNING) << "engine start background failed, res=" << st; - return Status("open engine failed"); + return Status::InternalError("open engine failed"); } *engine_ptr = engine.release(); - return Status::OK; + return Status::OK(); } OLAPEngine::OLAPEngine(const EngineOptions& options) @@ -660,7 +660,7 @@ Status OLAPEngine::set_cluster_id(int32_t cluster_id) { } 
_effective_cluster_id = cluster_id; _is_all_cluster_id_exist = true; - return Status::OK; + return Status::OK(); } std::vector OLAPEngine::get_stores_for_create_table( diff --git a/be/src/olap/rowset/segment_v2/encoding_info.cpp b/be/src/olap/rowset/segment_v2/encoding_info.cpp index 33037b10a2..854894bc9b 100644 --- a/be/src/olap/rowset/segment_v2/encoding_info.cpp +++ b/be/src/olap/rowset/segment_v2/encoding_info.cpp @@ -34,10 +34,10 @@ struct TypeEncodingTraits { }; template struct TypeEncodingTraits { static Status create_page_builder(PageBuilder** builder) { - return Status::OK; + return Status::OK(); } static Status create_page_decoder(PageDecoder** decoder) { - return Status::OK; + return Status::OK(); } }; @@ -101,10 +101,10 @@ Status EncodingInfoResolver::get( auto key = std::make_pair(data_type, encoding_type); auto it = _encoding_map.find(key); if (it == std::end(_encoding_map)) { - return Status("fail to find valid type encoding"); + return Status::InternalError("fail to find valid type encoding"); } *out = it->second; - return Status::OK; + return Status::OK(); } static EncodingInfoResolver s_encoding_info_resolver; diff --git a/be/src/olap/rowset/segment_v2/ordinal_page_index.cpp b/be/src/olap/rowset/segment_v2/ordinal_page_index.cpp index 9bd96b1fc0..e8388a4459 100644 --- a/be/src/olap/rowset/segment_v2/ordinal_page_index.cpp +++ b/be/src/olap/rowset/segment_v2/ordinal_page_index.cpp @@ -38,14 +38,14 @@ Status OrdinalPageIndex::load() { for (int i = 0; i < _num_pages; ++i) { ptr = decode_varint32_ptr(ptr, limit, &_rowids[i]); if (ptr == nullptr) { - return Status("Data corruption"); + return Status::InternalError("Data corruption"); } ptr = _pages[i].decode_from(ptr, limit); if (ptr == nullptr) { - return Status("Data corruption"); + return Status::InternalError("Data corruption"); } } - return Status::OK; + return Status::OK(); } OrdinalPageIndexIterator OrdinalPageIndex::seek_at_or_before(rowid_t rid) { diff --git a/be/src/olap/store.cpp 
b/be/src/olap/store.cpp index 1ff613f8d9..674c0c18d1 100755 --- a/be/src/olap/store.cpp +++ b/be/src/olap/store.cpp @@ -77,19 +77,19 @@ Status OlapStore::load() { DIRECT_IO_ALIGNMENT, TEST_FILE_BUF_SIZE) != 0) { LOG(WARNING) << "fail to allocate memory. size=" << TEST_FILE_BUF_SIZE; - return Status("No memory"); + return Status::InternalError("No memory"); } if (posix_memalign((void**)&_test_file_read_buf, DIRECT_IO_ALIGNMENT, TEST_FILE_BUF_SIZE) != 0) { LOG(WARNING) << "fail to allocate memory. size=" << TEST_FILE_BUF_SIZE; - return Status("No memory"); + return Status::InternalError("No memory"); } RETURN_IF_ERROR(_check_path_exist()); std::string align_tag_path = _path + ALIGN_TAG_PREFIX; if (access(align_tag_path.c_str(), F_OK) == 0) { LOG(WARNING) << "align tag was found, path=" << _path; - return Status("invalid root path: "); + return Status::InternalError("invalid root path: "); } RETURN_IF_ERROR(_init_cluster_id()); @@ -98,7 +98,7 @@ Status OlapStore::load() { RETURN_IF_ERROR(_init_meta()); _is_used = true; - return Status::OK; + return Status::OK(); } Status OlapStore::_check_path_exist() { @@ -107,7 +107,7 @@ Status OlapStore::_check_path_exist() { char buf[64]; LOG(WARNING) << "opendir failed, path=" << _path << ", errno=" << errno << ", errmsg=" << strerror_r(errno, buf, 64); - return Status("opendir failed"); + return Status::InternalError("opendir failed"); } struct dirent dirent; struct dirent* result = nullptr; @@ -116,10 +116,10 @@ Status OlapStore::_check_path_exist() { LOG(WARNING) << "readdir failed, path=" << _path << ", errno=" << errno << ", errmsg=" << strerror_r(errno, buf, 64); closedir(dirp); - return Status("readdir failed"); + return Status::InternalError("readdir failed"); } closedir(dirp); - return Status::OK; + return Status::OK(); } Status OlapStore::_init_cluster_id() { @@ -130,7 +130,7 @@ Status OlapStore::_init_cluster_id() { char errmsg[64]; LOG(WARNING) << "fail to create file. 
[path='" << cluster_id_path << "' err='" << strerror_r(errno, errmsg, 64) << "']"; - return Status("invalid store path: create cluster id failed"); + return Status::InternalError("invalid store path: create cluster id failed"); } } @@ -139,7 +139,7 @@ Status OlapStore::_init_cluster_id() { fp = fopen(cluster_id_path.c_str(), "r+b"); if (fp == NULL) { LOG(WARNING) << "fail to open cluster id path. path=" << cluster_id_path; - return Status("invalid store path: open cluster id failed"); + return Status::InternalError("invalid store path: open cluster id failed"); } int lock_res = flock(fp->_fileno, LOCK_EX | LOCK_NB); @@ -147,7 +147,7 @@ Status OlapStore::_init_cluster_id() { LOG(WARNING) << "fail to lock file descriptor. path=" << cluster_id_path; fclose(fp); fp = NULL; - return Status("invalid store path: flock cluster id failed"); + return Status::InternalError("invalid store path: flock cluster id failed"); } // obtain cluster id of all root paths @@ -162,7 +162,7 @@ Status OlapStore::_read_cluster_id(const std::string& path, int32_t* cluster_id) std::fstream fs(path.c_str(), std::fstream::in); if (!fs.is_open()) { LOG(WARNING) << "fail to open cluster id path. [path='" << path << "']"; - return Status("open file failed"); + return Status::InternalError("open file failed"); } fs >> tmp_cluster_id; @@ -179,9 +179,9 @@ Status OlapStore::_read_cluster_id(const std::string& path, int32_t* cluster_id) fs.rdstate() & std::fstream::eofbit, fs.rdstate() & std::fstream::failbit, fs.rdstate() & std::fstream::badbit); - return Status("cluster id file corrupt"); + return Status::InternalError("cluster id file corrupt"); } - return Status::OK; + return Status::OK(); } Status OlapStore::_init_extension_and_capacity() { @@ -194,7 +194,7 @@ Status OlapStore::_init_extension_and_capacity() { _storage_medium = TStorageMedium::HDD; } else { LOG(WARNING) << "store path has wrong extension. 
path=" << _path; - return Status("invalid sotre path: invalid extension"); + return Status::InternalError("invalid store path: invalid extension"); } } else { _storage_medium = TStorageMedium::HDD; } @@ -208,16 +208,16 @@ << "path=" << _path << ", capacity_bytes=" << _capacity_bytes << ", disk_capacity=" << disk_capacity; - return Status("invalid store path: invalid capacity"); + return Status::InternalError("invalid store path: invalid capacity"); } std::string data_path = _path + DATA_PREFIX; if (!check_dir_existed(data_path) && create_dir(data_path) != OLAP_SUCCESS) { LOG(WARNING) << "failed to create data root path. path=" << data_path; - return Status("invalid store path: failed to create data directory"); + return Status::InternalError("invalid store path: failed to create data directory"); } - return Status::OK; + return Status::OK(); } Status OlapStore::_init_file_system() { @@ -226,7 +226,7 @@ char errmsg[64]; LOG(WARNING) << "stat failed, path=" << _path << ", errno=" << errno << ", errmsg=" << strerror_r(errno, errmsg, 64); - return Status("invalid store path: stat failed"); + return Status::InternalError("invalid store path: stat failed"); } dev_t mount_device; @@ -241,7 +241,7 @@ char errmsg[64]; LOG(WARNING) << "setmntent failed, path=" << kMtabPath << ", errno=" << errno << ", errmsg=" << strerror_r(errno, errmsg, 64); - return Status("invalid store path: setmntent failed"); + return Status::InternalError("invalid store path: setmntent failed"); } bool is_find = false; @@ -268,12 +268,12 @@ if (!is_find) { LOG(WARNING) << "fail to find file system, path=" << _path; - return Status("invalid store path: find file system failed"); + return Status::InternalError("invalid store path: find file system failed"); } _file_system = mount_entry->mnt_fsname; - return Status::OK; + return 
Status::OK(); } Status OlapStore::_init_meta() { @@ -286,24 +286,24 @@ Status OlapStore::_init_meta() { _meta = new(std::nothrow) OlapMeta(_path); if (_meta == nullptr) { LOG(WARNING) << "new olap meta failed"; - return Status("new olap meta failed"); + return Status::InternalError("new olap meta failed"); } OLAPStatus res = _meta->init(); if (res != OLAP_SUCCESS) { LOG(WARNING) << "init meta failed"; - return Status("init meta failed"); + return Status::InternalError("init meta failed"); } - return Status::OK; + return Status::OK(); } Status OlapStore::set_cluster_id(int32_t cluster_id) { if (_cluster_id != -1) { if (_cluster_id == cluster_id) { - return Status::OK; + return Status::OK(); } LOG(ERROR) << "going to set cluster id to already assigned store, cluster_id=" << _cluster_id << ", new_cluster_id=" << cluster_id; - return Status("going to set cluster id to already assigned store"); + return Status::InternalError("going to set cluster id to already assigned store"); } return _write_cluster_id_to_path(_cluster_id_path(), cluster_id); } @@ -312,11 +312,11 @@ Status OlapStore::_write_cluster_id_to_path(const std::string& path, int32_t clu std::fstream fs(path.c_str(), std::fstream::out); if (!fs.is_open()) { LOG(WARNING) << "fail to open cluster id path. 
path=" << path; - return Status("IO Error"); + return Status::InternalError("IO Error"); } fs << cluster_id; fs.close(); - return Status::OK; + return Status::OK(); } void OlapStore::health_check() { diff --git a/be/src/runtime/buffer_control_block.cpp b/be/src/runtime/buffer_control_block.cpp index 6199d731d6..61f45a19ca 100644 --- a/be/src/runtime/buffer_control_block.cpp +++ b/be/src/runtime/buffer_control_block.cpp @@ -80,14 +80,14 @@ BufferControlBlock::~BufferControlBlock() { } Status BufferControlBlock::init() { - return Status::OK; + return Status::OK(); } Status BufferControlBlock::add_batch(TFetchDataResult* result) { boost::unique_lock l(_lock); if (_is_cancelled) { - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } int num_rows = result->result_batch.rows.size(); @@ -98,7 +98,7 @@ Status BufferControlBlock::add_batch(TFetchDataResult* result) { } if (_is_cancelled) { - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } if (_waiting_rpc.empty()) { @@ -112,7 +112,7 @@ Status BufferControlBlock::add_batch(TFetchDataResult* result) { delete result; _packet_num++; } - return Status::OK; + return Status::OK(); } Status BufferControlBlock::get_batch(TFetchDataResult* result) { @@ -129,7 +129,7 @@ Status BufferControlBlock::get_batch(TFetchDataResult* result) { // cancelled if (_is_cancelled) { - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } if (_batch_queue.empty()) { @@ -138,10 +138,10 @@ Status BufferControlBlock::get_batch(TFetchDataResult* result) { result->eos = true; result->__set_packet_num(_packet_num); _packet_num++; - return Status::OK; + return Status::OK(); } else { // can not get here - return Status("Internal error, can not Get here!"); + return Status::InternalError("Internal error, can not Get here!"); } } @@ -158,7 +158,7 @@ Status BufferControlBlock::get_batch(TFetchDataResult* result) { delete item; item = NULL; - return Status::OK; + return Status::OK(); } void 
BufferControlBlock::get_batch(GetResultBatchCtx* ctx) { @@ -168,7 +168,7 @@ void BufferControlBlock::get_batch(GetResultBatchCtx* ctx) { return; } if (_is_cancelled) { - ctx->on_failure(Status::CANCELLED); + ctx->on_failure(Status::Cancelled("Cancelled")); return; } if (!_batch_queue.empty()) { @@ -213,7 +213,7 @@ Status BufferControlBlock::close(Status exec_status) { } _waiting_rpc.clear(); } - return Status::OK; + return Status::OK(); } Status BufferControlBlock::cancel() { @@ -222,10 +222,10 @@ Status BufferControlBlock::cancel() { _data_removal.notify_all(); _data_arriaval.notify_all(); for (auto& ctx : _waiting_rpc) { - ctx->on_failure(Status::CANCELLED); + ctx->on_failure(Status::Cancelled("Cancelled")); } _waiting_rpc.clear(); - return Status::OK; + return Status::OK(); } } diff --git a/be/src/runtime/buffered_block_mgr.cpp b/be/src/runtime/buffered_block_mgr.cpp index 03b79266a8..ff8acc3004 100644 --- a/be/src/runtime/buffered_block_mgr.cpp +++ b/be/src/runtime/buffered_block_mgr.cpp @@ -39,7 +39,7 @@ Status BufferedBlockMgr::create(RuntimeState* state, int64_t block_size, boost::shared_ptr* block_mgr) { block_mgr->reset(new BufferedBlockMgr(state, block_size)); (*block_mgr)->init(state); - return Status::OK; + return Status::OK(); } void BufferedBlockMgr::init(RuntimeState* state) { @@ -63,12 +63,12 @@ Status BufferedBlockMgr::get_new_block(Block** block, int64_t len) { *block = NULL; Block* new_block = _obj_pool.add(new Block()); if (UNLIKELY(new_block == NULL)) { - return Status("Allocate memory failed."); + return Status::InternalError("Allocate memory failed."); } uint8_t* buffer = _tuple_pool->allocate(len); if (UNLIKELY(buffer == NULL)) { - return Status("Allocate memory failed."); + return Status::InternalError("Allocate memory failed."); } //new_block->set_buffer_desc(_obj_pool.Add(new BufferDescriptor(buffer, len)); @@ -77,15 +77,15 @@ Status BufferedBlockMgr::get_new_block(Block** block, int64_t len) { *block = new_block; if 
(UNLIKELY(_state->instance_mem_tracker()->any_limit_exceeded())) { - return Status::MEM_LIMIT_EXCEEDED; + return Status::MemoryLimitExceeded("Memory limit exceeded"); } - return Status::OK; + return Status::OK(); } //Status BufferedBlockMgr::Block::delete() { //// TODO: delete block or not,we should delete the new BLOCK - //return Status::OK; + //return Status::OK(); //} Status BufferedBlockMgr::get_new_block(Block** block) { diff --git a/be/src/runtime/buffered_block_mgr.h b/be/src/runtime/buffered_block_mgr.h index 4b9c97b5f9..e268cf61b7 100644 --- a/be/src/runtime/buffered_block_mgr.h +++ b/be/src/runtime/buffered_block_mgr.h @@ -90,7 +90,7 @@ public: } Status delete_block() { - return Status::OK; + return Status::OK(); } // Debug helper method to print the state of a block. diff --git a/be/src/runtime/buffered_block_mgr2.cc b/be/src/runtime/buffered_block_mgr2.cc index 11806e2c00..ee38101ad0 100644 --- a/be/src/runtime/buffered_block_mgr2.cc +++ b/be/src/runtime/buffered_block_mgr2.cc @@ -30,6 +30,7 @@ #include "util/debug_util.h" #include "util/uid_util.h" #include "util/pretty_printer.h" +#include "util/stack_util.h" using std::string; using std::stringstream; @@ -250,7 +251,7 @@ Status BufferedBlockMgr2::create( } } (*block_mgr)->init(state->exec_env()->disk_io_mgr(), profile, parent, mem_limit); - return Status::OK; + return Status::OK(); } int64_t BufferedBlockMgr2::available_buffers(Client* client) const { @@ -276,7 +277,7 @@ Status BufferedBlockMgr2::register_client( lock_guard lock(_lock); *client = _obj_pool.add(a_client); _unfullfilled_reserved_buffers += num_reserved_buffers; - return Status::OK; + return Status::OK(); } void BufferedBlockMgr2::clear_reservations(Client* client) { @@ -371,7 +372,7 @@ bool BufferedBlockMgr2::consume_memory(Client* client, int64_t size) { ++buffers_acquired; } while (buffers_acquired != buffers_needed); - Status status = Status::OK; + Status status = Status::OK(); if (buffers_acquired == buffers_needed) { status = 
write_unpinned_blocks(); } @@ -425,19 +426,18 @@ bool BufferedBlockMgr2::is_cancelled() { } Status BufferedBlockMgr2::mem_limit_too_low_error(Client* client, int node_id) { + VLOG_QUERY << "Query: " << _query_id << ". Node=" << node_id + << " ran out of memory: " << endl + << debug_internal() << endl << client->debug_string(); + // TODO: what to print here. We can't know the value of the entire query here. - Status status = Status::MEM_LIMIT_EXCEEDED; stringstream error_msg; error_msg << "The memory limit is set too low to initialize spilling operator (id=" << node_id << "). The minimum required memory to spill this operator is " << PrettyPrinter::print( client->_num_reserved_buffers * max_block_size(), TUnit::BYTES) << "."; - status.add_error_msg(error_msg.str()); - VLOG_QUERY << "Query: " << _query_id << ". Node=" << node_id - << " ran out of memory: " << endl - << debug_internal() << endl << client->debug_string(); - return status; + return Status::MemoryLimitExceeded(error_msg.str()); } Status BufferedBlockMgr2::get_new_block( @@ -450,7 +450,7 @@ Status BufferedBlockMgr2::get_new_block( { lock_guard lock(_lock); if (_is_cancelled){ - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } new_block = get_unused_block(client); DCHECK(new_block->validate()) << endl << new_block->debug_string(); @@ -472,7 +472,7 @@ Status BufferedBlockMgr2::get_new_block( new_block->_is_deleted = true; return_unused_block(new_block); } - return Status::OK; + return Status::OK(); } } @@ -500,11 +500,11 @@ Status BufferedBlockMgr2::get_new_block( DCHECK(new_block == NULL || new_block->is_pinned()); *block = new_block; - return Status::OK; + return Status::OK(); } Status BufferedBlockMgr2::transfer_buffer(Block* dst, Block* src, bool unpin) { - Status status = Status::OK; + Status status = Status::OK(); DCHECK(dst != NULL); DCHECK(src != NULL); @@ -531,7 +531,7 @@ Status BufferedBlockMgr2::transfer_buffer(Block* dst, Block* src, bool unpin) { if (_is_cancelled) { // We 
can't be sure the write succeeded, so return the buffer to src. src->_is_pinned = true; - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } DCHECK(!src->_in_write); } @@ -544,7 +544,7 @@ Status BufferedBlockMgr2::transfer_buffer(Block* dst, Block* src, bool unpin) { src->_is_deleted = true; return_unused_block(src); } - return Status::OK; + return Status::OK(); } BufferedBlockMgr2::~BufferedBlockMgr2() { @@ -615,13 +615,13 @@ MemTracker* BufferedBlockMgr2::get_tracker(Client* client) const { // IMPALA-1884. Status BufferedBlockMgr2::delete_or_unpin_block(Block* block, bool unpin) { if (block == NULL) { - return is_cancelled() ? Status::CANCELLED : Status::OK; + return is_cancelled() ? Status::Cancelled("Cancelled") : Status::OK(); } if (unpin) { return block->unpin(); } else { block->del(); - return is_cancelled() ? Status::CANCELLED : Status::OK; + return is_cancelled() ? Status::Cancelled("Cancelled") : Status::OK(); } } @@ -645,7 +645,7 @@ Status BufferedBlockMgr2::pin_block(Block* block, bool* pinned, Block* release_b if (!block->_is_pinned) { if (release_block == NULL) { - return Status::OK; + return Status::OK(); } if (block->_buffer_desc != NULL) { @@ -709,11 +709,11 @@ Status BufferedBlockMgr2::unpin_block(Block* block) { lock_guard unpinned_lock(_lock); if (_is_cancelled) { - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } DCHECK(block->validate()) << endl << block->debug_string(); if (!block->_is_pinned) { - return Status::OK; + return Status::OK(); } DCHECK_EQ(block->_buffer_desc->len, _max_block_size) << "Can only unpin io blocks."; DCHECK(validate()) << endl << debug_internal(); @@ -732,12 +732,12 @@ Status BufferedBlockMgr2::unpin_block(Block* block) { RETURN_IF_ERROR(write_unpinned_blocks()); DCHECK(validate()) << endl << debug_internal(); DCHECK(block->validate()) << endl << block->debug_string(); - return Status::OK; + return Status::OK(); } Status BufferedBlockMgr2::write_unpinned_blocks() { if 
(_disable_spill) { - return Status::OK; + return Status::OK(); } // Assumes block manager lock is already taken. @@ -750,7 +750,7 @@ Status BufferedBlockMgr2::write_unpinned_blocks() { ++_non_local_outstanding_writes; } DCHECK(validate()) << endl << debug_internal(); - return Status::OK; + return Status::OK(); } Status BufferedBlockMgr2::write_unpinned_block(Block* block) { @@ -801,13 +801,13 @@ Status BufferedBlockMgr2::write_unpinned_block(Block* block) { } #endif } - return Status::OK; + return Status::OK(); } Status BufferedBlockMgr2::allocate_scratch_space(int64_t block_size, TmpFileMgr::File** tmp_file, int64_t* file_offset) { // Assumes block manager lock is already taken. - vector errs; + vector errs; // Find the next physical file in round-robin order and create a write range for it. for (int attempt = 0; attempt < _tmp_files.size(); ++attempt) { *tmp_file = &_tmp_files[_next_block_index]; @@ -817,24 +817,24 @@ Status BufferedBlockMgr2::allocate_scratch_space(int64_t block_size, } Status status = (*tmp_file)->allocate_space(_max_block_size, file_offset); if (status.ok()) { - return Status::OK; + return Status::OK(); } // Log error and try other files if there was a problem. Problematic files will be // blacklisted so we will not repeatedly log the same error. LOG(WARNING) << "Error while allocating temporary file range: " << status.get_error_msg() << ". 
Will try another temporary file."; - errs.push_back(status); + errs.emplace_back(status.message().data, status.message().size); } - Status err_status("No usable temporary files: space could not be allocated on any " - "temporary device."); + Status err_status = Status::InternalError( + "No usable temporary files: space could not be allocated on any temporary device."); for (int i = 0; i < errs.size(); ++i) { - err_status.add_error(errs[i]); + err_status = err_status.clone_and_append(errs[i]); } return err_status; } void BufferedBlockMgr2::write_complete(Block* block, const Status& write_status) { - Status status = Status::OK; + Status status = Status::OK(); lock_guard lock(_lock); _outstanding_writes_counter->update(-1); DCHECK(validate()) << endl << debug_internal(); @@ -977,7 +977,7 @@ Status BufferedBlockMgr2::find_buffer_for_block(Block* block, bool* in_mem) { unique_lock l(_lock); if (_is_cancelled) { - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } // First check if there is enough reserved memory to satisfy this request. @@ -1001,7 +1001,7 @@ Status BufferedBlockMgr2::find_buffer_for_block(Block* block, bool* in_mem) { // only happens if the buffer has not already been allocated by the block mgr. // This check should ensure that the memory cannot be consumed by another client // of the block mgr. - return Status::OK; + return Status::OK(); } if (block->_buffer_desc != NULL) { @@ -1031,7 +1031,7 @@ Status BufferedBlockMgr2::find_buffer_for_block(Block* block, bool* in_mem) { // There are no free buffers or blocks we can evict. We need to fail this request. // If this is an optional request, return OK. If it is required, return OOM. 
if (!is_reserved_request) { - return Status::OK; + return Status::OK(); } if (VLOG_QUERY_IS_ON) { @@ -1040,10 +1040,8 @@ Status BufferedBlockMgr2::find_buffer_for_block(Block* block, bool* in_mem) { << endl << debug_internal() << endl << client->debug_string(); VLOG_QUERY << ss.str(); } - Status status = Status::MEM_LIMIT_EXCEEDED; - status.add_error_msg("Query did not have enough memory to get the minimum required " - "buffers in the block manager."); - return status; + return Status::MemoryLimitExceeded("Query did not have enough memory to get the minimum required " + "buffers in the block manager."); } DCHECK(buffer_desc != NULL); @@ -1070,7 +1068,7 @@ Status BufferedBlockMgr2::find_buffer_for_block(Block* block, bool* in_mem) { // of free buffers below the threshold is reached. RETURN_IF_ERROR(write_unpinned_blocks()); DCHECK(validate()) << endl << debug_internal(); - return Status::OK; + return Status::OK(); } // We need to find a new buffer. We prefer getting this buffer in this order: @@ -1089,7 +1087,7 @@ Status BufferedBlockMgr2::find_buffer( *buffer_desc = _obj_pool.add(new BufferDescriptor(new_buffer, _max_block_size)); (*buffer_desc)->all_buffers_it = _all_io_buffers.insert( _all_io_buffers.end(), *buffer_desc); - return Status::OK; + return Status::OK(); } // Second, try to pick a buffer from the free list. @@ -1097,7 +1095,7 @@ Status BufferedBlockMgr2::find_buffer( // There are no free buffers. If spills are disabled or there no unpinned blocks we // can write, return. We can't get a buffer. if (_disable_spill) { - return Status("Spilling has been disabled for plans that do not have stats and " + return Status::InternalError("Spilling has been disabled for plans that do not have stats and " "are not hinted to prevent potentially bad plans from using too many cluster " "resources. 
Compute stats on these tables, hint the plan or disable this " "behavior via query options to enable spilling."); @@ -1107,7 +1105,7 @@ Status BufferedBlockMgr2::find_buffer( // Get a free buffer from the front of the queue and assign it to the block. do { if (_unpinned_blocks.empty() && _non_local_outstanding_writes == 0) { - return Status::OK; + return Status::OK(); } SCOPED_TIMER(_buffer_wait_timer); // Try to evict unpinned blocks before waiting. @@ -1115,12 +1113,12 @@ Status BufferedBlockMgr2::find_buffer( DCHECK_GT(_non_local_outstanding_writes, 0) << endl << debug_internal(); _buffer_available_cv.wait(lock); if (_is_cancelled) { - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } } while (_free_io_buffers.empty()); } *buffer_desc = _free_io_buffers.dequeue(); - return Status::OK; + return Status::OK(); } BufferedBlockMgr2::Block* BufferedBlockMgr2::get_unused_block(Client* client) { @@ -1311,11 +1309,11 @@ Status BufferedBlockMgr2::init_tmp_files() { } } if (_tmp_files.empty()) { - return Status("No spilling directories configured. Cannot spill. Set --scratch_dirs" + return Status::InternalError("No spilling directories configured. Cannot spill. Set --scratch_dirs" " or see log for previous errors that prevented use of provided directories"); } _next_block_index = rand() % _tmp_files.size(); - return Status::OK; + return Status::OK(); } } // namespace doris diff --git a/be/src/runtime/buffered_block_mgr2.h b/be/src/runtime/buffered_block_mgr2.h index d96dc91332..dba452b0db 100644 --- a/be/src/runtime/buffered_block_mgr2.h +++ b/be/src/runtime/buffered_block_mgr2.h @@ -344,7 +344,7 @@ public: Status get_new_block(Client* client, Block* unpin_block, Block** block, int64_t len = -1); // Cancels the block mgr. All subsequent calls that return a Status fail with - // Status::CANCELLED. Idempotent. + // Status::Cancelled("Cancelled"). Idempotent. void cancel(); // Returns true if the block manager was cancelled. 
@@ -490,7 +490,7 @@ private: // Callback used by DiskIoMgr to indicate a block write has completed. write_status // is the status of the write. _is_cancelled is set to true if write_status is not - // Status::OK or a re-issue of the write fails. Returns the block's buffer to the + // Status::OK() or a re-issue of the write fails. Returns the block's buffer to the // free buffers list if it is no longer pinned. Returns the block itself to the free // blocks list if it has been deleted. void write_complete(Block* block, const Status& write_status); @@ -587,7 +587,7 @@ private: DiskIoMgr::RequestContext* _io_request_context; // If true, a disk write failed and all API calls return. - // Status::CANCELLED. Set to true if there was an error writing a block, or if + // Status::Cancelled("Cancelled"). Set to true if there was an error writing a block, or if // write_complete() needed to reissue the write and that failed. bool _is_cancelled; diff --git a/be/src/runtime/buffered_tuple_stream.cpp b/be/src/runtime/buffered_tuple_stream.cpp index 65739fa562..8f4dd1924a 100644 --- a/be/src/runtime/buffered_tuple_stream.cpp +++ b/be/src/runtime/buffered_tuple_stream.cpp @@ -125,7 +125,7 @@ Status BufferedTupleStream::init(RuntimeProfile* profile) { RETURN_IF_ERROR(new_block_for_write(_fixed_tuple_row_size, &got_block)); if (!got_block) { - return Status("Allocate memory failed. %d", _fixed_tuple_row_size); + return Status::InternalError("Allocate memory failed."); } DCHECK(_write_block != NULL); @@ -134,7 +134,7 @@ Status BufferedTupleStream::init(RuntimeProfile* profile) { RETURN_IF_ERROR(prepare_for_read()); }; - return Status::OK; + return Status::OK(); } void BufferedTupleStream::close() { @@ -155,7 +155,7 @@ Status BufferedTupleStream::new_block_for_write(int min_size, bool* got_block) { err_msg << "Cannot process row that is bigger than the IO size " << "(row_size=" << PrettyPrinter::print(min_size, TUnit::BYTES) << ". 
To run this query, increase the io size"; - return Status(err_msg.str()); + return Status::InternalError(err_msg.str()); } int64_t block_len = _block_mgr->max_block_size(); @@ -177,7 +177,7 @@ Status BufferedTupleStream::new_block_for_write(int min_size, bool* got_block) { _total_byte_size += block_len; *got_block = (new_block != NULL); - return Status::OK; + return Status::OK(); } Status BufferedTupleStream::next_block_for_read() { @@ -197,14 +197,14 @@ Status BufferedTupleStream::next_block_for_read() { _read_ptr = (*_read_block)->buffer() + _null_indicators_read_block; } - return Status::OK; + return Status::OK(); } Status BufferedTupleStream::prepare_for_read(bool* got_buffer) { DCHECK(!_closed); if (_blocks.empty()) { - return Status::OK; + return Status::OK(); } _read_block = _blocks.begin(); @@ -221,7 +221,7 @@ Status BufferedTupleStream::prepare_for_read(bool* got_buffer) { *got_buffer = true; } - return Status::OK; + return Status::OK(); } int BufferedTupleStream::compute_num_null_indicator_bytes(int block_size) const { @@ -257,7 +257,7 @@ Status BufferedTupleStream::get_next_internal(RowBatch* batch, bool* eos, *eos = (_rows_returned == _num_rows); if (*eos) { - return Status::OK; + return Status::OK(); } DCHECK_GE(_null_indicators_read_block, 0); @@ -404,7 +404,7 @@ Status BufferedTupleStream::get_next_internal(RowBatch* batch, bool* eos, } DCHECK_EQ(indices->size(), i); - return Status::OK; + return Status::OK(); } // TODO: Move this somewhere in general. 
We don't want this function inlined diff --git a/be/src/runtime/buffered_tuple_stream2.cc b/be/src/runtime/buffered_tuple_stream2.cc index 2cac980ff0..135eb1440a 100644 --- a/be/src/runtime/buffered_tuple_stream2.cc +++ b/be/src/runtime/buffered_tuple_stream2.cc @@ -149,13 +149,13 @@ Status BufferedTupleStream2::init(int node_id, RuntimeProfile* profile, bool pin if (!pinned) { RETURN_IF_ERROR(unpin_stream()); } - return Status::OK; + return Status::OK(); } Status BufferedTupleStream2::switch_to_io_buffers(bool* got_buffer) { if (!_use_small_buffers) { *got_buffer = (_write_block != NULL); - return Status::OK; + return Status::OK(); } _use_small_buffers = false; Status status = new_block_for_write(_block_mgr->max_block_size(), got_buffer); @@ -199,12 +199,12 @@ Status BufferedTupleStream2::unpin_block(BufferedBlockMgr2::Block* block) { SCOPED_TIMER(_unpin_timer); DCHECK(block->is_pinned()); if (!block->is_max_size()) { - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(block->unpin()); --_num_pinned; DCHECK_EQ(_num_pinned, num_pinned(_blocks)); - return Status::OK; + return Status::OK(); } Status BufferedTupleStream2::new_block_for_write(int64_t min_size, bool* got_block) { @@ -215,7 +215,7 @@ Status BufferedTupleStream2::new_block_for_write(int64_t min_size, bool* got_blo error_msg << "Cannot process row that is bigger than the IO size (row_size=" << PrettyPrinter::print(min_size, TUnit::BYTES) << "). To run this query, increase the IO size (--read_size option)."; - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } BufferedBlockMgr2::Block* unpin_block = _write_block; @@ -238,7 +238,7 @@ Status BufferedTupleStream2::new_block_for_write(int64_t min_size, bool* got_blo if (block_len == _block_mgr->max_block_size()) { // Do not switch to IO-buffers automatically. Do not get a buffer. 
*got_block = false; - return Status::OK; + return Status::OK(); } } @@ -252,7 +252,7 @@ Status BufferedTupleStream2::new_block_for_write(int64_t min_size, bool* got_blo if (!*got_block) { DCHECK(unpin_block == NULL); - return Status::OK; + return Status::OK(); } if (unpin_block != NULL) { @@ -279,7 +279,7 @@ Status BufferedTupleStream2::new_block_for_write(int64_t min_size, bool* got_blo ++_num_small_blocks; } _total_byte_size += block_len; - return Status::OK; + return Status::OK(); } Status BufferedTupleStream2::next_block_for_read() { @@ -349,13 +349,13 @@ Status BufferedTupleStream2::next_block_for_read() { _read_ptr = (*_read_block)->buffer() + _null_indicators_read_block; } DCHECK_EQ(_num_pinned, num_pinned(_blocks)) << debug_string(); - return Status::OK; + return Status::OK(); } Status BufferedTupleStream2::prepare_for_read(bool delete_on_read, bool* got_buffer) { DCHECK(!_closed); if (_blocks.empty()) { - return Status::OK; + return Status::OK(); } if (!_read_write && _write_block != NULL) { @@ -377,7 +377,7 @@ Status BufferedTupleStream2::prepare_for_read(bool delete_on_read, bool* got_buf if (!current_pinned) { DCHECK(got_buffer != NULL) << "Should have reserved enough blocks"; *got_buffer = false; - return Status::OK; + return Status::OK(); } ++_num_pinned; DCHECK_EQ(_num_pinned, num_pinned(_blocks)); @@ -399,7 +399,7 @@ Status BufferedTupleStream2::prepare_for_read(bool delete_on_read, bool* got_buf if (got_buffer != NULL) { *got_buffer = true; } - return Status::OK; + return Status::OK(); } Status BufferedTupleStream2::pin_stream(bool already_reserved, bool* pinned) { @@ -409,7 +409,7 @@ Status BufferedTupleStream2::pin_stream(bool already_reserved, bool* pinned) { // If we can't get all the blocks, don't try at all. 
if (!_block_mgr->try_acquire_tmp_reservation(_block_mgr_client, blocks_unpinned())) { *pinned = false; - return Status::OK; + return Status::OK(); } } @@ -425,7 +425,7 @@ Status BufferedTupleStream2::pin_stream(bool already_reserved, bool* pinned) { if (!*pinned) { VLOG_QUERY << "Should have been reserved." << std::endl << _block_mgr->debug_string(_block_mgr_client); - return Status::OK; + return Status::OK(); } ++_num_pinned; DCHECK_EQ(_num_pinned, num_pinned(_blocks)); @@ -442,7 +442,7 @@ Status BufferedTupleStream2::pin_stream(bool already_reserved, bool* pinned) { } *pinned = true; _pinned = true; - return Status::OK; + return Status::OK(); } Status BufferedTupleStream2::unpin_stream(bool all) { @@ -463,7 +463,7 @@ Status BufferedTupleStream2::unpin_stream(bool all) { _write_block = NULL; } _pinned = false; - return Status::OK; + return Status::OK(); } int BufferedTupleStream2::compute_num_null_indicator_bytes(int block_size) const { @@ -484,7 +484,7 @@ int BufferedTupleStream2::compute_num_null_indicator_bytes(int block_size) const Status BufferedTupleStream2::get_rows(scoped_ptr* batch, bool* got_rows) { RETURN_IF_ERROR(pin_stream(false, got_rows)); if (!*got_rows) { - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(prepare_for_read(false)); batch->reset( @@ -496,7 +496,7 @@ Status BufferedTupleStream2::get_rows(scoped_ptr* batch, bool* got_row while (!eos) { RETURN_IF_ERROR(get_next(batch->get(), &eos)); } - return Status::OK; + return Status::OK(); } Status BufferedTupleStream2::get_next(RowBatch* batch, bool* eos, @@ -515,7 +515,7 @@ Status BufferedTupleStream2::get_next_internal( DCHECK(batch->row_desc().equals(_desc)); *eos = (_rows_returned == _num_rows); if (*eos) { - return Status::OK; + return Status::OK(); } DCHECK_GE(_null_indicators_read_block, 0); @@ -648,7 +648,7 @@ Status BufferedTupleStream2::get_next_internal( batch->mark_need_to_return(); } DCHECK_EQ(indices->size(), i); - return Status::OK; + return Status::OK(); } void 
BufferedTupleStream2::read_strings(const vector& string_slots, diff --git a/be/src/runtime/buffered_tuple_stream3.cc b/be/src/runtime/buffered_tuple_stream3.cc index f3c86e0d2a..add66234fc 100644 --- a/be/src/runtime/buffered_tuple_stream3.cc +++ b/be/src/runtime/buffered_tuple_stream3.cc @@ -211,7 +211,7 @@ string BufferedTupleStream3::Page::DebugString() const { Status BufferedTupleStream3::Init(int node_id, bool pinned) { // if (!pinned) UnpinStream(UNPIN_ALL_EXCEPT_CURRENT); node_id_ = node_id; - return Status::OK; + return Status::OK(); } Status BufferedTupleStream3::PrepareForWrite(bool* got_reservation) { @@ -223,12 +223,12 @@ Status BufferedTupleStream3::PrepareForWrite(bool* got_reservation) { CHECK_CONSISTENCY_FULL(); *got_reservation = buffer_pool_client_->IncreaseReservationToFit(default_page_len_); - if (!*got_reservation) return Status::OK; + if (!*got_reservation) return Status::OK(); has_write_iterator_ = true; // Save reservation for the write iterators. buffer_pool_client_->SaveReservation(&write_page_reservation_, default_page_len_); CHECK_CONSISTENCY_FULL(); - return Status::OK; + return Status::OK(); } Status BufferedTupleStream3::PrepareForReadWrite( @@ -241,13 +241,13 @@ Status BufferedTupleStream3::PrepareForReadWrite( CHECK_CONSISTENCY_FULL(); *got_reservation = buffer_pool_client_->IncreaseReservationToFit(2 * default_page_len_); - if (!*got_reservation) return Status::OK; + if (!*got_reservation) return Status::OK(); has_write_iterator_ = true; // Save reservation for both the read and write iterators. 
buffer_pool_client_->SaveReservation(&read_page_reservation_, default_page_len_); buffer_pool_client_->SaveReservation(&write_page_reservation_, default_page_len_); RETURN_IF_ERROR(PrepareForReadInternal(delete_on_read)); - return Status::OK; + return Status::OK(); } void BufferedTupleStream3::Close(RowBatch* batch, RowBatch::FlushMode flush) { @@ -281,7 +281,7 @@ int64_t BufferedTupleStream3::CalcBytesPinned() const { Status BufferedTupleStream3::PinPage(Page* page) { RETURN_IF_ERROR(buffer_pool_->Pin(buffer_pool_client_, &page->handle)); bytes_pinned_ += page->len(); - return Status::OK; + return Status::OK(); } int BufferedTupleStream3::ExpectedPinCount(bool stream_pinned, const Page* page) const { @@ -294,7 +294,7 @@ Status BufferedTupleStream3::PinPageIfNeeded(Page* page, bool stream_pinned) { DCHECK_EQ(new_pin_count, page->pin_count() + 1); RETURN_IF_ERROR(PinPage(page)); } - return Status::OK; + return Status::OK(); } void BufferedTupleStream3::UnpinPageIfNeeded(Page* page, bool stream_pinned) { @@ -379,7 +379,7 @@ Status BufferedTupleStream3::NewWritePage(int64_t page_len) noexcept { DCHECK_EQ(write_page_->num_rows, 0); write_ptr_ = write_buffer->data(); write_end_ptr_ = write_ptr_ + page_len; - return Status::OK; + return Status::OK(); } Status BufferedTupleStream3::CalcPageLenForRow(int64_t row_size, int64_t* page_len) { @@ -391,10 +391,10 @@ Status BufferedTupleStream3::CalcPageLenForRow(int64_t row_size, int64_t* page_l //<< " query option max row size:" //<< PrettyPrinter::print // (state_->query_options().max_row_size, TUnit::BYTES); - return Status(ss.str()); + return Status::InternalError(ss.str()); } *page_len = max(default_page_len_, BitUtil::RoundUpToPowerOfTwo(row_size)); - return Status::OK; + return Status::OK(); } Status BufferedTupleStream3::AdvanceWritePage( @@ -438,7 +438,7 @@ Status BufferedTupleStream3::AdvanceWritePage( << "If the stream is unpinned, this should only fail for large pages"; CHECK_CONSISTENCY_FAST(); *got_reservation = 
false; - return Status::OK; + return Status::OK(); } if (write_reservation_to_restore > 0) { buffer_pool_client_->RestoreReservation( @@ -455,7 +455,7 @@ Status BufferedTupleStream3::AdvanceWritePage( return status; } *got_reservation = true; - return Status::OK; + return Status::OK(); } void BufferedTupleStream3::ResetWritePage() { @@ -517,7 +517,7 @@ Status BufferedTupleStream3::NextReadPage() { if (read_page_ == pages_.end()) { CHECK_CONSISTENCY_FULL(); - return Status::OK; + return Status::OK(); } if (!pinned_ && read_page_->len() > default_page_len_ @@ -530,7 +530,7 @@ Status BufferedTupleStream3::NextReadPage() { err_stream << "Internal error: couldn't pin large page of " << read_page_->len() << " bytes, client only had " << buffer_pool_client_->GetUnusedReservation() << " bytes of unused reservation:" << buffer_pool_client_->DebugString() << "\n"; - return Status(err_stream.str()); + return Status::InternalError(err_stream.str()); } // Ensure the next page is pinned for reading. By this point we should have enough // reservation to pin the page. If the stream is pinned, the page is already pinned. @@ -556,7 +556,7 @@ Status BufferedTupleStream3::NextReadPage() { buffer_pool_client_->SaveReservation(&write_page_reservation_, default_page_len_); } CHECK_CONSISTENCY_FAST(); - return Status::OK; + return Status::OK(); } void BufferedTupleStream3::InvalidateReadIterator() { @@ -586,7 +586,7 @@ Status BufferedTupleStream3::PrepareForRead(bool delete_on_read, bool* got_reser // If already pinned, no additional pin is needed (see ExpectedPinCount()). 
*got_reservation = pinned_ || pages_.empty() || buffer_pool_client_->IncreaseReservationToFit(default_page_len_); - if (!*got_reservation) return Status::OK; + if (!*got_reservation) return Status::OK(); return PrepareForReadInternal(delete_on_read); } @@ -618,7 +618,7 @@ Status BufferedTupleStream3::PrepareForReadInternal(bool delete_on_read) { rows_returned_ = 0; delete_on_read_ = delete_on_read; CHECK_CONSISTENCY_FULL(); - return Status::OK; + return Status::OK(); } Status BufferedTupleStream3::PinStream(bool* pinned) { @@ -626,7 +626,7 @@ Status BufferedTupleStream3::PinStream(bool* pinned) { CHECK_CONSISTENCY_FULL(); if (pinned_) { *pinned = true; - return Status::OK; + return Status::OK(); } *pinned = false; // First, make sure we have the reservation to pin all the pages for reading. @@ -645,7 +645,7 @@ Status BufferedTupleStream3::PinStream(bool* pinned) { - (restore_read_reservation ? default_page_len_ : 0); bool reservation_granted = buffer_pool_client_->IncreaseReservationToFit(increase_needed); - if (!reservation_granted) return Status::OK; + if (!reservation_granted) return Status::OK(); // If there is no current write page we should have some saved reservation to use. // Only continue saving it if the stream is empty and need it to pin the first page. @@ -664,7 +664,7 @@ Status BufferedTupleStream3::PinStream(bool* pinned) { pinned_ = true; *pinned = true; CHECK_CONSISTENCY_FULL(); - return Status::OK; + return Status::OK(); } /* void BufferedTupleStream3::UnpinStream(UnpinMode mode) { @@ -698,12 +698,11 @@ Status BufferedTupleStream3::GetRows( MemTracker* tracker, scoped_ptr* batch, bool* got_rows) { if (num_rows() > numeric_limits::max()) { // RowBatch::num_rows_ is a 32-bit int, avoid an overflow. - return Status(Substitute("Trying to read $0 rows into in-memory batch failed. Limit " - "is $1", - num_rows(), numeric_limits::max())); + return Status::InternalError(Substitute("Trying to read $0 rows into in-memory batch failed. 
Limit " + "is $1", num_rows(), numeric_limits::max())); } RETURN_IF_ERROR(PinStream(got_rows)); - if (!*got_rows) return Status::OK; + if (!*got_rows) return Status::OK(); bool got_reservation; RETURN_IF_ERROR(PrepareForRead(false, &got_reservation)); DCHECK(got_reservation) << "Stream was pinned"; @@ -719,7 +718,7 @@ Status BufferedTupleStream3::GetRows( while (!eos) { RETURN_IF_ERROR(GetNext(batch->get(), &eos)); } - return Status::OK; + return Status::OK(); } Status BufferedTupleStream3::GetNext(RowBatch* batch, bool* eos) { @@ -749,7 +748,7 @@ Status BufferedTupleStream3::GetNextInternal( DCHECK(is_pinned() || !FILL_FLAT_ROWS) << "FlatRowPtrs are only valid for pinned streams"; *eos = (rows_returned_ == num_rows_); - if (*eos) return Status::OK; + if (*eos) return Status::OK(); if (UNLIKELY(read_page_ == pages_.end() || read_page_rows_returned_ == read_page_->num_rows)) { @@ -822,7 +821,7 @@ Status BufferedTupleStream3::GetNextInternal( } if (FILL_FLAT_ROWS) DCHECK_EQ(flat_rows->size(), rows_to_fill); DCHECK_LE(read_ptr_, read_end_ptr_); - return Status::OK; + return Status::OK(); } void BufferedTupleStream3::FixUpStringsForRead( diff --git a/be/src/runtime/buffered_tuple_stream3.h b/be/src/runtime/buffered_tuple_stream3.h index bac387ae30..ebc26d3a8c 100644 --- a/be/src/runtime/buffered_tuple_stream3.h +++ b/be/src/runtime/buffered_tuple_stream3.h @@ -387,7 +387,7 @@ class BufferedTupleStream3 { Status GetBuffer(const BufferPool::BufferHandle** buffer) { RETURN_IF_ERROR(handle.GetBuffer(buffer)); retrieved_buffer = true; - return Status::OK; + return Status::OK(); } std::string DebugString() const; diff --git a/be/src/runtime/bufferpool/buffer_allocator.cc b/be/src/runtime/bufferpool/buffer_allocator.cc index 2e07a4bf4c..0f0f31fa59 100644 --- a/be/src/runtime/bufferpool/buffer_allocator.cc +++ b/be/src/runtime/bufferpool/buffer_allocator.cc @@ -225,7 +225,7 @@ Status BufferPool::BufferAllocator::Allocate( RETURN_IF_ERROR(AllocateInternal(len, buffer)); 
DCHECK(buffer->is_open()); buffer->client_ = client; - return Status::OK; + return Status::OK(); } Status BufferPool::BufferAllocator::AllocateInternal(int64_t len, BufferHandle* buffer) { @@ -237,18 +237,18 @@ Status BufferPool::BufferAllocator::AllocateInternal(int64_t len, BufferHandle* if (UNLIKELY(len > MAX_BUFFER_BYTES)) { err_stream << "Tried to allocate buffer of " << len << " bytes" << " max of " << MAX_BUFFER_BYTES << " bytes"; - return Status(err_stream.str()); + return Status::InternalError(err_stream.str()); } if (UNLIKELY(len > system_bytes_limit_)) { err_stream << "Tried to allocate buffer of " << len << " bytes" << " > buffer pool limit of " << MAX_BUFFER_BYTES << " bytes"; - return Status(err_stream.str()); + return Status::InternalError(err_stream.str()); } const int current_core = CpuInfo::get_current_core(); // Fast path: recycle a buffer of the correct size from this core's arena. FreeBufferArena* current_core_arena = per_core_arenas_[current_core].get(); - if (current_core_arena->PopFreeBuffer(len, buffer)) return Status::OK; + if (current_core_arena->PopFreeBuffer(len, buffer)) return Status::OK(); // Fast-ish path: allocate a new buffer if there is room in 'system_bytes_remaining_'. int64_t delta = DecreaseBytesRemaining(len, true, &system_bytes_remaining_); @@ -266,7 +266,7 @@ Status BufferPool::BufferAllocator::AllocateInternal(int64_t len, BufferHandle* // Each core should start searching from a different point to avoid hot-spots. 
int other_core = numa_node_cores[(numa_node_core_idx + i) % numa_node_cores.size()]; FreeBufferArena* other_core_arena = per_core_arenas_[other_core].get(); - if (other_core_arena->PopFreeBuffer(len, buffer)) return Status::OK; + if (other_core_arena->PopFreeBuffer(len, buffer)) return Status::OK(); } /* @@ -274,7 +274,7 @@ Status BufferPool::BufferAllocator::AllocateInternal(int64_t len, BufferHandle* for (int i = 0; i < numa_node_cores.size(); ++i) { int other_core = numa_node_cores[(numa_node_core_idx + i) % numa_node_cores.size()]; FreeBufferArena* other_core_arena = per_core_arenas_[other_core].get(); - if (other_core_arena->EvictCleanPage(len, buffer)) return Status::OK; + if (other_core_arena->EvictCleanPage(len, buffer)) return Status::OK(); } */ // Slow path: scavenge buffers of different sizes from free buffer lists and clean @@ -296,7 +296,7 @@ Status BufferPool::BufferAllocator::AllocateInternal(int64_t len, BufferHandle* << "bytes: was only able to free up " << delta << " bytes after " << max_scavenge_attempts_ << " attempts:\n" << pool_->DebugString(); - return Status(err_stream.str()); + return Status::InternalError(err_stream.str()); } } // We have headroom to allocate a new buffer at this point. 
@@ -306,7 +306,7 @@ Status BufferPool::BufferAllocator::AllocateInternal(int64_t len, BufferHandle* system_bytes_remaining_.add(len); return status; } - return Status::OK; + return Status::OK(); } int64_t DecreaseBytesRemaining( diff --git a/be/src/runtime/bufferpool/buffer_pool.cc b/be/src/runtime/bufferpool/buffer_pool.cc index afe62a8ab1..04b2bba7d1 100644 --- a/be/src/runtime/bufferpool/buffer_pool.cc +++ b/be/src/runtime/bufferpool/buffer_pool.cc @@ -104,7 +104,7 @@ Status BufferPool::PageHandle::GetBuffer(const BufferHandle** buffer) const { DCHECK(!page_->pin_in_flight); *buffer = &page_->buffer; DCHECK((*buffer)->is_open()); - return Status::OK; + return Status::OK(); } BufferPool::BufferPool(int64_t min_buffer_len, int64_t buffer_bytes_limit, @@ -126,7 +126,7 @@ Status BufferPool::RegisterClient(const string& name, //TmpFileMgr::FileGroup* f client->impl_ = new Client(this, //file_group, name, parent_reservation, mem_tracker, reservation_limit, profile); - return Status::OK; + return Status::OK(); } void BufferPool::DeregisterClient(ClientHandle* client) { @@ -148,7 +148,7 @@ Status BufferPool::CreatePage( Page* page = client->impl_->CreatePinnedPage(move(new_buffer)); handle->Open(page, client); if (buffer != nullptr) *buffer = &page->buffer; - return Status::OK; + return Status::OK(); } void BufferPool::DestroyPage(ClientHandle* client, PageHandle* handle) { @@ -185,7 +185,7 @@ Status BufferPool::Pin(ClientHandle* client, PageHandle* handle) { // Update accounting last to avoid complicating the error return path above. ++page->pin_count; client->impl_->reservation()->AllocateFrom(page->len); - return Status::OK; + return Status::OK(); } void BufferPool::Unpin(ClientHandle* client, PageHandle* handle) { @@ -225,7 +225,7 @@ Status BufferPool::ExtractBuffer( // Destroy the page and extract the buffer. 
client->impl_->DestroyPageInternal(page_handle, buffer_handle); DCHECK(buffer_handle->is_open()); - return Status::OK; + return Status::OK(); } Status BufferPool::AllocateBuffer( @@ -259,7 +259,7 @@ Status BufferPool::TransferBuffer(ClientHandle* src_client, BufferHandle* src, src_client->impl_->reservation()->ReleaseTo(src->len()); *dst = std::move(*src); dst->client_ = dst_client; - return Status::OK; + return Status::OK(); } void BufferPool::Maintenance() { @@ -478,10 +478,10 @@ Status BufferPool::Client::StartMoveToPinned(ClientHandle* client, Page* page) { if (dirty_unpinned_pages_.remove(page)) { // No writes were initiated for the page - just move it back to the pinned state. pinned_pages_.enqueue(page); - return Status::OK; + return Status::OK(); } - return Status("start move to pinned error, page is not in dirty."); + return Status::InternalError("start move to pinned error, page is not in dirty."); /* if (in_flight_write_pages_.contains(page)) { // A write is in flight. If so, wait for it to complete - then we only have to @@ -523,7 +523,7 @@ Status BufferPool::Client::StartMoveEvictedToPinned( pinned_pages_.enqueue(page); page->pin_in_flight = true; DCHECK_CONSISTENCY(); - return Status::OK; + return Status::OK(); } void BufferPool::Client::UndoMoveEvictedToPinned(Page* page) { @@ -555,7 +555,7 @@ Status BufferPool::Client::FinishMoveEvictedToPinned(Page* page) { file_group_->WaitForAsyncRead(page->write_handle.get(), page->buffer.mem_range())); file_group_->DestroyWriteHandle(move(page->write_handle)); page->pin_in_flight = false; - return Status::OK; + return Status::OK(); } */ Status BufferPool::Client::PrepareToAllocateBuffer(int64_t len) { @@ -566,7 +566,7 @@ Status BufferPool::Client::PrepareToAllocateBuffer(int64_t len) { reservation_.AllocateFrom(len); buffers_allocated_bytes_ += len; DCHECK_CONSISTENCY(); - return Status::OK; + return Status::OK(); } Status BufferPool::Client::DecreaseReservationTo(int64_t target_bytes) { @@ -575,11 +575,11 @@ 
Status BufferPool::Client::DecreaseReservationTo(int64_t target_bytes) { DCHECK_GE(current_reservation, target_bytes); int64_t amount_to_free = min(reservation_.GetUnusedReservation(), current_reservation - target_bytes); - if (amount_to_free == 0) return Status::OK; + if (amount_to_free == 0) return Status::OK(); // Clean enough pages to allow us to safely release reservation. //RETURN_IF_ERROR(CleanPages(&lock, amount_to_free)); reservation_.DecreaseReservation(amount_to_free); - return Status::OK; + return Status::OK(); } Status BufferPool::Client::CleanPages(unique_lock* client_lock, int64_t len) { @@ -610,7 +610,7 @@ Status BufferPool::Client::CleanPages(unique_lock* client_lock, int64_t l RETURN_IF_ERROR(write_status_); // Check if error occurred while waiting. } */ - return Status::OK; + return Status::OK(); } /* void BufferPool::Client::WriteDirtyPagesAsync(int64_t min_bytes_to_write) { diff --git a/be/src/runtime/bufferpool/suballocator.cc b/be/src/runtime/bufferpool/suballocator.cc index 4b2043eb02..a2e071fe3d 100644 --- a/be/src/runtime/bufferpool/suballocator.cc +++ b/be/src/runtime/bufferpool/suballocator.cc @@ -52,7 +52,7 @@ Status Suballocator::Allocate(int64_t bytes, unique_ptr* result) err_stream << "Requested memory allocation of " << bytes << " bytes, larger than max " << "supported of " << MAX_ALLOCATION_BYTES << " bytes"; - return Status(err_stream.str()); + return Status::InternalError(err_stream.str()); } unique_ptr free_node; bytes = max(bytes, MIN_ALLOCATION_BYTES); @@ -67,7 +67,7 @@ Status Suballocator::Allocate(int64_t bytes, unique_ptr* result) RETURN_IF_ERROR(AllocateBuffer(bytes, &free_node)); if (free_node == nullptr) { *result = nullptr; - return Status::OK; + return Status::OK(); } } @@ -81,7 +81,7 @@ Status Suballocator::Allocate(int64_t bytes, unique_ptr* result) free_node->in_use_ = true; allocated_ += free_node->len_; *result = move(free_node); - return Status::OK; + return Status::OK(); } int 
Suballocator::ComputeListIndex(int64_t bytes) const { @@ -93,7 +93,7 @@ Status Suballocator::AllocateBuffer(int64_t bytes, unique_ptr* re const int64_t buffer_len = max(min_buffer_len_, BitUtil::RoundUpToPowerOfTwo(bytes)); if (!client_->IncreaseReservationToFit(buffer_len)) { *result = nullptr; - return Status::OK; + return Status::OK(); } unique_ptr free_node; @@ -103,7 +103,7 @@ Status Suballocator::AllocateBuffer(int64_t bytes, unique_ptr* re free_node->data_ = free_node->buffer_.data(); free_node->len_ = buffer_len; *result = move(free_node); - return Status::OK; + return Status::OK(); } Status Suballocator::SplitToSize(unique_ptr free_node, @@ -124,7 +124,7 @@ Status Suballocator::SplitToSize(unique_ptr free_node, // Add the free node to the free list to restore the allocator to an internally // consistent state. AddToFreeList(move(free_node)); - return Status("Failed to allocate list node in Suballocator"); + return Status::InternalError("Failed to allocate list node in Suballocator"); } } @@ -150,7 +150,7 @@ Status Suballocator::SplitToSize(unique_ptr free_node, free_node = move(left_child); } *result = move(free_node); - return Status::OK; + return Status::OK(); } void Suballocator::Free(unique_ptr allocation) { @@ -239,8 +239,8 @@ Status Suballocation::Create(unique_ptr* new_suballocation) { // overhead of these allocations might be a consideration. 
new_suballocation->reset(new (nothrow) Suballocation()); if (*new_suballocation == nullptr) { - return Status(TStatusCode::MEM_ALLOC_FAILED); + return Status::MemoryAllocFailed("allocate memory failed"); } - return Status::OK; + return Status::OK(); } } diff --git a/be/src/runtime/bufferpool/system_allocator.cc b/be/src/runtime/bufferpool/system_allocator.cc index 0b79dc66b6..1bbfbb9f77 100644 --- a/be/src/runtime/bufferpool/system_allocator.cc +++ b/be/src/runtime/bufferpool/system_allocator.cc @@ -69,7 +69,7 @@ Status SystemAllocator::Allocate(int64_t len, BufferPool::BufferHandle* buffer) RETURN_IF_ERROR(AllocateViaMalloc(len, &buffer_mem)); } buffer->Open(buffer_mem, len, CpuInfo::get_current_core()); - return Status::OK; + return Status::OK(); } Status SystemAllocator::AllocateViaMMap(int64_t len, uint8_t** buffer_mem) { @@ -82,7 +82,7 @@ Status SystemAllocator::AllocateViaMMap(int64_t len, uint8_t** buffer_mem) { uint8_t* mem = reinterpret_cast( mmap(nullptr, map_len, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)); if (mem == MAP_FAILED) { - return Status(TStatusCode::BUFFER_ALLOCATION_FAILED); + return Status::BufferAllocFailed("mmap failed"); } if (use_huge_pages) { @@ -112,7 +112,7 @@ Status SystemAllocator::AllocateViaMMap(int64_t len, uint8_t** buffer_mem) { #endif } *buffer_mem = mem; - return Status::OK; + return Status::OK(); } Status SystemAllocator::AllocateViaMalloc(int64_t len, uint8_t** buffer_mem) { @@ -129,7 +129,7 @@ Status SystemAllocator::AllocateViaMalloc(int64_t len, uint8_t** buffer_mem) { if (rc != 0) { std::stringstream ss; ss << "posix_memalign() failed to allocate buffer: " << get_str_err_msg(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } if (use_huge_pages) { #ifdef MADV_HUGEPAGE @@ -140,7 +140,7 @@ Status SystemAllocator::AllocateViaMalloc(int64_t len, uint8_t** buffer_mem) { DCHECK(rc == 0) << "madvise(MADV_HUGEPAGE) shouldn't fail" << errno; #endif } - return Status::OK; + return 
Status::OK(); } void SystemAllocator::Free(BufferPool::BufferHandle&& buffer) { diff --git a/be/src/runtime/client_cache.cpp b/be/src/runtime/client_cache.cpp index 2cef9d8e16..ab882c5c23 100644 --- a/be/src/runtime/client_cache.cpp +++ b/be/src/runtime/client_cache.cpp @@ -70,7 +70,7 @@ Status ClientCacheHelper::get_client( _used_clients->increment(1); } - return Status::OK; + return Status::OK(); } Status ClientCacheHelper::reopen_client(client_factory factory_method, void** client_key, @@ -103,7 +103,7 @@ Status ClientCacheHelper::reopen_client(client_factory factory_method, void** cl _client_map[*client_key]->set_send_timeout(timeout_ms); _client_map[*client_key]->set_recv_timeout(timeout_ms); - return Status::OK; + return Status::OK(); } Status ClientCacheHelper::create_client( @@ -129,7 +129,7 @@ Status ClientCacheHelper::create_client( _opened_clients->increment(1); } - return Status::OK; + return Status::OK(); } void ClientCacheHelper::release_client(void** client_key) { diff --git a/be/src/runtime/client_cache.h b/be/src/runtime/client_cache.h index 84054ab521..90caeb6024 100644 --- a/be/src/runtime/client_cache.h +++ b/be/src/runtime/client_cache.h @@ -247,7 +247,7 @@ private: // Obtains a pointer to a Thrift interface object (of type T), // backed by a live transport which is already open. Returns - // Status::OK unless there was an error opening the transport. + // Status::OK() unless there was an error opening the transport. 
Status get_client(const TNetworkAddress& hostport, T** iface, int timeout_ms) { return _client_cache_helper.get_client(hostport, _client_factory, reinterpret_cast(iface), timeout_ms); diff --git a/be/src/runtime/data_spliter.cpp b/be/src/runtime/data_spliter.cpp index 3f9f87233c..7f48182fb4 100644 --- a/be/src/runtime/data_spliter.cpp +++ b/be/src/runtime/data_spliter.cpp @@ -62,7 +62,7 @@ Status DataSpliter::from_thrift( // Partition infos int num_parts = t_sink.partition_infos.size(); if (num_parts == 0) { - return Status("Empty partition info."); + return Status::InternalError("Empty partition info."); } for (int i = 0; i < num_parts; ++i) { PartitionInfo* info = pool->add(new PartitionInfo()); @@ -82,7 +82,7 @@ Status DataSpliter::from_thrift( spliter->_rollup_map[iter.first] = schema; } - return Status::OK; + return Status::OK(); } Status DataSpliter::prepare(RuntimeState* state) { @@ -98,7 +98,7 @@ Status DataSpliter::prepare(RuntimeState* state) { for (auto iter : _partition_infos) { RETURN_IF_ERROR(iter->prepare(state, _row_desc, _expr_mem_tracker.get())); } - return Status::OK; + return Status::OK(); } Status DataSpliter::open(RuntimeState* state) { @@ -123,7 +123,7 @@ Status DataSpliter::open(RuntimeState* state) { _split_timer = ADD_TIMER(_profile, "process batch"); _finish_timer = ADD_TIMER(_profile, "sort time"); - return Status::OK; + return Status::OK(); } int DataSpliter::binary_find_partition(const PartRangeKey& key) const { @@ -151,7 +151,7 @@ Status DataSpliter::process_partition( if (_partition_expr_ctxs.size() == 0) { *part_index = 0; *info = _partition_infos[0]; - return Status::OK; + return Status::OK(); } else { // use binary search to get the right partition. 
ExprContext* ctx = _partition_expr_ctxs[0]; @@ -171,11 +171,11 @@ Status DataSpliter::process_partition( error_log << "there is no corresponding partition for this key: "; ctx->print_value(row, &error_log); state->update_num_rows_load_filtered(1); - return Status(error_log.str(), true); + return Status::InternalError(error_log.str()); } *info = _partition_infos[*part_index]; } - return Status::OK; + return Status::OK(); } Status DataSpliter::process_distribute( @@ -197,7 +197,7 @@ Status DataSpliter::process_distribute( *mod = hash_val % part->distributed_bucket(); - return Status::OK; + return Status::OK(); } Status DataSpliter::send_row( @@ -223,7 +223,7 @@ Status DataSpliter::send_row( RETURN_IF_ERROR(dpp_sink->add_batch(_obj_pool.get(), state, desc, batch)); batch->reset(); } - return Status::OK; + return Status::OK(); } Status DataSpliter::process_one_row(RuntimeState* state, TupleRow* row) { @@ -243,7 +243,7 @@ Status DataSpliter::process_one_row(RuntimeState* state, TupleRow* row) { state->append_error_msg_to_file( row->to_string(_row_desc), status.get_error_msg()); - return Status::OK; + return Status::OK(); } desc.partition_id = part->id(); @@ -257,7 +257,7 @@ Status DataSpliter::process_one_row(RuntimeState* state, TupleRow* row) { // process distribute RETURN_IF_ERROR(send_row(state, desc, row, _dpp_sink_vec[part_index])); - return Status::OK; + return Status::OK(); } Status DataSpliter::send(RuntimeState* state, RowBatch* batch) { @@ -266,14 +266,14 @@ Status DataSpliter::send(RuntimeState* state, RowBatch* batch) { for (int i = 0; i < num_rows; ++i) { RETURN_IF_ERROR(process_one_row(state, batch->get_row(i))); } - return Status::OK; + return Status::OK(); } Status DataSpliter::close(RuntimeState* state, Status close_status) { bool is_ok = true; Status err_status; if (_closed) { - return Status::OK; + return Status::OK(); } if (close_status.ok()) { SCOPED_TIMER(_finish_timer); @@ -330,7 +330,7 @@ Status DataSpliter::close(RuntimeState* state, Status 
close_status) { _expr_mem_tracker->close(); _closed = true; if (is_ok) { - return Status::OK; + return Status::OK(); } else { return err_status; } diff --git a/be/src/runtime/data_stream_mgr.cpp b/be/src/runtime/data_stream_mgr.cpp index 5f171716e6..8f7615617a 100644 --- a/be/src/runtime/data_stream_mgr.cpp +++ b/be/src/runtime/data_stream_mgr.cpp @@ -108,7 +108,7 @@ Status DataStreamMgr::transmit_data(const PTransmitDataParams* request, ::google // in acquiring _lock. // TODO: Rethink the lifecycle of DataStreamRecvr to distinguish // errors from receiver-initiated teardowns. - return Status::OK; + return Status::OK(); } // request can only be used before calling recvr's add_batch or when request @@ -127,7 +127,7 @@ Status DataStreamMgr::transmit_data(const PTransmitDataParams* request, ::google if (eos) { recvr->remove_sender(request->sender_id(), request->be_number()); } - return Status::OK; + return Status::OK(); } Status DataStreamMgr::deregister_recvr( @@ -147,7 +147,7 @@ Status DataStreamMgr::deregister_recvr( _fragment_stream_set.erase(std::make_pair(recvr->fragment_instance_id(), recvr->dest_node_id())); _receiver_map.erase(range.first); - return Status::OK; + return Status::OK(); } ++range.first; } @@ -156,7 +156,7 @@ Status DataStreamMgr::deregister_recvr( err << "unknown row receiver id: fragment_instance_id=" << fragment_instance_id << " node_id=" << node_id; LOG(ERROR) << err.str(); - return Status(err.str()); + return Status::InternalError(err.str()); } void DataStreamMgr::cancel(const TUniqueId& fragment_instance_id) { diff --git a/be/src/runtime/data_stream_recvr.cc b/be/src/runtime/data_stream_recvr.cc index 72fd35d533..4fa3e5993e 100644 --- a/be/src/runtime/data_stream_recvr.cc +++ b/be/src/runtime/data_stream_recvr.cc @@ -157,12 +157,12 @@ Status DataStreamRecvr::SenderQueue::get_batch(RowBatch** next_batch) { _current_batch.reset(); *next_batch = NULL; if (_is_cancelled) { - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } 
if (_batch_queue.empty()) { DCHECK_EQ(_num_remaining_senders, 0); - return Status::OK; + return Status::OK(); } _received_first_batch = true; @@ -182,7 +182,7 @@ Status DataStreamRecvr::SenderQueue::get_batch(RowBatch** next_batch) { _pending_closures.pop_front(); } - return Status::OK; + return Status::OK(); } void DataStreamRecvr::SenderQueue::add_batch( @@ -336,7 +336,7 @@ Status DataStreamRecvr::create_merger(const TupleRowComparator& less_than) { bind(mem_fn(&SenderQueue::get_batch), _sender_queues[i], _1)); } RETURN_IF_ERROR(_merger->prepare(input_batch_suppliers)); - return Status::OK; + return Status::OK(); } void DataStreamRecvr::transfer_all_resources(RowBatch* transfer_batch) { diff --git a/be/src/runtime/data_stream_sender.cpp b/be/src/runtime/data_stream_sender.cpp index dfe0726c80..7591058e1c 100644 --- a/be/src/runtime/data_stream_sender.cpp +++ b/be/src/runtime/data_stream_sender.cpp @@ -130,9 +130,9 @@ private: if (cntl->Failed()) { LOG(WARNING) << "failed to send brpc batch, error=" << berror(cntl->ErrorCode()) << ", error_text=" << cntl->ErrorText(); - return Status(TStatusCode::THRIFT_RPC_ERROR, "failed to send batch"); + return Status::ThriftRpcError("failed to send batch"); } - return Status::OK; + return Status::OK(); } @@ -183,7 +183,7 @@ Status DataStreamSender::Channel::init(RuntimeState* state) { if (_brpc_dest_addr.hostname.empty()) { LOG(WARNING) << "there is no brpc destination address's hostname" ", maybe version is not compatible."; - return Status("no brpc destination"); + return Status::InternalError("no brpc destination"); } // initialize brpc request @@ -198,7 +198,7 @@ Status DataStreamSender::Channel::init(RuntimeState* state) { _brpc_stub = state->exec_env()->brpc_stub_cache()->get_stub(_brpc_dest_addr); _need_close = true; - return Status::OK; + return Status::OK(); } Status DataStreamSender::Channel::send_batch(PRowBatch* batch, bool eos) { @@ -228,7 +228,7 @@ Status DataStreamSender::Channel::send_batch(PRowBatch* batch, 
bool eos) { if (batch != nullptr) { _brpc_request.release_row_batch(); } - return Status::OK; + return Status::OK(); } Status DataStreamSender::Channel::add_row(TupleRow* row) { @@ -256,7 +256,7 @@ Status DataStreamSender::Channel::add_row(TupleRow* row) { } _batch->commit_last_row(); - return Status::OK; + return Status::OK(); } Status DataStreamSender::Channel::send_current_batch(bool eos) { @@ -268,12 +268,12 @@ Status DataStreamSender::Channel::send_current_batch(bool eos) { } _batch->reset(); RETURN_IF_ERROR(send_batch(&_pb_batch, eos)); - return Status::OK; + return Status::OK(); } Status DataStreamSender::Channel::close_internal() { if (!_need_close) { - return Status::OK; + return Status::OK(); } VLOG_RPC << "Channel::close() instance_id=" << _fragment_instance_id << " dest_node=" << _dest_node_id @@ -285,7 +285,7 @@ Status DataStreamSender::Channel::close_internal() { } RETURN_IF_ERROR(_wait_last_brpc()); _need_close = false; - return Status::OK; + return Status::OK(); } void DataStreamSender::Channel::close(RuntimeState* state) { @@ -350,7 +350,7 @@ Status DataStreamSender::init(const TDataSink& tsink) { // Partition infos int num_parts = t_stream_sink.output_partition.partition_infos.size(); if (num_parts == 0) { - return Status("Empty partition info."); + return Status::InternalError("Empty partition info."); } for (int i = 0; i < num_parts; ++i) { PartitionInfo* info = _pool->add(new PartitionInfo()); @@ -363,7 +363,7 @@ Status DataStreamSender::init(const TDataSink& tsink) { } else { } - return Status::OK; + return Status::OK(); } Status DataStreamSender::prepare(RuntimeState* state) { @@ -413,7 +413,7 @@ Status DataStreamSender::prepare(RuntimeState* state) { RETURN_IF_ERROR(_channels[i]->init(state)); } - return Status::OK; + return Status::OK(); } DataStreamSender::~DataStreamSender() { @@ -428,7 +428,7 @@ Status DataStreamSender::open(RuntimeState* state) { for (auto iter : _partition_infos) { RETURN_IF_ERROR(iter->open(state)); } - return 
Status::OK; + return Status::OK(); } Status DataStreamSender::send(RuntimeState* state, RowBatch* batch) { @@ -486,7 +486,7 @@ Status DataStreamSender::send(RuntimeState* state, RowBatch* batch) { COUNTER_UPDATE(_ignore_rows, ignore_rows); } - return Status::OK; + return Status::OK(); } int DataStreamSender::binary_find_partition(const PartRangeKey& key) const { @@ -513,7 +513,7 @@ Status DataStreamSender::find_partition( RuntimeState* state, TupleRow* row, PartitionInfo** info, bool* ignore) { if (_partition_expr_ctxs.size() == 0) { *info = _partition_infos[0]; - return Status::OK; + return Status::OK(); } else { *ignore = false; // use binary search to get the right partition. @@ -537,17 +537,17 @@ Status DataStreamSender::find_partition( ctx->print_value(row, &error_log); LOG(INFO) << error_log.str(); *ignore = true; - return Status::OK; + return Status::OK(); } else { std::stringstream error_log; error_log << "there is no corresponding partition for this key: "; ctx->print_value(row, &error_log); - return Status(error_log.str()); + return Status::InternalError(error_log.str()); } } *info = _partition_infos[part_index]; } - return Status::OK; + return Status::OK(); } Status DataStreamSender::process_distribute( @@ -570,7 +570,7 @@ Status DataStreamSender::process_distribute( int64_t part_id = part->id(); *code = RawValue::get_hash_value_fvn(&part_id, TypeDescriptor(TYPE_BIGINT), hash_val); - return Status::OK; + return Status::OK(); } Status DataStreamSender::compute_range_part_code( @@ -582,11 +582,11 @@ Status DataStreamSender::compute_range_part_code( PartitionInfo* part = nullptr; RETURN_IF_ERROR(find_partition(state, row, &part, ignore)); if (*ignore) { - return Status::OK; + return Status::OK(); } // process distribute RETURN_IF_ERROR(process_distribute(state, row, part, hash_value)); - return Status::OK; + return Status::OK(); } Status DataStreamSender::close(RuntimeState* state, Status exec_status) { @@ -599,7 +599,7 @@ Status 
DataStreamSender::close(RuntimeState* state, Status exec_status) { } Expr::close(_partition_expr_ctxs, state); - return Status::OK; + return Status::OK(); } template @@ -621,7 +621,7 @@ Status DataStreamSender::serialize_batch(RowBatch* src, T* dest, int num_receive COUNTER_UPDATE(_uncompressed_bytes_counter, uncompressed_bytes * num_receivers); } - return Status::OK; + return Status::OK(); } int64_t DataStreamSender::get_num_data_bytes_sent() const { diff --git a/be/src/runtime/descriptors.cpp b/be/src/runtime/descriptors.cpp index 1b0b6c0a7d..6044b5009e 100644 --- a/be/src/runtime/descriptors.cpp +++ b/be/src/runtime/descriptors.cpp @@ -528,13 +528,13 @@ Status DescriptorTbl::create(ObjectPool* pool, const TDescriptorTable& thrift_tb TupleDescriptorMap::iterator entry = (*tbl)->_tuple_desc_map.find(tdesc.parent); if (entry == (*tbl)->_tuple_desc_map.end()) { - return Status("unknown tid in slot descriptor msg"); + return Status::InternalError("unknown tid in slot descriptor msg"); } entry->second->add_slot(slot_d); } - return Status::OK; + return Status::OK(); } TableDescriptor* DescriptorTbl::get_table_descriptor(TableId id) const { diff --git a/be/src/runtime/disk_io_mgr.cc b/be/src/runtime/disk_io_mgr.cc index 9102870618..dfe7549f78 100644 --- a/be/src/runtime/disk_io_mgr.cc +++ b/be/src/runtime/disk_io_mgr.cc @@ -215,7 +215,7 @@ void DiskIoMgr::BufferDescriptor::reset(RequestContext* reader, _buffer_len = buffer_len; _len = 0; _eosr = false; - _status = Status::OK; + _status = Status::OK(); _mem_tracker = NULL; } @@ -404,7 +404,7 @@ Status DiskIoMgr::init(MemTracker* process_mem_tracker) { // ret = hadoopRzOptionsSetByteBufferPool(_cached_read_options, NULL); // DCHECK_EQ(ret, 0); - return Status::OK; + return Status::OK(); } // Status DiskIoMgr::register_context(RequestContext** request_context, MemTracker* mem_tracker) { @@ -412,7 +412,7 @@ Status DiskIoMgr::register_context(RequestContext** request_context, MemTracker* DCHECK(_request_context_cache.get() 
!= NULL) << "Must call init() first."; *request_context = _request_context_cache->get_new_context(); (*request_context)->reset(mem_tracker); - return Status::OK; + return Status::OK(); } void DiskIoMgr::unregister_context(RequestContext* reader) { @@ -449,7 +449,7 @@ void DiskIoMgr::unregister_context(RequestContext* reader) { // is on. // If wait_for_disks_completion is true, wait for the number of active disks to become 0. void DiskIoMgr::cancel_context(RequestContext* context, bool wait_for_disks_completion) { - context->cancel(Status::CANCELLED); + context->cancel(Status::Cancelled("Cancelled")); if (wait_for_disks_completion) { unique_lock lock(context->_lock); @@ -521,15 +521,15 @@ Status DiskIoMgr::validate_scan_range(ScanRange* range) { stringstream ss; ss << "Invalid scan range. Bad disk id: " << disk_id; DCHECK(false) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } Status DiskIoMgr::add_scan_ranges(RequestContext* reader, const vector& ranges, bool schedule_immediately) { if (ranges.empty()) { - return Status::OK; + return Status::OK(); } // Validate and initialize all ranges @@ -570,7 +570,7 @@ Status DiskIoMgr::add_scan_ranges(RequestContext* reader, } DCHECK(reader->validate()) << endl << reader->debug_string(); - return Status::OK; + return Status::OK(); } // This function returns the next scan range the reader should work on, checking @@ -580,7 +580,7 @@ Status DiskIoMgr::get_next_range(RequestContext* reader, ScanRange** range) { DCHECK(reader != NULL); DCHECK(range != NULL); *range = NULL; - Status status = Status::OK; + Status status = Status::OK(); unique_lock reader_lock(reader->_lock); DCHECK(reader->validate()) << endl << reader->debug_string(); @@ -605,7 +605,7 @@ Status DiskIoMgr::get_next_range(RequestContext* reader, ScanRange** range) { // DCHECK((*range)->_try_cache); // // bool cached_read_succeeded; // // 
RETURN_IF_ERROR((*range)->read_from_cache(&cached_read_succeeded)); - // // if (cached_read_succeeded) return Status::OK; + // // if (cached_read_succeeded) return Status::OK(); // // This range ended up not being cached. Loop again and pick up a new range. // reader->add_request_range(*range, false); @@ -640,7 +640,7 @@ Status DiskIoMgr::read(RequestContext* reader, ScanRange* range, BufferDescripto stringstream error_msg; error_msg << "Cannot perform sync read larger than " << _max_buffer_size << ". Request was " << range->len(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } vector ranges; @@ -649,7 +649,7 @@ Status DiskIoMgr::read(RequestContext* reader, ScanRange* range, BufferDescripto RETURN_IF_ERROR(range->get_next(buffer)); DCHECK((*buffer) != NULL); DCHECK((*buffer)->eosr()); - return Status::OK; + return Status::OK(); } void DiskIoMgr::return_buffer(BufferDescriptor* buffer_desc) { @@ -867,7 +867,7 @@ bool DiskIoMgr::get_next_request_range(DiskQueue* disk_queue, RequestRange** ran // ? 
(*request_context)->_mem_tracker->limit_exceeded() : false; if (process_limit_exceeded || reader_limit_exceeded) { - (*request_context)->cancel(Status::MEM_LIMIT_EXCEEDED); + (*request_context)->cancel(Status::MemoryLimitExceeded("Memory limit exceeded")); } unique_lock request_lock((*request_context)->_lock); @@ -1145,7 +1145,7 @@ void DiskIoMgr::write(RequestContext* writer_context, WriteRange* write_range) { stringstream error_msg; error_msg << "fopen(" << write_range->_file << ", \"rb+\") failed with errno=" << errno << " description=" << get_str_err_msg(); - ret_status = Status(error_msg.str()); + ret_status = Status::InternalError(error_msg.str()); } else { ret_status = write_range_helper(file_handle, write_range); @@ -1153,7 +1153,7 @@ void DiskIoMgr::write(RequestContext* writer_context, WriteRange* write_range) { if (ret_status.ok() && success != 0) { stringstream error_msg; error_msg << "fclose(" << write_range->_file << ") failed"; - ret_status = Status(error_msg.str()); + ret_status = Status::InternalError(error_msg.str()); } } @@ -1168,7 +1168,7 @@ Status DiskIoMgr::write_range_helper(FILE* file_handle, WriteRange* write_range) error_msg << "fseek(" << write_range->_file << ", " << write_range->offset() << " SEEK_SET) failed with errno=" << errno << " description=" << get_str_err_msg(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } int64_t bytes_written = fwrite(write_range->_data, 1, write_range->_len, file_handle); @@ -1177,7 +1177,7 @@ Status DiskIoMgr::write_range_helper(FILE* file_handle, WriteRange* write_range) error_msg << "fwrite(buffer, 1, " << write_range->_len << ", " << write_range->_file << ") failed with errno=" << errno << " description=" << get_str_err_msg(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } #if 0 if (DorisMetrics::io_mgr_bytes_written() != NULL) { @@ -1185,7 +1185,7 @@ Status DiskIoMgr::write_range_helper(FILE* file_handle, WriteRange* 
write_range) } #endif - return Status::OK; + return Status::OK(); } int DiskIoMgr::free_buffers_idx(int64_t buffer_size) { @@ -1206,7 +1206,7 @@ Status DiskIoMgr::add_write_range(RequestContext* writer, WriteRange* write_rang } writer->add_request_range(write_range, false); - return Status::OK; + return Status::OK(); } /* diff --git a/be/src/runtime/disk_io_mgr.h b/be/src/runtime/disk_io_mgr.h index e5c730f371..03229dfd2c 100644 --- a/be/src/runtime/disk_io_mgr.h +++ b/be/src/runtime/disk_io_mgr.h @@ -359,7 +359,7 @@ public: // Returns the next buffer for this scan range. buffer is an output parameter. // This function blocks until a buffer is ready or an error occurred. If this is - // called when all buffers have been returned, *buffer is set to NULL and Status::OK + // called when all buffers have been returned, *buffer is set to NULL and Status::OK() // is returned. // Only one thread can be in get_next() at any time. Status get_next(BufferDescriptor** buffer); @@ -507,7 +507,7 @@ public: // This callback is invoked on each WriteRange after the write is complete or the // context is cancelled. The status returned by the callback parameter indicates - // if the write was successful (i.e. Status::OK), if there was an error + // if the write was successful (i.e. Status::OK()), if there was an error // TStatusCode::RUNTIME_ERROR) or if the context was cancelled // (TStatusCode::CANCELLED). The callback is only invoked if this WriteRange was // successfully added (i.e. add_write_range() succeeded). 
No locks are held while diff --git a/be/src/runtime/disk_io_mgr_reader_context.cc b/be/src/runtime/disk_io_mgr_reader_context.cc index b20a4011c9..7d34229520 100644 --- a/be/src/runtime/disk_io_mgr_reader_context.cc +++ b/be/src/runtime/disk_io_mgr_reader_context.cc @@ -155,7 +155,7 @@ DiskIoMgr::RequestContext::RequestContext(DiskIoMgr* parent, int num_disks) : // void DiskIoMgr::RequestContext::reset(MemTracker* tracker) { void DiskIoMgr::RequestContext::reset(MemTracker* tracker) { DCHECK_EQ(_state, Inactive); - _status = Status::OK; + _status = Status::OK(); _bytes_read_counter = NULL; _read_timer = NULL; diff --git a/be/src/runtime/disk_io_mgr_scan_range.cc b/be/src/runtime/disk_io_mgr_scan_range.cc index 5d0449937c..5fbaa53e47 100644 --- a/be/src/runtime/disk_io_mgr_scan_range.cc +++ b/be/src/runtime/disk_io_mgr_scan_range.cc @@ -84,7 +84,7 @@ Status DiskIoMgr::ScanRange::get_next(BufferDescriptor** buffer) { { unique_lock scan_range_lock(_lock); if (_eosr_returned) { - return Status::OK; + return Status::OK(); } DCHECK(validate()) << debug_string(); @@ -150,7 +150,7 @@ Status DiskIoMgr::ScanRange::get_next(BufferDescriptor** buffer) { _reader->_blocked_ranges.remove(this); _reader->schedule_scan_range(this); } - return Status::OK; + return Status::OK(); } void DiskIoMgr::ScanRange::cancel(const Status& status) { @@ -271,16 +271,16 @@ void DiskIoMgr::ScanRange::init_internal(DiskIoMgr* io_mgr, RequestContext* read Status DiskIoMgr::ScanRange::open() { unique_lock hdfs_lock(_hdfs_lock); if (_is_cancelled) { - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } // if (_fs != NULL) { // if (_hdfs_file != NULL) { - // return Status::OK; + // return Status::OK(); // } // _hdfs_file = _io_mgr->OpenHdfsFile(_fs, file(), mtime()); // if (_hdfs_file == NULL) { - // return Status(GetHdfsErrorMsg("Failed to open HDFS file ", _file)); + // return Status::InternalError("GetHdfsErrorMsg("Failed to open HDFS file ", _file)); // } // if (hdfsSeek(_fs, 
_hdfs_file->file(), _offset) != 0) { @@ -289,11 +289,11 @@ Status DiskIoMgr::ScanRange::open() { // string error_msg = GetHdfsErrorMsg(""); // stringstream ss; // ss << "Error seeking to " << _offset << " in file: " << _file << " " << error_msg; - // return Status(ss.str()); + // return Status::InternalError(ss.str()); // } // } else { if (_local_file != NULL) { - return Status::OK; + return Status::OK(); } _local_file = fopen(file(), "r"); @@ -301,7 +301,7 @@ Status DiskIoMgr::ScanRange::open() { string error_msg = get_str_err_msg(); stringstream ss; ss << "Could not open file: " << _file << ": " << error_msg; - return Status(ss.str()); + return Status::InternalError(ss.str()); } if (fseek(_local_file, _offset, SEEK_SET) == -1) { fclose(_local_file); @@ -310,7 +310,7 @@ Status DiskIoMgr::ScanRange::open() { stringstream ss; ss << "Could not seek to " << _offset << " for file: " << _file << ": " << error_msg; - return Status(ss.str()); + return Status::InternalError(ss.str()); } // } #if 0 @@ -318,7 +318,7 @@ Status DiskIoMgr::ScanRange::open() { DorisMetrics::io_mgr_num_open_files()->increment(1L); } #endif - return Status::OK; + return Status::OK(); } void DiskIoMgr::ScanRange::close() { @@ -392,7 +392,7 @@ void DiskIoMgr::ScanRange::close() { Status DiskIoMgr::ScanRange::read(char* buffer, int64_t* bytes_read, bool* eosr) { unique_lock hdfs_lock(_hdfs_lock); if (_is_cancelled) { - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } *eosr = false; @@ -412,7 +412,7 @@ Status DiskIoMgr::ScanRange::read(char* buffer, int64_t* bytes_read, bool* eosr) * int chunk_size = min(bytes_to_read - *bytes_read, max_chunk_size); * int last_read = hdfsRead(_fs, _hdfs_file->file(), buffer + *bytes_read, chunk_size); * if (last_read == -1) { - * return Status(GetHdfsErrorMsg("Error reading from HDFS file: ", _file)); + * return Status::InternalError("GetHdfsErrorMsg("Error reading from HDFS file: ", _file)); * } else if (last_read == 0) { * // No more bytes in the 
file. The scan range went past the end. * *eosr = true; @@ -432,7 +432,7 @@ Status DiskIoMgr::ScanRange::read(char* buffer, int64_t* bytes_read, bool* eosr) stringstream ss; ss << "Error reading from " << _file << " at byte offset: " << (_offset + _bytes_read) << ": " << error_msg; - return Status(ss.str()); + return Status::InternalError(ss.str()); } else { // On Linux, we should only get partial reads from block devices on error or eof. DCHECK(feof(_local_file) != 0); @@ -445,7 +445,7 @@ Status DiskIoMgr::ScanRange::read(char* buffer, int64_t* bytes_read, bool* eosr) if (_bytes_read == _len) { *eosr = true; } - return Status::OK; + return Status::OK(); } /* @@ -457,11 +457,11 @@ Status DiskIoMgr::ScanRange::read(char* buffer, int64_t* bytes_read, bool* eosr) * if (!status.ok()) return status; * * // Cached reads not supported on local filesystem. - * if (_fs == NULL) return Status::OK; + * if (_fs == NULL) return Status::OK(); * * { * unique_lock hdfs_lock(_hdfs_lock); - * if (_is_cancelled) return Status::CANCELLED; + * if (_is_cancelled) return Status::Cancelled("Cancelled"); * * DCHECK(_hdfs_file != NULL); * DCHECK(_cached_buffer == NULL); @@ -469,7 +469,7 @@ Status DiskIoMgr::ScanRange::read(char* buffer, int64_t* bytes_read, bool* eosr) * _io_mgr->_cached_read_options, len()); * * // Data was not cached, caller will fall back to normal read path. - * if (_cached_buffer == NULL) return Status::OK; + * if (_cached_buffer == NULL) return Status::OK(); * } * * // Cached read succeeded. 
@@ -493,7 +493,7 @@ Status DiskIoMgr::ScanRange::read(char* buffer, int64_t* bytes_read, bool* eosr) * } * *read_succeeded = true; * ++_reader->_num_used_buffers; - * return Status::OK; + * return Status::OK(); * } */ } // namespace doris diff --git a/be/src/runtime/dpp_sink.cpp b/be/src/runtime/dpp_sink.cpp index ad04adbb8a..aeb16e07f5 100644 --- a/be/src/runtime/dpp_sink.cpp +++ b/be/src/runtime/dpp_sink.cpp @@ -274,13 +274,13 @@ Status Translator::create_sorter(RuntimeState* state) { QSorter* sorter = _obj_pool->add(new QSorter(_row_desc, _rollup_schema.keys(), state)); RETURN_IF_ERROR(sorter->prepare(state)); _sorter = sorter; - return Status::OK; + return Status::OK(); } Status Translator::create_comparetor(RuntimeState* state) { RETURN_IF_ERROR(Expr::clone_if_not_exists(_rollup_schema.keys(), state, &_last_row_expr_ctxs)); RETURN_IF_ERROR(Expr::clone_if_not_exists(_rollup_schema.keys(), state, &_cur_row_expr_ctxs)); - return Status::OK; + return Status::OK(); } void Translator::format_output_path(RuntimeState* state) { @@ -311,31 +311,31 @@ Status Translator::create_writer(RuntimeState* state) { S_IRWXU | S_IRWXU) != OLAP_SUCCESS) { std::stringstream ss; ss << "open file failed; [file=" << _output_path << "]"; - return Status("open file failed."); + return Status::InternalError("open file failed."); } // 3. 
Create writer _writer = _obj_pool->add(new DppWriter(1, _output_row_expr_ctxs, fh)); RETURN_IF_ERROR(_writer->open()); - return Status::OK; + return Status::OK(); } Status Translator::create_value_updaters() { if (_rollup_schema.values().size() != _rollup_schema.value_ops().size()) { - return Status("size of values and value_ops are not equal."); + return Status::InternalError("size of values and value_ops are not equal."); } int num_values = _rollup_schema.values().size(); std::string keys_type = _rollup_schema.keys_type(); if ("DUP_KEYS" == keys_type) { - return Status::OK; + return Status::OK(); } else if ("UNIQUE_KEYS" == keys_type) { for (int i = 0; i < num_values; ++i) { _value_updaters.push_back(fake_update); } - return Status::OK; + return Status::OK(); } for (int i = 0; i < num_values; ++i) { @@ -495,7 +495,7 @@ Status Translator::create_value_updaters() { _value_updaters.push_back(update_min); break; case TAggregationType::SUM: - return Status("Unsupport sum operation on date/datetime column."); + return Status::InternalError("Unsupport sum operation on date/datetime column."); default: // replace _value_updaters.push_back(fake_update); @@ -509,7 +509,7 @@ Status Translator::create_value_updaters() { case TAggregationType::MAX: case TAggregationType::MIN: case TAggregationType::SUM: - return Status("Unsupport max/min/sum operation on char/varchar column."); + return Status::InternalError("Unsupport max/min/sum operation on char/varchar column."); default: // Only replace has meaning _value_updaters.push_back(fake_update); @@ -526,7 +526,7 @@ Status Translator::create_value_updaters() { case TAggregationType::MAX: case TAggregationType::MIN: case TAggregationType::SUM: - return Status("Unsupport max/min/sum operation on hll column."); + return Status::InternalError("Unsupport max/min/sum operation on hll column."); default: _value_updaters.push_back(fake_update); break; @@ -537,13 +537,13 @@ Status Translator::create_value_updaters() { std::stringstream 
ss; ss << "Unsupported column type(" << _rollup_schema.values()[i]->root()->type() << ")"; // No operation, just pusb back a fake update - return Status(ss.str()); + return Status::InternalError(ss.str()); break; } } } - return Status::OK; + return Status::OK(); } Status Translator::create_profile(RuntimeState* state) { @@ -557,7 +557,7 @@ Status Translator::create_profile(RuntimeState* state) { _sort_timer = ADD_TIMER(_profile, "sort time"); _agg_timer = ADD_TIMER(_profile, "aggregate time"); _writer_timer = ADD_TIMER(_profile, "write to file time"); - return Status::OK; + return Status::OK(); } Status Translator::prepare(RuntimeState* state) { @@ -578,7 +578,7 @@ Status Translator::prepare(RuntimeState* state) { _batch_to_write.reset( new RowBatch(_row_desc, state->batch_size(), state->instance_mem_tracker())); if (_batch_to_write.get() == nullptr) { - return Status("No memory to allocate RowBatch."); + return Status::InternalError("No memory to allocate RowBatch."); } // 5. prepare value updater @@ -592,7 +592,7 @@ Status Translator::prepare(RuntimeState* state) { } _hll_merge.prepare(hll_column_count, ((QSorter*)_sorter)->get_mem_pool()); - return Status::OK; + return Status::OK(); } Status Translator::add_batch(RowBatch* batch) { @@ -760,13 +760,13 @@ Status Translator::process_one_row(TupleRow* row) { std::stringstream ss; ss << "row is nullptr."; LOG(ERROR) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } // first row // Just deep copy, and don't reuse its data. 
if (!_batch_to_write->in_flight()) { copy_row(row); - return Status::OK; + return Status::OK(); } int row_idx = _batch_to_write->add_row(); @@ -776,7 +776,7 @@ Status Translator::process_one_row(TupleRow* row) { if (eq_tuple_row(last_row, row)) { // Just merge to last row and return update_row(last_row, row); - return Status::OK; + return Status::OK(); } } _hll_merge.finalize_one_merge(last_row, @@ -795,7 +795,7 @@ Status Translator::process_one_row(TupleRow* row) { // deep copy the new row copy_row(row); - return Status::OK; + return Status::OK(); } Status Translator::process(RuntimeState* state) { @@ -841,7 +841,7 @@ Status Translator::process(RuntimeState* state) { } // Output last row - return Status::OK; + return Status::OK(); } Status Translator::close(RuntimeState* state) { @@ -853,12 +853,12 @@ Status Translator::close(RuntimeState* state) { Expr::close(_output_row_expr_ctxs, state); _batch_to_write.reset(); _hll_merge.close(); - return Status::OK; + return Status::OK(); } Status DppSink::init(RuntimeState* state) { _profile = state->obj_pool()->add(new RuntimeProfile(state->obj_pool(), "Dpp sink")); - return Status::OK; + return Status::OK(); } Status DppSink::get_or_create_translator( @@ -869,7 +869,7 @@ Status DppSink::get_or_create_translator( auto iter = _translator_map.find(tablet_desc); if (iter != _translator_map.end()) { *trans_vec = &iter->second; - return Status::OK; + return Status::OK(); } // new one _translator_map.insert(std::make_pair(tablet_desc, std::vector())); @@ -884,7 +884,7 @@ Status DppSink::get_or_create_translator( (*trans_vec)->push_back(translator); } _translator_count += (*trans_vec)->size(); - return Status::OK; + return Status::OK(); } Status DppSink::add_batch( @@ -900,7 +900,7 @@ Status DppSink::add_batch( RETURN_IF_ERROR(trans->add_batch(batch)); } // add this batch to appoint translator - return Status::OK; + return Status::OK(); } void DppSink::process(RuntimeState* state, Translator* trans, CountDownLatch* latch) { diff 
--git a/be/src/runtime/dpp_sink_internal.cpp b/be/src/runtime/dpp_sink_internal.cpp index 441f6a876e..2f54b5bea6 100644 --- a/be/src/runtime/dpp_sink_internal.cpp +++ b/be/src/runtime/dpp_sink_internal.cpp @@ -51,7 +51,7 @@ Status RollupSchema::from_thrift( std::stringstream ss; ss << "values size(" << t_schema.values.size() << ") not equal with value_ops size(" << t_schema.value_ops.size() << ")"; - return Status(ss.str()); + return Status::InternalError(ss.str()); } schema->_keys_type = t_schema.keys_type; if (false == t_schema.__isset.keys_type) { @@ -66,7 +66,7 @@ Status RollupSchema::from_thrift( pool, t_schema.values, &schema->_value_ctxs)); schema->_value_ops.assign(t_schema.value_ops.begin(), t_schema.value_ops.end()); - return Status::OK; + return Status::OK(); } Status RollupSchema::prepare( @@ -75,19 +75,19 @@ Status RollupSchema::prepare( _key_ctxs, state, row_desc, mem_tracker)); RETURN_IF_ERROR(Expr::prepare( _value_ctxs, state, row_desc, mem_tracker)); - return Status::OK; + return Status::OK(); } Status RollupSchema::open(RuntimeState* state) { RETURN_IF_ERROR(Expr::open(_key_ctxs, state)); RETURN_IF_ERROR(Expr::open(_value_ctxs, state)); - return Status::OK; + return Status::OK(); } Status RollupSchema::close(RuntimeState* state) { Expr::close(_key_ctxs, state); Expr::close(_value_ctxs, state); - return Status::OK; + return Status::OK(); } Status PartRangeKey::from_thrift( @@ -96,7 +96,7 @@ Status PartRangeKey::from_thrift( PartRangeKey* key) { key->_sign = t_key.sign; if (key->_sign != 0) { - return Status::OK; + return Status::OK(); } key->_type = thrift_to_type(t_key.type); @@ -150,7 +150,7 @@ Status PartRangeKey::from_thrift( if (!(datetime->from_date_str(t_key.key.c_str(), t_key.key.length()))) { std::stringstream error_msg; error_msg << "Fail to convert date string:" << t_key.key; - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } datetime->cast_to_date(); break; @@ -162,7 +162,7 @@ Status 
PartRangeKey::from_thrift( if (!(datetime->from_date_str(t_key.key.c_str(), t_key.key.length()))) { std::stringstream error_msg; error_msg << "Fail to convert datetime string:" << t_key.key; - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } datetime->to_datetime(); break; @@ -175,10 +175,10 @@ Status PartRangeKey::from_thrift( if (parse_result != StringParser::PARSE_SUCCESS) { std::stringstream error_msg; error_msg << "Fail to convert string:" << t_key.key; - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } - return Status::OK; + return Status::OK(); } Status PartRangeKey::from_value( @@ -189,7 +189,7 @@ Status PartRangeKey::from_value( key->_type = type; key->_key = value; - return Status::OK; + return Status::OK(); } Status PartRange::from_thrift( @@ -204,7 +204,7 @@ Status PartRange::from_thrift( range->_include_start_key = t_part_range.include_start_key; range->_include_end_key = t_part_range.include_end_key; VLOG_ROW << "after construct: " << range->debug_string(); - return Status::OK; + return Status::OK(); } Status PartitionInfo::from_thrift( @@ -216,12 +216,12 @@ Status PartitionInfo::from_thrift( if (t_partition.__isset.distributed_exprs) { partition->_distributed_bucket = t_partition.distribute_bucket; if (partition->_distributed_bucket == 0) { - return Status("Distributed bucket is 0."); + return Status::InternalError("Distributed bucket is 0."); } RETURN_IF_ERROR(Expr::create_expr_trees( pool, t_partition.distributed_exprs, &partition->_distributed_expr_ctxs)); } - return Status::OK; + return Status::OK(); } Status PartitionInfo::prepare( @@ -230,21 +230,21 @@ Status PartitionInfo::prepare( RETURN_IF_ERROR(Expr::prepare( _distributed_expr_ctxs, state, row_desc, mem_tracker)); } - return Status::OK; + return Status::OK(); } Status PartitionInfo::open(RuntimeState* state) { if (_distributed_expr_ctxs.size() > 0) { return Expr::open(_distributed_expr_ctxs, state); } - return Status::OK; + 
return Status::OK(); } Status PartitionInfo::close(RuntimeState* state) { if (_distributed_expr_ctxs.size() > 0) { Expr::close(_distributed_expr_ctxs, state); } - return Status::OK; + return Status::OK(); } } diff --git a/be/src/runtime/dpp_writer.cpp b/be/src/runtime/dpp_writer.cpp index 6bf9b027ec..f4c0addb8b 100644 --- a/be/src/runtime/dpp_writer.cpp +++ b/be/src/runtime/dpp_writer.cpp @@ -71,7 +71,7 @@ Status DppWriter::open() { _buf = new char[k_buf_len]; _pos = _buf; _end = _buf + k_buf_len; - return Status::OK; + return Status::OK(); } void DppWriter::reset_buf() { @@ -181,7 +181,7 @@ Status DppWriter::append_one_row(TupleRow* row) { case TYPE_HLL: const StringValue* str_val = (const StringValue*)(item); if (UNLIKELY(str_val->ptr == nullptr && str_val->len != 0)) { - return Status("String value ptr is null"); + return Status::InternalError("String value ptr is null"); } // write len first @@ -189,7 +189,7 @@ Status DppWriter::append_one_row(TupleRow* row) { if (len != str_val->len) { std::stringstream ss; ss << "length of string is overflow.len=" << str_val->len; - return Status(ss.str()); + return Status::InternalError(ss.str()); } append_to_buf(&len, 2); // passing a NULL pointer to memcpy may be core/ @@ -203,7 +203,7 @@ Status DppWriter::append_one_row(TupleRow* row) { const StringValue* str_val = (const StringValue*)(item); if (UNLIKELY(str_val->ptr == nullptr || str_val->len == 0)) { - return Status("String value ptr is null"); + return Status::InternalError("String value ptr is null"); } append_to_buf(str_val->ptr, str_val->len); break; @@ -227,19 +227,19 @@ Status DppWriter::append_one_row(TupleRow* row) { default: { std::stringstream ss; ss << "Unknown column type " << _output_expr_ctxs[i]->root()->type(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } } } - return Status::OK; + return Status::OK(); } Status DppWriter::add_batch(RowBatch* batch) { int num_rows = batch->num_rows(); if (num_rows <= 0) { - return Status::OK; + 
return Status::OK(); } Status status; @@ -256,7 +256,7 @@ Status DppWriter::add_batch(RowBatch* batch) { int len = _pos - _buf; OLAPStatus olap_status = _fp->write(_buf, len); if (olap_status != OLAP_SUCCESS) { - return Status("write to file failed."); + return Status::InternalError("write to file failed."); } _content_adler32 = olap_adler32(_content_adler32, _buf, len); _write_len += len; @@ -269,7 +269,7 @@ Status DppWriter::write_header() { _header.set_file_length(_header.size() + _write_len); _header.set_checksum(_content_adler32); _header.serialize(_fp); - return Status::OK; + return Status::OK(); } Status DppWriter::close() { diff --git a/be/src/runtime/etl_job_mgr.cpp b/be/src/runtime/etl_job_mgr.cpp index 19a4f12a8c..7b71243294 100644 --- a/be/src/runtime/etl_job_mgr.cpp +++ b/be/src/runtime/etl_job_mgr.cpp @@ -68,7 +68,7 @@ EtlJobMgr::~EtlJobMgr() { } Status EtlJobMgr::init() { - return Status::OK; + return Status::OK(); } Status EtlJobMgr::start_job(const TMiniLoadEtlTaskRequest& req) { @@ -78,15 +78,15 @@ Status EtlJobMgr::start_job(const TMiniLoadEtlTaskRequest& req) { if (it != _running_jobs.end()) { // Already have this job, return what??? 
LOG(INFO) << "Duplicated etl job(" << id << ")"; - return Status::OK; + return Status::OK(); } - // If already success, we return Status::OK + // If already success, we return Status::OK() // and wait master ask me success information if (_success_jobs.exists(id)) { // Already success LOG(INFO) << "Already successful etl job(" << id << ")"; - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(_exec_env->fragment_mgr()->exec_plan_fragment( @@ -101,7 +101,7 @@ Status EtlJobMgr::start_job(const TMiniLoadEtlTaskRequest& req) { VLOG_ETL << "Job id(" << id << ") insert to EtlJobMgr."; _running_jobs.insert(id); - return Status::OK; + return Status::OK(); } void EtlJobMgr::report_to_master(PlanFragmentExecutor* executor) { @@ -189,14 +189,14 @@ Status EtlJobMgr::cancel_job(const TUniqueId& id) { if (it == _running_jobs.end()) { // Nothing to do LOG(INFO) << "No such job id, just print to info " << id; - return Status::OK; + return Status::OK(); } _running_jobs.erase(it); VLOG_ETL << "id(" << id << ") have been removed from EtlJobMgr."; EtlJobCtx job_ctx; - job_ctx.finish_status = Status::CANCELLED; + job_ctx.finish_status = Status::Cancelled("Cancelled"); _failed_jobs.put(id, job_ctx); - return Status::OK; + return Status::OK(); } Status EtlJobMgr::finish_job(const TUniqueId& id, @@ -208,7 +208,7 @@ Status EtlJobMgr::finish_job(const TUniqueId& id, if (it == _running_jobs.end()) { std::stringstream ss; ss << "Unknown job id(" << id << ")."; - return Status(ss.str()); + return Status::InternalError(ss.str()); } _running_jobs.erase(it); @@ -224,7 +224,7 @@ Status EtlJobMgr::finish_job(const TUniqueId& id, VLOG_ETL << "Move job(" << id << ") from running to " << (finish_status.ok() ? 
"success jobs" : "failed jobs"); - return Status::OK; + return Status::OK(); } Status EtlJobMgr::get_job_state(const TUniqueId& id, @@ -234,7 +234,7 @@ Status EtlJobMgr::get_job_state(const TUniqueId& id, if (it != _running_jobs.end()) { result->status.__set_status_code(TStatusCode::OK); result->__set_etl_state(TEtlState::RUNNING); - return Status::OK; + return Status::OK(); } // Successful if (_success_jobs.exists(id)) { @@ -254,7 +254,7 @@ Status EtlJobMgr::get_job_state(const TUniqueId& id, result->__set_tracking_url( to_load_error_http_path(ctx.result.debug_path)); } - return Status::OK; + return Status::OK(); } // failed information if (_failed_jobs.exists(id)) { @@ -267,12 +267,12 @@ Status EtlJobMgr::get_job_state(const TUniqueId& id, result->__set_tracking_url( to_http_path(ctx.result.debug_path)); } - return Status::OK; + return Status::OK(); } // NO this jobs result->status.__set_status_code(TStatusCode::OK); result->__set_etl_state(TEtlState::CANCELLED); - return Status::OK; + return Status::OK(); } Status EtlJobMgr::erase_job(const TDeleteEtlFilesRequest& req) { @@ -282,12 +282,12 @@ Status EtlJobMgr::erase_job(const TDeleteEtlFilesRequest& req) { if (it != _running_jobs.end()) { std::stringstream ss; ss << "Job(" << id << ") is running, can not be deleted."; - return Status(ss.str()); + return Status::InternalError(ss.str()); } _success_jobs.erase(id); _failed_jobs.erase(id); - return Status::OK; + return Status::OK(); } void EtlJobMgr::debug(std::stringstream& ss) { diff --git a/be/src/runtime/exec_env_init.cpp b/be/src/runtime/exec_env_init.cpp index 9208410c14..000a95c83d 100644 --- a/be/src/runtime/exec_env_init.cpp +++ b/be/src/runtime/exec_env_init.cpp @@ -123,7 +123,7 @@ Status ExecEnv::_init(const std::vector& store_paths) { _small_file_mgr->init(); _init_mem_tracker(); RETURN_IF_ERROR(_tablet_writer_mgr->start_bg_worker()); - return Status::OK; + return Status::OK(); } Status ExecEnv::_init_mem_tracker() { @@ -135,12 +135,12 @@ Status 
ExecEnv::_init_mem_tracker() { bytes_limit = ParseUtil::parse_mem_spec(config::mem_limit, &is_percent); if (bytes_limit < 0) { ss << "Failed to parse mem limit from '" + config::mem_limit + "'."; - return Status(ss.str()); + return Status::InternalError(ss.str()); } if (!BitUtil::IsPowerOf2(config::min_buffer_size)) { ss << "--min_buffer_size must be a power-of-two: " << config::min_buffer_size; - return Status(ss.str()); + return Status::InternalError(ss.str()); } int64_t buffer_pool_limit = ParseUtil::parse_mem_spec( @@ -148,7 +148,7 @@ Status ExecEnv::_init_mem_tracker() { if (buffer_pool_limit <= 0) { ss << "Invalid --buffer_pool_limit value, must be a percentage or " "positive bytes value or percentage: " << config::buffer_pool_limit; - return Status(ss.str()); + return Status::InternalError(ss.str()); } buffer_pool_limit = BitUtil::RoundDown(buffer_pool_limit, config::min_buffer_size); @@ -157,7 +157,7 @@ Status ExecEnv::_init_mem_tracker() { if (clean_pages_limit <= 0) { ss << "Invalid --buffer_pool_clean_pages_limit value, must be a percentage or " "positive bytes value or percentage: " << config::buffer_pool_clean_pages_limit; - return Status(ss.str()); + return Status::InternalError(ss.str()); } _init_buffer_pool(config::min_buffer_size, buffer_pool_limit, clean_pages_limit); @@ -178,7 +178,7 @@ Status ExecEnv::_init_mem_tracker() { LOG(INFO) << "Using global memory limit: " << PrettyPrinter::print(bytes_limit, TUnit::BYTES); RETURN_IF_ERROR(_disk_io_mgr->init(_mem_tracker)); RETURN_IF_ERROR(_tmp_file_mgr->init(DorisMetrics::metrics())); - return Status::OK; + return Status::OK(); } void ExecEnv::_init_buffer_pool(int64_t min_page_size, diff --git a/be/src/runtime/export_sink.cpp b/be/src/runtime/export_sink.cpp index 276e0502bb..932192c123 100644 --- a/be/src/runtime/export_sink.cpp +++ b/be/src/runtime/export_sink.cpp @@ -52,7 +52,7 @@ Status ExportSink::init(const TDataSink& t_sink) { // From the thrift expressions create the real exprs. 
RETURN_IF_ERROR(Expr::create_expr_trees(_pool, _t_output_expr, &_output_expr_ctxs)); - return Status::OK; + return Status::OK(); } Status ExportSink::prepare(RuntimeState* state) { @@ -76,7 +76,7 @@ Status ExportSink::prepare(RuntimeState* state) { _rows_written_counter = ADD_COUNTER(profile(), "RowsExported", TUnit::UNIT); _write_timer = ADD_TIMER(profile(), "WriteTime"); - return Status::OK; + return Status::OK(); } Status ExportSink::open(RuntimeState* state) { @@ -84,7 +84,7 @@ Status ExportSink::open(RuntimeState* state) { RETURN_IF_ERROR(Expr::open(_output_expr_ctxs, state)); // open broker RETURN_IF_ERROR(open_file_writer()); - return Status::OK; + return Status::OK(); } Status ExportSink::send(RuntimeState* state, RowBatch* batch) { @@ -112,7 +112,7 @@ Status ExportSink::send(RuntimeState* state, RowBatch* batch) { COUNTER_UPDATE(_bytes_written_counter, buf.size()); } COUNTER_UPDATE(_rows_written_counter, num_rows); - return Status::OK; + return Status::OK(); } Status ExportSink::gen_row_buffer(TupleRow* row, std::stringstream* ss) { @@ -197,7 +197,7 @@ Status ExportSink::gen_row_buffer(TupleRow* row, std::stringstream* ss) { default: { std::stringstream err_ss; err_ss << "can't export this type. 
type = " << _output_expr_ctxs[i]->root()->type(); - return Status(err_ss.str()); + return Status::InternalError(err_ss.str()); } } } @@ -208,7 +208,7 @@ Status ExportSink::gen_row_buffer(TupleRow* row, std::stringstream* ss) { } (*ss) << _t_export_sink.line_delimiter; - return Status::OK; + return Status::OK(); } Status ExportSink::close(RuntimeState* state, Status exec_status) { @@ -217,12 +217,12 @@ Status ExportSink::close(RuntimeState* state, Status exec_status) { _file_writer->close(); _file_writer = nullptr; } - return Status::OK; + return Status::OK(); } Status ExportSink::open_file_writer() { if (_file_writer != nullptr) { - return Status::OK; + return Status::OK(); } std::string file_name = gen_file_name(); @@ -249,12 +249,12 @@ Status ExportSink::open_file_writer() { default: { std::stringstream ss; ss << "Unknown file type, type=" << _t_export_sink.file_type; - return Status(ss.str()); + return Status::InternalError(ss.str()); } } _state->add_export_output_file(_t_export_sink.export_path + "/" + file_name); - return Status::OK; + return Status::OK(); } // TODO(lingbin): add some other info to file name, like partition diff --git a/be/src/runtime/export_task_mgr.cpp b/be/src/runtime/export_task_mgr.cpp index 59f89f907a..777ff99a08 100644 --- a/be/src/runtime/export_task_mgr.cpp +++ b/be/src/runtime/export_task_mgr.cpp @@ -44,7 +44,7 @@ ExportTaskMgr::~ExportTaskMgr() { } Status ExportTaskMgr::init() { - return Status::OK; + return Status::OK(); } Status ExportTaskMgr::start_task(const TExportTaskRequest& request) { @@ -54,15 +54,15 @@ Status ExportTaskMgr::start_task(const TExportTaskRequest& request) { if (it != _running_tasks.end()) { // Already have this task, return what??? 
LOG(INFO) << "Duplicated export task(" << id << ")"; - return Status::OK; + return Status::OK(); } - // If already success, we return Status::OK + // If already success, we return Status::OK() // and wait master ask me success information if (_success_tasks.exists(id)) { // Already success LOG(INFO) << "Already successful export task(" << id << ")"; - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(_exec_env->fragment_mgr()->exec_plan_fragment( @@ -77,7 +77,7 @@ Status ExportTaskMgr::start_task(const TExportTaskRequest& request) { VLOG_EXPORT << "accept one export Task. id=" << id; _running_tasks.insert(id); - return Status::OK; + return Status::OK(); } Status ExportTaskMgr::cancel_task(const TUniqueId& id) { @@ -86,14 +86,14 @@ Status ExportTaskMgr::cancel_task(const TUniqueId& id) { if (it == _running_tasks.end()) { // Nothing to do LOG(INFO) << "No such export task id, just print to info " << id; - return Status::OK; + return Status::OK(); } _running_tasks.erase(it); VLOG_EXPORT << "task id(" << id << ") have been removed from ExportTaskMgr."; ExportTaskCtx ctx; - ctx.status = Status::CANCELLED; + ctx.status = Status::Cancelled("Cancelled"); _failed_tasks.put(id, ctx); - return Status::OK; + return Status::OK(); } Status ExportTaskMgr::erase_task(const TUniqueId& id) { @@ -102,12 +102,12 @@ Status ExportTaskMgr::erase_task(const TUniqueId& id) { if (it != _running_tasks.end()) { std::stringstream ss; ss << "Task(" << id << ") is running, can not be deleted."; - return Status(ss.str()); + return Status::InternalError(ss.str()); } _success_tasks.erase(id); _failed_tasks.erase(id); - return Status::OK; + return Status::OK(); } void ExportTaskMgr::finalize_task(PlanFragmentExecutor* executor) { @@ -133,7 +133,7 @@ Status ExportTaskMgr::finish_task(const TUniqueId& id, if (it == _running_tasks.end()) { std::stringstream ss; ss << "Unknown task id(" << id << ")."; - return Status(ss.str()); + return Status::InternalError(ss.str()); } 
_running_tasks.erase(it); @@ -149,7 +149,7 @@ Status ExportTaskMgr::finish_task(const TUniqueId& id, VLOG_EXPORT << "Move task(" << id << ") from running to " << (status.ok() ? "success tasks" : "failed tasks"); - return Status::OK; + return Status::OK(); } Status ExportTaskMgr::get_task_state(const TUniqueId& id, TExportStatusResult* result) { @@ -158,7 +158,7 @@ Status ExportTaskMgr::get_task_state(const TUniqueId& id, TExportStatusResult* r if (it != _running_tasks.end()) { result->status.__set_status_code(TStatusCode::OK); result->__set_state(TExportState::RUNNING); - return Status::OK; + return Status::OK(); } // Successful @@ -168,7 +168,7 @@ Status ExportTaskMgr::get_task_state(const TUniqueId& id, TExportStatusResult* r result->status.__set_status_code(TStatusCode::OK); result->__set_state(TExportState::FINISHED); result->__set_files(ctx.result.files); - return Status::OK; + return Status::OK(); } // failed information @@ -177,13 +177,13 @@ Status ExportTaskMgr::get_task_state(const TUniqueId& id, TExportStatusResult* r _success_tasks.get(id, &ctx); result->status.__set_status_code(TStatusCode::OK); result->__set_state(TExportState::CANCELLED); - return Status::OK; + return Status::OK(); } // NO this task result->status.__set_status_code(TStatusCode::OK); result->__set_state(TExportState::CANCELLED); - return Status::OK; + return Status::OK(); } void ExportTaskMgr::report_to_master(PlanFragmentExecutor* executor) { diff --git a/be/src/runtime/fragment_mgr.cpp b/be/src/runtime/fragment_mgr.cpp index 501432a938..d4f6759de0 100644 --- a/be/src/runtime/fragment_mgr.cpp +++ b/be/src/runtime/fragment_mgr.cpp @@ -206,14 +206,14 @@ Status FragmentExecState::execute() { } DorisMetrics::fragment_requests_total.increment(1); DorisMetrics::fragment_request_duration_us.increment(duration_ns / 1000); - return Status::OK; + return Status::OK(); } Status FragmentExecState::cancel() { std::lock_guard l(_status_lock); RETURN_IF_ERROR(_exec_status); _executor.cancel(); - 
return Status::OK; + return Status::OK(); } void FragmentExecState::callback(const Status& status, RuntimeProfile* profile, bool done) { @@ -245,7 +245,7 @@ void FragmentExecState::coordinator_callback( if (!coord_status.ok()) { std::stringstream ss; ss << "couldn't get a client for " << _coord_addr; - update_status(Status(TStatusCode::INTERNAL_ERROR, ss.str(), false)); + update_status(Status::InternalError(ss.str())); return; } @@ -325,7 +325,7 @@ void FragmentExecState::coordinator_callback( std::stringstream msg; msg << "ReportExecStatus() to " << _coord_addr << " failed:\n" << e.what(); LOG(WARNING) << msg.str(); - rpc_status = Status(TStatusCode::INTERNAL_ERROR, msg.str(), false); + rpc_status = Status::InternalError(msg.str()); } if (!rpc_status.ok()) { @@ -405,7 +405,7 @@ Status FragmentMgr::exec_plan_fragment( auto iter = _fragment_map.find(fragment_instance_id); if (iter != _fragment_map.end()) { // Duplicated - return Status::OK; + return Status::OK(); } } exec_state.reset(new FragmentExecState( @@ -421,7 +421,7 @@ Status FragmentMgr::exec_plan_fragment( auto iter = _fragment_map.find(fragment_instance_id); if (iter != _fragment_map.end()) { // Duplicated - return Status("Double execute"); + return Status::InternalError("Double execute"); } // register exec_state before starting exec thread _fragment_map.insert(std::make_pair(fragment_instance_id, exec_state)); @@ -440,7 +440,7 @@ Status FragmentMgr::exec_plan_fragment( std::lock_guard lock(_lock); _fragment_map.erase(fragment_instance_id); } - return Status("Put planfragment to failed."); + return Status::InternalError("Put planfragment to failed."); } } else { pthread_t id; @@ -454,12 +454,12 @@ Status FragmentMgr::exec_plan_fragment( err_msg.append(strerror(ret)); err_msg.append(","); err_msg.append(std::to_string(ret)); - return Status(err_msg); + return Status::InternalError(err_msg); } pthread_detach(id); } - return Status::OK; + return Status::OK(); } Status FragmentMgr::cancel(const TUniqueId& id) 
{ @@ -469,13 +469,13 @@ Status FragmentMgr::cancel(const TUniqueId& id) { auto iter = _fragment_map.find(id); if (iter == _fragment_map.end()) { // No match - return Status::OK; + return Status::OK(); } exec_state = iter->second; } exec_state->cancel(); - return Status::OK; + return Status::OK(); } // @@ -525,7 +525,7 @@ Status FragmentMgr::trigger_profile_report(const PTriggerProfileReportRequest* r iter->second->executor()->report_profile_once(); } } - return Status::OK; + return Status::OK(); } diff --git a/be/src/runtime/initial_reservations.cc b/be/src/runtime/initial_reservations.cc index 90606184f1..9c2bd7f66f 100644 --- a/be/src/runtime/initial_reservations.cc +++ b/be/src/runtime/initial_reservations.cc @@ -50,17 +50,15 @@ Status InitialReservations::Init( const TUniqueId& query_id, int64_t query_min_reservation) { DCHECK_EQ(0, initial_reservations_.GetReservation()) << "Already inited"; if (!initial_reservations_.IncreaseReservation(query_min_reservation)) { - Status status; std::stringstream ss; ss << "Minimum reservation unavaliable: " << query_min_reservation << " query id:" << query_id; - status.add_error_msg(TStatusCode::MINIMUM_RESERVATION_UNAVAILABLE, ss.str()); - return status; + return Status::MinimumReservationUnavailable(ss.str()); } VLOG_QUERY << "Successfully claimed initial reservations (" << PrettyPrinter::print(query_min_reservation, TUnit::BYTES) << ") for" << " query " << print_id(query_id); - return Status::OK; + return Status::OK(); } void InitialReservations::Claim(BufferPool::ClientHandle* dst, int64_t bytes) { diff --git a/be/src/runtime/load_path_mgr.cpp b/be/src/runtime/load_path_mgr.cpp index 527106dc61..27a9c20daa 100644 --- a/be/src/runtime/load_path_mgr.cpp +++ b/be/src/runtime/load_path_mgr.cpp @@ -53,7 +53,7 @@ Status LoadPathMgr::init() { _idx = 0; _reserved_hours = std::max(config::load_data_reserve_hours, 1L); pthread_create(&_cleaner_id, nullptr, LoadPathMgr::cleaner, this); - return Status::OK; + return Status::OK(); } 
void* LoadPathMgr::cleaner(void* param) { @@ -72,12 +72,12 @@ Status LoadPathMgr::allocate_dir( const std::string& label, std::string* prefix) { if (_path_vec.empty()) { - return Status("No load path configed."); + return Status::InternalError("No load path configed."); } std::string path; auto size = _path_vec.size(); auto retry = size; - Status status = Status::OK; + Status status = Status::OK(); while (retry--) { { // add SHARD_PREFIX for compatible purpose @@ -89,7 +89,7 @@ Status LoadPathMgr::allocate_dir( status = FileUtils::create_dir(path); if (LIKELY(status.ok())) { *prefix = path; - return Status::OK; + return Status::OK(); } else { LOG(WARNING) << "create dir failed:" << path << ", error msg:" << status.get_error_msg(); } @@ -144,7 +144,7 @@ Status LoadPathMgr::get_load_error_file_name( << "_" << std::hex << fragment_instance_id.hi << "_" << fragment_instance_id.lo; *error_path = ss.str(); - return Status::OK; + return Status::OK(); } std::string LoadPathMgr::get_load_error_absolute_path(const std::string& file_path) { diff --git a/be/src/runtime/mem_tracker.cpp b/be/src/runtime/mem_tracker.cpp index 9644c477f3..b9a8f50693 100644 --- a/be/src/runtime/mem_tracker.cpp +++ b/be/src/runtime/mem_tracker.cpp @@ -32,6 +32,7 @@ #include "util/mem_info.h" #include "util/pretty_printer.h" #include "util/uid_util.h" +#include "util/stack_util.h" //using std::shared_ptr; //using std::weak_ptr; @@ -319,7 +320,7 @@ Status MemTracker::MemLimitExceeded(RuntimeState* state, const std::string& deta // } // ss << tracker_to_log->LogUsage(); // Status status = Status::MemLimitExceeded(ss.str()); - Status status = Status::MEM_LIMIT_EXCEEDED; + Status status = Status::MemoryLimitExceeded("Memory limit exceeded"); if (state != nullptr) state->log_error(status.get_error_msg()); return status; } diff --git a/be/src/runtime/merge_sorter.cpp b/be/src/runtime/merge_sorter.cpp index 4cf2b656be..9bcdac33f6 100644 --- a/be/src/runtime/merge_sorter.cpp +++ 
b/be/src/runtime/merge_sorter.cpp @@ -310,7 +310,7 @@ Status MergeSorter::Run::init() { if (!_is_sorted) { _sorter->_initial_runs_counter->update(1); } - return Status::OK; + return Status::OK(); } template @@ -351,7 +351,7 @@ Status MergeSorter::Run::add_batch(RowBatch* batch, int start_index, int* num_pr ss << "Variable length data in a single tuple larger than block size "; ss << total_var_len; ss << " > " << _sorter->_block_mgr->max_block_size(); - return Status(TStatusCode::INTERNAL_ERROR, ss.str(), false); + return Status::InternalError(ss.str()); } } @@ -368,7 +368,7 @@ Status MergeSorter::Run::add_batch(RowBatch* batch, int start_index, int* num_pr // dhc: we can't get here, because we can get the new block. If we can't get new block, // we will exit in tryAddBlock(MemTracker exceed). cur_fixed_len_block->return_allocation(_sort_tuple_size); - return Status::OK; + return Status::OK(); } } @@ -391,12 +391,12 @@ Status MergeSorter::Run::add_batch(RowBatch* batch, int start_index, int* num_pr if (added) { cur_fixed_len_block = _fixed_len_blocks.back(); } else { - return Status::OK; + return Status::OK(); } } } - return Status::OK; + return Status::OK(); } Status MergeSorter::Run::prepare_read() { @@ -405,7 +405,7 @@ Status MergeSorter::Run::prepare_read() { //var_len_blocks_index_ = 0; _num_tuples_returned = 0; - return Status::OK; + return Status::OK(); } template @@ -413,7 +413,7 @@ Status MergeSorter::Run::get_next(RowBatch* output_batch, bool* eos) { if (_fixed_len_blocks_index == _fixed_len_blocks.size()) { *eos = true; DCHECK_EQ(_num_tuples_returned, _num_tuples); - return Status::OK; + return Status::OK(); } else { *eos = false; } @@ -438,7 +438,7 @@ Status MergeSorter::Run::get_next(RowBatch* output_batch, bool* eos) { _fixed_len_block_offset = 0; } - return Status::OK; + return Status::OK(); } Status MergeSorter::Run::try_add_block(std::vector* block_sequence, @@ -456,7 +456,7 @@ Status MergeSorter::Run::try_add_block(std::vector* bl } else { *added = 
false; } - return Status::OK; + return Status::OK(); } void MergeSorter::Run::copy_var_len_data(char* dest, const std::vector& var_values) { @@ -611,10 +611,10 @@ Status MergeSorter::add_batch(RowBatch* batch) { cur_batch_index += num_processed; if (cur_batch_index < batch->num_rows()) { - return Status("run is full"); + return Status::InternalError("run is full"); } } - return Status::OK; + return Status::OK(); } Status MergeSorter::input_done() { @@ -627,7 +627,7 @@ Status MergeSorter::input_done() { // from the sorted run. _sorted_runs.back()->prepare_read(); - return Status::OK; + return Status::OK(); } Status MergeSorter::get_next(RowBatch* output_batch, bool* eos) { @@ -637,7 +637,7 @@ Status MergeSorter::get_next(RowBatch* output_batch, bool* eos) { // in the pinned blocks in the single sorted run. RETURN_IF_ERROR(_sorted_runs.back()->get_next(output_batch, eos)); - return Status::OK; + return Status::OK(); } Status MergeSorter::sort_run() { @@ -664,6 +664,6 @@ Status MergeSorter::sort_run() { } _sorted_runs.push_back(_unsorted_run); _unsorted_run = NULL; - return Status::OK; + return Status::OK(); } } // namespace doris diff --git a/be/src/runtime/message_body_sink.cpp b/be/src/runtime/message_body_sink.cpp index 93fa788bca..b7bd2cb57e 100644 --- a/be/src/runtime/message_body_sink.cpp +++ b/be/src/runtime/message_body_sink.cpp @@ -38,20 +38,20 @@ Status MessageBodyFileSink::open() { char errmsg[64]; LOG(WARNING) << "fail to open file, file=" << _path << ", errmsg=" << strerror_r(errno, errmsg, 64); - return Status("fail to open file"); + return Status::InternalError("fail to open file"); } - return Status::OK; + return Status::OK(); } Status MessageBodyFileSink::append(const char* data, size_t size) { auto written = ::write(_fd, data, size); if (written == size) { - return Status::OK; + return Status::OK(); } char errmsg[64]; LOG(WARNING) << "fail to write, file=" << _path << ", error=" << strerror_r(errno, errmsg, 64); - return Status("fail to write 
file"); + return Status::InternalError("fail to write file"); } Status MessageBodyFileSink::finish() { @@ -61,10 +61,10 @@ Status MessageBodyFileSink::finish() { LOG(WARNING) << "fail to write, file=" << _path << ", error=" << strerror_r(errno, errmsg, 64); _fd = -1; - return Status("fail to close file"); + return Status::InternalError("fail to close file"); } _fd = -1; - return Status::OK; + return Status::OK(); } void MessageBodyFileSink::cancel() { diff --git a/be/src/runtime/message_body_sink.h b/be/src/runtime/message_body_sink.h index b8ebe7f01f..ffaadc57d9 100644 --- a/be/src/runtime/message_body_sink.h +++ b/be/src/runtime/message_body_sink.h @@ -32,7 +32,7 @@ public: } // called when all data has been append virtual Status finish() { - return Status::OK; + return Status::OK(); } // called when read HTTP failed virtual void cancel() { } diff --git a/be/src/runtime/mysql_table_sink.cpp b/be/src/runtime/mysql_table_sink.cpp index 031b315b41..2e0d5868a6 100644 --- a/be/src/runtime/mysql_table_sink.cpp +++ b/be/src/runtime/mysql_table_sink.cpp @@ -51,7 +51,7 @@ Status MysqlTableSink::init(const TDataSink& t_sink) { // From the thrift expressions create the real exprs. 
RETURN_IF_ERROR(Expr::create_expr_trees(_pool, _t_output_expr, &_output_expr_ctxs)); - return Status::OK; + return Status::OK(); } Status MysqlTableSink::prepare(RuntimeState* state) { @@ -62,7 +62,7 @@ Status MysqlTableSink::prepare(RuntimeState* state) { title << "MysqlTableSink (frag_id=" << state->fragment_instance_id() << ")"; // create profile _profile = state->obj_pool()->add(new RuntimeProfile(state->obj_pool(), title.str())); - return Status::OK; + return Status::OK(); } Status MysqlTableSink::open(RuntimeState* state) { @@ -71,7 +71,7 @@ Status MysqlTableSink::open(RuntimeState* state) { // create writer _writer = state->obj_pool()->add(new MysqlTableWriter(_output_expr_ctxs)); RETURN_IF_ERROR(_writer->open(_conn_info, _mysql_tbl)); - return Status::OK; + return Status::OK(); } Status MysqlTableSink::send(RuntimeState* state, RowBatch* batch) { @@ -80,7 +80,7 @@ Status MysqlTableSink::send(RuntimeState* state, RowBatch* batch) { Status MysqlTableSink::close(RuntimeState* state, Status exec_status) { Expr::close(_output_expr_ctxs, state); - return Status::OK; + return Status::OK(); } } diff --git a/be/src/runtime/mysql_table_writer.cpp b/be/src/runtime/mysql_table_writer.cpp index 46c64aaf33..a00a33eaf6 100644 --- a/be/src/runtime/mysql_table_writer.cpp +++ b/be/src/runtime/mysql_table_writer.cpp @@ -50,7 +50,7 @@ MysqlTableWriter::~MysqlTableWriter() { Status MysqlTableWriter::open(const MysqlConnInfo& conn_info, const std::string& tbl) { _mysql_conn = mysql_init(nullptr); if (_mysql_conn == nullptr) { - return Status("Call mysql_init failed."); + return Status::InternalError("Call mysql_init failed."); } MYSQL* res = mysql_real_connect( @@ -65,19 +65,19 @@ Status MysqlTableWriter::open(const MysqlConnInfo& conn_info, const std::string& if (res == nullptr) { std::stringstream ss; ss << "mysql_real_connect failed because " << mysql_error(_mysql_conn); - return Status(ss.str()); + return Status::InternalError(ss.str()); } // set character if 
(mysql_set_character_set(_mysql_conn, "utf8")) { std::stringstream ss; ss << "mysql_set_character_set failed because " << mysql_error(_mysql_conn); - return Status(ss.str()); + return Status::InternalError(ss.str()); } _mysql_tbl = tbl; - return Status::OK; + return Status::OK(); } Status MysqlTableWriter::insert_row(TupleRow* row) { @@ -172,7 +172,7 @@ Status MysqlTableWriter::insert_row(TupleRow* row) { std::stringstream err_ss; err_ss << "can't convert this type to mysql type. type = " << _output_expr_ctxs[i]->root()->type(); - return Status(err_ss.str()); + return Status::InternalError(err_ss.str()); } } } @@ -185,15 +185,15 @@ Status MysqlTableWriter::insert_row(TupleRow* row) { std::stringstream err_ss; err_ss << "Insert to mysql server(" << mysql_get_host_info(_mysql_conn) << ") failed, because: " << mysql_error(_mysql_conn); - return Status(err_ss.str()); + return Status::InternalError(err_ss.str()); } - return Status::OK; + return Status::OK(); } Status MysqlTableWriter::append(RowBatch* batch) { if (batch == nullptr || batch->num_rows() == 0) { - return Status::OK; + return Status::OK(); } int num_rows = batch->num_rows(); @@ -201,7 +201,7 @@ Status MysqlTableWriter::append(RowBatch* batch) { RETURN_IF_ERROR(insert_row(batch->get_row(i))); } - return Status::OK; + return Status::OK(); } } diff --git a/be/src/runtime/mysql_table_writer.h b/be/src/runtime/mysql_table_writer.h index ace1e87df4..e737c58e3c 100644 --- a/be/src/runtime/mysql_table_writer.h +++ b/be/src/runtime/mysql_table_writer.h @@ -52,17 +52,17 @@ public: Status open(const MysqlConnInfo& conn_info, const std::string& tbl); Status begin_trans() { - return Status::OK; + return Status::OK(); } Status append(RowBatch* batch); Status abort_tarns() { - return Status::OK; + return Status::OK(); } Status finish_tarns() { - return Status::OK; + return Status::OK(); } private: diff --git a/be/src/runtime/plan_fragment_executor.cpp b/be/src/runtime/plan_fragment_executor.cpp index 
19d786d3de..51d4664336 100644 --- a/be/src/runtime/plan_fragment_executor.cpp +++ b/be/src/runtime/plan_fragment_executor.cpp @@ -233,7 +233,7 @@ Status PlanFragmentExecutor::prepare(const TExecPlanFragmentParams& request) { _query_statistics.reset(new QueryStatistics()); _sink->set_query_statistics(_query_statistics); - return Status::OK; + return Status::OK(); } void PlanFragmentExecutor::optimize_llvm_module() { @@ -298,7 +298,7 @@ Status PlanFragmentExecutor::open_internal() { } if (_sink.get() == NULL) { - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(_sink->open(runtime_state())); @@ -358,7 +358,7 @@ Status PlanFragmentExecutor::open_internal() { stop_report_thread(); send_report(true); - return Status::OK; + return Status::OK(); } void PlanFragmentExecutor::collect_query_statistics() { @@ -473,7 +473,7 @@ Status PlanFragmentExecutor::get_next(RowBatch** batch) { Status PlanFragmentExecutor::get_next_internal(RowBatch** batch) { if (_done) { *batch = NULL; - return Status::OK; + return Status::OK(); } while (!_done) { @@ -490,7 +490,7 @@ Status PlanFragmentExecutor::get_next_internal(RowBatch** batch) { *batch = NULL; } - return Status::OK; + return Status::OK(); } void PlanFragmentExecutor::update_status(const Status& status) { @@ -556,7 +556,7 @@ void PlanFragmentExecutor::close() { if (_prepared) { _sink->close(runtime_state(), _status); } else { - _sink->close(runtime_state(), Status("prepare failed")); + _sink->close(runtime_state(), Status::InternalError("prepare failed")); } } diff --git a/be/src/runtime/pull_load_task_mgr.cpp b/be/src/runtime/pull_load_task_mgr.cpp index 4002ecef43..5b38b03520 100644 --- a/be/src/runtime/pull_load_task_mgr.cpp +++ b/be/src/runtime/pull_load_task_mgr.cpp @@ -86,7 +86,7 @@ Status PullLoadTaskCtx::add_sub_task_info( std::lock_guard l(_lock); if (_finished_senders.count(sub_task_info.sub_task_id) > 0) { // Already receive this sub-task informations - return Status::OK; + return Status::OK(); } // Apply this 
information @@ -105,7 +105,7 @@ Status PullLoadTaskCtx::add_sub_task_info( *finish = true; } - return Status::OK; + return Status::OK(); } PullLoadTaskMgr::PullLoadTaskMgr(const std::string& path) @@ -120,7 +120,7 @@ Status PullLoadTaskMgr::init() { if (!st.ok()) { _dir_exist = false; } - return Status::OK; + return Status::OK(); } Status PullLoadTaskMgr::load_task_ctxes() { @@ -143,7 +143,7 @@ Status PullLoadTaskMgr::load_task_ctxes() { } */ - return Status("Not implemented"); + return Status::InternalError("Not implemented"); } Status PullLoadTaskMgr::load_task_ctx(const std::string& file_path) { @@ -152,7 +152,7 @@ Status PullLoadTaskMgr::load_task_ctx(const std::string& file_path) { char buf[64]; std::stringstream ss; ss << "fopen(" << file_path << ") failed, because: " << strerror_r(errno, buf, 64); - return Status(ss.str()); + return Status::InternalError(ss.str()); } DeferOp close_file(std::bind(&fclose, fp)); @@ -163,11 +163,11 @@ Status PullLoadTaskMgr::load_task_ctx(const std::string& file_path) { char buf[64]; std::stringstream ss; ss << "fread content length failed, because: " << strerror_r(errno, buf, 64); - return Status(ss.str()); + return Status::InternalError(ss.str()); } if (content_len > 10 * 1024 * 1024) { - return Status("Content is too big."); + return Status::InternalError("Content is too big."); } // 2. read content @@ -178,7 +178,7 @@ Status PullLoadTaskMgr::load_task_ctx(const std::string& file_path) { char buf[64]; std::stringstream ss; ss << "fread content failed, because: " << strerror_r(errno, buf, 64); - return Status(ss.str()); + return Status::InternalError(ss.str()); } // 3. 
checksum @@ -191,13 +191,13 @@ Status PullLoadTaskMgr::load_task_ctx(const std::string& file_path) { char buf[64]; std::stringstream ss; ss << "fread content failed, because: " << strerror_r(errno, buf, 64); - return Status(ss.str()); + return Status::InternalError(ss.str()); } if (read_checksum != checksum) { std::stringstream ss; ss << "fread checksum failed, read_checksum=" << read_checksum << ", content_checksum=" << checksum; - return Status(ss.str()); + return Status::InternalError(ss.str()); } // 4. new task context @@ -210,12 +210,12 @@ Status PullLoadTaskMgr::load_task_ctx(const std::string& file_path) { } LOG(INFO) << "success load task " << task_ctx->id(); - return Status::OK; + return Status::OK(); } Status PullLoadTaskMgr::save_task_ctx(PullLoadTaskCtx* task_ctx) { if (!_dir_exist) { - return Status::OK; + return Status::OK(); } ThriftSerializer serializer(true, 64 * 1024); RETURN_IF_ERROR(task_ctx->serialize(&serializer)); @@ -230,7 +230,7 @@ Status PullLoadTaskMgr::save_task_ctx(PullLoadTaskCtx* task_ctx) { char buf[64]; std::stringstream ss; ss << "fopen(" << file_path << ") failed, because: " << strerror_r(errno, buf, 64); - return Status(ss.str()); + return Status::InternalError(ss.str()); } DeferOp close_file(std::bind(&fclose, fp)); @@ -240,7 +240,7 @@ Status PullLoadTaskMgr::save_task_ctx(PullLoadTaskCtx* task_ctx) { char buf[64]; std::stringstream ss; ss << "fwrite content length failed., because: " << strerror_r(errno, buf, 64); - return Status(ss.str()); + return Status::InternalError(ss.str()); } // 2. write content @@ -249,7 +249,7 @@ Status PullLoadTaskMgr::save_task_ctx(PullLoadTaskCtx* task_ctx) { char buf[64]; std::stringstream ss; ss << "fwrite content failed., because: " << strerror_r(errno, buf, 64); - return Status(ss.str()); + return Status::InternalError(ss.str()); } // 3. 
checksum @@ -260,10 +260,10 @@ Status PullLoadTaskMgr::save_task_ctx(PullLoadTaskCtx* task_ctx) { char buf[64]; std::stringstream ss; ss << "fwrite checksum failed., because: " << strerror_r(errno, buf, 64); - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } Status PullLoadTaskMgr::register_task(const TUniqueId& id, int num_senders) { @@ -273,14 +273,14 @@ Status PullLoadTaskMgr::register_task(const TUniqueId& id, int num_senders) { if (it != std::end(_task_ctx_map)) { // Do nothing LOG(INFO) << "Duplicate pull load task, id=" << id << " num_senders=" << num_senders; - return Status::OK; + return Status::OK(); } std::shared_ptr task_ctx(new PullLoadTaskCtx(id, num_senders)); _task_ctx_map.emplace(id, task_ctx); } LOG(INFO) << "Register pull load task, id=" << id << ", num_senders=" << num_senders; - return Status::OK; + return Status::OK(); } std::string PullLoadTaskMgr::task_file_path(const TUniqueId& id) const { @@ -303,7 +303,7 @@ Status PullLoadTaskMgr::deregister_task(const TUniqueId& id) { auto it = _task_ctx_map.find(id); if (it == std::end(_task_ctx_map)) { LOG(INFO) << "Deregister unknown pull load task, id=" << id; - return Status::OK; + return Status::OK(); } _task_ctx_map.erase(it); ctx = it->second; @@ -315,7 +315,7 @@ Status PullLoadTaskMgr::deregister_task(const TUniqueId& id) { } LOG(INFO) << "Deregister pull load task, id=" << id; - return Status::OK; + return Status::OK(); } Status PullLoadTaskMgr::report_sub_task_info( @@ -328,7 +328,7 @@ Status PullLoadTaskMgr::report_sub_task_info( std::stringstream ss; ss << "receive unknown pull load sub-task id=" << sub_task_info.id << ", sub_id=" << sub_task_info.sub_task_id; - return Status(ss.str()); + return Status::InternalError(ss.str()); } ctx = it->second; @@ -343,7 +343,7 @@ Status PullLoadTaskMgr::report_sub_task_info( } VLOG_RPC << "process one pull load sub-task, id=" << sub_task_info.id << ", sub_id=" << sub_task_info.sub_task_id; 
- return Status::OK; + return Status::OK(); } Status PullLoadTaskMgr::fetch_task_info(const TUniqueId& tid, @@ -356,13 +356,13 @@ Status PullLoadTaskMgr::fetch_task_info(const TUniqueId& tid, LOG(INFO) << "Fetch unknown task info, id=" << tid; result->task_info.id = tid; result->task_info.etl_state = TEtlState::CANCELLED; - return Status::OK; + return Status::OK(); } ctx = it->second; } ctx->get_task_info(&result->task_info); - return Status::OK; + return Status::OK(); } Status PullLoadTaskMgr::fetch_all_task_infos( @@ -371,7 +371,7 @@ Status PullLoadTaskMgr::fetch_all_task_infos( for (auto& it : _task_ctx_map) { it.second->get_task_info(&result->task_infos); } - return Status::OK; + return Status::OK(); } } diff --git a/be/src/runtime/qsorter.cpp b/be/src/runtime/qsorter.cpp index 725c497c3c..eaabebf1bc 100644 --- a/be/src/runtime/qsorter.cpp +++ b/be/src/runtime/qsorter.cpp @@ -90,7 +90,7 @@ QSorter::QSorter( Status QSorter::prepare(RuntimeState* state) { RETURN_IF_ERROR(Expr::clone_if_not_exists(_order_expr_ctxs, state, &_lhs_expr_ctxs)); RETURN_IF_ERROR(Expr::clone_if_not_exists(_order_expr_ctxs, state, &_rhs_expr_ctxs)); - return Status::OK; + return Status::OK(); } // Insert if either not at the limit or it's a new TopN tuple_row @@ -99,17 +99,17 @@ Status QSorter::insert_tuple_row(TupleRow* input_row) { _row_desc.tuple_descriptors(), _tuple_pool.get()); if (insert_tuple_row == NULL) { - return Status("deep copy failed."); + return Status::InternalError("deep copy failed."); } _sorted_rows.push_back(insert_tuple_row); - return Status::OK; + return Status::OK(); } Status QSorter::add_batch(RowBatch* batch) { for (int i = 0; i < batch->num_rows(); ++i) { RETURN_IF_ERROR(insert_tuple_row(batch->get_row(i))); } - return Status::OK; + return Status::OK(); } // Reverse result in priority_queue @@ -117,7 +117,7 @@ Status QSorter::input_done() { std::sort(_sorted_rows.begin(), _sorted_rows.end(), TupleRowLessThan(_lhs_expr_ctxs, _rhs_expr_ctxs)); _next_iter = 
_sorted_rows.begin(); - return Status::OK; + return Status::OK(); } Status QSorter::get_next(RowBatch* batch, bool* eos) { @@ -131,14 +131,14 @@ Status QSorter::get_next(RowBatch* batch, bool* eos) { } *eos = _next_iter == _sorted_rows.end(); - return Status::OK; + return Status::OK(); } Status QSorter::close(RuntimeState* state) { _tuple_pool.reset(); Expr::close(_lhs_expr_ctxs, state); Expr::close(_rhs_expr_ctxs, state); - return Status::OK; + return Status::OK(); } } diff --git a/be/src/runtime/result_buffer_mgr.cpp b/be/src/runtime/result_buffer_mgr.cpp index 48b30334a3..239bcac0fc 100644 --- a/be/src/runtime/result_buffer_mgr.cpp +++ b/be/src/runtime/result_buffer_mgr.cpp @@ -44,7 +44,7 @@ Status ResultBufferMgr::init() { _cancel_thread.reset( new boost::thread( boost::bind(boost::mem_fn(&ResultBufferMgr::cancel_thread), this))); - return Status::OK; + return Status::OK(); } Status ResultBufferMgr::create_sender( @@ -54,7 +54,7 @@ Status ResultBufferMgr::create_sender( if (*sender != nullptr) { LOG(WARNING) << "already have buffer control block for this instance " << query_id; - return Status::OK; + return Status::OK(); } boost::shared_ptr control_block( @@ -64,7 +64,7 @@ Status ResultBufferMgr::create_sender( _buffer_map.insert(std::make_pair(query_id, control_block)); } *sender = control_block; - return Status::OK; + return Status::OK(); } boost::shared_ptr ResultBufferMgr::find_control_block( @@ -86,7 +86,7 @@ Status ResultBufferMgr::fetch_data( if (NULL == cb) { // the sender tear down its buffer block - return Status("no result for this query."); + return Status::InternalError("no result for this query."); } return cb->get_batch(result); @@ -99,7 +99,7 @@ void ResultBufferMgr::fetch_data(const PUniqueId& finst_id, GetResultBatchCtx* c boost::shared_ptr cb = find_control_block(tid); if (cb == nullptr) { LOG(WARNING) << "no result for this query, id=" << tid; - ctx->on_failure(Status("no result for this query")); + ctx->on_failure(Status::InternalError("no 
result for this query")); return; } cb->get_batch(ctx); @@ -114,7 +114,7 @@ Status ResultBufferMgr::cancel(const TUniqueId& query_id) { _buffer_map.erase(iter); } - return Status::OK; + return Status::OK(); } Status ResultBufferMgr::cancel_at_time(time_t cancel_time, const TUniqueId& query_id) { @@ -128,7 +128,7 @@ Status ResultBufferMgr::cancel_at_time(time_t cancel_time, const TUniqueId& quer } iter->second.push_back(query_id); - return Status::OK; + return Status::OK(); } void ResultBufferMgr::cancel_thread() { diff --git a/be/src/runtime/result_sink.cpp b/be/src/runtime/result_sink.cpp index b25bd0ef2a..85b724d6ad 100644 --- a/be/src/runtime/result_sink.cpp +++ b/be/src/runtime/result_sink.cpp @@ -47,7 +47,7 @@ Status ResultSink::prepare_exprs(RuntimeState* state) { // Prepare the exprs to run. RETURN_IF_ERROR(Expr::prepare( _output_expr_ctxs, state, _row_desc, _expr_mem_tracker.get())); - return Status::OK; + return Status::OK(); } Status ResultSink::prepare(RuntimeState* state) { @@ -65,7 +65,7 @@ Status ResultSink::prepare(RuntimeState* state) { _writer.reset(new(std::nothrow) ResultWriter(_sender.get(), _output_expr_ctxs)); RETURN_IF_ERROR(_writer->init(state)); - return Status::OK; + return Status::OK(); } Status ResultSink::open(RuntimeState* state) { @@ -78,7 +78,7 @@ Status ResultSink::send(RuntimeState* state, RowBatch* batch) { Status ResultSink::close(RuntimeState* state, Status exec_status) { if (_closed) { - return Status::OK; + return Status::OK(); } // close sender, this is normal path end if (_sender) { @@ -89,7 +89,7 @@ Status ResultSink::close(RuntimeState* state, Status exec_status) { Expr::close(_output_expr_ctxs, state); _closed = true; - return Status::OK; + return Status::OK(); } void ResultSink::set_query_statistics(std::shared_ptr statistics) { diff --git a/be/src/runtime/result_writer.cpp b/be/src/runtime/result_writer.cpp index a543d2bef7..5475df5620 100644 --- a/be/src/runtime/result_writer.cpp +++ b/be/src/runtime/result_writer.cpp 
@@ -44,16 +44,16 @@ ResultWriter::~ResultWriter() { Status ResultWriter::init(RuntimeState* state) { if (NULL == _sinker) { - return Status("sinker is NULL pointer."); + return Status::InternalError("sinker is NULL pointer."); } _row_buffer = new(std::nothrow) MysqlRowBuffer(); if (NULL == _row_buffer) { - return Status("no memory to alloc."); + return Status::InternalError("no memory to alloc."); } - return Status::OK; + return Status::OK(); } Status ResultWriter::add_one_row(TupleRow* row) { @@ -173,15 +173,15 @@ Status ResultWriter::add_one_row(TupleRow* row) { } if (0 != buf_ret) { - return Status("pack mysql buffer failed."); + return Status::InternalError("pack mysql buffer failed."); } - return Status::OK; + return Status::OK(); } Status ResultWriter::append_row_batch(RowBatch* batch) { if (NULL == batch || 0 == batch->num_rows()) { - return Status::OK; + return Status::OK(); } Status status; diff --git a/be/src/runtime/routine_load/data_consumer.cpp b/be/src/runtime/routine_load/data_consumer.cpp index 14df29c284..b0c46a3e12 100644 --- a/be/src/runtime/routine_load/data_consumer.cpp +++ b/be/src/runtime/routine_load/data_consumer.cpp @@ -37,7 +37,7 @@ Status KafkaDataConsumer::init(StreamLoadContext* ctx) { std::unique_lock l(_lock); if (_init) { // this consumer has already been initialized. 
- return Status::OK; + return Status::OK(); } RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); @@ -56,19 +56,19 @@ Status KafkaDataConsumer::init(StreamLoadContext* ctx) { RdKafka::Conf::ConfResult res = conf->set(conf_key, conf_val, errstr); if (res == RdKafka::Conf::CONF_UNKNOWN) { // ignore unknown config - return Status::OK; + return Status::OK(); } else if (errstr.find("not supported") != std::string::npos) { // some java-only properties may be passed to here, and librdkafak will return 'xxx' not supported // ignore it - return Status::OK; + return Status::OK(); } else if (res != RdKafka::Conf::CONF_OK) { std::stringstream ss; ss << "PAUSE: failed to set '" << conf_key << "', value: '" << conf_val << "', err: " << errstr; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } VLOG(3) << "set " << conf_key << ": " << conf_val; - return Status::OK; + return Status::OK(); }; RETURN_IF_ERROR(set_conf("metadata.broker.list", ctx->kafka_info->brokers)); @@ -87,7 +87,7 @@ Status KafkaDataConsumer::init(StreamLoadContext* ctx) { std::vector parts; boost::split(parts, item.second, boost::is_any_of(":")); if (parts.size() != 3) { - return Status("PAUSE: Invalid file property of kafka: " + item.second); + return Status::InternalError("PAUSE: Invalid file property of kafka: " + item.second); } int64_t file_id = std::stol(parts[1]); std::string file_path; @@ -95,7 +95,7 @@ Status KafkaDataConsumer::init(StreamLoadContext* ctx) { if (!st.ok()) { std::stringstream ss; ss << "PAUSE: failed to get file for config: " << item.first << ", error: " << st.get_error_msg(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } RETURN_IF_ERROR(set_conf(item.first, file_path)); } else { @@ -108,20 +108,20 @@ Status KafkaDataConsumer::init(StreamLoadContext* ctx) { std::stringstream ss; ss << "PAUSE: failed to set 'event_cb'"; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return 
Status::InternalError(ss.str()); } // create consumer _k_consumer = RdKafka::KafkaConsumer::create(conf, errstr); if (!_k_consumer) { LOG(WARNING) << "PAUSE: failed to create kafka consumer: " << errstr; - return Status("PAUSE: failed to create kafka consumer: " + errstr); + return Status::InternalError("PAUSE: failed to create kafka consumer: " + errstr); } VLOG(3) << "finished to init kafka consumer. " << ctx->brief(); _init = true; - return Status::OK; + return Status::OK(); } Status KafkaDataConsumer::assign_topic_partitions( @@ -155,10 +155,10 @@ Status KafkaDataConsumer::assign_topic_partitions( if (err) { LOG(WARNING) << "failed to assign topic partitions: " << ctx->brief(true) << ", err: " << RdKafka::err2str(err); - return Status("failed to assign topic partitions"); + return Status::InternalError("failed to assign topic partitions"); } - return Status::OK; + return Status::OK(); } Status KafkaDataConsumer::group_consume( @@ -171,7 +171,7 @@ Status KafkaDataConsumer::group_consume( int64_t received_rows = 0; int64_t put_rows = 0; - Status st = Status::OK; + Status st = Status::OK(); MonotonicStopWatch consumer_watch; MonotonicStopWatch watch; watch.start(); @@ -208,7 +208,7 @@ Status KafkaDataConsumer::group_consume( LOG(WARNING) << "kafka consume failed: " << _id << ", msg: " << msg->errstr(); done = true; - st = Status(msg->errstr()); + st = Status::InternalError(msg->errstr()); break; } @@ -240,7 +240,7 @@ Status KafkaDataConsumer::get_partition_meta(std::vector* partition_ids std::stringstream ss; ss << "failed to create topic: " << errstr; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } auto topic_deleter = [topic] () { delete topic; }; DeferOp delete_topic(std::bind(topic_deleter)); @@ -252,7 +252,7 @@ Status KafkaDataConsumer::get_partition_meta(std::vector* partition_ids std::stringstream ss; ss << "failed to get partition meta: " << RdKafka::err2str(err); LOG(WARNING) << ss.str(); - return 
Status(ss.str()); + return Status::InternalError(ss.str()); } auto meta_deleter = [metadata] () { delete metadata; }; DeferOp delete_meta(std::bind(meta_deleter)); @@ -271,7 +271,7 @@ Status KafkaDataConsumer::get_partition_meta(std::vector* partition_ids ss << ", try again"; } LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } RdKafka::TopicMetadata::PartitionMetadataIterator ip; @@ -281,27 +281,27 @@ Status KafkaDataConsumer::get_partition_meta(std::vector* partition_ids } if (partition_ids->empty()) { - return Status("no partition in this topic"); + return Status::InternalError("no partition in this topic"); } - return Status::OK; + return Status::OK(); } Status KafkaDataConsumer::cancel(StreamLoadContext* ctx) { std::unique_lock l(_lock); if (!_init) { - return Status("consumer is not initialized"); + return Status::InternalError("consumer is not initialized"); } _cancelled = true; LOG(INFO) << "kafka consumer cancelled. " << _id; - return Status::OK; + return Status::OK(); } Status KafkaDataConsumer::reset() { std::unique_lock l(_lock); _cancelled = false; - return Status::OK; + return Status::OK(); } // if the kafka brokers and topic are same, diff --git a/be/src/runtime/routine_load/data_consumer.h b/be/src/runtime/routine_load/data_consumer.h index baf2cf7449..95b10e6b15 100644 --- a/be/src/runtime/routine_load/data_consumer.h +++ b/be/src/runtime/routine_load/data_consumer.h @@ -128,7 +128,7 @@ public: virtual Status init(StreamLoadContext* ctx) override; // TODO(cmy): currently do not implement single consumer start method, using group_consume - virtual Status consume(StreamLoadContext* ctx) override { return Status::OK; } + virtual Status consume(StreamLoadContext* ctx) override { return Status::OK(); } virtual Status cancel(StreamLoadContext* ctx) override; // reassign partition topics virtual Status reset() override; diff --git a/be/src/runtime/routine_load/data_consumer_group.cpp 
b/be/src/runtime/routine_load/data_consumer_group.cpp index 3e76158d1c..7b2b6e1fa1 100644 --- a/be/src/runtime/routine_load/data_consumer_group.cpp +++ b/be/src/runtime/routine_load/data_consumer_group.cpp @@ -41,7 +41,7 @@ Status KafkaDataConsumerGroup::assign_topic_partitions(StreamLoadContext* ctx) { divide_parts[i], ctx->kafka_info->topic, ctx)); } - return Status::OK; + return Status::OK(); } KafkaDataConsumerGroup::~KafkaDataConsumerGroup() { @@ -60,7 +60,7 @@ KafkaDataConsumerGroup::~KafkaDataConsumerGroup() { } Status KafkaDataConsumerGroup::start_all(StreamLoadContext* ctx) { - Status result_st = Status::OK; + Status result_st = Status::OK(); // start all consumers for(auto& consumer : _consumers) { if (!_thread_pool.offer( @@ -79,7 +79,7 @@ Status KafkaDataConsumerGroup::start_all(StreamLoadContext* ctx) { }))) { LOG(WARNING) << "failed to submit data consumer: " << consumer->id() << ", group id: " << _grp_id; - return Status("failed to submit data consumer"); + return Status::InternalError("failed to submit data consumer"); } else { VLOG(1) << "submit a data consumer: " << consumer->id() << ", group id: " << _grp_id; } @@ -133,14 +133,14 @@ Status KafkaDataConsumerGroup::start_all(StreamLoadContext* ctx) { // nothing to be consumed, we have to cancel it, because // we do not allow finishing stream load pipe without data kafka_pipe->cancel(); - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } else { DCHECK(left_bytes < ctx->max_batch_size); DCHECK(left_rows < ctx->max_batch_rows); kafka_pipe->finish(); ctx->kafka_info->cmt_offset = std::move(cmt_offset); ctx->receive_bytes = ctx->max_batch_size - left_bytes; - return Status::OK; + return Status::OK(); } } @@ -175,7 +175,7 @@ Status KafkaDataConsumerGroup::start_all(StreamLoadContext* ctx) { left_time = ctx->max_interval_s * 1000 - watch.elapsed_time() / 1000 / 1000; } - return Status::OK; + return Status::OK(); } void KafkaDataConsumerGroup::actual_consume( diff --git 
a/be/src/runtime/routine_load/data_consumer_group.h b/be/src/runtime/routine_load/data_consumer_group.h index 233d07ce8e..b18ef53025 100644 --- a/be/src/runtime/routine_load/data_consumer_group.h +++ b/be/src/runtime/routine_load/data_consumer_group.h @@ -51,7 +51,7 @@ public: } // start all consumers - virtual Status start_all(StreamLoadContext* ctx) { return Status::OK; } + virtual Status start_all(StreamLoadContext* ctx) { return Status::OK(); } protected: UniqueId _grp_id; diff --git a/be/src/runtime/routine_load/data_consumer_pool.cpp b/be/src/runtime/routine_load/data_consumer_pool.cpp index 9528bb4968..2810cd68f7 100644 --- a/be/src/runtime/routine_load/data_consumer_pool.cpp +++ b/be/src/runtime/routine_load/data_consumer_pool.cpp @@ -36,7 +36,7 @@ Status DataConsumerPool::get_consumer( (*iter)->reset(); *ret = *iter; iter = _pool.erase(iter); - return Status::OK; + return Status::OK(); } else { ++iter; } @@ -51,7 +51,7 @@ Status DataConsumerPool::get_consumer( default: std::stringstream ss; ss << "PAUSE: unknown routine load task type: " << ctx->load_type; - return Status(ss.str()); + return Status::InternalError(ss.str()); } // init the consumer @@ -59,14 +59,14 @@ Status DataConsumerPool::get_consumer( VLOG(3) << "create new data consumer: " << consumer->id(); *ret = consumer; - return Status::OK; + return Status::OK(); } Status DataConsumerPool::get_consumer_grp( StreamLoadContext* ctx, std::shared_ptr* ret) { if (ctx->load_src_type != TLoadSourceType::KAFKA) { - return Status("PAUSE: Currently only support consumer group for Kafka data source"); + return Status::InternalError("PAUSE: Currently only support consumer group for Kafka data source"); } DCHECK(ctx->kafka_info); @@ -84,7 +84,7 @@ Status DataConsumerPool::get_consumer_grp( LOG(INFO) << "get consumer group " << grp->grp_id() << " with " << consumer_num << " consumers"; *ret = grp; - return Status::OK; + return Status::OK(); } void DataConsumerPool::return_consumer(std::shared_ptr consumer) { @@ 
-124,7 +124,7 @@ Status DataConsumerPool::start_bg_worker() { } }); _clean_idle_consumer_thread.detach(); - return Status::OK; + return Status::OK(); } void DataConsumerPool::_clean_idle_consumer_bg() { diff --git a/be/src/runtime/routine_load/routine_load_task_executor.cpp b/be/src/runtime/routine_load/routine_load_task_executor.cpp index 5875be7431..cd31aeee79 100644 --- a/be/src/runtime/routine_load/routine_load_task_executor.cpp +++ b/be/src/runtime/routine_load/routine_load_task_executor.cpp @@ -72,13 +72,13 @@ Status RoutineLoadTaskExecutor::submit_task(const TRoutineLoadTask& task) { if (_task_map.find(task.id) != _task_map.end()) { // already submitted LOG(INFO) << "routine load task " << UniqueId(task.id) << " has already been submitted"; - return Status::OK; + return Status::OK(); } // the max queue size of thread pool is 100, here we use 80 as a very conservative limit if (_thread_pool.get_queue_size() >= 80) { LOG(INFO) << "too many tasks in queue: " << _thread_pool.get_queue_size() << ", reject task: " << UniqueId(task.id); - return Status("too many tasks"); + return Status::InternalError("too many tasks"); } // create the context @@ -119,7 +119,7 @@ Status RoutineLoadTaskExecutor::submit_task(const TRoutineLoadTask& task) { default: LOG(WARNING) << "unknown load source type: " << task.type; delete ctx; - return Status("unknown load source type"); + return Status::InternalError("unknown load source type"); } VLOG(1) << "receive a new routine load task: " << ctx->brief(); @@ -148,11 +148,11 @@ Status RoutineLoadTaskExecutor::submit_task(const TRoutineLoadTask& task) { if (ctx->unref()) { delete ctx; } - return Status("failed to submit routine load task"); + return Status::InternalError("failed to submit routine load task"); } else { LOG(INFO) << "submit a new routine load task: " << ctx->brief() << ", current tasks num: " << _task_map.size(); - return Status::OK; + return Status::OK(); } } @@ -193,7 +193,7 @@ void RoutineLoadTaskExecutor::exec_task( 
default: { std::stringstream ss; ss << "unknown routine load task type: " << ctx->load_type; - err_handler(ctx, Status::CANCELLED, ss.str()); + err_handler(ctx, Status::Cancelled("Cancelled"), ss.str()); cb(ctx); return; } @@ -265,7 +265,7 @@ Status RoutineLoadTaskExecutor::_execute_plan_for_test(StreamLoadContext* ctx) { } if (eof) { - ctx->promise.set_value(Status::OK); + ctx->promise.set_value(Status::OK()); break; } @@ -281,7 +281,7 @@ Status RoutineLoadTaskExecutor::_execute_plan_for_test(StreamLoadContext* ctx) { std::thread t1(mock_consumer); t1.detach(); - return Status::OK; + return Status::OK(); } } // end namespace diff --git a/be/src/runtime/row_batch.cpp b/be/src/runtime/row_batch.cpp index d65e7a575e..cd96311f23 100644 --- a/be/src/runtime/row_batch.cpp +++ b/be/src/runtime/row_batch.cpp @@ -441,7 +441,7 @@ Status RowBatch::resize_and_allocate_tuple_buffer(RuntimeState* state, LOG(WARNING) << ss.str(); return state->set_mem_limit_exceeded(ss.str()); } - return Status::OK; + return Status::OK(); } void RowBatch::add_tuple_stream(BufferedTupleStream2* stream) { diff --git a/be/src/runtime/row_batch.h b/be/src/runtime/row_batch.h index 7a71b6b7fc..93e562bec1 100644 --- a/be/src/runtime/row_batch.h +++ b/be/src/runtime/row_batch.h @@ -410,7 +410,7 @@ public: /// Allocates a buffer large enough for the fixed-length portion of 'capacity_' rows in /// this batch from 'tuple_data_pool_'. 'capacity_' is reduced if the allocation would /// exceed FIXED_LEN_BUFFER_LIMIT. Always returns enough space for at least one row. - /// Returns Status::MEM_LIMIT_EXCEEDED and sets 'buffer' to NULL if a memory limit would + /// Returns Status::MemoryLimitExceeded("Memory limit exceeded") and sets 'buffer' to NULL if a memory limit would /// have been exceeded. 'state' is used to log the error. /// On success, sets 'buffer_size' to the size in bytes and 'buffer' to the buffer. 
Status resize_and_allocate_tuple_buffer(RuntimeState* state, int64_t* buffer_size, diff --git a/be/src/runtime/runtime_state.cpp b/be/src/runtime/runtime_state.cpp index 84acdb4481..257c6f9036 100644 --- a/be/src/runtime/runtime_state.cpp +++ b/be/src/runtime/runtime_state.cpp @@ -192,7 +192,7 @@ Status RuntimeState::init( _db_name = "insert_stmt"; _import_label = print_id(fragment_instance_id); - return Status::OK; + return Status::OK(); } Status RuntimeState::init_mem_trackers(const TUniqueId& query_id) { @@ -233,7 +233,7 @@ Status RuntimeState::init_mem_trackers(const TUniqueId& query_id) { std::numeric_limits::max()); } - return Status::OK; + return Status::OK(); } Status RuntimeState::init_buffer_poolstate() { @@ -258,7 +258,7 @@ Status RuntimeState::init_buffer_poolstate() { _buffer_reservation->InitChildTracker( NULL, exec_env->buffer_reservation(), _query_mem_tracker.get(), max_reservation); - return Status::OK; + return Status::OK(); } Status RuntimeState::create_block_mgr() { @@ -274,7 +274,7 @@ Status RuntimeState::create_block_mgr() { RETURN_IF_ERROR(BufferedBlockMgr2::create(this, _query_mem_tracker.get(), runtime_profile(), _exec_env->tmp_file_mgr(), block_mgr_limit, _exec_env->disk_io_mgr()->max_read_buffer_size(), &_block_mgr2)); - return Status::OK; + return Status::OK(); } Status RuntimeState::create_codegen() { @@ -282,7 +282,7 @@ Status RuntimeState::create_codegen() { _obj_pool.get(), print_id(fragment_instance_id()), &_codegen)); _codegen->enable_optimizations(true); _profile.add_child(_codegen->runtime_profile(), true, NULL); - return Status::OK; + return Status::OK(); } bool RuntimeState::error_log_is_empty() { @@ -331,10 +331,10 @@ Status RuntimeState::set_mem_limit_exceeded( { boost::lock_guard l(_process_status_lock); if (_process_status.ok()) { - _process_status = Status::MEM_LIMIT_EXCEEDED; - if (msg != NULL) { - // _process_status.MergeStatus(*msg); - _process_status.add_error_msg(*msg); + if (msg != nullptr) { + _process_status = 
Status::MemoryLimitExceeded(*msg); + } else { + _process_status = Status::MemoryLimitExceeded("Memory limit exceeded"); } } else { return _process_status; @@ -371,7 +371,7 @@ Status RuntimeState::set_mem_limit_exceeded( Status RuntimeState::check_query_state() { // TODO: it would be nice if this also checked for cancellation, but doing so breaks - // cases where we use Status::CANCELLED to indicate that the limit was reached. + // cases where we use Status::Cancelled("Cancelled") to indicate that the limit was reached. if (_instance_mem_tracker->any_limit_exceeded()) { return set_mem_limit_exceeded(); } @@ -383,7 +383,7 @@ const int64_t MAX_ERROR_NUM = 50; Status RuntimeState::create_load_dir() { if (!_load_dir.empty()) { - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(_exec_env->load_path_mgr()->allocate_dir( _db_name, _import_label, &_load_dir)); @@ -409,11 +409,11 @@ Status RuntimeState::create_error_log_file() { std::stringstream error_msg; error_msg << "Fail to open error file: [" << _error_log_file_path << "]."; LOG(WARNING) << error_msg.str(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } VLOG_ROW << "create error log file: " << _error_log_file_path; - return Status::OK; + return Status::OK(); } void RuntimeState::append_error_msg_to_file( @@ -488,7 +488,7 @@ Status RuntimeState::get_codegen(LlvmCodeGen** codegen, bool initialize) { RETURN_IF_ERROR(create_codegen()); } *codegen = _codegen.get(); - return Status::OK; + return Status::OK(); } Status RuntimeState::get_codegen(LlvmCodeGen** codegen) { @@ -498,7 +498,7 @@ Status RuntimeState::get_codegen(LlvmCodeGen** codegen) { // TODO chenhao , check scratch_limit, disable_spilling and file_group // before spillng Status RuntimeState::StartSpilling(MemTracker* mem_tracker) { - return Status("Mem limit exceeded."); + return Status::InternalError("Mem limit exceeded."); } } // end namespace doris diff --git a/be/src/runtime/runtime_state.h 
b/be/src/runtime/runtime_state.h index 920d178381..24aedd6a30 100644 --- a/be/src/runtime/runtime_state.h +++ b/be/src/runtime/runtime_state.h @@ -294,12 +294,10 @@ public: // Sets _process_status with err_msg if no error has been set yet. void set_process_status(const std::string& err_msg) { boost::lock_guard l(_process_status_lock); - if (!_process_status.ok()) { return; } - - _process_status = Status(err_msg); + _process_status = Status::InternalError(err_msg); } void set_process_status(const Status& status) { @@ -643,7 +641,7 @@ private: #define RETURN_IF_CANCELLED(state) \ do { \ - if (UNLIKELY((state)->is_cancelled())) return Status::CANCELLED; \ + if (UNLIKELY((state)->is_cancelled())) return Status::Cancelled("Cancelled"); \ } while (false) } diff --git a/be/src/runtime/small_file_mgr.cpp b/be/src/runtime/small_file_mgr.cpp index eed5eea22c..ced27a3477 100644 --- a/be/src/runtime/small_file_mgr.cpp +++ b/be/src/runtime/small_file_mgr.cpp @@ -47,7 +47,7 @@ SmallFileMgr::~SmallFileMgr() { Status SmallFileMgr::init() { RETURN_IF_ERROR(_load_local_files()); - return Status::OK; + return Status::OK(); } Status SmallFileMgr::_load_local_files() { @@ -62,7 +62,7 @@ Status SmallFileMgr::_load_local_files() { }; RETURN_IF_ERROR(FileUtils::scan_dir(_local_path, scan_cb)); - return Status::OK; + return Status::OK(); } Status SmallFileMgr::_load_single_file( @@ -73,19 +73,19 @@ Status SmallFileMgr::_load_single_file( std::vector parts; boost::split(parts, file_name, boost::is_any_of(".")); if (parts.size() != 2) { - return Status("Not a valid file name: " + file_name); + return Status::InternalError("Not a valid file name: " + file_name); } int64_t file_id = std::stol(parts[0]); std::string md5 = parts[1]; if (_file_cache.find(file_id) != _file_cache.end()) { - return Status("File with same id is already been loaded: " + file_id); + return Status::InternalError("File with same id is already been loaded: " + file_id); } std::string file_md5; 
RETURN_IF_ERROR(FileUtils::md5sum(path + "/" + file_name, &file_md5)); if (file_md5 != md5) { - return Status("Invalid md5 of file: " + file_name); + return Status::InternalError("Invalid md5 of file: " + file_name); } CacheEntry entry; @@ -93,7 +93,7 @@ Status SmallFileMgr::_load_single_file( entry.md5 = file_md5; _file_cache.emplace(file_id, entry); - return Status::OK; + return Status::OK(); } Status SmallFileMgr::get_file( @@ -113,30 +113,30 @@ Status SmallFileMgr::get_file( if (remove(entry.path.c_str()) != 0) { std::stringstream ss; ss << "failed to remove file: " << file_id << ", err: "<< std::strerror(errno); - return Status(ss.str()); + return Status::InternalError(ss.str()); } _file_cache.erase(it); } else { // check ok, return the path *file_path = entry.path; - return Status::OK; + return Status::OK(); } } // file not found in cache. download it from FE RETURN_IF_ERROR(_download_file(file_id, md5, file_path)); - return Status::OK; + return Status::OK(); } Status SmallFileMgr::_check_file(const CacheEntry& entry, const std::string& md5) { if (!FileUtils::check_exist(entry.path)) { - return Status("file not exist"); + return Status::InternalError("file not exist"); } if (!boost::iequals(md5, entry.md5)) { - return Status("invalid MD5"); + return Status::InternalError("invalid MD5"); } - return Status::OK; + return Status::OK(); } Status SmallFileMgr::_download_file( @@ -156,7 +156,7 @@ Status SmallFileMgr::_download_file( std::unique_ptr fp(fopen(tmp_file.c_str(), "w"), fp_closer); if (fp == nullptr) { LOG(WARNING) << "fail to open file, file=" << tmp_file; - return Status("fail to open file"); + return Status::InternalError("fail to open file"); } HttpClient client; @@ -183,7 +183,7 @@ Status SmallFileMgr::_download_file( if (res != 1) { LOG(WARNING) << "fail to write data to file, file=" << tmp_file << ", error=" << ferror(fp.get()); - status = Status("fail to write data when download"); + status = Status::InternalError("fail to write data when 
download"); return false; } return true; @@ -195,7 +195,7 @@ Status SmallFileMgr::_download_file( if (!boost::iequals(digest.hex(), md5)) { LOG(WARNING) << "file's checksum is not equal, download: " << digest.hex() << ", expected: " << md5 << ", file: " << file_id; - return Status("download with invalid md5"); + return Status::InternalError("download with invalid md5"); } // close this file @@ -213,7 +213,7 @@ Status SmallFileMgr::_download_file( << ", errno=" << errno << ", errmsg=" << strerror_r(errno, buf, 64); remove(tmp_file.c_str()); remove(real_file_path.c_str()); - return Status("fail to rename file"); + return Status::InternalError("fail to rename file"); } // add to file cache @@ -225,7 +225,7 @@ Status SmallFileMgr::_download_file( *file_path = real_file_path; LOG(INFO) << "finished to download file: " << file_path; - return Status::OK; + return Status::OK(); } } // end namespace doris diff --git a/be/src/runtime/snapshot_loader.cpp b/be/src/runtime/snapshot_loader.cpp index 0ff606d385..a3d11b665e 100644 --- a/be/src/runtime/snapshot_loader.cpp +++ b/be/src/runtime/snapshot_loader.cpp @@ -88,7 +88,7 @@ Status SnapshotLoader::upload( RETURN_IF_ERROR(_report_every(0, &tmp_counter, 0, 0, TTaskType::type::UPLOAD)); - Status status = Status::OK; + Status status = Status::OK(); // 1. validate local tablet snapshot paths RETURN_IF_ERROR(_check_local_snapshot_paths(src_to_dest_path, true)); @@ -100,7 +100,7 @@ Status SnapshotLoader::upload( << "broker addr: " << broker_addr << ". 
msg: " << status.get_error_msg(); LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } std::vector broker_addrs; @@ -150,7 +150,7 @@ Status SnapshotLoader::upload( ss << "failed to get md5sum of file: " << local_file << ": " << status.get_error_msg(); LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } VLOG(2) << "get file checksum: " << local_file << ": " << md5sum; local_files_with_checksum.push_back(local_file + "." + md5sum); @@ -194,12 +194,12 @@ Status SnapshotLoader::upload( FileHandler file_handler; OLAPStatus ost = file_handler.open(full_local_file, O_RDONLY); if (ost != OLAP_SUCCESS) { - return Status("failed to open file: " + full_local_file); + return Status::InternalError("failed to open file: " + full_local_file); } size_t file_len = file_handler.length(); if (file_len == -1) { - return Status("failed to get length of file: " + full_local_file); + return Status::InternalError("failed to get length of file: " + full_local_file); } constexpr size_t buf_sz = 1024 * 1024; @@ -210,7 +210,7 @@ Status SnapshotLoader::upload( size_t read_len = left_len > buf_sz ? buf_sz : left_len; ost = file_handler.pread(read_buf, read_len, read_offset); if (ost != OLAP_SUCCESS) { - return Status("failed to read file: " + full_local_file); + return Status::InternalError("failed to read file: " + full_local_file); } // write through broker size_t write_len = 0; @@ -264,7 +264,7 @@ Status SnapshotLoader::download( RETURN_IF_ERROR(_report_every(0, &tmp_counter, 0, 0, TTaskType::type::DOWNLOAD)); - Status status = Status::OK; + Status status = Status::OK(); // 1. validate local tablet snapshot paths RETURN_IF_ERROR(_check_local_snapshot_paths(src_to_dest_path, false)); @@ -276,7 +276,7 @@ Status SnapshotLoader::download( << "broker addr: " << broker_addr << ". 
msg: " << status.get_error_msg(); LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } std::vector broker_addrs; @@ -314,7 +314,7 @@ Status SnapshotLoader::download( std::stringstream ss; ss << "get nothing from remote path: " << remote_path; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } for (auto& iter : remote_files) { @@ -390,7 +390,7 @@ Status SnapshotLoader::download( OLAPStatus ost = file_handler.open_with_mode(full_local_file, O_CREAT | O_TRUNC | O_WRONLY, S_IRUSR | S_IWUSR); if (ost != OLAP_SUCCESS) { - return Status("failed to open file: " + full_local_file); + return Status::InternalError("failed to open file: " + full_local_file); } // 4. read remote and write to local @@ -412,7 +412,7 @@ Status SnapshotLoader::download( if (read_len > 0) { ost = file_handler.pwrite(read_buf, read_len, write_offset); if (ost != OLAP_SUCCESS) { - return Status("failed to write file: " + full_local_file); + return Status::InternalError("failed to write file: " + full_local_file); } write_offset += read_len; @@ -427,7 +427,7 @@ Status SnapshotLoader::download( std::stringstream ss; ss << "failed to get md5sum of file: " << full_local_file; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } VLOG(2) << "get downloaded file checksum: " << full_local_file << ": " << downloaded_md5sum; @@ -436,7 +436,7 @@ Status SnapshotLoader::download( ss << "invalid md5 of downloaded file: " << full_local_file << ", expected: " << file_stat.md5 << ", get: " << downloaded_md5sum; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } // local_files always keep the updated local files @@ -497,7 +497,7 @@ Status SnapshotLoader::move( << ", store: " << store_path << ", job: " << _job_id << ", task id: " << _task_id; - Status status = Status::OK; + Status status = Status::OK(); // validate snapshot_path and 
tablet_path int64_t snapshot_tablet_id = 0; @@ -516,7 +516,7 @@ Status SnapshotLoader::move( ss << "path does not match. snapshot: " << snapshot_path << ", tablet path: " << tablet_path; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } boost::filesystem::path tablet_dir(tablet_path); @@ -525,14 +525,14 @@ Status SnapshotLoader::move( std::stringstream ss; ss << "tablet path does not exist: " << tablet_path; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } if (!boost::filesystem::exists(snapshot_dir)) { std::stringstream ss; ss << "snapshot path does not exist: " << snapshot_path; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } if (overwrite) { @@ -565,7 +565,7 @@ Status SnapshotLoader::move( ss << "failed to move tablet path: " << tablet_path << ". err: " << e.what(); LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } // link files one by one @@ -583,7 +583,7 @@ Status SnapshotLoader::move( remove(linked_file.c_str()); } - return Status("move tablet failed"); + return Status::InternalError("move tablet failed"); } linked_files.push_back(full_dest_path); VLOG(2) << "link file from " << full_src_path << " to " << full_dest_path; @@ -629,7 +629,7 @@ Status SnapshotLoader::move( ss << "failed to get tablet: " << tablet_id << ", schema hash: " << schema_hash; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } // get base version tablet->obtain_header_rdlock(); @@ -639,7 +639,7 @@ Status SnapshotLoader::move( std::stringstream ss; ss << "failed to get base version of tablet: " << tablet_id; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } int32_t end_version = base_version->end_version(); @@ -653,7 +653,7 @@ Status SnapshotLoader::move( OLAPStatus ost = snapshot_header.load_and_init(); if 
(ost != OLAP_SUCCESS) { LOG(WARNING) << "failed to load snapshot header: " << snapshot_header_file; - return Status("failed to load snapshot header: " + snapshot_header_file); + return Status::InternalError("failed to load snapshot header: " + snapshot_header_file); } LOG(INFO) << "begin to move snapshot files from version 0 to " @@ -711,7 +711,7 @@ Status SnapshotLoader::move( ss << "failed to move tablet path: " << tablet_path << ". err: " << e.what(); LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } // merge 2 headers @@ -720,7 +720,7 @@ Status SnapshotLoader::move( std::stringstream ss; ss << "failed to move tablet path: " << tablet_path; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } } @@ -745,7 +745,7 @@ Status SnapshotLoader::_get_tablet_id_and_schema_hash_from_file_path( // we try to extract tablet_id from path size_t pos = src_path.find_last_of("/"); if (pos == std::string::npos || pos == src_path.length() - 1) { - return Status("failed to get tablet id from path: " + src_path); + return Status::InternalError("failed to get tablet id from path: " + src_path); } std::string schema_hash_str = src_path.substr(pos + 1); @@ -756,7 +756,7 @@ Status SnapshotLoader::_get_tablet_id_and_schema_hash_from_file_path( // skip schema hash part size_t pos2 = src_path.find_last_of("/", pos - 1); if (pos2 == std::string::npos) { - return Status("failed to get tablet id from path: " + src_path); + return Status::InternalError("failed to get tablet id from path: " + src_path); } std::string tablet_str = src_path.substr(pos2 + 1, pos - pos2); @@ -767,7 +767,7 @@ Status SnapshotLoader::_get_tablet_id_and_schema_hash_from_file_path( VLOG(2) << "get tablet id " << *tablet_id << ", schema hash: " << *schema_hash << " from path: " << src_path; - return Status::OK; + return Status::OK(); } Status SnapshotLoader::_check_local_snapshot_paths( @@ -783,11 +783,11 @@ Status 
SnapshotLoader::_check_local_snapshot_paths( std::stringstream ss; ss << "snapshot path is not directory or does not exist: " << path; LOG(WARNING) << ss.str(); - return Status(TStatusCode::RUNTIME_ERROR, ss.str(), true); + return Status::RuntimeError(ss.str()); } } LOG(INFO) << "all local snapshot paths are existing. num: " << src_to_dest_path.size(); - return Status::OK; + return Status::OK(); } Status SnapshotLoader::_get_existing_files_from_remote( @@ -814,13 +814,13 @@ Status SnapshotLoader::_get_existing_files_from_remote( if (list_rep.opStatus.statusCode == TBrokerOperationStatusCode::FILE_NOT_FOUND) { LOG(INFO) << "path does not exist: " << remote_path; - return Status::OK; + return Status::OK(); } else if (list_rep.opStatus.statusCode != TBrokerOperationStatusCode::OK) { std::stringstream ss; ss << "failed to list files from remote path: " << remote_path << ", msg: " << list_rep.opStatus.message; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } LOG(INFO) << "finished to list files from remote path. 
file num: " << list_rep.files.size(); @@ -852,10 +852,10 @@ Status SnapshotLoader::_get_existing_files_from_remote( std::stringstream ss; ss << "failed to list files in remote path: " << remote_path << ", msg: " << e.what(); LOG(WARNING) << ss.str(); - return Status(TStatusCode::THRIFT_RPC_ERROR, ss.str(), false); + return Status::ThriftRpcError(ss.str()); } - return Status::OK; + return Status::OK(); } Status SnapshotLoader::_get_existing_files_from_local( @@ -872,7 +872,7 @@ Status SnapshotLoader::_get_existing_files_from_local( } LOG(INFO) << "finished to list files in local path: " << local_path << ", file num: " << local_files->size(); - return Status::OK; + return Status::OK(); } Status SnapshotLoader::_rename_remote_file( @@ -900,20 +900,20 @@ Status SnapshotLoader::_rename_remote_file( ss << "Fail to rename file: " << orig_name << " to: " << new_name << " msg:" << op_status.message; LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } } catch (apache::thrift::TException& e) { std::stringstream ss; ss << "Fail to rename file: " << orig_name << " to: " << new_name << " msg:" << e.what(); LOG(WARNING) << ss.str(); - return Status(TStatusCode::THRIFT_RPC_ERROR, ss.str(), false); + return Status::ThriftRpcError(ss.str()); } LOG(INFO) << "finished to rename file. 
orig: " << orig_name << ", new: " << new_name; - return Status::OK; + return Status::OK(); } void SnapshotLoader::_assemble_file_name( @@ -954,21 +954,21 @@ Status SnapshotLoader::_replace_tablet_id( std::stringstream ss; ss << tablet_id << ".hdr"; *new_file_name = ss.str(); - return Status::OK; + return Status::OK(); } else if (_end_with(file_name, ".idx") || _end_with(file_name, ".dat")) { size_t pos = file_name.find_first_of("_"); if (pos == std::string::npos) { - return Status("invalid tablet file name: " + file_name); + return Status::InternalError("invalid tablet file name: " + file_name); } std::string suffix_part = file_name.substr(pos); std::stringstream ss; ss << tablet_id << suffix_part; *new_file_name = ss.str(); - return Status::OK; + return Status::OK(); } else { - return Status("invalid tablet file name: " + file_name); + return Status::InternalError("invalid tablet file name: " + file_name); } } @@ -980,7 +980,7 @@ Status SnapshotLoader::_get_tablet_id_from_remote_path( // bos://xxx/../__tbl_10004/__part_10003/__idx_10004/__10005 size_t pos = remote_path.find_last_of("_"); if (pos == std::string::npos) { - return Status("invalid remove file path: " + remote_path); + return Status::InternalError("invalid remove file path: " + remote_path); } std::string tablet_id_str = remote_path.substr(pos + 1); @@ -988,7 +988,7 @@ Status SnapshotLoader::_get_tablet_id_from_remote_path( ss << tablet_id_str; ss >> *tablet_id; - return Status::OK; + return Status::OK(); } // only return CANCELLED if FE return that job is cancelled. @@ -1002,7 +1002,7 @@ Status SnapshotLoader::_report_every( ++*counter; if (*counter <= report_threshold) { - return Status::OK; + return Status::OK(); } LOG(INFO) << "report to frontend. 
job id: " << _job_id @@ -1028,7 +1028,7 @@ Status SnapshotLoader::_report_every( if (!rpcStatus.ok()) { // rpc failed, ignore - return Status::OK; + return Status::OK(); } // reset @@ -1036,9 +1036,9 @@ Status SnapshotLoader::_report_every( if (report_st.status_code == TStatusCode::CANCELLED) { LOG(INFO) << "job is cancelled. job id: " << _job_id << ", task id: " << _task_id; - return Status::CANCELLED; + return Status::Cancelled("Cancelled"); } - return Status::OK; + return Status::OK(); } diff --git a/be/src/runtime/sorted_run_merger.cc b/be/src/runtime/sorted_run_merger.cc index 90494147ce..60ed6d86b5 100644 --- a/be/src/runtime/sorted_run_merger.cc +++ b/be/src/runtime/sorted_run_merger.cc @@ -55,10 +55,10 @@ public: RETURN_IF_ERROR(_sorted_run(&_input_row_batch)); if (_input_row_batch == NULL) { *done = true; - return Status::OK; + return Status::OK(); } RETURN_IF_ERROR(next(NULL, done)); - return Status::OK; + return Status::OK(); } // Increment the current row index. If the current input batch is exhausted fetch the @@ -79,7 +79,7 @@ public: *done = _input_row_batch == NULL; _input_row_batch_index = 0; } - return Status::OK; + return Status::OK(); } TupleRow* current_row() const { @@ -154,14 +154,14 @@ Status SortedRunMerger::prepare(const vector& input_runs) { for (int i = last_parent; i >= 0; --i) { heapify(i); } - return Status::OK; + return Status::OK(); } Status SortedRunMerger::get_next(RowBatch* output_batch, bool* eos) { ScopedTimer timer(_get_next_timer); if (_min_heap.empty()) { *eos = true; - return Status::OK; + return Status::OK(); } while (!output_batch->at_capacity()) { @@ -195,7 +195,7 @@ Status SortedRunMerger::get_next(RowBatch* output_batch, bool* eos) { } *eos = _min_heap.empty(); - return Status::OK; + return Status::OK(); } } // namespace doris diff --git a/be/src/runtime/sorter.h b/be/src/runtime/sorter.h index 6b2ee148ff..33487076f8 100644 --- a/be/src/runtime/sorter.h +++ b/be/src/runtime/sorter.h @@ -35,12 +35,12 @@ public: } 
virtual Status prepare(RuntimeState* state) { - return Status::OK; + return Status::OK(); } // Add data to be sorted. virtual Status add_batch(RowBatch* batch) { - return Status::OK; + return Status::OK(); } // call when all data be added @@ -51,7 +51,7 @@ public: virtual Status get_next(RowBatch* batch, bool* eos) = 0; virtual Status close(RuntimeState* state) { - return Status::OK; + return Status::OK(); } }; diff --git a/be/src/runtime/spill_sorter.cc b/be/src/runtime/spill_sorter.cc index a4627ac540..0a5b3ffa1b 100644 --- a/be/src/runtime/spill_sorter.cc +++ b/be/src/runtime/spill_sorter.cc @@ -399,10 +399,7 @@ Status SpillSorter::Run::init() { RETURN_IF_ERROR( _sorter->_block_mgr->get_new_block(_sorter->_block_mgr_client, NULL, &block)); if (block == NULL) { - Status status = Status::MEM_LIMIT_EXCEEDED; - // status.AddDetail(Substitute(MEM_ALLOC_FAILED_ERROR_MSG, "fixed")); - status.add_error_msg(get_mem_alloc_failed_error_msg("fixed")); - return status; + return Status::MemoryLimitExceeded(get_mem_alloc_failed_error_msg("fixed")); } _fixed_len_blocks.push_back(block); @@ -410,10 +407,7 @@ Status SpillSorter::Run::init() { RETURN_IF_ERROR( _sorter->_block_mgr->get_new_block(_sorter->_block_mgr_client, NULL, &block)); if (block == NULL) { - Status status = Status::MEM_LIMIT_EXCEEDED; - // status.AddDetail(Substitute(MEM_ALLOC_FAILED_ERROR_MSG, "variable")); - status.add_error_msg(get_mem_alloc_failed_error_msg("variable")); - return status; + return Status::MemoryLimitExceeded(get_mem_alloc_failed_error_msg("variable")); } _var_len_blocks.push_back(block); @@ -421,17 +415,14 @@ Status SpillSorter::Run::init() { RETURN_IF_ERROR(_sorter->_block_mgr->get_new_block( _sorter->_block_mgr_client, NULL, &_var_len_copy_block)); if (_var_len_copy_block == NULL) { - Status status = Status::MEM_LIMIT_EXCEEDED; - // status.AddDetail(Substitute(MEM_ALLOC_FAILED_ERROR_MSG, "variable")); - status.add_error_msg(get_mem_alloc_failed_error_msg("variable")); - return status; + 
return Status::MemoryLimitExceeded(get_mem_alloc_failed_error_msg("variable")); } } } if (!_is_sorted) { _sorter->_initial_runs_counter->update(1); } - return Status::OK; + return Status::OK(); } template @@ -478,7 +469,7 @@ Status SpillSorter::Run::add_batch(RowBatch* batch, int start_index, int* num_pr std::stringstream error_msg; error_msg << "Variable length data in a single tuple larger than block size " << total_var_len << " > " << _sorter->_block_mgr->max_block_size(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } } else { memcpy(new_tuple, input_row->get_tuple(0), _sort_tuple_size); @@ -499,7 +490,7 @@ Status SpillSorter::Run::add_batch(RowBatch* batch, int start_index, int* num_pr // There was not enough space in the last var-len block for this tuple, and // the run could not be extended. Return the fixed-len allocation and exit. cur_fixed_len_block->return_allocation(_sort_tuple_size); - return Status::OK; + return Status::OK(); } } @@ -532,11 +523,11 @@ Status SpillSorter::Run::add_batch(RowBatch* batch, int start_index, int* num_pr if (added) { cur_fixed_len_block = _fixed_len_blocks.back(); } else { - return Status::OK; + return Status::OK(); } } } - return Status::OK; + return Status::OK(); } void SpillSorter::Run::transfer_resources(RowBatch* row_batch) { @@ -633,7 +624,7 @@ Status SpillSorter::Run::unpin_all_blocks() { // needed. _var_len_copy_block = NULL; _is_pinned = false; - return Status::OK; + return Status::OK(); } Status SpillSorter::Run::prepare_read() { @@ -652,7 +643,7 @@ Status SpillSorter::Run::prepare_read() { // If the run is pinned, merge is not invoked, so _buffered_batch is not needed // and the individual blocks do not need to be pinned. if (_is_pinned) { - return Status::OK; + return Status::OK(); } // Attempt to pin the first fixed and var-length blocks. In either case, pinning may @@ -663,10 +654,7 @@ Status SpillSorter::Run::prepare_read() { // Temporary work-around for IMPALA-1868. 
Fail the query with OOM rather than // DCHECK in case block pin fails. if (!pinned) { - Status status = Status::MEM_LIMIT_EXCEEDED; - // status.AddDetail(Substitute(PIN_FAILED_ERROR_MSG, "fixed")); - status.add_error_msg(get_pin_failed_error_msg("fixed")); - return status; + return Status::MemoryLimitExceeded(get_pin_failed_error_msg("fixed")); } } @@ -676,13 +664,10 @@ Status SpillSorter::Run::prepare_read() { // Temporary work-around for IMPALA-1590. Fail the query with OOM rather than // DCHECK in case block pin fails. if (!pinned) { - Status status = Status::MEM_LIMIT_EXCEEDED; - // status.AddDetail(Substitute(PIN_FAILED_ERROR_MSG, "variable")); - status.add_error_msg(get_pin_failed_error_msg("variable")); - return status; + return Status::MemoryLimitExceeded(get_pin_failed_error_msg("variable")); } } - return Status::OK; + return Status::OK(); } Status SpillSorter::Run::get_next_batch(RowBatch** output_batch) { @@ -717,7 +702,7 @@ Status SpillSorter::Run::get_next_batch(RowBatch** output_batch) { // *output_batch == NULL indicates eos. *output_batch = _buffered_batch.get(); - return Status::OK; + return Status::OK(); } template @@ -725,7 +710,7 @@ Status SpillSorter::Run::get_next(RowBatch* output_batch, bool* eos) { if (_fixed_len_blocks_index == _fixed_len_blocks.size()) { *eos = true; DCHECK_EQ(_num_tuples_returned, _num_tuples); - return Status::OK; + return Status::OK(); } else { *eos = false; } @@ -743,10 +728,7 @@ Status SpillSorter::Run::get_next(RowBatch* output_batch, bool* eos) { // Temporary work-around for IMPALA-2344. Fail the query with OOM rather than // DCHECK in case block pin fails. 
if (!pinned) { - Status status = Status::MEM_LIMIT_EXCEEDED; - // status.AddDetail(Substitute(PIN_FAILED_ERROR_MSG, "fixed")); - status.add_error_msg(get_pin_failed_error_msg("fixed")); - return status; + return Status::MemoryLimitExceeded(get_pin_failed_error_msg("fixed")); } _pin_next_fixed_len_block = false; } @@ -758,10 +740,7 @@ Status SpillSorter::Run::get_next(RowBatch* output_batch, bool* eos) { // Temporary work-around for IMPALA-2344. Fail the query with OOM rather than // DCHECK in case block pin fails. if (!pinned) { - Status status = Status::MEM_LIMIT_EXCEEDED; - // status.AddDetail(Substitute(PIN_FAILED_ERROR_MSG, "variable")); - status.add_error_msg(get_pin_failed_error_msg("variable")); - return status; + return Status::MemoryLimitExceeded(get_pin_failed_error_msg("variable")); } _pin_next_var_len_block = false; } @@ -831,7 +810,7 @@ Status SpillSorter::Run::get_next(RowBatch* output_batch, bool* eos) { ++_fixed_len_blocks_index; _fixed_len_block_offset = 0; } - return Status::OK; + return Status::OK(); } void SpillSorter::Run::collect_non_null_varslots( @@ -868,7 +847,7 @@ Status SpillSorter::Run::try_add_block( } else { *added = false; } - return Status::OK; + return Status::OK(); } void SpillSorter::Run::copy_var_len_data(char* dest, const vector& string_values) { @@ -1073,7 +1052,7 @@ Status SpillSorter::init() { DCHECK(_unsorted_run != NULL); RETURN_IF_ERROR(_unsorted_run->init()); - return Status::OK; + return Status::OK(); } Status SpillSorter::add_batch(RowBatch* batch) { @@ -1097,7 +1076,7 @@ Status SpillSorter::add_batch(RowBatch* batch) { RETURN_IF_ERROR(_unsorted_run->init()); } } - return Status::OK; + return Status::OK(); } Status SpillSorter::input_done() { @@ -1141,7 +1120,7 @@ Status SpillSorter::input_done() { // Create the final merger. 
RETURN_IF_ERROR(create_merger(_sorted_runs.size())); } - return Status::OK; + return Status::OK(); } Status SpillSorter::get_next(RowBatch* output_batch, bool* eos) { @@ -1157,7 +1136,7 @@ Status SpillSorter::get_next(RowBatch* output_batch, bool* eos) { // In this case, rows are deep copied into output_batch. RETURN_IF_ERROR(_merger->get_next(output_batch, eos)); } - return Status::OK; + return Status::OK(); } Status SpillSorter::reset() { @@ -1169,7 +1148,7 @@ Status SpillSorter::reset() { _unsorted_run = _obj_pool.add( new Run(this, _output_row_desc->tuple_descriptors()[0], true)); RETURN_IF_ERROR(_unsorted_run->init()); - return Status::OK; + return Status::OK(); } Status SpillSorter::sort_run() { @@ -1201,7 +1180,7 @@ Status SpillSorter::sort_run() { } _sorted_runs.push_back(_unsorted_run); _unsorted_run = NULL; - return Status::OK; + return Status::OK(); } uint64_t SpillSorter::estimate_merge_mem( @@ -1284,7 +1263,7 @@ Status SpillSorter::merge_intermediate_runs() { _sorted_runs.push_back(merged_run); } - return Status::OK; + return Status::OK(); } Status SpillSorter::create_merger(int num_runs) { @@ -1313,7 +1292,7 @@ Status SpillSorter::create_merger(int num_runs) { RETURN_IF_ERROR(_merger->prepare(merge_runs)); _num_merges_counter->update(1); - return Status::OK; + return Status::OK(); } } // namespace impala diff --git a/be/src/runtime/stream_load/load_stream_mgr.h b/be/src/runtime/stream_load/load_stream_mgr.h index cfaf08a94a..9f4b21741d 100644 --- a/be/src/runtime/stream_load/load_stream_mgr.h +++ b/be/src/runtime/stream_load/load_stream_mgr.h @@ -37,11 +37,11 @@ public: std::lock_guard l(_lock); auto it = _stream_map.find(id); if (it != std::end(_stream_map)) { - return Status("id already exist"); + return Status::InternalError("id already exist"); } _stream_map.emplace(id, stream); VLOG(3) << "put stream load pipe: " << id; - return Status::OK; + return Status::OK(); } std::shared_ptr get(const UniqueId& id) { diff --git 
a/be/src/runtime/stream_load/stream_load_executor.cpp b/be/src/runtime/stream_load/stream_load_executor.cpp index 3bfc4d2a2b..d6a3b6bffd 100644 --- a/be/src/runtime/stream_load/stream_load_executor.cpp +++ b/be/src/runtime/stream_load/stream_load_executor.cpp @@ -61,10 +61,10 @@ Status StreamLoadExecutor::execute_plan_fragment(StreamLoadContext* ctx) { int64_t num_selected_rows = ctx->number_total_rows - ctx->number_unselected_rows; if ((0.0 + ctx->number_filtered_rows) / num_selected_rows > ctx->max_filter_ratio) { - status = Status("too many filtered rows"); + status = Status::InternalError("too many filtered rows"); } else if(ctx->number_loaded_rows == 0){ - status = Status("all partitions have no load data"); + status = Status::InternalError("all partitions have no load data"); } if (ctx->number_filtered_rows > 0 && !executor->runtime_state()->get_error_log_file_path().empty()) { @@ -108,7 +108,7 @@ Status StreamLoadExecutor::execute_plan_fragment(StreamLoadContext* ctx) { #else ctx->promise.set_value(k_stream_load_plan_status); #endif - return Status::OK; + return Status::OK(); } Status StreamLoadExecutor::begin_txn(StreamLoadContext* ctx) { @@ -142,7 +142,7 @@ Status StreamLoadExecutor::begin_txn(StreamLoadContext* ctx) { ctx->txn_id = result.txnId; ctx->need_rollback = true; - return Status::OK; + return Status::OK(); } Status StreamLoadExecutor::commit_txn(StreamLoadContext* ctx) { @@ -185,7 +185,7 @@ Status StreamLoadExecutor::commit_txn(StreamLoadContext* ctx) { } // commit success, set need_rollback to false ctx->need_rollback = false; - return Status::OK; + return Status::OK(); } void StreamLoadExecutor::rollback_txn(StreamLoadContext* ctx) { diff --git a/be/src/runtime/stream_load/stream_load_pipe.h b/be/src/runtime/stream_load/stream_load_pipe.h index c422967635..cf806e5817 100644 --- a/be/src/runtime/stream_load/stream_load_pipe.h +++ b/be/src/runtime/stream_load/stream_load_pipe.h @@ -46,7 +46,7 @@ public: if (_write_buf != nullptr) { if (size < 
_write_buf->remaining()) { _write_buf->put_bytes(data, size); - return Status::OK; + return Status::OK(); } else { pos = _write_buf->remaining(); _write_buf->put_bytes(data, pos); @@ -61,7 +61,7 @@ public: chunk_size = BitUtil::RoundUpToPowerOfTwo(chunk_size); _write_buf = ByteBuffer::allocate(chunk_size); _write_buf->put_bytes(data + pos, size - pos); - return Status::OK; + return Status::OK(); } Status append(const ByteBufferPtr& buf) override { @@ -82,14 +82,14 @@ public: } // cancelled if (_cancelled) { - return Status("cancelled"); + return Status::InternalError("cancelled"); } // finished if (_buf_queue.empty()) { DCHECK(_finished); *data_size = bytes_read; *eof = (bytes_read == 0); - return Status::OK; + return Status::OK(); } auto buf = _buf_queue.front(); size_t copy_size = std::min(*data_size - bytes_read, buf->remaining()); @@ -104,7 +104,7 @@ public: DCHECK(bytes_read == *data_size) << "bytes_read=" << bytes_read << ", *data_size=" << *data_size; *eof = false; - return Status::OK; + return Status::OK(); } // called when comsumer finished @@ -124,7 +124,7 @@ public: _finished = true; } _get_cond.notify_all(); - return Status::OK; + return Status::OK(); } // called when producer/comsumer failed @@ -148,13 +148,13 @@ private: _put_cond.wait(l); } if (_cancelled) { - return Status("cancelled"); + return Status::InternalError("cancelled"); } _buf_queue.push_back(buf); _buffered_bytes += buf->remaining(); } _get_cond.notify_one(); - return Status::OK; + return Status::OK(); } // Blocking queue diff --git a/be/src/runtime/tablet_writer_mgr.cpp b/be/src/runtime/tablet_writer_mgr.cpp index 59bd5cd70a..498a61abdc 100644 --- a/be/src/runtime/tablet_writer_mgr.cpp +++ b/be/src/runtime/tablet_writer_mgr.cpp @@ -102,7 +102,7 @@ Status TabletsChannel::open(const PTabletWriterOpenRequest& params) { std::lock_guard l(_lock); if (_opened) { // Normal case, already open by other sender - return Status::OK; + return Status::OK(); } _txn_id = params.txn_id(); _index_id = 
params.index_id(); @@ -119,7 +119,7 @@ Status TabletsChannel::open(const PTabletWriterOpenRequest& params) { _opened = true; _last_updated_time = time(nullptr); - return Status::OK; + return Status::OK(); } Status TabletsChannel::add_batch(const PTabletWriterAddBatchRequest& params) { @@ -131,11 +131,11 @@ Status TabletsChannel::add_batch(const PTabletWriterAddBatchRequest& params) { if (params.packet_seq() < next_seq) { LOG(INFO) << "packet has already recept before, expect_seq=" << next_seq << ", recept_seq=" << params.packet_seq(); - return Status::OK; + return Status::OK(); } else if (params.packet_seq() > next_seq) { LOG(WARNING) << "lost data packet, expect_seq=" << next_seq << ", recept_seq=" << params.packet_seq(); - return Status("lost data packet"); + return Status::InternalError("lost data packet"); } RowBatch row_batch(*_row_desc, params.row_batch(), &_mem_tracker); @@ -147,18 +147,18 @@ Status TabletsChannel::add_batch(const PTabletWriterAddBatchRequest& params) { if (it == std::end(_tablet_writers)) { std::stringstream ss; ss << "unknown tablet to append data, tablet=" << tablet_id; - return Status(ss.str()); + return Status::InternalError(ss.str()); } auto st = it->second->write(row_batch.get_row(i)->get_tuple(0)); if (st != OLAP_SUCCESS) { LOG(WARNING) << "tablet writer writer failed, tablet_id=" << it->first << ", transaction_id=" << _txn_id; - return Status("tablet writer write failed"); + return Status::InternalError("tablet writer write failed"); } } _next_seqs[params.sender_id()]++; _last_updated_time = time(nullptr); - return Status::OK; + return Status::OK(); } Status TabletsChannel::close(int sender_id, bool* finished, @@ -184,7 +184,7 @@ Status TabletsChannel::close(int sender_id, bool* finished, if (st != OLAP_SUCCESS) { LOG(WARNING) << "close tablet writer failed, tablet_id=" << it.first << ", transaction_id=" << _txn_id; - _close_status = Status("close tablet writer failed"); + _close_status = Status::InternalError("close tablet writer 
failed"); return _close_status; } } else { @@ -196,7 +196,7 @@ Status TabletsChannel::close(int sender_id, bool* finished, } } } - return Status::OK; + return Status::OK(); } Status TabletsChannel::_open_all_writers(const PTabletWriterOpenRequest& params) { @@ -212,7 +212,7 @@ Status TabletsChannel::_open_all_writers(const PTabletWriterOpenRequest& params) if (columns == nullptr) { std::stringstream ss; ss << "unknown index id, key=" << _key; - return Status(ss.str()); + return Status::InternalError(ss.str()); } for (auto& tablet : params.tablets()) { WriteRequest request; @@ -232,12 +232,12 @@ Status TabletsChannel::_open_all_writers(const PTabletWriterOpenRequest& params) << ", transaction_id=" << _txn_id << ", partition_id=" << tablet.partition_id() << ", status=" << st; - return Status("open tablet writer failed"); + return Status::InternalError("open tablet writer failed"); } _tablet_writers.emplace(tablet.tablet_id(), writer); } DCHECK(_tablet_writers.size() == params.tablets_size()); - return Status::OK; + return Status::OK(); } TabletWriterMgr::TabletWriterMgr(ExecEnv* exec_env) :_exec_env(exec_env) { @@ -264,7 +264,7 @@ Status TabletWriterMgr::open(const PTabletWriterOpenRequest& params) { } } RETURN_IF_ERROR(channel->open(params)); - return Status::OK; + return Status::OK(); } static void dummy_deleter(const CacheKey& key, void* value) { @@ -283,11 +283,11 @@ Status TabletWriterMgr::add_batch( // success only when eos be true if (handle != nullptr && request.has_eos() && request.eos()) { _lastest_success_channel->release(handle); - return Status::OK; + return Status::OK(); } std::stringstream ss; ss << "TabletWriter add batch with unknown id, key=" << key; - return Status(ss.str()); + return Status::InternalError(ss.str()); } channel = *value; } @@ -322,7 +322,7 @@ Status TabletWriterMgr::cancel(const PTabletWriterCancelRequest& params) { std::lock_guard l(_lock); _tablets_channels.erase(key); } - return Status::OK; + return Status::OK(); } Status 
TabletWriterMgr::start_bg_worker() { @@ -339,7 +339,7 @@ Status TabletWriterMgr::start_bg_worker() { } }); _tablets_channel_clean_thread.detach(); - return Status::OK; + return Status::OK(); } Status TabletWriterMgr::_start_tablets_channel_clean() { @@ -361,7 +361,7 @@ Status TabletWriterMgr::_start_tablets_channel_clean() { LOG(INFO) << "erase timeout tablets channel: " << key; } } - return Status::OK; + return Status::OK(); } std::string TabletsChannelKey::to_string() const { diff --git a/be/src/runtime/test_env.cc b/be/src/runtime/test_env.cc index 42edc97c8c..691a08ee3d 100644 --- a/be/src/runtime/test_env.cc +++ b/be/src/runtime/test_env.cc @@ -73,7 +73,7 @@ Status TestEnv::create_query_state(int64_t query_id, int max_buffers, int block_ RuntimeState** runtime_state) { *runtime_state = create_runtime_state(query_id); if (*runtime_state == NULL) { - return Status("Unexpected error creating RuntimeState"); + return Status::InternalError("Unexpected error creating RuntimeState"); } shared_ptr mgr; @@ -85,7 +85,7 @@ Status TestEnv::create_query_state(int64_t query_id, int max_buffers, int block_ // (*runtime_state)->_block_mgr = mgr; _query_states.push_back(shared_ptr(*runtime_state)); - return Status::OK; + return Status::OK(); } Status TestEnv::create_query_states(int64_t start_query_id, int num_mgrs, @@ -97,7 +97,7 @@ Status TestEnv::create_query_states(int64_t start_query_id, int num_mgrs, &runtime_state)); runtime_states->push_back(runtime_state); } - return Status::OK; + return Status::OK(); } void TestEnv::tear_down_query_states() { diff --git a/be/src/runtime/tmp_file_mgr.cc b/be/src/runtime/tmp_file_mgr.cc index 51aae7ea66..1c6cfc9ae3 100644 --- a/be/src/runtime/tmp_file_mgr.cc +++ b/be/src/runtime/tmp_file_mgr.cc @@ -139,7 +139,7 @@ Status TmpFileMgr::init_custom( << "directories in list: " << join(tmp_dirs, ",") << ". 
See previous warnings for information on causes."; } - return Status::OK; + return Status::OK(); } Status TmpFileMgr::get_file( @@ -152,7 +152,7 @@ Status TmpFileMgr::get_file( if (is_blacklisted(device_id)) { std::stringstream error_msg; error_msg << "path is blacklist. path: " << _tmp_dirs[device_id].path(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } // Generate the full file path. @@ -163,7 +163,7 @@ Status TmpFileMgr::get_file( new_file_path /= file_name.str(); *new_file = new File(this, device_id, new_file_path.string()); - return Status::OK; + return Status::OK(); } string TmpFileMgr::get_tmp_dir_path(DeviceId device_id) const { @@ -236,7 +236,7 @@ Status TmpFileMgr::File::allocate_space(int64_t write_size, int64_t* offset) { _blacklisted = true; std::stringstream error_msg; error_msg << "path is blacklist. path: " << _path; - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } if (_current_size == 0) { // First call to AllocateSpace. Create the file. @@ -255,7 +255,7 @@ Status TmpFileMgr::File::allocate_space(int64_t write_size, int64_t* offset) { } *offset = _current_size; _current_size = new_size; - return Status::OK; + return Status::OK(); } void TmpFileMgr::File::report_io_error(const std::string& error_msg) { @@ -275,7 +275,7 @@ Status TmpFileMgr::File::remove() { if (_current_size > 0) { FileSystemUtil::remove_paths(vector(1, _path)); } - return Status::OK; + return Status::OK(); } } //namespace doris diff --git a/be/src/runtime/tmp_file_mgr.h b/be/src/runtime/tmp_file_mgr.h index c058687694..6ba9f5707f 100644 --- a/be/src/runtime/tmp_file_mgr.h +++ b/be/src/runtime/tmp_file_mgr.h @@ -54,7 +54,7 @@ public: // Allocates 'write_size' bytes in this file for a new block of data. // The file size is increased by a call to truncate() if necessary. // The physical file is created on the first call to AllocateSpace(). - // Returns Status::OK() and sets offset on success. 
+ // Returns Status::OK() and sets offset on success. // Returns an error status if an unexpected error occurs. // If an error status is returned, the caller can try a different temporary file. Status allocate_space(int64_t write_size, int64_t* offset); diff --git a/be/src/runtime/user_function_cache.cpp b/be/src/runtime/user_function_cache.cpp index fc6001aa9a..26b6ebdae5 100644 --- a/be/src/runtime/user_function_cache.cpp +++ b/be/src/runtime/user_function_cache.cpp @@ -119,18 +119,18 @@ Status UserFunctionCache::init(const std::string& lib_dir) { RETURN_IF_ERROR(dynamic_open(nullptr, &_current_process_handle)); // 2. load all cached RETURN_IF_ERROR(_load_cached_lib()); - return Status::OK; + return Status::OK(); } Status UserFunctionCache::_load_entry_from_lib(const std::string& dir, const std::string& file) { if (!boost::algorithm::ends_with(file, ".so")) { - return Status("unknown library file format"); + return Status::InternalError("unknown library file format"); } std::vector split_parts; boost::split(split_parts, file, boost::is_any_of(".")); if (split_parts.size() != 3) { - return Status("user function's name should be function_id.checksum.so"); + return Status::InternalError("user function's name should be function_id.checksum.so"); } int64_t function_id = std::stol(split_parts[0]); std::string checksum = split_parts[1]; @@ -138,7 +138,7 @@ Status UserFunctionCache::_load_entry_from_lib(const std::string& dir, const std if (it != _entry_map.end()) { LOG(WARNING) << "meet a same function id user function library, function_id=" << function_id << ", one_checksum=" << checksum << ", other_checksum=" << it->second->checksum; - return Status("duplicate function id"); + return Status::InternalError("duplicate function id"); } // create a cache entry and put it into entry map UserFunctionCacheEntry* entry = new UserFunctionCacheEntry( @@ -148,7 +148,7 @@ Status UserFunctionCache::_load_entry_from_lib(const std::string& dir, const std entry->ref();
_entry_map[function_id] = entry; - return Status::OK; + return Status::OK(); } Status UserFunctionCache::_load_cached_lib() { @@ -167,7 +167,7 @@ Status UserFunctionCache::_load_cached_lib() { RETURN_IF_ERROR(FileUtils::create_dir(sub_dir)); RETURN_IF_ERROR(FileUtils::scan_dir(sub_dir, scan_cb)); } - return Status::OK; + return Status::OK(); } std::string get_real_symbol(const std::string& symbol) { @@ -189,7 +189,7 @@ Status UserFunctionCache::get_function_ptr( if (fid == 0) { // Just loading a function ptr in the current process. No need to take any locks. RETURN_IF_ERROR(dynamic_lookup(_current_process_handle, symbol.c_str(), fn_ptr)); - return Status::OK; + return Status::OK(); } // if we need to unref entry @@ -264,7 +264,7 @@ Status UserFunctionCache::_get_cache_entry( } *output_entry = entry; - return Status::OK; + return Status::OK(); } void UserFunctionCache::_destroy_cache_entry(UserFunctionCacheEntry* entry) { @@ -287,7 +287,7 @@ void UserFunctionCache::_destroy_cache_entry(UserFunctionCacheEntry* entry) { Status UserFunctionCache::_load_cache_entry( const std::string& url, UserFunctionCacheEntry* entry) { if (entry->is_loaded.load()) { - return Status::OK; + return Status::OK(); } std::unique_lock l(entry->load_lock); @@ -296,7 +296,7 @@ Status UserFunctionCache::_load_cache_entry( } RETURN_IF_ERROR(_load_cache_entry_internal(entry)); - return Status::OK; + return Status::OK(); } // entry's lock must be held @@ -311,7 +311,7 @@ Status UserFunctionCache::_download_lib( if (fp == nullptr) { LOG(WARNING) << "fail to open file, file=" << tmp_file << ", error=" << ferror(fp.get()); - return Status("fail to open file"); + return Status::InternalError("fail to open file"); } Md5Digest digest; @@ -324,7 +324,7 @@ Status UserFunctionCache::_download_lib( if (res != 1) { LOG(WARNING) << "fail to write data to file, file=" << tmp_file << ", error=" << ferror(fp.get()); - status = Status("fail to write data when download"); + status = Status::InternalError("fail to 
write data when download"); return false; } return true; @@ -335,7 +335,7 @@ Status UserFunctionCache::_download_lib( if (!boost::iequals(digest.hex(), entry->checksum)) { LOG(WARNING) << "UDF's checksum is not equal, one=" << digest.hex() << ", other=" << entry->checksum; - return Status("UDF's library checksum is not match"); + return Status::InternalError("UDF's library checksum is not match"); } // close this file fp.reset(); @@ -346,19 +346,19 @@ Status UserFunctionCache::_download_lib( char buf[64]; LOG(WARNING) << "fail to rename file from=" << tmp_file << ", to=" << entry->lib_file << ", errno=" << errno << ", errmsg=" << strerror_r(errno, buf, 64); - return Status("fail to rename file"); + return Status::InternalError("fail to rename file"); } // check download entry->is_downloaded = true; - return Status::OK; + return Status::OK(); } // entry's lock must be held Status UserFunctionCache::_load_cache_entry_internal(UserFunctionCacheEntry* entry) { RETURN_IF_ERROR(dynamic_open(entry->lib_file.c_str(), &entry->lib_handle)); entry->is_loaded.store(true); - return Status::OK; + return Status::OK(); } std::string UserFunctionCache::_make_lib_file(int64_t function_id, const std::string& checksum) { diff --git a/be/src/service/backend_service.cpp b/be/src/service/backend_service.cpp index 193d4a79dc..b5d6659b48 100644 --- a/be/src/service/backend_service.cpp +++ b/be/src/service/backend_service.cpp @@ -75,7 +75,7 @@ Status BackendService::create_service(ExecEnv* exec_env, int port, ThriftServer* LOG(INFO) << "DorisInternalService listening on " << port; - return Status::OK; + return Status::OK(); } void BackendService::exec_plan_fragment(TExecPlanFragmentResult& return_val, @@ -89,7 +89,7 @@ void BackendService::exec_plan_fragment(TExecPlanFragmentResult& return_val, Status BackendService::start_plan_fragment_execution(const TExecPlanFragmentParams& exec_params) { if (!exec_params.fragment.__isset.output_sink) { - return Status("missing sink in plan fragment"); + 
return Status::InternalError("missing sink in plan fragment"); } return _exec_env->fragment_mgr()->exec_plan_fragment(exec_params); } @@ -242,7 +242,7 @@ void BackendService::submit_routine_load_task( // we do not care about each task's submit result. just return OK. // FE will handle the failure. - return Status::OK.to_thrift(&t_status); + return Status::OK().to_thrift(&t_status); } } // namespace doris diff --git a/be/src/service/brpc_service.cpp b/be/src/service/brpc_service.cpp index 75e7350ebe..fc39f39e0e 100644 --- a/be/src/service/brpc_service.cpp +++ b/be/src/service/brpc_service.cpp @@ -45,9 +45,9 @@ Status BRpcService::start(int port) { char buf[64]; LOG(WARNING) << "start brpc failed, errno=" << errno << ", errmsg=" << strerror_r(errno, buf, 64) << ", port=" << port; - return Status("start brpc service failed"); + return Status::InternalError("start brpc service failed"); } - return Status::OK; + return Status::OK(); } void BRpcService::join() { diff --git a/be/src/service/http_service.cpp b/be/src/service/http_service.cpp index ece66e6d9c..2e977eca5c 100644 --- a/be/src/service/http_service.cpp +++ b/be/src/service/http_service.cpp @@ -113,7 +113,7 @@ Status HttpService::start() { #endif RETURN_IF_ERROR(_ev_http_server->start()); - return Status::OK; + return Status::OK(); } } diff --git a/be/src/service/internal_service.cpp b/be/src/service/internal_service.cpp index fb4baac6cf..6b6069f28f 100644 --- a/be/src/service/internal_service.cpp +++ b/be/src/service/internal_service.cpp @@ -204,7 +204,7 @@ void PInternalServiceImpl::get_info( st.to_protobuf(response->mutable_status()); return; } - Status::OK.to_protobuf(response->mutable_status()); + Status::OK().to_protobuf(response->mutable_status()); } diff --git a/be/src/util/broker_load_error_hub.cpp b/be/src/util/broker_load_error_hub.cpp index fa814d7022..831d3b3fd9 100644 --- a/be/src/util/broker_load_error_hub.cpp +++ b/be/src/util/broker_load_error_hub.cpp @@ -43,7 +43,7 @@ Status 
BrokerLoadErrorHub::prepare() { RETURN_IF_ERROR(_broker_writer->open()); _is_valid = true; - return Status::OK; + return Status::OK(); } Status BrokerLoadErrorHub::export_error(const ErrorMsg& error_msg) { @@ -51,7 +51,7 @@ Status BrokerLoadErrorHub::export_error(const ErrorMsg& error_msg) { ++_total_error_num; if (!_is_valid) { - return Status::OK; + return Status::OK(); } _error_msgs.push(error_msg); @@ -59,14 +59,14 @@ Status BrokerLoadErrorHub::export_error(const ErrorMsg& error_msg) { RETURN_IF_ERROR(write_to_broker()); } - return Status::OK; + return Status::OK(); } Status BrokerLoadErrorHub::close() { std::lock_guard lock(_mtx); if (!_is_valid) { - return Status::OK; + return Status::OK(); } if (!_error_msgs.empty()) { @@ -77,7 +77,7 @@ Status BrokerLoadErrorHub::close() { _broker_writer->close(); _is_valid = false; - return Status::OK; + return Status::OK(); } Status BrokerLoadErrorHub::write_to_broker() { @@ -91,7 +91,7 @@ Status BrokerLoadErrorHub::write_to_broker() { const std::string& msg = ss.str(); size_t written_len = 0; RETURN_IF_ERROR(_broker_writer->write((uint8_t*) msg.c_str(), msg.length(), &written_len)); - return Status::OK; + return Status::OK(); } std::string BrokerLoadErrorHub::debug_string() const { diff --git a/be/src/util/codec.cpp b/be/src/util/codec.cpp index bd70a0174a..b18c790d61 100644 --- a/be/src/util/codec.cpp +++ b/be/src/util/codec.cpp @@ -71,14 +71,14 @@ Status Codec::create_compressor(RuntimeState* runtime_state, MemPool* mem_pool, if (type == CODEC_MAP.end()) { std::stringstream ss; ss << UNKNOWN_CODEC_ERROR << codec; - return Status(ss.str()); + return Status::InternalError(ss.str()); } Codec* comp = NULL; RETURN_IF_ERROR( create_compressor(runtime_state, mem_pool, reuse, type->second, &comp)); compressor->reset(comp); - return Status::OK; + return Status::OK(); } Status Codec::create_compressor(RuntimeState* runtime_state, MemPool* mem_pool, @@ -88,7 +88,7 @@ Status Codec::create_compressor(RuntimeState* runtime_state, 
MemPool* mem_pool, RETURN_IF_ERROR( create_compressor(runtime_state, mem_pool, reuse, format, &comp)); compressor->reset(comp); - return Status::OK; + return Status::OK(); } Status Codec::create_compressor(RuntimeState* runtime_state, MemPool* mem_pool, @@ -97,7 +97,7 @@ Status Codec::create_compressor(RuntimeState* runtime_state, MemPool* mem_pool, switch (format) { case THdfsCompression::NONE: *compressor = NULL; - return Status::OK; + return Status::OK(); case THdfsCompression::GZIP: *compressor = new GzipCompressor(GzipCompressor::GZIP, mem_pool, reuse); @@ -136,14 +136,14 @@ Status Codec::create_decompressor(RuntimeState* runtime_state, MemPool* mem_pool if (type == CODEC_MAP.end()) { std::stringstream ss; ss << UNKNOWN_CODEC_ERROR << codec; - return Status(ss.str()); + return Status::InternalError(ss.str()); } Codec* decom = NULL; RETURN_IF_ERROR( create_decompressor(runtime_state, mem_pool, reuse, type->second, &decom)); decompressor->reset(decom); - return Status::OK; + return Status::OK(); } Status Codec::create_decompressor(RuntimeState* runtime_state, MemPool* mem_pool, @@ -153,7 +153,7 @@ Status Codec::create_decompressor(RuntimeState* runtime_state, MemPool* mem_pool RETURN_IF_ERROR( create_decompressor(runtime_state, mem_pool, reuse, format, &decom)); decompressor->reset(decom); - return Status::OK; + return Status::OK(); } Status Codec::create_decompressor(RuntimeState* runtime_state, MemPool* mem_pool, @@ -162,7 +162,7 @@ Status Codec::create_decompressor(RuntimeState* runtime_state, MemPool* mem_pool switch (format) { case THdfsCompression::NONE: *decompressor = NULL; - return Status::OK; + return Status::OK(); case THdfsCompression::DEFAULT: case THdfsCompression::GZIP: diff --git a/be/src/util/compress.cpp b/be/src/util/compress.cpp index e24c30d006..fca5f44c47 100644 --- a/be/src/util/compress.cpp +++ b/be/src/util/compress.cpp @@ -50,10 +50,10 @@ Status GzipCompressor::init() { if ((ret = deflateInit2(&_stream, Z_DEFAULT_COMPRESSION, 
Z_DEFLATED, window_bits, 9, Z_DEFAULT_STRATEGY)) != Z_OK) { - return Status("zlib deflateInit failed: " + std::string(_stream.msg)); + return Status::InternalError("zlib deflateInit failed: " + std::string(_stream.msg)); } - return Status::OK; + return Status::OK(); } int GzipCompressor::max_compressed_len(int input_length) { @@ -76,16 +76,16 @@ Status GzipCompressor::compress( if ((ret = deflate(&_stream, Z_FINISH)) != Z_STREAM_END) { std::stringstream ss; ss << "zlib deflate failed: " << _stream.msg; - return Status(ss.str()); + return Status::InternalError(ss.str()); } *output_length = *output_length - _stream.avail_out; if (deflateReset(&_stream) != Z_OK) { - return Status("zlib deflateReset failed: " + std::string(_stream.msg)); + return Status::InternalError("zlib deflateReset failed: " + std::string(_stream.msg)); } - return Status::OK; + return Status::OK(); } Status GzipCompressor::process_block( @@ -110,7 +110,7 @@ Status GzipCompressor::process_block( RETURN_IF_ERROR(compress(input_length, input, output_length, _out_buffer)); *output = _out_buffer; - return Status::OK; + return Status::OK(); } BzipCompressor::BzipCompressor(MemPool* mem_pool, bool reuse_buffer) : @@ -153,7 +153,7 @@ Status BzipCompressor::process_block( DCHECK_EQ(*output_length, 0); if (*output_length != 0) { - return Status("Too small buffer passed to BzipCompressor"); + return Status::InternalError("Too small buffer passed to BzipCompressor"); } _out_buffer = NULL; @@ -163,14 +163,14 @@ Status BzipCompressor::process_block( if (ret != BZ_OK) { std::stringstream ss; ss << "bzlib BZ2_bzBuffToBuffCompressor failed: " << ret; - return Status(ss.str()); + return Status::InternalError(ss.str()); } *output = _out_buffer; *output_length = outlen; _memory_pool->acquire_data(&_temp_memory_pool, false); - return Status::OK; + return Status::OK(); } // Currently this is only use for testing of the decompressor. 
@@ -223,7 +223,7 @@ Status SnappyBlockCompressor::process_block( *output = _out_buffer; *output_length = outp - _out_buffer; - return Status::OK; + return Status::OK(); } SnappyCompressor::SnappyCompressor(MemPool* mem_pool, bool reuse_buffer) : @@ -242,7 +242,7 @@ Status SnappyCompressor::compress(int input_len, uint8_t* input, static_cast(input_len), reinterpret_cast(output), &out_len); *output_len = out_len; - return Status::OK; + return Status::OK(); } Status SnappyCompressor::process_block(int input_length, uint8_t* input, @@ -250,7 +250,7 @@ Status SnappyCompressor::process_block(int input_length, uint8_t* input, int max_compressed_len = this->max_compressed_len(input_length); if (*output_length != 0 && *output_length < max_compressed_len) { - return Status("process_block: output length too small"); + return Status::InternalError("process_block: output length too small"); } if (*output_length != 0) { diff --git a/be/src/util/compress.h b/be/src/util/compress.h index 91564f5f19..5a1527eab7 100644 --- a/be/src/util/compress.h +++ b/be/src/util/compress.h @@ -83,7 +83,7 @@ public: int* output_length, uint8_t** output); // Initialize the compressor. virtual Status init() { - return Status::OK; + return Status::OK(); } }; @@ -99,7 +99,7 @@ public: protected: // Snappy does not need initialization virtual Status init() { - return Status::OK; + return Status::OK(); } }; @@ -125,7 +125,7 @@ public: protected: // Snappy does not need initialization virtual Status init() { - return Status::OK; + return Status::OK(); } }; diff --git a/be/src/util/decompress.cpp b/be/src/util/decompress.cpp index 7443681189..49b0600dc7 100644 --- a/be/src/util/decompress.cpp +++ b/be/src/util/decompress.cpp @@ -44,10 +44,10 @@ Status GzipDecompressor::init() { int window_bits = _is_deflate ? 
-WINDOW_BITS : WINDOW_BITS | DETECT_CODEC; if ((ret = inflateInit2(&_stream, window_bits)) != Z_OK) { - return Status("zlib inflateInit failed: " + std::string(_stream.msg)); + return Status::InternalError("zlib inflateInit failed: " + std::string(_stream.msg)); } - return Status::OK; + return Status::OK(); } Status GzipDecompressor::process_block(int input_length, uint8_t* input, @@ -63,7 +63,7 @@ Status GzipDecompressor::process_block(int input_length, uint8_t* input, _buffer_length = input_length * 2; if (_buffer_length > MAX_BLOCK_SIZE) { - return Status("Decompressor: block size is too big"); + return Status::InternalError("Decompressor: block size is too big"); } _out_buffer = _temp_memory_pool.allocate(_buffer_length); @@ -86,31 +86,31 @@ Status GzipDecompressor::process_block(int input_length, uint8_t* input, DCHECK_EQ(*output_length, 0); if (*output_length != 0) { - return Status("Too small a buffer passed to GzipDecompressor"); + return Status::InternalError("Too small a buffer passed to GzipDecompressor"); } _temp_memory_pool.clear(); _buffer_length *= 2; if (_buffer_length > MAX_BLOCK_SIZE) { - return Status("Decompressor: block size is too big"); + return Status::InternalError("Decompressor: block size is too big"); } _out_buffer = _temp_memory_pool.allocate(_buffer_length); if (inflateReset(&_stream) != Z_OK) { - return Status("zlib inflateEnd failed: " + std::string(_stream.msg)); + return Status::InternalError("zlib inflateEnd failed: " + std::string(_stream.msg)); } continue; } - return Status("zlib inflate failed: " + std::string(_stream.msg)); + return Status::InternalError("zlib inflate failed: " + std::string(_stream.msg)); } } if (inflateReset(&_stream) != Z_OK) { - return Status("zlib inflateEnd failed: " + std::string(_stream.msg)); + return Status::InternalError("zlib inflateEnd failed: " + std::string(_stream.msg)); } *output = _out_buffer; @@ -125,7 +125,7 @@ Status GzipDecompressor::process_block(int input_length, uint8_t* input, 
_memory_pool->acquire_data(&_temp_memory_pool, _reuse_buffer); } - return Status::OK; + return Status::OK(); } BzipDecompressor::BzipDecompressor(MemPool* mem_pool, bool reuse_buffer) : @@ -145,7 +145,7 @@ Status BzipDecompressor::process_block(int input_length, uint8_t* input, _buffer_length = input_length * 2; if (_buffer_length > MAX_BLOCK_SIZE) { - return Status("Decompressor: block size is too big"); + return Status::InternalError("Decompressor: block size is too big"); } _out_buffer = _temp_memory_pool.allocate(_buffer_length); @@ -162,7 +162,7 @@ Status BzipDecompressor::process_block(int input_length, uint8_t* input, _buffer_length = _buffer_length * 2; if (_buffer_length > MAX_BLOCK_SIZE) { - return Status("Decompressor: block size is too big"); + return Status::InternalError("Decompressor: block size is too big"); } _out_buffer = _temp_memory_pool.allocate(_buffer_length); @@ -178,7 +178,7 @@ Status BzipDecompressor::process_block(int input_length, uint8_t* input, DCHECK_EQ(*output_length, 0); if (*output_length != 0) { - return Status("Too small a buffer passed to BzipDecompressor"); + return Status::InternalError("Too small a buffer passed to BzipDecompressor"); } _out_buffer = NULL; @@ -188,7 +188,7 @@ Status BzipDecompressor::process_block(int input_length, uint8_t* input, if (ret != BZ_OK) { std::stringstream ss; ss << "bzlib BZ2_bzBuffToBuffDecompressor failed: " << ret; - return Status(ss.str()); + return Status::InternalError(ss.str()); } @@ -202,7 +202,7 @@ Status BzipDecompressor::process_block(int input_length, uint8_t* input, _memory_pool->acquire_data(&_temp_memory_pool, _reuse_buffer); } - return Status::OK; + return Status::OK(); } SnappyDecompressor::SnappyDecompressor(MemPool* mem_pool, bool reuse_buffer) @@ -221,14 +221,14 @@ Status SnappyDecompressor::process_block(int input_length, uint8_t* input, // Snappy saves the uncompressed length so we never have to retry. 
if (!snappy::GetUncompressedLength(reinterpret_cast(input), input_length, &uncompressed_length)) { - return Status("Snappy: GetUncompressedLength failed"); + return Status::InternalError("Snappy: GetUncompressedLength failed"); } if (!_reuse_buffer || _out_buffer == NULL || _buffer_length < uncompressed_length) { _buffer_length = uncompressed_length; if (_buffer_length > MAX_BLOCK_SIZE) { - return Status("Decompressor: block size is too big"); + return Status::InternalError("Decompressor: block size is too big"); } _out_buffer = _memory_pool->allocate(_buffer_length); @@ -238,7 +238,7 @@ Status SnappyDecompressor::process_block(int input_length, uint8_t* input, if (!snappy::RawUncompress( reinterpret_cast(input), static_cast(input_length), reinterpret_cast(_out_buffer))) { - return Status("Snappy: RawUncompress failed"); + return Status::InternalError("Snappy: RawUncompress failed"); } if (*output_length == 0) { @@ -246,7 +246,7 @@ Status SnappyDecompressor::process_block(int input_length, uint8_t* input, *output = _out_buffer; } - return Status::OK; + return Status::OK(); } SnappyBlockDecompressor::SnappyBlockDecompressor(MemPool* mem_pool, bool reuse_buffer) : @@ -290,7 +290,7 @@ static Status snappy_block_decompress(int input_len, uint8_t* input, bool size_o std::stringstream ss; ss << "Decompressor: block size is too big. Data is likely corrupt. " << "Size: " << uncompressed_block_len; - return Status(ss.str()); + return Status::InternalError(ss.str()); } break; @@ -309,7 +309,7 @@ static Status snappy_block_decompress(int input_len, uint8_t* input, bool size_o if (compressed_len == 0 || compressed_len > input_len) { if (uncompressed_total_len == 0) { - return Status( + return Status::InternalError( "Decompressor: invalid compressed length. 
Data is likely corrupt."); } @@ -323,7 +323,7 @@ static Status snappy_block_decompress(int input_len, uint8_t* input, bool size_o if (!snappy::GetUncompressedLength(reinterpret_cast(input), input_len, &uncompressed_len)) { if (uncompressed_total_len == 0) { - return Status("Snappy: GetUncompressedLength failed"); + return Status::InternalError("Snappy: GetUncompressedLength failed"); } input_len = 0; @@ -336,7 +336,7 @@ static Status snappy_block_decompress(int input_len, uint8_t* input, bool size_o // Decompress this snappy block if (!snappy::RawUncompress(reinterpret_cast(input), compressed_len, output)) { - return Status("Snappy: RawUncompress failed"); + return Status::InternalError("Snappy: RawUncompress failed"); } output += uncompressed_len; @@ -352,10 +352,10 @@ static Status snappy_block_decompress(int input_len, uint8_t* input, bool size_o if (size_only) { *output_len = uncompressed_total_len; } else if (*output_len != uncompressed_total_len) { - return Status("Snappy: Decompressed size is not correct."); + return Status::InternalError("Snappy: Decompressed size is not correct."); } - return Status::OK; + return Status::OK(); } Status SnappyBlockDecompressor::process_block(int input_len, uint8_t* input, @@ -381,12 +381,12 @@ Status SnappyBlockDecompressor::process_block(int input_len, uint8_t* input, std::stringstream ss; ss << "Decompressor: block size is too big. Data is likely corrupt. 
" << "Size: " << *output_len; - return Status(ss.str()); + return Status::InternalError(ss.str()); } char* out_ptr = reinterpret_cast(*output); RETURN_IF_ERROR(snappy_block_decompress(input_len, input, false, output_len, out_ptr)); - return Status::OK; + return Status::OK(); } } diff --git a/be/src/util/decompress.h b/be/src/util/decompress.h index 27d762dcce..03514d0758 100644 --- a/be/src/util/decompress.h +++ b/be/src/util/decompress.h @@ -61,7 +61,7 @@ public: protected: // Bzip does not need initialization virtual Status init() { - return Status::OK; + return Status::OK(); } }; @@ -77,7 +77,7 @@ public: protected: // Snappy does not need initialization virtual Status init() { - return Status::OK; + return Status::OK(); } }; @@ -94,7 +94,7 @@ public: protected: // Snappy does not need initialization virtual Status init() { - return Status::OK; + return Status::OK(); } }; diff --git a/be/src/util/disk_info.cpp b/be/src/util/disk_info.cpp index b2ac7dec2e..5767de3643 100644 --- a/be/src/util/disk_info.cpp +++ b/be/src/util/disk_info.cpp @@ -163,7 +163,7 @@ Status DiskInfo::get_disk_devices(const std::vector& paths, ss << "open /proc/mounts failed, errno:" << errno << ", message:" << strerror_r(errno, buf, 64); LOG(WARNING) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } Status status; @@ -199,7 +199,7 @@ Status DiskInfo::get_disk_devices(const std::vector& paths, ss << "open /proc/mounts failed, errno:" << errno << ", message:" << strerror_r(errno, buf, 64); LOG(WARNING) << ss.str(); - status = Status(ss.str()); + status = Status::InternalError(ss.str()); break; } if (max_mount_size > 0) { diff --git a/be/src/util/dynamic_util.cpp b/be/src/util/dynamic_util.cpp index 552e4fe5f8..0c9413c7f0 100644 --- a/be/src/util/dynamic_util.cpp +++ b/be/src/util/dynamic_util.cpp @@ -29,10 +29,10 @@ Status dynamic_lookup(void* handle, const char* symbol, void** fn_ptr) { if (error != NULL) { std::stringstream ss; ss << "Unable to find " << 
symbol << "\ndlerror: " << error; - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } Status dynamic_open(const char* library, void** handle) { @@ -43,10 +43,10 @@ Status dynamic_open(const char* library, void** handle) { if (*handle == NULL) { std::stringstream ss; ss << "Unable to load " << library << "\ndlerror: " << dlerror(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } void dynamic_close(void* handle) { diff --git a/be/src/util/file_utils.cpp b/be/src/util/file_utils.cpp index bc3dfbdb60..8073ee39e1 100644 --- a/be/src/util/file_utils.cpp +++ b/be/src/util/file_utils.cpp @@ -45,21 +45,21 @@ Status FileUtils::create_dir(const std::string& dir_path) { if (!boost::filesystem::is_directory(dir_path.c_str())) { std::stringstream ss; ss << "Path(" << dir_path << ") already exists, but not a directory."; - return Status(ss.str()); + return Status::InternalError(ss.str()); } } else { if (!boost::filesystem::create_directories(dir_path.c_str())) { std::stringstream ss; ss << "make directory failed. path=" << dir_path; - return Status(ss.str()); + return Status::InternalError(ss.str()); } } } catch (...) { std::stringstream ss; ss << "make directory failed. path=" << dir_path; - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } Status FileUtils::remove_all(const std::string& file_path) { @@ -71,14 +71,14 @@ Status FileUtils::remove_all(const std::string& file_path) { std::stringstream ss; ss << "remove all(" << file_path << ") failed, because: " << ec; - return Status(ss.str()); + return Status::InternalError(ss.str()); } } catch (...) 
{ std::stringstream ss; ss << "remove all(" << file_path << ") failed, because: exception"; - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } Status FileUtils::scan_dir( @@ -90,7 +90,7 @@ Status FileUtils::scan_dir( char buf[64]; std::stringstream ss; ss << "opendir(" << dir_path << ") failed, because: " << strerror_r(errno, buf, 64); - return Status(ss.str()); + return Status::InternalError(ss.str()); } DeferOp close_dir(std::bind(&closedir, dir)); @@ -115,7 +115,7 @@ Status FileUtils::scan_dir( *file_count = count; } - return Status::OK; + return Status::OK(); } Status FileUtils::scan_dir( @@ -126,7 +126,7 @@ Status FileUtils::scan_dir( if (dir == nullptr) { char buf[64]; LOG(WARNING) << "fail to open dir, dir=" << dir_path << ", errmsg=" << strerror_r(errno, buf, 64); - return Status("fail to opendir"); + return Status::InternalError("fail to opendir"); } while (true) { @@ -144,7 +144,7 @@ Status FileUtils::scan_dir( } } - return Status::OK; + return Status::OK(); } bool FileUtils::is_dir(const std::string& path) { @@ -181,7 +181,7 @@ Status FileUtils::split_pathes(const char* path, std::vector* path_ } catch (...) 
{ std::stringstream ss; ss << "Boost split path failed.[path=" << path << "]"; - return Status(ss.str()); + return Status::InternalError(ss.str()); } for (std::vector::iterator it = path_vec->begin(); it != path_vec->end();) { @@ -200,16 +200,16 @@ Status FileUtils::split_pathes(const char* path, std::vector* path_ if (std::unique(path_vec->begin(), path_vec->end()) != path_vec->end()) { std::stringstream ss; ss << "Same path in path.[path=" << path << "]"; - return Status(ss.str()); + return Status::InternalError(ss.str()); } if (path_vec->size() == 0) { std::stringstream ss; ss << "Size of vector after split is zero.[path=" << path << "]"; - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } Status FileUtils::copy_file(const std::string& src_path, const std::string& dest_path) { @@ -218,7 +218,7 @@ Status FileUtils::copy_file(const std::string& src_path, const std::string& dest if (src_file.open(src_path.c_str(), O_RDONLY) != OLAP_SUCCESS) { char errmsg[64]; LOG(ERROR) << "open file failed: " << src_path << strerror_r(errno, errmsg, 64); - return Status("Internal Error"); + return Status::InternalError("Internal Error"); } // create dest file and overwrite existing file FileHandler dest_file; @@ -226,7 +226,7 @@ Status FileUtils::copy_file(const std::string& src_path, const std::string& dest != OLAP_SUCCESS) { char errmsg[64]; LOG(ERROR) << "open file failed: " << dest_path << strerror_r(errno, errmsg, 64); - return Status("Internal Error"); + return Status::InternalError("Internal Error"); } const int64_t BUF_SIZE = 8192; @@ -237,28 +237,28 @@ Status FileUtils::copy_file(const std::string& src_path, const std::string& dest while (src_length > 0) { int64_t to_read = BUF_SIZE < src_length ? 
BUF_SIZE : src_length; if (OLAP_SUCCESS != (src_file.pread(buf, to_read, offset))) { - return Status("Internal Error"); + return Status::InternalError("Internal Error"); } if (OLAP_SUCCESS != (dest_file.pwrite(buf, to_read, offset))) { - return Status("Internal Error"); + return Status::InternalError("Internal Error"); } offset += to_read; src_length -= to_read; } - return Status::OK; + return Status::OK(); } Status FileUtils::md5sum(const std::string& file, std::string* md5sum) { int fd = open(file.c_str(), O_RDONLY); if (fd < 0) { - return Status("failed to open file"); + return Status::InternalError("failed to open file"); } struct stat statbuf; if (fstat(fd, &statbuf) < 0) { close(fd); - return Status("failed to stat file"); + return Status::InternalError("failed to stat file"); } size_t file_len = statbuf.st_size; void* buf = mmap(0, file_len, PROT_READ, MAP_SHARED, fd, 0); @@ -274,7 +274,7 @@ Status FileUtils::md5sum(const std::string& file, std::string* md5sum) { ss >> *md5sum; close(fd); - return Status::OK; + return Status::OK(); } bool FileUtils::check_exist(const std::string& path) { diff --git a/be/src/util/file_utils.h b/be/src/util/file_utils.h index fca04f41b8..dd0343f7db 100644 --- a/be/src/util/file_utils.h +++ b/be/src/util/file_utils.h @@ -32,7 +32,7 @@ public: // if dir's parent directory doesn't exist // // RETURNS: - // Status::OK if create directory success or directory already exists + // Status::OK() if create directory success or directory already exists static Status create_dir(const std::string& dir_path); // Delete file recursively. 
diff --git a/be/src/util/filesystem_util.cc b/be/src/util/filesystem_util.cc index 01f7fc16e5..f25ec75dae 100644 --- a/be/src/util/filesystem_util.cc +++ b/be/src/util/filesystem_util.cc @@ -45,7 +45,7 @@ Status FileSystemUtil::create_directory(const string& directory) { std::stringstream error_msg; error_msg << "Encountered error checking existence of directory: " << directory << ": " << errcode.message(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } if (exists) { // Attempt to remove the directory and its contents so that we can create a fresh @@ -54,16 +54,16 @@ Status FileSystemUtil::create_directory(const string& directory) { if (errcode != errc::success) { std::stringstream error_msg; error_msg << "Encountered error removing directory " << directory << errcode.message(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } } filesystem::create_directories(directory, errcode); if (errcode != errc::success) { std::stringstream error_msg; error_msg << "Encountered error creating directory " << directory << errcode.message(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } - return Status::OK; + return Status::OK(); } Status FileSystemUtil::remove_paths(const vector& directories) { @@ -74,11 +74,11 @@ Status FileSystemUtil::remove_paths(const vector& directories) { std::stringstream error_msg; error_msg << "Encountered error removing directory " << directories[i] << ": " << errcode.message(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } } - return Status::OK; + return Status::OK(); } Status FileSystemUtil::create_file(const string& file_path) { @@ -89,7 +89,7 @@ Status FileSystemUtil::create_file(const string& file_path) { error_msg << "Create file " << file_path.c_str() << " failed with errno=" << errno << "description=" << get_str_err_msg(); - return Status(error_msg.str()); + return 
Status::InternalError(error_msg.str()); } int success = close(fd); @@ -98,10 +98,10 @@ Status FileSystemUtil::create_file(const string& file_path) { error_msg << "Close file " << file_path.c_str() << " failed with errno=" << errno << " description=" << get_str_err_msg(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } - return Status::OK; + return Status::OK(); } Status FileSystemUtil::resize_file(const string& file_path, int64_t trunc_len) { @@ -110,10 +110,10 @@ Status FileSystemUtil::resize_file(const string& file_path, int64_t trunc_len) { std::stringstream error_msg; error_msg << "Truncate file " << file_path << " to length " << trunc_len << " failed with " << errno << " (" << get_str_err_msg() << ")"; - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } - return Status::OK; + return Status::OK(); } Status FileSystemUtil::verify_is_directory(const string& directory_path) { @@ -123,26 +123,26 @@ Status FileSystemUtil::verify_is_directory(const string& directory_path) { std::stringstream error_msg; error_msg << "Encountered exception while verifying existence of directory path " << directory_path << ": " << errcode.message(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } if (!exists) { std::stringstream error_msg; error_msg << "Directory path " << directory_path << " does not exist "; - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } bool is_dir = filesystem::is_directory(directory_path, errcode); if (errcode != errc::success) { std::stringstream error_msg; error_msg << "Encountered exception while verifying existence of directory path " << directory_path << ": " << errcode.message(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } if (!is_dir) { std::stringstream error_msg; error_msg << "Path " << directory_path << " is not a directory"; - return Status(error_msg.str()); + return 
Status::InternalError(error_msg.str()); } - return Status::OK; + return Status::OK(); } Status FileSystemUtil::get_space_available(const string& directory_path, @@ -153,10 +153,10 @@ Status FileSystemUtil::get_space_available(const string& directory_path, std::stringstream error_msg; error_msg << "Encountered exception while checking available space for path " << directory_path << ": " << errcode.message(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } *available_bytes = info.available; - return Status::OK; + return Status::OK(); } uint64_t FileSystemUtil::max_num_file_handles() { diff --git a/be/src/util/filesystem_util.h b/be/src/util/filesystem_util.h index ef7096c0b7..5aaf468a85 100644 --- a/be/src/util/filesystem_util.h +++ b/be/src/util/filesystem_util.h @@ -30,7 +30,7 @@ class FileSystemUtil { public: // Create the specified directory and any ancestor directories that do not exist yet. // The directory and its contents are destroyed if it already exists. - // Returns Status::OK if successful, or a runtime error with a message otherwise. + // Returns Status::OK() if successful, or a runtime error with a message otherwise. static Status create_directory(const std::string& directory); // Create a file at the specified path. @@ -43,7 +43,7 @@ public: static Status remove_paths(const std::vector& directories); // Verify that the specified path is an existing directory. - // Returns Status::OK if it is, or a runtime error with a message otherwise. + // Returns Status::OK() if it is, or a runtime error with a message otherwise. 
static Status verify_is_directory(const std::string& directory_path); // Returns the space available on the file system containing 'directory_path' diff --git a/be/src/util/frontend_helper.cpp b/be/src/util/frontend_helper.cpp index ddc36befc0..b7bd274a4c 100644 --- a/be/src/util/frontend_helper.cpp +++ b/be/src/util/frontend_helper.cpp @@ -81,10 +81,9 @@ Status FrontendHelper::rpc( client.reopen(timeout_ms); LOG(WARNING) << "call frontend service failed, address=" << address << ", reason=" << e.what(); - return Status(TStatusCode::THRIFT_RPC_ERROR, - "failed to call frontend service", false); + return Status::ThriftRpcError("failed to call frontend service"); } - return Status::OK; + return Status::OK(); } } diff --git a/be/src/util/load_error_hub.cpp b/be/src/util/load_error_hub.cpp index 671ab8cf66..52ae317332 100644 --- a/be/src/util/load_error_hub.cpp +++ b/be/src/util/load_error_hub.cpp @@ -36,7 +36,7 @@ Status LoadErrorHub::create_hub( tmp_hub = new NullLoadErrorHub(); tmp_hub->prepare(); hub->reset(tmp_hub); - return Status::OK; + return Status::OK(); } VLOG_ROW << "create_hub: " << apache::thrift::ThriftDebugString(*t_hub_info).c_str(); @@ -49,7 +49,7 @@ Status LoadErrorHub::create_hub( hub->reset(tmp_hub); break; #else - return Status("Don't support MySQL table, you should rebuild Doris with WITH_MYSQL option ON"); + return Status::InternalError("Don't support MySQL table, you should rebuild Doris with WITH_MYSQL option ON"); #endif case TErrorHubType::BROKER: { // the origin file name may contains __shard_0/xxx @@ -70,10 +70,10 @@ Status LoadErrorHub::create_hub( default: std::stringstream err; err << "Unknown hub type." 
<< t_hub_info->type; - return Status(err.str()); + return Status::InternalError(err.str()); } - return Status::OK; + return Status::OK(); } } // end namespace doris diff --git a/be/src/util/mysql_load_error_hub.cpp b/be/src/util/mysql_load_error_hub.cpp index 1d13bbeca1..da6004b70c 100644 --- a/be/src/util/mysql_load_error_hub.cpp +++ b/be/src/util/mysql_load_error_hub.cpp @@ -32,7 +32,7 @@ MysqlLoadErrorHub::~MysqlLoadErrorHub() { Status MysqlLoadErrorHub::prepare() { _is_valid = true; - return Status::OK; + return Status::OK(); } Status MysqlLoadErrorHub::export_error(const ErrorMsg& error_msg) { @@ -40,7 +40,7 @@ Status MysqlLoadErrorHub::export_error(const ErrorMsg& error_msg) { ++_total_error_num; if (!_is_valid) { - return Status::OK; + return Status::OK(); } _error_msgs.push(error_msg); @@ -48,21 +48,21 @@ Status MysqlLoadErrorHub::export_error(const ErrorMsg& error_msg) { RETURN_IF_ERROR(write_mysql()); } - return Status::OK; + return Status::OK(); } Status MysqlLoadErrorHub::close() { std::lock_guard lock(_mtx); if (!_is_valid) { - return Status::OK; + return Status::OK(); } if (!_error_msgs.empty()) { RETURN_IF_ERROR(write_mysql()); } - return Status::OK; + return Status::OK(); } Status MysqlLoadErrorHub::write_mysql() { @@ -93,7 +93,7 @@ Status MysqlLoadErrorHub::write_mysql() { VLOG_PROGRESS << "mysql query success. 
query =" << sql_stream.str(); - return Status::OK; + return Status::OK(); } Status MysqlLoadErrorHub::gen_sql(MYSQL* my_conn, @@ -112,14 +112,14 @@ Status MysqlLoadErrorHub::gen_sql(MYSQL* my_conn, (*sql_stream) << "insert into " << _info.table << " (job_id, error_msg) values(" << error_msg.job_id << ", '" << sql_start << "'); "; - return Status::OK; + return Status::OK(); } Status MysqlLoadErrorHub::open_mysql_conn(MYSQL** my_conn) { *my_conn = mysql_init(nullptr); if (nullptr == *my_conn) { LOG(WARNING) << "load error export's mysql init failed."; - return Status("mysql init failed."); + return Status::InternalError("mysql init failed."); } VLOG_ROW << "MysqlLoadErrorHub::init"; @@ -133,14 +133,14 @@ Status MysqlLoadErrorHub::open_mysql_conn(MYSQL** my_conn) { return error_status("loal error mysql real connect failed.", *my_conn); } - return Status::OK; + return Status::OK(); } Status MysqlLoadErrorHub::error_status(const std::string& prefix, MYSQL* my_conn) { std::stringstream msg; msg << prefix << " Err: " << mysql_error(my_conn); LOG(WARNING) << msg.str(); - return Status(msg.str()); + return Status::InternalError(msg.str()); } std::string MysqlLoadErrorHub::debug_string() const { diff --git a/be/src/util/network_util.cpp b/be/src/util/network_util.cpp index 4de90bb0db..47511a4ed0 100644 --- a/be/src/util/network_util.cpp +++ b/be/src/util/network_util.cpp @@ -59,11 +59,11 @@ Status get_hostname(std::string* hostname) { if (ret != 0) { std::stringstream ss; ss << "Could not get hostname: errno: " << errno; - return Status(ss.str()); + return Status::InternalError(ss.str()); } *hostname = std::string(name); - return Status::OK; + return Status::OK(); } Status hostname_to_ip_addrs(const std::string& name, std::vector* addresses) { @@ -77,7 +77,7 @@ Status hostname_to_ip_addrs(const std::string& name, std::vector* a if (getaddrinfo(name.c_str(), NULL, &hints, &addr_info) != 0) { std::stringstream ss; ss << "Could not find IPv4 address for: " << name; - return 
Status(ss.str()); + return Status::InternalError(ss.str()); } addrinfo* it = addr_info; @@ -91,7 +91,7 @@ Status hostname_to_ip_addrs(const std::string& name, std::vector* a std::stringstream ss; ss << "Could not convert IPv4 address for: " << name; freeaddrinfo(addr_info); - return Status(ss.str()); + return Status::InternalError(ss.str()); } addresses->push_back(std::string(addr_buf)); @@ -99,7 +99,7 @@ Status hostname_to_ip_addrs(const std::string& name, std::vector* a } freeaddrinfo(addr_info); - return Status::OK; + return Status::OK(); } bool find_first_non_localhost(const std::vector& addresses, std::string* addr) { @@ -119,7 +119,7 @@ Status get_hosts_v4(std::vector* hosts) { std::stringstream ss; char buf[64]; ss << "getifaddrs failed because " << strerror_r(errno, buf, sizeof(buf)); - return Status(ss.str()); + return Status::InternalError(ss.str()); } for (ifaddrs* if_addr = if_addrs; if_addr != nullptr; if_addr = if_addr->ifa_next) { @@ -147,7 +147,7 @@ Status get_hosts_v4(std::vector* hosts) { freeifaddrs(if_addrs); } - return Status::OK; + return Status::OK(); } TNetworkAddress make_network_address(const std::string& hostname, int port) { @@ -164,7 +164,7 @@ Status get_inet_interfaces(std::vector* interfaces, bool include_ip char buf[64]; ss << "getifaddrs failed, errno:" << errno << ", message" << strerror_r(errno, buf, sizeof(buf)); - return Status(ss.str()); + return Status::InternalError(ss.str()); } for (ifaddrs* if_addr = if_addrs; if_addr != nullptr; if_addr = if_addr->ifa_next) { @@ -179,7 +179,7 @@ Status get_inet_interfaces(std::vector* interfaces, bool include_ip if (if_addrs != nullptr) { freeifaddrs(if_addrs); } - return Status::OK; + return Status::OK(); } } diff --git a/be/src/util/null_load_error_hub.cpp b/be/src/util/null_load_error_hub.cpp index b698707a90..5479b1ac18 100644 --- a/be/src/util/null_load_error_hub.cpp +++ b/be/src/util/null_load_error_hub.cpp @@ -27,18 +27,18 @@ NullLoadErrorHub::~NullLoadErrorHub() { Status 
NullLoadErrorHub::prepare() { _is_valid = true; - return Status::OK; + return Status::OK(); } Status NullLoadErrorHub::export_error(const ErrorMsg& error_msg) { std::lock_guard lock(_mtx); ++_total_error_num; - return Status::OK; + return Status::OK(); } Status NullLoadErrorHub::close() { - return Status::OK; + return Status::OK(); } std::string NullLoadErrorHub::debug_string() const { diff --git a/be/src/util/thrift_client.cpp b/be/src/util/thrift_client.cpp index 37ed7da91d..79f25da21e 100644 --- a/be/src/util/thrift_client.cpp +++ b/be/src/util/thrift_client.cpp @@ -32,10 +32,10 @@ Status ThriftClientImpl::open() { std::stringstream msg; msg << "Couldn't open transport for " << ipaddress() << ":" << port() << "(" << e.what() << ")"; - return Status(TStatusCode::THRIFT_RPC_ERROR, msg.str(), false); + return Status::ThriftRpcError(msg.str()); } - return Status::OK; + return Status::OK(); } Status ThriftClientImpl::open_with_retry(int num_tries, int wait_ms) { @@ -70,7 +70,7 @@ Status ThriftClientImpl::close() { _transport->close(); } - return Status::OK; + return Status::OK(); } } diff --git a/be/src/util/thrift_server.cpp b/be/src/util/thrift_server.cpp index 885a1ee3e4..c9b69d8974 100644 --- a/be/src/util/thrift_server.cpp +++ b/be/src/util/thrift_server.cpp @@ -115,7 +115,7 @@ Status ThriftServer::ThriftServerEventProcessor::start_and_wait_for_server() { << _thrift_server->_port << ") did not start within " << TIMEOUT_MS << "ms"; LOG(ERROR) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } } @@ -126,10 +126,10 @@ Status ThriftServer::ThriftServerEventProcessor::start_and_wait_for_server() { ss << "ThriftServer '" << _thrift_server->_name << "' (on port: " << _thrift_server->_port << ") did not start correctly "; LOG(ERROR) << ss.str(); - return Status(ss.str()); + return Status::InternalError(ss.str()); } - return Status::OK; + return Status::OK(); } void ThriftServer::ThriftServerEventProcessor::supervise() { @@ -352,7 +352,7 
@@ Status ThriftServer::start() { std::stringstream error_msg; error_msg << "Unsupported server type: " << _server_type; LOG(ERROR) << error_msg.str(); - return Status(error_msg.str()); + return Status::InternalError(error_msg.str()); } boost::shared_ptr event_processor( @@ -364,7 +364,7 @@ Status ThriftServer::start() { LOG(INFO) << "ThriftServer '" << _name << "' started on port: " << _port; DCHECK(_started); - return Status::OK; + return Status::OK(); } void ThriftServer::stop() { diff --git a/be/src/util/thrift_util.cpp b/be/src/util/thrift_util.cpp index a93e837c5d..50a9ddfd10 100644 --- a/be/src/util/thrift_util.cpp +++ b/be/src/util/thrift_util.cpp @@ -110,7 +110,7 @@ Status wait_for_server( socket.setConnTimeout(500); socket.open(); socket.close(); - return Status::OK; + return Status::OK(); } catch (apache::thrift::transport::TTransportException& e) { VLOG_QUERY << "Connection failed: " << e.what(); } @@ -123,7 +123,7 @@ Status wait_for_server( usleep(retry_interval_ms * 1000); } - return Status("Server did not come up"); + return Status::InternalError("Server did not come up"); } void t_network_address_to_string(const TNetworkAddress& address, std::string* out) { diff --git a/be/src/util/thrift_util.h b/be/src/util/thrift_util.h index 80276fb69c..01fa1b4c33 100644 --- a/be/src/util/thrift_util.h +++ b/be/src/util/thrift_util.h @@ -52,7 +52,7 @@ public: RETURN_IF_ERROR(serialize(obj, &len, &buffer)); result->resize(len); memcpy(&((*result)[0]), buffer, len); - return Status::OK; + return Status::OK(); } // serialize obj into a memory buffer. The result is returned in buffer/len. 
The @@ -66,11 +66,11 @@ public: } catch (std::exception& e) { std::stringstream msg; msg << "Couldn't serialize thrift object:\n" << e.what(); - return Status(msg.str()); + return Status::InternalError(msg.str()); } _mem_buffer->getBuffer(buffer, len); - return Status::OK; + return Status::OK(); } template @@ -81,11 +81,11 @@ public: } catch (apache::thrift::TApplicationException& e) { std::stringstream msg; msg << "Couldn't serialize thrift object:\n" << e.what(); - return Status(msg.str()); + return Status::InternalError(msg.str()); } *result = _mem_buffer->getBufferAsString(); - return Status::OK; + return Status::OK(); } template @@ -96,10 +96,10 @@ public: } catch (apache::thrift::TApplicationException& e) { std::stringstream msg; msg << "Couldn't serialize thrift object:\n" << e.what(); - return Status(msg.str()); + return Status::InternalError(msg.str()); } - return Status::OK; + return Status::OK(); } void get_buffer(uint8_t** buffer, uint32_t* length) { @@ -149,15 +149,15 @@ Status deserialize_thrift_msg( } catch (std::exception& e) { std::stringstream msg; msg << "couldn't deserialize thrift msg:\n" << e.what(); - return Status(msg.str()); + return Status::InternalError(msg.str()); } catch (...) 
{ // TODO: Find the right exception for 0 bytes - return Status("Unknown exception"); + return Status::InternalError("Unknown exception"); } uint32_t bytes_left = tmem_transport->available_read(); *len = *len - bytes_left; - return Status::OK; + return Status::OK(); } // Redirects all Thrift logging to VLOG(1) diff --git a/be/test/common/CMakeLists.txt b/be/test/common/CMakeLists.txt index 4c7e7db710..bfa958aeaf 100644 --- a/be/test/common/CMakeLists.txt +++ b/be/test/common/CMakeLists.txt @@ -19,3 +19,4 @@ set(EXECUTABLE_OUTPUT_PATH "${BUILD_DIR}/test/common") ADD_BE_TEST(resource_tls_test) +ADD_BE_TEST(status_test) diff --git a/be/test/common/status_test.cpp b/be/test/common/status_test.cpp new file mode 100644 index 0000000000..499ad14522 --- /dev/null +++ b/be/test/common/status_test.cpp @@ -0,0 +1,83 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#include "common/status.h" + +#include + +#include "gen_cpp/Types_types.h" +#include "util/logging.h" + +namespace doris { + +class StatusTest : public testing::Test { +}; + +TEST_F(StatusTest, OK) { + // default + Status st; + ASSERT_TRUE(st.ok()); + ASSERT_EQ("", st.get_error_msg()); + ASSERT_EQ("OK", st.to_string()); + // copy + { + Status other = st; + ASSERT_TRUE(other.ok()); + } + // move assign + st = Status(); + ASSERT_TRUE(st.ok()); + // move construct + { + Status other = std::move(st); + ASSERT_TRUE(other.ok()); + } +} + +TEST_F(StatusTest, Error) { + // default + Status st = Status::InternalError("123"); + ASSERT_FALSE(st.ok()); + ASSERT_EQ("123", st.get_error_msg()); + ASSERT_EQ("Internal error: 123", st.to_string()); + // copy + { + Status other = st; + ASSERT_FALSE(other.ok()); + ASSERT_EQ("123", st.get_error_msg()); + } + // move assign + st = Status::InternalError("456"); + ASSERT_FALSE(st.ok()); + ASSERT_EQ("456", st.get_error_msg()); + // move construct + { + Status other = std::move(st); + ASSERT_FALSE(other.ok()); + ASSERT_EQ("456", other.get_error_msg()); + ASSERT_EQ("Internal error: 456", other.to_string()); + ASSERT_TRUE(st.ok()); + ASSERT_EQ("OK", st.to_string()); + } +} + +} + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/be/test/exec/olap_table_sink_test.cpp b/be/test/exec/olap_table_sink_test.cpp index 8709d1ad47..a63278803e 100644 --- a/be/test/exec/olap_table_sink_test.cpp +++ b/be/test/exec/olap_table_sink_test.cpp @@ -43,7 +43,7 @@ public: OlapTableSinkTest() { } virtual ~OlapTableSinkTest() { } void SetUp() override { - k_add_batch_status = Status::OK; + k_add_batch_status = Status::OK(); _env._thread_mgr = new ThreadResourceMgr(); _env._master_info = new TMasterInfo(); @@ -412,7 +412,7 @@ TEST_F(OlapTableSinkTest, normal) { st = sink.send(&state, &batch); ASSERT_TRUE(st.ok()); // close - st = sink.close(&state, Status::OK); + st = sink.close(&state, 
Status::OK()); ASSERT_TRUE(st.ok()); // each node has a eof @@ -546,7 +546,7 @@ TEST_F(OlapTableSinkTest, convert) { st = sink.send(&state, &batch); ASSERT_TRUE(st.ok()); // close - st = sink.close(&state, Status::OK); + st = sink.close(&state, Status::OK()); ASSERT_TRUE(st.ok()); // each node has a eof @@ -821,11 +821,11 @@ TEST_F(OlapTableSinkTest, add_batch_failed) { memcpy(str_val->ptr, "abc", str_val->len); batch.commit_last_row(); } - k_add_batch_status = Status("dummy failed"); + k_add_batch_status = Status::InternalError("dummy failed"); st = sink.send(&state, &batch); ASSERT_TRUE(st.ok()); // close - st = sink.close(&state, Status::OK); + st = sink.close(&state, Status::OK()); ASSERT_FALSE(st.ok()); server->Stop(100); @@ -918,7 +918,7 @@ TEST_F(OlapTableSinkTest, decimal) { st = sink.send(&state, &batch); ASSERT_TRUE(st.ok()); // close - st = sink.close(&state, Status::OK); + st = sink.close(&state, Status::OK()); ASSERT_TRUE(st.ok()); ASSERT_EQ(2, output_set.size()); diff --git a/be/test/exec/schema_scanner/schema_columns_scanner_test.cpp b/be/test/exec/schema_scanner/schema_columns_scanner_test.cpp index 204e8e50c8..b9cfc173e3 100644 --- a/be/test/exec/schema_scanner/schema_columns_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_columns_scanner_test.cpp @@ -67,9 +67,9 @@ void init_mock() { db_num = 0; table_num = 0; desc_num = 0; - s_db_result = Status::OK; - s_table_result = Status::OK; - s_desc_result = Status::OK; + s_db_result = Status::OK(); + s_table_result = Status::OK(); + s_desc_result = Status::OK(); } class SchemaColumnsScannerTest : public testing::Test { @@ -166,7 +166,7 @@ TEST_F(SchemaColumnsScannerTest, table_fail) { ASSERT_TRUE(status.ok()); Tuple *tuple = (Tuple *)g_tuple_buf; bool eos = false; - s_table_result = Status("get table failed"); + s_table_result = Status::InternalError("get table failed"); status = scanner.get_next_row(tuple, &_mem_pool, &eos); ASSERT_FALSE(status.ok()); } @@ -183,7 +183,7 @@ 
TEST_F(SchemaColumnsScannerTest, desc_fail) { ASSERT_TRUE(status.ok()); Tuple *tuple = (Tuple *)g_tuple_buf; bool eos = false; - s_desc_result = Status("get desc failed"); + s_desc_result = Status::InternalError("get desc failed"); status = scanner.get_next_row(tuple, &_mem_pool, &eos); ASSERT_FALSE(status.ok()); } @@ -195,7 +195,7 @@ TEST_F(SchemaColumnsScannerTest, start_fail) { SchemaColumnsScanner scanner; Status status = scanner.init(&_param, &_obj_pool); ASSERT_TRUE(status.ok()); - s_db_result = Status("get db failed."); + s_db_result = Status::InternalError("get db failed."); status = scanner.start((RuntimeState *)1); ASSERT_FALSE(status.ok()); } diff --git a/be/test/exec/schema_scanner/schema_create_table_scanner_test.cpp b/be/test/exec/schema_scanner/schema_create_table_scanner_test.cpp index 32ea49030a..af06cd43cc 100644 --- a/be/test/exec/schema_scanner/schema_create_table_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_create_table_scanner_test.cpp @@ -67,9 +67,9 @@ void init_mock() { db_num = 0; table_num = 0; desc_num = 0; - s_db_result = Status::OK; - s_table_result = Status::OK; - s_desc_result = Status::OK; + s_db_result = Status::OK(); + s_table_result = Status::OK(); + s_desc_result = Status::OK(); } class SchemaCreateTableScannerTest : public testing::Test { @@ -166,7 +166,7 @@ TEST_F(SchemaCreateTableScannerTest, table_fail) { ASSERT_TRUE(status.ok()); Tuple *tuple = (Tuple *)g_tuple_buf; bool eos = false; - s_table_result = Status("get table failed"); + s_table_result = Status::InternalError("get table failed"); status = scanner.get_next_row(tuple, &_mem_pool, &eos); ASSERT_FALSE(status.ok()); } @@ -183,7 +183,7 @@ TEST_F(SchemaCreateTableScannerTest, desc_fail) { ASSERT_TRUE(status.ok()); Tuple *tuple = (Tuple *)g_tuple_buf; bool eos = false; - s_desc_result = Status("get desc failed"); + s_desc_result = Status::InternalError("get desc failed"); status = scanner.get_next_row(tuple, &_mem_pool, &eos); ASSERT_FALSE(status.ok()); } @@ 
-195,7 +195,7 @@ TEST_F(SchemaCreateTableScannerTest, start_fail) { SchemaCreateTableScanner scanner; Status status = scanner.init(&_param, &_obj_pool); ASSERT_TRUE(status.ok()); - s_db_result = Status("get db failed."); + s_db_result = Status::InternalError("get db failed."); status = scanner.start((RuntimeState *)1); ASSERT_FALSE(status.ok()); } diff --git a/be/test/exec/schema_scanner/schema_open_tables_scanner_test.cpp b/be/test/exec/schema_scanner/schema_open_tables_scanner_test.cpp index e1b8e97793..387afacffa 100644 --- a/be/test/exec/schema_scanner/schema_open_tables_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_open_tables_scanner_test.cpp @@ -67,9 +67,9 @@ void init_mock() { db_num = 0; table_num = 0; desc_num = 0; - s_db_result = Status::OK; - s_table_result = Status::OK; - s_desc_result = Status::OK; + s_db_result = Status::OK(); + s_table_result = Status::OK(); + s_desc_result = Status::OK(); } class SchemaOpenTablesScannerTest : public testing::Test { @@ -166,7 +166,7 @@ TEST_F(SchemaOpenTablesScannerTest, table_fail) { ASSERT_TRUE(status.ok()); Tuple *tuple = (Tuple *)g_tuple_buf; bool eos = false; - s_table_result = Status("get table failed"); + s_table_result = Status::InternalError("get table failed"); status = scanner.get_next_row(tuple, &_mem_pool, &eos); ASSERT_FALSE(status.ok()); } @@ -183,7 +183,7 @@ TEST_F(SchemaOpenTablesScannerTest, desc_fail) { ASSERT_TRUE(status.ok()); Tuple *tuple = (Tuple *)g_tuple_buf; bool eos = false; - s_desc_result = Status("get desc failed"); + s_desc_result = Status::InternalError("get desc failed"); status = scanner.get_next_row(tuple, &_mem_pool, &eos); ASSERT_FALSE(status.ok()); } @@ -195,7 +195,7 @@ TEST_F(SchemaOpenTablesScannerTest, start_fail) { SchemaOpenTablesScanner scanner; Status status = scanner.init(&_param, &_obj_pool); ASSERT_TRUE(status.ok()); - s_db_result = Status("get db failed."); + s_db_result = Status::InternalError("get db failed."); status = scanner.start((RuntimeState *)1); 
ASSERT_FALSE(status.ok()); } diff --git a/be/test/exec/schema_scanner/schema_schemata_scanner_test.cpp b/be/test/exec/schema_scanner/schema_schemata_scanner_test.cpp index ae5a1b5fe9..cd787b57d6 100644 --- a/be/test/exec/schema_scanner/schema_schemata_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_schemata_scanner_test.cpp @@ -67,9 +67,9 @@ void init_mock() { db_num = 0; table_num = 0; desc_num = 0; - s_db_result = Status::OK; - s_table_result = Status::OK; - s_desc_result = Status::OK; + s_db_result = Status::OK(); + s_table_result = Status::OK(); + s_desc_result = Status::OK(); } class SchemaSchemataScannerTest : public testing::Test { @@ -161,7 +161,7 @@ TEST_F(SchemaSchemataScannerTest, start_fail) { SchemaSchemataScanner scanner; Status status = scanner.init(&_param, &_obj_pool); ASSERT_TRUE(status.ok()); - s_db_result = Status("get db failed."); + s_db_result = Status::InternalError("get db failed."); status = scanner.start((RuntimeState *)1); ASSERT_FALSE(status.ok()); } diff --git a/be/test/exec/schema_scanner/schema_table_names_scanner_test.cpp b/be/test/exec/schema_scanner/schema_table_names_scanner_test.cpp index 5c978b97b0..d5660a7fe7 100644 --- a/be/test/exec/schema_scanner/schema_table_names_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_table_names_scanner_test.cpp @@ -67,9 +67,9 @@ void init_mock() { db_num = 0; table_num = 0; desc_num = 0; - s_db_result = Status::OK; - s_table_result = Status::OK; - s_desc_result = Status::OK; + s_db_result = Status::OK(); + s_table_result = Status::OK(); + s_desc_result = Status::OK(); } class SchemaTableNamesScannerTest : public testing::Test { @@ -166,7 +166,7 @@ TEST_F(SchemaTableNamesScannerTest, table_fail) { ASSERT_TRUE(status.ok()); Tuple *tuple = (Tuple *)g_tuple_buf; bool eos = false; - s_table_result = Status("get table failed"); + s_table_result = Status::InternalError("get table failed"); status = scanner.get_next_row(tuple, &_mem_pool, &eos); ASSERT_FALSE(status.ok()); } @@ 
-178,7 +178,7 @@ TEST_F(SchemaTableNamesScannerTest, start_fail) { SchemaTableNamesScanner scanner; Status status = scanner.init(&_param, &_obj_pool); ASSERT_TRUE(status.ok()); - s_db_result = Status("get db failed."); + s_db_result = Status::InternalError("get db failed."); status = scanner.start((RuntimeState *)1); ASSERT_FALSE(status.ok()); } diff --git a/be/test/exec/schema_scanner/schema_tables_scanner_test.cpp b/be/test/exec/schema_scanner/schema_tables_scanner_test.cpp index deda22b82e..6f9b0c9bbd 100644 --- a/be/test/exec/schema_scanner/schema_tables_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_tables_scanner_test.cpp @@ -67,9 +67,9 @@ void init_mock() { db_num = 0; table_num = 0; desc_num = 0; - s_db_result = Status::OK; - s_table_result = Status::OK; - s_desc_result = Status::OK; + s_db_result = Status::OK(); + s_table_result = Status::OK(); + s_desc_result = Status::OK(); } class SchemaTablesScannerTest : public testing::Test { @@ -166,7 +166,7 @@ TEST_F(SchemaTablesScannerTest, table_fail) { ASSERT_TRUE(status.ok()); Tuple *tuple = (Tuple *)g_tuple_buf; bool eos = false; - s_table_result = Status("get table failed"); + s_table_result = Status::InternalError("get table failed"); status = scanner.get_next_row(tuple, &_mem_pool, &eos); ASSERT_FALSE(status.ok()); } @@ -183,7 +183,7 @@ TEST_F(SchemaTablesScannerTest, desc_fail) { ASSERT_TRUE(status.ok()); Tuple *tuple = (Tuple *)g_tuple_buf; bool eos = false; - s_desc_result = Status("get desc failed"); + s_desc_result = Status::InternalError("get desc failed"); status = scanner.get_next_row(tuple, &_mem_pool, &eos); ASSERT_FALSE(status.ok()); } @@ -195,7 +195,7 @@ TEST_F(SchemaTablesScannerTest, start_fail) { SchemaTablesScanner scanner; Status status = scanner.init(&_param, &_obj_pool); ASSERT_TRUE(status.ok()); - s_db_result = Status("get db failed."); + s_db_result = Status::InternalError("get db failed."); status = scanner.start((RuntimeState *)1); ASSERT_FALSE(status.ok()); } diff --git 
a/be/test/http/stream_load_test.cpp b/be/test/http/stream_load_test.cpp index 3090ae42ed..142e339ee5 100644 --- a/be/test/http/stream_load_test.cpp +++ b/be/test/http/stream_load_test.cpp @@ -74,7 +74,7 @@ public: k_stream_load_commit_result = TLoadTxnCommitResult(); k_stream_load_rollback_result = TLoadTxnRollbackResult(); k_stream_load_put_result = TStreamLoadPutResult(); - k_stream_load_plan_status = Status::OK; + k_stream_load_plan_status = Status::OK(); k_response_str = ""; config::streaming_load_max_mb = 1; @@ -183,7 +183,7 @@ TEST_F(StreamLoadActionTest, put_fail) { request._headers.emplace(HttpHeaders::AUTHORIZATION, "Basic cm9vdDo="); request._headers.emplace(HttpHeaders::CONTENT_LENGTH, "16"); - Status status("TestFail"); + Status status = Status::InternalError("TestFail"); status.to_thrift(&k_stream_load_put_result.status); action.on_header(&request); action.handle(&request); @@ -203,7 +203,7 @@ TEST_F(StreamLoadActionTest, commit_fail) { request._ev_req = &ev_req; request._headers.emplace(HttpHeaders::AUTHORIZATION, "Basic cm9vdDo="); request._headers.emplace(HttpHeaders::CONTENT_LENGTH, "16"); - Status status("TestFail"); + Status status = Status::InternalError("TestFail"); status.to_thrift(&k_stream_load_commit_result.status); action.on_header(&request); action.handle(&request); @@ -223,7 +223,7 @@ TEST_F(StreamLoadActionTest, begin_fail) { request._ev_req = &ev_req; request._headers.emplace(HttpHeaders::AUTHORIZATION, "Basic cm9vdDo="); request._headers.emplace(HttpHeaders::CONTENT_LENGTH, "16"); - Status status("TestFail"); + Status status = Status::InternalError("TestFail"); status.to_thrift(&k_stream_load_begin_result.status); action.on_header(&request); action.handle(&request); @@ -260,7 +260,7 @@ TEST_F(StreamLoadActionTest, plan_fail) { request._ev_req = &ev_req; request._headers.emplace(HttpHeaders::AUTHORIZATION, "Basic cm9vdDo="); request._headers.emplace(HttpHeaders::CONTENT_LENGTH, "16"); - k_stream_load_plan_status = Status("TestFail"); + 
k_stream_load_plan_status = Status::InternalError("TestFail"); action.on_header(&request); action.handle(&request); diff --git a/be/test/runtime/buffer_control_block_test.cpp b/be/test/runtime/buffer_control_block_test.cpp index 765802bb66..962dd98890 100644 --- a/be/test/runtime/buffer_control_block_test.cpp +++ b/be/test/runtime/buffer_control_block_test.cpp @@ -60,7 +60,7 @@ TEST_F(BufferControlBlockTest, get_one_after_close) { BufferControlBlock control_block(TUniqueId(), 1024); ASSERT_TRUE(control_block.init().ok()); - control_block.close(Status::OK); + control_block.close(Status::OK()); TFetchDataResult get_result; ASSERT_TRUE(control_block.get_batch(&get_result).ok()); ASSERT_TRUE(get_result.eos); @@ -162,7 +162,7 @@ TEST_F(BufferControlBlockTest, get_then_add) { void* close_thread(void* param) { BufferControlBlock* control_block = static_cast(param); sleep(1); - control_block->close(Status::OK); + control_block->close(Status::OK()); return NULL; } diff --git a/be/test/runtime/data_spliter_test.cpp b/be/test/runtime/data_spliter_test.cpp index 97acad8940..c5db78729a 100644 --- a/be/test/runtime/data_spliter_test.cpp +++ b/be/test/runtime/data_spliter_test.cpp @@ -263,7 +263,7 @@ TEST_F(DataSplitTest, NoData) { batch.commit_last_row(); } ASSERT_TRUE(spliter.send(_state, &batch).ok()); - ASSERT_TRUE(spliter.close(_state, Status::OK).ok()); + ASSERT_TRUE(spliter.close(_state, Status::OK()).ok()); } } diff --git a/be/test/runtime/data_stream_test.cpp b/be/test/runtime/data_stream_test.cpp index 6b030617e2..253cc7afae 100644 --- a/be/test/runtime/data_stream_test.cpp +++ b/be/test/runtime/data_stream_test.cpp @@ -587,7 +587,7 @@ protected: } VLOG_QUERY << "closing sender" << sender_num; - info.status = sender.close(&state, Status::OK); + info.status = sender.close(&state, Status::OK()); info.num_bytes_sent = sender.get_num_data_bytes_sent(); batch->reset(); diff --git a/be/test/runtime/disk_io_mgr_test.cpp b/be/test/runtime/disk_io_mgr_test.cpp index 
bad82aa358..e23b18c484 100644 --- a/be/test/runtime/disk_io_mgr_test.cpp +++ b/be/test/runtime/disk_io_mgr_test.cpp @@ -223,7 +223,7 @@ TEST_F(DiskIoMgrTest, SingleWriter) { DiskIoMgr::WriteRange** new_range = _pool->add(new DiskIoMgr::WriteRange*); DiskIoMgr::WriteRange::WriteDoneCallback callback = bind(mem_fn(&DiskIoMgrTest::write_validate_callback), this, num_ranges, - new_range, read_io_mgr.get(), reader, data, Status::OK, _1); + new_range, read_io_mgr.get(), reader, data, Status::OK(), _1); *new_range = _pool->add(new DiskIoMgr::WriteRange(tmp_file, cur_offset, num_ranges % num_disks, callback)); (*new_range)->set_data(reinterpret_cast(data), sizeof(int32_t)); @@ -267,7 +267,7 @@ TEST_F(DiskIoMgrTest, InvalidWrite) { DiskIoMgr::WriteRange::WriteDoneCallback callback = bind(mem_fn(&DiskIoMgrTest::write_validate_callback), this, 2, new_range, (DiskIoMgr*)NULL, (DiskIoMgr::RequestContext*)NULL, - data, Status("Test Failure"), _1); + data, Status::InternalError("Test Failure"), _1); *new_range = _pool->add(new DiskIoMgr::WriteRange(tmp_file, rand(), 0, callback)); (*new_range)->set_data(reinterpret_cast(data), sizeof(int32_t)); @@ -285,7 +285,7 @@ TEST_F(DiskIoMgrTest, InvalidWrite) { new_range = _pool->add(new DiskIoMgr::WriteRange*); callback = bind(mem_fn(&DiskIoMgrTest::write_validate_callback), this, 2, new_range, (DiskIoMgr*)NULL, (DiskIoMgr::RequestContext*)NULL, - data, Status("Test Failure"), _1); + data, Status::InternalError("Test Failure"), _1); *new_range = _pool->add(new DiskIoMgr::WriteRange(tmp_file, -1, 0, callback)); (*new_range)->set_data(reinterpret_cast(data), sizeof(int32_t)); @@ -334,11 +334,11 @@ TEST_F(DiskIoMgrTest, SingleWriterCancel) { status = io_mgr.init(&mem_tracker); DiskIoMgr::RequestContext* writer; io_mgr.register_context(&writer, &mem_tracker); - Status validate_status = Status::OK; + Status validate_status = Status::OK(); for (int i = 0; i < num_ranges; ++i) { if (i == num_ranges_before_cancel) { io_mgr.cancel_context(writer); 
- validate_status = Status::CANCELLED; + validate_status = Status::Cancelled(""); } int32_t* data = _pool->add(new int32_t); *data = rand(); @@ -346,7 +346,7 @@ TEST_F(DiskIoMgrTest, SingleWriterCancel) { DiskIoMgr::WriteRange::WriteDoneCallback callback = bind(mem_fn(&DiskIoMgrTest::write_validate_callback), this, num_ranges_before_cancel, new_range, read_io_mgr.get(), reader, data, - Status::CANCELLED, _1); + Status::Cancelled(""), _1); *new_range = _pool->add(new DiskIoMgr::WriteRange(tmp_file, cur_offset, num_ranges % num_disks, callback)); (*new_range)->set_data(reinterpret_cast(data), sizeof(int32_t)); @@ -418,7 +418,7 @@ TEST_F(DiskIoMgrTest, SingleReader) { thread_group threads; for (int i = 0; i < num_read_threads; ++i) { threads.add_thread(new thread(scan_range_thread, &io_mgr, reader, data, - len, Status::OK, 0, &num_ranges_processed)); + len, Status::OK(), 0, &num_ranges_processed)); } threads.join_all(); @@ -482,7 +482,7 @@ TEST_F(DiskIoMgrTest, AddScanRangeTest) { ASSERT_TRUE(status.ok()); // Read a couple of them - scan_range_thread(&io_mgr, reader, data, strlen(data), Status::OK, 2, + scan_range_thread(&io_mgr, reader, data, strlen(data), Status::OK(), 2, &num_ranges_processed); // Issue second half @@ -493,7 +493,7 @@ TEST_F(DiskIoMgrTest, AddScanRangeTest) { thread_group threads; for (int i = 0; i < 3; ++i) { threads.add_thread(new thread(scan_range_thread, &io_mgr, reader, data, - strlen(data), Status::CANCELLED, 0, &num_ranges_processed)); + strlen(data), Status::Cancelled(""), 0, &num_ranges_processed)); } threads.join_all(); @@ -561,7 +561,7 @@ TEST_F(DiskIoMgrTest, SyncReadTest) { thread_group threads; for (int i = 0; i < 5; ++i) { threads.add_thread(new thread(scan_range_thread, &io_mgr, reader, data, - strlen(data), Status::OK, 0, &num_ranges_processed)); + strlen(data), Status::OK(), 0, &num_ranges_processed)); } // Issue some more sync ranges @@ -627,7 +627,7 @@ TEST_F(DiskIoMgrTest, SingleReaderCancel) { int num_succesful_ranges = 
ranges.size() / 2; // Read half the ranges for (int i = 0; i < num_succesful_ranges; ++i) { - scan_range_thread(&io_mgr, reader, data, strlen(data), Status::OK, 1, + scan_range_thread(&io_mgr, reader, data, strlen(data), Status::OK(), 1, &num_ranges_processed); } EXPECT_EQ(num_ranges_processed, num_succesful_ranges); @@ -636,7 +636,7 @@ TEST_F(DiskIoMgrTest, SingleReaderCancel) { thread_group threads; for (int i = 0; i < 3; ++i) { threads.add_thread(new thread(scan_range_thread, &io_mgr, reader, data, - strlen(data), Status::CANCELLED, 0, &num_ranges_processed)); + strlen(data), Status::Cancelled(""), 0, &num_ranges_processed)); } io_mgr.cancel_context(reader); @@ -696,7 +696,7 @@ TEST_F(DiskIoMgrTest, MemTrackers) { vector buffers; AtomicInt num_ranges_processed; - scan_range_thread(&io_mgr, reader, data, strlen(data), Status::MEM_LIMIT_EXCEEDED, + scan_range_thread(&io_mgr, reader, data, strlen(data), Status::MemoryLimitExceeded("Mem"), 1, &num_ranges_processed); char result[strlen(data) + 1]; @@ -782,7 +782,7 @@ TEST_F(DiskIoMgrTest, CachedReads) { thread_group threads; for (int i = 0; i < 5; ++i) { threads.add_thread(new thread(scan_range_thread, &io_mgr, reader, data, - strlen(data), Status::OK, 0, &num_ranges_processed)); + strlen(data), Status::OK(), 0, &num_ranges_processed)); } // Issue some more sync ranges @@ -855,7 +855,7 @@ TEST_F(DiskIoMgrTest, MultipleReaderWriter) { threads.add_thread(new thread(scan_range_thread, &io_mgr, contexts[context_index], reinterpret_cast(data + (read_offset % strlen(data))), 1, - Status::OK, num_scan_ranges, &num_ranges_processed)); + Status::OK(), num_scan_ranges, &num_ranges_processed)); ++read_offset; } @@ -973,7 +973,7 @@ TEST_F(DiskIoMgrTest, MultipleReader) { for (int i = 0; i < NUM_READERS; ++i) { for (int j = 0; j < NUM_THREADS_PER_READER; ++j) { threads.add_thread(new thread(scan_range_thread, &io_mgr, readers[i], - data[i].c_str(), data[i].size(), Status::OK, 0, + data[i].c_str(), data[i].size(), Status::OK(), 0, 
&num_ranges_processed)); } } diff --git a/be/test/runtime/etl_job_mgr_test.cpp b/be/test/runtime/etl_job_mgr_test.cpp index 8135a9dca8..6a256b3a41 100644 --- a/be/test/runtime/etl_job_mgr_test.cpp +++ b/be/test/runtime/etl_job_mgr_test.cpp @@ -27,7 +27,7 @@ namespace doris { // Mock fragment mgr Status FragmentMgr::exec_plan_fragment(const TExecPlanFragmentParams& params, FinishCallback cb) { - return Status::OK; + return Status::OK(); } FragmentMgr::FragmentMgr(ExecEnv* exec_env) : @@ -69,7 +69,7 @@ TEST_F(EtlJobMgrTest, NormalCase) { // make it finishing EtlJobResult job_result; job_result.file_map["abc"] = 100L; - ASSERT_TRUE(mgr.finish_job(id, Status::OK, job_result).ok()); + ASSERT_TRUE(mgr.finish_job(id, Status::OK(), job_result).ok()); ASSERT_TRUE(mgr.get_job_state(id, &res).ok()); ASSERT_EQ(TEtlState::FINISHED, res.etl_state); ASSERT_EQ(TStatusCode::OK, res.status.status_code); @@ -127,7 +127,7 @@ TEST_F(EtlJobMgrTest, RunAfterSuccess) { // make it finishing EtlJobResult job_result; job_result.file_map["abc"] = 100L; - ASSERT_TRUE(mgr.finish_job(id, Status::OK, job_result).ok()); + ASSERT_TRUE(mgr.finish_job(id, Status::OK(), job_result).ok()); ASSERT_TRUE(mgr.get_job_state(id, &res).ok()); ASSERT_EQ(TEtlState::FINISHED, res.etl_state); ASSERT_EQ(TStatusCode::OK, res.status.status_code); @@ -162,7 +162,7 @@ TEST_F(EtlJobMgrTest, RunAfterFail) { // make it finishing EtlJobResult job_result; job_result.debug_path = "abc"; - ASSERT_TRUE(mgr.finish_job(id, Status::THRIFT_RPC_ERROR, job_result).ok()); + ASSERT_TRUE(mgr.finish_job(id, Status::ThriftRpcError("Thrift rpc error"), job_result).ok()); ASSERT_TRUE(mgr.get_job_state(id, &res).ok()); ASSERT_EQ(TEtlState::CANCELLED, res.etl_state); ASSERT_EQ(TStatusCode::OK, res.status.status_code); @@ -216,7 +216,7 @@ TEST_F(EtlJobMgrTest, FinishUnknowJob) { // make it finishing EtlJobResult job_result; job_result.debug_path = "abc"; - ASSERT_FALSE(mgr.finish_job(id, Status::THRIFT_RPC_ERROR, job_result).ok()); + 
ASSERT_FALSE(mgr.finish_job(id, Status::ThriftRpcError("Thrift rpc error"), job_result).ok()); ASSERT_TRUE(mgr.get_job_state(id, &res).ok()); ASSERT_EQ(TEtlState::CANCELLED, res.etl_state); ASSERT_EQ(TStatusCode::OK, res.status.status_code); diff --git a/be/test/runtime/export_task_mgr_test.cpp b/be/test/runtime/export_task_mgr_test.cpp index e04c332f75..9d67751f54 100644 --- a/be/test/runtime/export_task_mgr_test.cpp +++ b/be/test/runtime/export_task_mgr_test.cpp @@ -30,7 +30,7 @@ namespace doris { // Mock fragment mgr Status FragmentMgr::exec_plan_fragment(const TExecPlanFragmentParams& params, FinishCallback cb) { - return Status::OK; + return Status::OK(); } FragmentMgr::FragmentMgr(ExecEnv* exec_env) : @@ -70,7 +70,7 @@ TEST_F(ExportTaskMgrTest, NormalCase) { // make it finishing ExportTaskResult task_result; task_result.files.push_back("path/file1"); - ASSERT_TRUE(mgr.finish_task(id, Status::OK, task_result).ok()); + ASSERT_TRUE(mgr.finish_task(id, Status::OK(), task_result).ok()); ASSERT_TRUE(mgr.get_task_state(id, &res).ok()); ASSERT_EQ(TExportState::FINISHED, res.state); ASSERT_EQ(TStatusCode::OK, res.status.status_code); @@ -125,7 +125,7 @@ TEST_F(ExportTaskMgrTest, RunAfterSuccess) { // make it finishing ExportTaskResult task_result; task_result.files.push_back("path/file1"); - ASSERT_TRUE(mgr.finish_task(id, Status::OK, task_result).ok()); + ASSERT_TRUE(mgr.finish_task(id, Status::OK(), task_result).ok()); ASSERT_TRUE(mgr.get_task_state(id, &res).ok()); ASSERT_EQ(TExportState::FINISHED, res.state); ASSERT_EQ(TStatusCode::OK, res.status.status_code); @@ -159,7 +159,7 @@ TEST_F(ExportTaskMgrTest, RunAfterFail) { // make it finishing ExportTaskResult task_result; - ASSERT_TRUE(mgr.finish_task(id, Status::THRIFT_RPC_ERROR, task_result).ok()); + ASSERT_TRUE(mgr.finish_task(id, Status::ThriftRpcError("Thrift rpc error"), task_result).ok()); ASSERT_TRUE(mgr.get_task_state(id, &res).ok()); ASSERT_EQ(TExportState::CANCELLED, res.state); 
ASSERT_EQ(TStatusCode::OK, res.status.status_code); @@ -211,7 +211,7 @@ TEST_F(ExportTaskMgrTest, FinishUnknowJob) { // make it finishing ExportTaskResult task_result; - ASSERT_FALSE(mgr.finish_task(id, Status::THRIFT_RPC_ERROR, task_result).ok()); + ASSERT_FALSE(mgr.finish_task(id, Status::ThriftRpcError("Thrift rpc error"), task_result).ok()); ASSERT_TRUE(mgr.get_task_state(id, &res).ok()); ASSERT_EQ(TExportState::CANCELLED, res.state); ASSERT_EQ(TStatusCode::OK, res.status.status_code); diff --git a/be/test/runtime/fragment_mgr_test.cpp b/be/test/runtime/fragment_mgr_test.cpp index 1e4c0e6a5d..9a398cf69a 100644 --- a/be/test/runtime/fragment_mgr_test.cpp +++ b/be/test/runtime/fragment_mgr_test.cpp @@ -58,8 +58,8 @@ public: protected: virtual void SetUp() { - s_prepare_status = Status::OK; - s_open_status = Status::OK; + s_prepare_status = Status::OK(); + s_open_status = Status::OK(); LOG(INFO) << "fragment_pool_thread_num=" << config::fragment_pool_thread_num << ", pool_size=" << config::fragment_pool_queue_size; config::fragment_pool_thread_num = 32; config::fragment_pool_queue_size = 1024; @@ -111,7 +111,7 @@ TEST_F(FragmentMgrTest, CancelWithoutAdd) { } TEST_F(FragmentMgrTest, PrepareFailed) { - s_prepare_status = Status("Prepare failed."); + s_prepare_status = Status::InternalError("Prepare failed."); FragmentMgr mgr(nullptr); TExecPlanFragmentParams params; params.params.fragment_instance_id = TUniqueId(); diff --git a/be/test/runtime/result_sink_test.cpp b/be/test/runtime/result_sink_test.cpp index 98ec599644..bfe151b18c 100644 --- a/be/test/runtime/result_sink_test.cpp +++ b/be/test/runtime/result_sink_test.cpp @@ -86,7 +86,7 @@ TEST_F(ResultSinkTest, init_normal) { row_batch.add_row(); row_batch.commit_last_row(); ASSERT_TRUE(sink.send(_runtime_state, &row_batch).ok()); - ASSERT_TRUE(sink.close(_runtime_state, Status::OK).ok()); + ASSERT_TRUE(sink.close(_runtime_state, Status::OK()).ok()); } } diff --git a/be/test/util/json_util_test.cpp 
b/be/test/util/json_util_test.cpp index a8efccb4b3..5e6951b146 100644 --- a/be/test/util/json_util_test.cpp +++ b/be/test/util/json_util_test.cpp @@ -41,7 +41,7 @@ TEST_F(JsonUtilTest, success) { } TEST_F(JsonUtilTest, normal_fail) { - Status status("so bad"); + Status status = Status::InternalError("so bad"); auto str = to_json(status); @@ -52,7 +52,7 @@ TEST_F(JsonUtilTest, normal_fail) { } TEST_F(JsonUtilTest, normal_fail_str) { - Status status("\"so bad\""); + Status status = Status::InternalError("\"so bad\""); auto str = to_json(status); diff --git a/gensrc/thrift/Status.thrift b/gensrc/thrift/Status.thrift index abebc39afb..90612c3b65 100644 --- a/gensrc/thrift/Status.thrift +++ b/gensrc/thrift/Status.thrift @@ -40,7 +40,24 @@ enum TStatusCode { ES_SHARD_NOT_FOUND, ES_INVALID_CONTEXTID, ES_INVALID_OFFSET, - ES_REQUEST_ERROR + ES_REQUEST_ERROR, + + // end of file + END_OF_FILE = 30, + NOT_FOUND = 31, + CORRUPTION = 32, + INVALID_ARGUMENT = 33, + IO_ERROR = 34, + ALREADY_EXIST = 35, + NETWORK_ERROR = 36, + ILLEGAL_STATE = 37, + NOT_AUTHORIZED = 38, + ABORTED = 39, + REMOTE_ERROR = 40, + SERVICE_UNAVAILABLE = 41, + UNINITIALIZED = 42, + CONFIGURATION_ERROR = 43, + INCOMPLETE = 44 } struct TStatus { diff --git a/run-ut.sh b/run-ut.sh index e96df306df..c85640a031 100755 --- a/run-ut.sh +++ b/run-ut.sh @@ -131,6 +131,9 @@ if [ -d ${DORIS_TEST_BINARY_DIR}/util/test_data ]; then fi cp -r ${DORIS_HOME}/be/test/util/test_data ${DORIS_TEST_BINARY_DIR}/util/ +# Running common Unittest +${DORIS_TEST_BINARY_DIR}/common/status_test + # Running Util Unittest ${DORIS_TEST_BINARY_DIR}/util/bit_util_test ${DORIS_TEST_BINARY_DIR}/util/bitmap_test