[CodeFormat] Clang-format cpp sources (#4965)
Clang-format all C++ source files.
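The hunks below are consistent with a Google-derived clang-format style: pointers bound to the type (`RdKafka::Conf* conf`), no space between a lambda's capture list and its parameter list, short `if` bodies expanded onto their own lines, sorted include blocks, and long statements wrapped near 100 columns. A minimal `.clang-format` sketch along these lines is shown here; the option values are inferred from the diff, not taken from the repository's checked-in config, so treat them as assumptions.

# Hypothetical .clang-format reconstructed from this diff;
# the project's actual config may differ.
BasedOnStyle: Google
IndentWidth: 4
ColumnLimit: 100             # long stream-insertion chains below wrap near this width
ContinuationIndentWidth: 8   # wrapped initializers indent by two levels
PointerAlignment: Left       # 'RdKafka::Conf *conf' becomes 'RdKafka::Conf* conf'
DerivePointerAlignment: false
AllowShortIfStatementsOnASingleLine: false   # 'if (done) { break; }' is expanded
AllowShortBlocksOnASingleLine: false
SortIncludes: true           # the include block in the first hunk is reordered

Running `clang-format -i` over the C++ sources with a config like this produces mechanical rewrites of the kind shown in the hunks that follow.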
@@ -22,11 +22,10 @@
 #include <string>
 #include <vector>
 
-#include "gutil/strings/split.h"
-
 #include "common/status.h"
-#include "service/backend_options.h"
+#include "gutil/strings/split.h"
 #include "runtime/small_file_mgr.h"
+#include "service/backend_options.h"
 #include "util/defer_op.h"
 #include "util/stopwatch.hpp"
 #include "util/uid_util.h"
@@ -42,10 +41,10 @@ Status KafkaDataConsumer::init(StreamLoadContext* ctx) {
         return Status::OK();
     }
 
-    RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+    RdKafka::Conf* conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
 
     // conf has to be deleted finally
-    auto conf_deleter = [conf] () { delete conf; };
+    auto conf_deleter = [conf]() { delete conf; };
     DeferOp delete_conf(std::bind<void>(conf_deleter));
 
     std::stringstream ss;
@@ -65,7 +64,8 @@ Status KafkaDataConsumer::init(StreamLoadContext* ctx) {
             return Status::OK();
         } else if (res != RdKafka::Conf::CONF_OK) {
             std::stringstream ss;
-            ss << "PAUSE: failed to set '" << conf_key << "', value: '" << conf_val << "', err: " << errstr;
+            ss << "PAUSE: failed to set '" << conf_key << "', value: '" << conf_val
+               << "', err: " << errstr;
             LOG(WARNING) << ss.str();
             return Status::InternalError(ss.str());
         }
@@ -86,10 +86,11 @@ Status KafkaDataConsumer::init(StreamLoadContext* ctx) {
     for (auto& item : ctx->kafka_info->properties) {
         if (boost::algorithm::starts_with(item.second, "FILE:")) {
             // file property should has format: FILE:file_id:md5
-            std::vector<std::string> parts = strings::Split(
-                    item.second, ":", strings::SkipWhitespace());
+            std::vector<std::string> parts =
+                    strings::Split(item.second, ":", strings::SkipWhitespace());
             if (parts.size() != 3) {
-                return Status::InternalError("PAUSE: Invalid file property of kafka: " + item.second);
+                return Status::InternalError("PAUSE: Invalid file property of kafka: " +
+                                             item.second);
             }
             int64_t file_id = std::stol(parts[1]);
             std::string file_path;
@@ -97,7 +98,7 @@ Status KafkaDataConsumer::init(StreamLoadContext* ctx) {
             if (!st.ok()) {
                 std::stringstream ss;
                 ss << "PAUSE: failed to get file for config: " << item.first
-                        << ", error: " << st.get_error_msg();
+                   << ", error: " << st.get_error_msg();
                 return Status::InternalError(ss.str());
             }
             RETURN_IF_ERROR(set_conf(item.first, file_path));
@@ -128,28 +129,26 @@ Status KafkaDataConsumer::init(StreamLoadContext* ctx) {
 }
 
 Status KafkaDataConsumer::assign_topic_partitions(
-        const std::map<int32_t, int64_t>& begin_partition_offset,
-        const std::string& topic,
+        const std::map<int32_t, int64_t>& begin_partition_offset, const std::string& topic,
         StreamLoadContext* ctx) {
-
     DCHECK(_k_consumer);
     // create TopicPartitions
     std::stringstream ss;
     std::vector<RdKafka::TopicPartition*> topic_partitions;
     for (auto& entry : begin_partition_offset) {
-        RdKafka::TopicPartition* tp1 = RdKafka::TopicPartition::create(
-                topic, entry.first, entry.second);
+        RdKafka::TopicPartition* tp1 =
+                RdKafka::TopicPartition::create(topic, entry.first, entry.second);
         topic_partitions.push_back(tp1);
         ss << "[" << entry.first << ": " << entry.second << "] ";
     }
 
     LOG(INFO) << "consumer: " << _id << ", grp: " << _grp_id
-            << " assign topic partitions: " << topic << ", " << ss.str();
+              << " assign topic partitions: " << topic << ", " << ss.str();
 
     // delete TopicPartition finally
-    auto tp_deleter = [&topic_partitions] () {
-        std::for_each(topic_partitions.begin(), topic_partitions.end(),
-                [](RdKafka::TopicPartition* tp1) { delete tp1; });
+    auto tp_deleter = [&topic_partitions]() {
+        std::for_each(topic_partitions.begin(), topic_partitions.end(),
+                      [](RdKafka::TopicPartition* tp1) { delete tp1; });
     };
     DeferOp delete_tp(std::bind<void>(tp_deleter));
 
@@ -157,20 +156,19 @@ Status KafkaDataConsumer::assign_topic_partitions(
     RdKafka::ErrorCode err = _k_consumer->assign(topic_partitions);
     if (err) {
         LOG(WARNING) << "failed to assign topic partitions: " << ctx->brief(true)
-                << ", err: " << RdKafka::err2str(err);
+                     << ", err: " << RdKafka::err2str(err);
         return Status::InternalError("failed to assign topic partitions");
     }
 
     return Status::OK();
 }
 
-Status KafkaDataConsumer::group_consume(
-        BlockingQueue<RdKafka::Message*>* queue,
-        int64_t max_running_time_ms) {
+Status KafkaDataConsumer::group_consume(BlockingQueue<RdKafka::Message*>* queue,
+                                        int64_t max_running_time_ms) {
     _last_visit_time = time(nullptr);
     int64_t left_time = max_running_time_ms;
     LOG(INFO) << "start kafka consumer: " << _id << ", grp: " << _grp_id
-            << ", max running time(ms): " << left_time;
+              << ", max running time(ms): " << left_time;
 
     int64_t received_rows = 0;
     int64_t put_rows = 0;
@@ -181,10 +179,14 @@ Status KafkaDataConsumer::group_consume(
     while (true) {
         {
             std::unique_lock<std::mutex> l(_lock);
-            if (_cancelled) { break; }
+            if (_cancelled) {
+                break;
+            }
         }
 
-        if (left_time <= 0) { break; }
+        if (left_time <= 0) {
+            break;
+        }
 
         bool done = false;
         // consume 1 message at a time
@@ -192,76 +194,76 @@ Status KafkaDataConsumer::group_consume(
         std::unique_ptr<RdKafka::Message> msg(_k_consumer->consume(1000 /* timeout, ms */));
         consumer_watch.stop();
         switch (msg->err()) {
-            case RdKafka::ERR_NO_ERROR:
-                if (msg->len() == 0) {
-                    // ignore msg with length 0.
-                    // put empty msg into queue will cause the load process shutting down.
-                    break;
-                } else if (!queue->blocking_put(msg.get())) {
-                    // queue is shutdown
-                    done = true;
-                } else {
-                    ++put_rows;
-                    msg.release(); // release the ownership, msg will be deleted after being processed
-                }
-                ++received_rows;
-            case RdKafka::ERR__TIMED_OUT:
-                // leave the status as OK, because this may happened
-                // if there is no data in kafka.
-                LOG(INFO) << "kafka consume timeout: " << _id;
-                break;
-            default:
-                LOG(WARNING) << "kafka consume failed: " << _id
-                        << ", msg: " << msg->errstr();
-                st = Status::InternalError(msg->errstr());
-                break;
+        case RdKafka::ERR_NO_ERROR:
+            if (msg->len() == 0) {
+                // ignore msg with length 0.
+                // put empty msg into queue will cause the load process shutting down.
+                break;
+            } else if (!queue->blocking_put(msg.get())) {
+                // queue is shutdown
+                done = true;
+            } else {
+                ++put_rows;
+                msg.release(); // release the ownership, msg will be deleted after being processed
+            }
+            ++received_rows;
+            break;
+        case RdKafka::ERR__TIMED_OUT:
+            // leave the status as OK, because this may happened
+            // if there is no data in kafka.
+            LOG(INFO) << "kafka consume timeout: " << _id;
+            break;
+        default:
+            LOG(WARNING) << "kafka consume failed: " << _id << ", msg: " << msg->errstr();
+            done = true;
+            st = Status::InternalError(msg->errstr());
+            break;
         }
 
         left_time = max_running_time_ms - watch.elapsed_time() / 1000 / 1000;
-        if (done) { break; }
+        if (done) {
+            break;
+        }
     }
 
     LOG(INFO) << "kafka consumer done: " << _id << ", grp: " << _grp_id
-            << ". cancelled: " << _cancelled
-            << ", left time(ms): " << left_time
-            << ", total cost(ms): " << watch.elapsed_time() / 1000 / 1000
-            << ", consume cost(ms): " << consumer_watch.elapsed_time() / 1000 / 1000
-            << ", received rows: " << received_rows
-            << ", put rows: " << put_rows;
+              << ". cancelled: " << _cancelled << ", left time(ms): " << left_time
+              << ", total cost(ms): " << watch.elapsed_time() / 1000 / 1000
+              << ", consume cost(ms): " << consumer_watch.elapsed_time() / 1000 / 1000
+              << ", received rows: " << received_rows << ", put rows: " << put_rows;
 
     return st;
 }
 
 Status KafkaDataConsumer::get_partition_meta(std::vector<int32_t>* partition_ids) {
     // create topic conf
-    RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
-    auto conf_deleter = [tconf] () { delete tconf; };
+    RdKafka::Conf* tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
+    auto conf_deleter = [tconf]() { delete tconf; };
     DeferOp delete_conf(std::bind<void>(conf_deleter));
 
     // create topic
     std::string errstr;
-    RdKafka::Topic *topic = RdKafka::Topic::create(_k_consumer, _topic, tconf, errstr);
+    RdKafka::Topic* topic = RdKafka::Topic::create(_k_consumer, _topic, tconf, errstr);
     if (topic == nullptr) {
         std::stringstream ss;
         ss << "failed to create topic: " << errstr;
         LOG(WARNING) << ss.str();
         return Status::InternalError(ss.str());
     }
-    auto topic_deleter = [topic] () { delete topic; };
+    auto topic_deleter = [topic]() { delete topic; };
     DeferOp delete_topic(std::bind<void>(topic_deleter));
 
     // get topic metadata
     RdKafka::Metadata* metadata = nullptr;
-    RdKafka::ErrorCode err = _k_consumer->metadata(true/* for this topic */, topic, &metadata, 5000);
+    RdKafka::ErrorCode err =
+            _k_consumer->metadata(true /* for this topic */, topic, &metadata, 5000);
     if (err != RdKafka::ERR_NO_ERROR) {
         std::stringstream ss;
         ss << "failed to get partition meta: " << RdKafka::err2str(err);
         LOG(WARNING) << ss.str();
         return Status::InternalError(ss.str());
     }
-    auto meta_deleter = [metadata] () { delete metadata; };
+    auto meta_deleter = [metadata]() { delete metadata; };
     DeferOp delete_meta(std::bind<void>(meta_deleter));
 
     // get partition ids