Fix some typos in be/. (#9681)

Shuangchi He
2022-05-19 20:55:39 +08:00
committed by GitHub
parent 87e3904cc6
commit 73c4ec7167
20 changed files with 28 additions and 28 deletions

View File

@@ -704,7 +704,7 @@ CONF_Int32(max_minidump_file_number, "10");
// and the valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0.
CONF_String(kafka_broker_version_fallback, "0.10.0");
-// The the number of pool siz of routine load consumer.
+// The number of pool siz of routine load consumer.
// If you meet the error describe in https://github.com/edenhill/librdkafka/issues/3608
// Change this size to 0 to fix it temporarily.
CONF_Int32(routine_load_consumer_pool_size, "10");
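
The comment above describes a temporary workaround for the librdkafka issue; applied in be.conf it would look roughly like this (the key name comes from the hunk, the surrounding file layout is an assumption):

# Disable consumer reuse for routine load to work around
# https://github.com/edenhill/librdkafka/issues/3608 (default is 10).
routine_load_consumer_pool_size = 0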

View File

@@ -384,7 +384,7 @@ std::string LzopDecompressor::debug_info() {
<< " header size: " << _header_info.header_size
<< " header checksum type: " << _header_info.header_checksum_type
<< " input checksum type: " << _header_info.input_checksum_type
<< " ouput checksum type: " << _header_info.output_checksum_type;
<< " output checksum type: " << _header_info.output_checksum_type;
return ss.str();
}
#endif // DORIS_WITH_LZO

View File

@@ -405,7 +405,7 @@ private:
void Close(bool finalize_rows);
/// Spill this partition. 'more_aggregate_rows' = true means that more aggregate rows
-/// may be appended to the the partition before appending unaggregated rows. On
+/// may be appended to the partition before appending unaggregated rows. On
/// success, one of the streams is left with a write iterator: the aggregated stream
/// if 'more_aggregate_rows' is true or the unaggregated stream otherwise.
Status Spill(bool more_aggregate_rows);
@@ -528,7 +528,7 @@ private:
/// GetNext() using the agg fn evaluators' Serialize() or Finalize().
/// For the Finalize() case if the output tuple is different from the intermediate
/// tuple, then a new tuple is allocated from 'pool' to hold the final result.
-/// Grouping values are copied into the output tuple and the the output tuple holding
+/// Grouping values are copied into the output tuple and the output tuple holding
/// the finalized/serialized aggregate values is returned.
/// TODO: Coordinate the allocation of new tuples with the release of memory
/// so as not to make memory consumption blow up.

View File

@@ -950,7 +950,7 @@ bool SplitStringIntoKeyValuePairs(const string& line, const string& key_value_de
// values; just record that our split failed.
success = false;
}
-// we expect atmost one value because we passed in an empty vsep to
+// we expect at most one value because we passed in an empty vsep to
// SplitStringIntoKeyValues
DCHECK_LE(value.size(), 1);
kv_pairs->push_back(make_pair(key, value.empty() ? "" : value[0]));
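
A minimal usage sketch for the function above; the header path and the exact delimiter parameters (key-value delimiter, then pair delimiter, then the output vector) are assumptions based on the truncated signature in the hunk:

#include <string>
#include <utility>
#include <vector>
#include "gutil/strings/split.h"  // assumed header path

int main() {
    std::vector<std::pair<std::string, std::string>> kv_pairs;
    // Pairs are separated by ',', key and value by '='. Because an empty value
    // separator is passed to SplitStringIntoKeyValues internally, each key
    // carries at most one value (hence the DCHECK_LE above).
    bool ok = SplitStringIntoKeyValuePairs("k1=v1,k2=v2,k3=", "=", ",", &kv_pairs);
    // kv_pairs now holds {"k1","v1"}, {"k2","v2"}, {"k3",""}.
    return ok ? 0 : 1;
}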

View File

@@ -205,7 +205,7 @@ void Compaction::gc_output_rowset() {
}
}
-// Find the longest consecutive version path in "rowset", from begining.
+// Find the longest consecutive version path in "rowset", from beginning.
// Two versions before and after the missing version will be saved in missing_version,
// if missing_version is not null.
Status Compaction::find_longest_consecutive_version(std::vector<RowsetSharedPtr>* rowsets,

View File

@@ -245,7 +245,7 @@ void MemTable::_aggregate_two_row_in_block(RowInBlock* new_row, RowInBlock* row_
vectorized::Block MemTable::_collect_vskiplist_results() {
VecTable::Iterator it(_vec_skip_list.get());
vectorized::Block in_block = _input_mutable_block.to_block();
-// TODO: should try to insert data by column, not by row. to opt the the code
+// TODO: should try to insert data by column, not by row. to opt the code
if (_keys_type == KeysType::DUP_KEYS) {
for (it.SeekToFirst(); it.Valid(); it.Next()) {
_output_mutable_block.add_row(&in_block, it.key()->_row_pos);

View File

@@ -377,7 +377,7 @@ const OLAPIndexOffset MemIndex::find(const RowCursor& k, RowCursor* helper_curso
BinarySearchIterator index_fin(_meta[off].count());
if (index_comparator.set_segment_id(off) != Status::OK()) {
throw "index of of range";
throw "index of range";
}
if (!find_last) {

View File

@@ -260,7 +260,7 @@ private:
EncodingType _encoding;
uint16_t _num_literals;
int64_t _zig_zag_literals[MAX_SCOPE]; // for direct encoding
-int64_t _base_reduced_literals[MAX_SCOPE]; // for for patched base encoding
+int64_t _base_reduced_literals[MAX_SCOPE]; // for patched base encoding
int64_t _adj_deltas[MAX_SCOPE - 1]; // for delta encoding
int64_t _fixed_delta;
uint32_t _zz_bits_90p;

View File

@@ -400,7 +400,7 @@ Status SegmentReader::_pick_delete_row_groups(uint32_t first_block, uint32_t las
if (true == del_not_satisfied || 0 == delete_condition.del_cond->columns().size()) {
//if state is DEL_PARTIAL_SATISFIED last_time, cannot be set as DEL_NOT_SATISFIED
-//it is special for for delete condition
+//it is special for delete condition
if (DEL_PARTIAL_SATISFIED == _include_blocks[j]) {
continue;
} else {

View File

@@ -47,7 +47,7 @@ enum { BINARY_DICT_PAGE_HEADER_SIZE = 4 };
// Either header + embedded codeword page, which can be encoded with any
// int PageBuilder, when mode_ = DICT_ENCODING.
// Or header + embedded BinaryPlainPage, when mode_ = PLAIN_ENCODING.
-// Data pages start with mode_ = DICT_ENCODING, when the the size of dictionary
+// Data pages start with mode_ = DICT_ENCODING, when the size of dictionary
// page go beyond the option_->dict_page_size, the subsequent data pages will switch
// to string plain page automatically.
class BinaryDictPageBuilder : public PageBuilder {
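
A rough sketch of the dict-to-plain fallback policy described above; the function and parameter names below are illustrative, not the actual members of BinaryDictPageBuilder:

#include <cstddef>

// Illustrative only: once the dictionary page outgrows dict_page_size, every
// subsequent data page is written as a plain binary page instead of a
// codeword page.
enum Mode { DICT_ENCODING, PLAIN_ENCODING };

Mode next_page_mode(Mode current, std::size_t dict_bytes, std::size_t dict_page_size) {
    if (current == DICT_ENCODING && dict_bytes > dict_page_size) {
        return PLAIN_ENCODING;
    }
    return current;
}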

View File

@@ -797,7 +797,7 @@ void SegmentIterator::_init_current_block(
auto cid = _schema.column_id(i);
auto column_desc = _schema.column(cid);
-// the column in in block must clear() here to insert new data
+// the column in block must clear() here to insert new data
if (_is_pred_column[cid] ||
i >= block->columns()) { //todo(wb) maybe we can release it after output block
current_columns[cid]->clear();
@@ -818,7 +818,7 @@ void SegmentIterator::_output_non_pred_columns(vectorized::Block* block) {
SCOPED_RAW_TIMER(&_opts.stats->output_col_ns);
for (auto cid : _non_predicate_columns) {
auto loc = _schema_block_id_map[cid];
-// if loc < block->block->columns() means the the column is delete column and should
+// if loc < block->block->columns() means the column is delete column and should
// not output by block, so just skip the column.
if (loc < block->columns()) {
block->replace_by_position(loc, std::move(_current_return_columns[cid]));

View File

@@ -412,7 +412,7 @@ public:
// WEEK_YEAR (1)
// If not set:
// Week is in range 0-53
-// Week 0 is returned for the the last week of the previous year (for
+// Week 0 is returned for the last week of the previous year (for
// a date at start of january) In this case one can get 53 for the
// first week of next year. This flag ensures that the week is
// relevant for the given year. Note that this flag is only
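
The 0-53 range with week 0 for early January described above is essentially the same convention as strftime's Sunday-based %U numbering; a small standalone illustration using the C library (not the class from this hunk):

#include <cstdio>
#include <ctime>

int main() {
    // 2022-01-01 is a Saturday. With Sunday-based numbering and no
    // WEEK_YEAR-style adjustment it falls before the year's first Sunday,
    // so it is reported as week 00, i.e. grouped with the last week of 2021.
    std::tm t = {};
    t.tm_year = 2022 - 1900;
    t.tm_mon = 0;  // January
    t.tm_mday = 1;
    std::mktime(&t);  // normalizes the struct and fills tm_wday
    char buf[8];
    std::strftime(buf, sizeof(buf), "%U", &t);
    std::printf("week %s\n", buf);  // prints "week 00"
    return 0;
}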

View File

@@ -166,7 +166,7 @@ class MemTracker;
// the cached buffer is returned (BufferDescriptor::Return()).
//
// Remote filesystem support (e.g. S3):
-// Remote filesystems are modeled as "remote disks". That is, there is a seperate disk
+// Remote filesystems are modeled as "remote disks". That is, there is a separate disk
// queue for each supported remote filesystem type. In order to maximize throughput,
// multiple connections are opened in parallel by having multiple threads running per
// queue. Also note that reading from a remote filesystem service can be more CPU
@@ -226,7 +226,7 @@ public:
};
// Buffer struct that is used by the caller and IoMgr to pass read buffers.
-// It is is expected that only one thread has ownership of this object at a
+// It is expected that only one thread has ownership of this object at a
// time.
class BufferDescriptor {
public:

View File

@@ -101,7 +101,7 @@ public:
~MemPool();
/// Allocates a section of memory of 'size' bytes with DEFAULT_ALIGNMENT at the end
-/// of the the current chunk. Creates a new chunk if there aren't any chunks
+/// of the current chunk. Creates a new chunk if there aren't any chunks
/// with enough capacity.
uint8_t* allocate(int64_t size, Status* rst = nullptr) {
return allocate<false>(size, DEFAULT_ALIGNMENT, rst);
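
A minimal usage sketch for the allocate() overload shown above; the header path, the null-return behaviour, and construction of the pool itself are assumptions:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include "runtime/mem_pool.h"  // assumed header path

// Copies 'size' bytes into memory owned by 'pool'. allocate() hands back a
// DEFAULT_ALIGNMENT-aligned region carved from the current chunk, or from a
// newly created chunk when the current one lacks capacity.
uint8_t* copy_into_pool(doris::MemPool* pool, const void* src, int64_t size) {
    doris::Status st;
    uint8_t* dst = pool->allocate(size, &st);
    if (dst == nullptr || !st.ok()) {
        return nullptr;  // allocation failed in this sketch (e.g. limit exceeded)
    }
    std::memcpy(dst, src, static_cast<std::size_t>(size));
    return dst;
}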

View File

@@ -121,7 +121,7 @@ private:
// into output_batch.
// If this run was unpinned, one block (2 if there are var-len slots) is pinned while
// rows are filled into output_batch. The block is unpinned before the next block is
-// pinned. Atmost 1 (2) block(s) will be pinned at any time.
+// pinned. At most 1 (2) block(s) will be pinned at any time.
// If the run was pinned, the blocks are not unpinned (SpillSorter holds on to the memory).
// In either case, all rows in output_batch will have their fixed and var-len data from
// the same block.

View File

@@ -91,7 +91,7 @@ static Status read_cgroup_value(const string& limit_file_path, int64_t* val) {
strings::Substitute("Error reading $0: $1", limit_file_path, get_str_err_msg()));
}
StringParser::ParseResult pr;
-// Parse into an an int64_t If it overflows, returning the max value of int64_t is ok because that
+// Parse into an int64_t If it overflows, returning the max value of int64_t is ok because that
// is effectively unlimited.
*val = StringParser::string_to_int<int64_t>(line.c_str(), line.size(), &pr);
if ((pr != StringParser::PARSE_SUCCESS && pr != StringParser::PARSE_OVERFLOW)) {
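
A sketch of the overflow-tolerant parse the comment describes, reusing the StringParser call from the hunk; the header path and the doris namespace qualification are assumptions:

#include <cstdint>
#include <string>
#include "util/string_parser.hpp"  // assumed header path

// Returns the parsed limit, or -1 if the line is not a number at all. A value
// too large for int64_t yields PARSE_OVERFLOW, which is accepted because the
// clamped maximum is effectively "no limit", as the comment above explains.
int64_t parse_cgroup_limit(const std::string& line) {
    doris::StringParser::ParseResult pr;
    int64_t val = doris::StringParser::string_to_int<int64_t>(line.c_str(), line.size(), &pr);
    if (pr != doris::StringParser::PARSE_SUCCESS && pr != doris::StringParser::PARSE_OVERFLOW) {
        return -1;
    }
    return val;
}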

View File

@@ -230,7 +230,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorBitMap::convert_to_olap()
slice->size = slice_size;
raw_data += slice_size;
} else {
-// TODO: this may not be neccessary, check and remove later
+// TODO: this may not be necessary, check and remove later
slice->data = nullptr;
slice->size = 0;
}
@@ -307,7 +307,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorHLL::convert_to_olap() {
slice->size = slice_size;
raw_data += slice_size;
} else {
-// TODO: this may not be neccessary, check and remove later
+// TODO: this may not be necessary, check and remove later
slice->data = nullptr;
slice->size = 0;
}
@@ -388,7 +388,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorChar::convert_to_olap() {
slice->data = (char*)char_data + string_offset;
slice->size = string_length;
} else {
-// TODO: this may not be neccessary, check and remove later
+// TODO: this may not be necessary, check and remove later
slice->data = nullptr;
slice->size = 0;
}
@@ -475,7 +475,7 @@ Status OlapBlockDataConvertor::OlapColumnDataConvertorVarChar::convert_to_olap()
"`string_type_length_soft_limit_bytes` in vec engine.");
}
} else {
-// TODO: this may not be neccessary, check and remove later
+// TODO: this may not be necessary, check and remove later
slice->data = nullptr;
slice->size = 0;
}

View File

@@ -411,7 +411,7 @@ public:
// WEEK_YEAR (1)
// If not set:
// Week is in range 0-53
-// Week 0 is returned for the the last week of the previous year (for
+// Week 0 is returned for the last week of the previous year (for
// a date at start of january) In this case one can get 53 for the
// first week of next year. This flag ensures that the week is
// relevant for the given year. Note that this flag is only

View File

@@ -561,7 +561,7 @@ Status VOlapTableSink::_validate_data(RuntimeState* state, vectorized::Block* bl
break;
}
-// Dispose the the column should do not contain the NULL value
+// Dispose the column should do not contain the NULL value
// Only tow case:
// 1. column is nullable but the desc is not nullable
// 2. desc->type is BITMAP

View File

@@ -625,13 +625,13 @@ TEST_F(DiskIoMgrTest, SingleReaderCancel) {
EXPECT_TRUE(status.ok());
std::atomic<int> num_ranges_processed;
-int num_succesful_ranges = ranges.size() / 2;
+int num_successful_ranges = ranges.size() / 2;
// Read half the ranges
-for (int i = 0; i < num_succesful_ranges; ++i) {
+for (int i = 0; i < num_successful_ranges; ++i) {
scan_range_thread(&io_mgr, reader, data, strlen(data), Status::OK(), 1,
&num_ranges_processed);
}
-EXPECT_EQ(num_ranges_processed, num_succesful_ranges);
+EXPECT_EQ(num_ranges_processed, num_successful_ranges);
// Start up some threads and then cancel
ThreadGroup threads;