[fix](hive) do not split compress data file and support lz4/snappy block codec (#23245)

1. Do not split compressed data files
Some data files in Hive are compressed with gzip, deflate, etc.
These kinds of files cannot be split (sketched below).
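As a rough illustration of the split decision (a minimal sketch with hypothetical names, not Doris's planner API), the scanner only needs to check the codec before producing scan ranges:

// Minimal sketch: is_splittable and ScanRange are illustrative names.
#include <cstddef>
#include <string>

enum class CompressType { PLAIN, GZIP, DEFLATE, LZ4BLOCK, SNAPPYBLOCK };

// Whole-stream codecs such as gzip and deflate carry decoder state across
// the entire file, so a reader cannot start decoding at an arbitrary offset.
bool is_splittable(CompressType type) {
    return type == CompressType::PLAIN;
}

struct ScanRange {
    std::string path;
    size_t offset;
    size_t length; // for a non-splittable file: one range covering the whole file
};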

2. Support lz4 block codec
For the hive scan node, use the lz4 block codec instead of the lz4 frame codec (see the sketch below).
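The two formats are not interchangeable: an LZ4 frame is self-describing and starts with the magic number 0x184D2204, while the hadoop lz4 block format starts directly with a big-endian uncompressed size. A minimal sketch of telling them apart (looks_like_lz4_frame is an illustrative helper, not part of this PR):

#include <cstddef>
#include <cstdint>
#include <cstring>

bool looks_like_lz4_frame(const uint8_t* buf, size_t len) {
    // The LZ4 frame magic number is 0x184D2204, stored little-endian
    // on disk as the bytes 04 22 4D 18.
    if (len < 4) return false;
    uint32_t magic = 0;
    std::memcpy(&magic, buf, sizeof(magic)); // assumes a little-endian host
    return magic == 0x184D2204u;
}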

3. Support snappy block codec
For hadoop snappy (see the sketch below).
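For reference, the writer side of the hadoop snappy block framing that the new decompressor consumes looks roughly like this (a sketch using the snappy library's Compress(); hadoop_snappy_block and put_int32_be are hypothetical helpers):

#include <cstdint>
#include <string>
#include <snappy.h>

static void put_int32_be(std::string* out, uint32_t v) {
    out->push_back(static_cast<char>((v >> 24) & 0xFF));
    out->push_back(static_cast<char>((v >> 16) & 0xFF));
    out->push_back(static_cast<char>((v >> 8) & 0xFF));
    out->push_back(static_cast<char>(v & 0xFF));
}

// Emits <be32 uncompressed size><be32 compressed size><snappy block>.
std::string hadoop_snappy_block(const std::string& raw) {
    std::string compressed;
    snappy::Compress(raw.data(), raw.size(), &compressed);
    std::string out;
    put_int32_be(&out, static_cast<uint32_t>(raw.size()));
    put_int32_be(&out, static_cast<uint32_t>(compressed.size()));
    out += compressed;
    return out;
}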

4. Optimize the `count(*)` query of csv files
For a query like `select count(*) from tbl`, only the lines need to be split; there is no need to split the columns (sketched below).
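The idea, sketched with a hypothetical helper (not the actual Doris reader): each '\n' is one row, so counting rows only needs a memchr scan over the buffer and skips all column-delimiter parsing:

#include <cstddef>
#include <cstring>

size_t count_rows(const char* buf, size_t len) {
    size_t rows = 0;
    const char* p = buf;
    const char* end = buf + len;
    while (p < end) {
        const char* nl = static_cast<const char*>(std::memchr(p, '\n', end - p));
        if (nl == nullptr) break; // a trailing partial line is handled elsewhere
        ++rows;
        p = nl + 1;
    }
    return rows;
}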

Needs to be picked to branch-2.0 after this PR: #22304
Author: Mingyu Chen
Date: 2023-08-26 12:59:05 +08:00
Committed by: GitHub
Parent: 36b7fcf055
Commit: 40be6a0b05
18 changed files with 465 additions and 57 deletions


@@ -42,6 +42,12 @@ Status Decompressor::create_decompressor(CompressType type, Decompressor** decom
case CompressType::LZ4FRAME:
*decompressor = new Lz4FrameDecompressor();
break;
+case CompressType::LZ4BLOCK:
+    *decompressor = new Lz4BlockDecompressor();
+    break;
+case CompressType::SNAPPYBLOCK:
+    *decompressor = new SnappyBlockDecompressor();
+    break;
#ifdef DORIS_WITH_LZO
case CompressType::LZOP:
*decompressor = new LzopDecompressor();
@@ -59,6 +65,10 @@ Status Decompressor::create_decompressor(CompressType type, Decompressor** decom
return st;
}
+// Big-endian read; the cast avoids shifting into the sign bit of int.
+uint32_t Decompressor::_read_int32(uint8_t* buf) {
+    return (static_cast<uint32_t>(buf[0]) << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
+}
std::string Decompressor::debug_info() {
return "Decompressor";
}
@@ -239,7 +249,7 @@ Status Lz4FrameDecompressor::decompress(uint8_t* input, size_t input_len, size_t
size_t* decompressed_len, bool* stream_end,
size_t* more_input_bytes, size_t* more_output_bytes) {
uint8_t* src = input;
-    size_t src_size = input_len;
+    size_t remaining_input_size = input_len;
size_t ret = 1;
*input_bytes_read = 0;
@@ -257,7 +267,7 @@ Status Lz4FrameDecompressor::decompress(uint8_t* input, size_t input_len, size_t
}
LZ4F_frameInfo_t info;
-        ret = LZ4F_getFrameInfo(_dctx, &info, (void*)src, &src_size);
+        ret = LZ4F_getFrameInfo(_dctx, &info, (void*)src, &remaining_input_size);
if (LZ4F_isError(ret)) {
return Status::InternalError("LZ4F_getFrameInfo error: {}",
std::string(LZ4F_getErrorName(ret)));
@@ -270,17 +280,17 @@ Status Lz4FrameDecompressor::decompress(uint8_t* input, size_t input_len, size_t
std::string(LZ4F_getErrorName(ret)));
}
-        *input_bytes_read = src_size;
+        *input_bytes_read = remaining_input_size;
-        src += src_size;
-        src_size = input_len - src_size;
+        src += remaining_input_size;
+        remaining_input_size = input_len - remaining_input_size;
LOG(INFO) << "lz4 block size: " << _expect_dec_buf_size;
}
// decompress
size_t output_len = output_max_len;
-    ret = LZ4F_decompress(_dctx, (void*)output, &output_len, (void*)src, &src_size,
+    ret = LZ4F_decompress(_dctx, (void*)output, &output_len, (void*)src, &remaining_input_size,
/* LZ4F_decompressOptions_t */ nullptr);
if (LZ4F_isError(ret)) {
return Status::InternalError("Decompression error: {}",
@@ -288,7 +298,7 @@ Status Lz4FrameDecompressor::decompress(uint8_t* input, size_t input_len, size_t
}
// update
-    *input_bytes_read += src_size;
+    *input_bytes_read += remaining_input_size;
*decompressed_len = output_len;
if (ret == 0) {
*stream_end = true;
@@ -324,4 +334,165 @@ size_t Lz4FrameDecompressor::get_block_size(const LZ4F_frameInfo_t* info) {
}
}
/// Lz4BlockDecompressor
Status Lz4BlockDecompressor::init() {
return Status::OK();
}
Status Lz4BlockDecompressor::decompress(uint8_t* input, size_t input_len, size_t* input_bytes_read,
uint8_t* output, size_t output_max_len,
size_t* decompressed_len, bool* stream_end,
size_t* more_input_bytes, size_t* more_output_bytes) {
uint8_t* src = input;
size_t remaining_input_size = input_len;
int64_t uncompressed_total_len = 0;
*input_bytes_read = 0;
// The hadoop lz4 block format is laid out as:
// <4 byte big endian uncompressed size>
// <4 byte big endian compressed size>
// <lz4 compressed block>
// ....
// <4 byte big endian uncompressed size>
// <4 byte big endian compressed size>
// <lz4 compressed block>
//
// See:
// https://github.com/apache/hadoop/blob/trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/Lz4Codec.cc
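// For example (illustrative bytes, not taken from the PR): a block that
// inflates to 100 (0x64) bytes and compresses to 60 (0x3c) bytes is stored as
//   00 00 00 64 | 00 00 00 3c | <60 bytes of raw lz4 block data>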
while (remaining_input_size > 0) {
    // Both 4-byte length headers must be present before parsing them,
    // otherwise _read_int32 would read past the end of the input.
    if (remaining_input_size < 2 * sizeof(uint32_t)) {
        *more_input_bytes = 2 * sizeof(uint32_t) - remaining_input_size;
        break;
    }
    // Read uncompressed size
    uint32_t uncompressed_block_len = Decompressor::_read_int32(src);
int64_t remaining_output_size = output_max_len - uncompressed_total_len;
if (remaining_output_size < uncompressed_block_len) {
// Need more output buffer
*more_output_bytes = uncompressed_block_len - remaining_output_size;
break;
}
// Read compressed size; the input available for the payload excludes
// both 4-byte length headers.
size_t tmp_src_size = remaining_input_size - 2 * sizeof(uint32_t);
size_t compressed_len = Decompressor::_read_int32(src + sizeof(uint32_t));
if (compressed_len == 0 || compressed_len > tmp_src_size) {
// Need more input data
*more_input_bytes = compressed_len - tmp_src_size;
break;
}
src += 2 * sizeof(uint32_t);
remaining_input_size -= 2 * sizeof(uint32_t);
// Decompress
int uncompressed_len = LZ4_decompress_safe(reinterpret_cast<const char*>(src),
reinterpret_cast<char*>(output), compressed_len,
remaining_output_size);
if (uncompressed_len < 0 || uncompressed_len != uncompressed_block_len) {
return Status::InternalError(
"lz4 block decompress failed. uncompressed_len: {}, expected: {}",
uncompressed_len, uncompressed_block_len);
}
output += uncompressed_len;
src += compressed_len;
remaining_input_size -= compressed_len;
uncompressed_total_len += uncompressed_len;
}
*input_bytes_read += (input_len - remaining_input_size);
*decompressed_len = uncompressed_total_len;
// If neither more input nor more output is needed, we have reached
// the end of the compressed data.
*stream_end = (*more_input_bytes == 0 && *more_output_bytes == 0);
return Status::OK();
}
std::string Lz4BlockDecompressor::debug_info() {
std::stringstream ss;
ss << "Lz4BlockDecompressor.";
return ss.str();
}
/// SnappyBlockDecompressor
Status SnappyBlockDecompressor::init() {
return Status::OK();
}
Status SnappyBlockDecompressor::decompress(uint8_t* input, size_t input_len,
size_t* input_bytes_read, uint8_t* output,
size_t output_max_len, size_t* decompressed_len,
bool* stream_end, size_t* more_input_bytes,
size_t* more_output_bytes) {
uint8_t* src = input;
size_t remaining_input_size = input_len;
int64_t uncompressed_total_len = 0;
*input_bytes_read = 0;
// The hadoop snappy block format is laid out as:
// <4 byte big endian uncompressed size>
// <4 byte big endian compressed size>
// <snappy compressed block>
// ....
// <4 byte big endian uncompressed size>
// <4 byte big endian compressed size>
// <snappy compressed block>
//
// See:
// https://github.com/apache/hadoop/blob/trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/SnappyCodec.cc
while (remaining_input_size > 0) {
    // Both 4-byte length headers must be present before parsing them,
    // otherwise _read_int32 would read past the end of the input.
    if (remaining_input_size < 2 * sizeof(uint32_t)) {
        *more_input_bytes = 2 * sizeof(uint32_t) - remaining_input_size;
        break;
    }
    // Read uncompressed size
    uint32_t uncompressed_block_len = Decompressor::_read_int32(src);
int64_t remaining_output_size = output_max_len - uncompressed_total_len;
if (remaining_output_size < uncompressed_block_len) {
// Need more output buffer
*more_output_bytes = uncompressed_block_len - remaining_output_size;
break;
}
// Read compressed size; the input available for the payload excludes
// both 4-byte length headers.
size_t tmp_src_size = remaining_input_size - 2 * sizeof(uint32_t);
size_t compressed_len = Decompressor::_read_int32(src + sizeof(uint32_t));
if (compressed_len == 0 || compressed_len > tmp_src_size) {
// Need more input data
*more_input_bytes = compressed_len - tmp_src_size;
break;
}
src += 2 * sizeof(uint32_t);
remaining_input_size -= 2 * sizeof(uint32_t);
// ATTN: the uncompressed length from GetUncompressedLength() is the same
// as uncompressed_block_len, so there is no need to fetch it again:
// Get uncompressed len from snappy
// size_t uncompressed_len;
// if (!snappy::GetUncompressedLength(reinterpret_cast<const char*>(src),
// compressed_len, &uncompressed_len)) {
// return Status::InternalError("snappy block decompress failed to get uncompressed len");
// }
// Decompress
if (!snappy::RawUncompress(reinterpret_cast<const char*>(src), compressed_len,
reinterpret_cast<char*>(output))) {
return Status::InternalError("snappy block decompress failed. uncompressed_len: {}",
uncompressed_block_len);
}
output += uncompressed_block_len;
src += compressed_len;
remaining_input_size -= compressed_len;
uncompressed_total_len += uncompressed_block_len;
}
*input_bytes_read += (input_len - remaining_input_size);
*decompressed_len = uncompressed_total_len;
// If neither more input nor more output is needed, we have reached
// the end of the compressed data.
*stream_end = (*more_input_bytes == 0 && *more_output_bytes == 0);
return Status::OK();
}
std::string SnappyBlockDecompressor::debug_info() {
std::stringstream ss;
ss << "SnappyBlockDecompressor.";
return ss.str();
}
} // namespace doris
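For context, a caller of decompress() is expected to drive it in a loop roughly like the following sketch (simplified and hypothetical buffer management; the parameter contract comes from the code above, and RETURN_IF_ERROR is assumed to be Doris's usual status macro). Note that more_in and more_out must be zeroed before each call, since decompress() only sets them when it needs more data:

size_t input_read = 0, decompressed = 0;
size_t more_in = 0, more_out = 0;
bool stream_end = false;
RETURN_IF_ERROR(decompressor->decompress(in_buf, in_len, &input_read,
                                         out_buf, out_cap, &decompressed,
                                         &stream_end, &more_in, &more_out));
if (more_in > 0) {
    // Read at least `more_in` additional bytes into in_buf, then retry.
} else if (more_out > 0) {
    // Grow out_buf by at least `more_out` bytes, then retry.
}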