[fix](multi-catalog) complex types parsing failed, with unexpected nulls and rows (#22228)
Fix two bugs: 1. Unexpected null values in an array column. The error is triggered when 65535 consecutive values in a nullable array column are not null, because the array parser did not handle the boundary condition. 2. The number of rows in the key field and the number of rows in the value field of a map column are not equal. Similarly, the row counts among the fields of a struct column are not the same. This is triggered when the row counts differ among the parquet pages of different columns in a row group.
This commit is contained in:
@ -74,7 +74,7 @@ static void fill_struct_null_map(FieldSchema* field, NullMap& null_map,
|
||||
null_map[pos++] = 1;
|
||||
}
|
||||
}
|
||||
null_map.resize(pos + 1);
|
||||
null_map.resize(pos);
|
||||
}
|
||||
|
||||
static void fill_array_offset(FieldSchema* field, ColumnArray::Offsets64& offsets_data,
|
||||
@ -394,10 +394,10 @@ Status ScalarColumnReader::_read_nested_column(ColumnPtr& doris_column, DataType
|
||||
continue;
|
||||
}
|
||||
bool is_null = def_level < _field_schema->definition_level;
|
||||
if (prev_is_null == is_null) {
|
||||
if (USHRT_MAX - null_map.back() >= loop_read) {
|
||||
null_map.back() += loop_read;
|
||||
}
|
||||
if (prev_is_null == is_null && (USHRT_MAX - null_map.back() >= loop_read)) {
|
||||
// If whether the values are nullable in current loop is the same the previous values,
|
||||
// we can save the memory usage in null map
|
||||
null_map.back() += loop_read;
|
||||
} else {
|
||||
if (!(prev_is_null ^ is_null)) {
|
||||
null_map.emplace_back(0);
|
||||
@ -633,10 +633,14 @@ Status MapColumnReader::read_column_data(ColumnPtr& doris_column, DataTypePtr& t
|
||||
bool value_eof = false;
|
||||
RETURN_IF_ERROR(_key_reader->read_column_data(key_column, key_type, select_vector, batch_size,
|
||||
&key_rows, &key_eof, is_dict_filter));
|
||||
select_vector.reset();
|
||||
RETURN_IF_ERROR(_value_reader->read_column_data(value_column, value_type, select_vector,
|
||||
batch_size, &value_rows, &value_eof,
|
||||
is_dict_filter));
|
||||
while (value_rows < key_rows && !value_eof) {
|
||||
size_t loop_rows = 0;
|
||||
select_vector.reset();
|
||||
RETURN_IF_ERROR(_value_reader->read_column_data(value_column, value_type, select_vector,
|
||||
key_rows - value_rows, &loop_rows,
|
||||
&value_eof, is_dict_filter));
|
||||
value_rows += loop_rows;
|
||||
}
|
||||
DCHECK_EQ(key_rows, value_rows);
|
||||
DCHECK_EQ(key_eof, value_eof);
|
||||
*read_rows = key_rows;
|
||||
@ -686,16 +690,24 @@ Status StructColumnReader::read_column_data(ColumnPtr& doris_column, DataTypePtr
|
||||
ColumnPtr& doris_field = doris_struct.get_column_ptr(i);
|
||||
DataTypePtr& doris_type = const_cast<DataTypePtr&>(doris_struct_type->get_element(i));
|
||||
select_vector.reset();
|
||||
size_t loop_rows = 0;
|
||||
bool loop_eof = false;
|
||||
_child_readers[i]->read_column_data(doris_field, doris_type, select_vector, batch_size,
|
||||
&loop_rows, &loop_eof, is_dict_filter);
|
||||
if (i != 0) {
|
||||
DCHECK_EQ(*read_rows, loop_rows);
|
||||
DCHECK_EQ(*eof, loop_eof);
|
||||
size_t field_rows = 0;
|
||||
bool field_eof = false;
|
||||
if (i == 0) {
|
||||
_child_readers[i]->read_column_data(doris_field, doris_type, select_vector, batch_size,
|
||||
&field_rows, &field_eof, is_dict_filter);
|
||||
*read_rows = field_rows;
|
||||
*eof = field_eof;
|
||||
} else {
|
||||
*read_rows = loop_rows;
|
||||
*eof = loop_eof;
|
||||
while (field_rows < *read_rows && !field_eof) {
|
||||
size_t loop_rows = 0;
|
||||
select_vector.reset();
|
||||
_child_readers[i]->read_column_data(doris_field, doris_type, select_vector,
|
||||
*read_rows - field_rows, &loop_rows, &field_eof,
|
||||
is_dict_filter);
|
||||
field_rows += loop_rows;
|
||||
}
|
||||
DCHECK_EQ(*read_rows, field_rows);
|
||||
DCHECK_EQ(*eof, field_eof);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
File diff suppressed because one or more lines are too long
@ -0,0 +1,54 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
// Regression suite for complex-type (struct/map/array) columns read through an
// external hive (HMS) catalog. Only runs when the external hive test
// environment is enabled in the regression config.
suite("test_complex_types", "p2") {
    String enabled = context.config.otherConfigs.get("enableExternalHiveTest")
    // Guard clause: skip the whole suite unless the external hive env is on.
    if (enabled == null || !enabled.equalsIgnoreCase("true")) {
        return
    }

    String extHiveHmsHost = context.config.otherConfigs.get("extHiveHmsHost")
    String extHiveHmsPort = context.config.otherConfigs.get("extHiveHmsPort")
    String catalog_name = "test_complex_types"

    // Drop and recreate the hive catalog so the suite is self-contained.
    sql """drop catalog if exists ${catalog_name};"""
    sql """
        create catalog if not exists ${catalog_name} properties (
            'type'='hms',
            'hadoop.username' = 'hadoop',
            'hive.metastore.uris' = 'thrift://${extHiveHmsHost}:${extHiveHmsPort}'
        );
    """
    logger.info("catalog " + catalog_name + " created")

    sql """switch ${catalog_name};"""
    logger.info("switched to catalog " + catalog_name)

    sql """ use multi_catalog """

    // Struct column: element extraction over rows with nulls.
    qt_null_struct_element """select count(struct_element(favor, 'tip')) from byd where id % 13 = 0"""

    // Map column: key lookup, key/value extraction and membership checks.
    qt_map_key_select """select id, singles["p0X72J-mkMe40O-vOa-opfI"] as map_key from byd where singles["p0X72J-mkMe40O-vOa-opfI"] is not null"""

    qt_map_keys """select map_keys(singles) from byd where id = 1077"""

    qt_map_values """select map_values(singles) from byd where id = 1433"""

    qt_map_contains_key """select * from byd where map_contains_key(singles, 'B0mXFX-QvgUgo7-Dih-6rDu') = 1"""

    // Array column: aggregation and lambda-based array functions.
    qt_array_max """select count(array_max(capacity)) from byd where array_max(capacity) > 0.99"""

    qt_array_filter """select count(array_size(array_filter(i -> (i > 0.99), capacity))) from byd where array_size(array_filter(i -> (i > 0.99), capacity))"""

    qt_array_last """select max(array_last(i -> i > 0, capacity)) from byd where array_last(i -> i > 0, capacity) < 0.99"""
}
|
||||
Reference in New Issue
Block a user