[Bug](materialized-view) fix where clause persistence replay incorrect (#18228)
Fix incorrect replay of the materialized view WHERE clause restored from persisted metadata.
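What this fixes, in short: when FE metadata is replayed (edit-log replay or GSON post-processing), the persisted CREATE MATERIALIZED VIEW statement was parsed but never analyzed, so the materialized view's WHERE clause and column define expressions came back wrong. The fix rebuilds an Analyzer from a fresh ConnectContext (see RollupJobV2.initAnalyzer() below) and threads it through OlapTable.setIndexMeta(...) into MaterializedIndexMeta.parseStmt(analyzer), which analyzes the statement before recording the WHERE clause. A condensed sketch of the corrected replay path, assembled from the hunks below (simplified, not the verbatim Doris code; parsePersistedStmt is a hypothetical stand-in for the SqlParserUtils.getStmt call):

```java
// Sketch of MaterializedIndexMeta.parseStmt after this commit (condensed).
public void parseStmt(Analyzer analyzer) throws IOException {
    try {
        CreateMaterializedViewStmt stmt = parsePersistedStmt(defineStmt); // hypothetical helper
        if (analyzer != null) {
            stmt.analyze(analyzer);            // the actual fix: analyze during replay
        }
        stmt.setIsReplay(true);
        setWhereClause(stmt.getWhereClause()); // now taken from the analyzed statement
        stmt.rewriteToBitmapWithCheck();
        setColumnsDefineExpr(stmt.parseDefineExpr(analyzer));
    } catch (Exception e) {
        throw new IOException("error happens when parsing create materialized view stmt: " + defineStmt, e);
    }
}
```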
@@ -193,21 +193,10 @@ std::string SchemaColumnsScanner::_type_to_string(TColumnDesc& desc) {
     case TPrimitiveType::DATETIME:
         return "datetime";
     case TPrimitiveType::DECIMALV2: {
-        std::stringstream stream;
-        stream << "decimal(";
-        if (desc.__isset.columnPrecision) {
-            stream << desc.columnPrecision;
-        } else {
-            stream << 27;
-        }
-        stream << ",";
-        if (desc.__isset.columnScale) {
-            stream << desc.columnScale;
-        } else {
-            stream << 9;
-        }
-        stream << ")";
-        return stream.str();
+        return fmt::format(
+                "decimal({}, {})",
+                desc.__isset.columnPrecision ? std::to_string(desc.columnPrecision) : "27",
+                desc.__isset.columnScale ? std::to_string(desc.columnScale) : "9");
     }
     case TPrimitiveType::DECIMAL32:
     case TPrimitiveType::DECIMAL64:
@@ -254,7 +243,7 @@ Status SchemaColumnsScanner::_get_new_desc() {
         }
         desc_params.tables_name.push_back(_table_result.tables[_table_index++]);
     }
-    LOG(WARNING) << "_get_new_desc tables_name size: " << desc_params.tables_name.size();
+
     if (nullptr != _param->current_user_ident) {
         desc_params.__set_current_user_ident(*(_param->current_user_ident));
     } else {

@@ -133,8 +133,7 @@ void EvHttpServer::stop() {
     {
         std::lock_guard<std::mutex> lock(_event_bases_lock);
         for (int i = 0; i < _num_workers; ++i) {
-            LOG(WARNING) << "event_base_loopbreak ret: "
-                         << event_base_loopbreak(_event_bases[i].get());
+            event_base_loopbreak(_event_bases[i].get());
         }
         _event_bases.clear();
     }

@@ -525,14 +525,14 @@ public class ScalarType extends Type {
             return "CHAR(" + len + ")";
         } else if (type == PrimitiveType.DECIMALV2) {
             if (isWildcardDecimal()) {
-                return "DECIMAL(*,*)";
+                return "DECIMAL(*, *)";
             }
-            return "DECIMAL(" + precision + "," + scale + ")";
+            return "DECIMAL(" + precision + ", " + scale + ")";
         } else if (type.isDecimalV3Type()) {
             if (isWildcardDecimal()) {
-                return "DECIMALV3(*,*)";
+                return "DECIMALV3(*, *)";
             }
-            return "DECIMALV3(" + precision + "," + scale + ")";
+            return "DECIMALV3(" + precision + ", " + scale + ")";
         } else if (type == PrimitiveType.DATETIMEV2) {
             return "DATETIMEV2(" + scale + ")";
         } else if (type == PrimitiveType.TIMEV2) {

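Note on the formatting change that ripples through the rest of this diff: decimal types now render with a space after the comma — DECIMAL(9, 0) instead of DECIMAL(9,0) — on both the BE (fmt::format in SchemaColumnsScanner above) and the FE (ScalarType here, Util below), and all expected test outputs are updated to match. An illustrative check, assuming ScalarType.createDecimalType and the string form shown in the hunk above:

```java
// Hypothetical illustration; the factory method name is assumed from the Doris FE catalog API.
ScalarType decimal = ScalarType.createDecimalType(9, 0);
// After this change the rendered type is expected to read "DECIMAL(9, 0)".
System.out.println(decimal);
```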
@@ -141,6 +141,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable {
     // save failed task after retry three times, tabletId -> agentTask
     private Map<Long, List<AgentTask>> failedAgentTasks = Maps.newHashMap();
 
+    private Analyzer analyzer;
+
     private RollupJobV2() {
         super(JobType.ROLLUP);
     }
@@ -149,7 +151,7 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable {
             long rollupIndexId, String baseIndexName, String rollupIndexName, List<Column> rollupSchema,
             Column whereColumn,
             int baseSchemaHash, int rollupSchemaHash, KeysType rollupKeysType, short rollupShortKeyColumnCount,
-            OriginStatement origStmt) {
+            OriginStatement origStmt) throws AnalysisException {
         super(jobId, JobType.ROLLUP, dbId, tableId, tableName, timeoutMs);
 
         this.baseIndexId = baseIndexId;
@@ -166,6 +168,7 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable {
         this.rollupShortKeyColumnCount = rollupShortKeyColumnCount;
 
         this.origStmt = origStmt;
+        initAnalyzer();
     }
 
     public void addTabletIdMap(long partitionId, long rollupTabletId, long baseTabletId) {
@@ -182,6 +185,27 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable {
         this.storageFormat = storageFormat;
     }
 
+    private void initAnalyzer() throws AnalysisException {
+        ConnectContext connectContext = new ConnectContext();
+        Database db;
+        try {
+            db = Env.getCurrentInternalCatalog().getDbOrMetaException(dbId);
+        } catch (MetaNotFoundException e) {
+            throw new AnalysisException("error happens when parsing create materialized view stmt: " + origStmt, e);
+        }
+        String clusterName = db.getClusterName();
+        // It's almost impossible that db's cluster name is null, just in case
+        // because before user want to create database, he must first enter a cluster
+        // which means that cluster is set to current ConnectContext
+        // then when createDBStmt is executed, cluster name is set to Database
+        if (clusterName == null || clusterName.length() == 0) {
+            clusterName = SystemInfoService.DEFAULT_CLUSTER;
+        }
+        connectContext.setCluster(clusterName);
+        connectContext.setDatabase(db.getFullName());
+        analyzer = new Analyzer(Env.getCurrentEnv(), connectContext);
+    }
+
     /**
      * runPendingJob():
      * 1. Create all rollup replicas and wait them finished.
@@ -328,9 +352,9 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable {
             partition.createRollupIndex(rollupIndex);
         }
 
-        tbl.setIndexMeta(rollupIndexId, rollupIndexName, rollupSchema, whereColumn, 0 /* init schema version */,
+        tbl.setIndexMeta(rollupIndexId, rollupIndexName, rollupSchema, 0 /* init schema version */,
                 rollupSchemaHash, rollupShortKeyColumnCount, TStorageType.COLUMN,
-                rollupKeysType, origStmt);
+                rollupKeysType, origStmt, analyzer != null ? new Analyzer(analyzer) : analyzer);
         tbl.rebuildFullSchema();
     }
 
@@ -840,26 +864,9 @@
         // parse the define stmt to schema
         SqlParser parser = new SqlParser(new SqlScanner(
                 new StringReader(origStmt.originStmt), SqlModeHelper.MODE_DEFAULT));
-        ConnectContext connectContext = new ConnectContext();
-        Database db;
-        try {
-            db = Env.getCurrentInternalCatalog().getDbOrMetaException(dbId);
-        } catch (MetaNotFoundException e) {
-            throw new IOException("error happens when parsing create materialized view stmt: " + origStmt, e);
-        }
-        String clusterName = db.getClusterName();
-        // It's almost impossible that db's cluster name is null, just in case
-        // because before user want to create database, he must first enter a cluster
-        // which means that cluster is set to current ConnectContext
-        // then when createDBStmt is executed, cluster name is set to Database
-        if (clusterName == null || clusterName.length() == 0) {
-            clusterName = SystemInfoService.DEFAULT_CLUSTER;
-        }
-        connectContext.setCluster(clusterName);
-        connectContext.setDatabase(db.getFullName());
-        Analyzer analyzer = new Analyzer(Env.getCurrentEnv(), connectContext);
         CreateMaterializedViewStmt stmt = null;
         try {
+            initAnalyzer();
             stmt = (CreateMaterializedViewStmt) SqlParserUtils.getStmt(parser, origStmt.idx);
             stmt.setIsReplay(true);
             stmt.analyze(analyzer);

@@ -497,7 +497,7 @@ public class CreateMaterializedViewStmt extends DdlStmt {
         return new MVColumnItem(type, mvAggregateType, defineExpr, mvColumnBuilder(defineExpr.toSql()));
     }
 
-    public Map<String, Expr> parseDefineExprWithoutAnalyze() throws AnalysisException {
+    public Map<String, Expr> parseDefineExpr(Analyzer analyzer) throws AnalysisException {
         Map<String, Expr> result = Maps.newHashMap();
         SelectList selectList = selectStmt.getSelectList();
         for (SelectListItem selectListItem : selectList.getItems()) {
@@ -513,7 +513,7 @@ public class CreateMaterializedViewStmt extends DdlStmt {
                 case FunctionSet.BITMAP_UNION:
                 case FunctionSet.HLL_UNION:
                 case FunctionSet.COUNT:
-                    MVColumnItem item = buildMVColumnItem(null, functionCallExpr);
+                    MVColumnItem item = buildMVColumnItem(analyzer, functionCallExpr);
                     expr = item.getDefineExpr();
                     name = item.getName();
                     break;

@@ -17,6 +17,7 @@
 
 package org.apache.doris.catalog;
 
+import org.apache.doris.analysis.Analyzer;
 import org.apache.doris.analysis.CreateMaterializedViewStmt;
 import org.apache.doris.analysis.Expr;
 import org.apache.doris.analysis.SlotRef;
@@ -188,7 +189,7 @@ public class MaterializedIndexMeta implements Writable, GsonPostProcessable {
         columnList += "]";
 
         for (Column column : schema) {
-            if (CreateMaterializedViewStmt.oldmvColumnBreaker(column.getName()).equals(name)) {
+            if (CreateMaterializedViewStmt.oldmvColumnBreaker(column.getName()).equalsIgnoreCase(name)) {
                 if (matchedColumn == null) {
                     matchedColumn = column;
                 } else {
@@ -198,6 +199,7 @@ public class MaterializedIndexMeta implements Writable, GsonPostProcessable {
                 }
             }
         }
+
         if (matchedColumn != null) {
             LOG.debug("trans old MV, MV: {}, DefineExpr:{}, DefineName:{}",
                     matchedColumn.getName(), entry.getValue().toSqlWithoutTbl(), entry.getKey());
@@ -265,6 +267,10 @@ public class MaterializedIndexMeta implements Writable, GsonPostProcessable {
     @Override
     public void gsonPostProcess() throws IOException {
         initColumnNameMap();
+        parseStmt(null);
+    }
+
+    public void parseStmt(Analyzer analyzer) throws IOException {
         // analyze define stmt
         if (defineStmt == null) {
             return;
@@ -275,11 +281,16 @@ public class MaterializedIndexMeta implements Writable, GsonPostProcessable {
         CreateMaterializedViewStmt stmt;
         try {
             stmt = (CreateMaterializedViewStmt) SqlParserUtils.getStmt(parser, defineStmt.idx);
-            setWhereClause(stmt.getWhereClause());
+            if (analyzer != null) {
+                stmt.analyze(analyzer);
+            }
+
             stmt.setIsReplay(true);
+            setWhereClause(stmt.getWhereClause());
             stmt.rewriteToBitmapWithCheck();
-            Map<String, Expr> columnNameToDefineExpr = stmt.parseDefineExprWithoutAnalyze();
+            Map<String, Expr> columnNameToDefineExpr = stmt.parseDefineExpr(analyzer);
             setColumnsDefineExpr(columnNameToDefineExpr);
+
         } catch (Exception e) {
             throw new IOException("error happens when parsing create materialized view stmt: " + defineStmt, e);
         }

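The ordering in MaterializedIndexMeta.parseStmt above is the heart of the fix: setWhereClause(...) now runs after stmt.analyze(...), so the recorded WHERE clause comes from the analyzed statement; gsonPostProcess() delegates with a null analyzer, in which case parseStmt still parses and records what it can. A hedged usage sketch, condensed from the OlapTable hunks that follow:

```java
// How a caller feeds parseStmt (condensed from OlapTable.setIndexMeta below);
// replay paths that own an Analyzer pass it through, GSON deserialization passes null.
MaterializedIndexMeta indexMeta = new MaterializedIndexMeta(indexId, schema, schemaVersion,
        schemaHash, shortKeyColumnCount, storageType, keysType, origStmt);
try {
    indexMeta.parseStmt(analyzer); // analyzer may be null; parseStmt tolerates it
} catch (Exception e) {
    LOG.warn("parse meta stmt failed", e);
}
```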
@@ -19,6 +19,7 @@ package org.apache.doris.catalog;
 
 import org.apache.doris.alter.MaterializedViewHandler;
 import org.apache.doris.analysis.AggregateInfo;
+import org.apache.doris.analysis.Analyzer;
 import org.apache.doris.analysis.ColumnDef;
 import org.apache.doris.analysis.CreateTableStmt;
 import org.apache.doris.analysis.DataSortInfo;
@@ -288,14 +289,15 @@ public class OlapTable extends Table {
 
     public void setIndexMeta(long indexId, String indexName, List<Column> schema, int schemaVersion, int schemaHash,
             short shortKeyColumnCount, TStorageType storageType, KeysType keysType) {
-        setIndexMeta(indexId, indexName, schema, null, schemaVersion, schemaHash, shortKeyColumnCount, storageType,
+        setIndexMeta(indexId, indexName, schema, schemaVersion, schemaHash, shortKeyColumnCount, storageType,
                 keysType,
-                null);
+                null, null);
     }
 
-    public void setIndexMeta(long indexId, String indexName, List<Column> schema, Column whereColumn, int schemaVersion,
+    public void setIndexMeta(long indexId, String indexName, List<Column> schema, int schemaVersion,
             int schemaHash,
-            short shortKeyColumnCount, TStorageType storageType, KeysType keysType, OriginStatement origStmt) {
+            short shortKeyColumnCount, TStorageType storageType, KeysType keysType, OriginStatement origStmt,
+            Analyzer analyzer) {
         // Nullable when meta comes from schema change log replay.
         // The replay log only save the index id, so we need to get name by id.
         if (indexName == null) {
@@ -319,8 +321,10 @@ public class OlapTable extends Table {
 
         MaterializedIndexMeta indexMeta = new MaterializedIndexMeta(indexId, schema, schemaVersion,
                 schemaHash, shortKeyColumnCount, storageType, keysType, origStmt);
-        if (whereColumn != null) {
-            indexMeta.setWhereClause(whereColumn.getDefineExpr());
+        try {
+            indexMeta.parseStmt(analyzer);
+        } catch (Exception e) {
+            LOG.warn("parse meta stmt failed", e);
         }
 
         indexIdToMeta.put(indexId, indexMeta);

@@ -78,10 +78,10 @@ public class Util {
         TYPE_STRING_MAP.put(PrimitiveType.VARCHAR, "varchar(%d)");
         TYPE_STRING_MAP.put(PrimitiveType.JSONB, "jsonb");
         TYPE_STRING_MAP.put(PrimitiveType.STRING, "string");
-        TYPE_STRING_MAP.put(PrimitiveType.DECIMALV2, "decimal(%d,%d)");
-        TYPE_STRING_MAP.put(PrimitiveType.DECIMAL32, "decimal(%d,%d)");
-        TYPE_STRING_MAP.put(PrimitiveType.DECIMAL64, "decimal(%d,%d)");
-        TYPE_STRING_MAP.put(PrimitiveType.DECIMAL128, "decimal(%d,%d)");
+        TYPE_STRING_MAP.put(PrimitiveType.DECIMALV2, "decimal(%d, %d)");
+        TYPE_STRING_MAP.put(PrimitiveType.DECIMAL32, "decimal(%d, %d)");
+        TYPE_STRING_MAP.put(PrimitiveType.DECIMAL64, "decimal(%d, %d)");
+        TYPE_STRING_MAP.put(PrimitiveType.DECIMAL128, "decimal(%d, %d)");
         TYPE_STRING_MAP.put(PrimitiveType.HLL, "varchar(%d)");
         TYPE_STRING_MAP.put(PrimitiveType.BOOLEAN, "bool");
         TYPE_STRING_MAP.put(PrimitiveType.BITMAP, "bitmap");

@@ -480,7 +480,7 @@ public class MasterImpl {
 
         PublishVersionTask publishVersionTask = (PublishVersionTask) task;
         publishVersionTask.addErrorTablets(errorTabletIds);
-        publishVersionTask.setIsFinished(true);
+        publishVersionTask.setFinished(true);
 
         if (request.getTaskStatus().getStatusCode() != TStatusCode.OK) {
             // not remove the task from queue and be will retry

@@ -33,7 +33,6 @@ public class PublishVersionTask extends AgentTask {
     private long transactionId;
     private List<TPartitionVersionInfo> partitionVersionInfos;
     private List<Long> errorTablets;
-    private boolean isFinished;
 
     public PublishVersionTask(long backendId, long transactionId, long dbId,
             List<TPartitionVersionInfo> partitionVersionInfos, long createTime) {
@@ -69,12 +68,4 @@ public class PublishVersionTask extends AgentTask {
         }
         this.errorTablets.addAll(errorTablets);
     }
-
-    public void setIsFinished(boolean isFinished) {
-        this.isFinished = isFinished;
-    }
-
-    public boolean isFinished() {
-        return isFinished;
-    }
 }

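Cleanup rather than bug fix: PublishVersionTask kept its own isFinished flag shadowing the one on AgentTask, so the field and its accessors are deleted and MasterImpl switches to the inherited setFinished(true). A hedged sketch of the base-class shape this relies on (method names taken from the MasterImpl hunk, not verified against the full AgentTask source):

```java
// Assumed shape of the inherited flag on AgentTask.
public abstract class AgentTask {
    private boolean isFinished = false;

    public void setFinished(boolean isFinished) {
        this.isFinished = isFinished;
    }

    public boolean isFinished() {
        return isFinished;
    }
}
```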
@@ -315,7 +315,7 @@ public class RollupJobV2Test {
 
     @Test
     public void testSerializeOfRollupJob(@Mocked CreateMaterializedViewStmt stmt)
-            throws IOException {
+            throws IOException, AnalysisException {
         // prepare file
         File file = new File(fileName);
         file.createNewFile();

@@ -140,9 +140,9 @@ public class CreateFunctionTest {
 
         queryStr = "select db1.decimal(k3, 4, 1) from db1.tbl1;";
         if (Config.enable_decimal_conversion) {
-            Assert.assertTrue(dorisAssert.query(queryStr).explainQuery().contains("CAST(`k3` AS DECIMALV3(4,1))"));
+            Assert.assertTrue(dorisAssert.query(queryStr).explainQuery().contains("CAST(`k3` AS DECIMALV3(4, 1))"));
         } else {
-            Assert.assertTrue(dorisAssert.query(queryStr).explainQuery().contains("CAST(`k3` AS DECIMAL(4,1))"));
+            Assert.assertTrue(dorisAssert.query(queryStr).explainQuery().contains("CAST(`k3` AS DECIMAL(4, 1))"));
         }
 
         // cast any type to varchar with fixed length
@@ -249,9 +249,9 @@ public class CreateFunctionTest {
 
         queryStr = "select decimal(k3, 4, 1) from db2.tbl1;";
         if (Config.enable_decimal_conversion) {
-            Assert.assertTrue(dorisAssert.query(queryStr).explainQuery().contains("CAST(`k3` AS DECIMALV3(4,1))"));
+            Assert.assertTrue(dorisAssert.query(queryStr).explainQuery().contains("CAST(`k3` AS DECIMALV3(4, 1))"));
         } else {
-            Assert.assertTrue(dorisAssert.query(queryStr).explainQuery().contains("CAST(`k3` AS DECIMAL(4,1))"));
+            Assert.assertTrue(dorisAssert.query(queryStr).explainQuery().contains("CAST(`k3` AS DECIMAL(4, 1))"));
         }
 
         // 5. cast any type to varchar with fixed length

@@ -96,7 +96,7 @@ public class MaterializedIndexMetaTest {
         columnNameToDefineExpr.put(mvColumnName, new FunctionCallExpr(new FunctionName("to_bitmap"), params));
         new Expectations() {
             {
-                stmt.parseDefineExprWithoutAnalyze();
+                stmt.parseDefineExpr(null);
                 result = columnNameToDefineExpr;
             }
         };

@@ -11,11 +11,11 @@ char_50_key CHAR(50) No true \N BLOOM_FILTER
 character_key VARCHAR(500) No true \N BLOOM_FILTER
 char_key CHAR(1) No true \N BLOOM_FILTER
 character_most_key VARCHAR(65533) No true \N BLOOM_FILTER
-decimal_key DECIMAL(20,6) No true \N BLOOM_FILTER
-decimal_most_key DECIMAL(27,9) No true \N BLOOM_FILTER
-decimal32_key DECIMALV3(5,1) No true \N BLOOM_FILTER
-decimal64_key DECIMALV3(14,1) No true \N BLOOM_FILTER
-decimal128_key DECIMALV3(38,1) No true \N BLOOM_FILTER
+decimal_key DECIMAL(20, 6) No true \N BLOOM_FILTER
+decimal_most_key DECIMAL(27, 9) No true \N BLOOM_FILTER
+decimal32_key DECIMALV3(5, 1) No true \N BLOOM_FILTER
+decimal64_key DECIMALV3(14, 1) No true \N BLOOM_FILTER
+decimal128_key DECIMALV3(38, 1) No true \N BLOOM_FILTER
 date_key DATE No true \N BLOOM_FILTER
 datetime_key DATETIME No true \N BLOOM_FILTER
 datev2_key DATEV2 No true \N BLOOM_FILTER
@@ -30,11 +30,11 @@ char_50_value CHAR(50) No false \N REPLACE
 character_value VARCHAR(500) No false \N REPLACE
 char_value CHAR(1) No false \N REPLACE
 character_most_value VARCHAR(65533) No false \N REPLACE
-decimal_value DECIMAL(20,6) No false \N SUM
-decimal_most_value DECIMAL(27,9) No false \N SUM
-decimal32_value DECIMALV3(5,1) No false \N SUM
-decimal64_value DECIMALV3(14,1) No false \N SUM
-decimal128_value DECIMALV3(38,1) No false \N SUM
+decimal_value DECIMAL(20, 6) No false \N SUM
+decimal_most_value DECIMAL(27, 9) No false \N SUM
+decimal32_value DECIMALV3(5, 1) No false \N SUM
+decimal64_value DECIMALV3(14, 1) No false \N SUM
+decimal128_value DECIMALV3(38, 1) No false \N SUM
 date_value_max DATE No false \N MAX
 date_value_replace DATE No false \N REPLACE
 date_value_min DATE No false \N MIN
@@ -293,6 +293,6 @@ s_name TEXT Yes false \N NONE
 s_address TEXT Yes false \N NONE
 s_nationkey INT Yes false \N NONE
 s_phone TEXT Yes false \N NONE
-s_acctbal DECIMAL(9,0) Yes false \N NONE
+s_acctbal DECIMAL(9, 0) Yes false \N NONE
 s_comment TEXT Yes false \N NONE

@@ -9,7 +9,7 @@ internal regression_test_datatype_p0_scalar_types tbl_scalar_types_dup c_bigint
 internal regression_test_datatype_p0_scalar_types tbl_scalar_types_dup c_largeint 7 \N YES bigint unsigned \N \N 39 \N \N \N \N largeint 39 \N \N \N
 internal regression_test_datatype_p0_scalar_types tbl_scalar_types_dup c_float 8 \N YES float \N \N 7 7 \N \N \N float 7 7 \N \N
 internal regression_test_datatype_p0_scalar_types tbl_scalar_types_dup c_double 9 \N YES double \N \N 15 15 \N \N \N double 15 15 \N \N
-internal regression_test_datatype_p0_scalar_types tbl_scalar_types_dup c_decimal 10 \N YES decimal \N \N 20 3 \N \N \N decimal(20,3) 20 3 \N \N
+internal regression_test_datatype_p0_scalar_types tbl_scalar_types_dup c_decimal 10 \N YES decimal \N \N 20 3 \N \N \N decimal(20, 3) 20 3 \N \N
 internal regression_test_datatype_p0_scalar_types tbl_scalar_types_dup c_decimalv3 11 \N YES decimal \N \N 20 3 \N \N \N decimalv3(20, 3) 20 3 \N \N
 internal regression_test_datatype_p0_scalar_types tbl_scalar_types_dup c_date 12 \N YES date \N \N \N \N \N \N \N date \N \N \N \N
 internal regression_test_datatype_p0_scalar_types tbl_scalar_types_dup c_datetime 13 \N YES datetime \N \N \N \N \N \N \N datetime \N \N \N \N

@@ -9,7 +9,7 @@ k6 VARCHAR(1) Yes false \N NONE
 k7 DATE Yes false \N NONE
 k8 DATETIME Yes false \N NONE
 k9 LARGEINT Yes false \N NONE
-k10 DECIMAL(9,0) Yes false \N NONE
+k10 DECIMAL(9, 0) Yes false \N NONE
 k11 BOOLEAN Yes false \N NONE
 k12 DATEV2 Yes false \N NONE
 k13 DATETIMEV2(0) Yes false \N NONE
@@ -46,7 +46,7 @@ k6 VARCHAR(1) Yes true \N
 k7 DATE Yes true \N
 k8 DATETIME Yes true \N
 k9 LARGEINT Yes true \N
-k10 DECIMAL(9,0) Yes true \N
+k10 DECIMAL(9, 0) Yes true \N
 k11 BOOLEAN Yes true \N
 k12 DATEV2 Yes true \N
 k13 DATETIMEV2(0) Yes true \N
@@ -84,7 +84,7 @@ k6 VARCHAR(1) Yes true \N
 k7 DATE Yes true \N
 k8 DATETIME Yes true \N
 k9 LARGEINT Yes true \N
-k10 DECIMAL(9,0) Yes true \N
+k10 DECIMAL(9, 0) Yes true \N
 k11 BOOLEAN Yes true \N
 k12 DATEV2 Yes false \N REPLACE
 k13 DATETIMEV2(0) Yes false \N REPLACE

@@ -1,6 +1,6 @@
 -- This file is automatically generated. You should know what you did if you want to edit this
 -- !sql --
-a DECIMAL(12,6) No true \N
+a DECIMAL(12, 6) No true \N
 
 -- !sql --
 default_cluster:regression_test_index_p0.test_decimal_bitmap_index_multi_page bitmap_index_multi_page a BITMAP

@@ -9,7 +9,7 @@ k6 VARCHAR(1) Yes false \N NONE
 k7 DATE Yes false \N NONE
 k8 DATETIME Yes false \N NONE
 k9 LARGEINT Yes false \N NONE
-k10 DECIMAL(9,0) Yes false \N NONE
+k10 DECIMAL(9, 0) Yes false \N NONE
 k11 BOOLEAN Yes false \N NONE
 
 -- !sql --
@@ -38,7 +38,7 @@ k6 VARCHAR(1) Yes true \N
 k7 DATE Yes true \N
 k8 DATETIME Yes true \N
 k9 LARGEINT Yes true \N
-k10 DECIMAL(9,0) Yes true \N
+k10 DECIMAL(9, 0) Yes true \N
 k11 BOOLEAN Yes true \N
 v1 INT Yes false \N SUM
 
@@ -68,7 +68,7 @@ k6 VARCHAR(1) Yes true \N
 k7 DATE Yes true \N
 k8 DATETIME Yes true \N
 k9 LARGEINT Yes true \N
-k10 DECIMAL(9,0) Yes true \N
+k10 DECIMAL(9, 0) Yes true \N
 k11 BOOLEAN Yes true \N
 v1 INT Yes false \N REPLACE

@@ -62,7 +62,7 @@ suite("nereids_explain") {
             when 1>1 then cast(1 as float)
             else 0.0 end;
         """
-        contains "SlotDescriptor{id=0, col=null, colUniqueId=null, type=DECIMAL(14,7), nullable=false}"
+        contains "SlotDescriptor{id=0, col=null, colUniqueId=null, type=DECIMAL(14, 7), nullable=false}"
     }
 
     def explainStr = sql("select sum(if(lo_tax=1,lo_tax,0)) from lineorder where false").toString()