Fix window function with limit zero bug 2 (#4235)

This commit is contained in:
kangkaisen
2020-08-10 10:29:05 +08:00
committed by GitHub
parent c81862ebec
commit f516172f23
3 changed files with 52 additions and 22 deletions

View File

@ -1567,6 +1567,13 @@ public class SelectStmt extends QueryStmt {
return strBuilder.toString();
}
/**
* If the select statement has a sort/top that is evaluated, then the sort tuple
* is materialized. Else, if there is aggregation then the aggregate tuple id is
* materialized. Otherwise, all referenced tables are materialized as long as they are
* not semi-joined. If there are analytics and no sort, then the returned tuple
* ids also include the logical analytic output tuple.
*/
@Override
public void getMaterializedTupleIds(ArrayList<TupleId> tupleIdList) {
// If select statement has an aggregate, then the aggregate tuple id is materialized.
@ -1585,6 +1592,7 @@ public class SelectStmt extends QueryStmt {
tupleIdList.addAll(tblRef.getMaterializedTupleIds());
}
}
// Fixme(kks): get tuple id from analyticInfo is wrong, should get from AnalyticEvalNode
// We materialize the agg tuple or the table refs together with the analytic tuple.
if (hasAnalyticInfo() && isEvaluateOrderBy()) {
tupleIdList.add(analyticInfo.getOutputTupleId());

View File

@ -208,27 +208,6 @@ public class SingleNodePlanner {
*/
private PlanNode createQueryPlan(QueryStmt stmt, Analyzer analyzer, long defaultOrderByLimit)
throws UserException {
if (analyzer.hasEmptyResultSet()) {
PlanNode node = createEmptyNode(stmt, analyzer);
// handle window function with limit zero
if (stmt instanceof SelectStmt) {
SelectStmt selectStmt = (SelectStmt) stmt;
if (selectStmt.getAnalyticInfo() != null) {
AnalyticInfo analyticInfo = selectStmt.getAnalyticInfo();
AnalyticPlanner analyticPlanner = new AnalyticPlanner(analyticInfo, analyzer, ctx_);
List<Expr> inputPartitionExprs = Lists.newArrayList();
AggregateInfo aggInfo = selectStmt.getAggInfo();
PlanNode root = analyticPlanner.createSingleNodePlan(node,
aggInfo != null ? aggInfo.getGroupingExprs() : null, inputPartitionExprs);
// In order to substitute the analytic expr with slot in result exprs
node.setOutputSmap(root.outputSmap);
}
}
return node;
}
long newDefaultOrderByLimit = defaultOrderByLimit;
if (newDefaultOrderByLimit == -1) {
newDefaultOrderByLimit = 65535;
@ -300,6 +279,21 @@ public class SingleNodePlanner {
if (stmt.getAssertNumRowsElement() != null) {
root = createAssertRowCountNode(root, stmt.getAssertNumRowsElement(), analyzer);
}
if (analyzer.hasEmptyResultSet()) {
// Must clear the scanNodes, otherwise we will get NPE in Coordinator::computeScanRangeAssignment
scanNodes.clear();
PlanNode node = createEmptyNode(stmt, analyzer);
// Ensure result exprs will be substituted by the right outputSmap
node.setOutputSmap(root.outputSmap);
// Currently, getMaterializedTupleIds for AnalyticEvalNode is wrong,
// so we explicitly add AnalyticEvalNode tuple ids to EmptySetNode
if (root instanceof AnalyticEvalNode) {
node.getTupleIds().addAll(root.tupleIds);
}
return node;
}
return root;
}
@ -687,7 +681,7 @@ public class SingleNodePlanner {
rowTuples.addAll(tblRef.getMaterializedTupleIds());
}
if (analyzer.hasEmptySpjResultSet()) {
if (analyzer.hasEmptySpjResultSet() && selectStmt.getAggInfo() != null) {
final PlanNode emptySetNode = new EmptySetNode(ctx_.getNextNodeId(), rowTuples);
emptySetNode.init(analyzer);
emptySetNode.setOutputSmap(selectStmt.getBaseTblSmap());

View File

@ -17,6 +17,7 @@
package org.apache.doris.planner;
import com.google.common.collect.Lists;
import org.apache.doris.analysis.CreateDbStmt;
import org.apache.doris.analysis.CreateTableStmt;
import org.apache.doris.analysis.DropDbStmt;
@ -969,4 +970,31 @@ public class QueryPlanTest {
explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, queryStr);
Assert.assertTrue(explainString.contains("INNER JOIN (BROADCAST)"));
}
@Test
public void testEmptyNode() throws Exception {
    // Queries that can never return rows (LIMIT 0 or an always-false
    // predicate) must plan an EMPTYSET node, and any analytic function
    // (dense_rank here) must be pruned from the resulting plan.
    connectContext.setDatabase("default_cluster:test");
    String emptyNode = "EMPTYSET";
    String denseRank = "dense_rank";

    List<String> sqls = Lists.newArrayList();
    sqls.add("explain select * from baseall limit 0");
    sqls.add("explain select count(*) from baseall limit 0;");
    sqls.add("explain select k3, dense_rank() OVER () AS rank FROM baseall limit 0;");
    sqls.add("explain select rank from (select k3, dense_rank() OVER () AS rank FROM baseall) a limit 0;");
    sqls.add("explain select * from baseall join bigtable as b limit 0");
    sqls.add("explain select * from baseall where 1 = 2");
    sqls.add("explain select count(*) from baseall where 1 = 2;");
    sqls.add("explain select k3, dense_rank() OVER () AS rank FROM baseall where 1 =2;");
    sqls.add("explain select rank from (select k3, dense_rank() OVER () AS rank FROM baseall) a where 1 =2;");
    sqls.add("explain select * from baseall join bigtable as b where 1 = 2");

    for (String sql : sqls) {
        String explainString = UtFrameUtils.getSQLPlanOrErrorMsg(connectContext, sql);
        // Message includes the offending SQL so a failure pinpoints the case.
        Assert.assertTrue("expected EMPTYSET in plan for: " + sql,
                explainString.contains(emptyNode));
        Assert.assertFalse("unexpected dense_rank in plan for: " + sql,
                explainString.contains(denseRank));
    }
}
}