[feature](planner) Remove the restriction that OFFSET requires ORDER BY (#15218)

Support SELECT * FROM tbl LIMIT 5, 3; that is, skip the first 5 rows and return at most 3, even when the query has no ORDER BY clause.
VExchangeNode (BE exec node, C++ source):

@@ -37,7 +37,8 @@ VExchangeNode::VExchangeNode(ObjectPool* pool, const TPlanNode& tnode, const Des
           std::vector<bool>(tnode.nullable_tuples.begin(),
                             tnode.nullable_tuples.begin() +
                                     tnode.exchange_node.input_row_tuples.size())),
-          _offset(tnode.exchange_node.__isset.offset ? tnode.exchange_node.offset : 0) {}
+          _offset(tnode.exchange_node.__isset.offset ? tnode.exchange_node.offset : 0),
+          _num_rows_skipped(0) {}

 Status VExchangeNode::init(const TPlanNode& tnode, RuntimeState* state) {
     RETURN_IF_ERROR(ExecNode::init(tnode, state));
@@ -102,6 +103,16 @@ Status VExchangeNode::get_next(RuntimeState* state, Block* block, bool* eos) {
     }
     auto status = _stream_recvr->get_next(block, eos);
     if (block != nullptr) {
+        if (!_is_merging) {
+            if (_num_rows_skipped + block->rows() < _offset) {
+                _num_rows_skipped += block->rows();
+                block->set_num_rows(0);
+            } else if (_num_rows_skipped < _offset) {
+                auto offset = _offset - _num_rows_skipped;
+                _num_rows_skipped = _offset;
+                block->set_num_rows(block->rows() - offset);
+            }
+        }
         if (_num_rows_returned + block->rows() < _limit) {
             _num_rows_returned += block->rows();
         } else {
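For readers less familiar with the BE code, here is a minimal Java re-expression of the block-wise OFFSET handling that the hunk above adds to VExchangeNode::get_next; the class and method names are illustrative, not Doris APIs. Since there is no ORDER BY, only the number of surviving rows matters, so the node simply shrinks each incoming block until the offset is consumed.

```java
// Illustrative sketch only: mirrors the _num_rows_skipped / _offset arithmetic
// from the C++ hunk above. Names are hypothetical, not part of Doris.
public final class OffsetSkipper {
    private final long offset;    // rows to drop before anything is returned (_offset)
    private long numRowsSkipped;  // rows dropped so far across blocks (_num_rows_skipped)

    public OffsetSkipper(long offset) {
        this.offset = offset;
    }

    /** Returns how many rows of an incoming block of blockRows rows survive the offset. */
    public long apply(long blockRows) {
        if (numRowsSkipped + blockRows < offset) {
            // Whole block still falls inside the offset window: emit nothing.
            numRowsSkipped += blockRows;
            return 0;
        } else if (numRowsSkipped < offset) {
            // The offset boundary lands inside this block: trim the skipped part.
            long toSkip = offset - numRowsSkipped;
            numRowsSkipped = offset;
            return blockRows - toSkip;
        }
        // Offset already satisfied: the block passes through untouched.
        return blockRows;
    }

    public static void main(String[] args) {
        OffsetSkipper skipper = new OffsetSkipper(5); // e.g. LIMIT 5, 3
        for (long rows : new long[] {4, 4, 4}) {
            System.out.println(skipper.apply(rows)); // prints 0, 3, 4
        }
        // The existing _num_rows_returned / _limit check in get_next then caps
        // the surviving rows at 3 for LIMIT 5, 3.
    }
}
```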
VExchangeNode (BE exec node, C++ header):

@@ -57,6 +57,7 @@ private:

     // use in merge sort
     size_t _offset;
+    int64_t _num_rows_skipped;
     VSortExecExprs _vsort_exec_exprs;
     std::vector<bool> _is_asc_order;
     std::vector<bool> _nulls_first;
QueryStmt.java (FE):

@@ -26,6 +26,7 @@ import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.ErrorCode;
 import org.apache.doris.common.ErrorReport;
 import org.apache.doris.common.UserException;
+import org.apache.doris.common.util.VectorizedUtil;
 import org.apache.doris.rewrite.ExprRewriter;

 import com.google.common.base.Preconditions;
@@ -187,8 +188,7 @@ public abstract class QueryStmt extends StatementBase implements Queriable {
     }

     private void analyzeLimit(Analyzer analyzer) throws AnalysisException {
-        // TODO chenhao
-        if (limitElement.getOffset() > 0 && !hasOrderByClause()) {
+        if (!VectorizedUtil.isVectorized() && limitElement.getOffset() > 0 && !hasOrderByClause()) {
             throw new AnalysisException("OFFSET requires an ORDER BY clause: "
                     + limitElement.toSql().trim());
         }
@@ -621,10 +621,11 @@ public abstract class QueryStmt extends StatementBase implements Queriable {
         return limitElement.getLimit();
     }

-    public void setLimit(long limit) throws AnalysisException {
+    public void setLimit(long limit) {
         Preconditions.checkState(limit >= 0);
         long newLimit = hasLimitClause() ? Math.min(limit, getLimit()) : limit;
-        limitElement = new LimitElement(newLimit);
+        long offset = hasLimitClause() ? getOffset() : 0;
+        limitElement = new LimitElement(offset, newLimit);
     }

     public void removeLimitElement() {
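As a hedged illustration of the setLimit change above: the old version rebuilt the LimitElement from the clamped limit alone and so dropped any existing offset, while the new version carries the offset over. A minimal stand-alone sketch, using a simplified stand-in for LimitElement rather than the real class:

```java
// Simplified sketch, not Doris code: LimitClause stands in for LimitElement.
public final class LimitClampDemo {
    record LimitClause(long offset, long limit) {}

    // Mirrors the new QueryStmt.setLimit: clamp the limit, keep the offset.
    static LimitClause setLimit(LimitClause current, long limit) {
        boolean hasLimitClause = current != null;
        long newLimit = hasLimitClause ? Math.min(limit, current.limit()) : limit;
        long offset = hasLimitClause ? current.offset() : 0;  // previously dropped, now preserved
        return new LimitClause(offset, newLimit);
    }

    public static void main(String[] args) {
        // An existing "LIMIT 5, 3" clamped to 2 keeps the offset: LimitClause[offset=5, limit=2]
        System.out.println(setLimit(new LimitClause(5, 3), 2));
        // No prior LIMIT clause: LimitClause[offset=0, limit=10]
        System.out.println(setLimit(null, 10));
    }
}
```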
DistributedPlanner.java (FE):

@@ -236,7 +236,8 @@ public class DistributedPlanner {
         // move 'result' to end, it depends on all of its children
         fragments.remove(result);
         fragments.add(result);
-        if (!isPartitioned && result.isPartitioned() && result.getPlanRoot().getNumInstances() > 1) {
+        if ((!isPartitioned && result.isPartitioned() && result.getPlanRoot().getNumInstances() > 1)
+                || (!(root instanceof SortNode) && root.hasOffset())) {
             result = createMergeFragment(result);
             fragments.add(result);
         }
@@ -251,7 +252,7 @@ public class DistributedPlanner {
      */
     private PlanFragment createMergeFragment(PlanFragment inputFragment)
             throws UserException {
-        Preconditions.checkState(inputFragment.isPartitioned());
+        Preconditions.checkState(inputFragment.isPartitioned() || inputFragment.getPlanRoot().hasOffset());

         // exchange node clones the behavior of its input, aside from the conjuncts
         ExchangeNode mergePlan =
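A rough reading of the new condition, sketched as a stand-alone predicate (not Doris code): an OFFSET has to be applied in exactly one place, and a SortNode root is presumably already handled by the existing ORDER BY merge path, so any other plan root carrying an offset is routed through an unpartitioned merge fragment whose single exchange node performs the skip.

```java
// Sketch only: restates the decision added in the hunk above as a predicate.
static boolean needsMergeFragment(boolean isPartitionedRequested,
                                  boolean resultIsPartitioned,
                                  int numInstances,
                                  boolean rootIsSortNode,
                                  boolean rootHasOffset) {
    // Existing case: an unpartitioned result is requested but the root fragment
    // still runs on several instances, so its rows must be funnelled together.
    boolean coalesceResult = !isPartitionedRequested && resultIsPartitioned && numInstances > 1;
    // New case: the plan root carries an OFFSET that no sort/merge will consume,
    // so a single-instance exchange is needed to skip the rows exactly once.
    boolean offsetWithoutSort = !rootIsSortNode && rootHasOffset;
    return coalesceResult || offsetWithoutSort;
}
```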
EmptySetNode.java (FE):

@@ -43,6 +43,7 @@ public class EmptySetNode extends PlanNode {
     public EmptySetNode(PlanNodeId id, ArrayList<TupleId> tupleIds) {
         super(id, tupleIds, "EMPTYSET", StatisticalType.EMPTY_SET_NODE);
         cardinality = 0L;
+        offset = 0;
         Preconditions.checkArgument(tupleIds.size() > 0);
     }

ExchangeNode.java (FE):

@@ -30,6 +30,7 @@ import org.apache.doris.common.util.VectorizedUtil;
 import org.apache.doris.statistics.StatisticalType;
 import org.apache.doris.statistics.StatsRecursiveDerive;
 import org.apache.doris.thrift.TExchangeNode;
+import org.apache.doris.thrift.TExplainLevel;
 import org.apache.doris.thrift.TPlanNode;
 import org.apache.doris.thrift.TPlanNodeType;
 import org.apache.doris.thrift.TSortInfo;
@@ -81,8 +82,9 @@ public class ExchangeNode extends PlanNode {
         }
         // Only apply the limit at the receiver if there are multiple senders.
         if (inputNode.getFragment().isPartitioned()) {
-            limit = inputNode.limit;
+            limit = inputNode.limit - inputNode.offset;
         }
+        offset = inputNode.offset;
         computeTupleIds();

     }
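A worked example of the receiver-side adjustment above, under an assumption: the sender-side node's limit already accounts for the offset, as the distributed top-n path arranges by letting each sender return up to offset + limit rows. If that assumption holds, for LIMIT 5, 3 each sender is capped at 8 rows, and the receiving exchange skips 5 of them and returns at most 8 - 5 = 3.

```java
// Worked arithmetic only; the sender-side cap of 8 is an assumption about how
// the limit is pushed down, not something stated in this diff.
public final class ReceiverLimitDemo {
    public static void main(String[] args) {
        long senderLimit = 8;   // assumed per-sender cap for LIMIT 5, 3 (offset + limit)
        long senderOffset = 5;

        long receiverLimit = senderLimit - senderOffset; // mirrors limit = inputNode.limit - inputNode.offset
        long receiverOffset = senderOffset;              // mirrors offset = inputNode.offset

        // Receiver skips 5 rows and returns at most 3, matching LIMIT 5, 3.
        System.out.println("receiver offset=" + receiverOffset + ", limit=" + receiverLimit);
    }
}
```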
@@ -162,8 +164,8 @@ public class ExchangeNode extends PlanNode {
                     Expr.treesToThrift(mergeInfo.getOrderingExprs()),
                     mergeInfo.getIsAscOrder(), mergeInfo.getNullsFirst());
             msg.exchange_node.setSortInfo(sortInfo);
-            msg.exchange_node.setOffset(offset);
         }
+        msg.exchange_node.setOffset(offset);
     }

     @Override
@@ -179,4 +181,9 @@ public class ExchangeNode extends PlanNode {
         return numInstances;
     }

+    @Override
+    public String getNodeExplainString(String prefix, TExplainLevel detailLevel) {
+        return prefix + "offset: " + offset + "\n";
+    }
+
 }
PlanNode.java (FE):

@@ -82,6 +82,7 @@ public abstract class PlanNode extends TreeNode<PlanNode> implements PlanStats {
     protected PlanNodeId id;  // unique w/in plan tree; assigned by planner
     protected PlanFragmentId fragmentId;  // assigned by planner after fragmentation step
     protected long limit; // max. # of rows to be returned; 0: no limit
+    protected long offset;

     // ids materialized by the tree rooted at this node
     protected ArrayList<TupleId> tupleIds;
@@ -151,6 +152,7 @@ public abstract class PlanNode extends TreeNode<PlanNode> implements PlanStats {
             StatisticalType statisticalType) {
         this.id = id;
         this.limit = -1;
+        this.offset = 0;
         // make a copy, just to be on the safe side
         this.tupleIds = Lists.newArrayList(tupleIds);
         this.tblRefIds = Lists.newArrayList(tupleIds);
@@ -177,6 +179,7 @@ public abstract class PlanNode extends TreeNode<PlanNode> implements PlanStats {
     protected PlanNode(PlanNodeId id, PlanNode node, String planNodeName, StatisticalType statisticalType) {
         this.id = id;
         this.limit = node.limit;
+        this.offset = node.offset;
         this.tupleIds = Lists.newArrayList(node.tupleIds);
         this.tblRefIds = Lists.newArrayList(node.tblRefIds);
         this.nullableTupleIds = Sets.newHashSet(node.nullableTupleIds);
@@ -258,6 +261,10 @@ public abstract class PlanNode extends TreeNode<PlanNode> implements PlanStats {
         return limit;
     }

+    public long getOffset() {
+        return offset;
+    }
+
     /**
      * Set the limit to the given limit only if the limit hasn't been set, or the new limit
      * is lower.
@@ -270,10 +277,27 @@ public abstract class PlanNode extends TreeNode<PlanNode> implements PlanStats {
         }
     }

+    public void setLimitAndOffset(long limit, long offset) {
+        if (this.limit == -1) {
+            this.limit = limit;
+        } else if (limit != -1) {
+            this.limit = Math.min(this.limit - offset, limit);
+        }
+        this.offset += offset;
+    }
+
+    public void setOffset(long offset) {
+        this.offset = offset;
+    }
+
     public boolean hasLimit() {
         return limit > -1;
     }

+    public boolean hasOffset() {
+        return offset != 0;
+    }
+
     public void setCardinality(long cardinality) {
         this.cardinality = cardinality;
     }
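setLimitAndOffset is the piece that lets limit/offset windows stack. A hedged sketch that just exercises the arithmetic from the hunk above by applying it twice to one object; the real planner's call sites may differ, but the composition is the same idea behind the nested-limit regression cases further down.

```java
// Sketch of the setLimitAndOffset arithmetic shown above, applied twice to
// illustrate how nested LIMIT ... OFFSET windows compose. Not the real call sequence.
public final class LimitOffsetCompose {
    private long limit = -1;  // -1 means "no limit yet", as in PlanNode
    private long offset = 0;

    public void setLimitAndOffset(long limit, long offset) {
        if (this.limit == -1) {
            this.limit = limit;
        } else if (limit != -1) {
            this.limit = Math.min(this.limit - offset, limit);
        }
        this.offset += offset;
    }

    public static void main(String[] args) {
        LimitOffsetCompose node = new LimitOffsetCompose();
        node.setLimitAndOffset(2, 1);  // inner "limit 1, 2": skip 1, keep 2
        node.setLimitAndOffset(1, 1);  // outer "limit 1, 1": skip 1 more, keep 1
        // Result: offset 2, limit min(2 - 1, 1) = 1 -> one row overall, which is
        // what the !limit12 expected output further down shows.
        System.out.println("offset=" + node.offset + ", limit=" + node.limit);
    }
}
```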
SingleNodePlanner.java (FE):

@@ -305,7 +305,7 @@ public class SingleNodePlanner {
             // from SelectStmt outside
             root = addUnassignedConjuncts(analyzer, root);
         } else {
-            root.setLimit(stmt.getLimit());
+            root.setLimitAndOffset(stmt.getLimit(), stmt.getOffset());
             root.computeStats(analyzer);
         }

SortNode.java (FE):

@@ -64,7 +64,6 @@ public class SortNode extends PlanNode {
     private final boolean useTopN;

     private boolean isDefaultLimit;
-    private long offset;
     // if true, the output of this node feeds an AnalyticNode
     private boolean isAnalyticSort;
     private DataPartition inputPartition;
@@ -130,14 +129,6 @@ public class SortNode extends PlanNode {
         this.inputPartition = inputPartition;
     }

-    public long getOffset() {
-        return offset;
-    }
-
-    public void setOffset(long offset) {
-        this.offset = offset;
-    }
-
     public SortInfo getSortInfo() {
         return info;
     }
Regression test expected output for the limit cases (.out):

@@ -1,7 +1,7 @@
 -- This file is automatically generated. You should know what you did if you want to edit this
 -- !limit1 --
 false 1 1989 1001 11011902 123.123 true 1989-03-21 1989-03-21T13:00 wangjuoo4 0.1 6.333 string12345 170141183460469231731687303715884105727
-false 2 1986 1001 11011903 1243.5 false 1901-12-31 1989-03-21T13:00 wangynnsf 20.268 789.25 string12345 -170141183460469231731687303715884105727
+false 2 1986 1001 11011903 1243.500 false 1901-12-31 1989-03-21T13:00 wangynnsf 20.268 789.25 string12345 -170141183460469231731687303715884105727

 -- !limit2 --

@@ -9,7 +9,7 @@ false 2 1986 1001 11011903 1243.5 false 1901-12-31 1989-03-21T13:00 wangynnsf 20

 -- !limit4 --
 false 1 1989 1001 11011902 123.123 true 1989-03-21 1989-03-21T13:00 wangjuoo4 0.1 6.333 string12345 170141183460469231731687303715884105727
-false 2 1986 1001 11011903 1243.5 false 1901-12-31 1989-03-21T13:00 wangynnsf 20.268 789.25 string12345 -170141183460469231731687303715884105727
+false 2 1986 1001 11011903 1243.500 false 1901-12-31 1989-03-21T13:00 wangynnsf 20.268 789.25 string12345 -170141183460469231731687303715884105727
 false 3 1989 1002 11011905 24453.325 false 2012-03-14 2000-01-01T00:00 yunlj8@nk 78945.0 3654.0 string12345 0

 -- !limit5 --
@@ -20,13 +20,13 @@ false 3 1989 1002 11011905 24453.325 false 2012-03-14 2000-01-01T00:00 yunlj8@nk

 -- !limit7 --
 false 1 1989 1001 11011902 123.123 true 1989-03-21 1989-03-21T13:00 wangjuoo4 0.1 6.333 string12345 170141183460469231731687303715884105727
-false 2 1986 1001 11011903 1243.5 false 1901-12-31 1989-03-21T13:00 wangynnsf 20.268 789.25 string12345 -170141183460469231731687303715884105727
+false 2 1986 1001 11011903 1243.500 false 1901-12-31 1989-03-21T13:00 wangynnsf 20.268 789.25 string12345 -170141183460469231731687303715884105727

 -- !limit8 --

 -- !limit9 --
 false 1 1989 1001 11011902 123.123 true 1989-03-21 1989-03-21T13:00 wangjuoo4 0.1 6.333 string12345 170141183460469231731687303715884105727
-false 2 1986 1001 11011903 1243.5 false 1901-12-31 1989-03-21T13:00 wangynnsf 20.268 789.25 string12345 -170141183460469231731687303715884105727
+false 2 1986 1001 11011903 1243.500 false 1901-12-31 1989-03-21T13:00 wangynnsf 20.268 789.25 string12345 -170141183460469231731687303715884105727
 false 3 1989 1002 11011905 24453.325 false 2012-03-14 2000-01-01T00:00 yunlj8@nk 78945.0 3654.0 string12345 0

 -- !limit10 --
@@ -35,3 +35,18 @@ false 3 1989 1002 11011905 24453.325 false 2012-03-14 2000-01-01T00:00 yunlj8@nk

 -- !limit11 --

+-- !limit12 --
+false 3 1989 1002 11011905 24453.325 false 2012-03-14 2000-01-01T00:00 yunlj8@nk 78945.0 3654.0 string12345 0
+
+-- !limit13 --
+false 3 1989 1002 11011905 24453.325 false 2012-03-14 2000-01-01T00:00 yunlj8@nk 78945.0 3654.0 string12345 0
+
+-- !limit14 --
+false 3 1989 1002 11011905 24453.325 false 2012-03-14 2000-01-01T00:00 yunlj8@nk 78945.0 3654.0 string12345 0
+
+-- !limit15 --
+
+-- !limit16 --
+
+-- !limit17 --
+
test_query_limit regression suite (Groovy). The new cases without ORDER BY assert only row counts via rowNum, since which rows come back is not deterministic:

@@ -31,4 +31,22 @@ suite("test_query_limit", "query,p0") {
     qt_limit9 "select * from ${tableName} order by k1, k2, k3, k4 desc limit 100"
     qt_limit10 "select k3, sum(k9) from ${tableName} where k1<5 group by 1 order by 2 limit 3"
     qt_limit11 "select * from (select * from ${tableName} union all select * from ${tableName2}) b limit 0"
+    qt_limit12 "select * from (select * from ${tableName} order by k1, k2, k3, k4 limit 1, 2) a limit 1, 1"
+    qt_limit13 "select * from (select * from ${tableName} order by k1, k2, k3, k4 limit 1, 2) a limit 1, 2"
+    qt_limit14 "select * from (select * from ${tableName} order by k1, k2, k3, k4 limit 1, 2) a limit 1, 3"
+    qt_limit15 "select * from (select * from ${tableName} order by k1, k2, k3, k4 limit 1, 2) a limit 2, 1"
+    qt_limit16 "select * from (select * from ${tableName} order by k1, k2, k3, k4 limit 1, 2) a limit 2, 2"
+    qt_limit17 "select * from (select * from ${tableName} order by k1, k2, k3, k4 limit 1, 2) a limit 2, 3"
+    test {
+        sql "select * from ${tableName} limit 1, 10"
+        rowNum 2
+    }
+    test {
+        sql "select * from ${tableName} limit 2, 10"
+        rowNum 1
+    }
+    test {
+        sql "select * from ${tableName} limit 3, 10"
+        rowNum 0
+    }
 }