[fix](Nereids) not do distinct when aggregate with distinct project (#36057)
pick from master #35899
@@ -951,7 +951,7 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla
        List<AggregateExpression> aggregateExpressionList = outputExpressions.stream()
                .filter(o -> o.anyMatch(AggregateExpression.class::isInstance))
                .peek(o -> aggFunctionOutput.add(o.toSlot()))
-               .map(o -> o.<Set<AggregateExpression>>collect(AggregateExpression.class::isInstance))
+               .map(o -> o.<AggregateExpression>collect(AggregateExpression.class::isInstance))
                .flatMap(Set::stream)
                .collect(Collectors.toList());
        ArrayList<FunctionCallExpr> execAggregateFunctions = aggregateExpressionList.stream()
@@ -3110,10 +3110,15 @@ public class LogicalPlanBuilder extends DorisParserBaseVisitor<Object> {
    }

    private LogicalPlan withProjection(LogicalPlan input, SelectColumnClauseContext selectCtx,
            Optional<AggClauseContext> aggCtx, boolean isDistinct) {
        return ParserUtils.withOrigin(selectCtx, () -> {
            if (aggCtx.isPresent()) {
-               return input;
+               if (isDistinct) {
+                   return new LogicalProject<>(ImmutableList.of(new UnboundStar(ImmutableList.of())),
+                           Collections.emptyList(), isDistinct, input);
+               } else {
+                   return input;
+               }
            } else {
                if (selectCtx.EXCEPT() != null) {
                    List<NamedExpression> expressions = getNamedExpressions(selectCtx.namedExpressionSeq());
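The parser change above keeps DISTINCT as an explicit project on top of the aggregate instead of silently dropping it. The standalone sketch below (a hypothetical DistinctOverGroupByDemo class with a toy Row record and plain Java streams, not Doris classes; the rows mirror the regression test's INSERT) shows why that matters: grouping by (b, a) and then projecting a can still yield duplicate values of a, so the DISTINCT has to be applied as a separate step.

import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import java.util.stream.Collectors;

public class DistinctOverGroupByDemo {
    // Hypothetical rows (id, a, b); same values as the test's INSERT: (83,0,38), (26,0,79), (43,81,24).
    record Row(int id, int a, int b) {}

    public static void main(String[] args) {
        List<Row> t = List.of(new Row(83, 0, 38), new Row(26, 0, 79), new Row(43, 81, 24));

        // GROUP BY b, a then project a: the value 0 appears twice, once per (b, a) group.
        List<Integer> projected = t.stream()
                .collect(Collectors.groupingBy(r -> List.of(r.b(), r.a())))
                .keySet().stream()
                .map(key -> key.get(1))
                .collect(Collectors.toList());

        // SELECT DISTINCT a ... GROUP BY b, a must deduplicate on top of the grouping: {0, 81},
        // which matches the expected output of the base_case query in the regression test.
        Set<Integer> distinct = new TreeSet<>(projected);
        System.out.println(projected.size() + " grouped values -> " + distinct);
    }
}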
@@ -60,6 +60,8 @@ public enum RuleType {
    FILL_UP_HAVING_AGGREGATE(RuleTypeClass.REWRITE),
    FILL_UP_HAVING_PROJECT(RuleTypeClass.REWRITE),
    FILL_UP_SORT_AGGREGATE(RuleTypeClass.REWRITE),
+   FILL_UP_SORT_AGGREGATE_AGGREGATE(RuleTypeClass.REWRITE),
+   FILL_UP_SORT_AGGREGATE_HAVING_AGGREGATE(RuleTypeClass.REWRITE),
    FILL_UP_SORT_HAVING_PROJECT(RuleTypeClass.REWRITE),
    FILL_UP_SORT_HAVING_AGGREGATE(RuleTypeClass.REWRITE),
    FILL_UP_SORT_PROJECT(RuleTypeClass.REWRITE),
@@ -740,16 +740,25 @@ public class BindExpression implements AnalysisRuleFactory {
    }

    private Plan bindSortWithoutSetOperation(MatchingContext<LogicalSort<Plan>> ctx) {
-       CascadesContext cascadesContext = ctx.cascadesContext;
        LogicalSort<Plan> sort = ctx.root;
        Plan input = sort.child();

        List<Slot> childOutput = input.getOutput();

+       // we should skip the distinct project to bind slots in LogicalSort;
+       // check input.child(0) to avoid processing SELECT DISTINCT a FROM t ORDER BY b by mistake.
+       // NOTICE: SELECT a FROM (SELECT sum(a) AS a FROM t GROUP BY b) v ORDER BY b will not produce a wrong result
+       // because input.child(0) is LogicalSubqueryAlias
+       if (input instanceof LogicalProject && ((LogicalProject<?>) input).isDistinct()
+               && (input.child(0) instanceof LogicalHaving
+               || input.child(0) instanceof LogicalAggregate
+               || input.child(0) instanceof LogicalRepeat)) {
+           input = input.child(0);
+       }
        // we should skip LogicalHaving to bind slots in LogicalSort;
        if (input instanceof LogicalHaving) {
            input = input.child(0);
        }
+       CascadesContext cascadesContext = ctx.cascadesContext;

        // 1. We should deduplicate the slots, otherwise the binding process will fail due to the
        // ambiguous slots exist.
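A toy rendering of the skipping rule the new comments describe may make the two cases easier to compare. The classes below are hypothetical stand-ins, not the real Nereids plan nodes, and LogicalRepeat is omitted: a distinct project sitting directly on an aggregation-like node is skipped so ORDER BY binds against the node below it, while a project over a subquery alias is left alone.

import java.util.List;

public class SortBindingSkipSketch {
    // Toy plan nodes; only the tree shape matters for the rule.
    interface Plan {
        List<Plan> children();

        default Plan child(int i) {
            return children().get(i);
        }
    }

    record Project(boolean distinct, Plan child) implements Plan {
        public List<Plan> children() { return List.of(child); }
    }

    record Having(Plan child) implements Plan {
        public List<Plan> children() { return List.of(child); }
    }

    record Aggregate(Plan child) implements Plan {
        public List<Plan> children() { return List.of(child); }
    }

    record SubqueryAlias(Plan child) implements Plan {
        public List<Plan> children() { return List.of(child); }
    }

    record Scan() implements Plan {
        public List<Plan> children() { return List.of(); }
    }

    // Mirrors the two skip steps added to bindSortWithoutSetOperation above.
    static Plan bindTarget(Plan input) {
        if (input instanceof Project p && p.distinct()
                && (p.child() instanceof Having || p.child() instanceof Aggregate)) {
            input = input.child(0);
        }
        if (input instanceof Having) {
            input = input.child(0);
        }
        return input;
    }

    public static void main(String[] args) {
        // SELECT DISTINCT a FROM t GROUP BY a HAVING b > 0 ORDER BY a:
        // bind ORDER BY against the aggregate under the distinct project and the having.
        Plan distinctOverHaving = new Project(true, new Having(new Aggregate(new Scan())));
        System.out.println(bindTarget(distinctOverHaving));

        // SELECT DISTINCT x FROM (SELECT sum(a) AS x FROM t GROUP BY b) v ORDER BY x (hypothetical):
        // the child is a subquery alias, so nothing is skipped.
        Plan distinctOverAlias = new Project(true, new SubqueryAlias(new Aggregate(new Scan())));
        System.out.println(bindTarget(distinctOverAlias));
    }
}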
@@ -39,6 +39,7 @@ import org.apache.doris.nereids.util.ExpressionUtils;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
import com.google.common.collect.Streams;

import java.util.List;
@@ -76,6 +77,22 @@ public class FillUpMissingSlots implements AnalysisRuleFactory {
                            sort.withChildren(new LogicalProject<>(projects, project.child())));
                        })
                ),
+               RuleType.FILL_UP_SORT_AGGREGATE_HAVING_AGGREGATE.build(
+                   logicalSort(
+                       aggregate(logicalHaving(aggregate()))
+                           .when(a -> a.getOutputExpressions().stream().allMatch(SlotReference.class::isInstance))
+                   ).when(this::checkSort)
+                       .then(sort -> processDistinctProjectWithAggregate(sort, sort.child(), sort.child().child().child()))
+               ),
+               // ATTN: process aggregate with distinct project, must run this rule before FILL_UP_SORT_AGGREGATE
+               // because this pattern will always fail in FILL_UP_SORT_AGGREGATE
+               RuleType.FILL_UP_SORT_AGGREGATE_AGGREGATE.build(
+                   logicalSort(
+                       aggregate(aggregate())
+                           .when(a -> a.getOutputExpressions().stream().allMatch(SlotReference.class::isInstance))
+                   ).when(this::checkSort)
+                       .then(sort -> processDistinctProjectWithAggregate(sort, sort.child(), sort.child().child()))
+               ),
                RuleType.FILL_UP_SORT_AGGREGATE.build(
                    logicalSort(aggregate())
                        .when(this::checkSort)
@@ -334,7 +351,7 @@ public class FillUpMissingSlots implements AnalysisRuleFactory {
    }

    interface PlanGenerator {
-       Plan apply(Resolver resolver, Aggregate aggregate);
+       Plan apply(Resolver resolver, Aggregate<?> aggregate);
    }

    private Plan createPlan(Resolver resolver, Aggregate<? extends Plan> aggregate, PlanGenerator planGenerator) {
@@ -371,4 +388,49 @@ public class FillUpMissingSlots implements AnalysisRuleFactory {
        }
        return false;
    }
+
+   /**
+    * For SQL like SELECT DISTINCT a FROM t GROUP BY a HAVING b > 0 ORDER BY a,
+    * the order by needs to bind with both the bottom aggregate's output and the bottom aggregate's child's output.
+    * This function fills up the missing slots correctly for these situations.
+    *
+    * @param sort top sort
+    * @param upperAggregate upper aggregate, used to check that slots in order by appear in the select list
+    * @param bottomAggregate bottom aggregate, used to bind with its output and its child's output
+    *
+    * @return filled up plan
+    */
+   private Plan processDistinctProjectWithAggregate(LogicalSort<?> sort,
+           Aggregate<?> upperAggregate, Aggregate<Plan> bottomAggregate) {
+       Resolver resolver = new Resolver(bottomAggregate);
+       sort.getExpressions().forEach(resolver::resolve);
+       return createPlan(resolver, bottomAggregate, (r, a) -> {
+           List<OrderKey> newOrderKeys = sort.getOrderKeys().stream()
+                   .map(ok -> new OrderKey(
+                           ExpressionUtils.replace(ok.getExpr(), r.getSubstitution()),
+                           ok.isAsc(),
+                           ok.isNullFirst()))
+                   .collect(ImmutableList.toImmutableList());
+           boolean sortNotChanged = newOrderKeys.equals(sort.getOrderKeys());
+           boolean aggNotChanged = a.equals(bottomAggregate);
+           if (sortNotChanged && aggNotChanged) {
+               return null;
+           }
+           if (aggNotChanged) {
+               // since sort exprs must be in the select list, we should not change the agg at all.
+               return new LogicalSort<>(newOrderKeys, sort.child());
+           } else {
+               Set<NamedExpression> upperAggOutputs = Sets.newHashSet(upperAggregate.getOutputExpressions());
+               for (int i = 0; i < newOrderKeys.size(); i++) {
+                   OrderKey orderKey = newOrderKeys.get(i);
+                   Expression expression = orderKey.getExpr();
+                   if (!upperAggOutputs.containsAll(expression.getInputSlots())) {
+                       throw new AnalysisException(sort.getOrderKeys().get(i).getExpr().toSql()
+                               + " of ORDER BY clause is not in SELECT list");
+                   }
+               }
+               throw new AnalysisException("Expression of ORDER BY clause is not in SELECT list");
+           }
+       });
+   }
}
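The rejection path added above boils down to a set-containment check between each ORDER BY key's input slots and the upper (distinct) aggregate's outputs. A minimal stand-alone illustration, using plain strings instead of Nereids expressions and a generic RuntimeException in place of AnalysisException (class and names are hypothetical):

import java.util.List;
import java.util.Set;

public class OrderByInSelectListCheck {
    // Every slot referenced by an ORDER BY key must appear in the distinct projection's output.
    static void check(Set<String> upperAggOutputs, List<String> orderKeySlots) {
        for (String slot : orderKeySlots) {
            if (!upperAggOutputs.contains(slot)) {
                throw new RuntimeException(slot + " of ORDER BY clause is not in SELECT list");
            }
        }
    }

    public static void main(String[] args) {
        // select distinct a + 1 from t group by a + 1, b order by a + 1  -> passes
        check(Set.of("(a + 1)"), List.of("(a + 1)"));
        // select distinct a + 1 from t group by a + 1, b order by b      -> rejected, like the regression test
        check(Set.of("(a + 1)"), List.of("b"));
    }
}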
@@ -372,14 +372,14 @@ public class NormalizeRepeat extends OneAnalysisRuleFactory {
                CollectNonWindowedAggFuncs.collect(aggregate.getOutputExpressions());
        ImmutableSet.Builder<Slot> aggUsedSlotBuilder = ImmutableSet.builder();
        for (AggregateFunction function : aggregateFunctions) {
-           aggUsedSlotBuilder.addAll(function.<Set<SlotReference>>collect(SlotReference.class::isInstance));
+           aggUsedSlotBuilder.addAll(function.<SlotReference>collect(SlotReference.class::isInstance));
        }
        ImmutableSet<Slot> aggUsedSlots = aggUsedSlotBuilder.build();

        ImmutableSet.Builder<Slot> groupingSetsUsedSlotBuilder = ImmutableSet.builder();
        for (List<Expression> groupingSet : repeat.getGroupingSets()) {
            for (Expression expr : groupingSet) {
-               groupingSetsUsedSlotBuilder.addAll(expr.<Set<SlotReference>>collect(SlotReference.class::isInstance));
+               groupingSetsUsedSlotBuilder.addAll(expr.<SlotReference>collect(SlotReference.class::isInstance));
            }
        }
        ImmutableSet<Slot> groupingSetsUsedSlot = groupingSetsUsedSlotBuilder.build();
@@ -96,7 +96,7 @@ public class OuterJoinLAsscom extends OneExplorationRuleFactory {
                topJoin.getHashJoinConjuncts().stream(),
                topJoin.getOtherJoinConjuncts().stream())
                .allMatch(expr -> {
-                   Set<ExprId> usedExprIdSet = expr.<Set<SlotReference>>collect(SlotReference.class::isInstance)
+                   Set<ExprId> usedExprIdSet = expr.<SlotReference>collect(SlotReference.class::isInstance)
                            .stream()
                            .map(SlotReference::getExprId)
                            .collect(Collectors.toSet());
@@ -383,7 +383,7 @@ public class AdjustPreAggStatus implements RewriteRuleFactory {
                project.map(Project::getAliasToProducer);
        return agg.getOutputExpressions().stream()
                // extract aggregate functions.
-               .flatMap(e -> e.<Set<AggregateFunction>>collect(AggregateFunction.class::isInstance)
+               .flatMap(e -> e.<AggregateFunction>collect(AggregateFunction.class::isInstance)
                        .stream())
                // replace aggregate function's input slot by its producing expression.
                .map(expr -> slotToProducerOpt
@@ -150,7 +150,7 @@ public class MergeAggregate implements RewriteRuleFactory {
        });
    }

-   boolean commonCheck(LogicalAggregate<? extends Plan> outerAgg, LogicalAggregate<Plan> innerAgg,
+   private boolean commonCheck(LogicalAggregate<? extends Plan> outerAgg, LogicalAggregate<Plan> innerAgg,
            boolean sameGroupBy, Optional<LogicalProject> projectOptional) {
        innerAggExprIdToAggFunc = innerAgg.getOutputExpressions().stream()
                .filter(expr -> (expr instanceof Alias) && (expr.child(0) instanceof AggregateFunction))
@@ -674,7 +674,7 @@ public class SelectMaterializedIndexWithAggregate extends AbstractSelectMaterial
        Optional<Map<Slot, Expression>> slotToProducerOpt = project.map(Project::getAliasToProducer);
        return agg.getOutputExpressions().stream()
                // extract aggregate functions.
-               .flatMap(e -> e.<Set<AggregateFunction>>collect(AggregateFunction.class::isInstance).stream())
+               .flatMap(e -> e.<AggregateFunction>collect(AggregateFunction.class::isInstance).stream())
                // replace aggregate function's input slot by its producing expression.
                .map(expr -> slotToProducerOpt.map(slotToExpressions
                        -> (AggregateFunction) ExpressionUtils.replace(expr, slotToExpressions))
@@ -232,14 +232,14 @@ public interface TreeNode<NODE_TYPE extends TreeNode<NODE_TYPE>> {
    /**
     * Collect the nodes that satisfied the predicate.
     */
-   default <T> T collect(Predicate<TreeNode<NODE_TYPE>> predicate) {
+   default <T> Set<T> collect(Predicate<TreeNode<NODE_TYPE>> predicate) {
        ImmutableSet.Builder<TreeNode<NODE_TYPE>> result = ImmutableSet.builder();
        foreach(node -> {
            if (predicate.test(node)) {
                result.add(node);
            }
        });
-       return (T) result.build();
+       return (Set<T>) result.build();
    }

    /**
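This signature change drives most of the mechanical edits elsewhere in the commit: collect now declares Set<T> as its return type, so callers name only the element type instead of passing a Set type witness, and no longer cast the result. A self-contained sketch with a toy node type (hypothetical classes, not Doris' TreeNode interface):

import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Consumer;
import java.util.function.Predicate;

public class CollectSignatureSketch {
    interface Node {
        List<Node> children();

        default void foreach(Consumer<Node> visitor) {
            visitor.accept(this);
            children().forEach(c -> c.foreach(visitor));
        }

        // New shape: the element type is the type parameter, the Set lives in the return type.
        @SuppressWarnings("unchecked")
        default <T> Set<T> collect(Predicate<Node> predicate) {
            Set<Node> result = new LinkedHashSet<>();
            foreach(node -> {
                if (predicate.test(node)) {
                    result.add(node);
                }
            });
            return (Set<T>) result;
        }
    }

    record Leaf(String name) implements Node {
        public List<Node> children() { return List.of(); }
    }

    record Inner(List<Node> kids) implements Node {
        public List<Node> children() { return kids; }
    }

    public static void main(String[] args) {
        Node plan = new Inner(List.of(new Leaf("a"), new Inner(List.of(new Leaf("b")))));
        // Call site names only the element type: no <Set<Leaf>> witness, no cast at the caller.
        Set<Leaf> leaves = plan.<Leaf>collect(Leaf.class::isInstance);
        System.out.println(leaves); // [Leaf[name=a], Leaf[name=b]]
    }
}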
@@ -39,7 +39,6 @@ import org.apache.doris.nereids.util.TypeCoercionUtils;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableList.Builder;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
@@ -33,7 +33,6 @@ import com.google.common.collect.Maps;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

/**
@@ -85,8 +84,8 @@ public class AliasUdfBuilder extends UdfBuilder {

        // replace the placeholder slot to the input expressions.
        // adjust input, parameter and replaceMap to be corresponding.
-       Map<String, SlotReference> slots = ((Set<SlotReference>) boundFunction
-               .collect(SlotReference.class::isInstance))
+       Map<String, SlotReference> slots = (boundFunction
+               .<SlotReference>collect(SlotReference.class::isInstance))
                .stream().collect(Collectors.toMap(SlotReference::getName, k -> k, (v1, v2) -> v2));

        Map<SlotReference, Expression> replaceMap = Maps.newHashMap();
@@ -58,6 +58,38 @@ public interface Repeat<CHILD_PLAN extends Plan> extends Aggregate<CHILD_PLAN> {
        return ExpressionUtils.flatExpressions(getGroupingSets());
    }

+   @Override
+   default Aggregate<CHILD_PLAN> pruneOutputs(List<NamedExpression> prunedOutputs) {
+       // only output the reserved outputs plus COL_GROUPING_ID, so repeat still works correctly.
+       ImmutableList.Builder<NamedExpression> outputBuilder
+               = ImmutableList.builderWithExpectedSize(prunedOutputs.size() + 1);
+       outputBuilder.addAll(prunedOutputs);
+       for (NamedExpression output : getOutputExpressions()) {
+           Set<VirtualSlotReference> v = output.collect(VirtualSlotReference.class::isInstance);
+           if (v.stream().anyMatch(slot -> slot.getName().equals(COL_GROUPING_ID))) {
+               outputBuilder.add(output);
+           }
+       }
+       // prune groupingSets: if the parent operator does not need some exprs in the grouping sets, remove them.
+       // this cannot lead to wrong results because BE repeats the other columns as normal.
+       ImmutableList.Builder<List<Expression>> groupingSetsBuilder
+               = ImmutableList.builderWithExpectedSize(getGroupingSets().size());
+       for (List<Expression> groupingSet : getGroupingSets()) {
+           ImmutableList.Builder<Expression> groupingSetBuilder
+                   = ImmutableList.builderWithExpectedSize(groupingSet.size());
+           for (Expression expr : groupingSet) {
+               if (prunedOutputs.contains(expr)) {
+                   groupingSetBuilder.add(expr);
+               }
+           }
+           groupingSetsBuilder.add(groupingSetBuilder.build());
+       }
+       return withGroupSetsAndOutput(groupingSetsBuilder.build(), outputBuilder.build());
+   }
+
    Repeat<CHILD_PLAN> withGroupSetsAndOutput(List<List<Expression>> groupingSets,
            List<NamedExpression> outputExpressions);

    static VirtualSlotReference generateVirtualGroupingIdSlot() {
        return new VirtualSlotReference(COL_GROUPING_ID, BigIntType.INSTANCE, Optional.empty(),
                GroupingSetShapes::computeVirtualGroupingIdValue);
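The grouping-set pruning above keeps, per set, only the expressions the parent still consumes, while the grouping-id output is always preserved. A toy walk-through with plain strings in place of Nereids expressions (the class and values are hypothetical, loosely borrowed from the grouping-sets queries in the regression test below):

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

public class PruneGroupingSetsSketch {
    public static void main(String[] args) {
        // group by grouping sets((a + 1, b), (b + 1))
        List<List<String>> groupingSets = List.of(List.of("a + 1", "b"), List.of("b + 1"));
        // assume the parent (the distinct project) only needs a + 1
        Set<String> prunedOutputs = Set.of("a + 1");

        List<List<String>> prunedSets = new ArrayList<>();
        for (List<String> groupingSet : groupingSets) {
            List<String> kept = new ArrayList<>();
            for (String expr : groupingSet) {
                if (prunedOutputs.contains(expr)) {
                    kept.add(expr);
                }
            }
            prunedSets.add(kept);
        }
        // [[a + 1], []]: unneeded expressions are dropped per set; the sets themselves remain.
        System.out.println(prunedSets);
    }
}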
@@ -109,13 +109,13 @@ public class DeleteFromCommand extends Command implements ForwardWithSync {
            return;
        }
        Optional<PhysicalFilter<?>> optFilter = (planner.getPhysicalPlan()
-               .<Set<PhysicalFilter<?>>>collect(PhysicalFilter.class::isInstance)).stream()
+               .<PhysicalFilter<?>>collect(PhysicalFilter.class::isInstance)).stream()
                .findAny();
        Optional<PhysicalOlapScan> optScan = (planner.getPhysicalPlan()
-               .<Set<PhysicalOlapScan>>collect(PhysicalOlapScan.class::isInstance)).stream()
+               .<PhysicalOlapScan>collect(PhysicalOlapScan.class::isInstance)).stream()
                .findAny();
        Optional<UnboundRelation> optRelation = (logicalQuery
-               .<Set<UnboundRelation>>collect(UnboundRelation.class::isInstance)).stream()
+               .<UnboundRelation>collect(UnboundRelation.class::isInstance)).stream()
                .findAny();
        Preconditions.checkArgument(optFilter.isPresent(), "delete command must contain filter");
        Preconditions.checkArgument(optScan.isPresent(), "delete command could be only used on olap table");
@@ -141,7 +141,7 @@ public class DeleteFromCommand extends Command implements ForwardWithSync {
        Plan plan = planner.getPhysicalPlan();
        checkSubQuery(plan);
        for (Expression conjunct : filter.getConjuncts()) {
-           conjunct.<Set<SlotReference>>collect(SlotReference.class::isInstance)
+           conjunct.<SlotReference>collect(SlotReference.class::isInstance)
                    .forEach(s -> checkColumn(columns, s, olapTable));
            checkPredicate(conjunct);
        }
@@ -53,7 +53,6 @@ import org.apache.logging.log4j.Logger;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

/**
@@ -111,7 +110,7 @@ public class BatchInsertIntoTableCommand extends Command implements NoForward, E
        }

        Optional<TreeNode<?>> plan = planner.getPhysicalPlan()
-               .<Set<TreeNode<?>>>collect(PhysicalOlapTableSink.class::isInstance).stream().findAny();
+               .<TreeNode<?>>collect(PhysicalOlapTableSink.class::isInstance).stream().findAny();
        Preconditions.checkArgument(plan.isPresent(), "insert into command must contain OlapTableSinkNode");
        sink = ((PhysicalOlapTableSink<?>) plan.get());
        Table targetTable = sink.getTargetTable();
@@ -141,14 +140,14 @@ public class BatchInsertIntoTableCommand extends Command implements NoForward, E
        }

        Optional<PhysicalUnion> union = planner.getPhysicalPlan()
-               .<Set<PhysicalUnion>>collect(PhysicalUnion.class::isInstance).stream().findAny();
+               .<PhysicalUnion>collect(PhysicalUnion.class::isInstance).stream().findAny();
        if (union.isPresent()) {
            InsertUtils.executeBatchInsertTransaction(ctx, targetTable.getQualifiedDbName(),
                    targetTable.getName(), targetSchema, union.get().getConstantExprsList());
            return;
        }
        Optional<PhysicalOneRowRelation> oneRowRelation = planner.getPhysicalPlan()
-               .<Set<PhysicalOneRowRelation>>collect(PhysicalOneRowRelation.class::isInstance).stream().findAny();
+               .<PhysicalOneRowRelation>collect(PhysicalOneRowRelation.class::isInstance).stream().findAny();
        if (oneRowRelation.isPresent()) {
            InsertUtils.executeBatchInsertTransaction(ctx, targetTable.getQualifiedDbName(),
                    targetTable.getName(), targetSchema, ImmutableList.of(oneRowRelation.get().getProjects()));
@@ -54,7 +54,6 @@ import org.apache.logging.log4j.Logger;

import java.util.Objects;
import java.util.Optional;
import java.util.Set;

/**
 * insert into select command implementation
@@ -152,7 +151,7 @@ public class InsertIntoTableCommand extends Command implements ForwardWithSync,
            ctx.getMysqlChannel().reset();
        }
        Optional<PhysicalSink<?>> plan = (planner.getPhysicalPlan()
-               .<Set<PhysicalSink<?>>>collect(PhysicalSink.class::isInstance)).stream()
+               .<PhysicalSink<?>>collect(PhysicalSink.class::isInstance)).stream()
                .findAny();
        Preconditions.checkArgument(plan.isPresent(), "insert into command must contain target table");
        PhysicalSink physicalSink = plan.get();
@@ -60,7 +60,6 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;

/**
 * insert into select command implementation
@@ -122,7 +121,7 @@ public class InsertOverwriteTableCommand extends Command implements ForwardWithS
        }

        Optional<TreeNode<?>> plan = (planner.getPhysicalPlan()
-               .<Set<TreeNode<?>>>collect(node -> node instanceof PhysicalTableSink)).stream().findAny();
+               .<TreeNode<?>>collect(node -> node instanceof PhysicalTableSink)).stream().findAny();
        Preconditions.checkArgument(plan.isPresent(), "insert into command must contain OlapTableSinkNode");
        PhysicalTableSink<?> physicalTableSink = ((PhysicalTableSink<?>) plan.get());
        TableIf targetTable = physicalTableSink.getTargetTable();
@@ -178,6 +178,13 @@ public class PhysicalRepeat<CHILD_TYPE extends Plan> extends PhysicalUnary<CHILD
                getLogicalProperties(), physicalProperties, statistics, child());
    }

+   @Override
+   public PhysicalRepeat<CHILD_TYPE> withGroupSetsAndOutput(List<List<Expression>> groupingSets,
+           List<NamedExpression> outputExpressionList) {
+       return new PhysicalRepeat<>(groupingSets, outputExpressionList, Optional.empty(),
+               getLogicalProperties(), physicalProperties, statistics, child());
+   }
+
    @Override
    public PhysicalRepeat<CHILD_TYPE> resetLogicalProperties() {
        return new PhysicalRepeat<>(groupingSets, outputExpressions, groupExpression,
@@ -65,6 +65,7 @@ import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;

+import java.util.ArrayList;
import java.util.Arrays;
import java.util.BitSet;
import java.util.Collection;
@@ -522,9 +523,7 @@ public class ExpressionUtils {
        ImmutableList<Literal> literals =
                ImmutableList.of(new NullLiteral(BooleanType.INSTANCE), BooleanLiteral.FALSE);
        List<MarkJoinSlotReference> markJoinSlotReferenceList =
-               ((Set<MarkJoinSlotReference>) predicate
-               .collect(MarkJoinSlotReference.class::isInstance)).stream()
-               .collect(Collectors.toList());
+               new ArrayList<>((predicate.collect(MarkJoinSlotReference.class::isInstance)));
        int markSlotSize = markJoinSlotReferenceList.size();
        int maxMarkSlotCount = 4;
        // if the conjunct has mark slot, and maximum 4 mark slots(for performance)
@@ -40,7 +40,6 @@ import com.google.common.collect.Lists;
import com.google.common.collect.Sets;

import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -140,10 +139,7 @@ public class PlanUtils {
    }

    public static Set<LogicalCatalogRelation> getLogicalScanFromRootPlan(LogicalPlan rootPlan) {
-       Set<LogicalCatalogRelation> tableSet = new HashSet<>();
-       tableSet.addAll((Collection<? extends LogicalCatalogRelation>) rootPlan
-               .collect(LogicalCatalogRelation.class::isInstance));
-       return tableSet;
+       return rootPlan.collect(LogicalCatalogRelation.class::isInstance);
    }

    /**
@@ -1224,7 +1224,7 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
    private void assertOneAggFuncType(LogicalAggregate<? extends Plan> agg, Class<?> aggFuncType) {
        Set<AggregateFunction> aggFuncs = agg.getOutputExpressions()
                .stream()
-               .flatMap(e -> e.<Set<AggregateFunction>>collect(AggregateFunction.class::isInstance)
+               .flatMap(e -> e.<AggregateFunction>collect(AggregateFunction.class::isInstance)
                        .stream())
                .collect(Collectors.toSet());
        Assertions.assertEquals(1, aggFuncs.size());
@@ -1239,7 +1239,7 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
        Assertions.assertEquals(2, scans.size());

        ScanNode scanNode0 = scans.get(0);
-       Assertions.assertTrue(scanNode0 instanceof OlapScanNode);
+       Assertions.assertInstanceOf(OlapScanNode.class, scanNode0);
        OlapScanNode scan0 = (OlapScanNode) scanNode0;
        Assertions.assertTrue(scan0.isPreAggregation());
        Assertions.assertEquals(firstTableIndexName, scan0.getSelectedIndexName());
@@ -0,0 +1,30 @@
-- This file is automatically generated. You should know what you did if you want to edit this
-- !base_case --
0
81

-- !with_order --
1
82

-- !with_having --
1
82

-- !with_having_with_order --
1
82

-- !with_order_with_grouping_sets --
\N
1
82

-- !with_having_with_grouping_sets --
1
82

-- !with_having_with_order_with_grouping_sets --
1
82
@@ -0,0 +1,82 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

suite("agg_with_distinct_project") {

    sql "set enable_fallback_to_original_planner=false"
    sql "DROP TABLE IF EXISTS agg_with_distinct_project;"
    sql """
        CREATE TABLE agg_with_distinct_project (
            id int NOT NULL,
            a int DEFAULT NULL,
            b int DEFAULT NULL
        )
        PROPERTIES (
            "replication_allocation" = "tag.location.default: 1"
        );
    """

    sql """INSERT INTO agg_with_distinct_project VALUES(83,0,38),(26,0,79),(43,81,24)"""

    order_qt_base_case """
        SELECT DISTINCT a as c1 FROM agg_with_distinct_project GROUP BY b, a;
    """

    qt_with_order """
        select distinct a + 1 from agg_with_distinct_project group by a + 1, b order by a + 1;
    """

    order_qt_with_having """
        select distinct a + 1 from agg_with_distinct_project group by a + 1, b having b > 1;
    """

    qt_with_having_with_order """
        select distinct a + 1 from agg_with_distinct_project group by a + 1, b having b > 1 order by a + 1;
    """

    qt_with_order_with_grouping_sets """
        select distinct a + 1 from agg_with_distinct_project group by grouping sets(( a + 1, b ), (b + 1)) order by a + 1;
    """

    order_qt_with_having_with_grouping_sets """
        select distinct a + 1 from agg_with_distinct_project group by grouping sets(( a + 1, b ), (b + 1)) having b > 1;
    """

    qt_with_having_with_order_with_grouping_sets """
        select distinct a + 1 from agg_with_distinct_project group by grouping sets(( a + 1, b ), (b + 1)) having b > 1 order by a + 1;
    """

    // order by column not in select list
    test {
        sql """
            select distinct a + 1 from agg_with_distinct_project group by a + 1, b order by b;
        """
        exception "b of ORDER BY clause is not in SELECT list"
    }

    // order by column not in select list
    test {
        sql """
            select distinct a + 1 from agg_with_distinct_project group by grouping sets(( a + 1, b ), (b + 1)) order by b;
        """
        exception "b of ORDER BY clause is not in SELECT list"
    }

    sql "DROP TABLE IF EXISTS agg_with_distinct_project;"
}