[Feature](Nereids) support advanced materialized view (#19650)
Increase the functionality of advanced materialized view This feature already supported by legacy planner with PR #19650 This PR implement it in Nereids. This PR implement the features as below: 1. Support multiple columns in aggregate function. eg: select sum(c1 + c2) from t1; 2. Supports complex expressions. eg: select abs(c1), sum(abc(c1+1) + 1) from t1; TODO: 1. Support adding where in materialized view
This commit is contained in:
@ -41,6 +41,7 @@ import org.apache.logging.log4j.Logger;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
|
||||
/**
|
||||
* Materialized view is performed to materialize the results of query.
|
||||
@ -568,6 +569,11 @@ public class CreateMaterializedViewStmt extends DdlStmt {
|
||||
return new StringBuilder().append(MATERIALIZED_VIEW_NAME_PREFIX).append(name).toString();
|
||||
}
|
||||
|
||||
public static String mvColumnBuilder(Optional<String> functionName, String sourceColumnName) {
|
||||
return functionName.map(s -> mvAggregateColumnBuilder(s, sourceColumnName))
|
||||
.orElseGet(() -> mvColumnBuilder(sourceColumnName));
|
||||
}
|
||||
|
||||
public static String mvColumnBreaker(String name) {
|
||||
if (name.startsWith(MATERIALIZED_VIEW_AGGREGATE_NAME_PREFIX)) {
|
||||
// mva_SUM__k2 -> k2
|
||||
|
||||
@ -634,9 +634,7 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla
|
||||
@Override
|
||||
public PlanFragment visitPhysicalOlapScan(PhysicalOlapScan olapScan, PlanTranslatorContext context) {
|
||||
// Create OlapScanNode
|
||||
List<Slot> slotList = new ImmutableList.Builder<Slot>()
|
||||
.addAll(olapScan.getOutput())
|
||||
.build();
|
||||
List<Slot> slotList = olapScan.getOutput();
|
||||
Set<ExprId> deferredMaterializedExprIds = Collections.emptySet();
|
||||
if (olapScan.getMutableState(PhysicalOlapScan.DEFERRED_MATERIALIZED_SLOTS).isPresent()) {
|
||||
deferredMaterializedExprIds = (Set<ExprId>) (olapScan
|
||||
@ -644,6 +642,11 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla
|
||||
}
|
||||
OlapTable olapTable = olapScan.getTable();
|
||||
TupleDescriptor tupleDescriptor = generateTupleDesc(slotList, olapTable, deferredMaterializedExprIds, context);
|
||||
|
||||
if (olapScan.getSelectedIndexId() != olapScan.getTable().getBaseIndexId()) {
|
||||
generateTupleDesc(olapScan.getBaseOutputs(), olapTable, deferredMaterializedExprIds, context);
|
||||
}
|
||||
|
||||
if (olapScan.getMutableState(PhysicalOlapScan.DEFERRED_MATERIALIZED_SLOTS).isPresent()) {
|
||||
injectRowIdColumnSlot(tupleDescriptor);
|
||||
}
|
||||
@ -701,7 +704,6 @@ public class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, Pla
|
||||
|
||||
List<Slot> slotList = new ImmutableList.Builder<Slot>()
|
||||
.addAll(schemaScan.getOutput())
|
||||
.addAll(schemaScan.getNonUserVisibleOutput())
|
||||
.build();
|
||||
TupleDescriptor tupleDescriptor = generateTupleDesc(slotList, table, context);
|
||||
tupleDescriptor.setTable(table);
|
||||
|
||||
@ -30,6 +30,7 @@ import org.apache.doris.nereids.rules.analysis.LogicalSubQueryAliasToLogicalProj
|
||||
import org.apache.doris.nereids.rules.expression.ExpressionNormalization;
|
||||
import org.apache.doris.nereids.rules.expression.ExpressionOptimization;
|
||||
import org.apache.doris.nereids.rules.expression.ExpressionRewrite;
|
||||
import org.apache.doris.nereids.rules.mv.SelectMaterializedIndexWithAggregate;
|
||||
import org.apache.doris.nereids.rules.mv.SelectMaterializedIndexWithoutAggregate;
|
||||
import org.apache.doris.nereids.rules.rewrite.logical.AdjustNullable;
|
||||
import org.apache.doris.nereids.rules.rewrite.logical.AggScalarSubQueryToWindowFunction;
|
||||
@ -263,13 +264,15 @@ public class NereidsRewriter extends BatchRewriteJob {
|
||||
|
||||
topic("MV optimization",
|
||||
topDown(
|
||||
// TODO: enable this rule after https://github.com/apache/doris/issues/18263 is fixed
|
||||
// new SelectMaterializedIndexWithAggregate(),
|
||||
new SelectMaterializedIndexWithAggregate(),
|
||||
new SelectMaterializedIndexWithoutAggregate(),
|
||||
new PushdownFilterThroughProject(),
|
||||
new MergeProjects(),
|
||||
new PruneOlapScanTablet()
|
||||
)
|
||||
),
|
||||
custom(RuleType.COLUMN_PRUNING, ColumnPruning::new),
|
||||
bottomUp(RuleSet.PUSH_DOWN_FILTERS),
|
||||
custom(RuleType.ELIMINATE_UNNECESSARY_PROJECT, EliminateUnnecessaryProject::new)
|
||||
),
|
||||
|
||||
// this rule batch must keep at the end of rewrite to do some plan check
|
||||
|
||||
@ -38,7 +38,6 @@ import java.util.Set;
|
||||
*/
|
||||
public class LogicalProperties {
|
||||
protected final Supplier<List<Slot>> outputSupplier;
|
||||
protected final Supplier<List<Slot>> nonUserVisibleOutputSupplier;
|
||||
protected final Supplier<List<Id>> outputExprIdsSupplier;
|
||||
protected final Supplier<Set<Slot>> outputSetSupplier;
|
||||
protected final Supplier<Map<Slot, Slot>> outputMapSupplier;
|
||||
@ -59,9 +58,6 @@ public class LogicalProperties {
|
||||
this.outputSupplier = Suppliers.memoize(
|
||||
Objects.requireNonNull(outputSupplier, "outputSupplier can not be null")
|
||||
);
|
||||
this.nonUserVisibleOutputSupplier = Suppliers.memoize(
|
||||
Objects.requireNonNull(nonUserVisibleOutputSupplier, "nonUserVisibleOutputSupplier can not be null")
|
||||
);
|
||||
this.outputExprIdsSupplier = Suppliers.memoize(
|
||||
() -> this.outputSupplier.get().stream().map(NamedExpression::getExprId).map(Id.class::cast)
|
||||
.collect(ImmutableList.toImmutableList())
|
||||
@ -83,10 +79,6 @@ public class LogicalProperties {
|
||||
return outputSupplier.get();
|
||||
}
|
||||
|
||||
public List<Slot> getNonUserVisibleOutput() {
|
||||
return nonUserVisibleOutputSupplier.get();
|
||||
}
|
||||
|
||||
public Set<Slot> getOutputSet() {
|
||||
return outputSetSupplier.get();
|
||||
}
|
||||
@ -104,14 +96,13 @@ public class LogicalProperties {
|
||||
}
|
||||
|
||||
public LogicalProperties withOutput(List<Slot> output) {
|
||||
return new LogicalProperties(Suppliers.ofInstance(output), nonUserVisibleOutputSupplier);
|
||||
return new LogicalProperties(Suppliers.ofInstance(output));
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "LogicalProperties{"
|
||||
+ "\noutputSupplier=" + outputSupplier.get()
|
||||
+ "\nnonUserVisibleOutputSupplier=" + nonUserVisibleOutputSupplier.get()
|
||||
+ "\noutputExprIdsSupplier=" + outputExprIdsSupplier.get()
|
||||
+ "\noutputSetSupplier=" + outputSetSupplier.get()
|
||||
+ "\noutputMapSupplier=" + outputMapSupplier.get()
|
||||
|
||||
@ -38,7 +38,6 @@ import org.apache.commons.lang3.StringUtils;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
/**
|
||||
* some check need to do after analyze whole plan.
|
||||
@ -58,7 +57,7 @@ public class CheckAfterRewrite extends OneAnalysisRuleFactory {
|
||||
.flatMap(expr -> expr.getInputSlots().stream())
|
||||
.collect(Collectors.toSet());
|
||||
Set<ExprId> childrenOutput = plan.children().stream()
|
||||
.flatMap(child -> Stream.concat(child.getOutput().stream(), child.getNonUserVisibleOutput().stream()))
|
||||
.flatMap(child -> child.getOutput().stream())
|
||||
.map(NamedExpression::getExprId)
|
||||
.collect(Collectors.toSet());
|
||||
notFromChildren = notFromChildren.stream()
|
||||
|
||||
@ -58,6 +58,7 @@ public class LogicalOlapScanToPhysicalOlapScan extends OneImplementationRuleFact
|
||||
olapScan.getSelectedPartitionIds(),
|
||||
convertDistribution(olapScan),
|
||||
olapScan.getPreAggStatus(),
|
||||
olapScan.getOutputByMvIndex(olapScan.getTable().getBaseIndexId()),
|
||||
Optional.empty(),
|
||||
olapScan.getLogicalProperties())
|
||||
).toRule(RuleType.LOGICAL_OLAP_SCAN_TO_PHYSICAL_OLAP_SCAN_RULE);
|
||||
@ -79,6 +80,31 @@ public class LogicalOlapScanToPhysicalOlapScan extends OneImplementationRuleFact
|
||||
|| olapScan.getSelectedIndexId() != olapScan.getTable().getBaseIndexId()) {
|
||||
// TODO if a mv is selected, we ignore base table's distributionInfo for now
|
||||
// need improve this to handle the case if mv's distributionInfo is the same as base table
|
||||
if (olapScan.getSelectedIndexId() != olapScan.getTable().getBaseIndexId()) {
|
||||
HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo;
|
||||
List<Slot> output = olapScan.getOutput();
|
||||
List<Slot> baseOutput = olapScan.getOutputByMvIndex(olapScan.getTable().getBaseIndexId());
|
||||
List<ExprId> hashColumns = Lists.newArrayList();
|
||||
for (int i = 0; i < output.size(); i++) {
|
||||
for (Column column : hashDistributionInfo.getDistributionColumns()) {
|
||||
if (((SlotReference) output.get(i)).getColumn().get().getNameWithoutMvPrefix()
|
||||
.equals(column.getName())) {
|
||||
hashColumns.add(output.get(i).getExprId());
|
||||
}
|
||||
}
|
||||
}
|
||||
if (hashColumns.size() != hashDistributionInfo.getDistributionColumns().size()) {
|
||||
for (int i = 0; i < baseOutput.size(); i++) {
|
||||
for (Column column : hashDistributionInfo.getDistributionColumns()) {
|
||||
if (((SlotReference) baseOutput.get(i)).getColumn().get().equals(column)) {
|
||||
hashColumns.add(baseOutput.get(i).getExprId());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return new DistributionSpecHash(hashColumns, ShuffleType.NATURAL, olapScan.getTable().getId(),
|
||||
olapScan.getSelectedIndexId(), Sets.newHashSet(olapScan.getSelectedPartitionIds()));
|
||||
}
|
||||
return DistributionSpecAny.INSTANCE;
|
||||
}
|
||||
HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo;
|
||||
|
||||
@ -17,10 +17,14 @@
|
||||
|
||||
package org.apache.doris.nereids.rules.mv;
|
||||
|
||||
import org.apache.doris.analysis.CreateMaterializedViewStmt;
|
||||
import org.apache.doris.catalog.Column;
|
||||
import org.apache.doris.catalog.MaterializedIndex;
|
||||
import org.apache.doris.catalog.OlapTable;
|
||||
import org.apache.doris.nereids.parser.NereidsParser;
|
||||
import org.apache.doris.nereids.trees.expressions.Alias;
|
||||
import org.apache.doris.nereids.trees.expressions.CaseWhen;
|
||||
import org.apache.doris.nereids.trees.expressions.Cast;
|
||||
import org.apache.doris.nereids.trees.expressions.ComparisonPredicate;
|
||||
import org.apache.doris.nereids.trees.expressions.EqualTo;
|
||||
import org.apache.doris.nereids.trees.expressions.ExprId;
|
||||
@ -30,6 +34,7 @@ import org.apache.doris.nereids.trees.expressions.IsNull;
|
||||
import org.apache.doris.nereids.trees.expressions.NamedExpression;
|
||||
import org.apache.doris.nereids.trees.expressions.NullSafeEqual;
|
||||
import org.apache.doris.nereids.trees.expressions.Slot;
|
||||
import org.apache.doris.nereids.trees.expressions.SlotReference;
|
||||
import org.apache.doris.nereids.trees.expressions.WhenClause;
|
||||
import org.apache.doris.nereids.trees.expressions.functions.agg.AggregateFunction;
|
||||
import org.apache.doris.nereids.trees.expressions.functions.agg.Count;
|
||||
@ -40,16 +45,26 @@ import org.apache.doris.nereids.trees.expressions.functions.scalar.ToBitmap;
|
||||
import org.apache.doris.nereids.trees.expressions.functions.scalar.ToBitmapWithCheck;
|
||||
import org.apache.doris.nereids.trees.expressions.literal.Literal;
|
||||
import org.apache.doris.nereids.trees.expressions.literal.TinyIntLiteral;
|
||||
import org.apache.doris.nereids.trees.expressions.visitor.DefaultExpressionRewriter;
|
||||
import org.apache.doris.nereids.trees.expressions.visitor.ExpressionVisitor;
|
||||
import org.apache.doris.nereids.trees.plans.Plan;
|
||||
import org.apache.doris.nereids.trees.plans.logical.LogicalAggregate;
|
||||
import org.apache.doris.nereids.trees.plans.logical.LogicalFilter;
|
||||
import org.apache.doris.nereids.trees.plans.logical.LogicalOlapScan;
|
||||
import org.apache.doris.nereids.trees.plans.logical.LogicalProject;
|
||||
import org.apache.doris.nereids.trees.plans.logical.LogicalRepeat;
|
||||
import org.apache.doris.nereids.trees.plans.visitor.DefaultPlanVisitor;
|
||||
import org.apache.doris.nereids.util.ExpressionUtils;
|
||||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.google.common.collect.Lists;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
@ -76,23 +91,66 @@ public abstract class AbstractSelectMaterializedIndexRule {
|
||||
protected boolean containAllRequiredColumns(
|
||||
MaterializedIndex index,
|
||||
LogicalOlapScan scan,
|
||||
Set<Slot> requiredScanOutput) {
|
||||
Set<Slot> requiredScanOutput,
|
||||
Set<? extends Expression> requiredExpr) {
|
||||
|
||||
OlapTable table = scan.getTable();
|
||||
|
||||
Set<String> requiredColumnNames = requiredScanOutput.stream()
|
||||
.map(s -> normalizeName(s.getName()))
|
||||
.collect(Collectors.toSet());
|
||||
Set<String> requiredMvColumnNames = requiredScanOutput.stream()
|
||||
.map(s -> normalizeName(Column.getNameWithoutMvPrefix(s.getName())))
|
||||
.collect(Collectors.toSet());
|
||||
|
||||
Set<String> mvColNames = table.getSchemaByIndexId(index.getId(), true).stream()
|
||||
.map(c -> normalizeName(c.getNameWithoutMvPrefix()))
|
||||
.map(c -> normalizeName(parseMvColumnToSql(c.getNameWithoutMvPrefix())))
|
||||
.collect(Collectors.toSet());
|
||||
|
||||
table.getSchemaByIndexId(index.getId(), true).stream()
|
||||
.forEach(column -> mvColNames.add(normalizeName(column.getName())));
|
||||
return mvColNames.containsAll(requiredMvColumnNames)
|
||||
|| requiredExpr.stream()
|
||||
.map(AbstractSelectMaterializedIndexRule::removeCastAndAlias)
|
||||
.filter(e -> !containsAllColumn(e, mvColNames))
|
||||
.collect(Collectors.toSet()).isEmpty();
|
||||
}
|
||||
|
||||
return mvColNames
|
||||
.containsAll(requiredColumnNames);
|
||||
public static String parseMvColumnToSql(String mvName) {
|
||||
return new NereidsParser().parseExpression(
|
||||
org.apache.doris.analysis.CreateMaterializedViewStmt.mvColumnBreaker(mvName)).toSql();
|
||||
}
|
||||
|
||||
public static String parseMvColumnToMvName(String mvName, Optional<String> aggTypeName) {
|
||||
return CreateMaterializedViewStmt.mvColumnBuilder(aggTypeName,
|
||||
new NereidsParser().parseExpression(
|
||||
org.apache.doris.analysis.CreateMaterializedViewStmt.mvColumnBreaker(mvName)).toSql());
|
||||
}
|
||||
|
||||
protected static Expression removeCastAndAlias(Expression expression) {
|
||||
List<Expression> children = new ArrayList<>();
|
||||
for (Expression child : expression.children()) {
|
||||
children.add(removeCastAndAlias(child));
|
||||
}
|
||||
if (expression instanceof Cast) {
|
||||
return ((Cast) expression.withChildren(children)).child();
|
||||
}
|
||||
if (expression instanceof Alias) {
|
||||
return ((Alias) expression.withChildren(children)).child();
|
||||
}
|
||||
return children.isEmpty() ? expression : expression.withChildren(children);
|
||||
}
|
||||
|
||||
protected static boolean containsAllColumn(Expression expression, Set<String> mvColumnNames) {
|
||||
if (mvColumnNames.contains(Column.getNameWithoutMvPrefix(expression.toSql()))) {
|
||||
return true;
|
||||
}
|
||||
if (expression.children().isEmpty()) {
|
||||
return false;
|
||||
}
|
||||
boolean childContain = true;
|
||||
for (Expression child : expression.children()) {
|
||||
if (child instanceof Literal) {
|
||||
continue;
|
||||
}
|
||||
childContain &= containsAllColumn(child, mvColumnNames);
|
||||
}
|
||||
return childContain;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -103,6 +161,10 @@ public abstract class AbstractSelectMaterializedIndexRule {
|
||||
List<MaterializedIndex> candidates,
|
||||
LogicalOlapScan scan,
|
||||
Set<Expression> predicates) {
|
||||
if (candidates.isEmpty()) {
|
||||
return scan.getTable().getBaseIndexId();
|
||||
}
|
||||
|
||||
OlapTable table = scan.getTable();
|
||||
// Scan slot exprId -> slot name
|
||||
Map<ExprId, String> exprIdToName = scan.getOutput()
|
||||
@ -293,7 +355,7 @@ public abstract class AbstractSelectMaterializedIndexRule {
|
||||
}
|
||||
|
||||
public static String normalizeName(String name) {
|
||||
return name.replace("`", "");
|
||||
return name.replace("`", "").toLowerCase();
|
||||
}
|
||||
|
||||
public static Expression slotToCaseWhen(Expression expression) {
|
||||
@ -317,4 +379,236 @@ public abstract class AbstractSelectMaterializedIndexRule {
|
||||
}
|
||||
return aggregateFunction.withChildren(slot).toSql();
|
||||
}
|
||||
|
||||
protected SlotContext generateBaseScanExprToMvExpr(LogicalOlapScan mvPlan) {
|
||||
Map<Slot, Slot> baseSlotToMvSlot = new HashMap<>();
|
||||
Map<String, Slot> mvNameToMvSlot = new HashMap<>();
|
||||
if (mvPlan.getSelectedIndexId() == mvPlan.getTable().getBaseIndexId()) {
|
||||
return new SlotContext(baseSlotToMvSlot, mvNameToMvSlot);
|
||||
}
|
||||
for (Slot mvSlot : mvPlan.getOutputByMvIndex(mvPlan.getSelectedIndexId())) {
|
||||
boolean isPushed = false;
|
||||
for (Slot baseSlot : mvPlan.getOutput()) {
|
||||
if (org.apache.doris.analysis.CreateMaterializedViewStmt.isMVColumnAggregate(mvSlot.getName())) {
|
||||
continue;
|
||||
}
|
||||
if (baseSlot.toSql().equalsIgnoreCase(
|
||||
org.apache.doris.analysis.CreateMaterializedViewStmt.mvColumnBreaker(
|
||||
normalizeName(mvSlot.getName())))) {
|
||||
baseSlotToMvSlot.put(baseSlot, mvSlot);
|
||||
isPushed = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!isPushed) {
|
||||
if (org.apache.doris.analysis.CreateMaterializedViewStmt.isMVColumnAggregate(mvSlot.getName())) {
|
||||
mvNameToMvSlot.put(normalizeName(
|
||||
org.apache.doris.analysis.CreateMaterializedViewStmt.mvColumnBreaker(mvSlot.getName())),
|
||||
mvSlot);
|
||||
}
|
||||
mvNameToMvSlot.put(normalizeName(mvSlot.getName()), mvSlot);
|
||||
}
|
||||
}
|
||||
return new SlotContext(baseSlotToMvSlot, mvNameToMvSlot);
|
||||
}
|
||||
|
||||
/** SlotContext */
|
||||
protected static class SlotContext {
|
||||
// base index Slot to selected mv Slot
|
||||
public final Map<Slot, Slot> baseSlotToMvSlot;
|
||||
|
||||
// selected mv Slot name to mv Slot
|
||||
public final Map<String, Slot> mvNameToMvSlot;
|
||||
|
||||
public SlotContext(Map<Slot, Slot> baseSlotToMvSlot, Map<String, Slot> mvNameToMvSlot) {
|
||||
this.baseSlotToMvSlot = ImmutableMap.copyOf(baseSlotToMvSlot);
|
||||
this.mvNameToMvSlot = ImmutableMap.copyOf(mvNameToMvSlot);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ReplaceExpressions
|
||||
* Notes: For the sum type, the original column and the mv column may have inconsistent types,
|
||||
* but the be side will not check the column type, so it can work normally
|
||||
*/
|
||||
protected static class ReplaceExpressions extends DefaultPlanVisitor<Plan, Void> {
|
||||
private final SlotContext slotContext;
|
||||
|
||||
public ReplaceExpressions(SlotContext slotContext) {
|
||||
this.slotContext = slotContext;
|
||||
}
|
||||
|
||||
public Plan replace(Plan plan, LogicalOlapScan scan) {
|
||||
if (scan.getSelectedIndexId() == scan.getTable().getBaseIndexId()) {
|
||||
return plan;
|
||||
}
|
||||
return plan.accept(this, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public LogicalAggregate visitLogicalAggregate(LogicalAggregate agg, Void ctx) {
|
||||
Plan child = agg.child(0).accept(this, ctx);
|
||||
List<Expression> groupByExprs = agg.getGroupByExpressions();
|
||||
List<Expression> newGroupByExprs = groupByExprs.stream()
|
||||
.map(expr -> new ReplaceExpressionWithMvColumn(slotContext).replace(expr))
|
||||
.collect(ImmutableList.toImmutableList());
|
||||
|
||||
List<NamedExpression> outputExpressions = agg.getOutputExpressions();
|
||||
List<NamedExpression> newOutputExpressions = outputExpressions.stream()
|
||||
.map(expr -> (NamedExpression) new ReplaceExpressionWithMvColumn(slotContext).replace(expr))
|
||||
.collect(ImmutableList.toImmutableList());
|
||||
|
||||
return agg.withNormalized(newGroupByExprs, newOutputExpressions, child);
|
||||
}
|
||||
|
||||
@Override
|
||||
public LogicalRepeat visitLogicalRepeat(LogicalRepeat repeat, Void ctx) {
|
||||
Plan child = repeat.child(0).accept(this, ctx);
|
||||
List<List<Expression>> groupingSets = repeat.getGroupingSets();
|
||||
ImmutableList.Builder<List<Expression>> newGroupingExprs = ImmutableList.builder();
|
||||
for (List<Expression> expressions : groupingSets) {
|
||||
newGroupingExprs.add(expressions.stream()
|
||||
.map(expr -> new ReplaceExpressionWithMvColumn(slotContext).replace(expr))
|
||||
.collect(ImmutableList.toImmutableList())
|
||||
);
|
||||
}
|
||||
|
||||
List<NamedExpression> outputExpressions = repeat.getOutputExpressions();
|
||||
List<NamedExpression> newOutputExpressions = outputExpressions.stream()
|
||||
.map(expr -> (NamedExpression) new ReplaceExpressionWithMvColumn(slotContext).replace(expr))
|
||||
.collect(ImmutableList.toImmutableList());
|
||||
|
||||
return repeat.withNormalizedExpr(newGroupingExprs.build(), newOutputExpressions, child);
|
||||
}
|
||||
|
||||
@Override
|
||||
public LogicalFilter visitLogicalFilter(LogicalFilter filter, Void ctx) {
|
||||
Plan child = filter.child(0).accept(this, ctx);
|
||||
Set<Expression> newConjuncts = ImmutableSet.copyOf(ExpressionUtils.extractConjunction(
|
||||
new ReplaceExpressionWithMvColumn(slotContext).replace(filter.getPredicate())));
|
||||
|
||||
return filter.withConjunctsAndChild(newConjuncts, child);
|
||||
}
|
||||
|
||||
@Override
|
||||
public LogicalProject visitLogicalProject(LogicalProject project, Void ctx) {
|
||||
Plan child = project.child(0).accept(this, ctx);
|
||||
List<NamedExpression> projects = project.getProjects();
|
||||
List<NamedExpression> newProjects = projects.stream()
|
||||
.map(expr -> (NamedExpression) new ReplaceExpressionWithMvColumn(slotContext).replace(expr))
|
||||
.collect(ImmutableList.toImmutableList());
|
||||
|
||||
return project.withProjectsAndChild(newProjects, child);
|
||||
}
|
||||
|
||||
@Override
|
||||
public LogicalOlapScan visitLogicalOlapScan(LogicalOlapScan scan, Void ctx) {
|
||||
return scan.withLogicalProperties(Optional.empty());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ReplaceExpressionWithMvColumn
|
||||
*/
|
||||
protected static class ReplaceExpressionWithMvColumn extends DefaultExpressionRewriter<Void> {
|
||||
// base index Slot to selected mv Slot
|
||||
private final Map<Slot, Slot> baseSlotToMvSlot;
|
||||
|
||||
// selected mv Slot name to mv Slot
|
||||
private final Map<String, Slot> mvNameToMvSlot;
|
||||
|
||||
public ReplaceExpressionWithMvColumn(SlotContext slotContext) {
|
||||
this.baseSlotToMvSlot = ImmutableMap.copyOf(slotContext.baseSlotToMvSlot);
|
||||
this.mvNameToMvSlot = ImmutableMap.copyOf(slotContext.mvNameToMvSlot);
|
||||
}
|
||||
|
||||
public Expression replace(Expression expression) {
|
||||
return expression.accept(this, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Expression visit(Expression expr, Void context) {
|
||||
if (notUseMv() || org.apache.doris.analysis.CreateMaterializedViewStmt.isMVColumn(expr.toSql())) {
|
||||
return expr;
|
||||
} else if (checkExprIsMvColumn(expr)) {
|
||||
return mvNameToMvSlot.get(
|
||||
org.apache.doris.analysis.CreateMaterializedViewStmt.mvColumnBuilder(expr.toSql()));
|
||||
} else {
|
||||
expr = super.visit(expr, context);
|
||||
return expr;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Expression visitSlotReference(SlotReference slotReference, Void context) {
|
||||
if (baseSlotToMvSlot.containsKey(slotReference)) {
|
||||
return baseSlotToMvSlot.get(slotReference);
|
||||
}
|
||||
return slotReference;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Expression visitAggregateFunction(AggregateFunction aggregateFunction, Void context) {
|
||||
String childrenName = aggregateFunction.children()
|
||||
.stream()
|
||||
.map(Expression::toSql)
|
||||
.collect(Collectors.joining(", "));
|
||||
String mvName = org.apache.doris.analysis.CreateMaterializedViewStmt.mvAggregateColumnBuilder(
|
||||
aggregateFunction.getName(), childrenName);
|
||||
if (mvNameToMvSlot.containsKey(mvName)) {
|
||||
return aggregateFunction.withChildren(mvNameToMvSlot.get(mvName));
|
||||
} else if (mvNameToMvSlot.containsKey(childrenName)) {
|
||||
// aggRewrite eg: bitmap_union_count -> bitmap_union
|
||||
return aggregateFunction.withChildren(mvNameToMvSlot.get(childrenName));
|
||||
}
|
||||
return visit(aggregateFunction, context);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Expression visitAlias(Alias alias, Void context) {
|
||||
if (mvNameToMvSlot.containsKey(alias.toSlot().toSql())) {
|
||||
return mvNameToMvSlot.get(alias.toSlot().toSql());
|
||||
}
|
||||
return visit(alias, context);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Expression visitScalarFunction(ScalarFunction scalarFunction, Void context) {
|
||||
List<Expression> newChildrenWithoutCast = scalarFunction.children().stream()
|
||||
.map(child -> {
|
||||
if (child instanceof Cast) {
|
||||
return ((Cast) child).child();
|
||||
}
|
||||
return child;
|
||||
}).collect(ImmutableList.toImmutableList());
|
||||
Expression newScalarFunction = scalarFunction.withChildren(newChildrenWithoutCast);
|
||||
if (checkExprIsMvColumn(newScalarFunction)) {
|
||||
return mvNameToMvSlot.get(
|
||||
org.apache.doris.analysis.CreateMaterializedViewStmt.mvColumnBuilder(newScalarFunction.toSql()));
|
||||
}
|
||||
return visit(scalarFunction, context);
|
||||
}
|
||||
|
||||
private boolean notUseMv() {
|
||||
return baseSlotToMvSlot.isEmpty() && mvNameToMvSlot.isEmpty();
|
||||
}
|
||||
|
||||
private boolean checkExprIsMvColumn(Expression expr) {
|
||||
return mvNameToMvSlot.containsKey(
|
||||
org.apache.doris.analysis.CreateMaterializedViewStmt.mvColumnBuilder(expr.toSql()));
|
||||
}
|
||||
}
|
||||
|
||||
protected List<NamedExpression> generateProjectsAlias(
|
||||
List<? extends NamedExpression> oldProjects, SlotContext slotContext) {
|
||||
return oldProjects.stream().map(e -> {
|
||||
if (slotContext.baseSlotToMvSlot.containsKey(e.toSlot())) {
|
||||
return new Alias(e.getExprId(), slotContext.baseSlotToMvSlot.get(e.toSlot()), e.getName());
|
||||
}
|
||||
if (slotContext.mvNameToMvSlot.containsKey(e.toSql())) {
|
||||
return new Alias(e.getExprId(), slotContext.mvNameToMvSlot.get(e.toSql()), e.getName());
|
||||
}
|
||||
return e;
|
||||
}).collect(ImmutableList.toImmutableList());
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@ -24,25 +24,22 @@ import org.apache.doris.catalog.OlapTable;
|
||||
import org.apache.doris.nereids.rules.Rule;
|
||||
import org.apache.doris.nereids.rules.RuleType;
|
||||
import org.apache.doris.nereids.rules.rewrite.RewriteRuleFactory;
|
||||
import org.apache.doris.nereids.trees.expressions.Alias;
|
||||
import org.apache.doris.nereids.trees.expressions.Expression;
|
||||
import org.apache.doris.nereids.trees.expressions.Slot;
|
||||
import org.apache.doris.nereids.trees.expressions.SlotReference;
|
||||
import org.apache.doris.nereids.trees.plans.PreAggStatus;
|
||||
import org.apache.doris.nereids.trees.plans.logical.LogicalFilter;
|
||||
import org.apache.doris.nereids.trees.plans.logical.LogicalOlapScan;
|
||||
import org.apache.doris.nereids.trees.plans.logical.LogicalPlan;
|
||||
import org.apache.doris.nereids.trees.plans.logical.LogicalProject;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.google.common.collect.Lists;
|
||||
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
/**
|
||||
* Select materialized index, i.e., both for rollup and materialized view when aggregate is not present.
|
||||
@ -63,47 +60,95 @@ public class SelectMaterializedIndexWithoutAggregate extends AbstractSelectMater
|
||||
// project with pushdown filter.
|
||||
// Project(Filter(Scan))
|
||||
logicalProject(logicalFilter(logicalOlapScan().when(this::shouldSelectIndex)))
|
||||
.then(project -> {
|
||||
.thenApply(ctx -> {
|
||||
LogicalProject<LogicalFilter<LogicalOlapScan>> project = ctx.root;
|
||||
LogicalFilter<LogicalOlapScan> filter = project.child();
|
||||
LogicalOlapScan scan = filter.child();
|
||||
return project.withChildren(filter.withChildren(
|
||||
select(scan, project::getInputSlots, filter::getConjuncts)));
|
||||
|
||||
LogicalOlapScan mvPlan = select(
|
||||
scan, project::getInputSlots, filter::getConjuncts,
|
||||
Stream.concat(filter.getExpressions().stream(),
|
||||
project.getExpressions().stream()).collect(ImmutableSet.toImmutableSet()));
|
||||
SlotContext slotContext = generateBaseScanExprToMvExpr(mvPlan);
|
||||
|
||||
return new LogicalProject(
|
||||
generateProjectsAlias(project.getOutput(), slotContext),
|
||||
new ReplaceExpressions(slotContext).replace(
|
||||
project.withChildren(filter.withChildren(mvPlan)), mvPlan));
|
||||
}).toRule(RuleType.MATERIALIZED_INDEX_PROJECT_FILTER_SCAN),
|
||||
|
||||
// project with filter that cannot be pushdown.
|
||||
// Filter(Project(Scan))
|
||||
logicalFilter(logicalProject(logicalOlapScan().when(this::shouldSelectIndex)))
|
||||
.then(filter -> {
|
||||
.thenApply(ctx -> {
|
||||
LogicalFilter<LogicalProject<LogicalOlapScan>> filter = ctx.root;
|
||||
LogicalProject<LogicalOlapScan> project = filter.child();
|
||||
LogicalOlapScan scan = project.child();
|
||||
return filter.withChildren(project.withChildren(
|
||||
select(scan, project::getInputSlots, ImmutableSet::of)
|
||||
));
|
||||
|
||||
LogicalOlapScan mvPlan = select(
|
||||
scan, project::getInputSlots, ImmutableSet::of,
|
||||
new HashSet<>(project.getExpressions()));
|
||||
SlotContext slotContext = generateBaseScanExprToMvExpr(mvPlan);
|
||||
|
||||
return new LogicalProject(
|
||||
generateProjectsAlias(project.getOutput(), slotContext),
|
||||
new ReplaceExpressions(slotContext).replace(
|
||||
filter.withChildren(project.withChildren(mvPlan)), mvPlan));
|
||||
}).toRule(RuleType.MATERIALIZED_INDEX_FILTER_PROJECT_SCAN),
|
||||
|
||||
// scan with filters could be pushdown.
|
||||
// Filter(Scan)
|
||||
logicalFilter(logicalOlapScan().when(this::shouldSelectIndex))
|
||||
.then(filter -> {
|
||||
.thenApply(ctx -> {
|
||||
LogicalFilter<LogicalOlapScan> filter = ctx.root;
|
||||
LogicalOlapScan scan = filter.child();
|
||||
return filter.withChildren(select(scan, filter::getOutputSet, filter::getConjuncts));
|
||||
LogicalOlapScan mvPlan = select(
|
||||
scan, filter::getOutputSet, filter::getConjuncts,
|
||||
new HashSet<>(filter.getExpressions()));
|
||||
SlotContext slotContext = generateBaseScanExprToMvExpr(mvPlan);
|
||||
|
||||
return new LogicalProject(
|
||||
generateProjectsAlias(scan.getOutput(), slotContext),
|
||||
new ReplaceExpressions(slotContext).replace(
|
||||
new LogicalProject(mvPlan.getOutput(), filter.withChildren(mvPlan)), mvPlan));
|
||||
})
|
||||
.toRule(RuleType.MATERIALIZED_INDEX_FILTER_SCAN),
|
||||
|
||||
// project and scan.
|
||||
// Project(Scan)
|
||||
logicalProject(logicalOlapScan().when(this::shouldSelectIndex))
|
||||
.then(project -> {
|
||||
.thenApply(ctx -> {
|
||||
LogicalProject<LogicalOlapScan> project = ctx.root;
|
||||
LogicalOlapScan scan = project.child();
|
||||
return project.withChildren(
|
||||
select(scan, project::getInputSlots, ImmutableSet::of));
|
||||
|
||||
LogicalOlapScan mvPlan = select(
|
||||
scan, project::getInputSlots, ImmutableSet::of,
|
||||
new HashSet<>(project.getExpressions()));
|
||||
SlotContext slotContext = generateBaseScanExprToMvExpr(mvPlan);
|
||||
|
||||
return new LogicalProject(
|
||||
generateProjectsAlias(project.getOutput(), slotContext),
|
||||
new ReplaceExpressions(slotContext).replace(
|
||||
project.withChildren(mvPlan), mvPlan));
|
||||
})
|
||||
.toRule(RuleType.MATERIALIZED_INDEX_PROJECT_SCAN),
|
||||
|
||||
// only scan.
|
||||
logicalOlapScan()
|
||||
.when(this::shouldSelectIndex)
|
||||
.then(scan -> select(scan, scan::getOutputSet, ImmutableSet::of))
|
||||
.thenApply(ctx -> {
|
||||
LogicalOlapScan scan = ctx.root;
|
||||
|
||||
LogicalOlapScan mvPlan = select(
|
||||
scan, scan::getOutputSet, ImmutableSet::of,
|
||||
scan.getOutputSet());
|
||||
SlotContext slotContext = generateBaseScanExprToMvExpr(mvPlan);
|
||||
|
||||
return new LogicalProject(
|
||||
generateProjectsAlias(mvPlan.getOutput(), slotContext),
|
||||
new ReplaceExpressions(slotContext).replace(
|
||||
new LogicalProject(mvPlan.getOutput(), mvPlan), mvPlan));
|
||||
})
|
||||
.toRule(RuleType.MATERIALIZED_INDEX_SCAN)
|
||||
);
|
||||
}
|
||||
@ -116,10 +161,11 @@ public class SelectMaterializedIndexWithoutAggregate extends AbstractSelectMater
|
||||
* @param predicatesSupplier Supplier to get pushdown predicates.
|
||||
* @return Result scan node.
|
||||
*/
|
||||
private LogicalPlan select(
|
||||
private LogicalOlapScan select(
|
||||
LogicalOlapScan scan,
|
||||
Supplier<Set<Slot>> requiredScanOutputSupplier,
|
||||
Supplier<Set<Expression>> predicatesSupplier) {
|
||||
Supplier<Set<Expression>> predicatesSupplier,
|
||||
Set<? extends Expression> requiredExpr) {
|
||||
OlapTable table = scan.getTable();
|
||||
long baseIndexId = table.getBaseIndexId();
|
||||
KeysType keysType = scan.getTable().getKeysType();
|
||||
@ -138,16 +184,13 @@ public class SelectMaterializedIndexWithoutAggregate extends AbstractSelectMater
|
||||
if (scan.getTable().isDupKeysOrMergeOnWrite()) {
|
||||
// Set pre-aggregation to `on` to keep consistency with legacy logic.
|
||||
List<MaterializedIndex> candidates = scan.getTable().getVisibleIndex().stream()
|
||||
.filter(index -> index.getId() != baseIndexId)
|
||||
.filter(index -> !indexHasAggregate(index, scan))
|
||||
.filter(index -> containAllRequiredColumns(index, scan,
|
||||
requiredScanOutputSupplier.get()))
|
||||
requiredScanOutputSupplier.get(), requiredExpr))
|
||||
.collect(Collectors.toList());
|
||||
long bestIndex = selectBestIndex(candidates, scan, predicatesSupplier.get());
|
||||
if (bestIndex == baseIndexId) {
|
||||
return scan.withMaterializedIndexSelected(PreAggStatus.on(), bestIndex);
|
||||
} else {
|
||||
return createProjectForMv(scan.withMaterializedIndexSelected(PreAggStatus.on(), bestIndex));
|
||||
}
|
||||
return scan.withMaterializedIndexSelected(PreAggStatus.on(), bestIndex);
|
||||
} else {
|
||||
final PreAggStatus preAggStatus;
|
||||
if (preAggEnabledByHint(scan)) {
|
||||
@ -163,17 +206,18 @@ public class SelectMaterializedIndexWithoutAggregate extends AbstractSelectMater
|
||||
// No aggregate on scan.
|
||||
// So only base index and indexes that have all the keys could be used.
|
||||
List<MaterializedIndex> candidates = table.getVisibleIndex().stream()
|
||||
.filter(index -> index.getId() == baseIndexId
|
||||
|| table.getKeyColumnsByIndexId(index.getId()).size() == baseIndexKeySize)
|
||||
.filter(index -> containAllRequiredColumns(index, scan, requiredScanOutputSupplier.get()))
|
||||
.filter(index -> table.getKeyColumnsByIndexId(index.getId()).size() == baseIndexKeySize)
|
||||
.filter(index -> containAllRequiredColumns(
|
||||
index, scan, requiredScanOutputSupplier.get(),
|
||||
predicatesSupplier.get()))
|
||||
.collect(Collectors.toList());
|
||||
|
||||
if (candidates.size() == 1) {
|
||||
// `candidates` only have base index.
|
||||
return scan.withMaterializedIndexSelected(preAggStatus, baseIndexId);
|
||||
} else {
|
||||
return createProjectForMv(scan.withMaterializedIndexSelected(preAggStatus,
|
||||
selectBestIndex(candidates, scan, predicatesSupplier.get())));
|
||||
return scan.withMaterializedIndexSelected(preAggStatus,
|
||||
selectBestIndex(candidates, scan, predicatesSupplier.get()));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -183,28 +227,4 @@ public class SelectMaterializedIndexWithoutAggregate extends AbstractSelectMater
|
||||
.stream()
|
||||
.anyMatch(Column::isAggregated);
|
||||
}
|
||||
|
||||
private LogicalProject createProjectForMv(LogicalOlapScan scan) {
|
||||
Preconditions.checkArgument(scan.getSelectedIndexId() != scan.getTable().getBaseIndexId());
|
||||
List<Slot> mvSlots = scan.getOutputByMvIndex(scan.getSelectedIndexId());
|
||||
List<Slot> baseSlots = scan.getOutputByMvIndex(scan.getTable().getBaseIndexId());
|
||||
List<Alias> aliases = Lists.newArrayList();
|
||||
List<String> baseColumnNames = mvSlots.stream()
|
||||
.map(slot -> org.apache.doris.analysis.CreateMaterializedViewStmt.mvColumnBreaker(slot.getName()))
|
||||
.collect(Collectors.toList());
|
||||
boolean isMvName = org.apache.doris.analysis.CreateMaterializedViewStmt.isMVColumn(mvSlots.get(0).getName());
|
||||
for (int i = 0; i < baseColumnNames.size(); ++i) {
|
||||
for (Slot slot : baseSlots) {
|
||||
if (((SlotReference) slot).getColumn().get().getName()
|
||||
.equals(baseColumnNames.get(i))) {
|
||||
aliases.add(
|
||||
new Alias(slot.getExprId(), isMvName ? mvSlots.get(i) : slot, baseColumnNames.get(i)));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return new LogicalProject(aliases,
|
||||
isMvName ? scan.withOutput(scan.getOutputByMvIndex(scan.getSelectedIndexId()))
|
||||
: scan);
|
||||
}
|
||||
}
|
||||
|
||||
@ -88,6 +88,12 @@ public class SlotReference extends Slot {
|
||||
column.isAllowNull(), qualifier, column);
|
||||
}
|
||||
|
||||
public static SlotReference fromColumn(Column column, String name, List<String> qualifier) {
|
||||
DataType dataType = DataType.fromCatalogType(column.getType());
|
||||
return new SlotReference(StatementScopeIdGenerator.newExprId(), name, dataType,
|
||||
column.isAllowNull(), qualifier, column);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return name;
|
||||
|
||||
@ -164,11 +164,6 @@ public abstract class AbstractPlan extends AbstractTreeNode<Plan> implements Pla
|
||||
return getLogicalProperties().getOutput();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Slot> getNonUserVisibleOutput() {
|
||||
return getLogicalProperties().getNonUserVisibleOutput();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set<Slot> getOutputSet() {
|
||||
return getLogicalProperties().getOutputSet();
|
||||
|
||||
@ -25,8 +25,6 @@ import org.apache.doris.nereids.trees.plans.visitor.PlanVisitor;
|
||||
import org.apache.doris.nereids.util.MutableState;
|
||||
import org.apache.doris.nereids.util.MutableState.MultiMutableState;
|
||||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
@ -92,11 +90,6 @@ public class FakePlan implements Plan {
|
||||
return new ArrayList<>();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Slot> getNonUserVisibleOutput() {
|
||||
return ImmutableList.of();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String treeString() {
|
||||
return "DUMMY";
|
||||
|
||||
@ -88,8 +88,6 @@ public interface Plan extends TreeNode<Plan> {
|
||||
*/
|
||||
List<Slot> getOutput();
|
||||
|
||||
List<Slot> getNonUserVisibleOutput();
|
||||
|
||||
/**
|
||||
* Get output slot set of the plan.
|
||||
*/
|
||||
@ -117,10 +115,6 @@ public interface Plan extends TreeNode<Plan> {
|
||||
throw new IllegalStateException("Not support compute output for " + getClass().getName());
|
||||
}
|
||||
|
||||
default List<Slot> computeNonUserVisibleOutput() {
|
||||
return ImmutableList.of();
|
||||
}
|
||||
|
||||
String treeString();
|
||||
|
||||
default Plan withOutput(List<Slot> output) {
|
||||
|
||||
@ -108,11 +108,6 @@ public abstract class Command extends AbstractPlan implements LogicalPlan {
|
||||
throw new RuntimeException("Command do not implement getOutput");
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Slot> getNonUserVisibleOutput() {
|
||||
throw new RuntimeException("Command do not implement getNonUserVisibleOutput");
|
||||
}
|
||||
|
||||
@Override
|
||||
public String treeString() {
|
||||
throw new RuntimeException("Command do not implement treeString");
|
||||
|
||||
@ -60,7 +60,7 @@ public abstract class AbstractLogicalPlan extends AbstractPlan implements Logica
|
||||
if (hasUnboundChild || hasUnboundExpression()) {
|
||||
return UnboundLogicalProperties.INSTANCE;
|
||||
} else {
|
||||
return new LogicalProperties(this::computeOutput, this::computeNonUserVisibleOutput);
|
||||
return new LogicalProperties(this::computeOutput);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -130,4 +130,8 @@ public class LogicalFilter<CHILD_TYPE extends Plan> extends LogicalUnary<CHILD_T
|
||||
public LogicalFilter<Plan> withLogicalProperties(Optional<LogicalProperties> logicalProperties) {
|
||||
return new LogicalFilter<>(conjuncts, Optional.empty(), logicalProperties, child());
|
||||
}
|
||||
|
||||
public LogicalFilter<Plan> withConjunctsAndChild(Set<Expression> conjuncts, Plan child) {
|
||||
return new LogicalFilter<>(conjuncts, child);
|
||||
}
|
||||
}
|
||||
|
||||
@ -26,6 +26,7 @@ import org.apache.doris.common.util.Util;
|
||||
import org.apache.doris.nereids.exceptions.AnalysisException;
|
||||
import org.apache.doris.nereids.memo.GroupExpression;
|
||||
import org.apache.doris.nereids.properties.LogicalProperties;
|
||||
import org.apache.doris.nereids.rules.mv.AbstractSelectMaterializedIndexRule;
|
||||
import org.apache.doris.nereids.trees.expressions.Slot;
|
||||
import org.apache.doris.nereids.trees.expressions.SlotReference;
|
||||
import org.apache.doris.nereids.trees.plans.ObjectId;
|
||||
@ -74,7 +75,13 @@ public class LogicalOlapScan extends LogicalRelation implements CatalogRelation,
|
||||
*/
|
||||
private final PreAggStatus preAggStatus;
|
||||
|
||||
private final Map<String, Slot> mvNameToSlot;
|
||||
/**
|
||||
* When the Slotreference is generated through fromColumn,
|
||||
* the exprId will be generated incrementally,
|
||||
* causing the slotId of the base to change when the output is recalculated.
|
||||
* This structure is responsible for storing the generated Slotreference
|
||||
*/
|
||||
private final Map<String, Slot> cacheSlotWithSlotName;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Members for tablet ids.
|
||||
@ -140,7 +147,8 @@ public class LogicalOlapScan extends LogicalRelation implements CatalogRelation,
|
||||
Optional<GroupExpression> groupExpression, Optional<LogicalProperties> logicalProperties,
|
||||
List<Long> selectedPartitionIds, boolean partitionPruned,
|
||||
List<Long> selectedTabletIds, long selectedIndexId, boolean indexSelected,
|
||||
PreAggStatus preAggStatus, List<Long> partitions, List<String> hints, Map<String, Slot> mvNameToSlot) {
|
||||
PreAggStatus preAggStatus, List<Long> partitions,
|
||||
List<String> hints, Map<String, Slot> cacheSlotWithSlotName) {
|
||||
|
||||
super(id, PlanType.LOGICAL_OLAP_SCAN, table, qualifier,
|
||||
groupExpression, logicalProperties);
|
||||
@ -155,7 +163,7 @@ public class LogicalOlapScan extends LogicalRelation implements CatalogRelation,
|
||||
.filter(partitionId -> this.getTable().getPartition(partitionId).hasData()).collect(
|
||||
Collectors.toList());
|
||||
this.hints = Objects.requireNonNull(hints, "hints can not be null");
|
||||
this.mvNameToSlot = Objects.requireNonNull(mvNameToSlot, "mvNameToSlot can not be null");
|
||||
this.cacheSlotWithSlotName = Objects.requireNonNull(cacheSlotWithSlotName, "mvNameToSlot can not be null");
|
||||
}
|
||||
|
||||
public List<Long> getSelectedPartitionIds() {
|
||||
@ -200,7 +208,7 @@ public class LogicalOlapScan extends LogicalRelation implements CatalogRelation,
|
||||
&& Objects.equals(indexSelected, ((LogicalOlapScan) o).indexSelected)
|
||||
&& Objects.equals(selectedTabletIds, ((LogicalOlapScan) o).selectedTabletIds)
|
||||
&& Objects.equals(hints, ((LogicalOlapScan) o).hints)
|
||||
&& Objects.equals(mvNameToSlot, ((LogicalOlapScan) o).mvNameToSlot);
|
||||
&& Objects.equals(cacheSlotWithSlotName, ((LogicalOlapScan) o).cacheSlotWithSlotName);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -209,45 +217,50 @@ public class LogicalOlapScan extends LogicalRelation implements CatalogRelation,
|
||||
selectedPartitionIds, partitionPruned,
|
||||
selectedIndexId, indexSelected,
|
||||
selectedTabletIds,
|
||||
hints, mvNameToSlot);
|
||||
hints);
|
||||
}
|
||||
|
||||
@Override
|
||||
public LogicalOlapScan withGroupExpression(Optional<GroupExpression> groupExpression) {
|
||||
return new LogicalOlapScan(id, (Table) table, qualifier, groupExpression, Optional.of(getLogicalProperties()),
|
||||
selectedPartitionIds, partitionPruned, selectedTabletIds,
|
||||
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions, hints, mvNameToSlot);
|
||||
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions,
|
||||
hints, cacheSlotWithSlotName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public LogicalOlapScan withLogicalProperties(Optional<LogicalProperties> logicalProperties) {
|
||||
return new LogicalOlapScan(id, (Table) table, qualifier, Optional.empty(), logicalProperties,
|
||||
selectedPartitionIds, partitionPruned, selectedTabletIds,
|
||||
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions, hints, mvNameToSlot);
|
||||
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions,
|
||||
hints, cacheSlotWithSlotName);
|
||||
}
|
||||
|
||||
public LogicalOlapScan withSelectedPartitionIds(List<Long> selectedPartitionIds) {
|
||||
return new LogicalOlapScan(id, (Table) table, qualifier, Optional.empty(), Optional.of(getLogicalProperties()),
|
||||
selectedPartitionIds, true, selectedTabletIds,
|
||||
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions, hints, mvNameToSlot);
|
||||
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions,
|
||||
hints, cacheSlotWithSlotName);
|
||||
}
|
||||
|
||||
public LogicalOlapScan withMaterializedIndexSelected(PreAggStatus preAgg, long indexId) {
|
||||
return new LogicalOlapScan(id, (Table) table, qualifier, Optional.empty(), Optional.of(getLogicalProperties()),
|
||||
selectedPartitionIds, partitionPruned, selectedTabletIds,
|
||||
indexId, true, preAgg, manuallySpecifiedPartitions, hints, mvNameToSlot);
|
||||
indexId, true, preAgg, manuallySpecifiedPartitions, hints, cacheSlotWithSlotName);
|
||||
}
|
||||
|
||||
public LogicalOlapScan withSelectedTabletIds(List<Long> selectedTabletIds) {
|
||||
return new LogicalOlapScan(id, (Table) table, qualifier, Optional.empty(), Optional.of(getLogicalProperties()),
|
||||
selectedPartitionIds, partitionPruned, selectedTabletIds,
|
||||
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions, hints, mvNameToSlot);
|
||||
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions,
|
||||
hints, cacheSlotWithSlotName);
|
||||
}
|
||||
|
||||
public LogicalOlapScan withPreAggStatus(PreAggStatus preAggStatus) {
|
||||
return new LogicalOlapScan(id, (Table) table, qualifier, Optional.empty(), Optional.of(getLogicalProperties()),
|
||||
selectedPartitionIds, partitionPruned, selectedTabletIds,
|
||||
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions, hints, mvNameToSlot);
|
||||
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions,
|
||||
hints, cacheSlotWithSlotName);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -284,6 +297,9 @@ public class LogicalOlapScan extends LogicalRelation implements CatalogRelation,
|
||||
|
||||
@Override
|
||||
public List<Slot> computeOutput() {
|
||||
if (selectedIndexId != ((OlapTable) table).getBaseIndexId()) {
|
||||
return getOutputByMvIndex(selectedIndexId);
|
||||
}
|
||||
List<Column> otherColumns = new ArrayList<>();
|
||||
if (!Util.showHiddenColumns() && getTable().hasDeleteSign()
|
||||
&& !ConnectContext.get().getSessionVariable()
|
||||
@ -291,18 +307,14 @@ public class LogicalOlapScan extends LogicalRelation implements CatalogRelation,
|
||||
otherColumns.add(getTable().getDeleteSignColumn());
|
||||
}
|
||||
return Stream.concat(table.getBaseSchema().stream(), otherColumns.stream())
|
||||
.map(col -> SlotReference.fromColumn(col, qualified()))
|
||||
.collect(ImmutableList.toImmutableList());
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Slot> computeNonUserVisibleOutput() {
|
||||
OlapTable olapTable = (OlapTable) table;
|
||||
return olapTable.getVisibleIndexIdToMeta().values()
|
||||
.stream()
|
||||
.filter(index -> index.getIndexId() != ((OlapTable) table).getBaseIndexId())
|
||||
.flatMap(index -> index.getSchema().stream())
|
||||
.map(this::generateUniqueSlot)
|
||||
.map(col -> {
|
||||
if (cacheSlotWithSlotName.containsKey(col.getName())) {
|
||||
return cacheSlotWithSlotName.get(col.getName());
|
||||
}
|
||||
Slot slot = SlotReference.fromColumn(col, qualified());
|
||||
cacheSlotWithSlotName.put(col.getName(), slot);
|
||||
return slot;
|
||||
})
|
||||
.collect(ImmutableList.toImmutableList());
|
||||
}
|
||||
|
||||
@ -311,23 +323,28 @@ public class LogicalOlapScan extends LogicalRelation implements CatalogRelation,
|
||||
* and create a new slotReference for the slot that has not appeared in the materialized view.
|
||||
*/
|
||||
public List<Slot> getOutputByMvIndex(long indexId) {
|
||||
if (indexId == ((OlapTable) table).getBaseIndexId()) {
|
||||
return getOutput();
|
||||
}
|
||||
|
||||
OlapTable olapTable = (OlapTable) table;
|
||||
// PhysicalStorageLayerAggregateTest has no visible index
|
||||
if (-1 == indexId) {
|
||||
return olapTable.getIndexMetaByIndexId(indexId).getSchema()
|
||||
.stream().map(s -> generateUniqueSlot(s, indexId == ((OlapTable) table).getBaseIndexId()))
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
return olapTable.getVisibleIndexIdToMeta().get(indexId).getSchema()
|
||||
.stream()
|
||||
.map(this::generateUniqueSlot)
|
||||
.map(s -> generateUniqueSlot(s, indexId == ((OlapTable) table).getBaseIndexId()))
|
||||
.collect(ImmutableList.toImmutableList());
|
||||
}
|
||||
|
||||
private Slot generateUniqueSlot(Column column) {
|
||||
if (mvNameToSlot.containsKey(column.getName())) {
|
||||
return mvNameToSlot.get(column.getName());
|
||||
private Slot generateUniqueSlot(Column column, boolean isBaseIndex) {
|
||||
String name = isBaseIndex ? column.getName()
|
||||
: AbstractSelectMaterializedIndexRule.parseMvColumnToMvName(column.getName(),
|
||||
column.isAggregated() ? Optional.of(column.getAggregationType().toSql()) : Optional.empty());
|
||||
if (cacheSlotWithSlotName.containsKey(name)) {
|
||||
return cacheSlotWithSlotName.get(name);
|
||||
}
|
||||
Slot slot = SlotReference.fromColumn(column, qualified());
|
||||
mvNameToSlot.put(column.getName(), slot);
|
||||
Slot slot = SlotReference.fromColumn(column, name, qualified());
|
||||
cacheSlotWithSlotName.put(name, slot);
|
||||
return slot;
|
||||
}
|
||||
|
||||
|
||||
@ -170,6 +170,10 @@ public class LogicalRepeat<CHILD_TYPE extends Plan> extends LogicalUnary<CHILD_T
|
||||
return new LogicalRepeat<>(groupingSets, outputExpressionList, child);
|
||||
}
|
||||
|
||||
public LogicalRepeat<Plan> withAggOutputAndChild(List<NamedExpression> newOutput, Plan child) {
|
||||
return new LogicalRepeat<>(groupingSets, newOutput, child);
|
||||
}
|
||||
|
||||
public boolean canBindVirtualSlot() {
|
||||
return bound() && outputExpressions.stream()
|
||||
.noneMatch(output -> output.containsType(VirtualSlotReference.class));
|
||||
|
||||
@ -22,6 +22,7 @@ import org.apache.doris.nereids.memo.GroupExpression;
|
||||
import org.apache.doris.nereids.properties.DistributionSpec;
|
||||
import org.apache.doris.nereids.properties.LogicalProperties;
|
||||
import org.apache.doris.nereids.properties.PhysicalProperties;
|
||||
import org.apache.doris.nereids.trees.expressions.Slot;
|
||||
import org.apache.doris.nereids.trees.plans.ObjectId;
|
||||
import org.apache.doris.nereids.trees.plans.PlanType;
|
||||
import org.apache.doris.nereids.trees.plans.PreAggStatus;
|
||||
@ -51,14 +52,17 @@ public class PhysicalOlapScan extends PhysicalRelation implements OlapScan {
|
||||
private final ImmutableList<Long> selectedPartitionIds;
|
||||
private final PreAggStatus preAggStatus;
|
||||
|
||||
private final List<Slot> baseOutputs;
|
||||
|
||||
/**
|
||||
* Constructor for PhysicalOlapScan.
|
||||
*/
|
||||
public PhysicalOlapScan(ObjectId id, OlapTable olapTable, List<String> qualifier, long selectedIndexId,
|
||||
List<Long> selectedTabletIds, List<Long> selectedPartitionIds, DistributionSpec distributionSpec,
|
||||
PreAggStatus preAggStatus, Optional<GroupExpression> groupExpression, LogicalProperties logicalProperties) {
|
||||
PreAggStatus preAggStatus, List<Slot> baseOutputs,
|
||||
Optional<GroupExpression> groupExpression, LogicalProperties logicalProperties) {
|
||||
this(id, olapTable, qualifier, selectedIndexId, selectedTabletIds, selectedPartitionIds, distributionSpec,
|
||||
preAggStatus, groupExpression, logicalProperties, null, null);
|
||||
preAggStatus, baseOutputs, groupExpression, logicalProperties, null, null);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -66,7 +70,8 @@ public class PhysicalOlapScan extends PhysicalRelation implements OlapScan {
|
||||
*/
|
||||
public PhysicalOlapScan(ObjectId id, OlapTable olapTable, List<String> qualifier, long selectedIndexId,
|
||||
List<Long> selectedTabletIds, List<Long> selectedPartitionIds, DistributionSpec distributionSpec,
|
||||
PreAggStatus preAggStatus, Optional<GroupExpression> groupExpression, LogicalProperties logicalProperties,
|
||||
PreAggStatus preAggStatus, List<Slot> baseOutputs,
|
||||
Optional<GroupExpression> groupExpression, LogicalProperties logicalProperties,
|
||||
PhysicalProperties physicalProperties, Statistics statistics) {
|
||||
super(id, PlanType.PHYSICAL_OLAP_SCAN, qualifier, groupExpression, logicalProperties, physicalProperties,
|
||||
statistics);
|
||||
@ -76,6 +81,7 @@ public class PhysicalOlapScan extends PhysicalRelation implements OlapScan {
|
||||
this.selectedPartitionIds = ImmutableList.copyOf(selectedPartitionIds);
|
||||
this.distributionSpec = distributionSpec;
|
||||
this.preAggStatus = preAggStatus;
|
||||
this.baseOutputs = ImmutableList.copyOf(baseOutputs);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -105,6 +111,10 @@ public class PhysicalOlapScan extends PhysicalRelation implements OlapScan {
|
||||
return preAggStatus;
|
||||
}
|
||||
|
||||
public List<Slot> getBaseOutputs() {
|
||||
return baseOutputs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return Utils.toSqlString("PhysicalOlapScan[" + id.asInt() + "]" + getGroupIdAsString(),
|
||||
@ -141,13 +151,14 @@ public class PhysicalOlapScan extends PhysicalRelation implements OlapScan {
|
||||
@Override
|
||||
public PhysicalOlapScan withGroupExpression(Optional<GroupExpression> groupExpression) {
|
||||
return new PhysicalOlapScan(id, olapTable, qualifier, selectedIndexId, selectedTabletIds,
|
||||
selectedPartitionIds, distributionSpec, preAggStatus, groupExpression, getLogicalProperties());
|
||||
selectedPartitionIds, distributionSpec, preAggStatus, baseOutputs,
|
||||
groupExpression, getLogicalProperties());
|
||||
}
|
||||
|
||||
@Override
|
||||
public PhysicalOlapScan withLogicalProperties(Optional<LogicalProperties> logicalProperties) {
|
||||
return new PhysicalOlapScan(id, olapTable, qualifier, selectedIndexId, selectedTabletIds,
|
||||
selectedPartitionIds, distributionSpec, preAggStatus, Optional.empty(),
|
||||
selectedPartitionIds, distributionSpec, preAggStatus, baseOutputs, Optional.empty(),
|
||||
logicalProperties.get());
|
||||
}
|
||||
|
||||
@ -155,7 +166,7 @@ public class PhysicalOlapScan extends PhysicalRelation implements OlapScan {
|
||||
public PhysicalOlapScan withPhysicalPropertiesAndStats(
|
||||
PhysicalProperties physicalProperties, Statistics statistics) {
|
||||
return new PhysicalOlapScan(id, olapTable, qualifier, selectedIndexId, selectedTabletIds,
|
||||
selectedPartitionIds, distributionSpec, preAggStatus, groupExpression,
|
||||
selectedPartitionIds, distributionSpec, preAggStatus, baseOutputs, groupExpression,
|
||||
getLogicalProperties(), physicalProperties, statistics);
|
||||
}
|
||||
|
||||
|
||||
@ -38,6 +38,7 @@ import org.apache.doris.planner.OlapScanNode;
|
||||
import org.apache.doris.planner.PlanFragment;
|
||||
import org.apache.doris.planner.PlanNode;
|
||||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import mockit.Injectable;
|
||||
import org.junit.jupiter.api.Assertions;
|
||||
@ -65,7 +66,7 @@ public class PhysicalPlanTranslatorTest {
|
||||
LogicalProperties t1Properties = new LogicalProperties(() -> t1Output);
|
||||
PhysicalOlapScan scan = new PhysicalOlapScan(RelationUtil.newRelationId(), t1, qualifier, t1.getBaseIndexId(),
|
||||
Collections.emptyList(), Collections.emptyList(), null, PreAggStatus.on(),
|
||||
Optional.empty(), t1Properties);
|
||||
ImmutableList.of(), Optional.empty(), t1Properties);
|
||||
Literal t1FilterRight = new IntegerLiteral(1);
|
||||
Expression t1FilterExpr = new GreaterThan(col1, t1FilterRight);
|
||||
PhysicalFilter<PhysicalOlapScan> filter =
|
||||
|
||||
@ -34,6 +34,7 @@ import org.apache.doris.nereids.trees.plans.physical.PhysicalProject;
|
||||
import org.apache.doris.nereids.types.IntegerType;
|
||||
import org.apache.doris.nereids.util.PlanConstructor;
|
||||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.Lists;
|
||||
import mockit.Injectable;
|
||||
import org.junit.jupiter.api.Assertions;
|
||||
@ -75,7 +76,7 @@ public class MergeProjectPostProcessTest {
|
||||
t1Output.add(c);
|
||||
LogicalProperties t1Properties = new LogicalProperties(() -> t1Output);
|
||||
PhysicalOlapScan scan = new PhysicalOlapScan(ObjectId.createGenerator().getNextId(), t1, qualifier, 0L,
|
||||
Collections.emptyList(), Collections.emptyList(), null, PreAggStatus.on(),
|
||||
Collections.emptyList(), Collections.emptyList(), null, PreAggStatus.on(), ImmutableList.of(),
|
||||
Optional.empty(), t1Properties);
|
||||
Alias x = new Alias(a, "x");
|
||||
List<NamedExpression> projList3 = Lists.newArrayList(x, b, c);
|
||||
|
||||
@ -41,7 +41,6 @@ import com.google.common.collect.ImmutableMap;
|
||||
import org.junit.jupiter.api.AfterEach;
|
||||
import org.junit.jupiter.api.Assertions;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Disabled;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
import java.util.List;
|
||||
@ -63,6 +62,8 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
private static final String TEST_TABLE_NAME = "test_tb";
|
||||
private static final String USER_TAG_MV_NAME = "user_tags_mv";
|
||||
|
||||
private static final String ADVANCE_TABLE_NAME = "advance";
|
||||
|
||||
@Override
|
||||
protected void beforeCreatingConnectContext() throws Exception {
|
||||
FeConstants.default_scheduler_interval_millisecond = 10;
|
||||
@ -73,6 +74,7 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
protected void runBeforeAll() throws Exception {
|
||||
createDatabase(HR_DB_NAME);
|
||||
useDatabase(HR_DB_NAME);
|
||||
connectContext.getSessionVariable().enableNereidsTimeout = false;
|
||||
}
|
||||
|
||||
@BeforeEach
|
||||
@ -91,6 +93,19 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
+ " (time_col date, user_id int, user_name varchar(20), tag_id int) partition by range (time_col) "
|
||||
+ " (partition p1 values less than MAXVALUE) "
|
||||
+ "distributed by hash(time_col) buckets 3 properties('replication_num' = '1');");
|
||||
|
||||
createTable("create table " + HR_DB_NAME + "." + ADVANCE_TABLE_NAME
|
||||
+ "( a int, \n"
|
||||
+ " b int, \n"
|
||||
+ " c int\n"
|
||||
+ ")ENGINE=OLAP \n"
|
||||
+ "DISTRIBUTED BY HASH(a) BUCKETS 3\n"
|
||||
+ "PROPERTIES (\n"
|
||||
+ "\"replication_allocation\" = \"tag.location.default: 1\",\n"
|
||||
+ "\"in_memory\" = \"false\",\n"
|
||||
+ "\"storage_format\" = \"V2\",\n"
|
||||
+ "\"disable_auto_compaction\" = \"false\"\n"
|
||||
+ ");");
|
||||
}
|
||||
|
||||
@AfterEach
|
||||
@ -98,6 +113,7 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
dropTable(EMPS_TABLE_NAME, true);
|
||||
dropTable(DEPTS_TABLE_NAME, true);
|
||||
dropTable(USER_TAG_TABLE_NAME, true);
|
||||
dropTable(ADVANCE_TABLE_NAME, true);
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -152,7 +168,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMvWithTwoTable(union, EMPS_MV_NAME, EMPS_MV_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggQueryOnAggMV1() throws Exception {
|
||||
String createMVSql = "create materialized view " + EMPS_MV_NAME + " as select deptno, sum(salary), "
|
||||
@ -162,7 +177,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, EMPS_MV_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggQueryOnAggMV2() throws Exception {
|
||||
String agg = "select deptno, sum(salary) from " + EMPS_TABLE_NAME + " group by deptno";
|
||||
@ -173,7 +187,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, EMPS_MV_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggQueryOnAggMV3() throws Exception {
|
||||
String createMVSql = "create materialized view " + EMPS_MV_NAME + " as select deptno, commission, sum(salary)"
|
||||
@ -200,7 +213,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
/**
|
||||
* There will be a compensating Project added after matching of the Aggregate.
|
||||
*/
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggQuqeryOnAggMV5() throws Exception {
|
||||
String createMVSql = "create materialized view " + EMPS_MV_NAME + " as select deptno, commission, sum(salary)"
|
||||
@ -214,7 +226,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
/**
|
||||
* There will be a compensating Project + Filter added after matching of the Aggregate.
|
||||
*/
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggQuqeryOnAggMV6() throws Exception {
|
||||
String createMVSql = "create materialized view " + EMPS_MV_NAME + " as select deptno, commission, sum(salary)"
|
||||
@ -229,7 +240,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
* Aggregation query with groupSets at coarser level of aggregation than
|
||||
* aggregation materialized view.
|
||||
*/
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testGroupingSetQueryOnAggMV() throws Exception {
|
||||
String createMVSql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno, sum(salary) "
|
||||
@ -242,7 +252,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
/**
|
||||
* Aggregation query at coarser level of aggregation than aggregation materialized view.
|
||||
*/
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggQuqeryOnAggMV7() throws Exception {
|
||||
String createMVSql = "create materialized view " + EMPS_MV_NAME + " as select deptno, commission, sum(salary) "
|
||||
@ -252,7 +261,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, EMPS_MV_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggQueryOnAggMV8() throws Exception {
|
||||
String createMVSql = "create materialized view " + EMPS_MV_NAME + " as select deptno, sum(salary) "
|
||||
@ -266,7 +274,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
* Query with cube and arithmetic expr
|
||||
* TODO: enable this when group by cube is supported.
|
||||
*/
|
||||
@Disabled
|
||||
public void testAggQueryOnAggMV9() throws Exception {
|
||||
String createMVSql = "create materialized view " + EMPS_MV_NAME + " as select deptno, commission, sum(salary) "
|
||||
+ "from " + EMPS_TABLE_NAME + " group by deptno, commission;";
|
||||
@ -279,7 +286,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
/**
|
||||
* Query with rollup and arithmetic expr
|
||||
*/
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggQueryOnAggMV10() throws Exception {
|
||||
String createMVSql = "create materialized view " + EMPS_MV_NAME + " as select deptno, commission, sum(salary) "
|
||||
@ -315,7 +321,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMvWithTwoTable(query, EMPS_TABLE_NAME, EMPS_TABLE_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testJoinOnLeftProjectToJoin() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME
|
||||
@ -330,7 +335,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, ImmutableMap.of(EMPS_TABLE_NAME, EMPS_MV_NAME, DEPTS_TABLE_NAME, DEPTS_MV_NAME));
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testJoinOnRightProjectToJoin() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, sum(salary), sum"
|
||||
@ -345,7 +349,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, ImmutableMap.of(EMPS_TABLE_NAME, EMPS_MV_NAME, DEPTS_TABLE_NAME, DEPTS_MV_NAME));
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testJoinOnProjectsToJoin() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, sum(salary), sum"
|
||||
@ -416,7 +419,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
/**
|
||||
* TODO: enable this when implicit case is fully developed.
|
||||
*/
|
||||
@Disabled
|
||||
public void testJoinOnCalcToJoin4() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno from "
|
||||
+ EMPS_TABLE_NAME + ";";
|
||||
@ -433,7 +435,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
/**
|
||||
* TODO: enable this when order by column not in project is supported.
|
||||
*/
|
||||
@Disabled
|
||||
public void testOrderByQueryOnProjectView() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid from "
|
||||
+ EMPS_TABLE_NAME + ";";
|
||||
@ -447,7 +448,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
/**
|
||||
* TODO: enable this when order by column not in select is supported.
|
||||
*/
|
||||
@Disabled
|
||||
public void testOrderByQueryOnOrderByView() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid from "
|
||||
+ EMPS_TABLE_NAME + " order by deptno;";
|
||||
@ -456,7 +456,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, EMPS_MV_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggregateMVAggregateFuncs1() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno, sum(salary) "
|
||||
@ -466,7 +465,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, EMPS_MV_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggregateMVAggregateFuncs2() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno, sum(salary) "
|
||||
@ -476,7 +474,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, EMPS_MV_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggregateMVAggregateFuncs3() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno, sum(salary) "
|
||||
@ -486,7 +483,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, EMPS_MV_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggregateMVAggregateFuncs4() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno, sum(salary) "
|
||||
@ -496,7 +492,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, EMPS_MV_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggregateMVAggregateFuncs5() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid, sum(salary) "
|
||||
@ -506,7 +501,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, EMPS_MV_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggregateMVCalcGroupByQuery1() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid, sum(salary) "
|
||||
@ -517,7 +511,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, EMPS_MV_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggregateMVCalcGroupByQuery2() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid, sum(salary) "
|
||||
@ -528,7 +521,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, EMPS_MV_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggregateMVCalcGroupByQuery3() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid, sum(salary) "
|
||||
@ -552,7 +544,7 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
/**
|
||||
* TODO: enable this when estimate stats bug fixed.
|
||||
*/
|
||||
@Disabled
|
||||
@Test
|
||||
public void testSubQuery() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, empid "
|
||||
+ "from " + EMPS_TABLE_NAME + ";";
|
||||
@ -565,7 +557,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
/**
|
||||
* TODO: enable this when sum(distinct xxx) is supported.
|
||||
*/
|
||||
@Disabled
|
||||
public void testDistinctQuery() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select deptno, sum(salary) "
|
||||
+ "from " + EMPS_TABLE_NAME + " group by deptno;";
|
||||
@ -587,7 +578,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, ImmutableMap.of(EMPS_TABLE_NAME, EMPS_MV_NAME));
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testMultiMVMultiUsage() throws Exception {
|
||||
String createEmpsMVSql01 = "create materialized view emp_mv_01 as select deptno, empid, salary "
|
||||
@ -612,7 +602,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, ImmutableMap.of(EMPS_TABLE_NAME, EMPS_MV_NAME, DEPTS_TABLE_NAME, DEPTS_TABLE_NAME));
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggregateMVOnCountDistinctQuery1() throws Exception {
|
||||
String createEmpsMVsql = "create materialized view " + EMPS_MV_NAME + " as select empid, deptno, sum(salary) "
|
||||
@ -703,7 +692,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
/**
|
||||
* TODO: enable this when order by aggregate function is supported.
|
||||
*/
|
||||
@Disabled
|
||||
public void testAggFunctionInOrder() throws Exception {
|
||||
String duplicateTable = "CREATE TABLE " + TEST_TABLE_NAME + " ( k1 int(11) NOT NULL , k2 int(11) NOT NULL ,"
|
||||
+ "v1 varchar(4096) NOT NULL, v2 float NOT NULL , v3 decimal(20, 7) NOT NULL ) ENGINE=OLAP "
|
||||
@ -750,7 +738,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
/**
|
||||
* bitmap_union_count(to_bitmap()) -> bitmap_union_count without having
|
||||
*/
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testBitmapUnionRewrite() throws Exception {
|
||||
String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME
|
||||
@ -762,10 +749,23 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
singleTableTest(query, USER_TAG_MV_NAME, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* bitmap_union_count(bitmap_hash()) -> bitmap_union_count without having
|
||||
*/
|
||||
@Test
|
||||
public void testBitmapUnionBitmapHashRewrite() throws Exception {
|
||||
String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME
|
||||
+ " as select user_id, bitmap_union(bitmap_hash(tag_id)) from "
|
||||
+ USER_TAG_TABLE_NAME + " group by user_id;";
|
||||
createMv(createUserTagMVSql);
|
||||
String query = "select user_id, bitmap_union_count(bitmap_hash(tag_id)) a from " + USER_TAG_TABLE_NAME
|
||||
+ " group by user_id";
|
||||
singleTableTest(query, USER_TAG_MV_NAME, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* bitmap_union_count(to_bitmap()) -> bitmap_union_count with having
|
||||
*/
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testBitmapUnionInQuery() throws Exception {
|
||||
String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME
|
||||
@ -777,7 +777,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
singleTableTest(query, USER_TAG_MV_NAME, true);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testBitmapUnionInSubquery() throws Exception {
|
||||
String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, "
|
||||
@ -817,7 +816,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, ImmutableMap.of("user_tags", "user_tags"));
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testTwoTupleInQuery() throws Exception {
|
||||
String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, "
|
||||
@ -830,8 +828,8 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
.analyze(query)
|
||||
.rewrite()
|
||||
.matches(logicalJoin(
|
||||
logicalAggregate(
|
||||
logicalProject(
|
||||
logicalProject(
|
||||
logicalAggregate(
|
||||
logicalOlapScan().when(scan -> "user_tags_mv".equals(
|
||||
scan.getSelectedMaterializedIndexName().get())))),
|
||||
logicalAggregate(
|
||||
@ -889,7 +887,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
/**
|
||||
* count distinct to bitmap_union_count in mv
|
||||
*/
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testCountDistinctToBitmap() throws Exception {
|
||||
String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, "
|
||||
@ -922,7 +919,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, USER_TAG_TABLE_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testNDVToHll() throws Exception {
|
||||
String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, "
|
||||
@ -936,7 +932,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
/**
|
||||
* TODO: enable this when hll is supported.
|
||||
*/
|
||||
@Disabled
|
||||
public void testApproxCountDistinctToHll() throws Exception {
|
||||
// String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, "
|
||||
// + "`" + FunctionSet.HLL_UNION + "`(" + FunctionSet.HLL_HASH + "(tag_id)) from " + USER_TAG_TABLE_NAME
|
||||
@ -946,7 +941,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
// dorisAssert.query(query).explainContains(USER_TAG_MV_NAME, "hll_union_agg");
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testHLLUnionFamilyRewrite() throws Exception {
|
||||
String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, "
|
||||
@ -995,7 +989,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, EMPS_TABLE_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testCountFieldInQuery() throws Exception {
|
||||
String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, "
|
||||
@ -1012,7 +1005,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, USER_TAG_MV_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testCreateMVBaseBitmapAggTable() throws Exception {
|
||||
String createTableSQL = "create table " + HR_DB_NAME + ".agg_table "
|
||||
@ -1031,7 +1023,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
dropTable("agg_table", true);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testSelectMVWithTableAlias() throws Exception {
|
||||
String createUserTagMVSql = "create materialized view " + USER_TAG_MV_NAME + " as select user_id, "
|
||||
@ -1048,7 +1039,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
testMv(query, USER_TAG_MV_NAME);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void selectBitmapMvWithProjectTest1() throws Exception {
|
||||
createTable("create table t(\n"
|
||||
@ -1070,7 +1060,6 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
dropTable("t", true);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void selectBitmapMvWithProjectTest2() throws Exception {
|
||||
createTable("create table t(\n"
|
||||
@ -1092,10 +1081,9 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
dropTable("t", true);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void selectBitmapMvWithProjectMultiMv() throws Exception {
|
||||
createTable("create table t(\n"
|
||||
createTable("create table selectBitmapMvWithProjectMultiMv(\n"
|
||||
+ " a int, \n"
|
||||
+ " b int, \n"
|
||||
+ " c int\n"
|
||||
@ -1108,14 +1096,60 @@ class SelectMvIndexTest extends BaseMaterializedIndexSelectTest implements MemoP
|
||||
+ "\"disable_auto_compaction\" = \"false\"\n"
|
||||
+ ");");
|
||||
createMv("create materialized view mv as"
|
||||
+ " select a, bitmap_union(to_bitmap(b)) from t group by a;");
|
||||
+ " select a, bitmap_union(to_bitmap(b)) from selectBitmapMvWithProjectMultiMv group by a;");
|
||||
createMv("create materialized view mv1 as"
|
||||
+ " select c, bitmap_union(to_bitmap(b)) from t group by c;");
|
||||
+ " select c, bitmap_union(to_bitmap(b)) from selectBitmapMvWithProjectMultiMv group by c;");
|
||||
createMv("create materialized view mv2 as"
|
||||
+ " select a, c, bitmap_union(to_bitmap(b)) from t group by a, c;");
|
||||
+ " select a, c, bitmap_union(to_bitmap(b)) from selectBitmapMvWithProjectMultiMv group by a, c;");
|
||||
|
||||
testMv("select a, bitmap_union_count(to_bitmap(b)) as cnt from t group by a", "mv");
|
||||
dropTable("t", true);
|
||||
testMv("select a, bitmap_union_count(to_bitmap(b)) as cnt from selectBitmapMvWithProjectMultiMv group by a", "mv");
|
||||
dropTable("selectBitmapMvWithProjectMultiMv", true);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void advanceMvAggWithExprTest() throws Exception {
|
||||
createMv("create materialized view mv1 as"
|
||||
+ " select abs(a)+1 tmp, sum(abs(b+2)) from " + ADVANCE_TABLE_NAME + " group by tmp;");
|
||||
|
||||
testMv("select abs(a)+1 tmp, sum(abs(b+2)) from " + ADVANCE_TABLE_NAME + " group by tmp", "mv1");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void advanceMvDupColTest() throws Exception {
|
||||
createMv("create materialized view mv2 as"
|
||||
+ " select a, sum(b), max(b) from " + ADVANCE_TABLE_NAME + " group by a;");
|
||||
|
||||
testMv("select a, sum(b), max(b) as cnt from " + ADVANCE_TABLE_NAME + " group by a", "mv2");
|
||||
testMv("select a, sum(b) as cnt from " + ADVANCE_TABLE_NAME + " group by a", "mv2");
|
||||
testMv("select a, max(b) as cnt from " + ADVANCE_TABLE_NAME + " group by a", "mv2");
|
||||
testMv("select unix_timestamp(a) tmp, max(b) as cnt from " + ADVANCE_TABLE_NAME + " group by tmp", "mv2");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void advanceMvDupColTest1() throws Exception {
|
||||
createMv("create materialized view mv2 as"
|
||||
+ " select b, sum(a), max(a) from " + ADVANCE_TABLE_NAME + " group by b;");
|
||||
|
||||
testMv("select b, sum(a), max(a) as cnt from " + ADVANCE_TABLE_NAME + " group by b", "mv2");
|
||||
testMv("select b, sum(a) as cnt from " + ADVANCE_TABLE_NAME + " group by b", "mv2");
|
||||
testMv("select b, max(a) as cnt from " + ADVANCE_TABLE_NAME + " group by b", "mv2");
|
||||
testMv("select unix_timestamp(b) tmp, max(a) as cnt from " + ADVANCE_TABLE_NAME + " group by tmp", "mv2");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void advanceMvMultiSlotTest() throws Exception {
|
||||
createMv("create materialized view mv3 as"
|
||||
+ " select abs(a)+b+1,abs(b+2)+c+3 from " + ADVANCE_TABLE_NAME);
|
||||
|
||||
testMv("select abs(a)+b+1,abs(b+2)+c+3 from " + ADVANCE_TABLE_NAME, "mv3");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void advanceMvMultiSlotWithAggTest() throws Exception {
|
||||
createMv("create materialized view mv4 as"
|
||||
+ " select abs(a)+b+1 tmp, sum(abs(b+2)+c+3) from " + ADVANCE_TABLE_NAME + " group by tmp");
|
||||
|
||||
testMv("select abs(a)+b+1 tmp, sum(abs(b+2)+c+3) from " + ADVANCE_TABLE_NAME + " group by tmp", "mv4");
|
||||
}
|
||||
|
||||
private void testMv(String sql, Map<String, String> tableToIndex) {
|
||||
|
||||
@ -98,7 +98,7 @@ class SelectRollupIndexTest extends BaseMaterializedIndexSelectTest implements M
|
||||
+ ");");
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
//@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testAggMatching() {
|
||||
singleTableTest("select k2, sum(v1) from t group by k2", "r1", true);
|
||||
@ -133,7 +133,7 @@ class SelectRollupIndexTest extends BaseMaterializedIndexSelectTest implements M
|
||||
PlanChecker.from(connectContext).checkPlannerResult("select k2, sum(v1) from t group by k2");
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
//@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testTranslateWhenPreAggIsOff() {
|
||||
singleTableTest("select k2, min(v1) from t group by k2", scan -> {
|
||||
@ -171,11 +171,11 @@ class SelectRollupIndexTest extends BaseMaterializedIndexSelectTest implements M
|
||||
@Test
|
||||
public void testWithFilter() {
|
||||
PlanChecker.from(connectContext)
|
||||
.analyze("select k2, sum(v1) from t where k2>3 group by k3")
|
||||
.analyze("select k2, sum(v1) from t where k2>3 group by k2")
|
||||
.applyTopDown(new SelectMaterializedIndexWithAggregate())
|
||||
.matches(logicalOlapScan().when(scan -> {
|
||||
Assertions.assertTrue(scan.getPreAggStatus().isOn());
|
||||
Assertions.assertEquals("r2", scan.getSelectedMaterializedIndexName().get());
|
||||
Assertions.assertEquals("r1", scan.getSelectedMaterializedIndexName().get());
|
||||
return true;
|
||||
}));
|
||||
}
|
||||
@ -240,7 +240,7 @@ class SelectRollupIndexTest extends BaseMaterializedIndexSelectTest implements M
|
||||
.matches(logicalOlapScan().when(scan -> {
|
||||
PreAggStatus preAgg = scan.getPreAggStatus();
|
||||
Assertions.assertTrue(preAgg.isOff());
|
||||
Assertions.assertEquals("Input of aggregate function sum((v1 + 1)) should be slot or cast on slot.",
|
||||
Assertions.assertEquals("Slot((v1 + 1)) in sum((v1 + 1)) is neither key column nor value column.",
|
||||
preAgg.getOffReason());
|
||||
return true;
|
||||
}));
|
||||
@ -315,7 +315,7 @@ class SelectRollupIndexTest extends BaseMaterializedIndexSelectTest implements M
|
||||
}));
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
//@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testKeysOnlyQuery() throws Exception {
|
||||
singleTableTest("select k1 from t1", "r3", false);
|
||||
@ -329,7 +329,7 @@ class SelectRollupIndexTest extends BaseMaterializedIndexSelectTest implements M
|
||||
/**
|
||||
* Rollup with all the keys should be used.
|
||||
*/
|
||||
@Disabled //ISSUE #18263
|
||||
//@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testRollupWithAllTheKeys() throws Exception {
|
||||
createTable(" CREATE TABLE `t4` (\n"
|
||||
@ -355,19 +355,19 @@ class SelectRollupIndexTest extends BaseMaterializedIndexSelectTest implements M
|
||||
singleTableTest("select k1, sum(v1) from t4 group by k1", "r1", true);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
//@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testComplexGroupingExpr() throws Exception {
|
||||
singleTableTest("select k2 + 1, sum(v1) from t group by k2 + 1", "r1", true);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
//@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testCountDistinctKeyColumn() {
|
||||
singleTableTest("select k2, count(distinct k3) from t group by k2", "r4", true);
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
//@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testCountDistinctValueColumn() {
|
||||
singleTableTest("select k1, count(distinct v1) from t group by k1", scan -> {
|
||||
@ -378,7 +378,7 @@ class SelectRollupIndexTest extends BaseMaterializedIndexSelectTest implements M
|
||||
});
|
||||
}
|
||||
|
||||
@Disabled //ISSUE #18263
|
||||
//@Disabled //ISSUE #18263
|
||||
@Test
|
||||
public void testOnlyValueColumn1() throws Exception {
|
||||
singleTableTest("select sum(v1) from t", "r1", true);
|
||||
|
||||
@ -259,16 +259,16 @@ public class PlanEqualsTest {
|
||||
|
||||
PhysicalOlapScan actual = new PhysicalOlapScan(id, olapTable, Lists.newArrayList("a"),
|
||||
olapTable.getBaseIndexId(), selectedTabletId, olapTable.getPartitionIds(), distributionSpecHash,
|
||||
PreAggStatus.on(), Optional.empty(), logicalProperties);
|
||||
PreAggStatus.on(), ImmutableList.of(), Optional.empty(), logicalProperties);
|
||||
|
||||
PhysicalOlapScan expected = new PhysicalOlapScan(id, olapTable, Lists.newArrayList("a"),
|
||||
olapTable.getBaseIndexId(), selectedTabletId, olapTable.getPartitionIds(), distributionSpecHash,
|
||||
PreAggStatus.on(), Optional.empty(), logicalProperties);
|
||||
PreAggStatus.on(), ImmutableList.of(), Optional.empty(), logicalProperties);
|
||||
Assertions.assertEquals(expected, actual);
|
||||
|
||||
PhysicalOlapScan unexpected = new PhysicalOlapScan(id, olapTable, Lists.newArrayList("b"),
|
||||
olapTable.getBaseIndexId(), selectedTabletId, olapTable.getPartitionIds(), distributionSpecHash,
|
||||
PreAggStatus.on(), Optional.empty(), logicalProperties);
|
||||
PreAggStatus.on(), ImmutableList.of(), Optional.empty(), logicalProperties);
|
||||
Assertions.assertNotEquals(unexpected, actual);
|
||||
}
|
||||
|
||||
|
||||
19
regression-test/data/nereids_syntax_p0/advance_mv.out
Normal file
19
regression-test/data/nereids_syntax_p0/advance_mv.out
Normal file
@ -0,0 +1,19 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
1
|
||||
2
|
||||
3
|
||||
|
||||
-- !select_star --
|
||||
11 15
|
||||
13 17
|
||||
9 13
|
||||
|
||||
-- !select_star --
|
||||
11 15
|
||||
13 17
|
||||
9 13
|
||||
|
||||
-- !select_star --
|
||||
40
|
||||
|
||||
@ -0,0 +1,28 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
-4 -4 -4
|
||||
1 1 1
|
||||
2 2 2
|
||||
3 -3 -3
|
||||
|
||||
-- !select_mv --
|
||||
-4 -4
|
||||
1 1
|
||||
2 2
|
||||
3 -3
|
||||
|
||||
-- !select_mv --
|
||||
-4 -4
|
||||
1 1
|
||||
2 2
|
||||
3 -3
|
||||
|
||||
-- !select_mv --
|
||||
\N -4
|
||||
|
||||
@ -0,0 +1,19 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
-4 4
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
|
||||
-- !select_mv --
|
||||
-4 4
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
|
||||
@ -0,0 +1,19 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
-4 4
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
|
||||
-- !select_mv_sub --
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
|
||||
@ -0,0 +1,19 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
-4 -3
|
||||
1 2
|
||||
2 3
|
||||
3 -2
|
||||
|
||||
-- !select_mv_sub --
|
||||
-2
|
||||
-3
|
||||
2
|
||||
3
|
||||
|
||||
@ -0,0 +1,43 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
-4 4
|
||||
1 1
|
||||
2 2
|
||||
3 3
|
||||
|
||||
-- !select_mv_sub --
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
|
||||
-- !select_mv_sub_add --
|
||||
2
|
||||
3
|
||||
4
|
||||
5
|
||||
|
||||
-- !select_group_mv --
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
|
||||
-- !select_group_mv_add --
|
||||
2
|
||||
3
|
||||
4
|
||||
5
|
||||
|
||||
-- !select_group_mv_not --
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
|
||||
@ -0,0 +1,43 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
-4 1111111111111111111111111111111111111111111111111111111111111100
|
||||
1 1
|
||||
2 10
|
||||
3 1111111111111111111111111111111111111111111111111111111111111101
|
||||
|
||||
-- !select_mv_sub --
|
||||
1
|
||||
10
|
||||
1111111111111111111111111111111111111111111111111111111111111100
|
||||
1111111111111111111111111111111111111111111111111111111111111101
|
||||
|
||||
-- !select_mv_sub_add --
|
||||
10a
|
||||
1111111111111111111111111111111111111111111111111111111111111100a
|
||||
1111111111111111111111111111111111111111111111111111111111111101a
|
||||
1a
|
||||
|
||||
-- !select_group_mv --
|
||||
1
|
||||
10
|
||||
1111111111111111111111111111111111111111111111111111111111111100
|
||||
1111111111111111111111111111111111111111111111111111111111111101
|
||||
|
||||
-- !select_group_mv_add --
|
||||
10a
|
||||
1111111111111111111111111111111111111111111111111111111111111100a
|
||||
1111111111111111111111111111111111111111111111111111111111111101a
|
||||
1a
|
||||
|
||||
-- !select_group_mv_not --
|
||||
1
|
||||
10
|
||||
1111111111111111111111111111111111111111111111111111111111111100
|
||||
1111111111111111111111111111111111111111111111111111111111111101
|
||||
|
||||
@ -0,0 +1,25 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_mv --
|
||||
1
|
||||
1
|
||||
1
|
||||
|
||||
-- !select_k1 --
|
||||
1
|
||||
2
|
||||
2
|
||||
3
|
||||
3
|
||||
|
||||
-- !select_star --
|
||||
1 1 a
|
||||
2 2 b
|
||||
2 2 bb
|
||||
3 3 c
|
||||
3 3 c
|
||||
|
||||
-- !select_mv_sub --
|
||||
1 1
|
||||
2 2
|
||||
3 1
|
||||
|
||||
@ -0,0 +1,43 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
-4 -3
|
||||
1 2
|
||||
2 3
|
||||
3 -2
|
||||
|
||||
-- !select_mv_sub --
|
||||
-2
|
||||
-3
|
||||
2
|
||||
3
|
||||
|
||||
-- !select_group_mv --
|
||||
-2
|
||||
-3
|
||||
2
|
||||
3
|
||||
|
||||
-- !select_group_mv --
|
||||
-4
|
||||
1
|
||||
2
|
||||
3
|
||||
|
||||
-- !select_group_mv_not --
|
||||
-3
|
||||
-4
|
||||
1
|
||||
2
|
||||
|
||||
-- !select_mv --
|
||||
-4 -3
|
||||
1 2
|
||||
2 3
|
||||
3 -2
|
||||
|
||||
@ -0,0 +1,18 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_mv --
|
||||
1 2003
|
||||
2 2013
|
||||
3 2023
|
||||
|
||||
-- !select_star --
|
||||
1 2003-12-31 2003-12-31T01:02:03
|
||||
2 2013-12-31 2013-12-31T01:02:03
|
||||
3 2023-12-31 2023-12-31T01:02:03
|
||||
4 2033-12-31 2033-12-31T01:02:03
|
||||
|
||||
-- !select_mv_sub --
|
||||
2003
|
||||
2013
|
||||
2023
|
||||
2033
|
||||
|
||||
@ -0,0 +1,13 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
1 \N
|
||||
1 1
|
||||
3 7
|
||||
5 9
|
||||
|
||||
@ -0,0 +1,17 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
1 1
|
||||
3 7
|
||||
5 9
|
||||
|
||||
-- !select_base --
|
||||
1 1
|
||||
3 7
|
||||
5 9
|
||||
|
||||
@ -0,0 +1,13 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
-3 1
|
||||
2 7
|
||||
3 9
|
||||
4 \N
|
||||
|
||||
@ -0,0 +1,13 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
-3 1
|
||||
2 7
|
||||
3 9
|
||||
4 \N
|
||||
|
||||
@ -0,0 +1,31 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
-4 -8
|
||||
1 2
|
||||
2 4
|
||||
3 \N
|
||||
3 \N
|
||||
|
||||
-- !select_mv --
|
||||
-4 -4
|
||||
1 1
|
||||
2 2
|
||||
3 -3
|
||||
3 -3
|
||||
3 -3
|
||||
3 -3
|
||||
|
||||
-- !select_mv --
|
||||
-4 5.7.99
|
||||
1 5.7.99
|
||||
2 5.7.99
|
||||
3 5.7.99
|
||||
3 5.7.99
|
||||
|
||||
@ -0,0 +1,18 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
1 1
|
||||
3 7
|
||||
5 9
|
||||
|
||||
-- !select_mv --
|
||||
1 \N
|
||||
1 1
|
||||
3 7
|
||||
5 9
|
||||
|
||||
@ -0,0 +1,12 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
1 1 1
|
||||
|
||||
-- !select_mv --
|
||||
1 1 a
|
||||
|
||||
@ -0,0 +1,51 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
-4
|
||||
1
|
||||
2
|
||||
3
|
||||
|
||||
-- !select_mv --
|
||||
-4
|
||||
1
|
||||
2
|
||||
|
||||
-- !select_mv --
|
||||
-4
|
||||
1
|
||||
2
|
||||
|
||||
-- !select_mv --
|
||||
-4
|
||||
1
|
||||
2
|
||||
3
|
||||
|
||||
-- !select_mv --
|
||||
-4
|
||||
-3
|
||||
1
|
||||
2
|
||||
|
||||
-- !select_mv --
|
||||
-4
|
||||
1
|
||||
2
|
||||
|
||||
-- !select_mv --
|
||||
-4
|
||||
1
|
||||
2
|
||||
|
||||
-- !select_mv --
|
||||
-4
|
||||
-3
|
||||
1
|
||||
2
|
||||
|
||||
@ -0,0 +1,13 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
|
||||
-- !select_mv --
|
||||
2 2
|
||||
3 3
|
||||
4 2
|
||||
5 3
|
||||
|
||||
@ -0,0 +1,29 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
-4 -4 -4 d
|
||||
1 1 1 a
|
||||
2 2 2 b
|
||||
3 -3 \N c
|
||||
3 2 \N c
|
||||
|
||||
-- !select_mv --
|
||||
-4 d -4.0
|
||||
1 a 1.0
|
||||
2 b 2.0
|
||||
3 c -0.5
|
||||
|
||||
-- !select_mv --
|
||||
-4 -4.0
|
||||
1 1.0
|
||||
2 2.0
|
||||
3 -0.5
|
||||
|
||||
-- !select_mv --
|
||||
a 1.0
|
||||
b 2.0
|
||||
c -0.5
|
||||
d -4.0
|
||||
|
||||
-- !select_mv --
|
||||
-0.4
|
||||
|
||||
@ -0,0 +1,9 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
2020-01-03 3 c 3 3 3
|
||||
|
||||
-- !select_mv --
|
||||
|
||||
@ -0,0 +1,9 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1
|
||||
2020-01-01 1 a 1
|
||||
2020-01-02 2 b 2
|
||||
|
||||
-- !select_mv --
|
||||
3
|
||||
|
||||
@ -0,0 +1,11 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
1 \N
|
||||
2 \N
|
||||
3 \N
|
||||
|
||||
-- !select_mv --
|
||||
1 1
|
||||
2 1
|
||||
3 1
|
||||
|
||||
@ -0,0 +1,9 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
2020-01-03 3 c 3 3 3
|
||||
|
||||
-- !select_mv --
|
||||
|
||||
12
regression-test/data/nereids_syntax_p0/mv/ut/aggOnAggMV1.out
Normal file
12
regression-test/data/nereids_syntax_p0/mv/ut/aggOnAggMV1.out
Normal file
@ -0,0 +1,12 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
2020-01-03 3 c 3 3 3
|
||||
|
||||
-- !select_mv --
|
||||
2 1
|
||||
2 2
|
||||
3 3
|
||||
|
||||
@ -0,0 +1,16 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
2020-01-03 3 c 3 3 3
|
||||
|
||||
-- !select_mv --
|
||||
\N \N 8
|
||||
1 \N 3
|
||||
1 1 3
|
||||
2 \N 3
|
||||
2 2 3
|
||||
3 \N 4
|
||||
3 3 4
|
||||
|
||||
@ -0,0 +1,12 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
2020-01-03 3 c 3 3 3
|
||||
|
||||
-- !select_mv --
|
||||
1 4
|
||||
2 2
|
||||
3 2
|
||||
|
||||
14
regression-test/data/nereids_syntax_p0/mv/ut/aggOnAggMV2.out
Normal file
14
regression-test/data/nereids_syntax_p0/mv/ut/aggOnAggMV2.out
Normal file
@ -0,0 +1,14 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_emps_mv --
|
||||
2 9
|
||||
3 3
|
||||
|
||||
-- !select_star --
|
||||
2020-01-02 2 b 2 2 2
|
||||
2020-01-02 2 b 2 7 2
|
||||
2020-01-03 3 c 3 3 3
|
||||
|
||||
-- !select_mv --
|
||||
2 9
|
||||
3 3
|
||||
|
||||
10
regression-test/data/nereids_syntax_p0/mv/ut/aggOnAggMV3.out
Normal file
10
regression-test/data/nereids_syntax_p0/mv/ut/aggOnAggMV3.out
Normal file
@ -0,0 +1,10 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
2020-01-03 3 c 3 3 10
|
||||
2020-01-04 4 d 21 4 4
|
||||
|
||||
-- !select_mv --
|
||||
4 4
|
||||
|
||||
@ -0,0 +1,9 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
2020-01-03 3 c 3 3 3
|
||||
|
||||
-- !select_mv --
|
||||
|
||||
@ -0,0 +1,9 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
2020-01-03 3 c 3 3 3
|
||||
|
||||
-- !select_mv --
|
||||
|
||||
@ -0,0 +1,9 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
2020-01-03 3 c 3 3 3
|
||||
|
||||
-- !select_mv --
|
||||
|
||||
@ -0,0 +1,9 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1
|
||||
2020-01-01 1 a 2
|
||||
2020-01-02 2 b 2
|
||||
|
||||
-- !select_mv --
|
||||
1 2
|
||||
|
||||
@ -0,0 +1,9 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1
|
||||
2020-01-01 1 a 2
|
||||
2020-01-02 2 b 2
|
||||
|
||||
-- !select_mv --
|
||||
1 \N
|
||||
|
||||
@ -0,0 +1,10 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1
|
||||
2020-01-01 1 a 2
|
||||
2020-01-02 2 b 2
|
||||
|
||||
-- !select_mv --
|
||||
a 2
|
||||
b 1
|
||||
|
||||
@ -0,0 +1,5 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_mv --
|
||||
2 9 2 2
|
||||
3 3 3 3
|
||||
|
||||
14
regression-test/data/nereids_syntax_p0/mv/ut/onStar.out
Normal file
14
regression-test/data/nereids_syntax_p0/mv/ut/onStar.out
Normal file
@ -0,0 +1,14 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
|
||||
-- !select_mv --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
|
||||
-- !select_mv --
|
||||
1
|
||||
1
|
||||
|
||||
@ -0,0 +1,13 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
2020-01-03 3 c 3 3 3
|
||||
|
||||
-- !select_mv --
|
||||
1
|
||||
1
|
||||
2
|
||||
3
|
||||
|
||||
11
regression-test/data/nereids_syntax_p0/mv/ut/projectMV1.out
Normal file
11
regression-test/data/nereids_syntax_p0/mv/ut/projectMV1.out
Normal file
@ -0,0 +1,11 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
|
||||
-- !select_mv --
|
||||
1 1
|
||||
1 1
|
||||
2 2
|
||||
|
||||
14
regression-test/data/nereids_syntax_p0/mv/ut/projectMV2.out
Normal file
14
regression-test/data/nereids_syntax_p0/mv/ut/projectMV2.out
Normal file
@ -0,0 +1,14 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
|
||||
-- !select_mv --
|
||||
2
|
||||
2
|
||||
|
||||
-- !select_base --
|
||||
a
|
||||
a
|
||||
|
||||
14
regression-test/data/nereids_syntax_p0/mv/ut/projectMV3.out
Normal file
14
regression-test/data/nereids_syntax_p0/mv/ut/projectMV3.out
Normal file
@ -0,0 +1,14 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
|
||||
-- !select_mv --
|
||||
2 a
|
||||
2 a
|
||||
|
||||
-- !select_mv2 --
|
||||
a
|
||||
a
|
||||
|
||||
12
regression-test/data/nereids_syntax_p0/mv/ut/projectMV4.out
Normal file
12
regression-test/data/nereids_syntax_p0/mv/ut/projectMV4.out
Normal file
@ -0,0 +1,12 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
|
||||
-- !select_mv --
|
||||
b
|
||||
|
||||
-- !select_base --
|
||||
2
|
||||
|
||||
@ -0,0 +1,7 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
2020-01-03 3 c 3 3 3
|
||||
|
||||
11
regression-test/data/nereids_syntax_p0/mv/ut/unionDis.out
Normal file
11
regression-test/data/nereids_syntax_p0/mv/ut/unionDis.out
Normal file
@ -0,0 +1,11 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !select_star --
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-01 1 a 1 1 1
|
||||
2020-01-02 2 b 2 2 2
|
||||
2020-01-03 3 c 3 3 3
|
||||
|
||||
-- !select_mv --
|
||||
2 2
|
||||
3 3
|
||||
|
||||
16
regression-test/data/nereids_syntax_p0/rollup/agg.out
Normal file
16
regression-test/data/nereids_syntax_p0/rollup/agg.out
Normal file
@ -0,0 +1,16 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !sql --
|
||||
test_rollup_agg1 AGG_KEYS siteid INT INT No true \N true
|
||||
citycode SMALLINT SMALLINT No true \N true
|
||||
username VARCHAR(32) VARCHAR(32) No true \N true
|
||||
pv BIGINT BIGINT No false 0 SUM true
|
||||
uv BIGINT BIGINT No false 0 SUM true
|
||||
vv BIGINT BIGINT Yes false 0 SUM true
|
||||
|
||||
rollup_city AGG_KEYS citycode SMALLINT SMALLINT No true \N true
|
||||
pv BIGINT BIGINT No false 0 SUM true
|
||||
vv BIGINT BIGINT Yes false 0 SUM true
|
||||
|
||||
-- !sql --
|
||||
1 200
|
||||
|
||||
25
regression-test/data/nereids_syntax_p0/rollup/agg_date.out
Normal file
25
regression-test/data/nereids_syntax_p0/rollup/agg_date.out
Normal file
@ -0,0 +1,25 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !sql --
|
||||
test_rollup_agg_date1 AGG_KEYS datek1 DATE DATEV2 Yes true \N true
|
||||
datetimek1 DATETIME DATETIMEV2(0) Yes true \N true
|
||||
datetimek2 DATETIME DATETIMEV2(3) Yes true \N true
|
||||
datetimek3 DATETIME DATETIMEV2(6) Yes true \N true
|
||||
datev1 DATE DATEV2 No false \N MAX true
|
||||
datetimev1 DATETIME DATETIMEV2(0) No false \N MAX true
|
||||
datetimev2 DATETIME DATETIMEV2(3) No false \N MAX true
|
||||
datetimev3 DATETIME DATETIMEV2(6) No false \N MAX true
|
||||
datetimev4 DATETIME DATETIMEV2(3) Yes false \N MAX true
|
||||
|
||||
rollup_date AGG_KEYS datek1 DATE DATEV2 Yes true \N true
|
||||
datetimek2 DATETIME DATETIMEV2(3) Yes true \N true
|
||||
datetimek1 DATETIME DATETIMEV2(0) Yes true \N true
|
||||
datetimek3 DATETIME DATETIMEV2(6) Yes true \N true
|
||||
datev1 DATE DATEV2 No false \N MAX true
|
||||
datetimev1 DATETIME DATETIMEV2(0) No false \N MAX true
|
||||
datetimev2 DATETIME DATETIMEV2(3) No false \N MAX true
|
||||
datetimev3 DATETIME DATETIMEV2(6) No false \N MAX true
|
||||
|
||||
-- !sql --
|
||||
2022-08-23 2022-08-23T11:11:11 2022-08-23T11:11:11.111 2022-08-23T11:11:11.111111 2022-08-23 2022-08-23T11:11:11 2022-08-23T11:11:11.111 2022-08-23T11:11:11.111111
|
||||
2022-08-22 2022-08-22T11:11:11 2022-08-22T11:11:11.111 2022-08-22T11:11:11.111111 2022-08-22 2022-08-22T11:11:11 2022-08-22T11:11:11.111 2022-08-22T11:11:11.111111
|
||||
|
||||
13
regression-test/data/nereids_syntax_p0/rollup/date.out
Normal file
13
regression-test/data/nereids_syntax_p0/rollup/date.out
Normal file
@ -0,0 +1,13 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !sql --
|
||||
1 2020-05-30
|
||||
|
||||
-- !sql --
|
||||
1 2020-05-30T11:11:11
|
||||
|
||||
-- !sql --
|
||||
1 2020-05-30T11:11:11.111
|
||||
|
||||
-- !sql --
|
||||
1 2020-05-30T11:11:11.111111
|
||||
|
||||
14
regression-test/data/nereids_syntax_p0/rollup/hll.out
Normal file
14
regression-test/data/nereids_syntax_p0/rollup/hll.out
Normal file
@ -0,0 +1,14 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !sql --
|
||||
test_materialized_view_hll1 DUP_KEYS record_id INT INT Yes true \N true
|
||||
seller_id INT INT Yes true \N true
|
||||
store_id INT INT Yes true \N true
|
||||
sale_date DATE DATEV2 Yes false \N NONE true
|
||||
sale_amt BIGINT BIGINT Yes false \N NONE true
|
||||
|
||||
amt_count AGG_KEYS mv_store_id INT INT Yes true \N true `store_id`
|
||||
mva_HLL_UNION__hll_hash(`sale_amt`) HLL HLL No false \N HLL_UNION true hll_hash(`sale_amt`)
|
||||
|
||||
-- !sql --
|
||||
1 1
|
||||
|
||||
@ -0,0 +1,14 @@
|
||||
-- This file is automatically generated. You should know what you did if you want to edit this
|
||||
-- !sql --
|
||||
test_materialized_view_hll_with_light_sc1 DUP_KEYS record_id INT INT Yes true \N true
|
||||
seller_id INT INT Yes true \N true
|
||||
store_id INT INT Yes true \N true
|
||||
sale_date DATE DATEV2 Yes false \N NONE true
|
||||
sale_amt BIGINT BIGINT Yes false \N NONE true
|
||||
|
||||
amt_count1 AGG_KEYS mv_store_id INT INT Yes true \N true `store_id`
|
||||
mva_HLL_UNION__hll_hash(`sale_amt`) HLL HLL No false \N HLL_UNION true hll_hash(`sale_amt`)
|
||||
|
||||
-- !sql --
|
||||
1 1
|
||||
|
||||
@ -39,7 +39,7 @@ suite ("testIncorrectMVRewriteInSubquery") {
|
||||
sql("select * from user_tags order by time_col;")
|
||||
contains "(user_tags)"
|
||||
}
|
||||
qt_select_star "select * from user_tags order by time_col,tag_id;"
|
||||
qt_select_star "select * from user_tags order by time_col, tag_id;"
|
||||
|
||||
explain {
|
||||
sql("select user_id, bitmap_union(to_bitmap(tag_id)) from user_tags where user_name in (select user_name from user_tags group by user_name having bitmap_union_count(to_bitmap(tag_id)) >1 ) group by user_id order by user_id;")
|
||||
|
||||
175
regression-test/suites/nereids_syntax_p0/advance_mv.groovy
Normal file
175
regression-test/suites/nereids_syntax_p0/advance_mv.groovy
Normal file
@ -0,0 +1,175 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
suite("advance_mv") {
    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"
    def tbName1 = "test_advance_mv_agg_table"
    def tbName2 = "test_advance_mv_dup_table"
    def tbName3 = "schema_change_dup_mv_regression_test"

    // Returns the state column (index 8) of the most recent MV alter job on the table.
    def getJobState = { tableName ->
        def jobStateResult = sql """ SHOW ALTER TABLE MATERIALIZED VIEW WHERE TableName='${tableName}' ORDER BY CreateTime DESC LIMIT 1; """
        return jobStateResult[0][8]
    }

    // Poll until the latest MV job on `tableName` is FINISHED; fail the suite on timeout.
    // BUGFIX: the original inlined this loop four times, and the fourth copy tested the
    // wrong counter (max_try_secs2 instead of max_try_secs3), so its timeout never fired.
    def waitForMvJob = { tableName ->
        int maxTrySecs = 60
        while (maxTrySecs--) {
            String res = getJobState(tableName)
            if (res == "FINISHED") {
                sleep(3000)
                break
            } else {
                Thread.sleep(2000)
                if (maxTrySecs < 1) {
                    println "test timeout," + "state:" + res
                    assertEquals("FINISHED", res)
                }
            }
        }
    }

    sql "DROP TABLE IF EXISTS ${tbName1} FORCE"
    sql """
        CREATE TABLE IF NOT EXISTS ${tbName1}(
            k1 int,
            k2 int,
            k3 int,
            v1 varchar(10) replace,
            v2 bigint sum
        )
        AGGREGATE KEY(k1, k2, k3)
        DISTRIBUTED BY HASH(k1) buckets 1 properties("replication_num" = "1");
    """

    sql "DROP TABLE IF EXISTS ${tbName2} FORCE"
    sql """
        CREATE TABLE IF NOT EXISTS ${tbName2}(
            k1 int,
            k2 int,
            k3 int,
            k4 varchar(10)
        )
        DISTRIBUTED BY HASH(k1) buckets 1 properties("replication_num" = "1");
    """

    sql "DROP TABLE IF EXISTS ${tbName3} FORCE"
    sql """
        CREATE TABLE IF NOT EXISTS ${tbName3} (
            `user_id` LARGEINT NOT NULL COMMENT "用户id",
            `date` DATEV2 NOT NULL COMMENT "数据灌入日期时间",
            `city` VARCHAR(20) COMMENT "用户所在城市",
            `age` SMALLINT COMMENT "用户年龄",
            `sex` TINYINT COMMENT "用户性别",
            `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
            `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次更新时间",
            `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
            `cost` BIGINT DEFAULT "0" COMMENT "用户总消费",
            `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间",
            `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间")
        DUPLICATE KEY(`user_id`, `date`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
        BUCKETS 1
        PROPERTIES ( "replication_num" = "1", "light_schema_change" = "true" );
    """

    sql """INSERT INTO ${tbName3} VALUES
            (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2020-01-01', 1, 30, 20);
        """
    sql """INSERT INTO ${tbName3} VALUES
            (1, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 19);
        """
    sql """INSERT INTO ${tbName3} VALUES
            (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2020-01-02', 1, 31, 21);
        """
    sql """INSERT INTO ${tbName3} VALUES
            (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', '2020-01-03', '2020-01-03', 1, 32, 20);
        """

    sql """insert into ${tbName1} values (1,1,1,'a',10);"""
    sql """insert into ${tbName1} values (2,2,2,'b',10);"""
    sql """insert into ${tbName1} values (3,3,3,'c',10);"""

    sql """insert into ${tbName2} values (4,4,4,'d');"""
    sql """insert into ${tbName2} values (5,5,5,'e');"""
    sql """insert into ${tbName2} values (6,6,6,'f');"""

    // Plain group-by rollup MV on the aggregate table.
    sql "CREATE materialized VIEW mv1 AS SELECT k1, sum(k2) FROM ${tbName1} GROUP BY k1;"
    waitForMvJob(tbName1)
    explain {
        sql("select k1, sum(k2) from ${tbName1} group by k1 order by k1;")
        contains "(mv1)"
    }
    order_qt_select_star "select k1 from ${tbName1} order by k1;"

    // MV with complex expressions in both the group-by key and the aggregate input.
    sql "CREATE materialized VIEW mv2 AS SELECT abs(k1)+k2+1 tmp, sum(abs(k2+2)+k3+3) FROM ${tbName2} GROUP BY tmp;"
    waitForMvJob(tbName2)
    explain {
        // BUGFIX: the original query contained an invalid "from FROM" double keyword.
        sql("SELECT abs(k1)+k2+1 tmp, sum(abs(k2+2)+k3+3) FROM ${tbName2} GROUP BY tmp;")
        contains "(mv2)"
    }
    order_qt_select_star "SELECT abs(k1)+k2+1 tmp, sum(abs(k2+2)+k3+3) FROM ${tbName2} GROUP BY tmp;"

    // Non-aggregate MV projecting complex expressions.
    sql "CREATE materialized VIEW mv3 AS SELECT abs(k1)+k2+1 tmp, abs(k2+2)+k3+3 FROM ${tbName2};"
    waitForMvJob(tbName2)
    explain {
        sql("SELECT abs(k1)+k2+1 tmp, abs(k2+2)+k3+3 FROM ${tbName2};")
        contains "(mv3)"
    }
    order_qt_select_star "SELECT abs(k1)+k2+1 tmp, abs(k2+2)+k3+3 FROM ${tbName2};"

    // Coarser aggregation than the MV's group-by should still be answered by mv4.
    sql "create materialized view mv4 as select date, user_id, city, sum(age) from ${tbName3} group by date, user_id, city;"
    waitForMvJob(tbName3)
    explain {
        sql("select sum(age) from ${tbName3};")
        contains "(mv4)"
    }
    order_qt_select_star "select sum(age) from ${tbName3};"
}
|
||||
@ -0,0 +1,80 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
suite ("agg_have_dup_base") {
    // Duplicate-key base table; the MV below aggregates it (sum/max of k2 per k1).
    // CLEANUP: removed the unused `tbName1` variable and `getJobState` closure —
    // this suite waits for the MV via the framework's blocking `createMV` helper.
    sql """ DROP TABLE IF EXISTS agg_have_dup_base; """
    sql """
            create table agg_have_dup_base(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """

    sql "insert into agg_have_dup_base select 1,1,1,'a';"
    sql "insert into agg_have_dup_base select 2,2,2,'b';"
    sql "insert into agg_have_dup_base select 3,-3,null,'c';"

    // createMV blocks until the MV alter job finishes.
    createMV( "create materialized view k12s3m as select k1,sum(k2),max(k2) from agg_have_dup_base group by k1;")

    sleep(3000)

    // Row inserted after the MV exists must still be visible through the rewrite.
    sql "insert into agg_have_dup_base select -4,-4,-4,'d';"

    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    order_qt_select_star "select * from agg_have_dup_base order by k1;"

    // Query matching the full MV select list.
    explain {
        sql("select k1,sum(k2),max(k2) from agg_have_dup_base group by k1;")
        contains "(k12s3m)"
    }
    order_qt_select_mv "select k1,sum(k2),max(k2) from agg_have_dup_base group by k1 order by k1;"

    // Queries using a subset of the MV's aggregates must also match.
    explain {
        sql("select k1,sum(k2) from agg_have_dup_base group by k1;")
        contains "(k12s3m)"
    }
    order_qt_select_mv "select k1,sum(k2) from agg_have_dup_base group by k1 order by k1;"

    explain {
        sql("select k1,max(k2) from agg_have_dup_base group by k1;")
        contains "(k12s3m)"
    }
    order_qt_select_mv "select k1,max(k2) from agg_have_dup_base group by k1 order by k1;"

    // Scalar function over the MV key in the group-by still matches.
    explain {
        sql("select unix_timestamp(k1) tmp,sum(k2) from agg_have_dup_base group by tmp;")
        contains "(k12s3m)"
    }
    order_qt_select_mv "select unix_timestamp(k1) tmp,sum(k2) from agg_have_dup_base group by tmp order by tmp;"
}
|
||||
@ -0,0 +1,60 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
suite ("case_ignore") {
    def tbl = "case_ignore"

    // Rebuild the duplicate-key base table from scratch.
    sql """ DROP TABLE IF EXISTS ${tbl}; """
    sql """
            create table ${tbl}(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """

    sql "insert into ${tbl} select 1,1,1,'a';"
    sql "insert into ${tbl} select 2,2,2,'b';"
    sql "insert into ${tbl} select 3,-3,null,'c';"

    // The MV deliberately uses upper-case column references: the rewrite must
    // match queries regardless of identifier case.
    createMV ("create materialized view k12a as select K1,abs(K2) from ${tbl};")
    sleep(3000)

    // Insert after MV creation, then switch to the Nereids planner for the checks.
    sql "insert into ${tbl} select -4,-4,-4,'d';"
    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    qt_select_star "select * from ${tbl} order by k1;"

    // Lower-case form of the query hits the MV.
    explain {
        sql("select k1,abs(k2) from ${tbl} order by k1;")
        contains "(k12a)"
    }
    order_qt_select_mv "select k1,abs(k2) from ${tbl} order by k1;"

    // Upper-case form hits the same MV.
    explain {
        sql("select K1,abs(K2) from ${tbl} order by K1;")
        contains "(k12a)"
    }
    order_qt_select_mv "select K1,abs(K2) from ${tbl} order by K1;"

}
|
||||
@ -0,0 +1,61 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
suite ("dup_gb_mv_abs") {
    def tbl = "dup_gb_mv_abs"

    // Start from a clean duplicate-key base table.
    sql """ DROP TABLE IF EXISTS ${tbl}; """
    sql """
            create table ${tbl}(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """

    sql "insert into ${tbl} select 1,1,1,'a';"
    sql "insert into ${tbl} select 2,2,2,'b';"
    sql "insert into ${tbl} select 3,-3,null,'c';"

    // MV aggregates a scalar expression (abs) over a column, grouped by k1.
    createMV ("create materialized view k12sa as select k1,sum(abs(k2)) from ${tbl} group by k1;")
    sleep(3000)

    sql "insert into ${tbl} select -4,-4,-4,'d';"

    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    order_qt_select_star "select * from ${tbl} order by k1;"

    // Query matching the MV's key and aggregate is rewritten to use it.
    explain {
        sql("select k1,sum(abs(k2)) from ${tbl} group by k1;")
        contains "(k12sa)"
    }
    order_qt_select_mv "select k1,sum(abs(k2)) from ${tbl} group by k1 order by k1;"

    // The same aggregate without projecting the group key still matches.
    explain {
        sql("select sum(abs(k2)) from ${tbl} group by k1;")
        contains "(k12sa)"
    }
    order_qt_select_mv_sub "select sum(abs(k2)) from ${tbl} group by k1 order by k1;"
}
|
||||
@ -0,0 +1,61 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
suite ("dup_gb_mv_plus") {
    // Aggregate materialized view whose value column is an expression
    // (sum(k2+1)); verifies the Nereids planner selects it.
    sql(""" DROP TABLE IF EXISTS dup_gb_mv_plus; """)

    sql("""
            create table dup_gb_mv_plus(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """)

    // Seed rows before the MV exists so the MV build has data to backfill.
    sql("insert into dup_gb_mv_plus select 1,1,1,'a';")
    sql("insert into dup_gb_mv_plus select 2,2,2,'b';")
    sql("insert into dup_gb_mv_plus select 3,-3,null,'c';")

    createMV("create materialized view k12sp as select k1,sum(k2+1) from dup_gb_mv_plus group by k1;")
    sleep(3000)

    // Row arriving after the MV exists must show up in MV answers too.
    sql("insert into dup_gb_mv_plus select -4,-4,-4,'d';")

    sql("SET experimental_enable_nereids_planner=true")
    sql("SET enable_fallback_to_original_planner=false")

    order_qt_select_star("select * from dup_gb_mv_plus order by k1;")

    // Exact match of the MV definition: plan must use the MV.
    explain {
        sql("select k1,sum(k2+1) from dup_gb_mv_plus group by k1;")
        contains "(k12sp)"
    }
    order_qt_select_mv("select k1,sum(k2+1) from dup_gb_mv_plus group by k1 order by k1;")

    // Same aggregate without the grouping key in the select list: still MV.
    explain {
        sql("select sum(k2+1) from dup_gb_mv_plus group by k1;")
        contains "(k12sp)"
    }
    order_qt_select_mv_sub("select sum(k2+1) from dup_gb_mv_plus group by k1 order by k1;")
}
|
||||
@ -0,0 +1,85 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
suite ("dup_mv_abs") {
    // Non-aggregate materialized view over abs(k2) on a duplicate-key table;
    // checks MV selection by the Nereids planner across several query shapes.
    sql(""" DROP TABLE IF EXISTS dup_mv_abs; """)

    sql("""
            create table dup_mv_abs(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """)

    // Data present before the MV is created (backfill path).
    sql("insert into dup_mv_abs select 1,1,1,'a';")
    sql("insert into dup_mv_abs select 2,2,2,'b';")
    sql("insert into dup_mv_abs select 3,-3,null,'c';")

    createMV("create materialized view k12a as select k1,abs(k2) from dup_mv_abs;")
    sleep(3000)

    // Data arriving after the MV is created (incremental path).
    sql("insert into dup_mv_abs select -4,-4,-4,'d';")

    sql("SET experimental_enable_nereids_planner=true")
    sql("SET enable_fallback_to_original_planner=false")

    order_qt_select_star("select * from dup_mv_abs order by k1;")

    // The following shapes are all expected to rewrite onto k12a.
    explain {
        sql("select k1,abs(k2) from dup_mv_abs order by k1;")
        contains "(k12a)"
    }
    order_qt_select_mv("select k1,abs(k2) from dup_mv_abs order by k1;")

    explain {
        sql("select abs(k2) from dup_mv_abs order by k1;")
        contains "(k12a)"
    }
    order_qt_select_mv_sub("select abs(k2) from dup_mv_abs order by k1;")

    explain {
        sql("select abs(k2)+1 from dup_mv_abs order by k1;")
        contains "(k12a)"
    }
    order_qt_select_mv_sub_add("select abs(k2)+1 from dup_mv_abs order by k1;")

    explain {
        sql("select sum(abs(k2)) from dup_mv_abs group by k1 order by k1;")
        contains "(k12a)"
    }
    order_qt_select_group_mv("select sum(abs(k2)) from dup_mv_abs group by k1 order by k1;")

    explain {
        sql("select sum(abs(k2)+1) from dup_mv_abs group by k1 order by k1;")
        contains "(k12a)"
    }
    order_qt_select_group_mv_add("select sum(abs(k2)+1) from dup_mv_abs group by k1 order by k1;")

    // Grouping by k3, which the MV does not cover: stays on the base table.
    explain {
        sql("select sum(abs(k2)) from dup_mv_abs group by k3;")
        contains "(dup_mv_abs)"
    }
    order_qt_select_group_mv_not("select sum(abs(k2)) from dup_mv_abs group by k3 order by k3;")
}
|
||||
@ -0,0 +1,85 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
// Regression test: non-aggregate materialized view over a string-valued
// expression (bin(k2)) on a duplicate-key table, checked under the Nereids
// planner with fallback to the legacy planner disabled.
suite ("dup_mv_bin") {
    sql """ DROP TABLE IF EXISTS dup_mv_bin; """

    sql """
            create table dup_mv_bin(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """

    // Rows loaded before the MV is created; the MV build must backfill them.
    sql "insert into dup_mv_bin select 1,1,1,'a';"
    sql "insert into dup_mv_bin select 2,2,2,'b';"
    sql "insert into dup_mv_bin select 3,-3,null,'c';"

    createMV( "create materialized view k12b as select k1,bin(k2) from dup_mv_bin;")
    sleep(3000)

    // Row loaded after the MV exists; incremental sync must include it.
    sql "insert into dup_mv_bin select -4,-4,-4,'d';"

    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    order_qt_select_star "select * from dup_mv_bin order by k1;"

    // Exact MV projection: should be rewritten onto k12b.
    explain {
        sql("select k1,bin(k2) from dup_mv_bin order by k1;")
        contains "(k12b)"
    }
    order_qt_select_mv "select k1,bin(k2) from dup_mv_bin order by k1;"

    // MV value column without the key column in the select list.
    explain {
        sql("select bin(k2) from dup_mv_bin order by k1;")
        contains "(k12b)"
    }
    order_qt_select_mv_sub "select bin(k2) from dup_mv_bin order by k1;"

    // NOTE(review): the explain below checks "bin(k2)+1" while the result
    // query checks "concat(bin(k2),'a')" — the plan assertion and the result
    // assertion cover different queries; confirm which query was intended.
    explain {
        sql("select bin(k2)+1 from dup_mv_bin order by k1;")
        contains "(k12b)"
    }
    order_qt_select_mv_sub_add "select concat(bin(k2),'a') from dup_mv_bin order by k1;"

    // Aggregates on top of the MV column still use the MV.
    explain {
        sql("select group_concat(bin(k2)) from dup_mv_bin group by k1 order by k1;")
        contains "(k12b)"
    }
    order_qt_select_group_mv "select group_concat(bin(k2)) from dup_mv_bin group by k1 order by k1;"

    explain {
        sql("select group_concat(concat(bin(k2),'a')) from dup_mv_bin group by k1 order by k1;")
        contains "(k12b)"
    }
    order_qt_select_group_mv_add "select group_concat(concat(bin(k2),'a')) from dup_mv_bin group by k1 order by k1;"

    // Grouping by k3, which is not covered by the MV: must stay on the base
    // table.
    explain {
        sql("select group_concat(bin(k2)) from dup_mv_bin group by k3;")
        contains "(dup_mv_bin)"
    }
    order_qt_select_group_mv_not "select group_concat(bin(k2)) from dup_mv_bin group by k3 order by k3;"
}
|
||||
@ -0,0 +1,79 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
// Regression test: aggregate materialized views with bitmap_union over
// to_bitmap(int) and bitmap_hash(varchar), under the Nereids planner.
suite ("dup_mv_bm_hash") {
    sql """ DROP TABLE IF EXISTS dup_mv_bm_hash; """

    sql """
            create table dup_mv_bm_hash(
                k1 int null,
                k2 int null,
                k3 varchar(100) null
            )
            duplicate key (k1)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """

    sql "insert into dup_mv_bm_hash select 1,1,'a';"
    sql "insert into dup_mv_bm_hash select 2,2,'b';"
    sql "insert into dup_mv_bm_hash select 3,3,'c';"

    createMV( "create materialized view dup_mv_bm_hash_mv1 as select k1,bitmap_union(to_bitmap(k2)) from dup_mv_bm_hash group by k1;")

    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    // bitmap_union_count(to_bitmap(k2)) should be answered from mv1.
    explain {
        sql("select bitmap_union_count(to_bitmap(k2)) from dup_mv_bm_hash group by k1 order by k1;")
        contains "(dup_mv_bm_hash_mv1)"
    }
    order_qt_select_mv "select bitmap_union_count(to_bitmap(k2)) from dup_mv_bm_hash group by k1 order by k1;"

    // Create the second MV by hand (createMV is not used here) and poll the
    // alter job until it reports FINISHED.
    result = "null"
    sql "create materialized view dup_mv_bm_hash_mv2 as select k1,bitmap_union(bitmap_hash(k3)) from dup_mv_bm_hash group by k1;"
    while (!result.contains("FINISHED")){
        result = sql "SHOW ALTER TABLE MATERIALIZED VIEW WHERE TableName='dup_mv_bm_hash' ORDER BY CreateTime DESC LIMIT 1;"
        result = result.toString()
        logger.info("result: ${result}")
        // NOTE(review): returning here ends the suite quietly when the alter
        // job is cancelled, so a cancelled MV build is reported as a pass.
        if(result.contains("CANCELLED")){
            return
        }
        Thread.sleep(1000)
    }

    // Load more rows with the legacy planner, then switch back to Nereids.
    sql "SET experimental_enable_nereids_planner=false"

    sql "insert into dup_mv_bm_hash select 2,2,'bb';"
    sql "insert into dup_mv_bm_hash select 3,3,'c';"

    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    order_qt_select_k1 "select k1 from dup_mv_bm_hash order by k1;"

    order_qt_select_star "select * from dup_mv_bm_hash order by k1,k2,k3;"

    // bitmap_union_count(bitmap_hash(k3)) should be answered from mv2.
    explain {
        sql("select k1,bitmap_union_count(bitmap_hash(k3)) from dup_mv_bm_hash group by k1;")
        contains "(dup_mv_bm_hash_mv2)"
    }
    order_qt_select_mv_sub "select k1,bitmap_union_count(bitmap_hash(k3)) from dup_mv_bm_hash group by k1 order by k1;"
}
|
||||
@ -0,0 +1,102 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
// Regression test: non-aggregate materialized view over an arithmetic
// expression (k2+1) on a duplicate-key table, under the Nereids planner.
suite ("dup_mv_plus") {
    sql """ DROP TABLE IF EXISTS dup_mv_plus; """

    sql """
            create table dup_mv_plus(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """

    // Rows loaded before the MV is created; the MV build must backfill them.
    sql "insert into dup_mv_plus select 1,1,1,'a';"
    sql "insert into dup_mv_plus select 2,2,2,'b';"
    sql "insert into dup_mv_plus select 3,-3,null,'c';"

    createMV ("create materialized view k12p as select k1,k2+1 from dup_mv_plus;")
    sleep(3000)

    // Row loaded after the MV exists; incremental sync must include it.
    sql "insert into dup_mv_plus select -4,-4,-4,'d';"
    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    order_qt_select_star "select * from dup_mv_plus order by k1;"

    // Exact MV projection: should be rewritten onto k12p.
    explain {
        sql("select k1,k2+1 from dup_mv_plus order by k1;")
        contains "(k12p)"
    }
    // NOTE(review): the "select_mv" tag is also used for the last query in
    // this suite; confirm the framework matches repeated tags in order.
    order_qt_select_mv "select k1,k2+1 from dup_mv_plus order by k1;"

    explain {
        sql("select k2+1 from dup_mv_plus order by k1;")
        contains "(k12p)"
    }
    order_qt_select_mv_sub "select k2+1 from dup_mv_plus order by k1;"

    /*
    TODO: The selection of the current materialized view is after the constant folding,
    so if the rewriting of the constant folding occurs, the corresponding materialized view cannot be selected.
    explain {
        sql("select k2+1-1 from dup_mv_plus order by k1;")
        contains "(k12p)"
    }
    qt_select_mv_sub_add "select k2+1-1 from dup_mv_plus order by k1;"
    */

    // Aggregate on top of the MV expression.
    explain {
        sql("select sum(k2+1) from dup_mv_plus group by k1 order by k1;")
        contains "(k12p)"
    }
    order_qt_select_group_mv "select sum(k2+1) from dup_mv_plus group by k1 order by k1;"

    // Grouping by the MV expression itself.
    explain {
        sql("select sum(k1) from dup_mv_plus group by k2+1 order by k2+1;")
        contains "(k12p)"
    }
    // NOTE(review): "select_group_mv" duplicates the tag used just above;
    // confirm repeated tags are matched in order of appearance.
    order_qt_select_group_mv "select sum(k1) from dup_mv_plus group by k2+1 order by k2+1;"

    /*
    explain {
        sql("select sum(k2+1-1) from dup_mv_plus group by k1 order by k1;")
        contains "(k12p)"
    }
    qt_select_group_mv_add "select sum(k2+1-1) from dup_mv_plus group by k1 order by k1;"
    */

    // k3 is not covered by the MV: plan must stay on the base table.
    explain {
        sql("select sum(k2) from dup_mv_plus group by k3;")
        contains "(dup_mv_plus)"
    }
    order_qt_select_group_mv_not "select sum(k2) from dup_mv_plus group by k3 order by k3;"

    // Ordering by base column k2: the assertion expects the base table here.
    explain {
        sql("select k1,k2+1 from dup_mv_plus order by k2;")
        contains "(dup_mv_plus)"
    }
    order_qt_select_mv "select k1,k2+1 from dup_mv_plus order by k2;"
}
|
||||
@ -0,0 +1,76 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
// Regression test: non-aggregate materialized views over year(dateV2) and
// year(datetime) expressions, under the Nereids planner.
suite ("dup_mv_year") {
    sql """ DROP TABLE IF EXISTS dup_mv_year; """

    sql """
            create table dup_mv_year(
                k1 int null,
                k2 dateV2 null,
                k3 datetime null
            )
            duplicate key (k1)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """

    sql "insert into dup_mv_year select 1,'2003-12-31','2003-12-31 01:02:03';"
    sql "insert into dup_mv_year select 2,'2013-12-31','2013-12-31 01:02:03';"
    sql "insert into dup_mv_year select 3,'2023-12-31','2023-12-31 01:02:03';"

    createMV "create materialized view k12y as select k1,year(k2) from dup_mv_year;"
    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    // year(k2) should be answered from the k12y MV.
    explain {
        sql("select k1,year(k2) from dup_mv_year order by k1;")
        contains "(k12y)"
    }
    order_qt_select_mv "select k1,year(k2) from dup_mv_year order by k1;"

    // Create the second MV by hand (createMV is not used here) and poll the
    // alter job until it reports FINISHED.
    result = "null"
    sql "create materialized view k13y as select k1,year(k3) from dup_mv_year;"
    while (!result.contains("FINISHED")){
        result = sql "SHOW ALTER TABLE MATERIALIZED VIEW WHERE TableName='dup_mv_year' ORDER BY CreateTime DESC LIMIT 1;"
        result = result.toString()
        logger.info("result: ${result}")
        // NOTE(review): returning here ends the suite quietly when the alter
        // job is cancelled, so a cancelled MV build is reported as a pass.
        if(result.contains("CANCELLED")){
            return
        }
        Thread.sleep(1000)
    }
    // Insert an extra row with the legacy planner, then switch back.
    sql "SET experimental_enable_nereids_planner=false"

    sql "insert into dup_mv_year select 4,'2033-12-31','2033-12-31 01:02:03';"
    Thread.sleep(1000)

    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    order_qt_select_star "select * from dup_mv_year order by k1;"

    // year(k3) should be answered from the k13y MV.
    explain {
        sql("select year(k3) from dup_mv_year order by k1;")
        contains "(k13y)"
    }
    order_qt_select_mv_sub "select year(k3) from dup_mv_year order by k1;"
}
|
||||
@ -0,0 +1,53 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
suite ("multi_slot1") {
    // Non-aggregate materialized view whose columns each reference multiple
    // slots (abs(k1)+k2+1 and abs(k2+2)+k3+3).
    sql(""" DROP TABLE IF EXISTS multi_slot1; """)

    sql("""
            create table multi_slot1(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """)

    // Pre-MV rows (backfill) ...
    sql("insert into multi_slot1 select 1,1,1,'a';")
    sql("insert into multi_slot1 select 2,2,2,'b';")
    sql("insert into multi_slot1 select 3,-3,null,'c';")

    createMV("create materialized view k1a2p2ap3p as select abs(k1)+k2+1,abs(k2+2)+k3+3 from multi_slot1;")
    sleep(3000)

    // ... and a post-MV row (incremental sync).
    sql("insert into multi_slot1 select -4,-4,-4,'d';")
    sql("SET experimental_enable_nereids_planner=true")
    sql("SET enable_fallback_to_original_planner=false")

    order_qt_select_star("select * from multi_slot1 order by k1;")

    // Query matching the MV projection must be rewritten onto the MV.
    explain {
        sql("select abs(k1)+k2+1,abs(k2+2)+k3+3 from multi_slot1 order by abs(k1)+k2+1,abs(k2+2)+k3+3")
        contains "(k1a2p2ap3p)"
    }
    order_qt_select_mv("select abs(k1)+k2+1,abs(k2+2)+k3+3 from multi_slot1 order by abs(k1)+k2+1,abs(k2+2)+k3+3;")
}
|
||||
@ -0,0 +1,68 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
suite ("multi_slot2") {
    // Aggregate MV with multi-slot expressions; also checks that a group-by
    // expression that does not match the select expression is rejected.
    sql(""" DROP TABLE IF EXISTS multi_slot2; """)

    sql("""
            create table multi_slot2(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """)

    sql("insert into multi_slot2 select 1,1,1,'a';")
    sql("insert into multi_slot2 select 2,2,2,'b';")
    sql("insert into multi_slot2 select 3,-3,null,'c';")

    // Grouping by abs(k1)+k2 while selecting abs(k1)+k2+1: this CREATE is
    // expected to fail.
    def creationFailed = false
    try {
        sql("create materialized view k1a2p2ap3ps as select abs(k1)+k2+1,sum(abs(k2+2)+k3+3) from multi_slot2 group by abs(k1)+k2;")
    } catch (Exception e) {
        creationFailed = true
    }
    assertTrue(creationFailed)

    // The matching group-by succeeds.
    createMV("create materialized view k1a2p2ap3ps as select abs(k1)+k2+1,sum(abs(k2+2)+k3+3) from multi_slot2 group by abs(k1)+k2+1;")

    sleep(3000)
    sql("insert into multi_slot2 select -4,-4,-4,'d';")
    sql("SET experimental_enable_nereids_planner=true")
    sql("SET enable_fallback_to_original_planner=false")

    order_qt_select_star("select * from multi_slot2 order by k1;")

    // Exact match of the MV definition: rewritten onto the MV.
    explain {
        sql("select abs(k1)+k2+1,sum(abs(k2+2)+k3+3) from multi_slot2 group by abs(k1)+k2+1 order by abs(k1)+k2+1")
        contains "(k1a2p2ap3ps)"
    }
    order_qt_select_mv("select abs(k1)+k2+1,sum(abs(k2+2)+k3+3) from multi_slot2 group by abs(k1)+k2+1 order by abs(k1)+k2+1;")

    // Different group-by expression: must stay on the base table.
    explain {
        sql("select abs(k1)+k2+1,sum(abs(k2+2)+k3+3) from multi_slot2 group by abs(k1)+k2 order by abs(k1)+k2")
        contains "(multi_slot2)"
    }
    order_qt_select_base("select abs(k1)+k2+1,sum(abs(k2+2)+k3+3) from multi_slot2 group by abs(k1)+k2 order by abs(k1)+k2;")
}
|
||||
@ -0,0 +1,54 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
suite ("multi_slot3") {
    // Non-aggregate materialized view mixing a simple expression (k1+1) with
    // a multi-slot expression (abs(k2+2)+k3+3).
    sql(""" DROP TABLE IF EXISTS multi_slot3; """)

    sql("""
            create table multi_slot3(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """)

    // Pre-MV rows (backfill) ...
    sql("insert into multi_slot3 select 1,1,1,'a';")
    sql("insert into multi_slot3 select 2,2,2,'b';")
    sql("insert into multi_slot3 select 3,-3,null,'c';")

    createMV("create materialized view k1p2ap3p as select k1+1,abs(k2+2)+k3+3 from multi_slot3;")

    sleep(3000)

    // ... and a post-MV row (incremental sync).
    sql("insert into multi_slot3 select -4,-4,-4,'d';")
    sql("SET experimental_enable_nereids_planner=true")
    sql("SET enable_fallback_to_original_planner=false")

    order_qt_select_star("select * from multi_slot3 order by k1;")

    // Query matching the MV projection must be rewritten onto the MV.
    explain {
        sql("select k1+1,abs(k2+2)+k3+3 from multi_slot3 order by k1+1;")
        contains "(k1p2ap3p)"
    }
    order_qt_select_mv("select k1+1,abs(k2+2)+k3+3 from multi_slot3 order by k1+1;")
}
|
||||
@ -0,0 +1,56 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
suite ("multi_slot4") {
    // Aggregate materialized view grouping by an expression (k1+1) with a
    // multi-slot aggregate input (sum(abs(k2+2)+k3+3)).
    sql(""" DROP TABLE IF EXISTS multi_slot4; """)

    sql("""
            create table multi_slot4(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """)

    // Pre-MV rows (backfill) ...
    sql("insert into multi_slot4 select 1,1,1,'a';")
    sql("insert into multi_slot4 select 2,2,2,'b';")
    sql("insert into multi_slot4 select 3,-3,null,'c';")

    createMV("create materialized view k1p2ap3ps as select k1+1,sum(abs(k2+2)+k3+3) from multi_slot4 group by k1+1;")

    sleep(3000)

    // ... and a post-MV row (incremental sync).
    sql("insert into multi_slot4 select -4,-4,-4,'d';")

    sql("SET experimental_enable_nereids_planner=true")
    sql("SET enable_fallback_to_original_planner=false")

    order_qt_select_star("select * from multi_slot4 order by k1;")

    // Query matching the MV definition must be rewritten onto the MV.
    explain {
        sql("select k1+1,sum(abs(k2+2)+k3+3) from multi_slot4 group by k1+1 order by k1+1;")
        contains "(k1p2ap3ps)"
    }
    order_qt_select_mv("select k1+1,sum(abs(k2+2)+k3+3) from multi_slot4 group by k1+1 order by k1+1;")
}
|
||||
@ -0,0 +1,68 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
// Regression test: non-aggregate materialized view over k2+k3, plus checks
// for self-join and non-deterministic-function queries.
suite ("multi_slot5") {
    sql """ DROP TABLE IF EXISTS multi_slot5; """

    sql """
            create table multi_slot5(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """

    // Rows loaded before the MV is created; the MV build must backfill them.
    sql "insert into multi_slot5 select 1,1,1,'a';"
    sql "insert into multi_slot5 select 2,2,2,'b';"
    sql "insert into multi_slot5 select 3,-3,null,'c';"

    createMV ("create materialized view k123p as select k1,k2+k3 from multi_slot5;")

    sleep(3000)

    // One new row plus a duplicate of an existing row, after the MV exists.
    sql "insert into multi_slot5 select -4,-4,-4,'d';"
    sql "insert into multi_slot5 select 3,-3,null,'c';"
    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    order_qt_select_star "select * from multi_slot5 order by k1,k4;"

    // Exact MV projection: should be rewritten onto k123p.
    explain {
        sql("select k1,k2+k3 from multi_slot5 order by k1;")
        contains "(k123p)"
    }
    // NOTE(review): the "select_mv" tag is used three times in this suite;
    // confirm the framework matches repeated tags in order of appearance.
    order_qt_select_mv "select k1,k2+k3 from multi_slot5 order by k1;"

    // Self-join: the plan is expected to reference both the MV and the base
    // table.
    explain {
        sql("select lhs.k1,rhs.k2 from multi_slot5 as lhs right join multi_slot5 as rhs on lhs.k1=rhs.k1;")
        contains "(k123p)"
        contains "(multi_slot5)"
    }
    order_qt_select_mv "select lhs.k1,rhs.k2 from multi_slot5 as lhs right join multi_slot5 as rhs on lhs.k1=rhs.k1 order by lhs.k1;"

    // A query mixing an MV column with version() should still hit the MV.
    explain {
        sql("select k1,version() from multi_slot5;")
        contains "(k123p)"
    }
    order_qt_select_mv "select k1,version() from multi_slot5 order by k1;"
}
|
||||
@ -0,0 +1,86 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
// Regression test: two materialized views over the same duplicate-key table —
// a projection MV (k1a2p2ap3p) and an aggregate MV (k1a2p2ap3ps) — with MV
// selection checked under the Nereids planner.
suite ("multi_slot6") {
    sql """ DROP TABLE IF EXISTS multi_slot6; """

    sql """
            create table multi_slot6(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """

    // Rows loaded before the MVs are created; the MV builds must backfill them.
    sql "insert into multi_slot6 select 1,1,1,'a';"
    sql "insert into multi_slot6 select 2,2,2,'b';"
    sql "insert into multi_slot6 select 3,-3,null,'c';"

    createMV ("create materialized view k1a2p2ap3p as select abs(k1)+k2+1,abs(k2+2)+k3+3 from multi_slot6;")

    createMV("create materialized view k1a2p2ap3ps as select abs(k1)+k2+1,sum(abs(k2+2)+k3+3) from multi_slot6 group by abs(k1)+k2+1;")

    // Row loaded after both MVs exist; incremental sync must include it.
    sql "insert into multi_slot6 select -4,-4,-4,'d';"

    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    order_qt_select_star "select * from multi_slot6 order by k1;"

    // With two candidate MVs the selector may pick either MV or (for now) the
    // base table; retry until one of the acceptable plans shows up.
    def retry_times = 60
    for (def i = 0; i < retry_times; ++i) {
        boolean is_k1a2p2ap3p = false
        boolean is_k1a2p2ap3ps = false
        boolean is_d_table = false
        explain {
            sql("select abs(k1)+k2+1,sum(abs(k2+2)+k3+3) from multi_slot6 group by abs(k1)+k2+1 order by abs(k1)+k2+1")
            check { explainStr, ex, startTime, endTime ->
                if (ex != null) {
                    throw ex;
                }
                logger.info("explain result: ${explainStr}".toString())
                // Fixed: was `explainStr.contains"(k1a2p2ap3p)"` (unparenthesized
                // command expression) — parenthesized for consistency with the
                // two calls below.
                is_k1a2p2ap3p = explainStr.contains("(k1a2p2ap3p)")
                is_k1a2p2ap3ps = explainStr.contains("(k1a2p2ap3ps)")
                is_d_table = explainStr.contains("(multi_slot6)")
                assert is_k1a2p2ap3p || is_k1a2p2ap3ps || is_d_table
            }
        }
        // FIXME: the mv selector maybe select base table forever when exist multi mv,
        // so this pr just treat as success if select base table.
        // we should remove is_d_table in the future
        if (is_d_table || is_k1a2p2ap3p || is_k1a2p2ap3ps) {
            break
        }
        if (i + 1 == retry_times) {
            throw new IllegalStateException("retry and failed too much")
        }
        sleep(1000)
    }
    // NOTE(review): the "select_mv" tag is used for two different queries in
    // this suite; confirm the framework matches repeated tags in order.
    order_qt_select_mv "select abs(k1)+k2+1,sum(abs(k2+2)+k3+3) from multi_slot6 group by abs(k1)+k2+1 order by abs(k1)+k2+1;"

    // The pure-projection query must be rewritten onto the projection MV.
    explain {
        sql("select abs(k1)+k2+1,abs(k2+2)+k3+3 from multi_slot6 order by abs(k1)+k2+1,abs(k2+2)+k3+3")
        contains "(k1a2p2ap3p)"
    }
    order_qt_select_mv "select abs(k1)+k2+1,abs(k2+2)+k3+3 from multi_slot6 order by abs(k1)+k2+1,abs(k2+2)+k3+3;"

}
|
||||
@ -0,0 +1,79 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
// Verifies materialized-view selection for queries issued through logical
// views: a view touching only MV columns should be routed to the MV, while a
// view needing extra columns must fall back to the base table.
suite ("mv_with_view") {
    sql """ DROP TABLE IF EXISTS mv_with_view; """

    sql """
            create table mv_with_view (
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """

    // Rows loaded before the MV exists.
    sql """insert into mv_with_view select 1,1,1,'a';"""
    sql """insert into mv_with_view select 2,2,2,'b';"""

    // MV projecting only k1, k3, k2 (no k4).
    createMV("create materialized view k132 as select k1,k3,k2 from mv_with_view;")

    sleep(3000)

    // Row loaded after the MV is built.
    sql """insert into mv_with_view select 3,-3,null,'c';"""

    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    // select * needs all columns, so the base table must be scanned.
    explain {
        sql("select * from mv_with_view order by k1;")
        contains "(mv_with_view)"
    }
    order_qt_select_star "select * from mv_with_view order by k1;"

    sql """
        drop view if exists v_k132;
        """

    // This view only reads k1,k3,k2 -> its scan should use MV k132.
    sql """
        create view v_k132 as select k1,k3,k2 from mv_with_view where k1 = 1;
        """
    explain {
        sql("select * from v_k132 order by k1;")
        contains "(k132)"
    }
    order_qt_select_mv "select * from v_k132 order by k1;"

    sql """
        drop view if exists v_k124;
        """

    // This view needs k4, which the MV does not carry -> base table scan.
    sql """
        create view v_k124 as select k1,k2,k4 from mv_with_view where k1 = 1;
        """
    explain {
        sql("select * from v_k124 order by k1;")
        contains "(mv_with_view)"
    }
    order_qt_select_mv "select * from v_k124 order by k1;"
}
|
||||
@ -0,0 +1,58 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
// IN-subquery queries over a table carrying a projection MV (k123p):
// every outer-column / subquery-column combination must still return
// correct rows rather than being mis-rewritten against the MV.
suite ("rollback1") {
    sql """ DROP TABLE IF EXISTS rollback1; """

    sql """
            create table rollback1(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """

    // Rows loaded before the MV is created...
    sql """insert into rollback1 select 1,1,1,'a';"""
    sql """insert into rollback1 select 2,2,2,'b';"""
    sql """insert into rollback1 select 3,-3,null,'c';"""

    createMV("create materialized view k123p as select k1,k2+k3 from rollback1;")

    sleep(3000)

    // ...and one loaded afterwards.
    sql """insert into rollback1 select -4,-4,-4,'d';"""
    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    order_qt_select_star "select * from rollback1 order by k1;"

    // Cross k1/k2 as the outer column with k1/k2 as the subquery column.
    qt_select_mv "select k1 p from rollback1 as t where t.k1 in (select k1 from rollback1) order by p;"
    qt_select_mv "select k1 p from rollback1 as t where t.k1 in (select k2 from rollback1) order by p;"
    qt_select_mv "select k1 p from rollback1 as t where t.k2 in (select k1 from rollback1) order by p;"
    qt_select_mv "select k1 p from rollback1 as t where t.k2 in (select k2 from rollback1) order by p;"
    qt_select_mv "select k2 p from rollback1 as t where t.k1 in (select k1 from rollback1) order by p;"
    qt_select_mv "select k2 p from rollback1 as t where t.k1 in (select k2 from rollback1) order by p;"
    qt_select_mv "select k2 p from rollback1 as t where t.k2 in (select k1 from rollback1) order by p;"
    qt_select_mv "select k2 p from rollback1 as t where t.k2 in (select k2 from rollback1) order by p;"
}
|
||||
@ -0,0 +1,55 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
// Single-slot complex-expression MV: key abs(k1)+1, value sum(abs(k2+1)).
// The matching aggregate query must be served by the MV.
suite ("single_slot") {
    sql """ DROP TABLE IF EXISTS single_slot; """

    sql """
            create table single_slot(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """

    // Rows loaded before the MV exists.
    sql """insert into single_slot select 1,1,1,'a';"""
    sql """insert into single_slot select 2,2,2,'b';"""
    sql """insert into single_slot select 3,-3,null,'c';"""

    createMV("create materialized view k1ap2spa as select abs(k1)+1,sum(abs(k2+1)) from single_slot group by abs(k1)+1;")

    sleep(3000)

    // Row loaded after the MV is built.
    sql """insert into single_slot select -4,-4,-4,'d';"""
    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    order_qt_select_star "select * from single_slot order by k1;"

    // Same key/value expressions as the MV definition -> MV scan expected.
    explain {
        sql("select abs(k1)+1 t,sum(abs(k2+1)) from single_slot group by t order by t;")
        contains("(k1ap2spa)")
    }
    order_qt_select_mv "select abs(k1)+1 t,sum(abs(k2+1)) from single_slot group by t order by t;"
}
|
||||
@ -0,0 +1,83 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
// avg() handling for materialized views: avg itself is rejected in an MV
// definition, but an MV that keeps sum() and count() can answer
// sum()/count() queries for any subset of the MV's group-by keys.
suite ("sum_devide_count") {
    sql """ DROP TABLE IF EXISTS sum_devide_count; """

    sql """
            create table sum_devide_count(
                k1 int null,
                k2 int not null,
                k3 bigint null,
                k4 varchar(100) null
            )
            duplicate key (k1,k2,k3)
            distributed BY hash(k1) buckets 3
            properties("replication_num" = "1");
        """

    sql "insert into sum_devide_count select 1,1,1,'a';"
    sql "insert into sum_devide_count select 2,2,2,'b';"
    sql "insert into sum_devide_count select 3,-3,null,'c';"

    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    // avg() is not allowed directly in an MV definition -> expect an error.
    test {
        sql "create materialized view kavg as select k1,k4,avg(k2) from sum_devide_count group by k1,k4;"
        exception "errCode = 2,"
    }

    // Equivalent MV: store sum and count, derive avg at query time.
    createMV ("create materialized view kavg as select k1,k4,sum(k2),count(k2) from sum_devide_count group by k1,k4;")

    sleep(3000)

    // Load the next rows through the legacy planner, then switch back to Nereids.
    sql "SET experimental_enable_nereids_planner=false"

    sql "insert into sum_devide_count select -4,-4,-4,'d';"
    sql "insert into sum_devide_count select 3,2,null,'c';"
    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"

    qt_select_star "select * from sum_devide_count order by k1,k2,k3,k4;"

    // sum/count should be answered from kavg for every group-by subset.
    explain {
        sql("select k1,k4,sum(k2)/count(k2) from sum_devide_count group by k1,k4 order by k1,k4;")
        contains "(kavg)"
    }
    order_qt_select_mv "select k1,k4,sum(k2)/count(k2) from sum_devide_count group by k1,k4 order by k1,k4;"

    // Grouping by k1 only (a prefix of the MV keys).
    explain {
        sql("select k1,sum(k2)/count(k2) from sum_devide_count group by k1 order by k1;")
        contains "(kavg)"
    }
    order_qt_select_mv "select k1,sum(k2)/count(k2) from sum_devide_count group by k1 order by k1;"

    // Grouping by k4 only.
    explain {
        sql("select k4,sum(k2)/count(k2) from sum_devide_count group by k4 order by k4;")
        contains "(kavg)"
    }
    order_qt_select_mv "select k4,sum(k2)/count(k2) from sum_devide_count group by k4 order by k4;"

    // Global aggregate with no group by.
    explain {
        sql("select sum(k2)/count(k2) from sum_devide_count;")
        contains "(kavg)"
    }
    order_qt_select_mv "select sum(k2)/count(k2) from sum_devide_count;"
}
|
||||
@ -0,0 +1,60 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
// The same materialized view should be usable by both inputs of a self join
// when each side projects only MV columns.
suite ("MVMultiUsage") {
    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"
    sql """ DROP TABLE IF EXISTS MVMultiUsage; """

    sql """
            create table MVMultiUsage (
                time_col dateV2,
                empid int,
                name varchar,
                deptno int,
                salary int,
                commission int)
            partition by range (time_col) (partition p1 values less than MAXVALUE) distributed by hash(time_col) buckets 3 properties('replication_num' = '1');
        """

    sql """insert into MVMultiUsage values("2020-01-01",1,"a",1,1,1);"""
    sql """insert into MVMultiUsage values("2020-01-02",2,"b",2,2,2);"""
    sql """insert into MVMultiUsage values("2020-01-03",3,"c",3,3,3);"""

    // Projection MV ordered by deptno.
    createMV("create materialized view MVMultiUsage_mv as select deptno, empid, salary from MVMultiUsage order by deptno;")

    sleep(3000)

    // Duplicate row loaded after the MV is built.
    sql """insert into MVMultiUsage values("2020-01-01",1,"a",1,1,1);"""

    // select * needs every column -> base table scan.
    explain {
        sql("select * from MVMultiUsage order by empid;")
        contains "(MVMultiUsage)"
    }
    order_qt_select_star "select * from MVMultiUsage order by empid;"

    // Both join inputs project only (deptno, empid); both scans must hit the
    // MV and the base table must not appear in the plan.
    explain {
        sql("select * from (select deptno, empid from MVMultiUsage where deptno>100) A join (select deptno, empid from MVMultiUsage where deptno >200) B using (deptno);")
        contains "(MVMultiUsage_mv)"
        notContains "(MVMultiUsage)"
    }
    order_qt_select_mv "select * from (select deptno, empid from MVMultiUsage where deptno>100) A join (select deptno, empid from MVMultiUsage where deptno >200) B using (deptno) order by 1;"

}
|
||||
@ -0,0 +1,53 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
// A table alias in the query must not prevent MV selection:
// `select count(tag_id) from MVWithAs t` should still hit MVWithAs_mv.
suite ("MVWithAs") {
    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"
    sql """ DROP TABLE IF EXISTS MVWithAs; """

    sql """ create table MVWithAs (
                time_col dateV2,
                user_id int,
                user_name varchar(20),
                tag_id int)
            partition by range (time_col) (partition p1 values less than MAXVALUE) distributed by hash(time_col) buckets 3 properties('replication_num' = '1');
        """

    // Two rows loaded before the MV, one after.
    sql 'insert into MVWithAs values("2020-01-01",1,"a",1);'
    sql 'insert into MVWithAs values("2020-01-02",2,"b",2);'

    createMV("create materialized view MVWithAs_mv as select user_id, count(tag_id) from MVWithAs group by user_id;")

    sleep(3000)

    sql 'insert into MVWithAs values("2020-01-01",1,"a",1);'

    // Full projection cannot use the MV.
    explain {
        sql("select * from MVWithAs order by time_col;")
        contains("(MVWithAs)")
    }
    order_qt_select_star "select * from MVWithAs order by time_col;"

    // The aliased aggregate query is answered from the MV.
    explain {
        sql("select count(tag_id) from MVWithAs t;")
        contains("(MVWithAs_mv)")
    }
    order_qt_select_mv "select count(tag_id) from MVWithAs t;"
}
|
||||
@ -0,0 +1,44 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
// testAggTableCountDistinctInBitmapType
|
||||
// testAggTableCountDistinctInBitmapType: on an aggregate table whose value
// column is bitmap_union, count(distinct) must be planned as
// bitmap_union_count.
suite ("aggCDInBitmap") {
    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"
    sql """ DROP TABLE IF EXISTS aggCDInBitmap; """

    sql """
        CREATE TABLE aggCDInBitmap (k1 int, v1 bitmap bitmap_union) Aggregate KEY (k1) DISTRIBUTED BY HASH(k1) BUCKETS 3 PROPERTIES ('replication_num' = '1');
        """

    sql "insert into aggCDInBitmap values(1,to_bitmap(1));"
    sql "insert into aggCDInBitmap values(2,to_bitmap(2));"
    sql "insert into aggCDInBitmap values(3,to_bitmap(3));"

    order_qt_select_star "select * from aggCDInBitmap order by 1;"

    // count(distinct bitmap) -> bitmap_union_count rewrite.
    explain {
        sql("select k1, count(distinct v1) from aggCDInBitmap group by k1;")
        contains("bitmap_union_count")
    }
    order_qt_select_mv "select k1, count(distinct v1) from aggCDInBitmap group by k1 order by k1;"

}
|
||||
@ -0,0 +1,60 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
// testAggregateMVCalcAggFunctionQuery
|
||||
// An aggregate computed over an expression of the raw column
// (sum(salary + 1)) cannot be derived from an MV that stores sum(salary),
// so the MV must NOT be selected.
suite ("aggMVCalcAggFun") {
    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"
    sql """ DROP TABLE IF EXISTS aggMVCalcAggFun; """

    sql """
            create table aggMVCalcAggFun (
                time_col dateV2,
                empid int,
                name varchar,
                deptno int,
                salary int,
                commission int)
            partition by range (time_col) (partition p1 values less than MAXVALUE) distributed by hash(time_col) buckets 3 properties('replication_num' = '1');
        """

    sql """insert into aggMVCalcAggFun values("2020-01-01",1,"a",1,1,1);"""
    sql """insert into aggMVCalcAggFun values("2020-01-02",2,"b",2,2,2);"""
    sql """insert into aggMVCalcAggFun values("2020-01-03",3,"c",3,3,3);"""

    // MV stores sum(salary) keyed by (empid, deptno).
    createMV("create materialized view aggMVCalcAggFunMv as select deptno, empid, sum(salary) from aggMVCalcAggFun group by empid, deptno;")

    sleep(3000)

    // Duplicate row loaded after the MV is built.
    sql """insert into aggMVCalcAggFun values("2020-01-01",1,"a",1,1,1);"""

    // select * needs every column -> base table scan.
    explain {
        sql("select * from aggMVCalcAggFun order by empid;")
        contains "(aggMVCalcAggFun)"
    }
    order_qt_select_star "select * from aggMVCalcAggFun order by empid;"

    // sum(salary + 1) aggregates a derived expression, not the stored
    // sum(salary) -> the MV must be skipped.
    explain {
        sql("select deptno, sum(salary + 1) from aggMVCalcAggFun where deptno > 10 group by deptno;")
        notContains "(aggMVCalcAggFunMv)"
    }
    order_qt_select_mv "select deptno, sum(salary + 1) from aggMVCalcAggFun where deptno > 10 group by deptno order by deptno;"
}
|
||||
@ -0,0 +1,61 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
// An aggregate MV (deptno -> sum(salary), max(commission)) should serve a
// grouped sum(salary) query even when the select-list order differs from
// the MV definition.
suite ("aggOnAggMV1") {
    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"
    sql """ DROP TABLE IF EXISTS aggOnAggMV1; """

    sql """
            create table aggOnAggMV1 (
                time_col dateV2,
                empid int,
                name varchar,
                deptno int,
                salary int,
                commission int)
            partition by range (time_col) (partition p1 values less than MAXVALUE) distributed by hash(time_col) buckets 3 properties('replication_num' = '1');
        """

    // Rows loaded before the MV exists.
    sql 'insert into aggOnAggMV1 values("2020-01-01",1,"a",1,1,1);'
    sql 'insert into aggOnAggMV1 values("2020-01-02",2,"b",2,2,2);'
    sql 'insert into aggOnAggMV1 values("2020-01-03",3,"c",3,3,3);'

    createMV("create materialized view aggOnAggMV1_mv as select deptno, sum(salary), max(commission) from aggOnAggMV1 group by deptno ;")

    sleep(3000)

    // Duplicate row loaded after the MV is built.
    sql 'insert into aggOnAggMV1 values("2020-01-01",1,"a",1,1,1);'

    // Full projection cannot use the MV.
    explain {
        sql("select * from aggOnAggMV1 order by empid;")
        contains("(aggOnAggMV1)")
    }
    order_qt_select_star "select * from aggOnAggMV1 order by empid;"

    // The grouped sum is answered from the MV.
    explain {
        sql("select sum(salary), deptno from aggOnAggMV1 group by deptno order by deptno;")
        contains("(aggOnAggMV1_mv)")
    }
    order_qt_select_mv "select sum(salary), deptno from aggOnAggMV1 group by deptno order by deptno;"

}
|
||||
@ -0,0 +1,57 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
import org.codehaus.groovy.runtime.IOGroovyMethods
|
||||
|
||||
// ROLLUP over the MV's exact group-by keys should still be answered from the
// aggregate MV, with post-processing (+ 1) applied on top of the stored sum.
suite ("aggOnAggMV10") {
    sql "SET experimental_enable_nereids_planner=true"
    sql "SET enable_fallback_to_original_planner=false"
    sql """ DROP TABLE IF EXISTS aggOnAggMV10; """

    sql """
            create table aggOnAggMV10 (
                time_col dateV2,
                empid int,
                name varchar,
                deptno int,
                salary int,
                commission int)
            partition by range (time_col) (partition p1 values less than MAXVALUE) distributed by hash(time_col) buckets 3 properties('replication_num' = '1');
        """

    sql """insert into aggOnAggMV10 values("2020-01-01",1,"a",1,1,1);"""
    sql """insert into aggOnAggMV10 values("2020-01-02",2,"b",2,2,2);"""
    sql """insert into aggOnAggMV10 values("2020-01-03",3,"c",3,3,3);"""

    // Aggregate MV keyed on (deptno, commission) with pre-aggregated sum(salary).
    createMV("create materialized view aggOnAggMV10_mv as select deptno, commission, sum(salary) from aggOnAggMV10 group by deptno, commission;")

    sleep(3000)

    // Duplicate row loaded after the MV is built.
    sql """insert into aggOnAggMV10 values("2020-01-01",1,"a",1,1,1);"""

    // select * needs every column -> base table scan.
    explain {
        sql("select * from aggOnAggMV10 order by empid;")
        contains "(aggOnAggMV10)"
    }
    order_qt_select_star "select * from aggOnAggMV10 order by empid;"

    // group by rollup (deptno, commission) matches the MV keys -> MV scan.
    explain {
        sql("select deptno, commission, sum(salary) + 1 from aggOnAggMV10 group by rollup (deptno, commission);")
        contains "(aggOnAggMV10_mv)"
    }
    order_qt_select_mv "select deptno, commission, sum(salary) + 1 from aggOnAggMV10 group by rollup (deptno, commission) order by 1,2;"
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user