This commit is contained in:
seawinde
2024-12-09 11:46:48 +08:00
committed by GitHub
parent 99e51ae4d2
commit 1662e4709a
195 changed files with 2219 additions and 480 deletions

View File

@ -51,14 +51,18 @@ public class MTMVCache {
// The materialized view plan which should be optimized by the same rules to query
// and will remove top sink and unused sort
private final Plan logicalPlan;
// The original plan of mv def sql
// The original rewritten plan of mv def sql
private final Plan originalPlan;
// The analyzed plan of the mv def sql, which is used by the tableCollector; it should not be optimized by RBO
private final Plan analyzedPlan;
private final Statistics statistics;
private final StructInfo structInfo;
public MTMVCache(Plan logicalPlan, Plan originalPlan, Statistics statistics, StructInfo structInfo) {
public MTMVCache(Plan logicalPlan, Plan originalPlan, Plan analyzedPlan,
Statistics statistics, StructInfo structInfo) {
this.logicalPlan = logicalPlan;
this.originalPlan = originalPlan;
this.analyzedPlan = analyzedPlan;
this.statistics = statistics;
this.structInfo = structInfo;
}
@ -71,6 +75,10 @@ public class MTMVCache {
return originalPlan;
}
public Plan getAnalyzedPlan() {
return analyzedPlan;
}
public Statistics getStatistics() {
return statistics;
}
@ -118,7 +126,7 @@ public class MTMVCache {
Optional<StructInfo> structInfoOptional = MaterializationContext.constructStructInfo(mvPlan, originPlan,
planner.getCascadesContext(),
new BitSet());
return new MTMVCache(mvPlan, originPlan, needCost
return new MTMVCache(mvPlan, originPlan, planner.getAnalyzedPlan(), needCost
? planner.getCascadesContext().getMemo().getRoot().getStatistics() : null,
structInfoOptional.orElseGet(() -> null));
}

View File

@ -35,12 +35,10 @@ import org.apache.doris.nereids.properties.PhysicalProperties;
import org.apache.doris.nereids.rules.RuleType;
import org.apache.doris.nereids.trees.plans.Plan;
import org.apache.doris.nereids.trees.plans.commands.ExplainCommand.ExplainLevel;
import org.apache.doris.nereids.trees.plans.commands.info.CreateMTMVInfo;
import org.apache.doris.nereids.trees.plans.logical.LogicalPlan;
import org.apache.doris.nereids.trees.plans.visitor.TableCollector;
import org.apache.doris.nereids.trees.plans.visitor.TableCollector.TableCollectorContext;
import org.apache.doris.qe.ConnectContext;
import org.apache.doris.qe.SessionVariable;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
@ -60,6 +58,12 @@ public class MTMVPlanUtil {
ctx.setThreadLocalInfo();
ctx.getSessionVariable().enableFallbackToOriginalPlanner = false;
ctx.getSessionVariable().enableNereidsDML = true;
// Debug session variables should be disabled when refreshing
ctx.getSessionVariable().skipDeletePredicate = false;
ctx.getSessionVariable().skipDeleteBitmap = false;
ctx.getSessionVariable().skipDeleteSign = false;
ctx.getSessionVariable().skipStorageEngineMerge = false;
ctx.getSessionVariable().showHiddenColumns = false;
ctx.getSessionVariable().allowModifyMaterializedViewData = true;
// Disable the add-default-limit rule to avoid refreshing wrong data
ctx.getSessionVariable().setDisableNereidsRules(
@ -100,31 +104,20 @@ public class MTMVPlanUtil {
public static MTMVRelation generateMTMVRelation(MTMV mtmv, ConnectContext ctx) {
// Should not turn a table without data into an empty relation when analyzing the related table,
// so add the disable rules
SessionVariable sessionVariable = ctx.getSessionVariable();
Set<String> tempDisableRules = sessionVariable.getDisableNereidsRuleNames();
sessionVariable.setDisableNereidsRules(CreateMTMVInfo.MTMV_PLANER_DISABLE_RULES);
if (ctx.getStatementContext() != null) {
ctx.getStatementContext().invalidCache(SessionVariable.DISABLE_NEREIDS_RULES);
}
Plan plan;
try {
plan = getPlanBySql(mtmv.getQuerySql(), ctx);
} finally {
sessionVariable.setDisableNereidsRules(String.join(",", tempDisableRules));
ctx.getStatementContext().invalidCache(SessionVariable.DISABLE_NEREIDS_RULES);
}
return generateMTMVRelation(plan);
Plan plan = getAnalyzePlanBySql(mtmv.getQuerySql(), ctx);
return generateMTMVRelation(plan, ctx);
}
public static MTMVRelation generateMTMVRelation(Plan plan) {
return new MTMVRelation(getBaseTables(plan, true), getBaseTables(plan, false), getBaseViews(plan));
public static MTMVRelation generateMTMVRelation(Plan plan, ConnectContext connectContext) {
return new MTMVRelation(getBaseTables(plan, true, connectContext),
getBaseTables(plan, false, connectContext), getBaseViews(plan));
}
private static Set<BaseTableInfo> getBaseTables(Plan plan, boolean expand) {
private static Set<BaseTableInfo> getBaseTables(Plan plan, boolean expand, ConnectContext connectContext) {
TableCollectorContext collectorContext =
new TableCollector.TableCollectorContext(
com.google.common.collect.Sets
.newHashSet(TableType.values()), expand);
.newHashSet(TableType.values()), expand, connectContext);
plan.accept(TableCollector.INSTANCE, collectorContext);
Set<TableIf> collectedTables = collectorContext.getCollectedTables();
return transferTableIfToInfo(collectedTables);
@ -142,7 +135,7 @@ public class MTMVPlanUtil {
return result;
}
private static Plan getPlanBySql(String querySql, ConnectContext ctx) {
private static Plan getAnalyzePlanBySql(String querySql, ConnectContext ctx) {
List<StatementBase> statements;
try {
statements = new NereidsParser().parseSQL(querySql);
@ -155,7 +148,7 @@ public class MTMVPlanUtil {
ctx.setStatementContext(new StatementContext());
try {
NereidsPlanner planner = new NereidsPlanner(ctx.getStatementContext());
return planner.planWithLock(logicalPlan, PhysicalProperties.ANY, ExplainLevel.NONE);
return planner.planWithLock(logicalPlan, PhysicalProperties.ANY, ExplainLevel.ANALYZED_PLAN);
} finally {
ctx.setStatementContext(original);
}

View File

@ -0,0 +1,27 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.doris.nereids.jobs.joinorder.hypergraph;
/**
 * Common abstraction for the elements of a hyper graph (nodes and edges).
 * Implementations report which graph nodes they reference as a bitmap,
 * where each set bit is the index of a referenced node.
 * */
public interface HyperElement {

    /**
     * Returns the bitmap of node indexes referenced by this element.
     */
    long getReferenceNodes();
}

View File

@ -18,6 +18,7 @@
package org.apache.doris.nereids.jobs.joinorder.hypergraph.edge;
import org.apache.doris.common.Pair;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.HyperElement;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.bitmap.LongBitmap;
import org.apache.doris.nereids.trees.expressions.Expression;
import org.apache.doris.nereids.trees.expressions.Slot;
@ -32,7 +33,7 @@ import java.util.Set;
/**
* Edge in HyperGraph
*/
public abstract class Edge {
public abstract class Edge implements HyperElement {
private final int index;
private final double selectivity;
@ -192,6 +193,7 @@ public abstract class Edge {
return LongBitmap.isSubset(getReferenceNodes(), otherBitmap);
}
@Override
public long getReferenceNodes() {
return LongBitmap.newBitmapUnion(leftExtendedNodes, rightExtendedNodes);
}

View File

@ -17,6 +17,7 @@
package org.apache.doris.nereids.jobs.joinorder.hypergraph.node;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.HyperElement;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.bitmap.LongBitmap;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.edge.Edge;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.edge.FilterEdge;
@ -33,7 +34,7 @@ import java.util.Set;
/**
* HyperGraph Node.
*/
public class AbstractNode {
public class AbstractNode implements HyperElement {
protected final int index;
protected final List<JoinEdge> joinEdges;
protected final List<FilterEdge> filterEdges;
@ -65,6 +66,11 @@ public class AbstractNode {
.build();
}
@Override
public long getReferenceNodes() {
return getNodeMap();
}
public int getIndex() {
return index;
}

View File

@ -237,6 +237,10 @@ public class BindRelation extends OneAnalysisRuleFactory {
unboundRelation.getTableSample());
}
}
if (!tabletIds.isEmpty()) {
// These tablet ids were specified manually, so they need to be recorded as specified tablet ids
scan = scan.withManuallySpecifiedTabletIds(tabletIds);
}
if (needGenerateLogicalAggForRandomDistAggTable(scan)) {
// it's a random distribution agg table
// add agg on olap scan

View File

@ -19,12 +19,14 @@ package org.apache.doris.nereids.rules.exploration.mv;
import org.apache.doris.common.Pair;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.ConflictRulesMaker;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.HyperElement;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.HyperGraph;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.bitmap.LongBitmap;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.edge.Edge;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.edge.FilterEdge;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.edge.JoinEdge;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.node.StructInfoNode;
import org.apache.doris.nereids.rules.exploration.mv.StructInfo.ExpressionPosition;
import org.apache.doris.nereids.rules.rewrite.PushDownFilterThroughJoin;
import org.apache.doris.nereids.trees.expressions.Expression;
import org.apache.doris.nereids.trees.expressions.NamedExpression;
@ -51,6 +53,7 @@ import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
@ -79,9 +82,9 @@ public class HyperGraphComparator {
private final Map<Edge, List<? extends Expression>> pullUpViewExprWithEdge = new HashMap<>();
private final LogicalCompatibilityContext logicalCompatibilityContext;
// this records the slots which need to reject null
// the key is the target join which should reject null, the value is a pair, the first value of the pair is the
// join type, the second value is also a pair which left represents the slots in the left of join that should
// reject null, right represents the slots in the right of join that should reject null.
// the key is the view join edge which should reject null, the value is a pair, the first value of the pair is the
// query join type, the second value is also a pair which left represents the slots in the left of view join that
// should reject null, right represents the slots in the right of view join that should reject null.
private final Map<JoinEdge, Pair<JoinType, Pair<Set<Slot>, Set<Slot>>>> inferredViewEdgeWithCond = new HashMap<>();
private List<JoinEdge> viewJoinEdgesAfterInferring;
private List<FilterEdge> viewFilterEdgesAfterInferring;
@ -249,9 +252,17 @@ public class HyperGraphComparator {
}
int size = queryExprSetList.size();
for (int i = 0; i < size; i++) {
Set<Expression> mappingQueryExprSet = queryExprSetList.get(i).stream()
.map(logicalCompatibilityContext::getViewNodeExprFromQuery)
.collect(Collectors.toSet());
Set<Expression> queryExpressions = queryExprSetList.get(i);
Set<Expression> mappingQueryExprSet = new HashSet<>();
for (Expression queryExpression : queryExpressions) {
Optional<Expression> mappingViewExprByQueryExpr = getMappingViewExprByQueryExpr(queryExpression, query,
this.logicalCompatibilityContext,
ExpressionPosition.NODE);
if (!mappingViewExprByQueryExpr.isPresent()) {
return false;
}
mappingQueryExprSet.add(mappingViewExprByQueryExpr.get());
}
if (!mappingQueryExprSet.equals(viewExprSetList.get(i))) {
return false;
}
@ -407,7 +418,10 @@ public class HyperGraphComparator {
if (edgeMap.containsKey(entry.getValue())) {
continue;
}
Expression viewExpr = logicalCompatibilityContext.getViewJoinExprFromQuery(entry.getKey());
Expression viewExpr = getMappingViewExprByQueryExpr(entry.getKey(),
entry.getValue(),
logicalCompatibilityContext,
ExpressionPosition.JOIN_EDGE).orElse(null);
if (viewExprToEdge.containsKey(viewExpr)) {
edgeMap.put(entry.getValue(), Objects.requireNonNull(viewExprToEdge.get(viewExpr)));
}
@ -441,15 +455,19 @@ public class HyperGraphComparator {
HashMap<Edge, Edge> queryToViewEdgeMap = new HashMap<>();
for (Entry<Expression, Collection<Edge>> entry : queryExprToEdge.asMap().entrySet()) {
Expression queryExprViewBased = logicalCompatibilityContext.getViewFilterExprFromQuery(entry.getKey());
if (queryExprViewBased == null) {
continue;
}
Collection<Edge> viewEdges = viewExprToEdge.get(queryExprViewBased);
if (viewEdges.isEmpty()) {
continue;
}
Expression queryExprViewBased = null;
for (Edge queryEdge : entry.getValue()) {
queryExprViewBased = getMappingViewExprByQueryExpr(entry.getKey(),
queryEdge,
logicalCompatibilityContext,
ExpressionPosition.FILTER_EDGE).orElse(null);
if (queryExprViewBased == null) {
continue;
}
Collection<Edge> viewEdges = viewExprToEdge.get(queryExprViewBased);
if (viewEdges.isEmpty()) {
continue;
}
for (Edge viewEdge : viewEdges) {
if (!isSubTreeNodesEquals(queryEdge, viewEdge, logicalCompatibilityContext)) {
// Such as query filter edge is <{1} --FILTER-- {}> but view filter edge is
@ -512,17 +530,17 @@ public class HyperGraphComparator {
}
private boolean compareFilterEdgeWithNode(FilterEdge query, FilterEdge view) {
return rewriteQueryNodeMap(query.getReferenceNodes()) == view.getReferenceNodes();
return getViewNodesByQuery(query.getReferenceNodes()) == view.getReferenceNodes();
}
private boolean compareJoinEdgeWithNode(JoinEdge query, JoinEdge view) {
boolean res = false;
if (query.getJoinType().swap() == view.getJoinType()) {
res |= rewriteQueryNodeMap(query.getLeftExtendedNodes()) == view.getRightExtendedNodes()
&& rewriteQueryNodeMap(query.getRightExtendedNodes()) == view.getLeftExtendedNodes();
res |= getViewNodesByQuery(query.getLeftExtendedNodes()) == view.getRightExtendedNodes()
&& getViewNodesByQuery(query.getRightExtendedNodes()) == view.getLeftExtendedNodes();
}
res |= rewriteQueryNodeMap(query.getLeftExtendedNodes()) == view.getLeftExtendedNodes()
&& rewriteQueryNodeMap(query.getRightExtendedNodes()) == view.getRightExtendedNodes();
res |= getViewNodesByQuery(query.getLeftExtendedNodes()) == view.getLeftExtendedNodes()
&& getViewNodesByQuery(query.getRightExtendedNodes()) == view.getRightExtendedNodes();
return res;
}
@ -545,8 +563,8 @@ public class HyperGraphComparator {
}
private boolean tryInferEdge(JoinEdge query, JoinEdge view) {
if (rewriteQueryNodeMap(query.getLeftRequiredNodes()) != view.getLeftRequiredNodes()
|| rewriteQueryNodeMap(query.getRightRequiredNodes()) != view.getRightRequiredNodes()) {
if (getViewNodesByQuery(query.getLeftRequiredNodes()) != view.getLeftRequiredNodes()
|| getViewNodesByQuery(query.getRightRequiredNodes()) != view.getRightRequiredNodes()) {
return false;
}
if (!query.getJoinType().equals(view.getJoinType())) {
@ -567,7 +585,7 @@ public class HyperGraphComparator {
return true;
}
private long rewriteQueryNodeMap(long bitmap) {
private long getViewNodesByQuery(long bitmap) {
long newBitmap = LongBitmap.newBitmap();
for (int i : LongBitmap.getIterator(bitmap)) {
int newIdx = getQueryToViewNodeIdMap().getOrDefault(i, 0);
@ -576,6 +594,35 @@ public class HyperGraphComparator {
return newBitmap;
}
private Optional<Expression> getMappingViewExprByQueryExpr(Expression queryExpression,
HyperElement queryExpressionBelongedHyperElement,
LogicalCompatibilityContext context,
ExpressionPosition expressionPosition) {
Expression queryShuttledExpr;
Collection<Pair<Expression, HyperElement>> viewExpressions;
if (ExpressionPosition.JOIN_EDGE.equals(expressionPosition)) {
queryShuttledExpr = context.getQueryJoinShuttledExpr(queryExpression);
viewExpressions = context.getViewJoinExprFromQuery(queryShuttledExpr);
} else if (ExpressionPosition.FILTER_EDGE.equals(expressionPosition)) {
queryShuttledExpr = context.getQueryFilterShuttledExpr(queryExpression);
viewExpressions = context.getViewFilterExprFromQuery(queryShuttledExpr);
} else {
queryShuttledExpr = context.getQueryNodeShuttledExpr(queryExpression);
viewExpressions = context.getViewNodeExprFromQuery(queryShuttledExpr);
}
if (viewExpressions.size() == 1) {
return Optional.of(viewExpressions.iterator().next().key());
}
long queryReferenceNodes = queryExpressionBelongedHyperElement.getReferenceNodes();
long viewReferenceNodes = getViewNodesByQuery(queryReferenceNodes);
for (Pair<Expression, HyperElement> viewExpressionPair : viewExpressions) {
if (viewExpressionPair.value().getReferenceNodes() == viewReferenceNodes) {
return Optional.of(viewExpressionPair.key());
}
}
return Optional.empty();
}
private void compareJoinEdgeWithExpr(Edge query, Edge view) {
Set<? extends Expression> queryExprSet = query.getExpressionSet();
Set<? extends Expression> viewExprSet = view.getExpressionSet();
@ -583,7 +630,10 @@ public class HyperGraphComparator {
Set<Expression> exprMappedOfView = new HashSet<>();
List<Expression> residualQueryExpr = new ArrayList<>();
for (Expression queryExpr : queryExprSet) {
Expression viewExpr = logicalCompatibilityContext.getViewJoinExprFromQuery(queryExpr);
Expression viewExpr = getMappingViewExprByQueryExpr(queryExpr,
query,
logicalCompatibilityContext,
ExpressionPosition.JOIN_EDGE).orElse(null);
if (viewExprSet.contains(viewExpr)) {
exprMappedOfView.add(viewExpr);
} else {
@ -602,7 +652,10 @@ public class HyperGraphComparator {
Set<Expression> exprMappedOfView = new HashSet<>();
List<Expression> residualQueryExpr = new ArrayList<>();
for (Expression queryExpr : queryExprSet) {
Expression viewExpr = logicalCompatibilityContext.getViewFilterExprFromQuery(queryExpr);
Expression viewExpr = getMappingViewExprByQueryExpr(queryExpr,
query,
logicalCompatibilityContext,
ExpressionPosition.FILTER_EDGE).orElse(null);
if (viewExprSet.contains(viewExpr)) {
exprMappedOfView.add(viewExpr);
} else {

View File

@ -69,13 +69,18 @@ public class InitMaterializationContextHook implements PlannerHook {
* @param cascadesContext current cascadesContext in the planner
*/
protected void doInitMaterializationContext(CascadesContext cascadesContext) {
if (cascadesContext.getConnectContext().getSessionVariable().isInDebugMode()) {
LOG.info(String.format("MaterializationContext init return because is in debug mode, current queryId is %s",
cascadesContext.getConnectContext().getQueryIdentifier()));
return;
}
// Only collect the tables or mvs which the query uses directly, to avoid useless mv partitions in rewrite
TableCollectorContext collectorContext = new TableCollectorContext(Sets.newHashSet(), false);
// Keep use one connection context when in query, if new connect context,
// the ConnectionContext.get() will change
TableCollectorContext collectorContext = new TableCollectorContext(Sets.newHashSet(), false,
cascadesContext.getConnectContext());
try {
Plan rewritePlan = cascadesContext.getRewritePlan();
// Keep use one connection context when in query, if new connect context,
// the ConnectionContext.get() will change
collectorContext.setConnectContext(cascadesContext.getConnectContext());
rewritePlan.accept(TableCollector.INSTANCE, collectorContext);
} catch (Exception e) {
LOG.warn(String.format("MaterializationContext init table collect fail, current queryId is %s",

View File

@ -17,7 +17,10 @@
package org.apache.doris.nereids.rules.exploration.mv;
import org.apache.doris.common.Pair;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.HyperElement;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.node.StructInfoNode;
import org.apache.doris.nereids.memo.GroupExpression;
import org.apache.doris.nereids.rules.exploration.mv.StructInfo.ExpressionPosition;
import org.apache.doris.nereids.rules.exploration.mv.mapping.Mapping.MappedRelation;
import org.apache.doris.nereids.rules.exploration.mv.mapping.RelationMapping;
@ -27,6 +30,7 @@ import org.apache.doris.nereids.trees.expressions.Expression;
import org.apache.doris.nereids.trees.expressions.NamedExpression;
import org.apache.doris.nereids.trees.expressions.SlotReference;
import org.apache.doris.nereids.trees.expressions.visitor.DefaultExpressionRewriter;
import org.apache.doris.nereids.trees.plans.ObjectId;
import org.apache.doris.nereids.trees.plans.RelationId;
import org.apache.doris.nereids.util.ExpressionUtils;
import org.apache.doris.nereids.util.Utils;
@ -34,8 +38,10 @@ import org.apache.doris.nereids.util.Utils;
import com.google.common.base.Suppliers;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.util.HashMap;
import java.util.Collection;
import java.util.Map;
import java.util.function.Supplier;
@ -45,9 +51,16 @@ import java.util.function.Supplier;
public class LogicalCompatibilityContext {
private final BiMap<StructInfoNode, StructInfoNode> queryToViewNodeMapping;
private final BiMap<Integer, Integer> queryToViewNodeIDMapping;
private final Supplier<BiMap<Expression, Expression>> queryToViewJoinEdgeExpressionMappingSupplier;
private final Supplier<BiMap<Expression, Expression>> queryToViewNodeExpressionMappingSupplier;
private final Supplier<BiMap<Expression, Expression>> queryToViewFilterEdgeExpressionMappingSupplier;
private final ObjectId planNodeId;
private final Supplier<Multimap<Expression, Pair<Expression, HyperElement>>>
queryToViewJoinEdgeExpressionMappingSupplier;
private final Supplier<Map<Expression, Expression>> queryToQueryShuttledJoinExpressionMappingSupplier;
private final Supplier<Multimap<Expression, Pair<Expression, HyperElement>>>
queryToViewNodeExpressionMappingSupplier;
private final Supplier<Map<Expression, Expression>> queryToQueryShuttledNodeExpressionMappingSupplier;
private final Supplier<Multimap<Expression, Pair<Expression, HyperElement>>>
queryToViewFilterEdgeExpressionMappingSupplier;
private final Supplier<Map<Expression, Expression>> queryToQueryShuttledFilterExpressionMappingSupplier;
/**
* LogicalCompatibilityContext
@ -61,19 +74,31 @@ public class LogicalCompatibilityContext {
queryStructInfo.getShuttledExpressionsToExpressionsMap().get(ExpressionPosition.JOIN_EDGE),
viewStructInfo.getShuttledExpressionsToExpressionsMap().get(ExpressionPosition.JOIN_EDGE)));
this.queryToQueryShuttledJoinExpressionMappingSupplier = Suppliers.memoize(
() -> queryStructInfo.getExpressionToShuttledExpressionToMap().get(ExpressionPosition.JOIN_EDGE));
this.queryToViewNodeExpressionMappingSupplier =
Suppliers.memoize(() -> generateExpressionMapping(viewToQuerySlotMapping,
queryStructInfo.getShuttledExpressionsToExpressionsMap().get(ExpressionPosition.NODE),
viewStructInfo.getShuttledExpressionsToExpressionsMap().get(ExpressionPosition.NODE)));
this.queryToQueryShuttledNodeExpressionMappingSupplier = Suppliers.memoize(
() -> queryStructInfo.getExpressionToShuttledExpressionToMap().get(ExpressionPosition.NODE));
this.queryToViewFilterEdgeExpressionMappingSupplier =
Suppliers.memoize(() -> generateExpressionMapping(viewToQuerySlotMapping,
queryStructInfo.getShuttledExpressionsToExpressionsMap().get(ExpressionPosition.FILTER_EDGE),
viewStructInfo.getShuttledExpressionsToExpressionsMap().get(ExpressionPosition.FILTER_EDGE)));
this.queryToQueryShuttledFilterExpressionMappingSupplier = Suppliers.memoize(
() -> queryStructInfo.getExpressionToShuttledExpressionToMap().get(ExpressionPosition.FILTER_EDGE));
this.queryToViewNodeMapping = queryToViewNodeMapping;
this.queryToViewNodeIDMapping = HashBiMap.create();
queryToViewNodeMapping.forEach((k, v) -> queryToViewNodeIDMapping.put(k.getIndex(), v.getIndex()));
this.planNodeId = queryStructInfo.getTopPlan().getGroupExpression()
.map(GroupExpression::getId).orElseGet(() -> new ObjectId(-1));
}
public BiMap<StructInfoNode, StructInfoNode> getQueryToViewNodeMapping() {
@ -84,18 +109,30 @@ public class LogicalCompatibilityContext {
return queryToViewNodeIDMapping;
}
public Expression getViewJoinExprFromQuery(Expression queryJoinExpr) {
public Collection<Pair<Expression, HyperElement>> getViewJoinExprFromQuery(Expression queryJoinExpr) {
return queryToViewJoinEdgeExpressionMappingSupplier.get().get(queryJoinExpr);
}
public Expression getViewFilterExprFromQuery(Expression queryJoinExpr) {
public Expression getQueryJoinShuttledExpr(Expression queryJoinExpr) {
return queryToQueryShuttledJoinExpressionMappingSupplier.get().get(queryJoinExpr);
}
public Collection<Pair<Expression, HyperElement>> getViewFilterExprFromQuery(Expression queryJoinExpr) {
return queryToViewFilterEdgeExpressionMappingSupplier.get().get(queryJoinExpr);
}
public Expression getViewNodeExprFromQuery(Expression queryJoinExpr) {
public Expression getQueryFilterShuttledExpr(Expression queryFilterExpr) {
return queryToQueryShuttledFilterExpressionMappingSupplier.get().get(queryFilterExpr);
}
public Collection<Pair<Expression, HyperElement>> getViewNodeExprFromQuery(Expression queryJoinExpr) {
return queryToViewNodeExpressionMappingSupplier.get().get(queryJoinExpr);
}
public Expression getQueryNodeShuttledExpr(Expression queryNodeExpr) {
return queryToQueryShuttledNodeExpressionMappingSupplier.get().get(queryNodeExpr);
}
/**
* Generate logical compatibility context,
* this make expression mapping between query and view by relation and the slot in relation mapping
@ -126,24 +163,31 @@ public class LogicalCompatibilityContext {
viewStructInfo);
}
private static BiMap<Expression, Expression> generateExpressionMapping(
/**
* The result is a multimap:
* the key is the shuttled query expr,
* the value is the original view expr collection
* */
private static Multimap<Expression, Pair<Expression, HyperElement>> generateExpressionMapping(
Map<SlotReference, SlotReference> viewToQuerySlotMapping,
Map<Expression, Expression> queryShuttledExprToExprMap,
Map<Expression, Expression> viewShuttledExprToExprMap) {
final Map<Expression, Expression> viewEdgeToConjunctsMapQueryBased = new HashMap<>();
BiMap<Expression, Expression> queryToViewEdgeMapping = HashBiMap.create();
Multimap<Expression, Pair<Expression, HyperElement>> queryShuttledExprToExprMap,
Multimap<Expression, Pair<Expression, HyperElement>> viewShuttledExprToExprMap) {
Multimap<Expression, Pair<Expression, HyperElement>> queryToViewEdgeMapping = HashMultimap.create();
if (queryShuttledExprToExprMap == null || viewShuttledExprToExprMap == null
|| queryShuttledExprToExprMap.isEmpty() || viewShuttledExprToExprMap.isEmpty()) {
return queryToViewEdgeMapping;
}
final Multimap<Expression, Pair<Expression, HyperElement>> viewShuttledExprToExprMapQueryBased =
HashMultimap.create();
viewShuttledExprToExprMap.forEach((shuttledExpr, expr) -> {
viewEdgeToConjunctsMapQueryBased.put(
viewShuttledExprToExprMapQueryBased.put(
orderSlotAsc(ExpressionUtils.replace(shuttledExpr, viewToQuerySlotMapping)), expr);
});
queryShuttledExprToExprMap.forEach((exprSet, edge) -> {
Expression viewExpr = viewEdgeToConjunctsMapQueryBased.get(orderSlotAsc(exprSet));
if (viewExpr != null) {
queryToViewEdgeMapping.put(edge, viewExpr);
queryShuttledExprToExprMap.forEach((shuttledExpr, expr) -> {
Collection<Pair<Expression, HyperElement>> viewExpressions = viewShuttledExprToExprMapQueryBased.get(
orderSlotAsc(shuttledExpr));
if (viewExpressions != null) {
queryToViewEdgeMapping.putAll(shuttledExpr, viewExpressions);
}
});
return queryToViewEdgeMapping;

View File

@ -25,14 +25,21 @@ import org.apache.doris.catalog.PartitionType;
import org.apache.doris.catalog.TableIf;
import org.apache.doris.catalog.constraint.TableIdentifier;
import org.apache.doris.mtmv.BaseTableInfo;
import org.apache.doris.mtmv.MTMVCache;
import org.apache.doris.mtmv.MTMVRelatedTableIf;
import org.apache.doris.nereids.CascadesContext;
import org.apache.doris.nereids.NereidsPlanner;
import org.apache.doris.nereids.StatementContext;
import org.apache.doris.nereids.jobs.executor.Rewriter;
import org.apache.doris.nereids.memo.Group;
import org.apache.doris.nereids.memo.StructInfoMap;
import org.apache.doris.nereids.parser.NereidsParser;
import org.apache.doris.nereids.properties.PhysicalProperties;
import org.apache.doris.nereids.rules.RuleType;
import org.apache.doris.nereids.rules.analysis.BindRelation;
import org.apache.doris.nereids.rules.expression.ExpressionNormalization;
import org.apache.doris.nereids.rules.expression.ExpressionRewriteContext;
import org.apache.doris.nereids.rules.rewrite.EliminateSort;
import org.apache.doris.nereids.trees.expressions.Alias;
import org.apache.doris.nereids.trees.expressions.ExprId;
import org.apache.doris.nereids.trees.expressions.Expression;
@ -47,19 +54,25 @@ import org.apache.doris.nereids.trees.plans.JoinType;
import org.apache.doris.nereids.trees.plans.Plan;
import org.apache.doris.nereids.trees.plans.PreAggStatus;
import org.apache.doris.nereids.trees.plans.algebra.CatalogRelation;
import org.apache.doris.nereids.trees.plans.commands.ExplainCommand;
import org.apache.doris.nereids.trees.plans.logical.LogicalAggregate;
import org.apache.doris.nereids.trees.plans.logical.LogicalCatalogRelation;
import org.apache.doris.nereids.trees.plans.logical.LogicalFileScan;
import org.apache.doris.nereids.trees.plans.logical.LogicalFilter;
import org.apache.doris.nereids.trees.plans.logical.LogicalJoin;
import org.apache.doris.nereids.trees.plans.logical.LogicalLimit;
import org.apache.doris.nereids.trees.plans.logical.LogicalOlapScan;
import org.apache.doris.nereids.trees.plans.logical.LogicalPlan;
import org.apache.doris.nereids.trees.plans.logical.LogicalProject;
import org.apache.doris.nereids.trees.plans.logical.LogicalRelation;
import org.apache.doris.nereids.trees.plans.logical.LogicalResultSink;
import org.apache.doris.nereids.trees.plans.logical.LogicalWindow;
import org.apache.doris.nereids.trees.plans.visitor.DefaultPlanRewriter;
import org.apache.doris.nereids.trees.plans.visitor.DefaultPlanVisitor;
import org.apache.doris.nereids.trees.plans.visitor.NondeterministicFunctionCollector;
import org.apache.doris.nereids.util.ExpressionUtils;
import org.apache.doris.qe.ConnectContext;
import org.apache.doris.qe.OriginStatement;
import org.apache.doris.qe.SessionVariable;
import com.google.common.collect.HashMultimap;
@ -299,7 +312,50 @@ public class MaterializedViewUtils {
return nondeterministicFunctions;
}
private static final class TableQueryOperatorChecker extends DefaultPlanVisitor<Boolean, Void> {
/**
* createMTMVCache from querySql
*/
public static MTMVCache createMTMVCache(String querySql, ConnectContext connectContext) {
LogicalPlan unboundMvPlan = new NereidsParser().parseSingle(querySql);
StatementContext mvSqlStatementContext = new StatementContext(connectContext,
new OriginStatement(querySql, 0));
NereidsPlanner planner = new NereidsPlanner(mvSqlStatementContext);
if (mvSqlStatementContext.getConnectContext().getStatementContext() == null) {
mvSqlStatementContext.getConnectContext().setStatementContext(mvSqlStatementContext);
}
// Can not convert to a table sink, because when self joining and using the same column
// from different table instances, the output slot would be wrong
planner.planWithLock(unboundMvPlan, PhysicalProperties.ANY, ExplainCommand.ExplainLevel.ALL_PLAN);
Plan originPlan = planner.getRewrittenPlan();
// Eliminate result sink because sink operator is useless in query rewrite by materialized view
// and the top sort can also be removed
Plan mvPlan = originPlan.accept(new DefaultPlanRewriter<Object>() {
@Override
public Plan visitLogicalResultSink(LogicalResultSink<? extends Plan> logicalResultSink, Object context) {
return logicalResultSink.child().accept(this, context);
}
}, null);
// Optimize by rules to remove top sort
CascadesContext parentCascadesContext = CascadesContext.initContext(mvSqlStatementContext, mvPlan,
PhysicalProperties.ANY);
mvPlan = MaterializedViewUtils.rewriteByRules(parentCascadesContext, childContext -> {
Rewriter.getCteChildrenRewriter(childContext,
ImmutableList.of(Rewriter.custom(RuleType.ELIMINATE_SORT, EliminateSort::new))).execute();
return childContext.getRewritePlan();
}, mvPlan, originPlan);
return new MTMVCache(mvPlan, originPlan, planner.getAnalyzedPlan(),
planner.getCascadesContext().getMemo().getRoot().getStatistics(), null);
}
/**
* Check whether the query contains a table query operator.
* The following SQL statements should return true:
* select * from orders TABLET(10098), because it manually specifies tablets
* select * from orders_partition PARTITION (day_2), because it manually specifies partitions
* select * from orders index query_index_test, because it specifies an index
* select * from orders TABLESAMPLE(20 percent), because it uses table sampling
*/
public static final class TableQueryOperatorChecker extends DefaultPlanVisitor<Boolean, Void> {
public static final TableQueryOperatorChecker INSTANCE = new TableQueryOperatorChecker();
@Override
@ -310,12 +366,20 @@ public class MaterializedViewUtils {
if (relation instanceof LogicalOlapScan) {
LogicalOlapScan logicalOlapScan = (LogicalOlapScan) relation;
if (logicalOlapScan.getTableSample().isPresent()) {
// Contain sample, select * from orders TABLESAMPLE(20 percent)
return true;
}
if (!logicalOlapScan.getSelectedTabletIds().isEmpty()) {
if (!logicalOlapScan.getManuallySpecifiedTabletIds().isEmpty()) {
// Contain tablets, select * from orders TABLET(10098) because TABLET(10098)
return true;
}
if (!logicalOlapScan.getManuallySpecifiedPartitions().isEmpty()) {
// Contain specified partitions, select * from orders_partition PARTITION (day_2)
return true;
}
if (logicalOlapScan.getSelectedIndexId() != logicalOlapScan.getTable().getBaseIndexId()) {
// Contains select index or use sync mv in rbo rewrite
// select * from orders index query_index_test
return true;
}
}
@ -492,6 +556,7 @@ public class MaterializedViewUtils {
@Override
public Void visit(Plan plan, IncrementCheckerContext context) {
if (plan instanceof LogicalProject
|| plan instanceof LogicalLimit
|| plan instanceof LogicalFilter
|| plan instanceof LogicalJoin
|| plan instanceof LogicalAggregate

View File

@ -23,11 +23,12 @@ import org.apache.doris.common.Pair;
import org.apache.doris.mtmv.BaseTableInfo;
import org.apache.doris.nereids.CascadesContext;
import org.apache.doris.nereids.jobs.executor.Rewriter;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.HyperElement;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.HyperGraph;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.edge.JoinEdge;
import org.apache.doris.nereids.jobs.joinorder.hypergraph.node.StructInfoNode;
import org.apache.doris.nereids.memo.Group;
import org.apache.doris.nereids.memo.GroupExpression;
import org.apache.doris.nereids.rules.exploration.mv.MaterializedViewUtils.TableQueryOperatorChecker;
import org.apache.doris.nereids.rules.exploration.mv.Predicates.SplitPredicate;
import org.apache.doris.nereids.trees.copier.DeepCopierContext;
import org.apache.doris.nereids.trees.copier.LogicalPlanDeepCopier;
@ -36,6 +37,7 @@ import org.apache.doris.nereids.trees.expressions.ExprId;
import org.apache.doris.nereids.trees.expressions.Expression;
import org.apache.doris.nereids.trees.expressions.SlotReference;
import org.apache.doris.nereids.trees.expressions.literal.Literal;
import org.apache.doris.nereids.trees.plans.AbstractPlan;
import org.apache.doris.nereids.trees.plans.GroupPlan;
import org.apache.doris.nereids.trees.plans.JoinType;
import org.apache.doris.nereids.trees.plans.ObjectId;
@ -61,12 +63,15 @@ import org.apache.doris.nereids.trees.plans.visitor.DefaultPlanVisitor;
import org.apache.doris.nereids.trees.plans.visitor.ExpressionLineageReplacer;
import org.apache.doris.nereids.util.ExpressionUtils;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimap;
import com.google.common.collect.Sets;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
@ -109,9 +114,23 @@ public class StructInfo {
// split predicates is shuttled
private SplitPredicate splitPredicate;
private EquivalenceClass equivalenceClass;
// Key is the expression shuttled and the value is the origin expression
// For value of Map, the key is the position of expression
// the value is the expressions and the hyper element of expression pair
// The key of the pair is the shuttled expression; the value is the origin expression and the hyper element it belongs to
// Sometimes the origin expressions are different but the shuttled expression is the same
// For example, origin expressions l_partkey#0 > 1 and l_partkey#10 > 1 may both shuttle to l_partkey#10 > 1
// this is for building LogicalCompatibilityContext later.
private final Map<ExpressionPosition, Map<Expression, Expression>> shuttledExpressionsToExpressionsMap;
private final Map<ExpressionPosition, Multimap<Expression, Pair<Expression, HyperElement>>>
shuttledExpressionsToExpressionsMap;
// For the outer map, the key is the position of the expression;
// the inner map maps the original expression to its shuttled expression
// Such as origin expressions are l_partkey#0 > 1 and shuttled expression is l_partkey#10 > 1
// the map would be {ExpressionPosition.FILTER, {
// l_partkey#0 > 1 : l_partkey#10 > 1
// }}
// this is for building LogicalCompatibilityContext later.
private final Map<ExpressionPosition, Map<Expression, Expression>> expressionToShuttledExpressionToMap;
// Record the exprId and the corresponding expr map, this is used by expression shuttled
private final Map<ExprId, Expression> namedExprIdAndExprMapping;
private final List<? extends Expression> planOutputShuttledExpressions;
@ -123,7 +142,9 @@ public class StructInfo {
Plan bottomPlan, List<CatalogRelation> relations,
Map<RelationId, StructInfoNode> relationIdStructInfoNodeMap,
@Nullable Predicates predicates,
Map<ExpressionPosition, Map<Expression, Expression>> shuttledExpressionsToExpressionsMap,
Map<ExpressionPosition, Multimap<Expression, Pair<Expression, HyperElement>>>
shuttledExpressionsToExpressionsMap,
Map<ExpressionPosition, Map<Expression, Expression>> expressionToShuttledExpressionToMap,
Map<ExprId, Expression> namedExprIdAndExprMapping,
BitSet tableIdSet,
SplitPredicate splitPredicate,
@ -142,6 +163,7 @@ public class StructInfo {
this.splitPredicate = splitPredicate;
this.equivalenceClass = equivalenceClass;
this.shuttledExpressionsToExpressionsMap = shuttledExpressionsToExpressionsMap;
this.expressionToShuttledExpressionToMap = expressionToShuttledExpressionToMap;
this.namedExprIdAndExprMapping = namedExprIdAndExprMapping;
this.planOutputShuttledExpressions = planOutputShuttledExpressions;
}
@ -152,7 +174,8 @@ public class StructInfo {
public StructInfo withPredicates(Predicates predicates) {
return new StructInfo(this.originalPlan, this.originalPlanId, this.hyperGraph, this.valid, this.topPlan,
this.bottomPlan, this.relations, this.relationIdStructInfoNodeMap, predicates,
this.shuttledExpressionsToExpressionsMap, this.namedExprIdAndExprMapping, this.tableBitSet,
this.shuttledExpressionsToExpressionsMap, this.expressionToShuttledExpressionToMap,
this.namedExprIdAndExprMapping, this.tableBitSet,
null, null, this.planOutputShuttledExpressions);
}
@ -162,13 +185,16 @@ public class StructInfo {
public StructInfo withTableBitSet(BitSet tableBitSet) {
return new StructInfo(this.originalPlan, this.originalPlanId, this.hyperGraph, this.valid, this.topPlan,
this.bottomPlan, this.relations, this.relationIdStructInfoNodeMap, this.predicates,
this.shuttledExpressionsToExpressionsMap, this.namedExprIdAndExprMapping, tableBitSet,
this.shuttledExpressionsToExpressionsMap, this.expressionToShuttledExpressionToMap,
this.namedExprIdAndExprMapping, tableBitSet,
this.splitPredicate, this.equivalenceClass, this.planOutputShuttledExpressions);
}
private static boolean collectStructInfoFromGraph(HyperGraph hyperGraph,
Plan topPlan,
Map<ExpressionPosition, Map<Expression, Expression>> shuttledExpressionsToExpressionsMap,
Map<ExpressionPosition, Multimap<Expression, Pair<Expression, HyperElement>>>
shuttledExpressionsToExpressionsMap,
Map<ExpressionPosition, Map<Expression, Expression>> expressionToShuttledExpressionToMap,
Map<ExprId, Expression> namedExprIdAndExprMapping,
List<CatalogRelation> relations,
Map<RelationId, StructInfoNode> relationIdStructInfoNodeMap,
@ -196,8 +222,9 @@ public class StructInfo {
structInfoNode.getPlan().accept(ExpressionLineageReplacer.INSTANCE, replaceContext);
// Replace expressions by expression map
List<Expression> replacedExpressions = replaceContext.getReplacedExpressions();
putShuttledExpressionsToExpressionsMap(shuttledExpressionsToExpressionsMap,
ExpressionPosition.NODE, replacedExpressions.get(0), expression);
putShuttledExpressionToExpressionsMap(shuttledExpressionsToExpressionsMap,
expressionToShuttledExpressionToMap,
ExpressionPosition.NODE, replacedExpressions.get(0), expression, node);
// Record this, will be used in top level expression shuttle later, see the method
// ExpressionLineageReplacer#visitGroupPlan
namedExprIdAndExprMapping.putAll(replaceContext.getExprIdExpressionMap());
@ -210,28 +237,27 @@ public class StructInfo {
});
// Collect expression from join condition in hyper graph
for (JoinEdge edge : hyperGraph.getJoinEdges()) {
List<Expression> hashJoinConjuncts = edge.getHashJoinConjuncts();
List<? extends Expression> joinConjunctExpressions = edge.getExpressions();
// shuttle expression in edge for the build of LogicalCompatibilityContext later.
// Record the exprId to expr map in the process of building struct info
// TODO get exprId to expr map when complex project is ready in join edge
hashJoinConjuncts.forEach(conjunctExpr -> {
ExpressionLineageReplacer.ExpressionReplaceContext replaceContext =
new ExpressionLineageReplacer.ExpressionReplaceContext(
Lists.newArrayList(conjunctExpr), ImmutableSet.of(),
ImmutableSet.of(), new BitSet());
topPlan.accept(ExpressionLineageReplacer.INSTANCE, replaceContext);
// Replace expressions by expression map
List<Expression> replacedExpressions = replaceContext.getReplacedExpressions();
putShuttledExpressionsToExpressionsMap(shuttledExpressionsToExpressionsMap,
ExpressionPosition.JOIN_EDGE, replacedExpressions.get(0), conjunctExpr);
// Record this, will be used in top level expression shuttle later, see the method
// ExpressionLineageReplacer#visitGroupPlan
namedExprIdAndExprMapping.putAll(replaceContext.getExprIdExpressionMap());
});
List<Expression> otherJoinConjuncts = edge.getOtherJoinConjuncts();
if (!otherJoinConjuncts.isEmpty()) {
return false;
ExpressionLineageReplacer.ExpressionReplaceContext replaceContext =
new ExpressionLineageReplacer.ExpressionReplaceContext(
joinConjunctExpressions.stream().map(expr -> (Expression) expr)
.collect(Collectors.toList()),
ImmutableSet.of(), ImmutableSet.of(), new BitSet());
topPlan.accept(ExpressionLineageReplacer.INSTANCE, replaceContext);
// Replace expressions by expression map
List<Expression> replacedExpressions = replaceContext.getReplacedExpressions();
for (int i = 0; i < replacedExpressions.size(); i++) {
putShuttledExpressionToExpressionsMap(shuttledExpressionsToExpressionsMap,
expressionToShuttledExpressionToMap,
ExpressionPosition.JOIN_EDGE, replacedExpressions.get(i), joinConjunctExpressions.get(i),
edge);
}
// Record this, will be used in top level expression shuttle later, see the method
// ExpressionLineageReplacer#visitGroupPlan
namedExprIdAndExprMapping.putAll(replaceContext.getExprIdExpressionMap());
}
// Collect expression from where in hyper graph
hyperGraph.getFilterEdges().forEach(filterEdge -> {
@ -239,10 +265,11 @@ public class StructInfo {
filterExpressions.forEach(predicate -> {
// this is used for LogicalCompatibilityContext
ExpressionUtils.extractConjunction(predicate).forEach(expr ->
putShuttledExpressionsToExpressionsMap(shuttledExpressionsToExpressionsMap,
putShuttledExpressionToExpressionsMap(shuttledExpressionsToExpressionsMap,
expressionToShuttledExpressionToMap,
ExpressionPosition.FILTER_EDGE,
ExpressionUtils.shuttleExpressionWithLineage(predicate, topPlan, new BitSet()),
predicate));
predicate, filterEdge));
});
});
return true;
@ -314,11 +341,13 @@ public class StructInfo {
// collect struct info fromGraph
List<CatalogRelation> relationList = new ArrayList<>();
Map<RelationId, StructInfoNode> relationIdStructInfoNodeMap = new LinkedHashMap<>();
Map<ExpressionPosition, Map<Expression, Expression>> shuttledHashConjunctsToConjunctsMap =
new LinkedHashMap<>();
Map<ExpressionPosition, Multimap<Expression, Pair<Expression, HyperElement>>>
shuttledHashConjunctsToConjunctsMap = new LinkedHashMap<>();
Map<ExprId, Expression> namedExprIdAndExprMapping = new LinkedHashMap<>();
BitSet tableBitSet = new BitSet();
Map<ExpressionPosition, Map<Expression, Expression>> expressionToShuttledExpressionToMap = new HashMap<>();
boolean valid = collectStructInfoFromGraph(hyperGraph, topPlan, shuttledHashConjunctsToConjunctsMap,
expressionToShuttledExpressionToMap,
namedExprIdAndExprMapping,
relationList,
relationIdStructInfoNodeMap,
@ -326,6 +355,11 @@ public class StructInfo {
cascadesContext);
valid = valid
&& hyperGraph.getNodes().stream().allMatch(n -> ((StructInfoNode) n).getExpressions() != null);
// if relationList has any relation which contains table operator,
// such as query with sample, index, table, is invalid
boolean invalid = relationList.stream().anyMatch(relation ->
((AbstractPlan) relation).accept(TableQueryOperatorChecker.INSTANCE, null));
valid = valid && !invalid;
// collect predicate from top plan which not in hyper graph
Set<Expression> topPlanPredicates = new LinkedHashSet<>();
topPlan.accept(PREDICATE_COLLECTOR, topPlanPredicates);
@ -335,19 +369,11 @@ public class StructInfo {
ExpressionUtils.shuttleExpressionWithLineage(originalPlan.getOutput(), originalPlan, new BitSet());
return new StructInfo(originalPlan, originalPlanId, hyperGraph, valid, topPlan, bottomPlan,
relationList, relationIdStructInfoNodeMap, predicates, shuttledHashConjunctsToConjunctsMap,
expressionToShuttledExpressionToMap,
namedExprIdAndExprMapping, tableBitSet, null, null,
planOutputShuttledExpressions);
}
/**
* Build Struct info from group.
* Maybe return multi structInfo when original plan already be rewritten by mv
*/
public static StructInfo of(Group group) {
// TODO build graph from original plan and get relations and predicates from graph
return null;
}
public List<CatalogRelation> getRelations() {
return relations;
}
@ -404,21 +430,36 @@ public class StructInfo {
return relationIdStructInfoNodeMap;
}
public Map<ExpressionPosition, Map<Expression, Expression>> getShuttledExpressionsToExpressionsMap() {
public Map<ExpressionPosition, Multimap<Expression, Pair<Expression, HyperElement>>>
getShuttledExpressionsToExpressionsMap() {
return shuttledExpressionsToExpressionsMap;
}
private static void putShuttledExpressionsToExpressionsMap(
Map<ExpressionPosition, Map<Expression, Expression>> shuttledExpressionsToExpressionsMap,
public Map<ExpressionPosition, Map<Expression, Expression>> getExpressionToShuttledExpressionToMap() {
return expressionToShuttledExpressionToMap;
}
private static void putShuttledExpressionToExpressionsMap(
Map<ExpressionPosition, Multimap<Expression, Pair<Expression, HyperElement>>>
shuttledExpressionsToExpressionsMap,
Map<ExpressionPosition, Map<Expression, Expression>> expressionPositionToExpressionToMap,
ExpressionPosition expressionPosition,
Expression key, Expression value) {
Map<Expression, Expression> expressionExpressionMap = shuttledExpressionsToExpressionsMap.get(
expressionPosition);
if (expressionExpressionMap == null) {
expressionExpressionMap = new LinkedHashMap<>();
shuttledExpressionsToExpressionsMap.put(expressionPosition, expressionExpressionMap);
Expression shuttledExpression, Expression originalExpression, HyperElement valueBelongedElement) {
Multimap<Expression, Pair<Expression, HyperElement>> shuttledExpressionToExpressionMap =
shuttledExpressionsToExpressionsMap.get(expressionPosition);
if (shuttledExpressionToExpressionMap == null) {
shuttledExpressionToExpressionMap = HashMultimap.create();
shuttledExpressionsToExpressionsMap.put(expressionPosition, shuttledExpressionToExpressionMap);
}
expressionExpressionMap.put(key, value);
shuttledExpressionToExpressionMap.put(shuttledExpression, Pair.of(originalExpression, valueBelongedElement));
Map<Expression, Expression> originalExprToShuttledExprMap =
expressionPositionToExpressionToMap.get(expressionPosition);
if (originalExprToShuttledExprMap == null) {
originalExprToShuttledExprMap = new HashMap<>();
expressionPositionToExpressionToMap.put(expressionPosition, originalExprToShuttledExprMap);
}
originalExprToShuttledExprMap.put(originalExpression, shuttledExpression);
}
public List<? extends Expression> getExpressions() {

View File

@ -36,11 +36,16 @@ public class PruneEmptyPartition extends OneRewriteRuleFactory {
return logicalOlapScan().thenApply(ctx -> {
LogicalOlapScan scan = ctx.root;
OlapTable table = scan.getTable();
List<Long> ids = table.selectNonEmptyPartitionIds(scan.getSelectedPartitionIds());
List<Long> partitionIdsToPrune = scan.getSelectedPartitionIds();
List<Long> ids = table.selectNonEmptyPartitionIds(partitionIdsToPrune);
if (ids.isEmpty()) {
return new LogicalEmptyRelation(ConnectContext.get().getStatementContext().getNextRelationId(),
scan.getOutput());
}
if (partitionIdsToPrune.equals(ids)) {
// No partition was actually pruned, return directly
return null;
}
return scan.withSelectedPartitionIds(ids);
}).toRule(RuleType.PRUNE_EMPTY_PARTITION);
}

View File

@ -53,7 +53,7 @@ public class PruneOlapScanTablet extends OneRewriteRuleFactory {
LogicalOlapScan olapScan = filter.child();
OlapTable table = olapScan.getTable();
Builder<Long> selectedTabletIdsBuilder = ImmutableList.builder();
if (olapScan.getSelectedTabletIds().isEmpty()) {
if (olapScan.getManuallySpecifiedTabletIds().isEmpty()) {
for (Long id : olapScan.getSelectedPartitionIds()) {
Partition partition = table.getPartition(id);
MaterializedIndex index = partition.getIndex(olapScan.getSelectedIndexId());
@ -64,10 +64,10 @@ public class PruneOlapScanTablet extends OneRewriteRuleFactory {
partition.getDistributionInfo()));
}
} else {
selectedTabletIdsBuilder.addAll(olapScan.getSelectedTabletIds());
selectedTabletIdsBuilder.addAll(olapScan.getManuallySpecifiedTabletIds());
}
List<Long> selectedTabletIds = selectedTabletIdsBuilder.build();
if (new HashSet<>(selectedTabletIds).equals(new HashSet<>(olapScan.getSelectedTabletIds()))) {
if (new HashSet<>(selectedTabletIds).equals(new HashSet<>(olapScan.getManuallySpecifiedTabletIds()))) {
return null;
}
return filter.withChildren(olapScan.withSelectedTabletIds(selectedTabletIds));

View File

@ -246,11 +246,16 @@ public class UpdateMvByPartitionCommand extends InsertOverwriteTableCommand {
if (predicates.isEmpty()) {
return cte;
}
List<LogicalSubQueryAlias<Plan>> rewrittenSubQueryAlias = new ArrayList<>();
for (LogicalSubQueryAlias<Plan> subQueryAlias : cte.getAliasQueries()) {
List<Plan> subQueryAliasChildren = new ArrayList<>();
this.virtualRelationNamePartSet.add(subQueryAlias.getQualifier());
subQueryAlias.children().forEach(subQuery -> subQuery.accept(this, predicates));
subQueryAlias.children().forEach(subQuery ->
subQueryAliasChildren.add(subQuery.accept(this, predicates))
);
rewrittenSubQueryAlias.add(subQueryAlias.withChildren(subQueryAliasChildren));
}
return super.visitLogicalCTE(cte, predicates);
return super.visitLogicalCTE(new LogicalCTE<>(rewrittenSubQueryAlias, cte.child()), predicates);
}
@Override

View File

@ -156,6 +156,9 @@ public class CreateMTMVInfo {
if (!InternalCatalog.INTERNAL_CATALOG_NAME.equals(mvName.getCtl())) {
throw new AnalysisException("Only support creating asynchronous materialized views in internal catalog");
}
if (ctx.getSessionVariable().isInDebugMode()) {
throw new AnalysisException("Create materialized view fail, because is in debug mode");
}
try {
FeNameFormat.checkTableName(mvName.getTbl());
} catch (org.apache.doris.common.AnalysisException e) {
@ -252,9 +255,21 @@ public class CreateMTMVInfo {
NereidsPlanner planner = new NereidsPlanner(statementContext);
// this is for expression column name infer when not use alias
LogicalSink<Plan> logicalSink = new UnboundResultSink<>(logicalQuery);
// must disable constant folding by be, because be constant folding may return wrong type
ctx.getSessionVariable().disableConstantFoldingByBEOnce();
Plan plan = planner.planWithLock(logicalSink, PhysicalProperties.ANY, ExplainLevel.ALL_PLAN);
// Should not convert a table without data into an empty relation when analyzing the related table,
// so add the disable rules
Set<String> tempDisableRules = ctx.getSessionVariable().getDisableNereidsRuleNames();
ctx.getSessionVariable().setDisableNereidsRules(CreateMTMVInfo.MTMV_PLANER_DISABLE_RULES);
ctx.getStatementContext().invalidCache(SessionVariable.DISABLE_NEREIDS_RULES);
Plan plan;
try {
// must disable constant folding by be, because be constant folding may return wrong type
ctx.getSessionVariable().disableConstantFoldingByBEOnce();
plan = planner.planWithLock(logicalSink, PhysicalProperties.ANY, ExplainLevel.ALL_PLAN);
} finally {
// after operate, roll back the disable rules
ctx.getSessionVariable().setDisableNereidsRules(String.join(",", tempDisableRules));
ctx.getStatementContext().invalidCache(SessionVariable.DISABLE_NEREIDS_RULES);
}
// can not contain VIEW or MTMV
analyzeBaseTables(planner.getAnalyzedPlan());
// can not contain Random function
@ -265,8 +280,7 @@ public class CreateMTMVInfo {
throw new AnalysisException("can not contain invalid expression");
}
getRelation(planner);
this.mvPartitionInfo = mvPartitionDefinition
.analyzeAndTransferToMTMVPartitionInfo(planner, ctx, logicalQuery);
this.mvPartitionInfo = mvPartitionDefinition.analyzeAndTransferToMTMVPartitionInfo(planner, ctx);
this.partitionDesc = generatePartitionDesc(ctx);
getColumns(plan, ctx, mvPartitionInfo.getPartitionCol(), distribution);
analyzeKeys();
@ -311,24 +325,10 @@ public class CreateMTMVInfo {
}
}
// Should use analyzed plan for collect views and tables
private void getRelation(NereidsPlanner planner) {
// Should not make table without data to empty relation when analyze the related table,
// so add disable rules
ConnectContext ctx = planner.getCascadesContext().getConnectContext();
SessionVariable sessionVariable = ctx.getSessionVariable();
Set<String> tempDisableRules = sessionVariable.getDisableNereidsRuleNames();
sessionVariable.setDisableNereidsRules(CreateMTMVInfo.MTMV_PLANER_DISABLE_RULES);
if (ctx.getStatementContext() != null) {
ctx.getStatementContext().invalidCache(SessionVariable.DISABLE_NEREIDS_RULES);
}
Plan plan;
try {
plan = planner.planWithLock(logicalQuery, PhysicalProperties.ANY, ExplainLevel.NONE);
} finally {
sessionVariable.setDisableNereidsRules(String.join(",", tempDisableRules));
ctx.getStatementContext().invalidCache(SessionVariable.DISABLE_NEREIDS_RULES);
}
this.relation = MTMVPlanUtil.generateMTMVRelation(plan);
this.relation = MTMVPlanUtil.generateMTMVRelation(planner.getAnalyzedPlan(),
planner.getCascadesContext().getConnectContext());
}
private PartitionDesc generatePartitionDesc(ConnectContext ctx) {

View File

@ -37,7 +37,6 @@ import org.apache.doris.nereids.NereidsPlanner;
import org.apache.doris.nereids.analyzer.UnboundFunction;
import org.apache.doris.nereids.analyzer.UnboundSlot;
import org.apache.doris.nereids.exceptions.AnalysisException;
import org.apache.doris.nereids.properties.PhysicalProperties;
import org.apache.doris.nereids.rules.exploration.mv.MaterializedViewUtils;
import org.apache.doris.nereids.rules.exploration.mv.MaterializedViewUtils.RelatedTableInfo;
import org.apache.doris.nereids.trees.expressions.Cast;
@ -45,11 +44,7 @@ import org.apache.doris.nereids.trees.expressions.Expression;
import org.apache.doris.nereids.trees.expressions.Slot;
import org.apache.doris.nereids.trees.expressions.functions.scalar.DateTrunc;
import org.apache.doris.nereids.trees.expressions.literal.Literal;
import org.apache.doris.nereids.trees.plans.Plan;
import org.apache.doris.nereids.trees.plans.commands.ExplainCommand.ExplainLevel;
import org.apache.doris.nereids.trees.plans.logical.LogicalPlan;
import org.apache.doris.qe.ConnectContext;
import org.apache.doris.qe.SessionVariable;
import com.google.common.collect.Sets;
@ -72,11 +67,9 @@ public class MTMVPartitionDefinition {
*
* @param planner planner
* @param ctx ctx
* @param logicalQuery logicalQuery
* @return MTMVPartitionInfo
*/
public MTMVPartitionInfo analyzeAndTransferToMTMVPartitionInfo(NereidsPlanner planner, ConnectContext ctx,
LogicalPlan logicalQuery) {
public MTMVPartitionInfo analyzeAndTransferToMTMVPartitionInfo(NereidsPlanner planner, ConnectContext ctx) {
MTMVPartitionInfo mtmvPartitionInfo = new MTMVPartitionInfo(partitionType);
if (this.partitionType == MTMVPartitionType.SELF_MANAGE) {
return mtmvPartitionInfo;
@ -100,7 +93,7 @@ public class MTMVPartitionDefinition {
timeUnit = null;
}
mtmvPartitionInfo.setPartitionCol(partitionColName);
RelatedTableInfo relatedTableInfo = getRelatedTableInfo(planner, ctx, logicalQuery, partitionColName, timeUnit);
RelatedTableInfo relatedTableInfo = getRelatedTableInfo(planner, ctx, partitionColName, timeUnit);
mtmvPartitionInfo.setRelatedCol(relatedTableInfo.getColumn());
mtmvPartitionInfo.setRelatedTable(relatedTableInfo.getTableInfo());
if (relatedTableInfo.getPartitionExpression().isPresent()) {
@ -125,47 +118,33 @@ public class MTMVPartitionDefinition {
return mtmvPartitionInfo;
}
private RelatedTableInfo getRelatedTableInfo(NereidsPlanner planner, ConnectContext ctx, LogicalPlan
logicalQuery,
String partitionColName,
String timeUnit) {
// Should use rewritten plan without view and subQuery to get related partition table
private RelatedTableInfo getRelatedTableInfo(NereidsPlanner planner, ConnectContext ctx,
String partitionColName, String timeUnit) {
CascadesContext cascadesContext = planner.getCascadesContext();
SessionVariable sessionVariable = cascadesContext.getConnectContext().getSessionVariable();
Set<String> tempDisableRules = sessionVariable.getDisableNereidsRuleNames();
// Should not make table without data to empty relation when analyze the related table,
// so add disable rules
sessionVariable.setDisableNereidsRules(CreateMTMVInfo.MTMV_PLANER_DISABLE_RULES);
cascadesContext.getStatementContext().invalidCache(SessionVariable.DISABLE_NEREIDS_RULES);
try {
Plan mvRewrittenPlan =
planner.planWithLock(logicalQuery, PhysicalProperties.ANY, ExplainLevel.REWRITTEN_PLAN);
RelatedTableInfo relatedTableInfo = MaterializedViewUtils
.getRelatedTableInfo(partitionColName, timeUnit, mvRewrittenPlan, cascadesContext);
if (!relatedTableInfo.isPctPossible()) {
throw new AnalysisException(String.format("Unable to find a suitable base table for partitioning,"
+ " the fail reason is %s", relatedTableInfo.getFailReason()));
}
MTMVRelatedTableIf mtmvBaseRealtedTable = MTMVUtil.getRelatedTable(relatedTableInfo.getTableInfo());
Set<String> partitionColumnNames = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER);
try {
partitionColumnNames.addAll(mtmvBaseRealtedTable.getPartitionColumnNames(Optional.empty()));
} catch (DdlException e) {
throw new AnalysisException(e.getMessage(), e);
}
if (!partitionColumnNames.contains(relatedTableInfo.getColumn())) {
throw new AnalysisException("error related column: " + relatedTableInfo.getColumn());
}
if (!(mtmvBaseRealtedTable instanceof HMSExternalTable)
&& partitionColumnNames.size() != 1) {
throw new AnalysisException("only hms table support multi column partition.");
}
return relatedTableInfo;
} finally {
// after operate, roll back the disable rules
sessionVariable.setDisableNereidsRules(String.join(",", tempDisableRules));
cascadesContext.getStatementContext().invalidCache(SessionVariable.DISABLE_NEREIDS_RULES);
RelatedTableInfo relatedTableInfo = MaterializedViewUtils
.getRelatedTableInfo(partitionColName, timeUnit, planner.getRewrittenPlan(), cascadesContext);
if (!relatedTableInfo.isPctPossible()) {
throw new AnalysisException(String.format("Unable to find a suitable base table for partitioning,"
+ " the fail reason is %s", relatedTableInfo.getFailReason()));
}
MTMVRelatedTableIf mtmvBaseRealtedTable = MTMVUtil.getRelatedTable(relatedTableInfo.getTableInfo());
Set<String> partitionColumnNames = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER);
try {
partitionColumnNames.addAll(mtmvBaseRealtedTable.getPartitionColumnNames(Optional.empty()));
} catch (DdlException e) {
throw new AnalysisException(e.getMessage(), e);
}
if (!partitionColumnNames.contains(relatedTableInfo.getColumn())) {
throw new AnalysisException("error related column: " + relatedTableInfo.getColumn());
}
if (!(mtmvBaseRealtedTable instanceof HMSExternalTable)
&& partitionColumnNames.size() != 1) {
throw new AnalysisException("only hms table support multi column partition.");
}
return relatedTableInfo;
}
private static List<Expr> convertToLegacyArguments(List<Expression> children) {

View File

@ -92,6 +92,12 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
*/
private final List<Long> selectedTabletIds;
/**
* Selected tablet ids to read data from, this would be set if user query with tablets manually
* Such as select * from orders TABLET(100);
*/
private final List<Long> manuallySpecifiedTabletIds;
///////////////////////////////////////////////////////////////////////////
// Members for partition ids.
///////////////////////////////////////////////////////////////////////////
@ -120,12 +126,16 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
this(id, table, ImmutableList.of());
}
/**
* LogicalOlapScan construct method
*/
public LogicalOlapScan(RelationId id, OlapTable table, List<String> qualifier) {
this(id, table, qualifier, Optional.empty(), Optional.empty(),
table.getPartitionIds(), false,
ImmutableList.of(),
-1, false, PreAggStatus.unset(), ImmutableList.of(), ImmutableList.of(),
Maps.newHashMap(), Optional.empty(), false, ImmutableMap.of());
Maps.newHashMap(), Optional.empty(), false, ImmutableMap.of(),
ImmutableList.of());
}
public LogicalOlapScan(RelationId id, OlapTable table, List<String> qualifier, List<Long> tabletIds,
@ -133,7 +143,7 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
this(id, table, qualifier, Optional.empty(), Optional.empty(),
table.getPartitionIds(), false, tabletIds,
-1, false, PreAggStatus.unset(), ImmutableList.of(), hints, Maps.newHashMap(),
tableSample, false, ImmutableMap.of());
tableSample, false, ImmutableMap.of(), ImmutableList.of());
}
public LogicalOlapScan(RelationId id, OlapTable table, List<String> qualifier, List<Long> specifiedPartitions,
@ -142,7 +152,7 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
// must use specifiedPartitions here for prune partition by sql like 'select * from t partition p1'
specifiedPartitions, false, tabletIds,
-1, false, PreAggStatus.unset(), specifiedPartitions, hints, Maps.newHashMap(),
tableSample, false, ImmutableMap.of());
tableSample, false, ImmutableMap.of(), ImmutableList.of());
}
public LogicalOlapScan(RelationId id, OlapTable table, List<String> qualifier, List<Long> tabletIds,
@ -151,7 +161,8 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
this(id, table, qualifier, Optional.empty(), Optional.empty(),
selectedPartitionIds, false, tabletIds,
selectedIndexId, true, preAggStatus,
specifiedPartitions, hints, Maps.newHashMap(), tableSample, true, ImmutableMap.of());
specifiedPartitions, hints, Maps.newHashMap(), tableSample, true, ImmutableMap.of(),
ImmutableList.of());
}
/**
@ -164,7 +175,7 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
PreAggStatus preAggStatus, List<Long> specifiedPartitions,
List<String> hints, Map<Pair<Long, String>, Slot> cacheSlotWithSlotName,
Optional<TableSample> tableSample, boolean directMvScan,
Map<String, Set<List<String>>> colToSubPathsMap) {
Map<String, Set<List<String>>> colToSubPathsMap, List<Long> specifiedTabletIds) {
super(id, PlanType.LOGICAL_OLAP_SCAN, table, qualifier,
groupExpression, logicalProperties);
Preconditions.checkArgument(selectedPartitionIds != null,
@ -175,6 +186,7 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
this.indexSelected = indexSelected;
this.preAggStatus = preAggStatus;
this.manuallySpecifiedPartitions = ImmutableList.copyOf(specifiedPartitions);
this.manuallySpecifiedTabletIds = ImmutableList.copyOf(specifiedTabletIds);
if (selectedPartitionIds.isEmpty()) {
this.selectedPartitionIds = ImmutableList.of();
@ -233,6 +245,7 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
&& partitionPruned == that.partitionPruned && Objects.equals(preAggStatus, that.preAggStatus)
&& Objects.equals(selectedTabletIds, that.selectedTabletIds)
&& Objects.equals(manuallySpecifiedPartitions, that.manuallySpecifiedPartitions)
&& Objects.equals(manuallySpecifiedTabletIds, that.manuallySpecifiedTabletIds)
&& Objects.equals(selectedPartitionIds, that.selectedPartitionIds)
&& Objects.equals(hints, that.hints)
&& Objects.equals(tableSample, that.tableSample);
@ -241,8 +254,8 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), selectedIndexId, indexSelected, preAggStatus, cacheSlotWithSlotName,
selectedTabletIds, partitionPruned, manuallySpecifiedPartitions, selectedPartitionIds, hints,
tableSample);
selectedTabletIds, partitionPruned, manuallySpecifiedTabletIds, manuallySpecifiedPartitions,
selectedPartitionIds, hints, tableSample);
}
@Override
@ -251,7 +264,7 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
groupExpression, Optional.of(getLogicalProperties()),
selectedPartitionIds, partitionPruned, selectedTabletIds,
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions,
hints, cacheSlotWithSlotName, tableSample, directMvScan, colToSubPathsMap);
hints, cacheSlotWithSlotName, tableSample, directMvScan, colToSubPathsMap, manuallySpecifiedTabletIds);
}
@Override
@ -260,7 +273,7 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
return new LogicalOlapScan(relationId, (Table) table, qualifier, groupExpression, logicalProperties,
selectedPartitionIds, partitionPruned, selectedTabletIds,
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions,
hints, cacheSlotWithSlotName, tableSample, directMvScan, colToSubPathsMap);
hints, cacheSlotWithSlotName, tableSample, directMvScan, colToSubPathsMap, manuallySpecifiedTabletIds);
}
public LogicalOlapScan withSelectedPartitionIds(List<Long> selectedPartitionIds) {
@ -268,7 +281,7 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
Optional.empty(), Optional.of(getLogicalProperties()),
selectedPartitionIds, true, selectedTabletIds,
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions,
hints, cacheSlotWithSlotName, tableSample, directMvScan, colToSubPathsMap);
hints, cacheSlotWithSlotName, tableSample, directMvScan, colToSubPathsMap, manuallySpecifiedTabletIds);
}
public LogicalOlapScan withMaterializedIndexSelected(long indexId) {
@ -276,7 +289,7 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
Optional.empty(), Optional.of(getLogicalProperties()),
selectedPartitionIds, partitionPruned, selectedTabletIds,
indexId, true, PreAggStatus.unset(), manuallySpecifiedPartitions, hints, cacheSlotWithSlotName,
tableSample, directMvScan, colToSubPathsMap);
tableSample, directMvScan, colToSubPathsMap, manuallySpecifiedTabletIds);
}
public LogicalOlapScan withSelectedTabletIds(List<Long> selectedTabletIds) {
@ -284,7 +297,7 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
Optional.empty(), Optional.of(getLogicalProperties()),
selectedPartitionIds, partitionPruned, selectedTabletIds,
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions,
hints, cacheSlotWithSlotName, tableSample, directMvScan, colToSubPathsMap);
hints, cacheSlotWithSlotName, tableSample, directMvScan, colToSubPathsMap, manuallySpecifiedTabletIds);
}
public LogicalOlapScan withPreAggStatus(PreAggStatus preAggStatus) {
@ -292,7 +305,7 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
Optional.empty(), Optional.of(getLogicalProperties()),
selectedPartitionIds, partitionPruned, selectedTabletIds,
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions,
hints, cacheSlotWithSlotName, tableSample, directMvScan, colToSubPathsMap);
hints, cacheSlotWithSlotName, tableSample, directMvScan, colToSubPathsMap, manuallySpecifiedTabletIds);
}
public LogicalOlapScan withColToSubPathsMap(Map<String, Set<List<String>>> colToSubPathsMap) {
@ -300,7 +313,15 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
Optional.empty(), Optional.empty(),
selectedPartitionIds, partitionPruned, selectedTabletIds,
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions,
hints, cacheSlotWithSlotName, tableSample, directMvScan, colToSubPathsMap);
hints, cacheSlotWithSlotName, tableSample, directMvScan, colToSubPathsMap, manuallySpecifiedTabletIds);
}
public LogicalOlapScan withManuallySpecifiedTabletIds(List<Long> manuallySpecifiedTabletIds) {
return new LogicalOlapScan(relationId, (Table) table, qualifier,
Optional.empty(), Optional.of(getLogicalProperties()),
selectedPartitionIds, partitionPruned, selectedTabletIds,
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions,
hints, cacheSlotWithSlotName, tableSample, directMvScan, colToSubPathsMap, manuallySpecifiedTabletIds);
}
@Override
@ -310,7 +331,7 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
Optional.empty(), Optional.empty(),
selectedPartitionIds, false, selectedTabletIds,
selectedIndexId, indexSelected, preAggStatus, manuallySpecifiedPartitions,
hints, Maps.newHashMap(), tableSample, directMvScan, colToSubPathsMap);
hints, Maps.newHashMap(), tableSample, directMvScan, colToSubPathsMap, selectedTabletIds);
}
@Override
@ -326,6 +347,10 @@ public class LogicalOlapScan extends LogicalCatalogRelation implements OlapScan
return selectedTabletIds;
}
public List<Long> getManuallySpecifiedTabletIds() {
return manuallySpecifiedTabletIds;
}
@Override
public long getSelectedIndexId() {
return selectedIndexId;

View File

@ -20,8 +20,8 @@ package org.apache.doris.nereids.trees.plans.visitor;
import org.apache.doris.catalog.MTMV;
import org.apache.doris.catalog.TableIf;
import org.apache.doris.catalog.TableIf.TableType;
import org.apache.doris.common.AnalysisException;
import org.apache.doris.mtmv.MTMVCache;
import org.apache.doris.mtmv.MTMVPlanUtil;
import org.apache.doris.nereids.trees.plans.Plan;
import org.apache.doris.nereids.trees.plans.logical.LogicalCatalogRelation;
import org.apache.doris.nereids.trees.plans.physical.PhysicalCatalogRelation;
@ -70,13 +70,19 @@ public class TableCollector extends DefaultPlanVisitor<Plan, TableCollectorConte
}
private void expandMvAndCollect(MTMV mtmv, TableCollectorContext context) {
if (!context.isExpand()) {
if (!context.isExpandMaterializedView()) {
return;
}
// Make sure use only one connection context when in query to avoid ConnectionContext.get() wrong
MTMVCache expandedMv = MTMVCache.from(mtmv, context.getConnectContext() == null
? MTMVPlanUtil.createMTMVContext(mtmv) : context.getConnectContext(), false);
expandedMv.getLogicalPlan().accept(this, context);
MTMVCache expandedMvCache;
try {
expandedMvCache = mtmv.getOrGenerateCache(context.getConnectContext());
} catch (AnalysisException exception) {
LOG.warn(String.format("expandMvAndCollect getOrGenerateCache fail, mtmv name is %s", mtmv.getName()),
exception);
expandedMvCache = MTMVCache.from(mtmv, context.getConnectContext(), false);
}
expandedMvCache.getAnalyzedPlan().accept(this, context);
}
/**
@ -87,12 +93,14 @@ public class TableCollector extends DefaultPlanVisitor<Plan, TableCollectorConte
private final Set<TableIf> collectedTables = new HashSet<>();
private final Set<TableType> targetTableTypes;
// if expand the mv or not
private final boolean expand;
private ConnectContext connectContext;
private final boolean expandMaterializedView;
private final ConnectContext connectContext;
public TableCollectorContext(Set<TableType> targetTableTypes, boolean expand) {
public TableCollectorContext(Set<TableType> targetTableTypes, boolean expandMaterializedView,
ConnectContext connectContext) {
this.targetTableTypes = targetTableTypes;
this.expand = expand;
this.expandMaterializedView = expandMaterializedView;
this.connectContext = connectContext;
}
public Set<TableIf> getCollectedTables() {
@ -103,16 +111,12 @@ public class TableCollector extends DefaultPlanVisitor<Plan, TableCollectorConte
return targetTableTypes;
}
public boolean isExpand() {
return expand;
public boolean isExpandMaterializedView() {
return expandMaterializedView;
}
public ConnectContext getConnectContext() {
return connectContext;
}
public void setConnectContext(ConnectContext connectContext) {
this.connectContext = connectContext;
}
}
}

View File

@ -28,6 +28,7 @@ import org.apache.doris.catalog.MaterializedIndex;
import org.apache.doris.catalog.OlapTable;
import org.apache.doris.catalog.Partition;
import org.apache.doris.catalog.PrimitiveType;
import org.apache.doris.nereids.sqltest.SqlTestBase;
import org.apache.doris.nereids.trees.expressions.EqualTo;
import org.apache.doris.nereids.trees.expressions.GreaterThanEqual;
import org.apache.doris.nereids.trees.expressions.InPredicate;
@ -51,8 +52,9 @@ import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.util.List;
import java.util.Objects;
class PruneOlapScanTabletTest implements MemoPatternMatchSupported {
class PruneOlapScanTabletTest extends SqlTestBase implements MemoPatternMatchSupported {
@Test
void testPruneOlapScanTablet(@Mocked OlapTable olapTable,
@ -154,4 +156,21 @@ class PruneOlapScanTabletTest implements MemoPatternMatchSupported {
)
);
}
@Test
void testPruneOlapScanTabletWithManually() {
String sql = "select * from T4 TABLET(110) where id > 8";
PlanChecker.from(connectContext)
.analyze(sql)
.applyTopDown(new PruneOlapScanTablet())
.matches(
logicalFilter(
logicalOlapScan().when(s ->
Objects.equals(s.getSelectedTabletIds(), Lists.newArrayList(110L))
&& Objects.equals(s.getManuallySpecifiedTabletIds(),
Lists.newArrayList(110L))
)
)
);
}
}

View File

@ -87,6 +87,9 @@ public abstract class SqlTestBase extends TestWithFeService implements MemoPatte
+ " score bigint\n"
+ ")\n"
+ "DUPLICATE KEY(id)\n"
+ "AUTO PARTITION BY LIST(`id`)\n"
+ "(\n"
+ ")\n"
+ "DISTRIBUTED BY HASH(id) BUCKETS 1\n"
+ "PROPERTIES (\n"
+ " \"replication_num\" = \"1\"\n"

View File

@ -130,7 +130,7 @@ public class PlanVisitorTest extends TestWithFeService {
Assertions.assertTrue(nondeterministicFunctionSet.get(0) instanceof Random);
// Check get tables
TableCollectorContext collectorContext = new TableCollector.TableCollectorContext(
Sets.newHashSet(TableType.OLAP), true);
Sets.newHashSet(TableType.OLAP), true, connectContext);
physicalPlan.accept(TableCollector.INSTANCE, collectorContext);
Set<String> expectedTables = new HashSet<>();
expectedTables.add("table1");
@ -159,7 +159,7 @@ public class PlanVisitorTest extends TestWithFeService {
Assertions.assertTrue(nondeterministicFunctionSet.get(1) instanceof Random);
// Check get tables
TableCollectorContext collectorContext = new TableCollector.TableCollectorContext(
Sets.newHashSet(TableType.OLAP), true);
Sets.newHashSet(TableType.OLAP), true, connectContext);
physicalPlan.accept(TableCollector.INSTANCE, collectorContext);
Set<String> expectedTables = new HashSet<>();
expectedTables.add("table1");
@ -196,7 +196,7 @@ public class PlanVisitorTest extends TestWithFeService {
Assertions.assertTrue(nondeterministicFunctionSet.get(0) instanceof Uuid);
// Check get tables
TableCollectorContext collectorContext = new TableCollector.TableCollectorContext(
Sets.newHashSet(TableType.OLAP), true);
Sets.newHashSet(TableType.OLAP), true, connectContext);
physicalPlan.accept(TableCollector.INSTANCE, collectorContext);
Set<String> expectedTables = new HashSet<>();
expectedTables.add("table1");
@ -210,7 +210,7 @@ public class PlanVisitorTest extends TestWithFeService {
TableCollectorContext collectorContextWithNoExpand =
new TableCollector.TableCollectorContext(Sets.newHashSet(TableType.OLAP),
false);
false, connectContext);
physicalPlan.accept(TableCollector.INSTANCE, collectorContextWithNoExpand);
Set<String> expectedTablesWithNoExpand = new HashSet<>();
expectedTablesWithNoExpand.add("table1");
@ -222,7 +222,7 @@ public class PlanVisitorTest extends TestWithFeService {
expectedTablesWithNoExpand);
TableCollectorContext mvCollectorContext = new TableCollector.TableCollectorContext(
Sets.newHashSet(TableType.MATERIALIZED_VIEW), true);
Sets.newHashSet(TableType.MATERIALIZED_VIEW), true, connectContext);
physicalPlan.accept(TableCollector.INSTANCE, mvCollectorContext);
Set<String> expectedMvs = new HashSet<>();
expectedMvs.add("mv1");
@ -234,7 +234,7 @@ public class PlanVisitorTest extends TestWithFeService {
TableCollectorContext mvCollectorContextWithNoExpand =
new TableCollector.TableCollectorContext(
Sets.newHashSet(TableType.MATERIALIZED_VIEW), false);
Sets.newHashSet(TableType.MATERIALIZED_VIEW), false, connectContext);
physicalPlan.accept(TableCollector.INSTANCE, mvCollectorContextWithNoExpand);
Set<String> expectedMvsWithNoExpand = new HashSet<>();
expectedMvsWithNoExpand.add("mv1");
@ -246,7 +246,7 @@ public class PlanVisitorTest extends TestWithFeService {
TableCollectorContext allTableTypeWithExpand =
new TableCollector.TableCollectorContext(
Sets.newHashSet(TableType.values()), true);
Sets.newHashSet(TableType.values()), true, connectContext);
physicalPlan.accept(TableCollector.INSTANCE, allTableTypeWithExpand);
// when collect in plan with expand, should collect table which is expended
Set<String> expectedTablesWithExpand = new HashSet<>();

View File

@ -1,11 +1,18 @@
-- This file is automatically generated. You should know what you did if you want to edit this
-- !select_star --
19930101 1 1 1 1 1 1 1 1 1 1 100 1 1 1 2023-06-09 shipmode name address city nation AMERICA phone mktsegment name address city nation AMERICA phone name MFGR#1 category brand color type 4 container
19930101 1 1 1 1 1 1 1 1 1 1 100 1 1 1 2023-06-09 shipmode name address city nation AMERICA phone mktsegment name address city nation AMERICA phone name MFGR#1 category brand color type 4 container
19930101 1 1 1 1 1 1 1 1 1 1 100 1 1 1 2023-06-09 shipmode name address city nation AMERICA phone mktsegment name address city nation AMERICA phone name MFGR#1 category brand color type 4 container
19930101 1 1 1 1 1 1 1 1 1 1 100 1 1 1 2023-06-09 shipmode name address city nation AMERICA phone mktsegment name address city nation AMERICA phone name MFGR#1 category brand color type 4 container
19930101 1 1 1 1 1 1 1 1 1 1 100 1 1 1 2023-06-09 shipmode name address city nation AMERICA phone mktsegment name address city nation AMERICA phone name MFGR#1 category brand color type 4 container
19930101 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2023-06-09 shipmode name address city nation region phone mktsegment name address city nation region phone name mfgr category brand color type 4 container
19930101 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2023-06-09 shipmode name address city nation region phone mktsegment name address city nation region phone name mfgr category brand color type 4 container
19930101 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2023-06-09 shipmode name address city nation region phone mktsegment name address city nation region phone name mfgr category brand color type 4 container
19930101 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2023-06-09 shipmode name address city nation region phone mktsegment name address city nation region phone name mfgr category brand color type 4 container
-- !select_mv --
4
16
-- !select --
4
16

View File

@ -323,3 +323,27 @@ c 3 6 c,c,c 5.333333333333333 mi 3 2
1 2023-12-09 1 yy 2 2 2 4 3 \N 2 3 \N \N 8 8 1
1 2023-12-09 1 yy 2 2 2 4 3 \N 2 3 1 2 8 8 1
-- !query29_0_before --
1 2023-12-09 1 yy 2 2 2 4 3 \N 2 3 \N \N 8 8 1
1 2023-12-09 1 yy 2 2 2 4 3 \N 2 3 1 2 8 8 1
-- !query29_0_after --
1 2023-12-09 1 yy 2 2 2 4 3 \N 2 3 \N \N 8 8 1
1 2023-12-09 1 yy 2 2 2 4 3 \N 2 3 1 2 8 8 1
-- !query30_0_before --
1 2023-12-09 1 yy 2 2 2 4 3 \N 2 3 \N \N 8 8 1
1 2023-12-09 1 yy 2 2 2 4 3 \N 2 3 1 2 8 8 1
-- !query30_0_after --
1 2023-12-09 1 yy 2 2 2 4 3 \N 2 3 \N \N 8 8 1
1 2023-12-09 1 yy 2 2 2 4 3 \N 2 3 1 2 8 8 1
-- !query31_0_before --
1 2023-12-09 1 yy 2 2 2 4 3 \N 2 3 \N \N 8 8 1
1 2023-12-09 1 yy 2 2 2 4 3 \N 2 3 1 2 8 8 1
-- !query31_0_after --
1 2023-12-09 1 yy 2 2 2 4 3 \N 2 3 \N \N 8 8 1
1 2023-12-09 1 yy 2 2 2 4 3 \N 2 3 1 2 8 8 1

View File

@ -0,0 +1,128 @@
-- This file is automatically generated. You should know what you did if you want to edit this
-- !query_0_after --
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-09 4 1 3
2023-12-09 4 1 3
2023-12-09 4 1 3
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-11 4 2 3
2023-12-11 4 2 3
2023-12-11 4 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
-- !query_mv_0 --
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-09 4 1 3
2023-12-09 4 1 3
2023-12-09 4 1 3
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-11 4 2 3
2023-12-11 4 2 3
2023-12-11 4 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
-- !query_mv_1 --
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-09 4 1 3
2023-12-09 4 1 3
2023-12-09 4 1 3
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-11 4 2 3
2023-12-11 4 2 3
2023-12-11 4 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
-- !query_0_after --
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-09 4 1 3
2023-12-09 4 1 3
2023-12-09 4 1 3
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-11 4 2 3
2023-12-11 4 2 3
2023-12-11 4 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
-- !query_mv_2 --
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-09 4 1 3
2023-12-09 4 1 3
2023-12-09 4 1 3
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-11 4 2 3
2023-12-11 4 2 3
2023-12-11 4 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
-- !query_mv_3 --
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-08 4 1 3
2023-12-09 4 1 3
2023-12-09 4 1 3
2023-12-09 4 1 3
2023-12-10 4 1 3
2023-12-10 4 1 3
2023-12-10 4 1 3
2023-12-10 4 1 3
2023-12-10 4 1 3
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-10 4 1 4
2023-12-11 4 2 3
2023-12-11 4 2 3
2023-12-11 4 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3
2023-12-12 6 2 3

View File

@ -1515,6 +1515,22 @@ class Suite implements GroovyInterceptable {
sql "analyze table ${db}.${mv_name} with sync;"
}
def create_async_partition_mv = { db, mv_name, mv_sql, partition_col ->
sql """DROP MATERIALIZED VIEW IF EXISTS ${db}.${mv_name}"""
sql"""
CREATE MATERIALIZED VIEW ${db}.${mv_name}
BUILD IMMEDIATE REFRESH COMPLETE ON MANUAL
PARTITION BY ${partition_col}
DISTRIBUTED BY RANDOM BUCKETS 2
PROPERTIES ('replication_num' = '1')
AS ${mv_sql}
"""
def job_name = getJobName(db, mv_name);
waitingMTMVTaskFinished(job_name)
sql "analyze table ${db}.${mv_name} with sync;"
}
// mv not part in rewrite process
def mv_not_part_in = { query_sql, mv_name ->
logger.info("query_sql = " + query_sql + ", mv_names = " + mv_name)
@ -1601,9 +1617,8 @@ class Suite implements GroovyInterceptable {
check { result ->
boolean success = true;
for (String mv_name : mv_names) {
success = success && result.contains("${mv_name} chose")
Assert.assertEquals(true, result.contains("${mv_name} chose"))
}
Assert.assertEquals(true, success)
}
}
}

View File

@ -56,6 +56,8 @@ suite ("agg_have_dup_base") {
qt_select_mv "select unix_timestamp(k1) tmp,sum(k2) from d_table group by tmp order by tmp;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k4 set stats ('row_count'='5');"""
mv_rewrite_success("select k1,sum(k2),max(k2) from d_table group by k1;", "k12s3m")
mv_rewrite_success("select k1,sum(k2) from d_table group by k1;", "k12s3m")

View File

@ -69,6 +69,7 @@ suite ("test_agg_state_max_by") {
qt_select_star "select * from d_table order by 1,2;"
mv_rewrite_success("select k1,max_by(k2,k3) from d_table group by k1 order by 1,2;", "k1mb")
sql """set enable_stats=true;"""
sql """alter table d_table modify column k4 set stats ('row_count'='8');"""
mv_rewrite_success("select k1,max_by(k2,k3) from d_table group by k1 order by 1,2;", "k1mb")
qt_select_mv "select k1,max_by(k2,k3) from d_table group by k1 order by 1,2;"
@ -101,6 +102,7 @@ suite ("test_agg_state_max_by") {
qt_select_star "select * from d_table order by 1,2;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k4 set stats ('row_count'='8');"""
sql "analyze table d_table with sync;"
sql """set enable_stats=false;"""
@ -114,6 +116,7 @@ suite ("test_agg_state_max_by") {
qt_select_mv "select k1,max_by(k2,abs(k3)) from d_table group by k1 order by 1,2;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k4 set stats ('row_count'='8');"""
mv_rewrite_success("select k1,max_by(k2+k3,abs(k3)) from d_table group by k1 order by 1,2;", "k1mbcp1")
mv_rewrite_success("select k1,max_by(k2+k3,k3) from d_table group by k1 order by 1,2;", "k1mbcp2")
mv_rewrite_success("select k1,max_by(k2,abs(k3)) from d_table group by k1 order by 1,2;", "k1mbcp3")

View File

@ -37,6 +37,8 @@ suite ("case_ignore") {
sql "insert into d_table select 2,2,2,'b';"
sql "insert into d_table select 3,-3,null,'c';"
sql """alter table d_table modify column k4 set stats ('row_count'='4');"""
createMV ("create materialized view k12a as select K1,abs(K2) from d_table;")
sql "insert into d_table select -4,-4,-4,'d';"
@ -53,6 +55,7 @@ suite ("case_ignore") {
qt_select_mv "select K1,abs(K2) from d_table order by K1;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k4 set stats ('row_count'='8');"""
mv_rewrite_success("select k1,abs(k2) from d_table order by k1;", "k12a")
mv_rewrite_success("select K1,abs(K2) from d_table order by K1;", "k12a")

View File

@ -64,6 +64,8 @@ suite ("count_star") {
qt_select_mv "select count(*) from d_table where k3=1;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k4 set stats ('row_count'='8');"""
mv_rewrite_success("select k1,k4,count(*) from d_table group by k1,k4;", "kstar")
mv_rewrite_success("select k1,k4,count(*) from d_table where k1=1 group by k1,k4;", "kstar")
mv_rewrite_fail("select k1,k4,count(*) from d_table where k3=1 group by k1,k4;", "kstar")

View File

@ -28,6 +28,8 @@ suite ("dis_26495") {
sql """insert into doris_test values (1,2,max_by_state(1,2));"""
sql """alter table doris_test modify column agg_st_1 set stats ('row_count'='1');"""
streamLoad {
table "doris_test"
set 'column_separator', ','

View File

@ -52,6 +52,8 @@ suite ("k1ap2spa") {
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='5');"""
mv_rewrite_success("select abs(k1)+1 t,sum(abs(k2+1)) from d_table group by t order by t;", "k1ap2spa")
}

View File

@ -36,6 +36,7 @@ suite ("k1s2m3") {
sql "insert into d_table select 2,2,2,'b';"
sql "insert into d_table select 3,-3,null,'c';"
sql """alter table d_table modify column k1 set stats ('row_count'='6');"""
createMV("create materialized view k1s2m3 as select k1,sum(k2*k3) from d_table group by k1;")
sql "insert into d_table select -4,-4,-4,'d';"

View File

@ -57,5 +57,6 @@ suite ("k1s2m3_auto_inc") {
qt_select_mv "select k3,sum(abs(k2+1)) from d_table group by k3 order by 1;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='2');"""
mv_rewrite_success("select k3,sum(abs(k2+1)) from d_table group by k3 order by 1;", "k3ap2spa")
}

View File

@ -64,6 +64,7 @@ suite ("multi_agg_with_same_slot") {
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='5');"""
mv_rewrite_success("select k1,k2,avg(k3),max(k3) from d_table group by k1,k2 order by 1,2;", "kmv")
mv_rewrite_success("select k1,k2,avg(k3)+max(k3) from d_table group by k1,k2 order by 1,2;", "kmv")
mv_rewrite_success("select k1,k2,avg(k3)+max(k3) from d_table group by grouping sets((k1),(k1,k2),()) order by 1,2;", "kmv")

View File

@ -66,5 +66,6 @@ suite ("multi_slot_k123p") {
qt_select_mv "select k1,version() from d_table order by k1;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='5');"""
mv_rewrite_success("select k1,k2+k3 from d_table order by k1;", "k123p")
}

View File

@ -51,6 +51,7 @@ suite ("multi_slot_k1a2p2ap3p") {
qt_select_mv "select abs(k1)+k2+1,abs(k2+2)+k3+3 from d_table order by abs(k1)+k2+1,abs(k2+2)+k3+3;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='7');"""
mv_rewrite_success("select abs(k1)+k2+1,abs(k2+2)+k3+3 from d_table order by abs(k1)+k2+1,abs(k2+2)+k3+3", "k1a2p2ap3p")
}

View File

@ -61,6 +61,7 @@ suite ("multi_slot_k1a2p2ap3ps") {
qt_select_base "select abs(k1)+k2+1,sum(abs(k2+2)+k3+3) from d_table group by abs(k1)+k2 order by abs(k1)+k2;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='4');"""
mv_rewrite_success("select abs(k1)+k2+1,sum(abs(k2+2)+k3+3) from d_table group by abs(k1)+k2+1 order by abs(k1)+k2+1", "k1a2p2ap3ps")
mv_rewrite_fail("select abs(k1)+k2+1,sum(abs(k2+2)+k3+3) from d_table group by abs(k1)+k2 order by abs(k1)+k2", "k1a2p2ap3ps")

View File

@ -50,5 +50,6 @@ suite ("multi_slot_k1p2ap3p") {
qt_select_mv "select k1+1,abs(k2+2)+k3+3 from d_table order by k1+1;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='4');"""
mv_rewrite_success("select k1+1,abs(k2+2)+k3+3 from d_table order by k1+1;", "k1p2ap3p")
}

View File

@ -49,5 +49,6 @@ suite ("multi_slot_k1p2ap3ps") {
qt_select_mv "select k1+1,sum(abs(k2+2)+k3+3) from d_table group by k1+1 order by k1+1;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='5');"""
mv_rewrite_success("select k1+1,sum(abs(k2+2)+k3+3) from d_table group by k1+1 order by k1+1;", "k1p2ap3ps")
}

View File

@ -36,28 +36,25 @@ suite ("mv_with_view") {
sql """insert into d_table select 1,1,1,'a';"""
sql """insert into d_table select 2,2,2,'b';"""
createMV("create materialized view k132 as select k1,k3,k2 from d_table;")
createMV("create materialized view k312 as select k3,k1,k2 from d_table;")
sql """insert into d_table select 3,-3,null,'c';"""
explain {
sql("select * from d_table order by k1;")
contains "(d_table)"
}
sql "analyze table d_table with sync;"
sql """set enable_stats=false;"""
mv_rewrite_fail("select * from d_table order by k1;", "k312")
qt_select_star "select * from d_table order by k1;"
sql """
drop view if exists v_k132;
drop view if exists v_k312;
"""
sql """
create view v_k132 as select k1,k3,k2 from d_table where k1 = 1;
create view v_k312 as select k1,k3,k2 from d_table where k3 = 1;
"""
explain {
sql("select * from v_k132 order by k1;")
contains "(k132)"
}
qt_select_mv "select * from v_k132 order by k1;"
mv_rewrite_success("select * from v_k312 order by k1;", "k312")
qt_select_mv "select * from v_k312 order by k1;"
sql """
drop view if exists v_k124;
@ -66,9 +63,6 @@ suite ("mv_with_view") {
sql """
create view v_k124 as select k1,k2,k4 from d_table where k1 = 1;
"""
explain {
sql("select * from v_k124 order by k1;")
contains "(d_table)"
}
mv_rewrite_fail("select * from v_k124 order by k1;", "k312")
qt_select_mv "select * from v_k124 order by k1;"
}

View File

@ -76,6 +76,7 @@ suite ("null_insert") {
GROUP BY date,vid,os,ver,ip_country;"""
sql """set enable_stats=true;"""
sql """alter table test modify column date set stats ('row_count'='3');"""
mv_rewrite_success("""SELECT date, vid, os, ver, ip_country, hll_union(hll_hash(uid))
FROM test
GROUP BY date,vid,os,ver,ip_country;""", "mv_test")

View File

@ -58,5 +58,6 @@ suite ("routine_load_hll") {
qt_select_mv "select time_stamp, hll_union_agg(device_id) from test group by time_stamp order by 1;"
sql """set enable_stats=true;"""
sql """alter table test modify column event_id set stats ('row_count'='2');"""
mv_rewrite_success("select time_stamp, hll_union_agg(device_id) from test group by time_stamp order by 1;", "m_view")
}

View File

@ -65,14 +65,6 @@ suite ("multiple_ssb_between") {
) ENGINE=OLAP
DUPLICATE KEY(`LO_ORDERDATE`, `LO_ORDERKEY`)
COMMENT "OLAP"
PARTITION BY RANGE(`LO_ORDERDATE`)
(PARTITION p1992 VALUES [("-2147483648"), ("19930101")),
PARTITION p1993 VALUES [("19930101"), ("19940101")),
PARTITION p1994 VALUES [("19940101"), ("19950101")),
PARTITION p1995 VALUES [("19950101"), ("19960101")),
PARTITION p1996 VALUES [("19960101"), ("19970101")),
PARTITION p1997 VALUES [("19970101"), ("19980101")),
PARTITION p1998 VALUES [("19980101"), ("19990101")))
DISTRIBUTED BY HASH(`LO_ORDERKEY`) BUCKETS 48
PROPERTIES (
"replication_num" = "1",
@ -153,17 +145,22 @@ suite ("multiple_ssb_between") {
sql """INSERT INTO lineorder_flat (LO_ORDERDATE, LO_ORDERKEY, LO_LINENUMBER, LO_CUSTKEY, LO_PARTKEY, LO_SUPPKEY, LO_ORDERPRIORITY, LO_SHIPPRIORITY, LO_QUANTITY, LO_EXTENDEDPRICE, LO_ORDTOTALPRICE, LO_DISCOUNT, LO_REVENUE, LO_SUPPLYCOST, LO_TAX, LO_COMMITDATE, LO_SHIPMODE, C_NAME, C_ADDRESS, C_CITY, C_NATION, C_REGION, C_PHONE, C_MKTSEGMENT, S_NAME, S_ADDRESS, S_CITY, S_NATION, S_REGION, S_PHONE, P_NAME, P_MFGR, P_CATEGORY, P_BRAND, P_COLOR,P_TYPE,P_SIZE,P_CONTAINER) VALUES (1 , 1 , 1 , 1 , 1 , 1 , '1' , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , '2023-06-09' , 'shipmode' , 'name' , 'address' , 'city' , 'nation' , 'AMERICA' , 'phone' , 'mktsegment' , 'name' , 'address' , 'city' , 'nation' , 'AMERICA' ,'phone', 'name', 'MFGR#1', 'category', 'brand', 'color', 'type', 4 ,'container');"""
qt_select_star "select * from lineorder_flat order by 1,2, P_MFGR;"
sql "analyze table lineorder_flat with sync;"
sql """set enable_stats=true;"""
sql """alter table lineorder_flat modify column LO_ORDERDATE set stats ('row_count'='8');"""
explain {
sql("""SELECT SUM(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue
mv_rewrite_success("""SELECT SUM(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue
FROM lineorder_flat
WHERE
LO_ORDERDATE >= 19930101
AND LO_ORDERDATE <= 19931231
AND LO_DISCOUNT BETWEEN 1 AND 3
AND LO_QUANTITY < 25;""")
contains "(lineorder_q_1_1)"
}
AND LO_QUANTITY < 25;""",
"lineorder_q_1_1"
)
qt_select_q_1_1 """SELECT SUM(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue
FROM lineorder_flat
WHERE
@ -172,16 +169,16 @@ suite ("multiple_ssb_between") {
AND LO_DISCOUNT BETWEEN 1 AND 3
AND LO_QUANTITY < 25;"""
explain {
sql("""SELECT
mv_rewrite_success("""SELECT
SUM(LO_REVENUE), (LO_ORDERDATE DIV 10000) AS YEAR,
P_BRAND
FROM lineorder_flat
WHERE P_CATEGORY = 'MFGR#12' AND S_REGION = 'AMERICA'
GROUP BY (LO_ORDERDATE DIV 10000), P_BRAND
ORDER BY YEAR, P_BRAND;""")
contains "(lineorder_q_2_1)"
}
ORDER BY YEAR, P_BRAND;""",
"lineorder_q_2_1"
)
qt_select_q_2_1 """SELECT
SUM(LO_REVENUE), (LO_ORDERDATE DIV 10000) AS YEAR,
P_BRAND
@ -190,8 +187,7 @@ suite ("multiple_ssb_between") {
GROUP BY YEAR, P_BRAND
ORDER BY YEAR, P_BRAND;"""
explain {
sql("""SELECT
mv_rewrite_success("""SELECT
C_NATION,
S_NATION, (LO_ORDERDATE DIV 10000) AS YEAR,
SUM(LO_REVENUE) AS revenue
@ -202,9 +198,9 @@ suite ("multiple_ssb_between") {
AND LO_ORDERDATE >= 19920101
AND LO_ORDERDATE <= 19971231
GROUP BY C_NATION, S_NATION, YEAR
ORDER BY YEAR ASC, revenue DESC;""")
contains "(lineorder_q_3_1)"
}
ORDER BY YEAR ASC, revenue DESC;""",
"lineorder_q_3_1")
qt_select_q_3_1 """SELECT
C_NATION,
S_NATION, (LO_ORDERDATE DIV 10000) AS YEAR,
@ -218,8 +214,7 @@ suite ("multiple_ssb_between") {
GROUP BY C_NATION, S_NATION, YEAR
ORDER BY YEAR ASC, revenue DESC;"""
explain {
sql("""SELECT (LO_ORDERDATE DIV 10000) AS YEAR,
mv_rewrite_success("""SELECT (LO_ORDERDATE DIV 10000) AS YEAR,
C_NATION,
SUM(LO_REVENUE - LO_SUPPLYCOST) AS profit
FROM lineorder_flat
@ -228,9 +223,9 @@ suite ("multiple_ssb_between") {
AND S_REGION = 'AMERICA'
AND P_MFGR IN ('MFGR#1', 'MFGR#2')
GROUP BY YEAR, C_NATION
ORDER BY YEAR ASC, C_NATION ASC;""")
contains "(lineorder_q_4_1)"
}
ORDER BY YEAR ASC, C_NATION ASC;""",
"lineorder_q_4_1")
qt_select_q_4_1 """SELECT (LO_ORDERDATE DIV 10000) AS YEAR,
C_NATION,
SUM(LO_REVENUE - LO_SUPPLYCOST) AS profit

View File

@ -63,14 +63,6 @@ suite ("mv_ssb_q_1_1") {
) ENGINE=OLAP
DUPLICATE KEY(`LO_ORDERDATE`, `LO_ORDERKEY`)
COMMENT "OLAP"
PARTITION BY RANGE(`LO_ORDERDATE`)
(PARTITION p1992 VALUES [("-2147483648"), ("19930101")),
PARTITION p1993 VALUES [("19930101"), ("19940101")),
PARTITION p1994 VALUES [("19940101"), ("19950101")),
PARTITION p1995 VALUES [("19950101"), ("19960101")),
PARTITION p1996 VALUES [("19960101"), ("19970101")),
PARTITION p1997 VALUES [("19970101"), ("19980101")),
PARTITION p1998 VALUES [("19980101"), ("19990101")))
DISTRIBUTED BY HASH(`LO_ORDERKEY`) BUCKETS 48
PROPERTIES (
"replication_num" = "1",
@ -80,7 +72,15 @@ suite ("mv_ssb_q_1_1") {
);
"""
sql """INSERT INTO lineorder_flat (LO_ORDERDATE, LO_ORDERKEY, LO_LINENUMBER, LO_CUSTKEY, LO_PARTKEY, LO_SUPPKEY, LO_ORDERPRIORITY, LO_SHIPPRIORITY, LO_QUANTITY, LO_EXTENDEDPRICE, LO_ORDTOTALPRICE, LO_DISCOUNT, LO_REVENUE, LO_SUPPLYCOST, LO_TAX, LO_COMMITDATE, LO_SHIPMODE, C_NAME, C_ADDRESS, C_CITY, C_NATION, C_REGION, C_PHONE, C_MKTSEGMENT, S_NAME, S_ADDRESS, S_CITY, S_NATION, S_REGION, S_PHONE, P_NAME, P_MFGR, P_CATEGORY, P_BRAND, P_COLOR,P_TYPE,P_SIZE,P_CONTAINER) VALUES (19930101 , 1 , 1 , 1 , 1 , 1 , '1' , 1 , 1 , 1 , 1 , 100 , 1 , 1 , 1 , '2023-06-09' , 'shipmode' , 'name' , 'address' , 'city' , 'nation' , 'AMERICA' , 'phone' , 'mktsegment' , 'name' , 'address' , 'city' , 'nation' , 'AMERICA' ,'phone', 'name', 'MFGR#1', 'category', 'brand', 'color', 'type', 4 ,'container');"""
// Insert extra rows up front so the materialized view is more likely to be chosen by the optimizer
sql """INSERT INTO lineorder_flat (LO_ORDERDATE, LO_ORDERKEY, LO_LINENUMBER, LO_CUSTKEY, LO_PARTKEY, LO_SUPPKEY, LO_ORDERPRIORITY, LO_SHIPPRIORITY, LO_QUANTITY, LO_EXTENDEDPRICE, LO_ORDTOTALPRICE, LO_DISCOUNT, LO_REVENUE, LO_SUPPLYCOST, LO_TAX, LO_COMMITDATE, LO_SHIPMODE, C_NAME, C_ADDRESS, C_CITY, C_NATION, C_REGION, C_PHONE, C_MKTSEGMENT, S_NAME, S_ADDRESS, S_CITY, S_NATION, S_REGION, S_PHONE, P_NAME, P_MFGR, P_CATEGORY, P_BRAND, P_COLOR,P_TYPE,P_SIZE,P_CONTAINER)
VALUES (19930101 , 1 , 1 , 1 , 1 , 1 , '1' , 1 , 1 , 1 , 1 , 100 , 1 , 1 , 1 , '2023-06-09' , 'shipmode' , 'name' , 'address' , 'city' , 'nation' , 'AMERICA' , 'phone' , 'mktsegment' , 'name' , 'address' , 'city' , 'nation' , 'AMERICA' ,'phone', 'name', 'MFGR#1', 'category', 'brand', 'color', 'type', 4 ,'container'),
(19930101 , 1 , 1 , 1 , 1 , 1 , '1' , 1 , 1 , 1 , 1 , 100 , 1 , 1 , 1 , '2023-06-09' , 'shipmode' , 'name' , 'address' , 'city' , 'nation' , 'AMERICA' , 'phone' , 'mktsegment' , 'name' , 'address' , 'city' , 'nation' , 'AMERICA' ,'phone', 'name', 'MFGR#1', 'category', 'brand', 'color', 'type', 4 ,'container'),
(19930101 , 1 , 1 , 1 , 1 , 1 , '1' , 1 , 1 , 1 , 1 , 100 , 1 , 1 , 1 , '2023-06-09' , 'shipmode' , 'name' , 'address' , 'city' , 'nation' , 'AMERICA' , 'phone' , 'mktsegment' , 'name' , 'address' , 'city' , 'nation' , 'AMERICA' ,'phone', 'name', 'MFGR#1', 'category', 'brand', 'color', 'type', 4 ,'container'),
(19930101 , 1 , 1 , 1 , 1 , 1 , '1' , 1 , 1 , 1 , 1 , 100 , 1 , 1 , 1 , '2023-06-09' , 'shipmode' , 'name' , 'address' , 'city' , 'nation' , 'AMERICA' , 'phone' , 'mktsegment' , 'name' , 'address' , 'city' , 'nation' , 'AMERICA' ,'phone', 'name', 'MFGR#1', 'category', 'brand', 'color', 'type', 4 ,'container'),
(19930101 , 1 , 1 , 1 , 1 , 1 , '1' , 1 , 1 , 1 , 1 , 100 , 1 , 1 , 1 , '2023-06-09' , 'shipmode' , 'name' , 'address' , 'city' , 'nation' , 'AMERICA' , 'phone' , 'mktsegment' , 'name' , 'address' , 'city' , 'nation' , 'AMERICA' ,'phone', 'name', 'MFGR#1', 'category', 'brand', 'color', 'type', 4 ,'container');"""
sql """alter table lineorder_flat modify column LO_ORDERDATE set stats ('row_count'='6');"""
createMV ("""create materialized view lineorder_q_1_1 as
SELECT LO_ORDERKEY, SUM(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue
@ -90,23 +90,25 @@ suite ("mv_ssb_q_1_1") {
AND LO_ORDERDATE <= 19931231
AND LO_DISCOUNT >= 1 AND LO_DISCOUNT <= 3
AND LO_QUANTITY < 25
GROUP BY
LO_ORDERKEY;""")
sql """INSERT INTO lineorder_flat (LO_ORDERDATE, LO_ORDERKEY, LO_LINENUMBER, LO_CUSTKEY, LO_PARTKEY, LO_SUPPKEY, LO_ORDERPRIORITY, LO_SHIPPRIORITY, LO_QUANTITY, LO_EXTENDEDPRICE, LO_ORDTOTALPRICE, LO_DISCOUNT, LO_REVENUE, LO_SUPPLYCOST, LO_TAX, LO_COMMITDATE, LO_SHIPMODE,C_NAME,C_ADDRESS,C_CITY,C_NATION,C_REGION,C_PHONE,C_MKTSEGMENT,S_NAME,S_ADDRESS,S_CITY,S_NATION,S_REGION,S_PHONE,P_NAME,P_MFGR,P_CATEGORY,P_BRAND,P_COLOR,P_TYPE,P_SIZE,P_CONTAINER) VALUES (19930101 , 2 , 2 , 2 , 2 , 2 ,'2',2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,'2023-06-09','shipmode','name','address','city','nation','region','phone','mktsegment','name','address','city','nation','region','phone','name','mfgr','category','brand','color','type',4,'container');"""
GROUP BY LO_ORDERKEY;""")
sql """INSERT INTO lineorder_flat (LO_ORDERDATE, LO_ORDERKEY, LO_LINENUMBER, LO_CUSTKEY, LO_PARTKEY, LO_SUPPKEY, LO_ORDERPRIORITY, LO_SHIPPRIORITY, LO_QUANTITY, LO_EXTENDEDPRICE, LO_ORDTOTALPRICE, LO_DISCOUNT, LO_REVENUE, LO_SUPPLYCOST, LO_TAX, LO_COMMITDATE, LO_SHIPMODE,C_NAME,C_ADDRESS,C_CITY,C_NATION,C_REGION,C_PHONE,C_MKTSEGMENT,S_NAME,S_ADDRESS,S_CITY,S_NATION,S_REGION,S_PHONE,P_NAME,P_MFGR,P_CATEGORY,P_BRAND,P_COLOR,P_TYPE,P_SIZE,P_CONTAINER)
VALUES
(19930101 , 2 , 2 , 2 , 2 , 2 ,'2',2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,'2023-06-09','shipmode','name','address','city','nation','region','phone','mktsegment','name','address','city','nation','region','phone','name','mfgr','category','brand','color','type',4,'container'),
(19930101 , 2 , 2 , 2 , 2 , 2 ,'2',2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,'2023-06-09','shipmode','name','address','city','nation','region','phone','mktsegment','name','address','city','nation','region','phone','name','mfgr','category','brand','color','type',4,'container'),
(19930101 , 2 , 2 , 2 , 2 , 2 ,'2',2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,'2023-06-09','shipmode','name','address','city','nation','region','phone','mktsegment','name','address','city','nation','region','phone','name','mfgr','category','brand','color','type',4,'container'),
(19930101 , 2 , 2 , 2 , 2 , 2 ,'2',2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,'2023-06-09','shipmode','name','address','city','nation','region','phone','mktsegment','name','address','city','nation','region','phone','name','mfgr','category','brand','color','type',4,'container');"""
qt_select_star "select * from lineorder_flat order by 1,2, P_MFGR;"
explain {
sql("""SELECT SUM(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue
sql "analyze table lineorder_flat with sync;"
mv_rewrite_success("""SELECT SUM(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue
FROM lineorder_flat
WHERE
LO_ORDERDATE >= 19930101
AND LO_ORDERDATE <= 19931231
AND LO_DISCOUNT >= 1 AND LO_DISCOUNT <= 3
AND LO_QUANTITY < 25;""")
contains "(lineorder_q_1_1)"
}
AND LO_QUANTITY < 25;""", "lineorder_q_1_1")
qt_select_mv """SELECT SUM(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue
FROM lineorder_flat
WHERE
@ -115,6 +117,7 @@ suite ("mv_ssb_q_1_1") {
AND LO_DISCOUNT >= 1 AND LO_DISCOUNT <= 3
AND LO_QUANTITY < 25;"""
sql""" drop materialized view lineorder_q_1_1 on lineorder_flat; """
qt_select """SELECT SUM(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue
@ -124,4 +127,5 @@ suite ("mv_ssb_q_1_1") {
AND LO_ORDERDATE <= 19931231
AND LO_DISCOUNT >= 1 AND LO_DISCOUNT <= 3
AND LO_QUANTITY < 25;"""
}

View File

@ -93,6 +93,8 @@ suite ("mv_ssb_q_2_1") {
qt_select_star "select * from lineorder_flat order by 1,2,P_MFGR;"
sql """alter table lineorder_flat modify column LO_ORDERDATE set stats ('row_count'='2');"""
mv_rewrite_success("""SELECT
SUM(LO_REVENUE), (LO_ORDERDATE DIV 10000) AS YEAR,
P_BRAND

View File

@ -66,14 +66,6 @@ suite ("mv_ssb_q_3_1") {
) ENGINE=OLAP
DUPLICATE KEY(`LO_ORDERDATE`, `LO_ORDERKEY`)
COMMENT "OLAP"
PARTITION BY RANGE(`LO_ORDERDATE`)
(PARTITION p1992 VALUES [("-2147483648"), ("19930101")),
PARTITION p1993 VALUES [("19930101"), ("19940101")),
PARTITION p1994 VALUES [("19940101"), ("19950101")),
PARTITION p1995 VALUES [("19950101"), ("19960101")),
PARTITION p1996 VALUES [("19960101"), ("19970101")),
PARTITION p1997 VALUES [("19970101"), ("19980101")),
PARTITION p1998 VALUES [("19980101"), ("19990101")))
DISTRIBUTED BY HASH(`LO_ORDERKEY`) BUCKETS 48
PROPERTIES (
"replication_num" = "1",
@ -83,7 +75,8 @@ suite ("mv_ssb_q_3_1") {
);
"""
sql """INSERT INTO lineorder_flat (LO_ORDERDATE, LO_ORDERKEY, LO_LINENUMBER, LO_CUSTKEY, LO_PARTKEY, LO_SUPPKEY, LO_ORDERPRIORITY, LO_SHIPPRIORITY, LO_QUANTITY, LO_EXTENDEDPRICE, LO_ORDTOTALPRICE, LO_DISCOUNT, LO_REVENUE, LO_SUPPLYCOST, LO_TAX, LO_COMMITDATE, LO_SHIPMODE, C_NAME, C_ADDRESS, C_CITY, C_NATION, C_REGION, C_PHONE, C_MKTSEGMENT, S_NAME, S_ADDRESS, S_CITY, S_NATION, S_REGION, S_PHONE, P_NAME, P_MFGR, P_CATEGORY, P_BRAND, P_COLOR,P_TYPE,P_SIZE,P_CONTAINER) VALUES (19920101 , 1 , 1 , 1 , 1 , 1 , '1' , 1 , 1 , 1 , 1 , 100 , 1 , 1 , 1 , '2023-06-09' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' ,'ASIA', 'ASIA', 'MFGR#12', 'MFGR#12', 'brand', 'color', 'type', 4 ,'container');"""
sql """INSERT INTO lineorder_flat (LO_ORDERDATE, LO_ORDERKEY, LO_LINENUMBER, LO_CUSTKEY, LO_PARTKEY, LO_SUPPKEY, LO_ORDERPRIORITY, LO_SHIPPRIORITY, LO_QUANTITY, LO_EXTENDEDPRICE, LO_ORDTOTALPRICE, LO_DISCOUNT, LO_REVENUE, LO_SUPPLYCOST, LO_TAX, LO_COMMITDATE, LO_SHIPMODE, C_NAME, C_ADDRESS, C_CITY, C_NATION, C_REGION, C_PHONE, C_MKTSEGMENT, S_NAME, S_ADDRESS, S_CITY, S_NATION, S_REGION, S_PHONE, P_NAME, P_MFGR, P_CATEGORY, P_BRAND, P_COLOR,P_TYPE,P_SIZE,P_CONTAINER)
VALUES (19920101 , 1 , 1 , 1 , 1 , 1 , '1' , 1 , 1 , 1 , 1 , 100 , 1 , 1 , 1 , '2023-06-09' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' , 'ASIA' ,'ASIA', 'ASIA', 'MFGR#12', 'MFGR#12', 'brand', 'color', 'type', 4 ,'container');"""
createMV ("""create materialized view lineorder_q_3_1 as
SELECT
@ -98,12 +91,16 @@ suite ("mv_ssb_q_3_1") {
AND LO_ORDERDATE <= 19971231
GROUP BY C_NATION, S_NATION, YEAR;""")
sql """INSERT INTO lineorder_flat (LO_ORDERDATE, LO_ORDERKEY, LO_LINENUMBER, LO_CUSTKEY, LO_PARTKEY, LO_SUPPKEY, LO_ORDERPRIORITY, LO_SHIPPRIORITY, LO_QUANTITY, LO_EXTENDEDPRICE, LO_ORDTOTALPRICE, LO_DISCOUNT, LO_REVENUE, LO_SUPPLYCOST, LO_TAX, LO_COMMITDATE, LO_SHIPMODE,C_NAME,C_ADDRESS,C_CITY,C_NATION,C_REGION,C_PHONE,C_MKTSEGMENT,S_NAME,S_ADDRESS,S_CITY,S_NATION,S_REGION,S_PHONE,P_NAME,P_MFGR,P_CATEGORY,P_BRAND,P_COLOR,P_TYPE,P_SIZE,P_CONTAINER) VALUES (19930101 , 2 , 2 , 2 , 2 , 2 ,'2',2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,'2023-06-09','shipmode','name','address','city','nation','region','phone','mktsegment','name','address','city','nation','region','phone','name','mfgr','category','brand','color','type',4,'container');"""
sql """INSERT INTO lineorder_flat (LO_ORDERDATE, LO_ORDERKEY, LO_LINENUMBER, LO_CUSTKEY, LO_PARTKEY, LO_SUPPKEY, LO_ORDERPRIORITY, LO_SHIPPRIORITY, LO_QUANTITY, LO_EXTENDEDPRICE, LO_ORDTOTALPRICE, LO_DISCOUNT, LO_REVENUE, LO_SUPPLYCOST, LO_TAX, LO_COMMITDATE, LO_SHIPMODE,C_NAME,C_ADDRESS,C_CITY,C_NATION,C_REGION,C_PHONE,C_MKTSEGMENT,S_NAME,S_ADDRESS,S_CITY,S_NATION,S_REGION,S_PHONE,P_NAME,P_MFGR,P_CATEGORY,P_BRAND,P_COLOR,P_TYPE,P_SIZE,P_CONTAINER)
VALUES (19930101 , 2 , 2 , 2 , 2 , 2 ,'2',2 ,2 ,2 ,2 ,2 ,2 ,2 ,2 ,'2023-06-09','shipmode','name','address','city','nation','region','phone','mktsegment','name','address','city','nation','region','phone','name','mfgr','category','brand','color','type',4,'container');"""
qt_select_star "select * from lineorder_flat order by 1,2,P_MFGR;"
explain {
sql("""SELECT
sql """analyze table lineorder_flat with sync;"""
sql """alter table lineorder_flat modify column LO_ORDERDATE set stats ('row_count'='2');"""
mv_rewrite_success("""SELECT
C_NATION,
S_NATION, (LO_ORDERDATE DIV 10000) AS YEAR,
SUM(LO_REVENUE) AS revenue
@ -114,9 +111,9 @@ suite ("mv_ssb_q_3_1") {
AND LO_ORDERDATE >= 19920101
AND LO_ORDERDATE <= 19971231
GROUP BY C_NATION, S_NATION, YEAR
ORDER BY YEAR ASC, revenue DESC;""")
contains "(lineorder_q_3_1)"
}
ORDER BY YEAR ASC, revenue DESC;""",
"lineorder_q_3_1")
qt_select_mv """SELECT
C_NATION,
S_NATION, (LO_ORDERDATE DIV 10000) AS YEAR,

View File

@ -92,6 +92,8 @@ suite ("mv_ssb_q_4_1") {
sql """analyze table lineorder_flat with sync;"""
sql """alter table lineorder_flat modify column LO_ORDERDATE set stats ('row_count'='2');"""
mv_rewrite_success("""SELECT (LO_ORDERDATE DIV 10000) AS YEAR,
C_NATION,
SUM(LO_REVENUE - LO_SUPPLYCOST) AS profit

View File

@ -119,6 +119,7 @@ suite ("q_4_1_r1") {
GROUP BY YEAR, C_NATION
ORDER BY YEAR ASC, C_NATION ASC;"""
sql """set enable_stats=true;"""
sql """alter table lineorder_flat modify column LO_ORDERDATE set stats ('row_count'='8');"""
mv_rewrite_success("""SELECT (LO_ORDERDATE DIV 10000) AS YEAR,
C_NATION,
SUM(LO_REVENUE - LO_SUPPLYCOST) AS profit

View File

@ -51,6 +51,7 @@ suite ("sum_count") {
sql "analyze table d_table with sync;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='8');"""
mv_rewrite_success("select k1,k4,sum(k2),count(k2) from d_table group by k1,k4;", "kavg")
qt_select_mv "select k1,k4,sum(k2),count(k2) from d_table group by k1,k4 order by 1,2;"

View File

@ -46,6 +46,7 @@ suite ("sum_devide_count") {
sql """analyze table d_table with sync;"""
sql """set enable_stats=false;"""
mv_rewrite_success("select k1,k4,sum(k2)/count(k2) from d_table group by k1,k4 order by k1,k4;", "kavg")
qt_select_mv "select k1,k4,sum(k2)/count(k2) from d_table group by k1,k4 order by k1,k4;"
@ -59,6 +60,7 @@ suite ("sum_devide_count") {
qt_select_mv "select sum(k2)/count(k2) from d_table;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='5');"""
mv_rewrite_success("select k1,k4,sum(k2)/count(k2) from d_table group by k1,k4 order by k1,k4;", "kavg")
mv_rewrite_success("select k1,sum(k2)/count(k2) from d_table group by k1 order by k1;", "kavg")

View File

@ -69,8 +69,8 @@ suite ("test_28741") {
sql """set enable_stats=false;"""
mv_rewrite_fail("select b1 from test where t >= '2023-12-20 17:21:00'", "mv_test")
qt_select "select b1 from test where t >= '2023-12-20 17:21:00'"
sql """set enable_stats=true;"""
sql """alter table test modify column a set stats ('row_count'='2');"""
mv_rewrite_fail("select b1 from test where t >= '2023-12-20 17:21:00'", "mv_test")
}

View File

@ -50,6 +50,7 @@ suite ("test_approx_count_distinct") {
qt_select_mv "select user_id, approx_count_distinct(tag_id) a from user_tags group by user_id order by user_id;"
sql """set enable_stats=true;"""
sql """alter table user_tags modify column time_col set stats ('row_count'='3');"""
mv_rewrite_fail("select * from user_tags order by time_col;", "user_tags_mv")
mv_rewrite_success("select user_id, ndv(tag_id) a from user_tags group by user_id order by user_id;", "user_tags_mv")

View File

@ -53,6 +53,7 @@ suite ("test_base") {
qt_select_mv "SELECT id,created_at FROM dwd order by 1, 2;"
sql """set enable_stats=true;"""
sql """alter table dwd modify column id set stats ('row_count'='2');"""
mv_rewrite_success("SELECT created_at, id FROM dwd where created_at = '2020-09-09 00:00:00' order by 1, 2;", "dwd_mv")
mv_rewrite_success("SELECT id,created_at FROM dwd where id is not null order by 1, 2;", "dwd_mv")

View File

@ -40,5 +40,6 @@ suite ("test_casewhen") {
qt_select_mv "select store_id, sum(case when sale_amt>10 then 1 else 2 end) from sales_records group by store_id order by 1;"
sql """set enable_stats=true;"""
sql """alter table sales_records modify column record_id set stats ('row_count'='4');"""
mv_rewrite_success("select store_id, sum(case when sale_amt>10 then 1 else 2 end) from sales_records group by store_id order by 1;", "store_amt")
}

View File

@ -49,6 +49,8 @@ suite("test_create_mv") {
sql """ insert into ${tableName} values ('2024-03-20 10:00:00', 'a', 'b', 1) """
sql """alter table test_mv_10010 modify column load_time set stats ('row_count'='1');"""
sql """
create materialized view mv_1 as
select

View File

@ -37,6 +37,8 @@ suite ("create_mv_complex_type") {
sql """insert into base_table select 1, 100000, 1.0, '{"jsonk1": 123}', [100, 200], {"k1": 10}, {1, 2};"""
sql """alter table base_table modify column c_int set stats ('row_count'='1');"""
def success = false
// 1. special column - mv dup key

View File

@ -57,6 +57,7 @@ suite ("test_doc_e4") {
qt_select_mv "select year(k4)+month(k4) from d_table where year(k4) = 2020 order by 1;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='3');"""
mv_rewrite_success("select abs(k1)+k2+1,sum(abs(k2+2)+k3+3) from d_table group by abs(k1)+k2+1 order by 1,2;", "k1a2p2ap3ps")
mv_rewrite_success("select bin(abs(k1)+k2+1),sum(abs(k2+2)+k3+3) from d_table group by bin(abs(k1)+k2+1);", "k1a2p2ap3ps")

View File

@ -53,6 +53,7 @@ suite ("test_dup_group_by_mv_abs") {
qt_select_mv_sub "select sum(abs(k2)) from d_table group by k1 order by k1;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='4');"""
mv_rewrite_success("select k1,sum(abs(k2)) from d_table group by k1;", "k12sa")
mv_rewrite_success("select sum(abs(k2)) from d_table group by k1;", "k12sa")
}

View File

@ -53,6 +53,7 @@ suite ("test_dup_group_by_mv_plus") {
qt_select_mv_sub "select sum(k2+1) from d_table group by k1 order by k1;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='4');"""
mv_rewrite_success("select k1,sum(k2+1) from d_table group by k1;", "k12sp")
mv_rewrite_success("select sum(k2+1) from d_table group by k1;", "k12sp")

View File

@ -65,6 +65,7 @@ suite ("test_dup_mv_abs") {
qt_select_group_mv_not "select sum(abs(k2)) from d_table group by k3 order by k3;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='4');"""
mv_rewrite_success("select k1,abs(k2) from d_table order by k1;", "k12a")
mv_rewrite_success("select abs(k2) from d_table order by k1;", "k12a")

View File

@ -64,6 +64,7 @@ suite ("test_dup_mv_bin") {
qt_select_group_mv_not "select group_concat(bin(k2)) from d_table group by k3 order by k3;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='4');"""
mv_rewrite_success("select k1,bin(k2) from d_table order by k1;", "k12b")
mv_rewrite_success("select bin(k2) from d_table order by k1;", "k12b")

View File

@ -46,6 +46,7 @@ suite ("test_dup_mv_bitmap_hash") {
mv_rewrite_success("select bitmap_union_count(to_bitmap(k2)) from d_table group by k1 order by k1;", "k1g2bm")
qt_select_mv "select bitmap_union_count(to_bitmap(k2)) from d_table group by k1 order by k1;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='4');"""
mv_rewrite_success("select bitmap_union_count(to_bitmap(k2)) from d_table group by k1 order by k1;", "k1g2bm")
createMV "create materialized view k1g3bm as select k1,bitmap_union(bitmap_hash(k3)) from d_table group by k1;"
@ -58,11 +59,13 @@ suite ("test_dup_mv_bitmap_hash") {
qt_select_star "select * from d_table order by k1,k2,k3;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='4');"""
sql """analyze table d_table with sync;"""
sql """set enable_stats=false;"""
mv_rewrite_success("select k1,bitmap_union_count(bitmap_hash(k3)) from d_table group by k1;", "k1g3bm")
qt_select_mv_sub "select k1,bitmap_union_count(bitmap_hash(k3)) from d_table group by k1 order by k1;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='4');"""
mv_rewrite_success("select k1,bitmap_union_count(bitmap_hash(k3)) from d_table group by k1;", "k1g3bm")
}

View File

@ -70,7 +70,7 @@ suite ("test_dup_mv_plus") {
qt_select_mv "select k1,k2+1 from d_table order by k2;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k4 set stats ('row_count'='3');"""
mv_rewrite_success("select k1,k2+1 from d_table order by k1;", "k12p")
mv_rewrite_success("select k2+1 from d_table order by k1;", "k12p")

View File

@ -47,6 +47,7 @@ suite ("test_dup_mv_repeat") {
qt_select_mv "SELECT s AS s, sum(n) / count(DISTINCT dt) AS n FROM db1 GROUP BY GROUPING SETS((s)) order by 1;"
sql """set enable_stats=true;"""
sql """alter table db1 modify column dt set stats ('row_count'='2');"""
mv_rewrite_success("SELECT s AS s, sum(n) / count(DISTINCT dt) AS n FROM db1 GROUP BY GROUPING SETS((s)) order by 1;",
"dbviwe")
}

View File

@ -45,6 +45,7 @@ suite ("test_dup_mv_year") {
qt_select_mv "select k1,year(k2) from d_table order by k1;"
sql """set enable_stats=true;"""
sql """alter table d_table modify column k1 set stats ('row_count'='4');"""
mv_rewrite_success("select k1,year(k2) from d_table order by k1;", "k12y")
createMV "create materialized view k13y as select k1,year(k3) from d_table;"

View File

@ -56,6 +56,7 @@ suite ("test_mv_dp") {
time 10000 // limit inflight 10s
}
*/
mv_rewrite_success("""select d,
bitmap_union_count(bitmap_from_array(cast(uid_list as array<bigint>))),
bitmap_union_count(bitmap_from_array(if(status='success', cast(uid_list as array<bigint>), array())))
@ -68,6 +69,7 @@ suite ("test_mv_dp") {
from dp
group by d order by 1;"""
sql """set enable_stats=true;"""
sql """alter table dp modify column d set stats ('row_count'='4');"""
mv_rewrite_success("""select d,
bitmap_union_count(bitmap_from_array(cast(uid_list as array<bigint>))),
bitmap_union_count(bitmap_from_array(if(status='success', cast(uid_list as array<bigint>), array())))

View File

@ -39,6 +39,8 @@ suite ("test_mv_mor") {
sql "insert into u_table select 1,1,1,2;"
sql "insert into u_table select 1,2,1,2;"
sql """alter table u_table modify column k1 set stats ('row_count'='2');"""
// should not match the mv because pre-aggregation is off; the mv must contain all key columns to keep the row count correct
mv_rewrite_fail("select k1,k2+k3 from u_table order by k1;", "k123p")
qt_select_mv "select k1,k2+k3 from u_table order by k1;"

View File

@ -50,6 +50,7 @@ suite ("test_ndv") {
qt_select_mv "select user_id, approx_count_distinct(tag_id) a from user_tags group by user_id order by user_id;"
sql """set enable_stats=true;"""
sql """alter table user_tags modify column time_col set stats ('row_count'='3');"""
mv_rewrite_fail("select * from user_tags order by time_col;", "user_tags_mv")
mv_rewrite_success("select user_id, ndv(tag_id) a from user_tags group by user_id order by user_id;", "user_tags_mv")

View File

@ -51,6 +51,7 @@ suite ("test_nvl") {
qt_select_mv "select ifnull(id,0) from dwd order by 1;"
sql """set enable_stats=true;"""
sql """alter table dwd modify column id set stats ('row_count'='2');"""
mv_rewrite_success("select nvl(id,0) from dwd order by 1;", "dwd_mv")
mv_rewrite_success("select ifnull(id,0) from dwd order by 1;", "dwd_mv")

View File

@ -60,6 +60,7 @@ suite ("test_o2") {
qt_select_mv "select ts,metric_name,platform,sum(count_value) from o2_order_events group by ts,metric_name,platform;"
sql """set enable_stats=true;"""
sql """alter table o2_order_events modify column ts set stats ('row_count'='2');"""
mv_rewrite_success("select ts,metric_name,platform,sum(count_value) from o2_order_events group by ts,metric_name,platform;",
"o2_order_events_mv")
}

View File

@ -56,6 +56,7 @@ suite ("test_substr") {
qt_select_mv "SELECT substr(created_at,1,10) as statistic_date, max(dt) as dt FROM dwd group by substr(created_at,1,10);"
sql """set enable_stats=true;"""
sql """alter table dwd modify column id set stats ('row_count'='2');"""
mv_rewrite_success("SELECT substr(created_at,1,10) as statistic_date, max(dt) as dt FROM dwd group by substr(created_at,1,10);",
"dwd_mv")
}

View File

@ -66,6 +66,7 @@ suite ("test_tbl_name") {
group by id order by 1,2;
"""
sql """set enable_stats=true;"""
sql """alter table functionality_olap modify column id set stats ('row_count'='2');"""
mv_rewrite_success("""select
functionality_olap.id as id,
sum(functionality_olap.score) as score_max

View File

@ -62,6 +62,7 @@ suite ("test_upper_alias") {
qt_select_mv "SELECT d_a AS d_b FROM test_0401 order by 1;"
sql """set enable_stats=true;"""
sql """alter table test_0401 modify column d_b set stats ('row_count'='3');"""
mv_rewrite_any_success("SELECT upper(d_b) AS d_b FROM test_0401 GROUP BY upper(d_b) order by 1;",
["test_0401_mv", "test_0401_mv2"])

View File

@ -53,6 +53,7 @@ suite ("test_user_activity") {
qt_select_group_mv "select n_dx, percentile_approx(n_duration, 0.5) as p50, percentile_approx(n_duration, 0.90) as p90 FROM u_axx GROUP BY n_dx;"
sql """set enable_stats=true;"""
sql """alter table u_axx modify column r_xx set stats ('row_count'='3');"""
mv_rewrite_success("select n_dx, percentile_approx(n_duration, 0.5) as p50, percentile_approx(n_duration, 0.90) as p90 FROM u_axx GROUP BY n_dx;",
"session_distribution_2")
}

View File

@ -73,6 +73,7 @@ suite ("unique") {
qt_select_star "select * from u_table order by k1;"
sql """set enable_stats=true;"""
sql """alter table u_table modify column k1 set stats ('row_count'='3');"""
mv_rewrite_success("select k3,length(k1),k2 from u_table order by 1,2,3;", "k31l42")
// todo: support match query

View File

@ -36,6 +36,7 @@ suite ("testAggQueryOnAggMV1") {
sql """insert into emps values("2020-01-02",2,"b",2,2,2);"""
sql """insert into emps values("2020-01-03",3,"c",3,3,3);"""
sql """alter table emps modify column time_col set stats ('row_count'='4');"""
createMV("create materialized view emps_mv as select deptno, sum(salary), max(commission) from emps group by deptno;")
createMV("create materialized view emps_mv_count_key as select deptno, count(deptno) from emps group by deptno;")

View File

@ -51,6 +51,7 @@ suite ("testAggQueryOnAggMV10") {
qt_select_mv "select deptno, commission, sum(salary) + 1 from emps group by rollup (deptno, commission) order by 1,2;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='4');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
mv_rewrite_success("select deptno, commission, sum(salary) + 1 from emps group by rollup (deptno, commission);",

View File

@ -50,6 +50,7 @@ suite ("testAggQueryOnAggMV11") {
qt_select_mv "select deptno, count(salary) + count(1) from emps group by deptno order by 1;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='4');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
mv_rewrite_fail("select deptno, count(salary) + count(1) from emps group by deptno;", "emps_mv")

View File

@ -53,6 +53,7 @@ suite ("testAggQueryOnAggMV2") {
qt_select_mv "select * from (select deptno, sum(salary) as sum_salary from emps group by deptno) a where (sum_salary * 2) > 3 order by deptno ;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='3');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
mv_rewrite_success("select * from (select deptno, sum(salary) as sum_salary from emps group by deptno) a where (sum_salary * 2) > 3 order by deptno ;",

View File

@ -38,7 +38,6 @@ suite ("testAggQueryOnAggMV3") {
sql """insert into emps values("2020-01-04",4,"d",21,4,4);"""
createMV("create materialized view emps_mv as select deptno, commission, sum(salary) from emps group by deptno, commission;")
sql "analyze table emps with sync;"
@ -56,6 +55,7 @@ suite ("testAggQueryOnAggMV3") {
qt_select_mv "select commission, sum(salary) from emps where commission = 100 group by commission order by commission;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='4');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
mv_rewrite_success("select commission, sum(salary) from emps where deptno > 0 and commission * (deptno + commission) = 100 group by commission order by commission;",

View File

@ -51,6 +51,7 @@ suite ("testAggQuqeryOnAggMV5") {
qt_select_mv "select * from (select deptno, sum(salary) as sum_salary from emps group by deptno) a where sum_salary>10 order by 1;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='4');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
mv_rewrite_success("select * from (select deptno, sum(salary) as sum_salary from emps group by deptno) a where sum_salary>0;",

View File

@ -51,6 +51,7 @@ suite ("testAggQuqeryOnAggMV6") {
qt_select_mv "select * from (select deptno, sum(salary) as sum_salary from emps where deptno>=20 group by deptno) a where sum_salary>10 order by 1;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='4');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
mv_rewrite_success("select * from (select deptno, sum(salary) as sum_salary from emps where deptno>=0 group by deptno) a where sum_salary>10;",

View File

@ -50,6 +50,8 @@ suite ("testAggQuqeryOnAggMV7") {
qt_select_mv "select deptno, sum(salary) from emps where deptno>=20 group by deptno order by 1;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='4');"""
sql """alter table emps modify column time_col set stats ('row_count'='4');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
mv_rewrite_success("select deptno, sum(salary) from emps where deptno>=20 group by deptno;", "emps_mv")

View File

@ -50,6 +50,7 @@ suite ("testAggregateMVCalcAggFunctionQuery") {
qt_select_mv "select deptno, sum(salary + 1) from emps where deptno > 10 group by deptno order by deptno;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='4');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
mv_rewrite_fail("select deptno, sum(salary + 1) from emps where deptno > 10 group by deptno;", "emps_mv")

View File

@ -51,7 +51,7 @@ suite ("testBitmapUnionInQuery") {
qt_select_mv "select user_id, bitmap_count(bitmap_union(to_bitmap(tag_id))) a from user_tags group by user_id having a>1 order by a;"
sql """set enable_stats=true;"""
sql """alter table user_tags modify column time_col set stats ('row_count'='3');"""
mv_rewrite_fail("select * from user_tags order by time_col;", "user_tags_mv")
mv_rewrite_success("select user_id, bitmap_union_count(to_bitmap(tag_id)) a from user_tags group by user_id having a>1 order by a;",

View File

@ -66,6 +66,10 @@ suite ("testCountDistinctToBitmap") {
sql """insert into user_tags2 values("2020-01-01",1,"a",1);"""
sql """insert into user_tags2 values("2020-01-02",2,"b",2);"""
sql """alter table user_tags modify column time_col set stats ('row_count'='3');"""
sql """alter table user_tags2 modify column time_col set stats ('row_count'='3');"""
createMV("create materialized view user_tags_mv as select user_id, bitmap_union(to_bitmap(tag_id)) from user_tags2 group by user_id;")
sql """insert into user_tags2 values("2020-01-01",1,"a",2);"""

View File

@ -47,6 +47,7 @@ suite ("testIncorrectMVRewriteInSubquery") {
qt_select_mv "select user_id, bitmap_union(to_bitmap(tag_id)) from user_tags where user_name in (select user_name from user_tags group by user_name having bitmap_union_count(to_bitmap(tag_id)) >1 ) group by user_id order by user_id;"
sql """set enable_stats=true;"""
sql """alter table user_tags modify column time_col set stats ('row_count'='3');"""
mv_rewrite_fail("select * from user_tags order by time_col;", "user_tags_mv")
mv_rewrite_fail("select user_id, bitmap_union(to_bitmap(tag_id)) from user_tags where user_name in (select user_name from user_tags group by user_name having bitmap_union_count(to_bitmap(tag_id)) >1 ) group by user_id order by user_id;",

View File

@ -45,6 +45,7 @@ suite ("testIncorrectRewriteCountDistinct") {
qt_select_mv "select user_name, count(distinct tag_id) from user_tags group by user_name order by user_name;"
sql """set enable_stats=true;"""
sql """alter table user_tags modify column time_col set stats ('row_count'='3');"""
mv_rewrite_fail("select * from user_tags order by time_col;", "user_tags_mv")
mv_rewrite_fail("select user_name, count(distinct tag_id) from user_tags group by user_name;", "user_tags_mv")

View File

@ -43,6 +43,8 @@ suite ("testJoinOnLeftProjectToJoin") {
partition by range (time_col) (partition p1 values less than MAXVALUE) distributed by hash(time_col) buckets 3 properties('replication_num' = '1');
"""
sql """alter table depts modify column time_col set stats ('row_count'='3');"""
sql """insert into depts values("2020-01-02",2,"b",2);"""
sql """insert into depts values("2020-01-03",3,"c",3);"""
sql """insert into depts values("2020-01-02",2,"b",1);"""
@ -59,6 +61,7 @@ suite ("testJoinOnLeftProjectToJoin") {
qt_select_mv "select * from (select deptno , sum(salary) from emps group by deptno) A join (select deptno, max(cost) from depts group by deptno ) B on A.deptno = B.deptno order by A.deptno;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='3');"""
mv_rewrite_all_success("select * from (select deptno , sum(salary) from emps group by deptno) A join (select deptno, max(cost) from depts group by deptno ) B on A.deptno = B.deptno;",
["emps_mv", "depts_mv"])
}

View File

@ -50,6 +50,7 @@ suite ("testNDVToHll") {
qt_select_mv "select user_id, approx_count_distinct(tag_id) a from user_tags group by user_id order by user_id;"
sql """set enable_stats=true;"""
sql """alter table user_tags modify column time_col set stats ('row_count'='3');"""
mv_rewrite_fail("select * from user_tags order by time_col;", "user_tags_mv")
mv_rewrite_success("select user_id, ndv(tag_id) a from user_tags group by user_id order by user_id;", "user_tags_mv")

View File

@ -49,6 +49,7 @@ suite ("testOrderByQueryOnProjectView") {
qt_select_mv "select empid from emps order by deptno;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='4');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
mv_rewrite_success("select empid from emps where deptno > 0 order by deptno;", "emps_mv")

View File

@ -61,6 +61,7 @@ suite ("testProjectionMV1") {
qt_select_mv "select deptno, sum(empid) from emps group by deptno order by deptno;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='3');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
mv_rewrite_success("select empid, deptno from emps where deptno > 0 order by empid;", "emps_mv")

View File

@ -51,6 +51,7 @@ suite ("testProjectionMV2") {
qt_select_base "select name from emps where deptno -1 = 0 order by empid;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='3');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
mv_rewrite_success("select empid + 1 from emps where deptno = 1 order by empid;", "emps_mv")

View File

@ -53,6 +53,7 @@ suite ("testProjectionMV3") {
qt_select_mv2 "select name from emps where deptno = 1 order by empid;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='3');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
mv_rewrite_success("select empid + 1, name from emps where deptno = 1 order by empid;", "emps_mv")

View File

@ -52,6 +52,7 @@ suite ("testProjectionMV4") {
qt_select_base "select empid from emps where deptno > 1 and empid > 1 order by empid;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='3');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
mv_rewrite_fail("select empid from emps where deptno > 1 and empid > 1 order by empid;", "emps_mv")

View File

@ -70,4 +70,5 @@ suite ("testQueryOnStar") {
contains "(emps_mv)"
}
qt_select_mv "select ref_1.`empid` as c0 from tpch_tiny_region as ref_0 left join emps as ref_1 on (ref_0.`r_comment` = ref_1.`name` ) where true order by ref_0.`r_regionkey`,ref_0.`r_regionkey` desc ,ref_0.`r_regionkey`,ref_0.`r_regionkey`;"
}

View File

@ -29,6 +29,8 @@ suite ("testSelectMVWithTableAlias") {
partition by range (time_col) (partition p1 values less than MAXVALUE) distributed by hash(time_col) buckets 3 properties('replication_num' = '1');
"""
sql """alter table user_tags modify column time_col set stats ('row_count'='3');"""
sql """insert into user_tags values("2020-01-01",1,"a",1);"""
sql """insert into user_tags values("2020-01-02",2,"b",2);"""

View File

@ -52,6 +52,7 @@ suite ("testSingleMVMultiUsage") {
}
qt_select_mv "select * from (select deptno, empid from emps where deptno>100) A join (select deptno, empid from emps where deptno >200) B using (deptno) order by 1;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='4');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
explain {

View File

@ -38,7 +38,6 @@ suite ("testSubQuery") {
sql """insert into emps values("2020-01-02",2,"b",2,2,2);"""
sql """insert into emps values("2020-01-03",3,"c",3,3,3);"""
createMV("create materialized view emps_mv as select deptno, empid from emps;")
sql """insert into emps values("2020-01-01",1,"a",1,1,1);"""
@ -52,5 +51,6 @@ suite ("testSubQuery") {
qt_select_mv "select empid, deptno, salary from emps e1 where empid = (select max(empid) from emps where deptno = e1.deptno) order by deptno;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='4');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
}

View File

@ -35,6 +35,7 @@ suite ("testUnionDistinct") {
sql """insert into emps values("2020-01-02",2,"b",2,2,2);"""
sql """insert into emps values("2020-01-03",3,"c",3,3,3);"""
createMV("create materialized view emps_mv as select empid, deptno from emps order by empid, deptno;")
sql """insert into emps values("2020-01-01",1,"a",1,1,1);"""
@ -52,6 +53,7 @@ suite ("testUnionDistinct") {
}
qt_select_mv "select * from (select empid, deptno from emps where empid >1 union select empid, deptno from emps where empid <0) t order by 1;"
sql """set enable_stats=true;"""
sql """alter table emps modify column time_col set stats ('row_count'='4');"""
mv_rewrite_fail("select * from emps order by empid;", "emps_mv")
explain {

Some files were not shown because too many files have changed in this diff Show More