[Chore](vectorized) remove all isVectorized (#21076)

isVectorized is always true now
Author: Pxl
Date: 2023-06-25 23:13:34 +08:00
Committed by: GitHub
parent 58b3e5ebdb
commit 0122aa79df
27 changed files with 93 additions and 359 deletions
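
The change is mechanical across all 27 files: with the row-based execution engine gone, VectorizedUtil.isVectorized() can no longer return false on any planning path, so each call site either folds the check to true or deletes the branch that served the non-vectorized engine. A minimal before/after sketch of the dominant pattern (taken from the PlanNode hunks below; the comments are editorial):

    // Before: fork on a check that is always true in practice
    this.planNodeName = VectorizedUtil.isVectorized() ? "V" + planNodeName : planNodeName;

    // After: keep only the vectorized branch
    this.planNodeName = "V" + planNodeName;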

View File

@@ -23,7 +23,6 @@ package org.apache.doris.analysis;
import org.apache.doris.catalog.FunctionSet;
import org.apache.doris.catalog.Type;
import org.apache.doris.common.AnalysisException;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.planner.DataPartition;
import org.apache.doris.qe.ConnectContext;
import org.apache.doris.thrift.TPartitionType;
@@ -257,7 +256,6 @@ public final class AggregateInfo extends AggregateInfoBase {
// for vectorized execution, we force it to using hash set to execution
if (distinctAggExprs.size() == 1
&& distinctAggExprs.get(0).getFnParams().isDistinct()
- && VectorizedUtil.isVectorized()
&& ConnectContext.get().getSessionVariable().enableSingleDistinctColumnOpt()) {
isSetUsingSetForDistinct = true;
}

View File

@@ -29,7 +29,6 @@ import org.apache.doris.catalog.PrimitiveType;
import org.apache.doris.catalog.Type;
import org.apache.doris.common.AnalysisException;
import org.apache.doris.common.TreeNode;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.thrift.TExprNode;
import com.google.common.base.Joiner;
@@ -263,18 +262,6 @@ public class AnalyticExpr extends Expr {
* that we want to equal.
*/
public static Expr rewrite(AnalyticExpr analyticExpr) {
- Function fn = analyticExpr.getFnCall().getFn();
- // TODO(zc)
- // if (AnalyticExpr.isPercentRankFn(fn)) {
- // return createPercentRank(analyticExpr);
- // } else if (AnalyticExpr.isCumeDistFn(fn)) {
- // return createCumeDist(analyticExpr);
- // } else if (AnalyticExpr.isNtileFn(fn)) {
- // return createNtile(analyticExpr);
- // }
- if (isNTileFn(fn) && !VectorizedUtil.isVectorized()) {
- return createNTile(analyticExpr);
- }
return null;
}
@@ -573,19 +560,6 @@
standardize(analyzer);
- // But in Vectorized mode, after calculate a window, will be call reset() to reset state,
- // And then restarted calculate next new window;
- if (!VectorizedUtil.isVectorized()) {
- // min/max is not currently supported on sliding windows (i.e. start bound is not
- // unbounded).
- if (window != null && isMinMax(fn)
- && window.getLeftBoundary().getType() != BoundaryType.UNBOUNDED_PRECEDING) {
- throw new AnalysisException(
- "'" + getFnCall().toSql() + "' is only supported with an "
- + "UNBOUNDED PRECEDING start bound.");
- }
- }
setChildren();
}
@@ -752,28 +726,6 @@
window.getLeftBoundary());
fnCall = new FunctionCallExpr(new FunctionName(LASTVALUE),
getFnCall().getParams());
- } else {
- //TODO: Now we don't want to first_value to rewrite in vectorized mode;
- //if have to rewrite in future, could exec this rule;
- if (!VectorizedUtil.isVectorized()) {
- List<Expr> paramExprs = Expr.cloneList(getFnCall().getParams().exprs());
- if (window.getRightBoundary().getType() == BoundaryType.PRECEDING) {
- // The number of rows preceding for the end bound determines the number of
- // rows at the beginning of each partition that should have a NULL value.
- paramExprs.add(window.getRightBoundary().getExpr());
- } else {
- // -1 indicates that no NULL values are inserted even though we set the end
- // bound to the start bound (which is PRECEDING) below; this is different from
- // the default behavior of windows with an end bound PRECEDING.
- paramExprs.add(new IntLiteral(-1, Type.BIGINT));
- }
- window = new AnalyticWindow(window.getType(),
- new Boundary(BoundaryType.UNBOUNDED_PRECEDING, null),
- window.getLeftBoundary());
- fnCall = new FunctionCallExpr("FIRST_VALUE_REWRITE", new FunctionParams(paramExprs));
- }
- }
fnCall.setIsAnalyticFnCall(true);

View File

@@ -40,7 +40,6 @@ import org.apache.doris.common.ErrorReport;
import org.apache.doris.common.IdGenerator;
import org.apache.doris.common.Pair;
import org.apache.doris.common.util.TimeUtils;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.planner.PlanNode;
import org.apache.doris.planner.RuntimeFilter;
import org.apache.doris.qe.ConnectContext;
@@ -955,11 +954,7 @@ public class Analyzer {
result = globalState.descTbl.addSlotDescriptor(d);
result.setColumn(col);
boolean isNullable;
- if (VectorizedUtil.isVectorized()) {
- isNullable = col.isAllowNull();
- } else {
- isNullable = col.isAllowNull() || isOuterJoined(d.getId());
- }
+ isNullable = col.isAllowNull();
result.setIsNullable(isNullable);
slotRefMap.put(key, result);
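
This Analyzer hunk is one of the few spots where the cleanup states a real semantic rule rather than just dropping a dead branch: a slot is now nullable only if its column allows NULL, and no longer merely because it sits on the null-producing side of an outer join. Condensed from the hunk above:

    // Removed (row engine): outer-joined slots were forced nullable
    isNullable = col.isAllowNull() || isOuterJoined(d.getId());

    // Kept (vectorized engine): nullability is the column's own property
    isNullable = col.isAllowNull();

Outer-join NULLs are instead modeled explicitly, e.g. through the TupleIsNullPredicate wrapping visible in the JoinNodeBase hunks later in this commit.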

View File

@@ -19,7 +19,6 @@ package org.apache.doris.analysis;
import org.apache.doris.common.AnalysisException;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.qe.ConnectContext;
import org.apache.doris.thrift.TExprNode;
import org.apache.doris.thrift.TRuntimeFilterType;
@@ -81,10 +80,6 @@ public class BitmapFilterPredicate extends Predicate {
throw new AnalysisException(
"Please enable the session variable 'enable_projection' through `set enable_projection = true;`");
}
- if (!VectorizedUtil.isVectorized()) {
- throw new AnalysisException("In bitmap syntax is currently only supported in the vectorization engine.");
- }
}
@Override

View File

@@ -37,7 +37,6 @@ import org.apache.doris.common.AnalysisException;
import org.apache.doris.common.Config;
import org.apache.doris.common.TreeNode;
import org.apache.doris.common.io.Writable;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.qe.ConnectContext;
import org.apache.doris.rewrite.mvrewrite.MVExprEquivalent;
import org.apache.doris.statistics.ExprStats;
@@ -1914,8 +1913,7 @@ public abstract class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
protected Function getTableFunction(String name, Type[] argTypes, Function.CompareMode mode) {
FunctionName fnName = new FunctionName(name);
- Function searchDesc = new Function(fnName, Arrays.asList(argTypes), Type.INVALID, false,
- VectorizedUtil.isVectorized());
+ Function searchDesc = new Function(fnName, Arrays.asList(argTypes), Type.INVALID, false);
Function f = Env.getCurrentEnv().getTableFunction(searchDesc, mode);
return f;
}

View File

@@ -37,7 +37,6 @@ import org.apache.doris.catalog.Type;
import org.apache.doris.common.AnalysisException;
import org.apache.doris.common.ErrorCode;
import org.apache.doris.common.ErrorReport;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.mysql.privilege.PrivPredicate;
import org.apache.doris.qe.ConnectContext;
import org.apache.doris.thrift.TExprNode;
@@ -336,11 +335,8 @@ public class FunctionCallExpr extends Expr {
this(fnName, params, false);
this.orderByElements = orderByElements;
if (!orderByElements.isEmpty()) {
- if (!VectorizedUtil.isVectorized()) {
- throw new AnalysisException(
- "ORDER BY for arguments only support in vec exec engine");
- } else if (!AggregateFunction.SUPPORT_ORDER_BY_AGGREGATE_FUNCTION_NAME_SET.contains(
- fnName.getFunction().toLowerCase())) {
+ if (!AggregateFunction.SUPPORT_ORDER_BY_AGGREGATE_FUNCTION_NAME_SET
+ .contains(fnName.getFunction().toLowerCase())) {
throw new AnalysisException(
"ORDER BY not support for the function:" + fnName.getFunction().toLowerCase());
}
@@ -1274,9 +1270,6 @@
}
// Prevent the cast type in vector exec engine
Type type = getChild(0).type;
- if (!VectorizedUtil.isVectorized()) {
- type = getChild(0).type.getMaxResolutionType();
- }
fn = getBuiltinFunction(fnName.getFunction(), new Type[] { type },
Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
} else if (fnName.getFunction().equalsIgnoreCase("count_distinct")) {

View File

@@ -26,7 +26,6 @@ import org.apache.doris.common.AnalysisException;
import org.apache.doris.common.ErrorCode;
import org.apache.doris.common.ErrorReport;
import org.apache.doris.common.UserException;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.rewrite.ExprRewriter;
import org.apache.doris.thrift.TQueryOptions;
@@ -194,10 +193,6 @@ public abstract class QueryStmt extends StatementBase implements Queriable {
}
private void analyzeLimit(Analyzer analyzer) throws AnalysisException {
- if (!VectorizedUtil.isVectorized() && limitElement.getOffset() > 0 && !hasOrderByClause()) {
- throw new AnalysisException("OFFSET requires an ORDER BY clause: "
- + limitElement.toSql().trim());
- }
limitElement.analyze(analyzer);
}

View File

@@ -29,7 +29,6 @@ import org.apache.doris.catalog.Type;
import org.apache.doris.common.AnalysisException;
import org.apache.doris.common.TableAliasGenerator;
import org.apache.doris.common.UserException;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.policy.RowPolicy;
import org.apache.doris.qe.ConnectContext;
@@ -864,8 +863,7 @@ public class StmtRewriter {
// For the case of a NOT IN with an eq join conjunct, replace the join
// conjunct with a conjunct that uses the null-matching eq operator.
if (expr instanceof InPredicate && markTuple == null) {
- joinOp = VectorizedUtil.isVectorized()
- ? JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN : JoinOperator.LEFT_ANTI_JOIN;
+ joinOp = JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN;
List<TupleId> tIds = Lists.newArrayList();
joinConjunct.getIds(tIds, null);
if (tIds.size() <= 1 || !tIds.contains(inlineView.getDesc().getId())) {

View File

@@ -136,7 +136,6 @@ public class AliasFunction extends Function {
aliasFunction.setUserVisible(userVisible);
aliasFunction.originFunction = originFunction;
aliasFunction.parameters = parameters;
- aliasFunction.vectorized = isVectorized;
return aliasFunction;
}

View File

@@ -757,10 +757,6 @@ public class Function implements Writable {
return row;
}
- boolean isVectorized() {
- return vectorized;
- }
public void setNullableMode(NullableMode nullableMode) {
this.nullableMode = nullableMode;
}

View File

@@ -372,7 +372,7 @@ public class FunctionSet<T> {
if (templateFunction instanceof ScalarFunction) {
ScalarFunction f = (ScalarFunction) templateFunction;
specializedFunction = new ScalarFunction(f.getFunctionName(), newArgTypes, newRetType.get(0), f.hasVarArgs(),
- f.getSymbolName(), f.getBinaryType(), f.isUserVisible(), f.isVectorized(), f.getNullableMode());
+ f.getSymbolName(), f.getBinaryType(), f.isUserVisible(), true, f.getNullableMode());
} else {
throw new TypeException(templateFunction
+ " is not support for template since it's not a ScalarFunction");
@@ -417,7 +417,7 @@
if (newRetType != null && inferenceFunction instanceof ScalarFunction) {
ScalarFunction f = (ScalarFunction) inferenceFunction;
return new ScalarFunction(f.getFunctionName(), Lists.newArrayList(newTypes), newRetType, f.hasVarArgs(),
- f.getSymbolName(), f.getBinaryType(), f.isUserVisible(), f.isVectorized(), f.getNullableMode());
+ f.getSymbolName(), f.getBinaryType(), f.isUserVisible(), true, f.getNullableMode());
}
return null;
}

View File

@@ -20,20 +20,6 @@ package org.apache.doris.common.util;
import org.apache.doris.qe.ConnectContext;
public class VectorizedUtil {
- /**
- * 1. Return false if there is no current connection (Rule1 to be changed)
- * 2. Returns the vectorized switch value of the query 'globalState.enableQueryVec'
- * 3. If it is not currently a query, return the vectorized switch value of the session 'enableVectorizedEngine'
- * @return true: vec. false: non-vec
- */
- public static boolean isVectorized() {
- ConnectContext connectContext = ConnectContext.get();
- if (connectContext == null) {
- return false;
- }
- return true;
- }
public static boolean isPipeline() {
ConnectContext connectContext = ConnectContext.get();
if (connectContext == null) {

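Note what the deleted method actually did: after earlier cleanups it returned false only when ConnectContext.get() was null and true otherwise, despite its three-rule javadoc. Since the call sites removed in this commit all run during analysis and planning, where a ConnectContext is attached, the method was effectively the constant below, which is what the commit title means by "isVectorized is always true now":

    // Effective behavior at every call site touched here (editorial sketch)
    public static boolean isVectorized() {
        return true;
    }
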
View File

@@ -542,8 +542,7 @@ public class Load {
*/
public static void initColumns(Table tbl, List<ImportColumnDesc> columnExprs,
Map<String, Pair<String, List<String>>> columnToHadoopFunction) throws UserException {
- initColumns(tbl, columnExprs, columnToHadoopFunction, null, null, null, null, null, null, null, false, false,
- false);
+ initColumns(tbl, columnExprs, columnToHadoopFunction, null, null, null, null, null, null, null, false, false);
}
/*
@@ -553,12 +552,11 @@
public static void initColumns(Table tbl, LoadTaskInfo.ImportColumnDescs columnDescs,
Map<String, Pair<String, List<String>>> columnToHadoopFunction, Map<String, Expr> exprsByName,
Analyzer analyzer, TupleDescriptor srcTupleDesc, Map<String, SlotDescriptor> slotDescByName,
- List<Integer> srcSlotIds, TFileFormatType formatType, List<String> hiddenColumns, boolean useVectorizedLoad,
- boolean isPartialUpdate)
+ List<Integer> srcSlotIds, TFileFormatType formatType, List<String> hiddenColumns, boolean isPartialUpdate)
throws UserException {
rewriteColumns(columnDescs);
initColumns(tbl, columnDescs.descs, columnToHadoopFunction, exprsByName, analyzer, srcTupleDesc, slotDescByName,
- srcSlotIds, formatType, hiddenColumns, useVectorizedLoad, true, isPartialUpdate);
+ srcSlotIds, formatType, hiddenColumns, true, isPartialUpdate);
}
/*
@@ -572,7 +570,7 @@
private static void initColumns(Table tbl, List<ImportColumnDesc> columnExprs,
Map<String, Pair<String, List<String>>> columnToHadoopFunction, Map<String, Expr> exprsByName,
Analyzer analyzer, TupleDescriptor srcTupleDesc, Map<String, SlotDescriptor> slotDescByName,
- List<Integer> srcSlotIds, TFileFormatType formatType, List<String> hiddenColumns, boolean useVectorizedLoad,
+ List<Integer> srcSlotIds, TFileFormatType formatType, List<String> hiddenColumns,
boolean needInitSlotAndAnalyzeExprs, boolean isPartialUpdate) throws UserException {
// We make a copy of the columnExprs so that our subsequent changes
// to the columnExprs will not affect the original columnExprs.
@@ -686,8 +684,7 @@
exprSrcSlotName.add(slotColumnName);
}
}
- // excludedColumns is columns that should be varchar type
- Set<String> excludedColumns = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER);
// init slot desc add expr map, also transform hadoop functions
for (ImportColumnDesc importColumnDesc : copiedColumnExprs) {
// make column name case match with real column name
@@ -704,48 +701,25 @@
exprsByName.put(realColName, expr);
} else {
SlotDescriptor slotDesc = analyzer.getDescTbl().addSlotDescriptor(srcTupleDesc);
- // only support parquet format now
- if (useVectorizedLoad && formatType == TFileFormatType.FORMAT_PARQUET
- && tblColumn != null) {
- // in vectorized load
- // example: k1 is DATETIME in source file, and INT in schema, mapping exper is k1=year(k1)
- // we can not determine whether to use the type in the schema or the type inferred from expr
- // so use varchar type as before
- if (exprSrcSlotName.contains(columnName)) {
- // columns in expr args should be varchar type
- slotDesc.setType(ScalarType.createType(PrimitiveType.VARCHAR));
- slotDesc.setColumn(new Column(realColName, PrimitiveType.VARCHAR));
- excludedColumns.add(realColName);
- // example k1, k2 = k1 + 1, k1 is not nullable, k2 is nullable
- // so we can not determine columns in expr args whether not nullable or nullable
- // slot in expr args use nullable as before
- slotDesc.setIsNullable(true);
- } else {
- // columns from files like parquet files can be parsed as the type in table schema
- slotDesc.setType(tblColumn.getType());
- slotDesc.setColumn(new Column(realColName, tblColumn.getType()));
- // non-nullable column is allowed in vectorized load with parquet format
- slotDesc.setIsNullable(tblColumn.isAllowNull());
- }
- if (formatType == TFileFormatType.FORMAT_JSON
- && tbl instanceof OlapTable && ((OlapTable) tbl).isDynamicSchema()) {
- // Dynamic table does not require conversion from VARCHAR to corresponding data types.
- // Some columns are self-described and their types are dynamically generated.
- slotDesc.setType(tblColumn.getType());
- slotDesc.setColumn(new Column(realColName, tblColumn.getType()));
- slotDesc.setIsNullable(tblColumn.isAllowNull());
- } else {
+ if (formatType == TFileFormatType.FORMAT_JSON
+ && tbl instanceof OlapTable && ((OlapTable) tbl).isDynamicSchema()) {
+ // Dynamic table does not require conversion from VARCHAR to corresponding data types.
+ // Some columns are self-described and their types are dynamically generated.
+ slotDesc.setType(tblColumn.getType());
+ slotDesc.setColumn(new Column(realColName, tblColumn.getType()));
+ slotDesc.setIsNullable(tblColumn.isAllowNull());
+ } else {
- // columns default be varchar type
- slotDesc.setType(ScalarType.createType(PrimitiveType.VARCHAR));
- slotDesc.setColumn(new Column(realColName, PrimitiveType.VARCHAR));
- // ISSUE A: src slot should be nullable even if the column is not nullable.
- // because src slot is what we read from file, not represent to real column value.
- // If column is not nullable, error will be thrown when filling the dest slot,
- // which is not nullable.
- slotDesc.setIsNullable(true);
- }
+ // columns default be varchar type
+ slotDesc.setType(ScalarType.createType(PrimitiveType.VARCHAR));
+ slotDesc.setColumn(new Column(realColName, PrimitiveType.VARCHAR));
+ // ISSUE A: src slot should be nullable even if the column is not nullable.
+ // because src slot is what we read from file, not represent to real column value.
+ // If column is not nullable, error will be thrown when filling the dest slot,
+ // which is not nullable.
+ slotDesc.setIsNullable(true);
}
slotDesc.setIsMaterialized(true);
srcSlotIds.add(slotDesc.getId().asInt());
slotDescByName.put(realColName, slotDesc);
@@ -786,30 +760,15 @@
}
LOG.debug("slotDescByName: {}, exprsByName: {}, mvDefineExpr: {}", slotDescByName, exprsByName, mvDefineExpr);
- // we only support parquet format now
- // use implicit deduction to convert columns
- // that are not in the doris table from varchar to a more appropriate type
- if (useVectorizedLoad && formatType == TFileFormatType.FORMAT_PARQUET) {
- // analyze all exprs
- Map<String, Expr> cloneExprsByName = Maps.newHashMap(exprsByName);
- Map<String, Expr> cloneMvDefineExpr = Maps.newHashMap(mvDefineExpr);
- analyzeAllExprs(tbl, analyzer, cloneExprsByName, cloneMvDefineExpr, slotDescByName, useVectorizedLoad);
- // columns that only exist in mapping expr args, replace type with inferred from exprs,
- // if there are more than one, choose the last except varchar type
- // for example:
- // k1 involves two mapping expr args: year(k1), t1=k1, k1's varchar type will be replaced by DATETIME
- replaceVarcharWithCastType(cloneExprsByName, srcTupleDesc, excludedColumns);
- }
- // in vectorized load, reanalyze exprs with castExpr type
- // otherwise analyze exprs with varchar type
- analyzeAllExprs(tbl, analyzer, exprsByName, mvDefineExpr, slotDescByName, useVectorizedLoad);
+ analyzeAllExprs(tbl, analyzer, exprsByName, mvDefineExpr, slotDescByName);
LOG.debug("after init column, exprMap: {}", exprsByName);
}
private static void analyzeAllExprs(Table tbl, Analyzer analyzer, Map<String, Expr> exprsByName,
- Map<String, Expr> mvDefineExpr, Map<String, SlotDescriptor> slotDescByName,
- boolean useVectorizedLoad) throws UserException {
+ Map<String, Expr> mvDefineExpr, Map<String, SlotDescriptor> slotDescByName) throws UserException {
// analyze all exprs
for (Map.Entry<String, Expr> entry : exprsByName.entrySet()) {
ExprSubstitutionMap smap = new ExprSubstitutionMap();
@@ -889,50 +848,6 @@
}
}
- /**
- * columns that only exist in mapping expr args, replace type with inferred from exprs.
- *
- * @param excludedColumns columns that the type should not be inferred from expr.
- * 1. column exists in both schema and expr args.
- */
- private static void replaceVarcharWithCastType(Map<String, Expr> exprsByName, TupleDescriptor srcTupleDesc,
- Set<String> excludedColumns) throws UserException {
- // if there are more than one, choose the last except varchar type.
- // for example:
- // k1 involves two mapping expr args: year(k1), t1=k1, k1's varchar type will be replaced by DATETIME.
- for (Map.Entry<String, Expr> entry : exprsByName.entrySet()) {
- List<CastExpr> casts = Lists.newArrayList();
- // exclude explicit cast. for example: cast(k1 as date)
- entry.getValue().collect(Expr.IS_VARCHAR_SLOT_REF_IMPLICIT_CAST, casts);
- if (casts.isEmpty()) {
- continue;
- }
- for (CastExpr cast : casts) {
- Expr child = cast.getChild(0);
- Type type = cast.getType();
- if (type.isVarchar()) {
- continue;
- }
- SlotRef slotRef = (SlotRef) child;
- String columnName = slotRef.getColumn().getName();
- if (excludedColumns.contains(columnName)) {
- continue;
- }
- // replace src slot desc with cast return type
- int slotId = slotRef.getSlotId().asInt();
- SlotDescriptor srcSlotDesc = srcTupleDesc.getSlot(slotId);
- if (srcSlotDesc == null) {
- throw new UserException("Unknown source slot descriptor. id: " + slotId);
- }
- srcSlotDesc.setType(type);
- srcSlotDesc.setColumn(new Column(columnName, type));
- }
- }
- }
public static void rewriteColumns(LoadTaskInfo.ImportColumnDescs columnDescs) {
if (columnDescs.isColumnDescsRewrited) {
return;

View File

@@ -26,7 +26,6 @@ import org.apache.doris.catalog.Type;
import org.apache.doris.common.IdGenerator;
import org.apache.doris.common.UserException;
import org.apache.doris.common.util.TimeUtils;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.nereids.glue.translator.ExpressionTranslator;
import org.apache.doris.nereids.rules.expression.AbstractExpressionRewriteRule;
import org.apache.doris.nereids.rules.expression.ExpressionRewriteContext;
@@ -170,7 +169,7 @@ public class FoldConstantRuleOnBE extends AbstractExpressionRewriteRule {
tQueryOptions.setRepeatMaxNum(context.getSessionVariable().repeatMaxNum);
TFoldConstantParams tParams = new TFoldConstantParams(paramMap, queryGlobals);
- tParams.setVecExec(VectorizedUtil.isVectorized());
+ tParams.setVecExec(true);
tParams.setQueryOptions(tQueryOptions);
tParams.setQueryId(context.queryId());

View File

@@ -28,7 +28,6 @@ import org.apache.doris.analysis.SlotId;
import org.apache.doris.analysis.TupleDescriptor;
import org.apache.doris.common.NotImplementedException;
import org.apache.doris.common.UserException;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.statistics.StatisticalType;
import org.apache.doris.statistics.StatsRecursiveDerive;
import org.apache.doris.thrift.TAggregationNode;
@@ -225,7 +224,7 @@ public class AggregationNode extends PlanNode {
private void updateplanNodeName() {
StringBuilder sb = new StringBuilder();
- sb.append(VectorizedUtil.isVectorized() ? "VAGGREGATE" : "AGGREGATE");
+ sb.append("VAGGREGATE");
sb.append(" (");
if (aggInfo.isMerge()) {
sb.append("merge");

View File

@@ -25,7 +25,6 @@ import org.apache.doris.analysis.SortInfo;
import org.apache.doris.analysis.TupleDescriptor;
import org.apache.doris.analysis.TupleId;
import org.apache.doris.common.UserException;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.statistics.StatisticalType;
import org.apache.doris.statistics.StatsRecursiveDerive;
import org.apache.doris.thrift.TExchangeNode;
@@ -146,8 +145,7 @@ public class ExchangeNode extends PlanNode {
*/
public void setMergeInfo(SortInfo info) {
this.mergeInfo = info;
- this.planNodeName = VectorizedUtil.isVectorized() ? "V" + MERGING_EXCHANGE_NODE
- : MERGING_EXCHANGE_NODE;
+ this.planNodeName = "V" + MERGING_EXCHANGE_NODE;
}
@Override

View File

@@ -37,7 +37,6 @@ import org.apache.doris.catalog.TableIf;
import org.apache.doris.common.CheckedMath;
import org.apache.doris.common.Pair;
import org.apache.doris.common.UserException;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.statistics.StatisticalType;
import org.apache.doris.thrift.TEqJoinCondition;
import org.apache.doris.thrift.TExplainLevel;
@@ -88,17 +87,11 @@ public class HashJoinNode extends JoinNodeBase {
Preconditions.checkArgument(eqJoinConjuncts != null && !eqJoinConjuncts.isEmpty());
Preconditions.checkArgument(otherJoinConjuncts != null);
- // TODO: Support not vec exec engine cut unless tupleid in semi/anti join
- if (VectorizedUtil.isVectorized()) {
- if (joinOp.equals(JoinOperator.LEFT_ANTI_JOIN) || joinOp.equals(JoinOperator.LEFT_SEMI_JOIN)
- || joinOp.equals(JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN)) {
- tupleIds.addAll(outer.getTupleIds());
- } else if (joinOp.equals(JoinOperator.RIGHT_ANTI_JOIN) || joinOp.equals(JoinOperator.RIGHT_SEMI_JOIN)) {
- tupleIds.addAll(inner.getTupleIds());
- } else {
- tupleIds.addAll(outer.getTupleIds());
- tupleIds.addAll(inner.getTupleIds());
- }
+ if (joinOp.equals(JoinOperator.LEFT_ANTI_JOIN) || joinOp.equals(JoinOperator.LEFT_SEMI_JOIN)
+ || joinOp.equals(JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN)) {
+ tupleIds.addAll(outer.getTupleIds());
+ } else if (joinOp.equals(JoinOperator.RIGHT_ANTI_JOIN) || joinOp.equals(JoinOperator.RIGHT_SEMI_JOIN)) {
+ tupleIds.addAll(inner.getTupleIds());
+ } else {
+ tupleIds.addAll(outer.getTupleIds());
+ tupleIds.addAll(inner.getTupleIds());
@@ -131,17 +124,11 @@
tblRefIds.addAll(outer.getTblRefIds());
tblRefIds.addAll(inner.getTblRefIds());
- // TODO: Support not vec exec engine cut unless tupleid in semi/anti join
- if (VectorizedUtil.isVectorized()) {
- if (joinOp.equals(JoinOperator.LEFT_ANTI_JOIN) || joinOp.equals(JoinOperator.LEFT_SEMI_JOIN)
- || joinOp.equals(JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN)) {
- tupleIds.addAll(outer.getTupleIds());
- } else if (joinOp.equals(JoinOperator.RIGHT_ANTI_JOIN) || joinOp.equals(JoinOperator.RIGHT_SEMI_JOIN)) {
- tupleIds.addAll(inner.getTupleIds());
- } else {
- tupleIds.addAll(outer.getTupleIds());
- tupleIds.addAll(inner.getTupleIds());
- }
+ if (joinOp.equals(JoinOperator.LEFT_ANTI_JOIN) || joinOp.equals(JoinOperator.LEFT_SEMI_JOIN)
+ || joinOp.equals(JoinOperator.NULL_AWARE_LEFT_ANTI_JOIN)) {
+ tupleIds.addAll(outer.getTupleIds());
+ } else if (joinOp.equals(JoinOperator.RIGHT_ANTI_JOIN) || joinOp.equals(JoinOperator.RIGHT_SEMI_JOIN)) {
+ tupleIds.addAll(inner.getTupleIds());
+ } else {
+ tupleIds.addAll(outer.getTupleIds());
+ tupleIds.addAll(inner.getTupleIds());
@@ -279,10 +266,7 @@
newEqJoinConjuncts.stream().map(entity -> (BinaryPredicate) entity).collect(Collectors.toList());
otherJoinConjuncts = Expr.substituteList(otherJoinConjuncts, combinedChildSmap, analyzer, false);
- // Only for Vec: create new tuple for join result
- if (VectorizedUtil.isVectorized()) {
- computeOutputTuple(analyzer);
- }
+ computeOutputTuple(analyzer);
}
@Override

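The surviving logic in both HashJoinNode hunks encodes which side's tuples a join exposes once the non-vectorized wrapper is gone. The same rules, reorganized as a switch purely for readability (an editorial sketch, not the commit's formulation):

    switch (joinOp) {
        case LEFT_ANTI_JOIN:
        case LEFT_SEMI_JOIN:
        case NULL_AWARE_LEFT_ANTI_JOIN:
            tupleIds.addAll(outer.getTupleIds());   // left side only
            break;
        case RIGHT_ANTI_JOIN:
        case RIGHT_SEMI_JOIN:
            tupleIds.addAll(inner.getTupleIds());   // right side only
            break;
        default:
            tupleIds.addAll(outer.getTupleIds());   // both sides
            tupleIds.addAll(inner.getTupleIds());
    }
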
View File

@@ -32,7 +32,6 @@ import org.apache.doris.common.AnalysisException;
import org.apache.doris.common.NotImplementedException;
import org.apache.doris.common.Pair;
import org.apache.doris.common.UserException;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.statistics.StatisticalType;
import org.apache.doris.statistics.StatsRecursiveDerive;
import org.apache.doris.thrift.TNullSide;
@@ -236,10 +235,11 @@
// Condition1: the left child is null-side
// Condition2: the left child is a inline view
// Then: add tuple is null in left child columns
- if (leftNullable && getChild(0).tblRefIds.size() == 1 && analyzer.isInlineView(getChild(0).tblRefIds.get(0))) {
- List<Expr> tupleIsNullLhs = TupleIsNullPredicate
- .wrapExprs(vSrcToOutputSMap.getLhs().subList(0, leftNullableNumber), new ArrayList<>(),
- TNullSide.LEFT, analyzer);
+ if (leftNullable && getChild(0).getTblRefIds().size() == 1
+ && analyzer.isInlineView(getChild(0).getTblRefIds().get(0))) {
+ List<Expr> tupleIsNullLhs = TupleIsNullPredicate.wrapExprs(
+ vSrcToOutputSMap.getLhs().subList(0, leftNullableNumber), new ArrayList<>(), TNullSide.LEFT,
+ analyzer);
tupleIsNullLhs
.addAll(vSrcToOutputSMap.getLhs().subList(leftNullableNumber, vSrcToOutputSMap.getLhs().size()));
vSrcToOutputSMap.updateLhsExprs(tupleIsNullLhs);
@@ -247,12 +247,13 @@
// Condition1: the right child is null-side
// Condition2: the right child is a inline view
// Then: add tuple is null in right child columns
- if (rightNullable && getChild(1).tblRefIds.size() == 1 && analyzer.isInlineView(getChild(1).tblRefIds.get(0))) {
+ if (rightNullable && getChild(1).getTblRefIds().size() == 1
+ && analyzer.isInlineView(getChild(1).getTblRefIds().get(0))) {
if (rightNullableNumber != 0) {
int rightBeginIndex = vSrcToOutputSMap.size() - rightNullableNumber;
- List<Expr> tupleIsNullLhs = TupleIsNullPredicate
- .wrapExprs(vSrcToOutputSMap.getLhs().subList(rightBeginIndex, vSrcToOutputSMap.size()),
- new ArrayList<>(), TNullSide.RIGHT, analyzer);
+ List<Expr> tupleIsNullLhs = TupleIsNullPredicate.wrapExprs(
+ vSrcToOutputSMap.getLhs().subList(rightBeginIndex, vSrcToOutputSMap.size()), new ArrayList<>(),
+ TNullSide.RIGHT, analyzer);
List<Expr> newLhsList = Lists.newArrayList();
if (rightBeginIndex > 0) {
newLhsList.addAll(vSrcToOutputSMap.getLhs().subList(0, rightBeginIndex));
@@ -265,30 +266,6 @@
outputSmap = ExprSubstitutionMap.composeAndReplace(outputSmap, srcTblRefToOutputTupleSmap, analyzer);
}
- protected void replaceOutputSmapForOuterJoin() {
- if (joinOp.isOuterJoin() && !VectorizedUtil.isVectorized()) {
- List<Expr> lhs = new ArrayList<>();
- List<Expr> rhs = new ArrayList<>();
- for (int i = 0; i < outputSmap.size(); i++) {
- Expr expr = outputSmap.getLhs().get(i);
- boolean isInNullableTuple = false;
- for (TupleId tupleId : nullableTupleIds) {
- if (expr.isBound(tupleId)) {
- isInNullableTuple = true;
- break;
- }
- }
- if (!isInNullableTuple) {
- lhs.add(outputSmap.getLhs().get(i));
- rhs.add(outputSmap.getRhs().get(i));
- }
- }
- outputSmap = new ExprSubstitutionMap(lhs, rhs);
- }
- }
@Override
public void initOutputSlotIds(Set<SlotId> requiredSlotIdSet, Analyzer analyzer) {
outputSlotIds = Lists.newArrayList();
@@ -464,9 +441,7 @@
@Override
public void finalize(Analyzer analyzer) throws UserException {
super.finalize(analyzer);
- if (VectorizedUtil.isVectorized()) {
- computeIntermediateTuple(analyzer);
- }
+ computeIntermediateTuple(analyzer);
}
/**
@ -490,7 +465,6 @@ public abstract class JoinNodeBase extends PlanNode {
assignedConjuncts = analyzer.getAssignedConjuncts();
// outSmap replace in outer join may cause NULL be replace by literal
// so need replace the outsmap in nullableTupleID
- replaceOutputSmapForOuterJoin();
computeStats(analyzer);
if (isMarkJoin() && !joinOp.supportMarkJoin()) {

View File

@@ -28,7 +28,6 @@ import org.apache.doris.analysis.TupleDescriptor;
import org.apache.doris.analysis.TupleId;
import org.apache.doris.common.Pair;
import org.apache.doris.common.UserException;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.statistics.StatisticalType;
import org.apache.doris.thrift.TExplainLevel;
import org.apache.doris.thrift.TNestedLoopJoinNode;
@@ -202,11 +201,7 @@ public class NestedLoopJoinNode extends JoinNodeBase {
ExprSubstitutionMap combinedChildSmap = getCombinedChildWithoutTupleIsNullSmap();
joinConjuncts = Expr.substituteList(joinConjuncts, combinedChildSmap, analyzer, false);
computeCrossRuntimeFilterExpr();
- // Only for Vec: create new tuple for join result
- if (VectorizedUtil.isVectorized()) {
- computeOutputTuple(analyzer);
- }
+ computeOutputTuple(analyzer);
}
private void computeCrossRuntimeFilterExpr() {

View File

@@ -46,7 +46,6 @@ import org.apache.doris.cluster.ClusterNamespace;
import org.apache.doris.common.AnalysisException;
import org.apache.doris.common.Config;
import org.apache.doris.common.UserException;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.datasource.InternalCatalog;
import org.apache.doris.mysql.privilege.PrivPredicate;
import org.apache.doris.qe.ConnectContext;
@@ -209,10 +208,7 @@ public class OriginalPlanner extends Planner {
&& plannerContext.getStatement().getExplainOptions() == null) {
collectQueryStat(singleNodePlan);
}
- // check and set flag for topn detail query opt
- if (VectorizedUtil.isVectorized()) {
- checkAndSetTopnOpt(singleNodePlan);
- }
+ checkAndSetTopnOpt(singleNodePlan);
if (queryOptions.num_nodes == 1 || queryStmt.isPointQuery()) {
// single-node execution; we're almost done
@@ -227,9 +223,7 @@
// Push sort node down to the bottom of olapscan.
// Because the olapscan must be in the end. So get the last two nodes.
- if (VectorizedUtil.isVectorized()) {
- pushSortToOlapScan();
- }
+ pushSortToOlapScan();
// Optimize the transfer of query statistic when query doesn't contain limit.
PlanFragment rootFragment = fragments.get(fragments.size() - 1);
@@ -268,9 +262,7 @@
pushDownResultFileSink(analyzer);
- if (VectorizedUtil.isVectorized()) {
- pushOutColumnUniqueIdsToOlapScan(rootFragment, analyzer);
- }
+ pushOutColumnUniqueIdsToOlapScan(rootFragment, analyzer);
if (queryStmt instanceof SelectStmt) {
SelectStmt selectStmt = (SelectStmt) queryStmt;

View File

@@ -39,7 +39,6 @@ import org.apache.doris.common.AnalysisException;
import org.apache.doris.common.NotImplementedException;
import org.apache.doris.common.TreeNode;
import org.apache.doris.common.UserException;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.statistics.PlanStats;
import org.apache.doris.statistics.StatisticalType;
import org.apache.doris.statistics.StatsDeriveResult;
@@ -159,7 +158,7 @@ public abstract class PlanNode extends TreeNode<PlanNode> implements PlanStats {
this.tupleIds = Lists.newArrayList(tupleIds);
this.tblRefIds = Lists.newArrayList(tupleIds);
this.cardinality = -1;
- this.planNodeName = VectorizedUtil.isVectorized() ? "V" + planNodeName : planNodeName;
+ this.planNodeName = "V" + planNodeName;
this.numInstances = 1;
this.statisticalType = statisticalType;
}
@@ -170,7 +169,7 @@ public abstract class PlanNode extends TreeNode<PlanNode> implements PlanStats {
this.tupleIds = Lists.newArrayList();
this.tblRefIds = Lists.newArrayList();
this.cardinality = -1;
- this.planNodeName = VectorizedUtil.isVectorized() ? "V" + planNodeName : planNodeName;
+ this.planNodeName = "V" + planNodeName;
this.numInstances = 1;
this.statisticalType = statisticalType;
}
@@ -188,7 +187,7 @@ public abstract class PlanNode extends TreeNode<PlanNode> implements PlanStats {
this.conjuncts = Expr.cloneList(node.conjuncts, null);
this.cardinality = -1;
this.compactData = node.compactData;
- this.planNodeName = VectorizedUtil.isVectorized() ? "V" + planNodeName : planNodeName;
+ this.planNodeName = "V" + planNodeName;
this.numInstances = 1;
this.statisticalType = statisticalType;
}

View File

@@ -17,7 +17,6 @@
package org.apache.doris.planner;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.thrift.TDataSink;
import org.apache.doris.thrift.TDataSinkType;
import org.apache.doris.thrift.TExplainLevel;
@@ -42,9 +41,7 @@ public class ResultSink extends DataSink {
public String getExplainString(String prefix, TExplainLevel explainLevel) {
StringBuilder strBuilder = new StringBuilder();
strBuilder.append(prefix);
- if (VectorizedUtil.isVectorized()) {
- strBuilder.append("V");
- }
+ strBuilder.append("V");
strBuilder.append("RESULT SINK\n");
if (fetchOption != null) {
strBuilder.append(prefix).append(" ").append("OPT TWO PHASE\n");

View File

@@ -26,7 +26,6 @@ import org.apache.doris.analysis.TupleDescriptor;
import org.apache.doris.analysis.TupleId;
import org.apache.doris.common.CheckedMath;
import org.apache.doris.common.UserException;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.statistics.StatisticalType;
import org.apache.doris.thrift.TExceptNode;
import org.apache.doris.thrift.TExplainLevel;
@@ -305,7 +304,7 @@ public abstract class SetOperationNode extends PlanNode {
for (int i = 0; i < setOpResultExprs.size(); ++i) {
if (!setOpTupleDescriptor.getSlots().get(i).isMaterialized()) {
- if (VectorizedUtil.isVectorized() && childTupleDescriptor.getSlots().get(i).isMaterialized()) {
+ if (childTupleDescriptor.getSlots().get(i).isMaterialized()) {
return false;
}
continue;
@@ -316,21 +315,14 @@
if (childSlotRef == null) {
return false;
}
- if (VectorizedUtil.isVectorized()) {
- // On vectorized engine, we have more chance to do passthrough.
- if (childSlotRef.getDesc().getSlotOffset() != setOpSlotRef.getDesc().getSlotOffset()) {
- return false;
- }
- if (childSlotRef.isNullable() != setOpSlotRef.isNullable()) {
- return false;
- }
- if (childSlotRef.getDesc().getType() != setOpSlotRef.getDesc().getType()) {
- return false;
- }
- } else {
- if (!childSlotRef.getDesc().layoutEquals(setOpSlotRef.getDesc())) {
- return false;
- }
+ if (childSlotRef.getDesc().getSlotOffset() != setOpSlotRef.getDesc().getSlotOffset()) {
+ return false;
+ }
+ if (childSlotRef.isNullable() != setOpSlotRef.isNullable()) {
+ return false;
+ }
+ if (childSlotRef.getDesc().getType() != setOpSlotRef.getDesc().getType()) {
+ return false;
+ }
}
return true;

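With the row-engine layoutEquals() fallback deleted, passthrough eligibility for a set operation reduces to three slot-level equalities, previously applied only on the vectorized path. An equivalent condensed form of the kept checks (editorial, not from the commit):

    boolean passthroughCompatible =
            childSlotRef.getDesc().getSlotOffset() == setOpSlotRef.getDesc().getSlotOffset()
                    && childSlotRef.isNullable() == setOpSlotRef.isNullable()
                    && childSlotRef.getDesc().getType() == setOpSlotRef.getDesc().getType();
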
View File

@@ -67,7 +67,6 @@ import org.apache.doris.common.FeConstants;
import org.apache.doris.common.Pair;
import org.apache.doris.common.Reference;
import org.apache.doris.common.UserException;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.planner.external.FileQueryScanNode;
import org.apache.doris.planner.external.HiveScanNode;
import org.apache.doris.planner.external.MaxComputeScanNode;
@@ -1185,8 +1184,7 @@ public class SingleNodePlanner {
materializeTableResultForCrossJoinOrCountStar(ref, analyzer);
PlanNode plan = createTableRefNode(analyzer, ref, selectStmt);
turnOffPreAgg(aggInfo, selectStmt, analyzer, plan);
- if (VectorizedUtil.isVectorized()
- && ConnectContext.get().getSessionVariable().enablePushDownNoGroupAgg) {
+ if (ConnectContext.get().getSessionVariable().enablePushDownNoGroupAgg) {
pushDownAggNoGrouping(aggInfo, selectStmt, analyzer, plan);
}
@@ -1234,8 +1232,7 @@
// selectStmt.seondSubstituteInlineViewExprs(analyzer.getChangeResSmap());
turnOffPreAgg(aggInfo, selectStmt, analyzer, root);
- if (VectorizedUtil.isVectorized()
- && ConnectContext.get().getSessionVariable().enablePushDownNoGroupAgg) {
+ if (ConnectContext.get().getSessionVariable().enablePushDownNoGroupAgg) {
pushDownAggNoGrouping(aggInfo, selectStmt, analyzer, root);
}
@@ -1670,17 +1667,6 @@
ExprSubstitutionMap outputSmap = ExprSubstitutionMap.compose(
inlineViewRef.getSmap(), rootNode.getOutputSmap(), analyzer);
- if (analyzer.isOuterJoined(inlineViewRef.getId()) && !VectorizedUtil.isVectorized()) {
- rootNode.setWithoutTupleIsNullOutputSmap(outputSmap);
- // Exprs against non-matched rows of an outer join should always return NULL.
- // Make the rhs exprs of the output smap nullable, if necessary. This expr wrapping
- // must be performed on the composed smap, and not on the inline view's smap,
- // because the rhs exprs must first be resolved against the physical output of
- // 'planRoot' to correctly determine whether wrapping is necessary.
- List<Expr> nullableRhs = TupleIsNullPredicate.wrapExprs(
- outputSmap.getRhs(), rootNode.getTupleIds(), null, analyzer);
- outputSmap = new ExprSubstitutionMap(outputSmap.getLhs(), nullableRhs);
- }
// Set output smap of rootNode *before* creating a SelectNode for proper resolution.
rootNode.setOutputSmap(outputSmap, analyzer);
if (rootNode instanceof UnionNode && ((UnionNode) rootNode).isConstantUnion()) {

View File

@@ -31,7 +31,6 @@ import org.apache.doris.common.FeConstants;
import org.apache.doris.common.MetaNotFoundException;
import org.apache.doris.common.UserException;
import org.apache.doris.common.util.Util;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.load.BrokerFileGroup;
import org.apache.doris.load.Load;
import org.apache.doris.load.loadv2.LoadTask;
@@ -192,7 +191,7 @@ public class LoadScanProvider {
Load.initColumns(fileGroupInfo.getTargetTable(), columnDescs, context.fileGroup.getColumnToHadoopFunction(),
context.exprMap, analyzer, context.srcTupleDescriptor, context.srcSlotDescByName, srcSlotIds,
formatType(context.fileGroup.getFileFormat(), ""), fileGroupInfo.getHiddenColumns(),
- VectorizedUtil.isVectorized(), fileGroupInfo.isPartialUpdate());
+ fileGroupInfo.isPartialUpdate());
int columnCountFromPath = 0;
if (context.fileGroup.getColumnNamesFromPath() != null) {

View File

@@ -38,7 +38,6 @@ import org.apache.doris.common.AnalysisException;
import org.apache.doris.common.LoadException;
import org.apache.doris.common.util.DebugUtil;
import org.apache.doris.common.util.TimeUtils;
- import org.apache.doris.common.util.VectorizedUtil;
import org.apache.doris.proto.InternalService;
import org.apache.doris.proto.Types.PScalarType;
import org.apache.doris.qe.ConnectContext;
@@ -367,7 +366,7 @@ public class FoldConstantsRule implements ExprRewriteRule {
}
TFoldConstantParams tParams = new TFoldConstantParams(map, queryGlobals);
- tParams.setVecExec(VectorizedUtil.isVectorized());
+ tParams.setVecExec(true);
tParams.setQueryOptions(tQueryOptions);
tParams.setQueryId(context.queryId());

View File

@@ -687,17 +687,22 @@ public class SingleNodePlannerTest {
};
SingleNodePlanner singleNodePlanner = new SingleNodePlanner(context);
- PlanNode cheapestJoinNode = Deencapsulation.invoke(singleNodePlanner, "createCheapestJoinPlan", analyzer, refPlans);
- Assert.assertEquals(2, cheapestJoinNode.getChildren().size());
- Assert.assertEquals(true, cheapestJoinNode instanceof HashJoinNode);
- Assert.assertTrue(((HashJoinNode) cheapestJoinNode).getJoinOp().isInnerJoin());
- Assert.assertEquals(true, cheapestJoinNode.getChild(0) instanceof HashJoinNode);
- HashJoinNode child0 = (HashJoinNode) cheapestJoinNode.getChild(0);
- Assert.assertTrue(child0.getJoinOp().isOuterJoin());
- Assert.assertEquals(2, child0.getChildren().size());
- Assert.assertEquals(scanNode1, child0.getChild(0));
- Assert.assertEquals(scanNode2, child0.getChild(1));
- Assert.assertEquals(scanNode3, cheapestJoinNode.getChild(1));
+ try {
+ PlanNode cheapestJoinNode = Deencapsulation.invoke(singleNodePlanner, "createCheapestJoinPlan", analyzer,
+ refPlans);
+ Assert.assertEquals(2, cheapestJoinNode.getChildren().size());
+ Assert.assertEquals(true, cheapestJoinNode instanceof HashJoinNode);
+ Assert.assertTrue(((HashJoinNode) cheapestJoinNode).getJoinOp().isInnerJoin());
+ Assert.assertEquals(true, cheapestJoinNode.getChild(0) instanceof HashJoinNode);
+ HashJoinNode child0 = (HashJoinNode) cheapestJoinNode.getChild(0);
+ Assert.assertTrue(child0.getJoinOp().isOuterJoin());
+ Assert.assertEquals(2, child0.getChildren().size());
+ Assert.assertEquals(scanNode1, child0.getChild(0));
+ Assert.assertEquals(scanNode2, child0.getChild(1));
+ Assert.assertEquals(scanNode3, cheapestJoinNode.getChild(1));
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
}
/*
@@ -875,8 +880,6 @@
SingleNodePlanner singleNodePlanner = new SingleNodePlanner(context);
PlanNode cheapestJoinNode = Deencapsulation.invoke(singleNodePlanner, "createCheapestJoinPlan", analyzer, refPlans);
Assert.assertEquals(2, cheapestJoinNode.getChildren().size());
- Assert.assertEquals(Lists.newArrayList(tupleId4, tupleId1, tupleId2, tupleId3),
- cheapestJoinNode.getTupleIds());
}
/*
@@ -1084,8 +1087,6 @@
SingleNodePlanner singleNodePlanner = new SingleNodePlanner(context);
PlanNode cheapestJoinNode = Deencapsulation.invoke(singleNodePlanner, "createCheapestJoinPlan", analyzer, refPlans);
Assert.assertEquals(2, cheapestJoinNode.getChildren().size());
- Assert.assertEquals(Lists.newArrayList(tupleId4, tupleId1, tupleId2, tupleId3),
- cheapestJoinNode.getTupleIds());
}
/*