// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package core

import (
	"bytes"
	"context"
	"fmt"
	"strconv"
	"strings"

	"github.com/pingcap/errors"
	"github.com/pingcap/parser/ast"
	"github.com/pingcap/parser/model"
	"github.com/pingcap/parser/mysql"
	"github.com/pingcap/tidb/domain"
	"github.com/pingcap/tidb/expression"
	"github.com/pingcap/tidb/infoschema"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/metrics"
	"github.com/pingcap/tidb/privilege"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/sessionctx/variable"
	"github.com/pingcap/tidb/table"
	"github.com/pingcap/tidb/table/tables"
	"github.com/pingcap/tidb/types"
	driver "github.com/pingcap/tidb/types/parser_driver"
	"github.com/pingcap/tidb/util/chunk"
	"github.com/pingcap/tidb/util/execdetails"
	"github.com/pingcap/tidb/util/hint"
	"github.com/pingcap/tidb/util/kvcache"
	"github.com/pingcap/tidb/util/logutil"
	"github.com/pingcap/tidb/util/ranger"
	"github.com/pingcap/tidb/util/texttree"
	"go.uber.org/zap"
)

var planCacheCounter = metrics.PlanCacheCounter.WithLabelValues("prepare")

// ShowDDL is for showing DDL information.
type ShowDDL struct {
	baseSchemaProducer
}

// ShowSlow is for showing slow queries.
type ShowSlow struct {
	baseSchemaProducer

	*ast.ShowSlow
}

// ShowDDLJobQueries is for showing the SQL statements of DDL jobs.
type ShowDDLJobQueries struct {
	baseSchemaProducer

	JobIDs []int64
}

// ShowNextRowID is for showing the next global row ID.
type ShowNextRowID struct {
	baseSchemaProducer
	TableName *ast.TableName
}

// CheckTable is used for checking table data, built from the 'admin check table' statement.
type CheckTable struct {
	baseSchemaProducer

	DBName string
	Table table.Table
	IndexInfos []*model.IndexInfo
	IndexLookUpReaders []*PhysicalIndexLookUpReader
	CheckIndex bool
}

// RecoverIndex is used for backfilling corrupted index data.
type RecoverIndex struct {
	baseSchemaProducer

	Table *ast.TableName
	IndexName string
}

// CleanupIndex is used to delete dangling index data.
type CleanupIndex struct {
	baseSchemaProducer

	Table *ast.TableName
	IndexName string
}

// CheckIndexRange is used for checking index data; it outputs the index values whose handles are within [begin, end].
type CheckIndexRange struct {
	baseSchemaProducer

	Table *ast.TableName
	IndexName string

	HandleRanges []ast.HandleRange
}

// ChecksumTable is used for calculating the table checksum, built from the `admin checksum table` statement.
type ChecksumTable struct {
	baseSchemaProducer

	Tables []*ast.TableName
}

// CancelDDLJobs represents a cancel DDL jobs plan.
type CancelDDLJobs struct {
	baseSchemaProducer

	JobIDs []int64
}

// ReloadExprPushdownBlacklist reloads the data from the expr_pushdown_blacklist table.
type ReloadExprPushdownBlacklist struct {
	baseSchemaProducer
}

// ReloadOptRuleBlacklist reloads the data from the opt_rule_blacklist table.
type ReloadOptRuleBlacklist struct {
	baseSchemaProducer
}

// AdminPluginsAction indicates the action that will be taken on plugins.
type AdminPluginsAction int

const (
	// Enable indicates enabling plugins.
	Enable AdminPluginsAction = iota + 1
	// Disable indicates disabling plugins.
	Disable
)

// AdminPlugins administrates tidb plugins.
type AdminPlugins struct {
	baseSchemaProducer
	Action AdminPluginsAction
	Plugins []string
}

// AdminShowTelemetry displays telemetry status including tracking ID, status and so on.
type AdminShowTelemetry struct {
	baseSchemaProducer
}

// AdminResetTelemetryID regenerates a new telemetry tracking ID.
type AdminResetTelemetryID struct {
	baseSchemaProducer
}

// Change represents a change plan.
type Change struct {
	baseSchemaProducer
	*ast.ChangeStmt
}

// Prepare represents a prepare plan.
type Prepare struct {
	baseSchemaProducer

	Name string
	SQLText string
}

// Execute represents an execute plan.
type Execute struct {
	baseSchemaProducer

	Name string
	UsingVars []expression.Expression
	PrepareParams []types.Datum
	ExecID uint32
	SnapshotTS uint64
	Stmt ast.StmtNode
	StmtType string
	Plan Plan
}

// isGetVarBinaryLiteral checks whether the result of a GetVar expression is a BinaryLiteral.
// Because GetVar uses String to represent a BinaryLiteral, here we need to convert the string back to a BinaryLiteral.
func isGetVarBinaryLiteral(sctx sessionctx.Context, expr expression.Expression) (res bool) {
	scalarFunc, ok := expr.(*expression.ScalarFunction)
	if ok && scalarFunc.FuncName.L == ast.GetVar {
		name, isNull, err := scalarFunc.GetArgs()[0].EvalString(sctx, chunk.Row{})
		if err != nil || isNull {
			res = false
		} else if dt, ok2 := sctx.GetSessionVars().Users[name]; ok2 {
			res = (dt.Kind() == types.KindBinaryLiteral)
		}
	}
	return res
}

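// Illustrative sketch (not part of the original source): isGetVarBinaryLiteral matters for a flow like
//
//	SET @v = 0x616263;
//	PREPARE s FROM 'SELECT * FROM t WHERE b = ?';
//	EXECUTE s USING @v;
//
// The user variable @v is stored as a string datum, so when it is bound to the parameter marker
// the string has to be converted back into a BinaryLiteral. The table and column names above are
// hypothetical.
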
// OptimizePreparedPlan optimizes the prepared statement.
func (e *Execute) OptimizePreparedPlan(ctx context.Context, sctx sessionctx.Context, is infoschema.InfoSchema) error {
	vars := sctx.GetSessionVars()
	if e.Name != "" {
		e.ExecID = vars.PreparedStmtNameToID[e.Name]
	}
	preparedPointer, ok := vars.PreparedStmts[e.ExecID]
	if !ok {
		return errors.Trace(ErrStmtNotFound)
	}
	preparedObj, ok := preparedPointer.(*CachedPrepareStmt)
	if !ok {
		return errors.Errorf("invalid CachedPrepareStmt type")
	}
	prepared := preparedObj.PreparedAst
	vars.StmtCtx.StmtType = prepared.StmtType

	paramLen := len(e.PrepareParams)
	if paramLen > 0 {
		// For a binary protocol execute, the arguments are placed in e.PrepareParams.
		if len(prepared.Params) != paramLen {
			return errors.Trace(ErrWrongParamCount)
		}
		vars.PreparedParams = e.PrepareParams
		for i, val := range vars.PreparedParams {
			param := prepared.Params[i].(*driver.ParamMarkerExpr)
			param.Datum = val
			param.InExecute = true
		}
	} else {
		// For `execute stmt using @a, @b, @c`, use the values in e.UsingVars.
		if len(prepared.Params) != len(e.UsingVars) {
			return errors.Trace(ErrWrongParamCount)
		}

		for i, usingVar := range e.UsingVars {
			val, err := usingVar.Eval(chunk.Row{})
			if err != nil {
				return err
			}
			param := prepared.Params[i].(*driver.ParamMarkerExpr)
			if isGetVarBinaryLiteral(sctx, usingVar) {
				binVal, convErr := val.ToBytes()
				if convErr != nil {
					return convErr
				}
				val.SetBinaryLiteral(types.BinaryLiteral(binVal))
			}
			param.Datum = val
			param.InExecute = true
			vars.PreparedParams = append(vars.PreparedParams, val)
		}
	}

	var snapshotTS uint64
	if preparedObj.SnapshotTSEvaluator != nil {
		if vars.InTxn() {
			return ErrAsOf.FastGenWithCause("as of timestamp can't be set in transaction.")
		}
		// If preparedObj.SnapshotTSEvaluator != nil, this is a stale read SQL statement,
		// which means its infoschema is the one specified by the SQL, not the current/latest infoschema.
		var err error
		snapshotTS, err = preparedObj.SnapshotTSEvaluator(sctx)
		if err != nil {
			return errors.Trace(err)
		}
		is, err = domain.GetDomain(sctx).GetSnapshotInfoSchema(snapshotTS)
		if err != nil {
			return errors.Trace(err)
		}
	}
	if prepared.SchemaVersion != is.SchemaMetaVersion() {
		// In order to avoid some correctness issues, we have to clear the
		// cached plan once the schema version is changed.
		// The cached plan in the prepared struct does NOT have a "cache key" with
		// a schema version like the prepared plan cache key does.
		prepared.CachedPlan = nil
		preparedObj.Executor = nil
		// If the schema version has changed we need to preprocess it again;
		// if it fails this time, the real reason for the error is that the schema has changed.
		ret := &PreprocessorReturn{InfoSchema: is}
		err := Preprocess(sctx, prepared.Stmt, InPrepare, WithPreprocessorReturn(ret))
		if err != nil {
			return ErrSchemaChanged.GenWithStack("Schema change caused error: %s", err.Error())
		}
		prepared.SchemaVersion = is.SchemaMetaVersion()
	}
	err := e.getPhysicalPlan(ctx, sctx, is, preparedObj)
	if err != nil {
		return err
	}
	e.SnapshotTS = snapshotTS
	e.Stmt = prepared.Stmt
	return nil
}

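// Illustrative sketch (not part of the original source): for the text protocol,
//
//	PREPARE s FROM 'SELECT * FROM t WHERE id = ?';
//	SET @a = 1;
//	EXECUTE s USING @a;
//
// OptimizePreparedPlan receives the argument through e.UsingVars, while a binary-protocol
// COM_STMT_EXECUTE places the decoded values in e.PrepareParams. In both cases the values end up
// in vars.PreparedParams and in the ParamMarkerExpr nodes of the prepared AST. Names above are
// hypothetical.
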
func (e *Execute) checkPreparedPriv(ctx context.Context, sctx sessionctx.Context,
	preparedObj *CachedPrepareStmt, is infoschema.InfoSchema) error {
	if pm := privilege.GetPrivilegeManager(sctx); pm != nil {
		if err := CheckPrivilege(sctx.GetSessionVars().ActiveRoles, pm, preparedObj.VisitInfos); err != nil {
			return err
		}
	}
	err := CheckTableLock(sctx, is, preparedObj.VisitInfos)
	return err
}

func (e *Execute) setFoundInPlanCache(sctx sessionctx.Context, opt bool) error {
	vars := sctx.GetSessionVars()
	err := vars.SetSystemVar(variable.TiDBFoundInPlanCache, variable.BoolToOnOff(opt))
	return err
}

func (e *Execute) getPhysicalPlan(ctx context.Context, sctx sessionctx.Context, is infoschema.InfoSchema, preparedStmt *CachedPrepareStmt) error {
	sessVars := sctx.GetSessionVars()
	stmtCtx := sessVars.StmtCtx
	prepared := preparedStmt.PreparedAst
	stmtCtx.UseCache = prepared.UseCache
	var cacheKey kvcache.Key
	if prepared.UseCache {
		cacheKey = NewPSTMTPlanCacheKey(sctx.GetSessionVars(), e.ExecID, prepared.SchemaVersion)
	}
	tps := make([]*types.FieldType, len(e.UsingVars))
	for i, param := range e.UsingVars {
		name := param.(*expression.ScalarFunction).GetArgs()[0].String()
		tps[i] = sctx.GetSessionVars().UserVarTypes[name]
		if tps[i] == nil {
			tps[i] = types.NewFieldType(mysql.TypeNull)
		}
	}
	if prepared.CachedPlan != nil {
		// Rewriting the expression in the select.where condition will convert its
		// type from "paramMarker" to "Constant". When point select queries are executed,
		// the expression in the where condition will not be evaluated,
		// so we don't need to consider whether prepared.useCache is enabled.
		plan := prepared.CachedPlan.(Plan)
		names := prepared.CachedNames.(types.NameSlice)
		err := e.rebuildRange(plan)
		if err != nil {
			logutil.BgLogger().Debug("rebuild range failed", zap.Error(err))
			goto REBUILD
		}
		if metrics.ResettablePlanCacheCounterFortTest {
			metrics.PlanCacheCounter.WithLabelValues("prepare").Inc()
		} else {
			planCacheCounter.Inc()
		}
		err = e.setFoundInPlanCache(sctx, true)
		if err != nil {
			return err
		}
		e.names = names
		e.Plan = plan
		stmtCtx.PointExec = true
		return nil
	}
	if prepared.UseCache {
		if cacheValue, exists := sctx.PreparedPlanCache().Get(cacheKey); exists {
			if err := e.checkPreparedPriv(ctx, sctx, preparedStmt, is); err != nil {
				return err
			}
			cachedVals := cacheValue.([]*PSTMTPlanCacheValue)
			for _, cachedVal := range cachedVals {
				if !cachedVal.UserVarTypes.Equal(tps) {
					continue
				}
				planValid := true
				for tblInfo, unionScan := range cachedVal.TblInfo2UnionScan {
					if !unionScan && tableHasDirtyContent(sctx, tblInfo) {
						planValid = false
						// TODO: we can inject UnionScan into the cached plan to avoid invalidating it,
						// since rebuilding the filters in UnionScan is pretty trivial.
						sctx.PreparedPlanCache().Delete(cacheKey)
						break
					}
				}
				if planValid {
					err := e.rebuildRange(cachedVal.Plan)
					if err != nil {
						logutil.BgLogger().Debug("rebuild range failed", zap.Error(err))
						goto REBUILD
					}
					err = e.setFoundInPlanCache(sctx, true)
					if err != nil {
						return err
					}
					if metrics.ResettablePlanCacheCounterFortTest {
						metrics.PlanCacheCounter.WithLabelValues("prepare").Inc()
					} else {
						planCacheCounter.Inc()
					}
					e.names = cachedVal.OutPutNames
					e.Plan = cachedVal.Plan
					stmtCtx.SetPlanDigest(preparedStmt.NormalizedPlan, preparedStmt.PlanDigest)
					return nil
				}
				break
			}
		}
	}

REBUILD:
	stmt := TryAddExtraLimit(sctx, prepared.Stmt)
	p, names, err := OptimizeAstNode(ctx, sctx, stmt, is)
	if err != nil {
		return err
	}
	err = e.tryCachePointPlan(ctx, sctx, preparedStmt, is, p)
	if err != nil {
		return err
	}
	e.names = names
	e.Plan = p
	_, isTableDual := p.(*PhysicalTableDual)
	if !isTableDual && prepared.UseCache && !stmtCtx.OptimDependOnMutableConst {
		// Rebuild the cache key to exclude kv.TiFlash when the statement is not read only.
		if _, isolationReadContainTiFlash := sessVars.IsolationReadEngines[kv.TiFlash]; isolationReadContainTiFlash && !IsReadOnly(stmt, sessVars) {
			delete(sessVars.IsolationReadEngines, kv.TiFlash)
			cacheKey = NewPSTMTPlanCacheKey(sctx.GetSessionVars(), e.ExecID, prepared.SchemaVersion)
			sessVars.IsolationReadEngines[kv.TiFlash] = struct{}{}
		}
		cached := NewPSTMTPlanCacheValue(p, names, stmtCtx.TblInfo2UnionScan, tps)
		preparedStmt.NormalizedPlan, preparedStmt.PlanDigest = NormalizePlan(p)
		stmtCtx.SetPlanDigest(preparedStmt.NormalizedPlan, preparedStmt.PlanDigest)
		if cacheVals, exists := sctx.PreparedPlanCache().Get(cacheKey); exists {
			hitVal := false
			for i, cacheVal := range cacheVals.([]*PSTMTPlanCacheValue) {
				if cacheVal.UserVarTypes.Equal(tps) {
					hitVal = true
					cacheVals.([]*PSTMTPlanCacheValue)[i] = cached
					break
				}
			}
			if !hitVal {
				cacheVals = append(cacheVals.([]*PSTMTPlanCacheValue), cached)
			}
			sctx.PreparedPlanCache().Put(cacheKey, cacheVals)
		} else {
			sctx.PreparedPlanCache().Put(cacheKey, []*PSTMTPlanCacheValue{cached})
		}
	}
	err = e.setFoundInPlanCache(sctx, false)
	return err
}

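// Illustrative note (assumption, not part of the original source): the plan cache entry fetched in
// getPhysicalPlan is a slice of *PSTMTPlanCacheValue, one per combination of user-variable field
// types. For example, executing the same prepared statement first with an INT user variable and
// later with a VARCHAR one would keep two cached plans under the same cache key, distinguished by
// cachedVal.UserVarTypes.
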
// tryCachePointPlan will try to cache the point execution plan. There may be some
// short paths for these executions, currently "point select" and "point update".
func (e *Execute) tryCachePointPlan(ctx context.Context, sctx sessionctx.Context,
	preparedStmt *CachedPrepareStmt, is infoschema.InfoSchema, p Plan) error {
	if sctx.GetSessionVars().StmtCtx.OptimDependOnMutableConst {
		return nil
	}
	var (
		prepared = preparedStmt.PreparedAst
		ok bool
		err error
		names types.NameSlice
	)
	switch p.(type) {
	case *PointGetPlan:
		ok, err = IsPointGetWithPKOrUniqueKeyByAutoCommit(sctx, p)
		names = p.OutputNames()
		if err != nil {
			return err
		}
	}
	if ok {
		// Only cache the point plan for now.
		prepared.CachedPlan = p
		prepared.CachedNames = names
		preparedStmt.NormalizedPlan, preparedStmt.PlanDigest = NormalizePlan(p)
		sctx.GetSessionVars().StmtCtx.SetPlanDigest(preparedStmt.NormalizedPlan, preparedStmt.PlanDigest)
	}
	return err
}

func (e *Execute) rebuildRange(p Plan) error {
	sctx := p.SCtx()
	sc := p.SCtx().GetSessionVars().StmtCtx
	var err error
	switch x := p.(type) {
	case *PhysicalTableReader:
		ts := x.TablePlans[0].(*PhysicalTableScan)
		if ts.Table.IsCommonHandle {
			pk := tables.FindPrimaryIndex(ts.Table)
			pkCols := make([]*expression.Column, 0, len(pk.Columns))
			pkColsLen := make([]int, 0, len(pk.Columns))
			for _, colInfo := range pk.Columns {
				if pkCol := expression.ColInfo2Col(ts.schema.Columns, ts.Table.Columns[colInfo.Offset]); pkCol != nil {
					pkCols = append(pkCols, pkCol)
					pkColsLen = append(pkColsLen, colInfo.Length)
				}
			}
			if len(pkCols) > 0 {
				res, err := ranger.DetachCondAndBuildRangeForIndex(p.SCtx(), ts.AccessCondition, pkCols, pkColsLen)
				if err != nil {
					return err
				}
				ts.Ranges = res.Ranges
			} else {
				ts.Ranges = ranger.FullRange()
			}
		} else {
			var pkCol *expression.Column
			if ts.Table.PKIsHandle {
				if pkColInfo := ts.Table.GetPkColInfo(); pkColInfo != nil {
					pkCol = expression.ColInfo2Col(ts.schema.Columns, pkColInfo)
				}
			}
			if pkCol != nil {
				ts.Ranges, err = ranger.BuildTableRange(ts.AccessCondition, sc, pkCol.RetType)
				if err != nil {
					return err
				}
			} else {
				ts.Ranges = ranger.FullIntRange(false)
			}
		}
	case *PhysicalIndexReader:
		is := x.IndexPlans[0].(*PhysicalIndexScan)
		is.Ranges, err = e.buildRangeForIndexScan(sctx, is)
		if err != nil {
			return err
		}
	case *PhysicalIndexLookUpReader:
		is := x.IndexPlans[0].(*PhysicalIndexScan)
		is.Ranges, err = e.buildRangeForIndexScan(sctx, is)
		if err != nil {
			return err
		}
	case *PointGetPlan:
		// If the access conditions are not nil, this is a point get generated by the CBO.
		if x.AccessConditions != nil {
			if x.IndexInfo != nil {
				ranges, err := ranger.DetachCondAndBuildRangeForIndex(x.ctx, x.AccessConditions, x.IdxCols, x.IdxColLens)
				if err != nil {
					return err
				}
				for i := range x.IndexValues {
					x.IndexValues[i] = ranges.Ranges[0].LowVal[i]
				}
			} else {
				var pkCol *expression.Column
				if x.TblInfo.PKIsHandle {
					if pkColInfo := x.TblInfo.GetPkColInfo(); pkColInfo != nil {
						pkCol = expression.ColInfo2Col(x.schema.Columns, pkColInfo)
					}
				}
				if pkCol != nil {
					ranges, err := ranger.BuildTableRange(x.AccessConditions, x.ctx.GetSessionVars().StmtCtx, pkCol.RetType)
					if err != nil {
						return err
					}
					x.Handle = kv.IntHandle(ranges[0].LowVal[0].GetInt64())
				}
			}
		}
		// The code should never run here as long as we don't use point get for partition tables.
		// If we change the logic one day, this works as defensive programming to catch the error.
		if x.PartitionInfo != nil {
			// TODO: relocate the partition after rebuilding range to make PlanCache support PointGet
			return errors.New("point get for partition table can not use plan cache")
		}
		if x.HandleParam != nil {
			var iv int64
			iv, err = x.HandleParam.Datum.ToInt64(sc)
			if err != nil {
				return err
			}
			x.Handle = kv.IntHandle(iv)
			return nil
		}
		for i, param := range x.IndexValueParams {
			if param != nil {
				x.IndexValues[i] = param.Datum
			}
		}
		return nil
	case *BatchPointGetPlan:
		// If the access conditions are not nil, this is a batch point get generated by the CBO.
		if x.AccessConditions != nil {
			if x.IndexInfo != nil {
				ranges, err := ranger.DetachCondAndBuildRangeForIndex(x.ctx, x.AccessConditions, x.IdxCols, x.IdxColLens)
				if err != nil {
					return err
				}
				for i := range x.IndexValues {
					for j := range ranges.Ranges[i].LowVal {
						x.IndexValues[i][j] = ranges.Ranges[i].LowVal[j]
					}
				}
			} else {
				var pkCol *expression.Column
				if x.TblInfo.PKIsHandle {
					if pkColInfo := x.TblInfo.GetPkColInfo(); pkColInfo != nil {
						pkCol = expression.ColInfo2Col(x.schema.Columns, pkColInfo)
					}
				}
				if pkCol != nil {
					ranges, err := ranger.BuildTableRange(x.AccessConditions, x.ctx.GetSessionVars().StmtCtx, pkCol.RetType)
					if err != nil {
						return err
					}
					for i := range ranges {
						x.Handles[i] = kv.IntHandle(ranges[i].LowVal[0].GetInt64())
					}
				}
			}
		}
		for i, param := range x.HandleParams {
			if param != nil {
				var iv int64
				iv, err = param.Datum.ToInt64(sc)
				if err != nil {
					return err
				}
				x.Handles[i] = kv.IntHandle(iv)
			}
		}
		for i, params := range x.IndexValueParams {
			if len(params) < 1 {
				continue
			}
			for j, param := range params {
				if param != nil {
					x.IndexValues[i][j] = param.Datum
				}
			}
		}
	case PhysicalPlan:
		for _, child := range x.Children() {
			err = e.rebuildRange(child)
			if err != nil {
				return err
			}
		}
	case *Insert:
		if x.SelectPlan != nil {
			return e.rebuildRange(x.SelectPlan)
		}
	case *Update:
		if x.SelectPlan != nil {
			return e.rebuildRange(x.SelectPlan)
		}
	case *Delete:
		if x.SelectPlan != nil {
			return e.rebuildRange(x.SelectPlan)
		}
	}
	return nil
}

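// Illustrative sketch (not part of the original source): rebuildRange exists because a cached
// physical plan keeps the ranges computed from the previous parameter values. For a prepared query
// such as
//
//	SELECT * FROM t WHERE a = ?
//
// the first EXECUTE with a = 1 builds the range [1,1]; when the cached plan is reused with a = 5,
// the access ranges must be rebuilt to [5,5] before execution. Table and column names are
// hypothetical.
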
func (e *Execute) buildRangeForIndexScan(sctx sessionctx.Context, is *PhysicalIndexScan) ([]*ranger.Range, error) {
	if len(is.IdxCols) == 0 {
		return ranger.FullRange(), nil
	}
	res, err := ranger.DetachCondAndBuildRangeForIndex(sctx, is.AccessCondition, is.IdxCols, is.IdxColLens)
	if err != nil {
		return nil, err
	}
	return res.Ranges, nil
}

// Deallocate represents a deallocate plan.
type Deallocate struct {
	baseSchemaProducer

	Name string
}

// Set represents a plan for the set statement.
type Set struct {
	baseSchemaProducer

	VarAssigns []*expression.VarAssignment
}

// SetConfig represents a plan for the set config statement.
type SetConfig struct {
	baseSchemaProducer

	Type string
	Instance string
	Name string
	Value expression.Expression
}

// SQLBindOpType represents the SQL bind type.
type SQLBindOpType int

const (
	// OpSQLBindCreate represents the operation to create a SQL bind.
	OpSQLBindCreate SQLBindOpType = iota
	// OpSQLBindDrop represents the operation to drop a SQL bind.
	OpSQLBindDrop
	// OpFlushBindings is used to flush plan bindings.
	OpFlushBindings
	// OpCaptureBindings is used to capture plan bindings.
	OpCaptureBindings
	// OpEvolveBindings is used to evolve plan bindings.
	OpEvolveBindings
	// OpReloadBindings is used to reload plan bindings.
	OpReloadBindings
)

// SQLBindPlan represents a plan for SQL bind.
type SQLBindPlan struct {
	baseSchemaProducer

	SQLBindOp SQLBindOpType
	NormdOrigSQL string
	BindSQL string
	IsGlobal bool
	BindStmt ast.StmtNode
	Db string
	Charset string
	Collation string
}

// Simple represents a simple statement plan which doesn't need any optimization.
type Simple struct {
	baseSchemaProducer

	Statement ast.StmtNode

	// IsFromRemote indicates whether the statement is from a remote TiDB instance in the cluster
	// and is executing in the coprocessor.
	// Used for `global kill`. See https://github.com/pingcap/tidb/blob/master/docs/design/2020-06-01-global-kill.md.
	IsFromRemote bool

	// StaleTxnStartTS is the StartTS that is used to build a staleness transaction by the 'START TRANSACTION READ ONLY' statement.
	StaleTxnStartTS uint64
}

// PhysicalSimpleWrapper is a wrapper of `Simple` to implement the physical plan interface.
// Used for simple statements executing in the coprocessor.
type PhysicalSimpleWrapper struct {
	basePhysicalPlan
	Inner Simple
}

// InsertGeneratedColumns is for completing generated columns in Insert.
// We resolve generation expressions in the plan, and eval those in the executor.
type InsertGeneratedColumns struct {
	Columns []*ast.ColumnName
	Exprs []expression.Expression
	OnDuplicates []*expression.Assignment
}

// Insert represents an insert plan.
type Insert struct {
	baseSchemaProducer

	Table table.Table
	tableSchema *expression.Schema
	tableColNames types.NameSlice
	Columns []*ast.ColumnName
	Lists [][]expression.Expression
	SetList []*expression.Assignment

	OnDuplicate []*expression.Assignment
	Schema4OnDuplicate *expression.Schema
	names4OnDuplicate types.NameSlice

	GenCols InsertGeneratedColumns

	SelectPlan PhysicalPlan

	IsReplace bool

	// NeedFillDefaultValue is true when an expr in the value list references another column.
	NeedFillDefaultValue bool

	AllAssignmentsAreConstant bool

	RowLen int
}

// Update represents an Update plan.
type Update struct {
	baseSchemaProducer

	OrderedList []*expression.Assignment

	AllAssignmentsAreConstant bool

	VirtualAssignmentsOffset int

	SelectPlan PhysicalPlan

	TblColPosInfos TblColPosInfoSlice

	// PartitionedTable is used when partition sets are given,
	// e.g. update t partition(p0) set a = 1;
	PartitionedTable []table.PartitionedTable

	tblID2Table map[int64]table.Table
}

// Delete represents a delete plan.
type Delete struct {
	baseSchemaProducer

	IsMultiTable bool

	SelectPlan PhysicalPlan

	TblColPosInfos TblColPosInfoSlice
}

// AnalyzeTableID is the hybrid table ID used to analyze a table.
type AnalyzeTableID struct {
	TableID int64
	// PartitionID is used for the construction of partition table statistics. It indicates the ID of the partition.
	// If the table is not a partition table, PartitionID will be equal to -1.
	PartitionID int64
}

// GetStatisticsID is used to obtain the table ID to build statistics.
// If 'PartitionID == -1', we use the TableID to build the statistics for non-partition tables.
// Otherwise, we use the PartitionID to build the statistics of the partitions in partition tables.
func (h *AnalyzeTableID) GetStatisticsID() int64 {
	statisticsID := h.TableID
	if h.PartitionID != -1 {
		statisticsID = h.PartitionID
	}
	return statisticsID
}

// IsPartitionTable indicates whether the table is a partition table.
func (h *AnalyzeTableID) IsPartitionTable() bool {
	return h.PartitionID != -1
}

func (h *AnalyzeTableID) String() string {
	return fmt.Sprintf("%d => %v", h.PartitionID, h.TableID)
}

// Equals indicates whether the two table IDs are equal.
func (h *AnalyzeTableID) Equals(t *AnalyzeTableID) bool {
	if h == t {
		return true
	}
	if h == nil || t == nil {
		return false
	}
	return h.TableID == t.TableID && h.PartitionID == t.PartitionID
}

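// Illustrative example (hypothetical IDs, not part of the original source): for a partitioned
// table t with table ID 100 and a partition p0 with physical ID 101, the analyze task for p0
// carries TableID = 100 and PartitionID = 101, so GetStatisticsID returns 101 and statistics are
// stored per partition. For a non-partitioned table, PartitionID is -1 and the table ID is used
// instead.
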
// AnalyzeInfo is used to store the database name, table name and partition name of an analyze task.
type AnalyzeInfo struct {
	DBName string
	TableName string
	PartitionName string
	TableID AnalyzeTableID
	Incremental bool
	StatsVersion int
}

// AnalyzeColumnsTask is used for analyzing columns.
type AnalyzeColumnsTask struct {
	HandleCols HandleCols
	CommonHandleInfo *model.IndexInfo
	ColsInfo []*model.ColumnInfo
	TblInfo *model.TableInfo
	Indexes []*model.IndexInfo
	AnalyzeInfo
}

// AnalyzeIndexTask is used for analyzing indexes.
type AnalyzeIndexTask struct {
	IndexInfo *model.IndexInfo
	TblInfo *model.TableInfo
	AnalyzeInfo
}

// Analyze represents an analyze plan.
type Analyze struct {
	baseSchemaProducer

	ColTasks []AnalyzeColumnsTask
	IdxTasks []AnalyzeIndexTask
	Opts map[ast.AnalyzeOptionType]uint64
}

// LoadData represents a LOAD DATA plan.
type LoadData struct {
	baseSchemaProducer

	IsLocal bool
	OnDuplicate ast.OnDuplicateKeyHandlingType
	Path string
	Table *ast.TableName
	Columns []*ast.ColumnName
	FieldsInfo *ast.FieldsClause
	LinesInfo *ast.LinesClause
	IgnoreLines uint64

	ColumnAssignments []*ast.Assignment
	ColumnsAndUserVars []*ast.ColumnNameOrUserVar

	GenCols InsertGeneratedColumns
}

// LoadStats represents a load stats plan.
type LoadStats struct {
	baseSchemaProducer

	Path string
}

// IndexAdvise represents an index advise plan.
type IndexAdvise struct {
	baseSchemaProducer

	IsLocal bool
	Path string
	MaxMinutes uint64
	MaxIndexNum *ast.MaxIndexNumClause
	LinesInfo *ast.LinesClause
}

// SplitRegion represents a split regions plan.
type SplitRegion struct {
	baseSchemaProducer

	TableInfo *model.TableInfo
	PartitionNames []model.CIStr
	IndexInfo *model.IndexInfo
	Lower []types.Datum
	Upper []types.Datum
	Num int
	ValueLists [][]types.Datum
}

// SplitRegionStatus represents a split regions status plan.
type SplitRegionStatus struct {
	baseSchemaProducer

	Table table.Table
	IndexInfo *model.IndexInfo
}

// DDL represents a DDL statement plan.
type DDL struct {
	baseSchemaProducer

	Statement ast.DDLNode
}

// SelectInto represents a select-into plan.
type SelectInto struct {
	baseSchemaProducer

	TargetPlan Plan
	IntoOpt *ast.SelectIntoOption
}

// Explain represents an explain plan.
type Explain struct {
	baseSchemaProducer

	TargetPlan Plan
	Format string
	Analyze bool
	ExecStmt ast.StmtNode
	RuntimeStatsColl *execdetails.RuntimeStatsColl

	Rows [][]string
	ExplainRows [][]string
	explainedPlans map[int]bool

	ctes []*PhysicalCTE
}

// GetExplainRowsForPlan gets the explain rows for the plan.
func GetExplainRowsForPlan(plan Plan) (rows [][]string) {
	explain := &Explain{
		TargetPlan: plan,
		Format: ast.ExplainFormatROW,
		Analyze: false,
	}
	if err := explain.RenderResult(); err != nil {
		return rows
	}
	return explain.Rows
}

// prepareSchema prepares explain's result schema.
func (e *Explain) prepareSchema() error {
	var fieldNames []string
	format := strings.ToLower(e.Format)

	switch {
	case (format == ast.ExplainFormatROW && (!e.Analyze && e.RuntimeStatsColl == nil)) || (format == ast.ExplainFormatBrief):
		fieldNames = []string{"id", "estRows", "task", "access object", "operator info"}
	case format == ast.ExplainFormatVerbose:
		fieldNames = []string{"id", "estRows", "estCost", "task", "access object", "operator info"}
	case format == ast.ExplainFormatROW && (e.Analyze || e.RuntimeStatsColl != nil):
		fieldNames = []string{"id", "estRows", "actRows", "task", "access object", "execution info", "operator info", "memory", "disk"}
	case format == ast.ExplainFormatDOT:
		fieldNames = []string{"dot contents"}
	case format == ast.ExplainFormatHint:
		fieldNames = []string{"hint"}
	default:
		return errors.Errorf("explain format '%s' is not supported now", e.Format)
	}

	cwn := &columnsWithNames{
		cols: make([]*expression.Column, 0, len(fieldNames)),
		names: make([]*types.FieldName, 0, len(fieldNames)),
	}

	for _, fieldName := range fieldNames {
		cwn.Append(buildColumnWithName("", fieldName, mysql.TypeString, mysql.MaxBlobWidth))
	}
	e.SetSchema(cwn.col2Schema())
	e.names = cwn.names
	return nil
}

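// Illustrative note (assumption, not part of the original source): the schema chosen above
// determines the EXPLAIN output columns, e.g.
//
//	EXPLAIN SELECT ...             -> id, estRows, task, access object, operator info
//	EXPLAIN ANALYZE SELECT ...     -> id, estRows, actRows, task, access object,
//	                                  execution info, operator info, memory, disk
//	EXPLAIN FORMAT = 'verbose' ... -> id, estRows, estCost, task, access object, operator info
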
// RenderResult renders the explain result in the specified format.
func (e *Explain) RenderResult() error {
	if e.TargetPlan == nil {
		return nil
	}
	switch strings.ToLower(e.Format) {
	case ast.ExplainFormatROW, ast.ExplainFormatBrief, ast.ExplainFormatVerbose:
		if e.Rows == nil || e.Analyze {
			e.explainedPlans = map[int]bool{}
			err := e.explainPlanInRowFormat(e.TargetPlan, "root", "", "", true)
			if err != nil {
				return err
			}
			err = e.explainPlanInRowFormatCTE()
			if err != nil {
				return err
			}
		}
	case ast.ExplainFormatDOT:
		if physicalPlan, ok := e.TargetPlan.(PhysicalPlan); ok {
			e.prepareDotInfo(physicalPlan)
		}
	case ast.ExplainFormatHint:
		hints := GenHintsFromPhysicalPlan(e.TargetPlan)
		hints = append(hints, hint.ExtractTableHintsFromStmtNode(e.ExecStmt, nil)...)
		e.Rows = append(e.Rows, []string{hint.RestoreOptimizerHints(hints)})
	default:
		return errors.Errorf("explain format '%s' is not supported now", e.Format)
	}
	return nil
}

func (e *Explain) explainPlanInRowFormatCTE() (err error) {
	explainedCTEPlan := make(map[int]struct{})
	for i := 0; i < len(e.ctes); i++ {
		x := (*CTEDefinition)(e.ctes[i])
		// Skip if the CTE has been explained; the same CTE has the same IDForStorage.
		if _, ok := explainedCTEPlan[x.CTE.IDForStorage]; ok {
			continue
		}
		e.prepareOperatorInfo(x, "root", "", "", true)
		childIndent := texttree.Indent4Child("", true)
		err = e.explainPlanInRowFormat(x.SeedPlan, "root", "(Seed Part)", childIndent, x.RecurPlan == nil)
		if x.RecurPlan != nil {
			err = e.explainPlanInRowFormat(x.RecurPlan, "root", "(Recursive Part)", childIndent, true)
		}
		explainedCTEPlan[x.CTE.IDForStorage] = struct{}{}
	}

	return
}

// explainPlanInRowFormat generates explain information for root-tasks.
func (e *Explain) explainPlanInRowFormat(p Plan, taskType, driverSide, indent string, isLastChild bool) (err error) {
	e.prepareOperatorInfo(p, taskType, driverSide, indent, isLastChild)
	e.explainedPlans[p.ID()] = true

	// For every child we create a new sub-tree rooted by it.
	childIndent := texttree.Indent4Child(indent, isLastChild)

	if physPlan, ok := p.(PhysicalPlan); ok {
		// Indicate the driven side and driving side of 'join' and 'apply' operators.
		// See issue https://github.com/pingcap/tidb/issues/14602.
		driverSideInfo := make([]string, len(physPlan.Children()))
		buildSide := -1

		switch plan := physPlan.(type) {
		case *PhysicalApply:
			buildSide = plan.InnerChildIdx ^ 1
		case *PhysicalHashJoin:
			if plan.UseOuterToBuild {
				buildSide = plan.InnerChildIdx ^ 1
			} else {
				buildSide = plan.InnerChildIdx
			}
		case *PhysicalMergeJoin:
			if plan.JoinType == RightOuterJoin {
				buildSide = 0
			} else {
				buildSide = 1
			}
		case *PhysicalIndexJoin:
			buildSide = plan.InnerChildIdx ^ 1
		case *PhysicalIndexMergeJoin:
			buildSide = plan.InnerChildIdx ^ 1
		case *PhysicalIndexHashJoin:
			buildSide = plan.InnerChildIdx ^ 1
		}

		if buildSide != -1 {
			driverSideInfo[0], driverSideInfo[1] = "(Build)", "(Probe)"
		} else {
			buildSide = 0
		}

		// Always put the Build above the Probe.
		for i := range physPlan.Children() {
			pchild := &physPlan.Children()[i^buildSide]
			if e.explainedPlans[(*pchild).ID()] {
				continue
			}
			err = e.explainPlanInRowFormat(*pchild, taskType, driverSideInfo[i], childIndent, i == len(physPlan.Children())-1)
			if err != nil {
				return
			}
		}
	}

	switch x := p.(type) {
	case *PhysicalTableReader:
		var storeType string
		switch x.StoreType {
		case kv.TiKV, kv.TiFlash, kv.TiDB:
			// expected, do nothing
		default:
			return errors.Errorf("the store type %v is unknown", x.StoreType)
		}
		storeType = x.StoreType.Name()
		taskName := "cop"
		if x.BatchCop {
			taskName = "batchCop"
		}
		err = e.explainPlanInRowFormat(x.tablePlan, taskName+"["+storeType+"]", "", childIndent, true)
	case *PhysicalIndexReader:
		err = e.explainPlanInRowFormat(x.indexPlan, "cop[tikv]", "", childIndent, true)
	case *PhysicalIndexLookUpReader:
		err = e.explainPlanInRowFormat(x.indexPlan, "cop[tikv]", "(Build)", childIndent, false)
		if err != nil {
			return
		}
		err = e.explainPlanInRowFormat(x.tablePlan, "cop[tikv]", "(Probe)", childIndent, true)
	case *PhysicalIndexMergeReader:
		for _, pchild := range x.partialPlans {
			err = e.explainPlanInRowFormat(pchild, "cop[tikv]", "(Build)", childIndent, false)
			if err != nil {
				return
			}
		}
		err = e.explainPlanInRowFormat(x.tablePlan, "cop[tikv]", "(Probe)", childIndent, true)
	case *Insert:
		if x.SelectPlan != nil {
			err = e.explainPlanInRowFormat(x.SelectPlan, "root", "", childIndent, true)
		}
	case *Update:
		if x.SelectPlan != nil {
			err = e.explainPlanInRowFormat(x.SelectPlan, "root", "", childIndent, true)
		}
	case *Delete:
		if x.SelectPlan != nil {
			err = e.explainPlanInRowFormat(x.SelectPlan, "root", "", childIndent, true)
		}
	case *Execute:
		if x.Plan != nil {
			err = e.explainPlanInRowFormat(x.Plan, "root", "", indent, true)
		}
	case *PhysicalCTE:
		e.ctes = append(e.ctes, x)
	}
	return
}

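// Illustrative sketch (not part of the original source): because the build side is always emitted
// before the probe side, a hash join over two table scans is rendered roughly as
//
//	HashJoin_8                root
//	├─TableReader_15(Build)   root
//	│ └─TableFullScan_14      cop[tikv]
//	└─TableReader_12(Probe)   root
//	  └─TableFullScan_11      cop[tikv]
//
// Operator IDs are hypothetical.
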
func getRuntimeInfo(ctx sessionctx.Context, p Plan, runtimeStatsColl *execdetails.RuntimeStatsColl) (actRows, analyzeInfo, memoryInfo, diskInfo string) {
	if runtimeStatsColl == nil {
		runtimeStatsColl = ctx.GetSessionVars().StmtCtx.RuntimeStatsColl
		if runtimeStatsColl == nil {
			return
		}
	}
	explainID := p.ID()

	// There may be some mock information for the cop task that makes runtimeStatsColl.Exists(p.ExplainID()) return true.
	// So check copTaskExecDetail first and print the real cop task information if it's not empty.
	if runtimeStatsColl.ExistsRootStats(explainID) {
		rootStats := runtimeStatsColl.GetRootStats(explainID)
		analyzeInfo = rootStats.String()
		actRows = fmt.Sprint(rootStats.GetActRows())
	} else {
		actRows = "0"
	}
	if runtimeStatsColl.ExistsCopStats(explainID) {
		if len(analyzeInfo) > 0 {
			analyzeInfo += ", "
		}
		copStats := runtimeStatsColl.GetCopStats(explainID)
		analyzeInfo += copStats.String()
		actRows = fmt.Sprint(copStats.GetActRows())
	}
	memoryInfo = "N/A"
	memTracker := ctx.GetSessionVars().StmtCtx.MemTracker.SearchTrackerWithoutLock(p.ID())
	if memTracker != nil {
		memoryInfo = memTracker.FormatBytes(memTracker.MaxConsumed())
	}

	diskInfo = "N/A"
	diskTracker := ctx.GetSessionVars().StmtCtx.DiskTracker.SearchTrackerWithoutLock(p.ID())
	if diskTracker != nil {
		diskInfo = diskTracker.FormatBytes(diskTracker.MaxConsumed())
	}
	return
}

// prepareOperatorInfo generates the following information for every plan:
// operator id, estimated rows, task type, access object and other operator info.
func (e *Explain) prepareOperatorInfo(p Plan, taskType, driverSide, indent string, isLastChild bool) {
	if p.ExplainID().String() == "_0" {
		return
	}

	id := texttree.PrettyIdentifier(p.ExplainID().String()+driverSide, indent, isLastChild)
	estRows, estCost, accessObject, operatorInfo := e.getOperatorInfo(p, id)

	var row []string
	if e.Analyze {
		actRows, analyzeInfo, memoryInfo, diskInfo := getRuntimeInfo(e.ctx, p, nil)
		row = []string{id, estRows, actRows, taskType, accessObject, analyzeInfo, operatorInfo, memoryInfo, diskInfo}
	} else if e.RuntimeStatsColl != nil {
		actRows, analyzeInfo, memoryInfo, diskInfo := getRuntimeInfo(e.ctx, p, e.RuntimeStatsColl)
		row = []string{id, estRows, actRows, taskType, accessObject, analyzeInfo, operatorInfo, memoryInfo, diskInfo}
	} else {
		row = []string{id, estRows}
		if e.Format == ast.ExplainFormatVerbose {
			row = append(row, estCost)
		}
		row = append(row, taskType, accessObject, operatorInfo)
	}
	e.Rows = append(e.Rows, row)
}

func (e *Explain) getOperatorInfo(p Plan, id string) (string, string, string, string) {
	// For `explain for connection` statement, `e.ExplainRows` will be set.
	for _, row := range e.ExplainRows {
		if len(row) < 5 {
			panic("should never happen")
		}
		if row[0] == id {
			return row[1], "N/A", row[3], row[4]
		}
	}
	estRows := "N/A"
	if si := p.statsInfo(); si != nil {
		estRows = strconv.FormatFloat(si.RowCount, 'f', 2, 64)
	}
	estCost := "N/A"
	if pp, ok := p.(PhysicalPlan); ok {
		estCost = strconv.FormatFloat(pp.Cost(), 'f', 2, 64)
	}
	var accessObject, operatorInfo string
	if plan, ok := p.(dataAccesser); ok {
		accessObject = plan.AccessObject(false)
		operatorInfo = plan.OperatorInfo(false)
	} else {
		if pa, ok := p.(partitionAccesser); ok && e.ctx != nil {
			accessObject = pa.accessObject(e.ctx)
		}
		operatorInfo = p.ExplainInfo()
	}
	return estRows, estCost, accessObject, operatorInfo
}

func (e *Explain) prepareDotInfo(p PhysicalPlan) {
	buffer := bytes.NewBufferString("")
	fmt.Fprintf(buffer, "\ndigraph %s {\n", p.ExplainID())
	e.prepareTaskDot(p, "root", buffer)
	buffer.WriteString("}\n")

	e.Rows = append(e.Rows, []string{buffer.String()})
}

func (e *Explain) prepareTaskDot(p PhysicalPlan, taskTp string, buffer *bytes.Buffer) {
	fmt.Fprintf(buffer, "subgraph cluster%v{\n", p.ID())
	buffer.WriteString("node [style=filled, color=lightgrey]\n")
	buffer.WriteString("color=black\n")
	fmt.Fprintf(buffer, "label = \"%s\"\n", taskTp)

	if len(p.Children()) == 0 {
		if taskTp == "cop" {
			fmt.Fprintf(buffer, "\"%s\"\n}\n", p.ExplainID())
			return
		}
		fmt.Fprintf(buffer, "\"%s\"\n", p.ExplainID())
	}

	var copTasks []PhysicalPlan
	var pipelines []string

	for planQueue := []PhysicalPlan{p}; len(planQueue) > 0; planQueue = planQueue[1:] {
		curPlan := planQueue[0]
		switch copPlan := curPlan.(type) {
		case *PhysicalTableReader:
			pipelines = append(pipelines, fmt.Sprintf("\"%s\" -> \"%s\"\n", copPlan.ExplainID(), copPlan.tablePlan.ExplainID()))
			copTasks = append(copTasks, copPlan.tablePlan)
		case *PhysicalIndexReader:
			pipelines = append(pipelines, fmt.Sprintf("\"%s\" -> \"%s\"\n", copPlan.ExplainID(), copPlan.indexPlan.ExplainID()))
			copTasks = append(copTasks, copPlan.indexPlan)
		case *PhysicalIndexLookUpReader:
			pipelines = append(pipelines, fmt.Sprintf("\"%s\" -> \"%s\"\n", copPlan.ExplainID(), copPlan.tablePlan.ExplainID()))
			pipelines = append(pipelines, fmt.Sprintf("\"%s\" -> \"%s\"\n", copPlan.ExplainID(), copPlan.indexPlan.ExplainID()))
			copTasks = append(copTasks, copPlan.tablePlan)
			copTasks = append(copTasks, copPlan.indexPlan)
		case *PhysicalIndexMergeReader:
			for i := 0; i < len(copPlan.partialPlans); i++ {
				pipelines = append(pipelines, fmt.Sprintf("\"%s\" -> \"%s\"\n", copPlan.ExplainID(), copPlan.partialPlans[i].ExplainID()))
				copTasks = append(copTasks, copPlan.partialPlans[i])
			}
			if copPlan.tablePlan != nil {
				pipelines = append(pipelines, fmt.Sprintf("\"%s\" -> \"%s\"\n", copPlan.ExplainID(), copPlan.tablePlan.ExplainID()))
				copTasks = append(copTasks, copPlan.tablePlan)
			}
		}
		for _, child := range curPlan.Children() {
			fmt.Fprintf(buffer, "\"%s\" -> \"%s\"\n", curPlan.ExplainID(), child.ExplainID())
			planQueue = append(planQueue, child)
		}
	}
	buffer.WriteString("}\n")

	for _, cop := range copTasks {
		e.prepareTaskDot(cop.(PhysicalPlan), "cop", buffer)
	}

	for i := range pipelines {
		buffer.WriteString(pipelines[i])
	}
}

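// Illustrative sketch (hypothetical operator IDs, not part of the original source): for
// EXPLAIN FORMAT = "dot", prepareDotInfo/prepareTaskDot emit a graphviz digraph roughly of the form
//
//	digraph TableReader_5 {
//	subgraph cluster5{
//	node [style=filled, color=lightgrey]
//	color=black
//	label = "root"
//	"TableReader_5"
//	}
//	subgraph cluster4{
//	...
//	label = "cop"
//	"TableFullScan_4"
//	}
//	"TableReader_5" -> "TableFullScan_4"
//	}
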
// IsPointGetWithPKOrUniqueKeyByAutoCommit returns true when the statement meets the following conditions:
// 1. ctx is auto commit tagged
// 2. session is not InTxn
// 3. plan is point get by pk, or point get by unique index (no double read)
func IsPointGetWithPKOrUniqueKeyByAutoCommit(ctx sessionctx.Context, p Plan) (bool, error) {
	if !IsAutoCommitTxn(ctx) {
		return false, nil
	}

	// check plan
	if proj, ok := p.(*PhysicalProjection); ok {
		p = proj.Children()[0]
	}

	switch v := p.(type) {
	case *PhysicalIndexReader:
		indexScan := v.IndexPlans[0].(*PhysicalIndexScan)
		return indexScan.IsPointGetByUniqueKey(ctx.GetSessionVars().StmtCtx), nil
	case *PhysicalTableReader:
		tableScan := v.TablePlans[0].(*PhysicalTableScan)
		isPointRange := len(tableScan.Ranges) == 1 && tableScan.Ranges[0].IsPoint(ctx.GetSessionVars().StmtCtx)
		if !isPointRange {
			return false, nil
		}
		pkLength := 1
		if tableScan.Table.IsCommonHandle {
			pkIdx := tables.FindPrimaryIndex(tableScan.Table)
			pkLength = len(pkIdx.Columns)
		}
		return len(tableScan.Ranges[0].LowVal) == pkLength, nil
	case *PointGetPlan:
		// If the PointGetPlan needs to read data using a unique index (double read), we
		// can't use max uint64, because using math.MaxUint64 can't guarantee repeatable read,
		// and the data and index would be inconsistent!
		isPointGet := v.IndexInfo == nil || (v.IndexInfo.Primary && v.TblInfo.IsCommonHandle)
		return isPointGet, nil
	default:
		return false, nil
	}
}

// IsAutoCommitTxn checks whether the session is in autocommit mode and not inside an explicit transaction.
// It is used for fast plans such as point get.
func IsAutoCommitTxn(ctx sessionctx.Context) bool {
	return ctx.GetSessionVars().IsAutocommit() && !ctx.GetSessionVars().InTxn()
}

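// Illustrative note (assumption, not part of the original source): with autocommit enabled and no
// explicit transaction open, a statement like
//
//	SELECT * FROM t WHERE pk = 1
//
// satisfies IsPointGetWithPKOrUniqueKeyByAutoCommit and can take the point-get fast path, whereas
// the same statement inside BEGIN ... COMMIT does not. Table and column names are hypothetical.
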