executor: support index merge on cluster index (#18699)
* executor: support index merge on cluster index

* fix fmt

* fix

* fix

* fix

* fix

* address comments

* address comments

* address comments

* fix

* fix

* fix

Co-authored-by: ti-srebot <66930949+ti-srebot@users.noreply.github.com>
parent f3554241bb
commit 56fd348d20
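The planner and executor changes below are easiest to read against the query shape this commit enables. A minimal sketch in testkit style, assuming a cluster-index (common handle) table whose DDL is implied but not shown by the new test data at the bottom of this diff (the column types, index name, and the configuration that makes a multi-column primary key the row handle are all assumptions):

    // Hypothetical DDL: the diff only adds the queries and their expected results.
    tk.MustExec("create table t1 (a int, b varchar(20), c decimal(20,10), d int, primary key (a, b), key c(c))")
    // With the primary key clustered, both partial paths below already produce
    // row handles, so they can be merged without a separate handle lookup.
    tk.MustQuery("select /*+ use_index_merge(t1 primary, c) */ * from t1 where t1.a >= 1 or t1.c = 2.2")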
@@ -2677,8 +2677,7 @@ func buildNoRangeIndexMergeReader(b *executorBuilder, v *plannercore.PhysicalInd
         feedbacks = append(feedbacks, feedback)
         if is, ok := v.PartialPlans[i][0].(*plannercore.PhysicalIndexScan); ok {
-            // TODO: handle length for cluster index.
-            tempReq, tempStreaming, err = buildIndexReq(b, len(is.Index.Columns), 0, v.PartialPlans[i])
+            tempReq, tempStreaming, err = buildIndexReq(b, len(is.Index.Columns), ts.HandleCols.NumCols(), v.PartialPlans[i])
             keepOrders = append(keepOrders, is.KeepOrder)
             descs = append(descs, is.Desc)
             indexes = append(indexes, is.Index)
@@ -2697,7 +2696,7 @@ func buildNoRangeIndexMergeReader(b *executorBuilder, v *plannercore.PhysicalInd
         partialReqs = append(partialReqs, tempReq)
         partialStreamings = append(partialStreamings, tempStreaming)
     }
-    tableReq, tableStreaming, table, err := buildTableReq(b, v.Schema().Len(), v.TablePlans)
+    tableReq, tableStreaming, tblInfo, err := buildTableReq(b, v.Schema().Len(), v.TablePlans)
     if err != nil {
         return nil, err
     }
@@ -2709,7 +2708,7 @@ func buildNoRangeIndexMergeReader(b *executorBuilder, v *plannercore.PhysicalInd
         baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()),
         dagPBs:       partialReqs,
         startTS:      startTS,
-        table:        table,
+        table:        tblInfo,
         indexes:      indexes,
         descs:        descs,
         tableRequest: tableReq,
@@ -2720,6 +2719,7 @@ func buildNoRangeIndexMergeReader(b *executorBuilder, v *plannercore.PhysicalInd
         tblPlans:          v.TablePlans,
         dataReaderBuilder: &dataReaderBuilder{executorBuilder: b},
         feedbacks:         feedbacks,
+        handleCols:        ts.HandleCols,
     }
     collectTable := false
     e.tableRequest.CollectRangeCounts = &collectTable
@@ -2740,6 +2740,10 @@ func (b *executorBuilder) buildIndexMergeReader(v *plannercore.PhysicalIndexMerg
             sctx.IndexNames = append(sctx.IndexNames, is.Table.Name.O+":"+is.Index.Name.O)
         } else {
             ret.ranges = append(ret.ranges, v.PartialPlans[i][0].(*plannercore.PhysicalTableScan).Ranges)
+            if ret.table.Meta().IsCommonHandle {
+                tblInfo := ret.table.Meta()
+                sctx.IndexNames = append(sctx.IndexNames, tblInfo.Name.O+":"+tables.FindPrimaryIndex(tblInfo).Name.O)
+            }
         }
     }
     ts := v.TablePlans[0].(*plannercore.PhysicalTableScan)
@@ -23,15 +23,14 @@ import (
     "github.com/pingcap/errors"
     "github.com/pingcap/failpoint"
     "github.com/pingcap/parser/model"
-    "github.com/pingcap/parser/mysql"
     "github.com/pingcap/parser/terror"
     "github.com/pingcap/tidb/distsql"
     "github.com/pingcap/tidb/expression"
     "github.com/pingcap/tidb/kv"
     plannercore "github.com/pingcap/tidb/planner/core"
     "github.com/pingcap/tidb/sessionctx"
     "github.com/pingcap/tidb/statistics"
     "github.com/pingcap/tidb/table"
     "github.com/pingcap/tidb/types"
     "github.com/pingcap/tidb/util"
     "github.com/pingcap/tidb/util/chunk"
     "github.com/pingcap/tidb/util/logutil"
@@ -102,6 +101,8 @@ type IndexMergeReaderExecutor struct {
     corColInAccess bool
     idxCols        [][]*expression.Column
     colLens        [][]int
+
+    handleCols plannercore.HandleCols
 }

 // Open implements the Executor Open interface
@@ -110,7 +111,15 @@ func (e *IndexMergeReaderExecutor) Open(ctx context.Context) error {
     for i, plan := range e.partialPlans {
         _, ok := plan[0].(*plannercore.PhysicalIndexScan)
         if !ok {
-            e.keyRanges = append(e.keyRanges, nil)
+            if e.table.Meta().IsCommonHandle {
+                keyRanges, err := distsql.CommonHandleRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, getPhysicalTableID(e.table), e.ranges[i])
+                if err != nil {
+                    return err
+                }
+                e.keyRanges = append(e.keyRanges, keyRanges)
+            } else {
+                e.keyRanges = append(e.keyRanges, nil)
+            }
             continue
         }
         keyRange, err := distsql.IndexRangesToKVRanges(e.ctx.GetSessionVars().StmtCtx, getPhysicalTableID(e.table), e.indexes[i].ID, e.ranges[i], e.feedbacks[i])
@@ -194,13 +203,14 @@ func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context,
         return err
     }

-    result, err := distsql.SelectWithRuntimeStats(ctx, e.ctx, kvReq, []*types.FieldType{types.NewFieldType(mysql.TypeLonglong)}, e.feedbacks[workID], getPhysicalPlanIDs(e.partialPlans[workID]), e.id)
+    result, err := distsql.SelectWithRuntimeStats(ctx, e.ctx, kvReq, e.handleCols.GetFieldsTypes(), e.feedbacks[workID], getPhysicalPlanIDs(e.partialPlans[workID]), e.id)
     if err != nil {
         return err
     }

     result.Fetch(ctx)
     worker := &partialIndexWorker{
         sc:           e.ctx,
         batchSize:    e.maxChunkSize,
         maxBatchSize: e.ctx.GetSessionVars().IndexLookupSize,
         maxChunkSize: e.maxChunkSize,
@@ -220,7 +230,7 @@ func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context,
     var err error
     util.WithRecovery(
         func() {
-            _, err = worker.fetchHandles(ctx1, result, exitCh, fetchCh, e.resultCh, e.finished)
+            _, err = worker.fetchHandles(ctx1, result, exitCh, fetchCh, e.resultCh, e.finished, e.handleCols)
         },
         e.handleHandlesFetcherPanic(ctx, e.resultCh, "partialIndexWorker"),
     )
@@ -261,6 +271,7 @@ func (e *IndexMergeReaderExecutor) startPartialTableWorker(ctx context.Context,
     }
+    tableInfo := e.partialPlans[workID][0].(*plannercore.PhysicalTableScan).Table
     worker := &partialTableWorker{
         sc:           e.ctx,
         batchSize:    e.maxChunkSize,
         maxBatchSize: e.ctx.GetSessionVars().IndexLookupSize,
         maxChunkSize: e.maxChunkSize,
@@ -277,7 +288,7 @@ func (e *IndexMergeReaderExecutor) startPartialTableWorker(ctx context.Context,
     var err error
     util.WithRecovery(
         func() {
-            _, err = worker.fetchHandles(ctx1, exitCh, fetchCh, e.resultCh, e.finished)
+            _, err = worker.fetchHandles(ctx1, exitCh, fetchCh, e.resultCh, e.finished, e.handleCols)
         },
         e.handleHandlesFetcherPanic(ctx, e.resultCh, "partialTableWorker"),
     )
@@ -294,6 +305,7 @@ func (e *IndexMergeReaderExecutor) startPartialTableWorker(ctx context.Context,
 }

 type partialTableWorker struct {
     sc           sessionctx.Context
     batchSize    int
     maxBatchSize int
     maxChunkSize int
@@ -302,25 +314,10 @@ type partialTableWorker struct {
 }

 func (w *partialTableWorker) fetchHandles(ctx context.Context, exitCh <-chan struct{}, fetchCh chan<- *lookupTableTask, resultCh chan<- *lookupTableTask,
-    finished <-chan struct{}) (count int64, err error) {
-    var chk *chunk.Chunk
-    handleOffset := -1
-    if w.tableInfo.PKIsHandle {
-        handleCol := w.tableInfo.GetPkColInfo()
-        columns := w.tableInfo.Columns
-        for i := 0; i < len(columns); i++ {
-            if columns[i].Name.L == handleCol.Name.L {
-                handleOffset = i
-                break
-            }
-        }
-    } else {
-        return 0, errors.Errorf("cannot find the column for handle")
-    }
-
-    chk = chunk.NewChunkWithCapacity(retTypes(w.tableReader), w.maxChunkSize)
+    finished <-chan struct{}, handleCols plannercore.HandleCols) (count int64, err error) {
+    chk := chunk.NewChunkWithCapacity(retTypes(w.tableReader), w.maxChunkSize)
     for {
-        handles, retChunk, err := w.extractTaskHandles(ctx, chk, handleOffset)
+        handles, retChunk, err := w.extractTaskHandles(ctx, chk, handleCols)
         if err != nil {
             doneCh := make(chan error, 1)
             doneCh <- err
|
||||
}
|
||||
}
|
||||
|
||||
func (w *partialTableWorker) extractTaskHandles(ctx context.Context, chk *chunk.Chunk, handleOffset int) (
|
||||
func (w *partialTableWorker) extractTaskHandles(ctx context.Context, chk *chunk.Chunk, handleCols plannercore.HandleCols) (
|
||||
handles []kv.Handle, retChk *chunk.Chunk, err error) {
|
||||
handles = make([]kv.Handle, 0, w.batchSize)
|
||||
for len(handles) < w.batchSize {
|
||||
@@ -359,8 +356,11 @@ func (w *partialTableWorker) extractTaskHandles(ctx context.Context, chk *chunk.
             return handles, retChk, nil
         }
         for i := 0; i < chk.NumRows(); i++ {
-            h := kv.IntHandle(chk.GetRow(i).GetInt64(handleOffset))
-            handles = append(handles, h)
+            handle, err := handleCols.BuildHandle(chk.GetRow(i))
+            if err != nil {
+                return nil, nil, err
+            }
+            handles = append(handles, handle)
         }
     }
     w.batchSize *= 2
@@ -557,15 +557,23 @@ func (w *indexMergeProcessWorker) handleLoopFetcherPanic(ctx context.Context, re
 }

 type partialIndexWorker struct {
     sc           sessionctx.Context
     batchSize    int
     maxBatchSize int
     maxChunkSize int
 }

-func (w *partialIndexWorker) fetchHandles(ctx context.Context, result distsql.SelectResult, exitCh <-chan struct{}, fetchCh chan<- *lookupTableTask, resultCh chan<- *lookupTableTask, finished <-chan struct{}) (count int64, err error) {
-    chk := chunk.NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeLonglong)}, w.maxChunkSize)
+func (w *partialIndexWorker) fetchHandles(
+    ctx context.Context,
+    result distsql.SelectResult,
+    exitCh <-chan struct{},
+    fetchCh chan<- *lookupTableTask,
+    resultCh chan<- *lookupTableTask,
+    finished <-chan struct{},
+    handleCols plannercore.HandleCols) (count int64, err error) {
+    chk := chunk.NewChunkWithCapacity(handleCols.GetFieldsTypes(), w.maxChunkSize)
     for {
-        handles, retChunk, err := w.extractTaskHandles(ctx, chk, result)
+        handles, retChunk, err := w.extractTaskHandles(ctx, chk, result, handleCols)
         if err != nil {
             doneCh := make(chan error, 1)
             doneCh <- err
@@ -591,9 +599,8 @@ func (w *partialIndexWorker) fetchHandles(ctx context.Context, result distsql.Se
     }
 }

-func (w *partialIndexWorker) extractTaskHandles(ctx context.Context, chk *chunk.Chunk, idxResult distsql.SelectResult) (
+func (w *partialIndexWorker) extractTaskHandles(ctx context.Context, chk *chunk.Chunk, idxResult distsql.SelectResult, handleCols plannercore.HandleCols) (
     handles []kv.Handle, retChk *chunk.Chunk, err error) {
-    handleOffset := chk.NumCols() - 1
     handles = make([]kv.Handle, 0, w.batchSize)
     for len(handles) < w.batchSize {
         chk.SetRequiredRows(w.batchSize-len(handles), w.maxChunkSize)
@@ -605,8 +612,11 @@ func (w *partialIndexWorker) extractTaskHandles(ctx context.Context, chk *chunk.
             return handles, retChk, nil
         }
         for i := 0; i < chk.NumRows(); i++ {
-            h := kv.IntHandle(chk.GetRow(i).GetInt64(handleOffset))
-            handles = append(handles, h)
+            handle, err := handleCols.BuildHandleFromIndexRow(chk.GetRow(i))
+            if err != nil {
+                return nil, nil, err
+            }
+            handles = append(handles, handle)
         }
     }
     w.batchSize *= 2
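Note the asymmetry between the two workers above: partialTableWorker calls BuildHandle, while partialIndexWorker calls BuildHandleFromIndexRow. A minimal sketch of the distinction (the two helper functions here are illustrative, not part of the patch):

    package example

    import (
        "github.com/pingcap/tidb/kv"
        plannercore "github.com/pingcap/tidb/planner/core"
        "github.com/pingcap/tidb/util/chunk"
    )

    // A table row carries the handle columns at their resolved schema offsets,
    // so the table worker can decode them in place.
    func buildFromTableRow(hc plannercore.HandleCols, row chunk.Row) (kv.Handle, error) {
        return hc.BuildHandle(row)
    }

    // An index row appends the handle columns at the tail of the chunk
    // (columns row.Len()-hc.NumCols() through row.Len()-1), which is exactly
    // what BuildHandleFromIndexRow decodes.
    func buildFromIndexRow(hc plannercore.HandleCols, row chunk.Row) (kv.Handle, error) {
        return hc.BuildHandleFromIndexRow(row)
    }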
@@ -777,7 +777,10 @@ func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, c
         totalRowCount += rowCount
     }

-    ts, partialCost := ds.buildIndexMergeTableScan(prop, path.TableFilters, totalRowCount)
+    ts, partialCost, err := ds.buildIndexMergeTableScan(prop, path.TableFilters, totalRowCount)
+    if err != nil {
+        return nil, err
+    }
     totalCost += partialCost
     cop.tablePlan = ts
     cop.idxMergePartPlans = scans
@@ -842,7 +845,7 @@ func (ds *DataSource) convertToPartialTableScan(prop *property.PhysicalProperty,
     return tablePlan, partialCost, rowCount
 }

-func (ds *DataSource) buildIndexMergeTableScan(prop *property.PhysicalProperty, tableFilters []expression.Expression, totalRowCount float64) (PhysicalPlan, float64) {
+func (ds *DataSource) buildIndexMergeTableScan(prop *property.PhysicalProperty, tableFilters []expression.Expression, totalRowCount float64) (PhysicalPlan, float64, error) {
     var partialCost float64
     sessVars := ds.ctx.GetSessionVars()
     ts := PhysicalTableScan{
@@ -852,8 +855,21 @@ func (ds *DataSource) buildIndexMergeTableScan(prop *property.PhysicalProperty,
         DBName:          ds.DBName,
         isPartition:     ds.isPartition,
         physicalTableID: ds.physicalTableID,
+        HandleCols:      ds.handleCols,
     }.Init(ds.ctx, ds.blockOffset)
     ts.SetSchema(ds.schema.Clone())
+    if ts.HandleCols == nil {
+        handleCol := ds.getPKIsHandleCol()
+        if handleCol == nil {
+            handleCol, _ = ts.appendExtraHandleCol(ds)
+        }
+        ts.HandleCols = NewIntHandleCols(handleCol)
+    }
+    var err error
+    ts.HandleCols, err = ts.HandleCols.ResolveIndices(ts.schema)
+    if err != nil {
+        return nil, 0, err
+    }
     ts.Columns = ExpandVirtualColumn(ts.Columns, ts.schema, ts.Table.Columns)
     if ts.Table.PKIsHandle {
         if pkColInfo := ts.Table.GetPkColInfo(); pkColInfo != nil {
@@ -877,9 +893,9 @@ func (ds *DataSource) buildIndexMergeTableScan(prop *property.PhysicalProperty,
         }
         sel := PhysicalSelection{Conditions: tableFilters}.Init(ts.ctx, ts.stats.ScaleByExpectCnt(selectivity*totalRowCount), ts.blockOffset)
         sel.SetChildren(ts)
-        return sel, partialCost
+        return sel, partialCost, nil
     }
-    return ts, partialCost
+    return ts, partialCost, nil
 }

 func indexCoveringCol(col *expression.Column, indexCols []*expression.Column, idxColLens []int) bool {
@@ -17,6 +17,7 @@ import (
     "strings"

     "github.com/pingcap/parser/model"
+    "github.com/pingcap/parser/mysql"
     "github.com/pingcap/tidb/expression"
     "github.com/pingcap/tidb/kv"
     "github.com/pingcap/tidb/sessionctx/stmtctx"
@@ -32,6 +33,8 @@ type HandleCols interface {
     BuildHandle(row chunk.Row) (kv.Handle, error)
     // BuildHandleByDatums builds a Handle from a datum slice.
     BuildHandleByDatums(row []types.Datum) (kv.Handle, error)
+    // BuildHandleFromIndexRow builds a Handle from index row data.
+    BuildHandleFromIndexRow(row chunk.Row) (kv.Handle, error)
     // ResolveIndices resolves handle column indices.
     ResolveIndices(schema *expression.Schema) (HandleCols, error)
     // IsInt returns if the HandleCols is a single int column.
@@ -44,6 +47,8 @@ type HandleCols interface {
     NumCols() int
     // Compare compares two datum rows by handle order.
     Compare(a, b []types.Datum) (int, error)
+    // GetFieldsTypes returns the field types of the handle columns.
+    GetFieldsTypes() []*types.FieldType
 }

 // CommonHandleCols implements the kv.HandleCols interface.
@@ -54,12 +59,7 @@ type CommonHandleCols struct {
     sc *stmtctx.StatementContext
 }

-// BuildHandle implements the kv.HandleCols interface.
-func (cb *CommonHandleCols) BuildHandle(row chunk.Row) (kv.Handle, error) {
-    datumBuf := make([]types.Datum, 0, 4)
-    for _, col := range cb.columns {
-        datumBuf = append(datumBuf, row.GetDatum(col.Index, col.RetType))
-    }
+func (cb *CommonHandleCols) buildHandleByDatumsBuffer(datumBuf []types.Datum) (kv.Handle, error) {
     datumBuf = tablecodec.TruncateIndexValuesIfNeeded(cb.tblInfo, cb.idxInfo, datumBuf)
     handleBytes, err := codec.EncodeKey(cb.sc, nil, datumBuf...)
     if err != nil {
@@ -68,18 +68,31 @@ func (cb *CommonHandleCols) BuildHandle(row chunk.Row) (kv.Handle, error) {
     return kv.NewCommonHandle(handleBytes)
 }

+// BuildHandle implements the kv.HandleCols interface.
+func (cb *CommonHandleCols) BuildHandle(row chunk.Row) (kv.Handle, error) {
+    datumBuf := make([]types.Datum, 0, 4)
+    for _, col := range cb.columns {
+        datumBuf = append(datumBuf, row.GetDatum(col.Index, col.RetType))
+    }
+    return cb.buildHandleByDatumsBuffer(datumBuf)
+}
+
+// BuildHandleFromIndexRow implements the kv.HandleCols interface.
+func (cb *CommonHandleCols) BuildHandleFromIndexRow(row chunk.Row) (kv.Handle, error) {
+    datumBuf := make([]types.Datum, 0, 4)
+    for i := 0; i < cb.NumCols(); i++ {
+        datumBuf = append(datumBuf, row.GetDatum(row.Len()-cb.NumCols()+i, cb.columns[i].RetType))
+    }
+    return cb.buildHandleByDatumsBuffer(datumBuf)
+}
+
 // BuildHandleByDatums implements the kv.HandleCols interface.
 func (cb *CommonHandleCols) BuildHandleByDatums(row []types.Datum) (kv.Handle, error) {
     datumBuf := make([]types.Datum, 0, 4)
     for _, col := range cb.columns {
         datumBuf = append(datumBuf, row[col.Index])
     }
-    datumBuf = tablecodec.TruncateIndexValuesIfNeeded(cb.tblInfo, cb.idxInfo, datumBuf)
-    handleBytes, err := codec.EncodeKey(cb.sc, nil, datumBuf...)
-    if err != nil {
-        return nil, err
-    }
-    return kv.NewCommonHandle(handleBytes)
+    return cb.buildHandleByDatumsBuffer(datumBuf)
 }

 // ResolveIndices implements the kv.HandleCols interface.
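For reference, the heart of the common-handle construction that the methods above share. This is a condensed restatement of buildHandleByDatumsBuffer (the function name below is illustrative, and the real helper first runs tablecodec.TruncateIndexValuesIfNeeded for prefix-length index columns):

    package example

    import (
        "github.com/pingcap/tidb/kv"
        "github.com/pingcap/tidb/sessionctx/stmtctx"
        "github.com/pingcap/tidb/types"
        "github.com/pingcap/tidb/util/codec"
    )

    // The primary-key datums are encoded into a memcomparable key, and the
    // common handle simply wraps those bytes, letting executors treat int and
    // common handles uniformly behind the kv.Handle interface.
    func encodeCommonHandle(sc *stmtctx.StatementContext, pkDatums ...types.Datum) (kv.Handle, error) {
        handleBytes, err := codec.EncodeKey(sc, nil, pkDatums...)
        if err != nil {
            return nil, err
        }
        return kv.NewCommonHandle(handleBytes)
    }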
@@ -145,6 +158,15 @@ func (cb *CommonHandleCols) Compare(a, b []types.Datum) (int, error) {
     return 0, nil
 }

+// GetFieldsTypes implements the kv.HandleCols interface.
+func (cb *CommonHandleCols) GetFieldsTypes() []*types.FieldType {
+    fieldTps := make([]*types.FieldType, 0, len(cb.columns))
+    for _, col := range cb.columns {
+        fieldTps = append(fieldTps, col.RetType)
+    }
+    return fieldTps
+}
+
 // NewCommonHandleCols creates a new CommonHandleCols.
 func NewCommonHandleCols(sc *stmtctx.StatementContext, tblInfo *model.TableInfo, idxInfo *model.IndexInfo,
     tableColumns []*expression.Column) *CommonHandleCols {
@@ -170,6 +192,11 @@ func (ib *IntHandleCols) BuildHandle(row chunk.Row) (kv.Handle, error) {
     return kv.IntHandle(row.GetInt64(ib.col.Index)), nil
 }

+// BuildHandleFromIndexRow implements the kv.HandleCols interface.
+func (ib *IntHandleCols) BuildHandleFromIndexRow(row chunk.Row) (kv.Handle, error) {
+    return kv.IntHandle(row.GetInt64(row.Len() - 1)), nil
+}
+
 // BuildHandleByDatums implements the kv.HandleCols interface.
 func (ib *IntHandleCols) BuildHandleByDatums(row []types.Datum) (kv.Handle, error) {
     return kv.IntHandle(row[ib.col.Index].GetInt64()), nil
@@ -220,6 +247,11 @@ func (ib *IntHandleCols) Compare(a, b []types.Datum) (int, error) {
     return 1, nil
 }

+// GetFieldsTypes implements the kv.HandleCols interface.
+func (ib *IntHandleCols) GetFieldsTypes() []*types.FieldType {
+    return []*types.FieldType{types.NewFieldType(mysql.TypeLonglong)}
+}
+
 // NewIntHandleCols creates a new IntHandleCols.
 func NewIntHandleCols(col *expression.Column) HandleCols {
     return &IntHandleCols{col: col}
@@ -1374,10 +1374,10 @@ func (s *testIntegrationSuite) TestAccessPathOnClusterIndex(c *C) {
         s.testData.OnRecord(func() {
             output[i].SQL = tt
             output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + tt).Rows())
-            output[i].Res = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Rows())
+            output[i].Res = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows())
         })
         tk.MustQuery("explain " + tt).Check(testkit.Rows(output[i].Plan...))
-        tk.MustQuery(tt).Check(testkit.Rows(output[i].Res...))
+        tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...))
     }
 }
@@ -426,7 +426,8 @@ type PhysicalTableScan struct {
     rangeDecidedBy []*expression.Column

     // HandleIdx is the index of handle, which is only used for admin check table.
-    HandleIdx []int
+    HandleIdx  []int
+    HandleCols HandleCols

     StoreType kv.StoreType
@@ -375,6 +375,7 @@ func (ds *DataSource) accessPathsForConds(conditions []expression.Expression, us
     }
     if ds.tableInfo.IsCommonHandle {
         path.IsCommonHandlePath = true
+        path.Index = ds.possibleAccessPaths[i].Index
     } else {
         path.IsIntHandlePath = true
     }
@@ -167,7 +167,9 @@
       "select /*+ use_index(t1, c) */ * from t1",
       "select * from t1 use index(c) where t1.c in (2.2, 3.3)",
       "select * from t1 where t1.a = 1 order by b",
-      "select * from t1 order by a, b limit 1"
+      "select * from t1 order by a, b limit 1",
+      "select /*+ use_index_merge(t1 primary, c) */ * from t1 where t1.a >= 1 or t1.c = 2.2",
+      "select /*+ use_index_merge(t1 primary, c) */ * from t1 where t1.a = 1 and t1.b = '111' or t1.c = 3.3"
     ]
   },
   {
planner/core/testdata/integration_suite_out.json
@@ -954,6 +954,33 @@
       "Res": [
         "1 111 1.1000000000 11"
       ]
     },
+    {
+      "SQL": "select /*+ use_index_merge(t1 primary, c) */ * from t1 where t1.a >= 1 or t1.c = 2.2",
+      "Plan": [
+        "IndexMerge_8 3.00 root ",
+        "├─TableRangeScan_5(Build) 3.00 cop[tikv] table:t1 range:[1,+inf], keep order:false",
+        "├─IndexRangeScan_6(Build) 1.00 cop[tikv] table:t1, index:c(c) range:[2.2000000000,2.2000000000], keep order:false",
+        "└─TableRowIDScan_7(Probe) 3.00 cop[tikv] table:t1 keep order:false"
+      ],
+      "Res": [
+        "1 111 1.1000000000 11",
+        "2 222 2.2000000000 12",
+        "3 333 3.3000000000 13"
+      ]
+    },
+    {
+      "SQL": "select /*+ use_index_merge(t1 primary, c) */ * from t1 where t1.a = 1 and t1.b = '111' or t1.c = 3.3",
+      "Plan": [
+        "IndexMerge_8 2.00 root ",
+        "├─TableRangeScan_5(Build) 1.00 cop[tikv] table:t1 range:[1 \"111\",1 \"111\"], keep order:false",
+        "├─IndexRangeScan_6(Build) 1.00 cop[tikv] table:t1, index:c(c) range:[3.3000000000,3.3000000000], keep order:false",
+        "└─TableRowIDScan_7(Probe) 2.00 cop[tikv] table:t1 keep order:false"
+      ],
+      "Res": [
+        "1 111 1.1000000000 11",
+        "3 333 3.3000000000 13"
+      ]
+    }
   ]
 },