planner: refactor the cost implementation for Aggs (#34227)
@@ -172,11 +172,13 @@ Union 26000.00 root
├─HashAgg 16000.00 root group by:Column#10, funcs:firstrow(Column#12)->Column#10
│ └─Union 16000.00 root
│ ├─StreamAgg 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#12, funcs:firstrow(test.t2.c1)->Column#10
│ │ └─IndexReader 10000.00 root index:IndexFullScan
│ │ └─IndexFullScan 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
│ │ └─IndexReader 8000.00 root index:StreamAgg
│ │ └─StreamAgg 8000.00 cop[tikv] group by:test.t2.c1,
│ │ └─IndexFullScan 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
│ └─StreamAgg 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#12, funcs:firstrow(test.t2.c1)->Column#10
│ └─IndexReader 10000.00 root index:IndexFullScan
│ └─IndexFullScan 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
│ └─IndexReader 8000.00 root index:StreamAgg
│ └─StreamAgg 8000.00 cop[tikv] group by:test.t2.c1,
│ └─IndexFullScan 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
└─IndexReader 10000.00 root index:IndexFullScan
└─IndexFullScan 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:false, stats:pseudo
explain format = 'brief' select c1 from t2 union all select c1 from t2 union select c1 from t2;
@@ -184,14 +186,17 @@ id estRows task access object operator info
HashAgg 24000.00 root group by:Column#10, funcs:firstrow(Column#11)->Column#10
└─Union 24000.00 root
├─StreamAgg 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#11, funcs:firstrow(test.t2.c1)->Column#10
│ └─IndexReader 10000.00 root index:IndexFullScan
│ └─IndexFullScan 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
│ └─IndexReader 8000.00 root index:StreamAgg
│ └─StreamAgg 8000.00 cop[tikv] group by:test.t2.c1,
│ └─IndexFullScan 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
├─StreamAgg 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#11, funcs:firstrow(test.t2.c1)->Column#10
│ └─IndexReader 10000.00 root index:IndexFullScan
│ └─IndexFullScan 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
│ └─IndexReader 8000.00 root index:StreamAgg
│ └─StreamAgg 8000.00 cop[tikv] group by:test.t2.c1,
│ └─IndexFullScan 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
└─StreamAgg 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#11, funcs:firstrow(test.t2.c1)->Column#10
└─IndexReader 10000.00 root index:IndexFullScan
└─IndexFullScan 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
└─IndexReader 8000.00 root index:StreamAgg
└─StreamAgg 8000.00 cop[tikv] group by:test.t2.c1,
└─IndexFullScan 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
select * from information_schema.tidb_indexes where table_name='t4';
TABLE_SCHEMA TABLE_NAME NON_UNIQUE KEY_NAME SEQ_IN_INDEX COLUMN_NAME SUB_PART INDEX_COMMENT Expression INDEX_ID IS_VISIBLE CLUSTERED
test t4 0 PRIMARY 1 a NULL NULL 0 YES YES
executor/testdata/executor_suite_out.json
@@ -308,9 +308,10 @@
"HashJoin_7 6400.00 root anti semi join, equal:[nulleq(test.t1.a, test.t3.a)], other cond:nulleq(cast(test.t1.b, decimal(20,0) BINARY), test.t3.b)",
"├─TableReader_18(Build) 10000.00 root data:TableFullScan_17",
"│ └─TableFullScan_17 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo",
"└─HashAgg_10(Probe) 8000.00 root group by:test.t1.a, test.t1.b, funcs:firstrow(test.t1.a)->test.t1.a, funcs:firstrow(test.t1.b)->test.t1.b",
" └─TableReader_15 10000.00 root data:TableFullScan_14",
" └─TableFullScan_14 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo"
"└─HashAgg_12(Probe) 8000.00 root group by:test.t1.a, test.t1.b, funcs:firstrow(test.t1.a)->test.t1.a, funcs:firstrow(test.t1.b)->test.t1.b",
" └─TableReader_13 8000.00 root data:HashAgg_8",
" └─HashAgg_8 8000.00 cop[tikv] group by:test.t1.a, test.t1.b, ",
" └─TableFullScan_11 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo"
],
"Res": [
"1 1",
@@ -324,9 +325,10 @@
"HashJoin_7 6400.00 root semi join, equal:[nulleq(test.t1.a, test.t2.a)], other cond:nulleq(cast(test.t1.b, double BINARY), cast(test.t2.b, double BINARY))",
"├─TableReader_18(Build) 10000.00 root data:TableFullScan_17",
"│ └─TableFullScan_17 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo",
"└─HashAgg_10(Probe) 8000.00 root group by:test.t1.a, test.t1.b, funcs:firstrow(test.t1.a)->test.t1.a, funcs:firstrow(test.t1.b)->test.t1.b",
" └─TableReader_15 10000.00 root data:TableFullScan_14",
" └─TableFullScan_14 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo"
"└─HashAgg_12(Probe) 8000.00 root group by:test.t1.a, test.t1.b, funcs:firstrow(test.t1.a)->test.t1.a, funcs:firstrow(test.t1.b)->test.t1.b",
" └─TableReader_13 8000.00 root data:HashAgg_8",
" └─HashAgg_8 8000.00 cop[tikv] group by:test.t1.a, test.t1.b, ",
" └─TableFullScan_11 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo"
],
"Res": [
"1 1",
@@ -392,9 +394,10 @@
"└─HashJoin_14(Probe) 6400.00 root semi join, equal:[nulleq(test.t1.a, test.t2.a)], other cond:nulleq(cast(test.t1.b, double BINARY), cast(test.t2.b, double BINARY))",
" ├─TableReader_24(Build) 10000.00 root data:TableFullScan_23",
" │ └─TableFullScan_23 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo",
" └─HashAgg_17(Probe) 8000.00 root group by:test.t1.a, test.t1.b, funcs:firstrow(test.t1.a)->test.t1.a, funcs:firstrow(test.t1.b)->test.t1.b",
" └─TableReader_22 10000.00 root data:TableFullScan_21",
" └─TableFullScan_21 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo"
" └─HashAgg_19(Probe) 8000.00 root group by:test.t1.a, test.t1.b, funcs:firstrow(test.t1.a)->test.t1.a, funcs:firstrow(test.t1.b)->test.t1.b",
" └─TableReader_20 8000.00 root data:HashAgg_15",
" └─HashAgg_15 8000.00 cop[tikv] group by:test.t1.a, test.t1.b, ",
" └─TableFullScan_18 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo"
],
"Res": [
"1 1",
@@ -412,9 +415,10 @@
"└─HashJoin_17 6400.00 root semi join, equal:[nulleq(test.t2.a, test.t3.a)], other cond:nulleq(cast(test.t2.b, double BINARY), cast(test.t3.b, double BINARY))",
" ├─TableReader_27(Build) 10000.00 root data:TableFullScan_26",
" │ └─TableFullScan_26 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo",
" └─HashAgg_20(Probe) 8000.00 root group by:test.t2.a, test.t2.b, funcs:firstrow(test.t2.a)->test.t2.a, funcs:firstrow(test.t2.b)->test.t2.b",
" └─TableReader_25 10000.00 root data:TableFullScan_24",
" └─TableFullScan_24 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo"
" └─HashAgg_22(Probe) 8000.00 root group by:test.t2.a, test.t2.b, funcs:firstrow(test.t2.a)->test.t2.a, funcs:firstrow(test.t2.b)->test.t2.b",
" └─TableReader_23 8000.00 root data:HashAgg_18",
" └─HashAgg_18 8000.00 cop[tikv] group by:test.t2.a, test.t2.b, ",
" └─TableFullScan_21 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo"
],
"Res": [
"1 1",
@@ -431,12 +435,14 @@
"├─HashJoin_20(Build) 6400.00 root semi join, equal:[nulleq(test.t2.a, test.t3.a)], other cond:nulleq(cast(test.t2.b, double BINARY), cast(test.t3.b, double BINARY))",
"│ ├─TableReader_31(Build) 10000.00 root data:TableFullScan_30",
"│ │ └─TableFullScan_30 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo",
"│ └─HashAgg_23(Probe) 8000.00 root group by:test.t2.a, test.t2.b, funcs:firstrow(test.t2.a)->test.t2.a, funcs:firstrow(test.t2.b)->test.t2.b",
"│ └─TableReader_28 10000.00 root data:TableFullScan_27",
"│ └─TableFullScan_27 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo",
"└─HashAgg_14(Probe) 8000.00 root group by:test.t1.a, test.t1.b, funcs:firstrow(test.t1.a)->test.t1.a, funcs:firstrow(test.t1.b)->test.t1.b",
" └─TableReader_19 10000.00 root data:TableFullScan_18",
" └─TableFullScan_18 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo"
"│ └─HashAgg_25(Probe) 8000.00 root group by:test.t2.a, test.t2.b, funcs:firstrow(test.t2.a)->test.t2.a, funcs:firstrow(test.t2.b)->test.t2.b",
"│ └─TableReader_26 8000.00 root data:HashAgg_21",
"│ └─HashAgg_21 8000.00 cop[tikv] group by:test.t2.a, test.t2.b, ",
"│ └─TableFullScan_24 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo",
"└─HashAgg_16(Probe) 8000.00 root group by:test.t1.a, test.t1.b, funcs:firstrow(test.t1.a)->test.t1.a, funcs:firstrow(test.t1.b)->test.t1.b",
" └─TableReader_17 8000.00 root data:HashAgg_12",
" └─HashAgg_12 8000.00 cop[tikv] group by:test.t1.a, test.t1.b, ",
" └─TableFullScan_15 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo"
],
"Res": [
"1 1",
@@ -280,12 +280,41 @@ func (p *PhysicalHashJoin) GetPlanCost(taskType property.TaskType) (float64, err

// GetPlanCost calculates the cost of the plan if it has not been calculated yet and returns the cost.
func (p *PhysicalStreamAgg) GetPlanCost(taskType property.TaskType) (float64, error) {
    return 0, errors.New("not implemented")
    if p.planCostInit {
        return p.planCost, nil
    }
    childCost, err := p.children[0].GetPlanCost(taskType)
    if err != nil {
        return 0, err
    }
    p.planCost = childCost
    p.planCost += p.GetCost(p.children[0].StatsCount(), taskType == property.RootTaskType)
    p.planCostInit = true
    return p.planCost, nil
}

// GetPlanCost calculates the cost of the plan if it has not been calculated yet and returns the cost.
func (p *PhysicalHashAgg) GetPlanCost(taskType property.TaskType) (float64, error) {
    return 0, errors.New("not implemented")
    if p.planCostInit {
        return p.planCost, nil
    }
    childCost, err := p.children[0].GetPlanCost(taskType)
    if err != nil {
        return 0, err
    }
    p.planCost = childCost
    switch taskType {
    case property.RootTaskType:
        p.planCost += p.GetCost(p.children[0].StatsCount(), true, false)
    case property.CopSingleReadTaskType, property.CopDoubleReadTaskType:
        p.planCost += p.GetCost(p.children[0].StatsCount(), false, false)
    case property.MppTaskType:
        return 0, errors.New("not implemented")
    default:
        return 0, errors.Errorf("unknown task type %v", taskType)
    }
    p.planCostInit = true
    return p.planCost, nil
}

// GetPlanCost calculates the cost of the plan if it has not been calculated yet and returns the cost.
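The two methods above replace the old one-line stubs (the `return 0, errors.New("not implemented")` lines in the hunk are the removed bodies) with the same bottom-up, memoized pattern: return the cached value when planCostInit is already set, otherwise recurse into the child, add the operator's own GetCost on top, cache it, and return. Below is a minimal, self-contained Go sketch of that pattern for readers who want to see it in isolation; every type, constant, and cost factor here is an illustrative stand-in, not TiDB's actual planner code.

package main

import (
    "errors"
    "fmt"
)

// Hypothetical stand-ins for the planner types (illustrative only).
type taskType int

const (
    rootTaskType taskType = iota
    copTaskType
    mppTaskType
)

type physicalPlan interface {
    GetPlanCost(tp taskType) (float64, error)
    StatsCount() float64
}

type hashAgg struct {
    child        physicalPlan
    planCost     float64
    planCostInit bool
}

// getCost stands in for GetCost(inputRows, isRoot, isMPP); the factors are made up.
func (p *hashAgg) getCost(inputRows float64, isRoot bool) float64 {
    if isRoot {
        return inputRows * 3.0
    }
    return inputRows * 1.0
}

// GetPlanCost is memoized: compute the child cost once, add this operator's own
// cost on top, cache the result, and serve later calls from the cache.
func (p *hashAgg) GetPlanCost(tp taskType) (float64, error) {
    if p.planCostInit {
        return p.planCost, nil
    }
    childCost, err := p.child.GetPlanCost(tp)
    if err != nil {
        return 0, err
    }
    p.planCost = childCost
    switch tp {
    case rootTaskType:
        p.planCost += p.getCost(p.child.StatsCount(), true)
    case copTaskType:
        p.planCost += p.getCost(p.child.StatsCount(), false)
    default:
        return 0, errors.New("not implemented")
    }
    p.planCostInit = true
    return p.planCost, nil
}

func (p *hashAgg) StatsCount() float64 { return 8000 }

// tableScan is a hypothetical leaf operator with a fixed cost.
type tableScan struct{}

func (tableScan) GetPlanCost(taskType) (float64, error) { return 100, nil }
func (tableScan) StatsCount() float64                   { return 10000 }

func main() {
    agg := &hashAgg{child: tableScan{}}
    first, _ := agg.GetPlanCost(rootTaskType)
    second, _ := agg.GetPlanCost(rootTaskType) // served from the cache
    fmt.Println(first, second)                 // 30100 30100
}

The real PhysicalStreamAgg variant needs no switch because its GetCost takes the isRoot flag directly, and the MPP branch is still left unimplemented, matching the diff above.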
@@ -116,6 +116,37 @@ func TestNewCostInterfaceTiKV(t *testing.T) {
"select * from t use index(b) where b+200 < 1000", // pushed down to lookup index-side
"select * from t use index(b) where c+200 < 1000", // pushed down to lookup table-side
"select * from t use index(b) where mod(b+c, 200) < 100", // not pushed down
// aggregation
"select /*+ hash_agg() */ count(*) from t use index(primary) where a < 200",
"select /*+ hash_agg() */ sum(a) from t use index(primary) where a < 200",
"select /*+ hash_agg() */ avg(a), b from t use index(primary) where a < 200 group by b",
"select /*+ stream_agg() */ count(*) from t use index(primary) where a < 200",
"select /*+ stream_agg() */ sum(a) from t use index(primary) where a < 200",
"select /*+ stream_agg() */ avg(a), b from t use index(primary) where a < 200 group by b",
"select /*+ stream_agg() */ avg(d), c from t use index(cd) group by c",
// limit
"select * from t use index(primary) where a < 200 limit 10", // table-scan + limit
"select * from t use index(primary) where a = 200 limit 10",
"select a, b, d from t use index(primary) where a < 200 limit 10",
"select a, b, d from t use index(primary) where a = 200 limit 10",
"select a from t use index(primary) where a < 200 limit 10",
"select a from t use index(primary) where a = 200 limit 10",
"select b from t use index(b) where b < 200 limit 10", // index-scan + limit
"select b from t use index(b) where b = 200 limit 10",
"select c, d from t use index(cd) where c < 200 limit 10",
"select c, d from t use index(cd) where c = 200 limit 10",
"select c, d from t use index(cd) where c = 200 and d < 200 limit 10",
"select d from t use index(cd) where c < 200 limit 10",
"select d from t use index(cd) where c = 200 limit 10",
"select d from t use index(cd) where c = 200 and d < 200 limit 10",
"select * from t use index(b) where b < 200 limit 10", // look-up + limit
"select * from t use index(b) where b = 200 limit 10",
"select a, b from t use index(cd) where c < 200 limit 10",
"select a, b from t use index(cd) where c = 200 limit 10",
"select a, b from t use index(cd) where c = 200 and d < 200 limit 10",
"select * from t use index(cd) where c < 200 limit 10",
"select * from t use index(cd) where c = 200 limit 10",
"select * from t use index(cd) where c = 200 and d < 200 limit 10",
// sort
"select * from t use index(primary) where a < 200 order by a", // table-scan + sort
"select * from t use index(primary) where a = 200 order by a",
@@ -2056,6 +2056,7 @@ func computePartialCursorOffset(name string) int {
func (p *PhysicalStreamAgg) attach2Task(tasks ...task) task {
    t := tasks[0].copy()
    inputRows := t.count()
    final := p
    if cop, ok := t.(*copTask); ok {
        // We should not push agg down across double read, since the data of second read is ordered by handle instead of index.
        // The `extraHandleCol` is added if the double read needs to keep order. So we just use it to decided
@@ -2067,6 +2068,9 @@ func (p *PhysicalStreamAgg) attach2Task(tasks ...task) task {
        } else {
            copTaskType := cop.getStoreType()
            partialAgg, finalAgg := p.newPartialAggregate(copTaskType, false)
            if finalAgg != nil {
                final = finalAgg.(*PhysicalStreamAgg)
            }
            if partialAgg != nil {
                if cop.tablePlan != nil {
                    cop.finishIndexPlan()
@@ -2083,7 +2087,7 @@ func (p *PhysicalStreamAgg) attach2Task(tasks ...task) task {
                    partialAgg.SetChildren(cop.indexPlan)
                    cop.indexPlan = partialAgg
                }
                cop.addCost(p.GetCost(inputRows, false))
                cop.addCost(partialAgg.(*PhysicalStreamAgg).GetCost(inputRows, false))
                partialAgg.SetCost(cop.cost())
            }
            t = cop.convertToRootTask(p.ctx)
@@ -2096,7 +2100,7 @@ func (p *PhysicalStreamAgg) attach2Task(tasks ...task) task {
    } else {
        attachPlan2Task(p, t)
    }
    t.addCost(p.GetCost(inputRows, true))
    t.addCost(final.GetCost(inputRows, true))
    t.plan().SetCost(t.cost())
    return t
}
@@ -2260,10 +2264,14 @@ func (p *PhysicalHashAgg) attach2TaskForMpp(tasks ...task) task {
func (p *PhysicalHashAgg) attach2Task(tasks ...task) task {
    t := tasks[0].copy()
    inputRows := t.count()
    final := p
    if cop, ok := t.(*copTask); ok {
        if len(cop.rootTaskConds) == 0 {
            copTaskType := cop.getStoreType()
            partialAgg, finalAgg := p.newPartialAggregate(copTaskType, false)
            if finalAgg != nil {
                final = finalAgg.(*PhysicalHashAgg)
            }
            if partialAgg != nil {
                if cop.tablePlan != nil {
                    cop.finishIndexPlan()
@@ -2280,7 +2288,7 @@ func (p *PhysicalHashAgg) attach2Task(tasks ...task) task {
                    partialAgg.SetChildren(cop.indexPlan)
                    cop.indexPlan = partialAgg
                }
                cop.addCost(p.GetCost(inputRows, false, false))
                cop.addCost(partialAgg.(*PhysicalHashAgg).GetCost(inputRows, false, false))
            }
            // In `newPartialAggregate`, we are using stats of final aggregation as stats
            // of `partialAgg`, so the network cost of transferring result rows of `partialAgg`
@@ -2313,7 +2321,7 @@ func (p *PhysicalHashAgg) attach2Task(tasks ...task) task {
    // hash aggregation, it would cause under-estimation as the reason mentioned in comment above.
    // To make it simple, we also treat 2-phase parallel hash aggregation in TiDB layer as
    // 1-phase when computing cost.
    t.addCost(p.GetCost(inputRows, true, false))
    t.addCost(final.GetCost(inputRows, true, false))
    t.plan().SetCost(t.cost())
    return t
}
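Both attach2Task methods above change which operator is charged, not how much work is modelled: the cop-side cost is now added via the pushed-down partialAgg and the root-side cost via final (the aggregations actually attached to each task), where previously p was charged for both stages. A rough, self-contained sketch of that two-stage attribution follows; the agg/task types and cost factors are invented for illustration and are not TiDB's.

package main

import "fmt"

// agg is a hypothetical aggregation operator with a per-row cost factor.
type agg struct {
    factor float64
}

// getCost mirrors the shape of GetCost(inputRows, isRoot): root-side work is
// weighted differently from cop-side work in this toy model.
func (a agg) getCost(inputRows float64, isRoot bool) float64 {
    cost := inputRows * a.factor
    if isRoot {
        cost *= 1.5
    }
    return cost
}

// task accumulates cost, loosely like copTask/rootTask do via addCost.
type task struct {
    cost float64
}

func (t *task) addCost(c float64) { t.cost += c }

func main() {
    inputRows := 10000.0
    partial := agg{factor: 1.0} // pushed down to the coprocessor
    final := agg{factor: 1.0}   // left in the TiDB (root) layer

    cop := &task{}
    cop.addCost(partial.getCost(inputRows, false)) // cop task pays for the partial agg

    root := &task{cost: cop.cost}                // cop cost carries over on conversion to a root task
    root.addCost(final.getCost(inputRows, true)) // root task pays for the final agg

    fmt.Printf("cop cost = %.0f, total cost = %.0f\n", cop.cost, root.cost)
}

As the comments in the diff note, both stages are still fed the same inputRows, so a 2-phase hash aggregation is deliberately costed as if it were 1-phase; only the operator that carries each stage's cost changes.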
@@ -7,9 +7,10 @@
"Plan": [
"HashJoin 2.25 root inner join, equal:[eq(test.t1.a, test.t2.a) eq(test.t1.b, test.t2.b)]",
"├─HashAgg(Build) 1.69 root group by:test.t2.a, test.t2.b, funcs:firstrow(test.t2.a)->test.t2.a, funcs:firstrow(test.t2.b)->test.t2.b",
"│ └─TableReader 2.25 root data:Selection",
"│ └─Selection 2.25 cop[tikv] not(isnull(test.t2.a)), not(isnull(test.t2.b))",
"│ └─TableFullScan 4.00 cop[tikv] table:t2 keep order:false",
"│ └─TableReader 1.69 root data:HashAgg",
"│ └─HashAgg 1.69 cop[tikv] group by:test.t2.a, test.t2.b, ",
"│ └─Selection 2.25 cop[tikv] not(isnull(test.t2.a)), not(isnull(test.t2.b))",
"│ └─TableFullScan 4.00 cop[tikv] table:t2 keep order:false",
"└─TableReader(Probe) 2.25 root data:Selection",
" └─Selection 2.25 cop[tikv] not(isnull(test.t1.a)), not(isnull(test.t1.b))",
" └─TableFullScan 4.00 cop[tikv] table:t1 keep order:false"
@@ -36,9 +37,10 @@
"Plan": [
"HashJoin 1.69 root inner join, equal:[eq(test.t2.a, test.t1.a) eq(test.t2.b, Column#7)]",
"├─HashAgg(Build) 1.69 root group by:test.t2.a, test.t2.b, funcs:firstrow(test.t2.a)->test.t2.a, funcs:firstrow(test.t2.b)->test.t2.b",
"│ └─TableReader 2.25 root data:Selection",
"│ └─Selection 2.25 cop[tikv] not(isnull(test.t2.a)), not(isnull(test.t2.b))",
"│ └─TableFullScan 4.00 cop[tikv] table:t2 keep order:false",
"│ └─TableReader 1.69 root data:HashAgg",
"│ └─HashAgg 1.69 cop[tikv] group by:test.t2.a, test.t2.b, ",
"│ └─Selection 2.25 cop[tikv] not(isnull(test.t2.a)), not(isnull(test.t2.b))",
"│ └─TableFullScan 4.00 cop[tikv] table:t2 keep order:false",
"└─HashAgg(Probe) 2.25 root group by:test.t1.a, funcs:count(1)->Column#7, funcs:firstrow(test.t1.a)->test.t1.a",
" └─TableReader 3.00 root data:Selection",
" └─Selection 3.00 cop[tikv] not(isnull(test.t1.a))",
@@ -1646,7 +1646,7 @@
],
"IndexPlan": [
"Sort 199.80 root test_partition_1.t1.a",
"└─IndexJoin 199.80 root inner join, inner:IndexReader, outer key:test_partition_1.t2.b, inner key:test_partition_1.t1.a, equal cond:eq(test_partition_1.t2.b, test_partition_1.t1.a)",
"└─IndexHashJoin 199.80 root inner join, inner:IndexReader, outer key:test_partition_1.t2.b, inner key:test_partition_1.t1.a, equal cond:eq(test_partition_1.t2.b, test_partition_1.t1.a)",
" ├─HashAgg(Build) 159.84 root group by:test_partition_1.t2.b, funcs:firstrow(test_partition_1.t2.b)->test_partition_1.t2.b",
" │ └─IndexReader 159.84 root partition:p0 index:HashAgg",
" │ └─HashAgg 159.84 cop[tikv] group by:test_partition_1.t2.b, ",
@@ -1676,7 +1676,7 @@
],
"IndexPlan": [
"Sort 199.80 root test_partition_1.t1.a",
"└─IndexJoin 199.80 root inner join, inner:IndexReader, outer key:test_partition_1.t1.b, inner key:test_partition_1.t1.a, equal cond:eq(test_partition_1.t1.b, test_partition_1.t1.a)",
"└─IndexHashJoin 199.80 root inner join, inner:IndexReader, outer key:test_partition_1.t1.b, inner key:test_partition_1.t1.a, equal cond:eq(test_partition_1.t1.b, test_partition_1.t1.a)",
" ├─HashAgg(Build) 159.84 root group by:test_partition_1.t1.b, funcs:firstrow(test_partition_1.t1.b)->test_partition_1.t1.b",
" │ └─IndexReader 159.84 root partition:p0 index:HashAgg",
" │ └─HashAgg 159.84 cop[tikv] group by:test_partition_1.t1.b, ",
planner/core/testdata/plan_suite_out.json
@@ -1678,10 +1678,10 @@
{
"SQL": "select count(distinct c) from t group by c;",
"Plan": [
"HashAgg 8000.00 root group by:test.t.c, funcs:count(distinct test.t.c)->Column#5",
"└─IndexReader 8000.00 root index:HashAgg",
" └─HashAgg 8000.00 cop[tikv] group by:test.t.c, ",
" └─IndexFullScan 10000.00 cop[tikv] table:t, index:c(c) keep order:false, stats:pseudo"
"StreamAgg 8000.00 root group by:test.t.c, funcs:count(distinct test.t.c)->Column#5",
"└─IndexReader 8000.00 root index:StreamAgg",
" └─StreamAgg 8000.00 cop[tikv] group by:test.t.c, ",
" └─IndexFullScan 10000.00 cop[tikv] table:t, index:c(c) keep order:true, stats:pseudo"
],
"Result": [
"0",
@@ -1743,13 +1743,13 @@
"Plan": [
"HashAgg 16000.00 root group by:Column#5, funcs:firstrow(Column#6)->Column#3",
"└─PartitionUnion 16000.00 root ",
" ├─HashAgg 8000.00 root group by:Column#15, funcs:firstrow(Column#13)->Column#6, funcs:firstrow(Column#14)->Column#5",
" │ └─Projection 10000.00 root date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#13, date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#14, date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#15",
" │ └─TableReader 10000.00 root data:TableFullScan",
" ├─HashAgg 8000.00 root group by:Column#7, funcs:firstrow(Column#7)->Column#6, funcs:firstrow(Column#7)->Column#5",
" │ └─TableReader 8000.00 root data:HashAgg",
" │ └─HashAgg 8000.00 cop[tikv] group by:date_format(test.tc.timestamp, \"%Y-%m-%d %H\"), ",
" │ └─TableFullScan 10000.00 cop[tikv] table:tc, partition:p2020072312 keep order:false, stats:pseudo",
" └─HashAgg 8000.00 root group by:Column#18, funcs:firstrow(Column#16)->Column#6, funcs:firstrow(Column#17)->Column#5",
" └─Projection 10000.00 root date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#16, date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#17, date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#18",
" └─TableReader 10000.00 root data:TableFullScan",
" └─HashAgg 8000.00 root group by:Column#10, funcs:firstrow(Column#10)->Column#6, funcs:firstrow(Column#10)->Column#5",
" └─TableReader 8000.00 root data:HashAgg",
" └─HashAgg 8000.00 cop[tikv] group by:date_format(test.tc.timestamp, \"%Y-%m-%d %H\"), ",
" └─TableFullScan 10000.00 cop[tikv] table:tc, partition:p2020072313 keep order:false, stats:pseudo"
],
"Result": null
@@ -1926,13 +1926,13 @@
"Plan": [
"HashAgg 16000.00 root group by:Column#5, funcs:firstrow(Column#6)->Column#3",
"└─PartitionUnion 16000.00 root ",
" ├─HashAgg 8000.00 root group by:Column#15, funcs:firstrow(Column#13)->Column#6, funcs:firstrow(Column#14)->Column#5",
" │ └─Projection 10000.00 root date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#13, date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#14, date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#15",
" │ └─TableReader 10000.00 root data:TableFullScan",
" ├─HashAgg 8000.00 root group by:Column#7, funcs:firstrow(Column#7)->Column#6, funcs:firstrow(Column#7)->Column#5",
" │ └─TableReader 8000.00 root data:HashAgg",
" │ └─HashAgg 8000.00 cop[tikv] group by:date_format(test.tc.timestamp, \"%Y-%m-%d %H\"), ",
" │ └─TableFullScan 10000.00 cop[tikv] table:tc, partition:p2020072312 keep order:false, stats:pseudo",
" └─HashAgg 8000.00 root group by:Column#18, funcs:firstrow(Column#16)->Column#6, funcs:firstrow(Column#17)->Column#5",
" └─Projection 10000.00 root date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#16, date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#17, date_format(test.tc.timestamp, %Y-%m-%d %H)->Column#18",
" └─TableReader 10000.00 root data:TableFullScan",
" └─HashAgg 8000.00 root group by:Column#10, funcs:firstrow(Column#10)->Column#6, funcs:firstrow(Column#10)->Column#5",
" └─TableReader 8000.00 root data:HashAgg",
" └─HashAgg 8000.00 cop[tikv] group by:date_format(test.tc.timestamp, \"%Y-%m-%d %H\"), ",
" └─TableFullScan 10000.00 cop[tikv] table:tc, partition:p2020072313 keep order:false, stats:pseudo"
],
"Result": null