planner: support cost model for tiflash table scan (#12868)
Committed by: pingcap-github-bot
Parent: 36f0f37b40
Commit: 6fd74f3e4c
@@ -15,9 +15,9 @@ IndexReader_6 3323.33 root index:IndexScan_5
└─IndexScan_5 3323.33 cop[tikv] table:access_path_selection, index:a, b, range:[-inf,3), keep order:false, stats:pseudo
explain select a, b from access_path_selection where b < 3;
id count task operator info
IndexReader_13 3323.33 root index:Selection_12
└─Selection_12 3323.33 cop[tikv] lt(Column#2, 3)
└─IndexScan_11 10000.00 cop[tikv] table:access_path_selection, index:a, b, range:[NULL,+inf], keep order:false, stats:pseudo
TableReader_7 3323.33 root data:Selection_6
└─Selection_6 3323.33 cop[tikv] lt(Column#2, 3)
└─TableScan_5 10000.00 cop[tikv] table:access_path_selection, range:[-inf,+inf], keep order:false, stats:pseudo
explain select a, b from access_path_selection where a < 3 and b < 3;
id count task operator info
IndexReader_11 1104.45 root index:Selection_10
@@ -39,6 +39,6 @@ StreamAgg_13 1.00 root funcs:max(Column#3)
explain select count(1) from access_path_selection;
id count task operator info
StreamAgg_28 1.00 root funcs:count(Column#19)
└─IndexReader_29 1.00 root index:StreamAgg_8
└─TableReader_29 1.00 root data:StreamAgg_8
└─StreamAgg_8 1.00 cop[tikv] funcs:count(1)
└─IndexScan_25 10000.00 cop[tikv] table:access_path_selection, index:a, range:[NULL,+inf], keep order:false, stats:pseudo
└─TableScan_24 10000.00 cop[tikv] table:access_path_selection, range:[-inf,+inf], keep order:false, stats:pseudo
@@ -64,8 +64,8 @@ explain select count(b.c2) from t1 a, t2 b where a.c1 = b.c2 group by a.c1;
id count task operator info
Projection_11 9990.00 root cast(Column#9)
└─HashLeftJoin_21 9990.00 root inner join, inner:HashAgg_28, equal:[eq(Column#1, Column#5)]
├─IndexReader_36 10000.00 root index:IndexScan_35
│ └─IndexScan_35 10000.00 cop[tikv] table:a, index:c2, range:[NULL,+inf], keep order:false, stats:pseudo
├─TableReader_34 10000.00 root data:TableScan_33
│ └─TableScan_33 10000.00 cop[tikv] table:a, range:[-inf,+inf], keep order:false, stats:pseudo
└─HashAgg_28 7992.00 root group by:Column#12, funcs:count(Column#10), firstrow(Column#12)
└─TableReader_29 7992.00 root data:HashAgg_23
└─HashAgg_23 7992.00 cop[tikv] group by:Column#5, funcs:count(Column#5)
@@ -93,16 +93,16 @@ id count task operator info
StreamAgg_12 1.00 root funcs:sum(Column#12)
└─Projection_23 10000.00 root cast(Column#8)
└─HashLeftJoin_22 10000.00 root CARTESIAN left outer semi join, inner:IndexReader_21, other cond:eq(Column#1, Column#4)
├─IndexReader_17 10000.00 root index:IndexScan_16
│ └─IndexScan_16 10000.00 cop[tikv] table:t1, index:c2, range:[NULL,+inf], keep order:false, stats:pseudo
├─TableReader_15 10000.00 root data:TableScan_14
│ └─TableScan_14 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
└─IndexReader_21 10000.00 root index:IndexScan_20
└─IndexScan_20 10000.00 cop[tikv] table:t2, index:c1, range:[NULL,+inf], keep order:false, stats:pseudo
explain select c1 from t1 where c1 in (select c2 from t2);
id count task operator info
Projection_9 9990.00 root Column#1
└─HashLeftJoin_19 9990.00 root inner join, inner:HashAgg_23, equal:[eq(Column#1, Column#5)]
├─IndexReader_34 10000.00 root index:IndexScan_33
│ └─IndexScan_33 10000.00 cop[tikv] table:t1, index:c2, range:[NULL,+inf], keep order:false, stats:pseudo
├─TableReader_32 10000.00 root data:TableScan_31
│ └─TableScan_31 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
└─HashAgg_23 7992.00 root group by:Column#5, funcs:firstrow(Column#5)
└─TableReader_30 9990.00 root data:Selection_29
└─Selection_29 9990.00 cop[tikv] not(isnull(Column#5))
@@ -123,8 +123,8 @@ explain select c2 = (select c2 from t2 where t1.c1 = t2.c1 order by c1 limit 1)
id count task operator info
Projection_12 10000.00 root eq(Column#2, Column#5)
└─Apply_14 10000.00 root CARTESIAN left outer join, inner:Projection_43
├─IndexReader_18 10000.00 root index:IndexScan_17
│ └─IndexScan_17 10000.00 cop[tikv] table:t1, index:c2, range:[NULL,+inf], keep order:false, stats:pseudo
├─TableReader_16 10000.00 root data:TableScan_15
│ └─TableScan_15 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
└─Projection_43 1.00 root Column#4, Column#5
└─IndexLookUp_42 1.00 root limit embedded(offset:0, count:1)
├─Limit_41 1.00 cop[tikv] offset:0, count:1
@@ -154,12 +154,12 @@ Limit_8 1.00 root offset:0, count:1
└─TableScan_11 3.00 cop[tikv] table:t4, range:(1,+inf], keep order:false, stats:pseudo
explain select ifnull(null, t1.c1) from t1;
id count task operator info
IndexReader_7 10000.00 root index:IndexScan_6
└─IndexScan_6 10000.00 cop[tikv] table:t1, index:c2, range:[NULL,+inf], keep order:false, stats:pseudo
TableReader_5 10000.00 root data:TableScan_4
└─TableScan_4 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
explain select if(10, t1.c1, t1.c2) from t1;
id count task operator info
IndexReader_7 10000.00 root index:IndexScan_6
└─IndexScan_6 10000.00 cop[tikv] table:t1, index:c2, range:[NULL,+inf], keep order:false, stats:pseudo
TableReader_5 10000.00 root data:TableScan_4
└─TableScan_4 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
explain select c1 from t2 union select c1 from t2 union all select c1 from t2;
id count task operator info
Union_17 26000.00 root
@@ -198,16 +198,16 @@ explain select 1 from (select count(c2), count(c3) from t1) k;
id count task operator info
Projection_5 1.00 root 1
└─StreamAgg_21 1.00 root funcs:firstrow(Column#15)
└─IndexReader_22 1.00 root index:StreamAgg_9
└─TableReader_22 1.00 root data:StreamAgg_9
└─StreamAgg_9 1.00 cop[tikv] funcs:firstrow(1)
└─IndexScan_20 10000.00 cop[tikv] table:t1, index:c2, range:[NULL,+inf], keep order:false, stats:pseudo
└─TableScan_19 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
explain select count(1) from (select max(c2), count(c3) as m from t1) k;
id count task operator info
StreamAgg_11 1.00 root funcs:count(1)
└─StreamAgg_27 1.00 root funcs:firstrow(Column#15)
└─IndexReader_28 1.00 root index:StreamAgg_15
└─TableReader_28 1.00 root data:StreamAgg_15
└─StreamAgg_15 1.00 cop[tikv] funcs:firstrow(1)
└─IndexScan_26 10000.00 cop[tikv] table:t1, index:c2, range:[NULL,+inf], keep order:false, stats:pseudo
└─TableScan_25 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
explain select count(1) from (select count(c2) from t1 group by c3) k;
id count task operator info
StreamAgg_11 1.00 root funcs:count(1)
@@ -220,16 +220,16 @@ id count task operator info
StreamAgg_12 1.00 root funcs:sum(Column#12)
└─Projection_23 10000.00 root cast(Column#8)
└─HashLeftJoin_22 10000.00 root CARTESIAN left outer semi join, inner:IndexReader_21, other cond:eq(Column#1, Column#4)
├─IndexReader_17 10000.00 root index:IndexScan_16
│ └─IndexScan_16 10000.00 cop[tikv] table:t1, index:c2, range:[NULL,+inf], keep order:false, stats:pseudo
├─TableReader_15 10000.00 root data:TableScan_14
│ └─TableScan_14 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
└─IndexReader_21 10000.00 root index:IndexScan_20
└─IndexScan_20 10000.00 cop[tikv] table:t2, index:c1, range:[NULL,+inf], keep order:false, stats:pseudo
explain select 1 in (select c2 from t2) from t1;
id count task operator info
Projection_6 10000.00 root Column#8
└─HashLeftJoin_7 10000.00 root CARTESIAN left outer semi join, inner:TableReader_14
├─IndexReader_11 10000.00 root index:IndexScan_10
│ └─IndexScan_10 10000.00 cop[tikv] table:t1, index:c2, range:[NULL,+inf], keep order:false, stats:pseudo
├─TableReader_9 10000.00 root data:TableScan_8
│ └─TableScan_8 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
└─TableReader_14 10.00 root data:Selection_13
└─Selection_13 10.00 cop[tikv] eq(1, Column#5)
└─TableScan_12 10000.00 cop[tikv] table:t2, range:[-inf,+inf], keep order:false, stats:pseudo
@@ -238,8 +238,8 @@ id count task operator info
StreamAgg_12 1.00 root funcs:sum(Column#12)
└─Projection_22 10000.00 root cast(Column#8)
└─HashLeftJoin_21 10000.00 root CARTESIAN left outer semi join, inner:TableReader_20
├─IndexReader_17 10000.00 root index:IndexScan_16
│ └─IndexScan_16 10000.00 cop[tikv] table:t1, index:c2, range:[NULL,+inf], keep order:false, stats:pseudo
├─TableReader_15 10000.00 root data:TableScan_14
│ └─TableScan_14 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
└─TableReader_20 10.00 root data:Selection_19
└─Selection_19 10.00 cop[tikv] eq(6, Column#5)
└─TableScan_18 10000.00 cop[tikv] table:t2, range:[-inf,+inf], keep order:false, stats:pseudo
@@ -253,14 +253,14 @@ color=black
label = "root"
"StreamAgg_12" -> "Projection_23"
"Projection_23" -> "HashLeftJoin_22"
"HashLeftJoin_22" -> "IndexReader_17"
"HashLeftJoin_22" -> "TableReader_15"
"HashLeftJoin_22" -> "IndexReader_21"
}
subgraph cluster16{
subgraph cluster14{
node [style=filled, color=lightgrey]
color=black
label = "cop"
"IndexScan_16"
"TableScan_14"
}
subgraph cluster20{
node [style=filled, color=lightgrey]
@@ -268,7 +268,7 @@ color=black
label = "cop"
"IndexScan_20"
}
"IndexReader_17" -> "IndexScan_16"
"TableReader_15" -> "TableScan_14"
"IndexReader_21" -> "IndexScan_20"
}
@@ -281,14 +281,14 @@ node [style=filled, color=lightgrey]
color=black
label = "root"
"Projection_6" -> "HashLeftJoin_7"
"HashLeftJoin_7" -> "IndexReader_11"
"HashLeftJoin_7" -> "TableReader_9"
"HashLeftJoin_7" -> "TableReader_14"
}
subgraph cluster10{
subgraph cluster8{
node [style=filled, color=lightgrey]
color=black
label = "cop"
"IndexScan_10"
"TableScan_8"
}
subgraph cluster13{
node [style=filled, color=lightgrey]
@@ -296,7 +296,7 @@ color=black
label = "cop"
"Selection_13" -> "TableScan_12"
}
"IndexReader_11" -> "IndexScan_10"
"TableReader_9" -> "TableScan_8"
"TableReader_14" -> "Selection_13"
}
@@ -415,9 +415,9 @@ IndexReader_6 10.00 root index:IndexScan_5
└─IndexScan_5 10.00 cop[tikv] table:t, index:a, b, range:[1,1], keep order:false, stats:pseudo
explain select * from t where b in (1, 2) and b in (1, 3);
id count task operator info
IndexReader_10 10.00 root index:Selection_9
└─Selection_9 10.00 cop[tikv] eq(Column#2, 1)
└─IndexScan_8 10000.00 cop[tikv] table:t, index:a, b, range:[NULL,+inf], keep order:false, stats:pseudo
TableReader_7 10.00 root data:Selection_6
└─Selection_6 10.00 cop[tikv] in(Column#2, 1, 2), in(Column#2, 1, 3)
└─TableScan_5 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo
explain select * from t where a = 1 and a = 1;
id count task operator info
IndexReader_6 10.00 root index:IndexScan_5
@@ -431,20 +431,20 @@ TableDual_5 0.00 root rows:0
explain select * from t t1 join t t2 where t1.b = t2.b and t2.b is null;
id count task operator info
Projection_7 0.00 root Column#1, Column#2, Column#4, Column#5
└─HashRightJoin_9 0.00 root inner join, inner:IndexReader_15, equal:[eq(Column#5, Column#2)]
├─IndexReader_15 0.00 root index:Selection_14
│ └─Selection_14 0.00 cop[tikv] isnull(Column#5), not(isnull(Column#5))
│ └─IndexScan_13 10000.00 cop[tikv] table:t2, index:a, b, range:[NULL,+inf], keep order:false, stats:pseudo
└─IndexReader_21 9990.00 root index:Selection_20
└─Selection_20 9990.00 cop[tikv] not(isnull(Column#2))
└─IndexScan_19 10000.00 cop[tikv] table:t1, index:a, b, range:[NULL,+inf], keep order:false, stats:pseudo
└─HashRightJoin_9 0.00 root inner join, inner:TableReader_12, equal:[eq(Column#5, Column#2)]
├─TableReader_12 0.00 root data:Selection_11
│ └─Selection_11 0.00 cop[tikv] isnull(Column#5), not(isnull(Column#5))
│ └─TableScan_10 10000.00 cop[tikv] table:t2, range:[-inf,+inf], keep order:false, stats:pseudo
└─TableReader_18 9990.00 root data:Selection_17
└─Selection_17 9990.00 cop[tikv] not(isnull(Column#2))
└─TableScan_16 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
explain select * from t t1 where not exists (select * from t t2 where t1.b = t2.b);
id count task operator info
HashLeftJoin_9 8000.00 root anti semi join, inner:IndexReader_17, equal:[eq(Column#2, Column#5)]
├─IndexReader_13 10000.00 root index:IndexScan_12
│ └─IndexScan_12 10000.00 cop[tikv] table:t1, index:a, b, range:[NULL,+inf], keep order:false, stats:pseudo
└─IndexReader_17 10000.00 root index:IndexScan_16
└─IndexScan_16 10000.00 cop[tikv] table:t2, index:a, b, range:[NULL,+inf], keep order:false, stats:pseudo
HashLeftJoin_9 8000.00 root anti semi join, inner:TableReader_15, equal:[eq(Column#2, Column#5)]
├─TableReader_11 10000.00 root data:TableScan_10
│ └─TableScan_10 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
└─TableReader_15 10000.00 root data:TableScan_14
└─TableScan_14 10000.00 cop[tikv] table:t2, range:[-inf,+inf], keep order:false, stats:pseudo
drop table if exists t;
create table t(a bigint primary key);
explain select * from t where a = 1 and a = 2;
@@ -67,8 +67,8 @@ explain select count(b.c2) from t1 a, t2 b where a.c1 = b.c2 group by a.c1;
id count task operator info
Projection_11 1985.00 root cast(Column#9)
└─HashLeftJoin_21 1985.00 root inner join, inner:HashAgg_25, equal:[eq(Column#1, Column#5)]
├─IndexReader_36 1999.00 root index:IndexScan_35
│ └─IndexScan_35 1999.00 cop[tikv] table:a, index:c2, range:[NULL,+inf], keep order:false
├─TableReader_34 1999.00 root data:TableScan_33
│ └─TableScan_33 1999.00 cop[tikv] table:a, range:[-inf,+inf], keep order:false
└─HashAgg_25 1985.00 root group by:Column#5, funcs:count(Column#5), firstrow(Column#5)
└─TableReader_32 1985.00 root data:Selection_31
└─Selection_31 1985.00 cop[tikv] not(isnull(Column#5))
@@ -94,8 +94,8 @@ explain select c1 from t1 where c1 in (select c2 from t2);
id count task operator info
Projection_9 1985.00 root Column#1
└─HashLeftJoin_19 1985.00 root inner join, inner:HashAgg_23, equal:[eq(Column#1, Column#5)]
├─IndexReader_34 1999.00 root index:IndexScan_33
│ └─IndexScan_33 1999.00 cop[tikv] table:t1, index:c2, range:[NULL,+inf], keep order:false
├─TableReader_32 1999.00 root data:TableScan_31
│ └─TableScan_31 1999.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false
└─HashAgg_23 1985.00 root group by:Column#5, funcs:firstrow(Column#5)
└─TableReader_30 1985.00 root data:Selection_29
└─Selection_29 1985.00 cop[tikv] not(isnull(Column#5))
@@ -107,8 +107,8 @@ explain select c2 = (select c2 from t2 where t1.c1 = t2.c1 order by c1 limit 1)
id count task operator info
Projection_12 1999.00 root eq(Column#2, Column#5)
└─Apply_14 1999.00 root CARTESIAN left outer join, inner:Projection_43
├─IndexReader_18 1999.00 root index:IndexScan_17
│ └─IndexScan_17 1999.00 cop[tikv] table:t1, index:c2, range:[NULL,+inf], keep order:false
├─TableReader_16 1999.00 root data:TableScan_15
│ └─TableScan_15 1999.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false
└─Projection_43 1.00 root Column#4, Column#5
└─IndexLookUp_42 1.00 root limit embedded(offset:0, count:1)
├─Limit_41 1.00 cop[tikv] offset:0, count:1
@@ -125,8 +125,8 @@ explain select 1 in (select c2 from t2) from t1;
id count task operator info
Projection_6 1999.00 root Column#8
└─HashLeftJoin_7 1999.00 root CARTESIAN left outer semi join, inner:TableReader_14
├─IndexReader_11 1999.00 root index:IndexScan_10
│ └─IndexScan_10 1999.00 cop[tikv] table:t1, index:c2, range:[NULL,+inf], keep order:false
├─TableReader_9 1999.00 root data:TableScan_8
│ └─TableScan_8 1999.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false
└─TableReader_14 0.00 root data:Selection_13
└─Selection_13 0.00 cop[tikv] eq(1, Column#5)
└─TableScan_12 1985.00 cop[tikv] table:t2, range:[-inf,+inf], keep order:false
@@ -139,14 +139,14 @@ node [style=filled, color=lightgrey]
color=black
label = "root"
"Projection_6" -> "HashLeftJoin_7"
"HashLeftJoin_7" -> "IndexReader_11"
"HashLeftJoin_7" -> "TableReader_9"
"HashLeftJoin_7" -> "TableReader_14"
}
subgraph cluster10{
subgraph cluster8{
node [style=filled, color=lightgrey]
color=black
label = "cop"
"IndexScan_10"
"TableScan_8"
}
subgraph cluster13{
node [style=filled, color=lightgrey]
@@ -154,7 +154,7 @@ color=black
label = "cop"
"Selection_13" -> "TableScan_12"
}
"IndexReader_11" -> "IndexScan_10"
"TableReader_9" -> "TableScan_8"
"TableReader_14" -> "Selection_13"
}
@@ -257,9 +257,9 @@ create table t (a int, b int, c int, key idx(a, b, c));
explain select count(a) from t;
id count task operator info
StreamAgg_20 1.00 root funcs:count(Column#14)
└─IndexReader_21 1.00 root index:StreamAgg_8
└─TableReader_21 1.00 root data:StreamAgg_8
└─StreamAgg_8 1.00 cop[tikv] funcs:count(Column#1)
└─IndexScan_19 10000.00 cop[tikv] table:t, index:a, b, c, range:[NULL,+inf], keep order:false, stats:pseudo
└─TableScan_18 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo
select count(a) from t;
count(a)
0
@@ -218,8 +218,8 @@ explain select /*+ TIDB_INLJ(t2) */ * from t t1 join t t2 on t1.a = t2.a limit 5
id count task operator info
Limit_11 5.00 root offset:0, count:5
└─IndexMergeJoin_19 5.00 root inner join, inner:IndexReader_17, outer key:Column#1, inner key:Column#3
├─IndexReader_25 4.00 root index:IndexScan_24
│ └─IndexScan_24 4.00 cop[tikv] table:t1, index:a, range:[NULL,+inf], keep order:false, stats:pseudo
├─TableReader_23 4.00 root data:TableScan_22
│ └─TableScan_22 4.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
└─IndexReader_17 1.25 root index:IndexScan_16
└─IndexScan_16 1.25 cop[tikv] table:t2, index:a, range: decided by [eq(Column#3, Column#1)], keep order:true, stats:pseudo
explain select /*+ TIDB_INLJ(t2) */ * from t t1 left join t t2 on t1.a = t2.a where t2.a is null limit 5;
@@ -227,8 +227,8 @@ id count task operator info
Limit_12 5.00 root offset:0, count:5
└─Selection_13 5.00 root isnull(Column#3)
└─IndexMergeJoin_21 5.00 root left outer join, inner:IndexReader_19, outer key:Column#1, inner key:Column#3
├─IndexReader_27 4.00 root index:IndexScan_26
│ └─IndexScan_26 4.00 cop[tikv] table:t1, index:a, range:[NULL,+inf], keep order:false, stats:pseudo
├─TableReader_25 4.00 root data:TableScan_24
│ └─TableScan_24 4.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
└─IndexReader_19 1.25 root index:IndexScan_18
└─IndexScan_18 1.25 cop[tikv] table:t2, index:a, range: decided by [eq(Column#3, Column#1)], keep order:true, stats:pseudo
explain select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 on t1.a = t2.a limit 5;
@@ -251,17 +251,17 @@ Limit_12 5.00 root offset:0, count:5
explain select /*+ TIDB_HJ(t1, t2) */ * from t t1 join t t2 on t1.a = t2.a limit 5;
id count task operator info
Limit_11 5.00 root offset:0, count:5
└─HashLeftJoin_31 5.00 root inner join, inner:IndexReader_40, equal:[eq(Column#1, Column#3)]
├─IndexReader_36 4.00 root index:IndexScan_35
│ └─IndexScan_35 4.00 cop[tikv] table:t1, index:a, range:[NULL,+inf], keep order:false, stats:pseudo
└─IndexReader_40 10000.00 root index:IndexScan_39
└─IndexScan_39 10000.00 cop[tikv] table:t2, index:a, range:[NULL,+inf], keep order:false, stats:pseudo
└─HashLeftJoin_31 5.00 root inner join, inner:TableReader_38, equal:[eq(Column#1, Column#3)]
├─TableReader_34 4.00 root data:TableScan_33
│ └─TableScan_33 4.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
└─TableReader_38 10000.00 root data:TableScan_37
└─TableScan_37 10000.00 cop[tikv] table:t2, range:[-inf,+inf], keep order:false, stats:pseudo
explain select /*+ TIDB_HJ(t1, t2) */ * from t t1 left join t t2 on t1.a = t2.a where t2.a is null limit 5;
id count task operator info
Limit_12 5.00 root offset:0, count:5
└─Selection_13 5.00 root isnull(Column#3)
└─HashLeftJoin_24 5.00 root left outer join, inner:IndexReader_32, equal:[eq(Column#1, Column#3)]
├─IndexReader_28 4.00 root index:IndexScan_27
│ └─IndexScan_27 4.00 cop[tikv] table:t1, index:a, range:[NULL,+inf], keep order:false, stats:pseudo
└─IndexReader_32 10000.00 root index:IndexScan_31
└─IndexScan_31 10000.00 cop[tikv] table:t2, index:a, range:[NULL,+inf], keep order:false, stats:pseudo
└─HashLeftJoin_24 5.00 root left outer join, inner:TableReader_30, equal:[eq(Column#1, Column#3)]
├─TableReader_26 4.00 root data:TableScan_25
│ └─TableScan_25 4.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
└─TableReader_30 10000.00 root data:TableScan_29
└─TableScan_29 10000.00 cop[tikv] table:t2, range:[-inf,+inf], keep order:false, stats:pseudo
@@ -23,6 +23,7 @@ import (
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/expression/aggregation"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/planner/property"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
@@ -513,7 +514,7 @@ func (p *LogicalJoin) buildIndexJoinInner2TableScan(
outerIdx int, us *LogicalUnionScan, avgInnerRowCnt float64) (joins []PhysicalPlan) {
var tblPath *accessPath
for _, path := range ds.possibleAccessPaths {
if path.isTablePath {
if path.isTablePath && path.storeType == kv.TiKV {
tblPath = path
break
}
@@ -678,7 +679,7 @@ func (p *LogicalJoin) constructInnerTableScanTask(
for i := range ds.stats.Cardinality {
ds.stats.Cardinality[i] = 1
}
rowSize := ds.TblColHists.GetAvgRowSize(ds.TblCols, false)
rowSize := ds.TblColHists.GetTableAvgRowSize(ds.TblCols, ts.StoreType, true)
sessVars := ds.ctx.GetSessionVars()
copTask := &copTask{
tablePlan: ts,
@@ -761,7 +762,7 @@ func (p *LogicalJoin) constructInnerIndexScanTask(
cop.tablePlan = ts
}
is.initSchema(ds.id, path.index, path.fullIdxCols, cop.tablePlan != nil)
rowSize := is.indexScanRowSize(path.index, ds)
rowSize := is.indexScanRowSize(path.index, ds, true)
sessVars := ds.ctx.GetSessionVars()
cop.cst = rowCount * rowSize * sessVars.ScanFactor
indexConds, tblConds := splitIndexFilterConditions(filterConds, path.fullIdxCols, path.fullIdxColLens, ds.tableInfo)
@@ -355,6 +355,9 @@ func (ds *DataSource) skylinePruning(prop *property.PhysicalProperty) []*candida
}
pruned := false
for i := len(candidates) - 1; i >= 0; i-- {
if candidates[i].path.storeType == kv.TiFlash {
continue
}
result := compareCandidates(candidates[i], currentCandidate)
if result == 1 {
pruned = true
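As context for the hunk above: keeping TiFlash candidates out of the skyline comparison means a columnar path is never pruned by (and never prunes) a TiKV path on heuristics alone; the choice is deferred to the cost model. Below is a minimal standalone sketch of that filtering idea, using hypothetical candidate/store names rather than the planner's real types.

```go
package main

import "fmt"

type candidate struct {
	store string // "tikv" or "tiflash"
	name  string
}

// pruneCandidates keeps every TiFlash candidate and drops any TiKV candidate
// dominated by `current`, mirroring the `continue` added in skylinePruning.
func pruneCandidates(cands []candidate, current candidate, dominates func(a, b candidate) bool) []candidate {
	kept := make([]candidate, 0, len(cands))
	for _, c := range cands {
		if c.store == "tiflash" {
			kept = append(kept, c) // never pruned here; costed later
			continue
		}
		if !dominates(current, c) {
			kept = append(kept, c)
		}
	}
	return kept
}

func main() {
	cands := []candidate{{"tikv", "idx_a"}, {"tiflash", "table"}}
	dominatesAll := func(a, b candidate) bool { return true }
	fmt.Println(pruneCandidates(cands, candidate{"tikv", "table"}, dominatesAll))
	// Even with a dominance test that always succeeds, the TiFlash path survives.
}
```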
@@ -514,7 +517,7 @@ func (ds *DataSource) convertToPartialIndexScan(prop *property.PhysicalProperty,
isCovered bool) {
idx := path.index
is, partialCost, rowCount := ds.getOriginalPhysicalIndexScan(prop, path, false, false)
rowSize := is.indexScanRowSize(idx, ds)
rowSize := is.indexScanRowSize(idx, ds, false)
isCovered = isCoveringIndex(ds.schema.Columns, path.fullIdxCols, path.fullIdxColLens, ds.tableInfo.PKIsHandle)
indexConds := path.indexFilters
sessVars := ds.ctx.GetSessionVars()
@@ -584,7 +587,7 @@ func (ds *DataSource) buildIndexMergeTableScan(prop *property.PhysicalProperty,
}
}
}
rowSize := ds.TblColHists.GetAvgRowSize(ds.TblCols, false)
rowSize := ds.TblColHists.GetTableAvgRowSize(ds.TblCols, ts.StoreType, true)
partialCost += totalRowCount * rowSize * sessVars.ScanFactor
ts.stats = ds.tableStats.ScaleByExpectCnt(totalRowCount)
if ds.statisticTable.Pseudo {
@@ -696,7 +699,7 @@ func (ds *DataSource) convertToIndexScan(prop *property.PhysicalProperty, candid
return task, nil
}

func (is *PhysicalIndexScan) indexScanRowSize(idx *model.IndexInfo, ds *DataSource) float64 {
func (is *PhysicalIndexScan) indexScanRowSize(idx *model.IndexInfo, ds *DataSource, isForScan bool) float64 {
scanCols := make([]*expression.Column, 0, len(idx.Columns)+1)
// If `initSchema` has already appended the handle column in schema, just use schema columns, otherwise, add extra handle column.
if len(idx.Columns) == len(is.schema.Columns) {
@@ -708,6 +711,9 @@ func (is *PhysicalIndexScan) indexScanRowSize(idx *model.IndexInfo, ds *DataSour
} else {
scanCols = is.schema.Columns
}
if isForScan {
return ds.TblColHists.GetIndexAvgRowSize(scanCols, is.Index.Unique)
}
return ds.TblColHists.GetAvgRowSize(scanCols, true)
}
@@ -1061,7 +1067,14 @@ func (ds *DataSource) getOriginalPhysicalTableScan(prop *property.PhysicalProper
// we still need to assume values are uniformly distributed. For simplicity, we use uniform-assumption
// for all columns now, as we do in `deriveStatsByFilter`.
ts.stats = ds.tableStats.ScaleByExpectCnt(rowCount)
rowSize := ds.TblColHists.GetAvgRowSize(ds.TblCols, false)
var rowSize float64
if ts.StoreType == kv.TiKV {
rowSize = ds.TblColHists.GetTableAvgRowSize(ds.TblCols, ts.StoreType, true)
} else {
// If `ds.handleCol` is nil, then the schema of tableScan doesn't have handle column.
// This logic can be ensured in column pruning.
rowSize = ds.TblColHists.GetTableAvgRowSize(ts.Schema().Columns, ts.StoreType, ds.handleCol != nil)
}
sessVars := ds.ctx.GetSessionVars()
cost := rowCount * rowSize * sessVars.ScanFactor
if isMatchProp {
@@ -1071,6 +1084,12 @@ func (ds *DataSource) getOriginalPhysicalTableScan(prop *property.PhysicalProper
}
ts.KeepOrder = true
}
switch ts.StoreType {
case kv.TiKV:
cost += float64(len(ts.Ranges)) * sessVars.SeekFactor
case kv.TiFlash:
cost += float64(len(ts.Ranges)) * float64(len(ts.Columns)) * sessVars.SeekFactor
}
return ts, cost, rowCount
}

@@ -1104,7 +1123,7 @@ func (ds *DataSource) getOriginalPhysicalIndexScan(prop *property.PhysicalProper
rowCount = math.Min(prop.ExpectedCnt/selectivity, rowCount)
}
is.stats = ds.tableStats.ScaleByExpectCnt(rowCount)
rowSize := is.indexScanRowSize(idx, ds)
rowSize := is.indexScanRowSize(idx, ds, true)
sessVars := ds.ctx.GetSessionVars()
cost := rowCount * rowSize * sessVars.ScanFactor
if isMatchProp {
@@ -1114,5 +1133,6 @@ func (ds *DataSource) getOriginalPhysicalIndexScan(prop *property.PhysicalProper
}
is.KeepOrder = true
}
cost += float64(len(is.Ranges)) * sessVars.SeekFactor
return is, cost, rowCount
}
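Read together, the table-scan and index-scan hunks above give the new IO cost shape: scanned bytes are charged at ScanFactor, and each range start is charged at SeekFactor, except that a TiFlash table scan pays the seek once per range per accessed column (a column store opens data per column). The standalone sketch below restates those formulas with illustrative parameter names and the defaults introduced in this patch (tidb_opt_scan_factor = 1.5, tidb_opt_seek_factor = 20); it is not the planner's real code.

```go
package main

import "fmt"

type storeType int

const (
	tiKV storeType = iota
	tiFlash
)

// tableScanCost mirrors getOriginalPhysicalTableScan above: bytes scanned at
// scanFactor, plus a per-range seek that is additionally multiplied by the
// number of accessed columns on TiFlash.
func tableScanCost(store storeType, rowCount, rowSize float64, ranges, cols int, scanFactor, seekFactor float64) float64 {
	cost := rowCount * rowSize * scanFactor
	switch store {
	case tiKV:
		cost += float64(ranges) * seekFactor
	case tiFlash:
		cost += float64(ranges) * float64(cols) * seekFactor
	}
	return cost
}

// indexScanCost mirrors getOriginalPhysicalIndexScan: the same scan term plus a
// plain per-range seek (index scans here only run on TiKV).
func indexScanCost(rowCount, rowSize float64, ranges int, scanFactor, seekFactor float64) float64 {
	return rowCount*rowSize*scanFactor + float64(ranges)*seekFactor
}

func main() {
	// Hypothetical numbers: 10000 rows, 40-byte average row, one full range, 3 columns read.
	fmt.Println(tableScanCost(tiKV, 10000, 40, 1, 3, 1.5, 20))    // 600020
	fmt.Println(tableScanCost(tiFlash, 10000, 40, 1, 3, 1.5, 20)) // 600060
	fmt.Println(indexScanCost(10000, 24, 1, 1.5, 20))             // 360020
}
```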
@@ -22,6 +22,7 @@ import (
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/expression/aggregation"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/planner/property"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/table"
@@ -413,6 +414,7 @@ type accessPath struct {
tableFilters []expression.Expression
// isTablePath indicates whether this path is table path.
isTablePath bool
storeType kv.StoreType
// forced means this path is generated by `use/force index()`.
forced bool
// partialIndexPaths store all index access paths.

@@ -582,8 +582,11 @@ func isPrimaryIndex(indexName model.CIStr) bool {
}

func (b *PlanBuilder) getPossibleAccessPaths(indexHints []*ast.IndexHint, tblInfo *model.TableInfo, dbName, tblName model.CIStr) ([]*accessPath, error) {
publicPaths := make([]*accessPath, 0, len(tblInfo.Indices)+1)
publicPaths = append(publicPaths, &accessPath{isTablePath: true})
publicPaths := make([]*accessPath, 0, len(tblInfo.Indices)+2)
publicPaths = append(publicPaths, &accessPath{isTablePath: true, storeType: kv.TiKV})
if tblInfo.TiFlashReplica != nil && tblInfo.TiFlashReplica.Available {
publicPaths = append(publicPaths, &accessPath{isTablePath: true, storeType: kv.TiFlash})
}
for _, index := range tblInfo.Indices {
if index.State == model.StatePublic {
publicPaths = append(publicPaths, &accessPath{index: index})
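The planbuilder hunk above is what exposes TiFlash to the optimizer in the first place: every table keeps its TiKV table path, a second table path with storeType kv.TiFlash is added only when an available TiFlash replica exists, and each public index contributes one more path. A rough, self-contained sketch of that shape, with simplified stand-in types rather than the real planner structs:

```go
package main

import "fmt"

type accessPath struct {
	isTablePath bool
	store       string // "tikv" or "tiflash"; empty for index paths
	index       string
}

type tableInfo struct {
	indices        []string
	tiflashReplica bool // a replica exists and is available
}

// possibleAccessPaths mirrors getPossibleAccessPaths above in miniature.
func possibleAccessPaths(tbl tableInfo) []accessPath {
	paths := make([]accessPath, 0, len(tbl.indices)+2)
	paths = append(paths, accessPath{isTablePath: true, store: "tikv"})
	if tbl.tiflashReplica {
		paths = append(paths, accessPath{isTablePath: true, store: "tiflash"})
	}
	for _, idx := range tbl.indices {
		paths = append(paths, accessPath{index: idx})
	}
	return paths
}

func main() {
	fmt.Println(possibleAccessPaths(tableInfo{indices: []string{"idx_a"}, tiflashReplica: true}))
	// [{true tikv } {true tiflash } {false  idx_a}]
}
```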
@@ -130,7 +130,10 @@ func (t *copTask) finishIndexPlan() {
}
// Calculate the IO cost of table scan here because we cannot know its stats until we finish index plan.
t.tablePlan.(*PhysicalTableScan).stats = t.indexPlan.statsInfo()
rowSize := t.tblColHists.GetAvgRowSize(t.tblCols, false)
var p PhysicalPlan
for p = t.indexPlan; len(p.Children()) > 0; p = p.Children()[0] {
}
rowSize := t.tblColHists.GetIndexAvgRowSize(t.tblCols, p.(*PhysicalIndexScan).Index.Unique)
t.cst += cnt * rowSize * sessVars.ScanFactor
}
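In the copTask hunk above, the index-side IO is now priced with GetIndexAvgRowSize of the bottom-most index scan, which first requires walking the index plan down its single-child chain to the leaf. That descent is the whole trick; a tiny sketch with a toy plan type (not TiDB's PhysicalPlan interface):

```go
package main

import "fmt"

// planNode is a toy stand-in for a physical operator with an ordered child list.
type planNode struct {
	name     string
	children []*planNode
}

// leafOf follows the first child until there are no children, mirroring
// `for p = t.indexPlan; len(p.Children()) > 0; p = p.Children()[0] {}`.
func leafOf(p *planNode) *planNode {
	for len(p.children) > 0 {
		p = p.children[0]
	}
	return p
}

func main() {
	scan := &planNode{name: "IndexScan"}
	sel := &planNode{name: "Selection", children: []*planNode{scan}}
	fmt.Println(leafOf(sel).name) // IndexScan
}
```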
14 planner/core/testdata/analyze_suite_out.json (vendored)
@@ -158,13 +158,13 @@
],
"Plan": [
"Projection_7 0.00 root Column#1, Column#2, Column#4, Column#5",
"└─HashRightJoin_9 0.00 root inner join, inner:IndexReader_15, equal:[eq(Column#5, Column#2)]",
" ├─IndexReader_15 0.00 root index:Selection_14",
" │ └─Selection_14 0.00 cop[tikv] isnull(Column#5), not(isnull(Column#5))",
" │ └─IndexScan_13 10000.00 cop[tikv] table:t2, index:a, b, range:[NULL,+inf], keep order:false, stats:pseudo",
" └─IndexReader_21 9990.00 root index:Selection_20",
" └─Selection_20 9990.00 cop[tikv] not(isnull(Column#2))",
" └─IndexScan_19 10000.00 cop[tikv] table:t1, index:a, b, range:[NULL,+inf], keep order:false, stats:pseudo"
"└─HashRightJoin_9 0.00 root inner join, inner:TableReader_12, equal:[eq(Column#5, Column#2)]",
" ├─TableReader_12 0.00 root data:Selection_11",
" │ └─Selection_11 0.00 cop[tikv] isnull(Column#5), not(isnull(Column#5))",
" │ └─TableScan_10 10000.00 cop[tikv] table:t2, range:[-inf,+inf], keep order:false, stats:pseudo",
" └─TableReader_18 9990.00 root data:Selection_17",
" └─Selection_17 9990.00 cop[tikv] not(isnull(Column#2))",
" └─TableScan_16 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo"
]
}
]
@@ -299,10 +299,12 @@ type SessionVars struct {
CopCPUFactor float64
// NetworkFactor is the network cost of transferring 1 byte data.
NetworkFactor float64
// ScanFactor is the IO cost of scanning 1 byte data on TiKV.
// ScanFactor is the IO cost of scanning 1 byte data on TiKV and TiFlash.
ScanFactor float64
// DescScanFactor is the IO cost of scanning 1 byte data on TiKV in desc order.
// DescScanFactor is the IO cost of scanning 1 byte data on TiKV and TiFlash in desc order.
DescScanFactor float64
// SeekFactor is the IO cost of seeking the start value of a range in TiKV or TiFlash.
SeekFactor float64
// MemoryFactor is the memory cost of storing one tuple.
MemoryFactor float64
// ConcurrencyFactor is the CPU cost of additional one goroutine.
@@ -502,6 +504,7 @@ func NewSessionVars() *SessionVars {
NetworkFactor: DefOptNetworkFactor,
ScanFactor: DefOptScanFactor,
DescScanFactor: DefOptDescScanFactor,
SeekFactor: DefOptSeekFactor,
MemoryFactor: DefOptMemoryFactor,
ConcurrencyFactor: DefOptConcurrencyFactor,
EnableRadixJoin: false,
@@ -822,6 +825,8 @@ func (s *SessionVars) SetSystemVar(name string, val string) error {
s.ScanFactor = tidbOptFloat64(val, DefOptScanFactor)
case TiDBOptDescScanFactor:
s.DescScanFactor = tidbOptFloat64(val, DefOptDescScanFactor)
case TiDBOptSeekFactor:
s.SeekFactor = tidbOptFloat64(val, DefOptSeekFactor)
case TiDBOptMemoryFactor:
s.MemoryFactor = tidbOptFloat64(val, DefOptMemoryFactor)
case TiDBOptConcurrencyFactor:
@@ -657,6 +657,7 @@ var defaultSysVars = []*SysVar{
{ScopeGlobal | ScopeSession, TiDBOptNetworkFactor, strconv.FormatFloat(DefOptNetworkFactor, 'f', -1, 64)},
{ScopeGlobal | ScopeSession, TiDBOptScanFactor, strconv.FormatFloat(DefOptScanFactor, 'f', -1, 64)},
{ScopeGlobal | ScopeSession, TiDBOptDescScanFactor, strconv.FormatFloat(DefOptDescScanFactor, 'f', -1, 64)},
{ScopeGlobal | ScopeSession, TiDBOptSeekFactor, strconv.FormatFloat(DefOptSeekFactor, 'f', -1, 64)},
{ScopeGlobal | ScopeSession, TiDBOptMemoryFactor, strconv.FormatFloat(DefOptMemoryFactor, 'f', -1, 64)},
{ScopeGlobal | ScopeSession, TiDBOptConcurrencyFactor, strconv.FormatFloat(DefOptConcurrencyFactor, 'f', -1, 64)},
{ScopeGlobal | ScopeSession, TiDBIndexJoinBatchSize, strconv.Itoa(DefIndexJoinBatchSize)},

@@ -188,6 +188,8 @@ const (
TiDBOptScanFactor = "tidb_opt_scan_factor"
// tidb_opt_desc_factor is the IO cost of scanning 1 byte data on TiKV in desc order.
TiDBOptDescScanFactor = "tidb_opt_desc_factor"
// tidb_opt_seek_factor is the IO cost of seeking the start value in a range on TiKV or TiFlash.
TiDBOptSeekFactor = "tidb_opt_seek_factor"
// tidb_opt_memory_factor is the memory cost of storing one tuple.
TiDBOptMemoryFactor = "tidb_opt_memory_factor"
// tidb_opt_concurrency_factor is the CPU cost of additional one goroutine.
@@ -357,6 +359,7 @@ const (
DefOptNetworkFactor = 1.0
DefOptScanFactor = 1.5
DefOptDescScanFactor = 3.0
DefOptSeekFactor = 20.0
DefOptMemoryFactor = 0.001
DefOptConcurrencyFactor = 3.0
DefOptInSubqToJoinAndAgg = true

@@ -498,6 +498,7 @@ func ValidateSetSystemVar(vars *SessionVars, name string, value string) (string,
TiDBOptNetworkFactor,
TiDBOptScanFactor,
TiDBOptDescScanFactor,
TiDBOptSeekFactor,
TiDBOptMemoryFactor,
TiDBOptConcurrencyFactor:
v, err := strconv.ParseFloat(value, 64)
@@ -336,6 +336,14 @@ func (s *testVarsutilSuite) TestVarsutil(c *C) {
c.Assert(val, Equals, "5.0")
c.Assert(v.DescScanFactor, Equals, 5.0)

c.Assert(v.SeekFactor, Equals, 20.0)
err = SetSessionSystemVar(v, TiDBOptSeekFactor, types.NewStringDatum("50.0"))
c.Assert(err, IsNil)
val, err = GetSessionSystemVar(v, TiDBOptSeekFactor)
c.Assert(err, IsNil)
c.Assert(val, Equals, "50.0")
c.Assert(v.SeekFactor, Equals, 50.0)

c.Assert(v.MemoryFactor, Equals, 0.001)
err = SetSessionSystemVar(v, TiDBOptMemoryFactor, types.NewStringDatum("1.0"))
c.Assert(err, IsNil)
@@ -433,6 +441,8 @@ func (s *testVarsutilSuite) TestValidate(c *C) {
{TiDBOptScanFactor, "-2", true},
{TiDBOptDescScanFactor, "a", true},
{TiDBOptDescScanFactor, "-2", true},
{TiDBOptSeekFactor, "a", true},
{TiDBOptSeekFactor, "-2", true},
{TiDBOptMemoryFactor, "a", true},
{TiDBOptMemoryFactor, "-2", true},
{TiDBOptConcurrencyFactor, "a", true},
@@ -24,7 +24,9 @@ import (
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/ranger"
@@ -687,3 +689,32 @@ func (coll *HistColl) GetAvgRowSize(cols []*expression.Column, isEncodedKey bool
// Add 1 byte for each column's flag byte. See `encode` for details.
return size + float64(len(cols))
}

// GetTableAvgRowSize computes the average row size for a table scan, excluding the index key-value pairs.
func (coll *HistColl) GetTableAvgRowSize(cols []*expression.Column, storeType kv.StoreType, handleInCols bool) (size float64) {
size = coll.GetAvgRowSize(cols, false)
switch storeType {
case kv.TiKV:
size += tablecodec.RecordRowKeyLen
// The `cols` for TiKV always contain the row_id, so subtract its length from the row size.
size -= 8
case kv.TiFlash:
if !handleInCols {
size += 8 /* row_id length */
}
}
return
}

// GetIndexAvgRowSize computes the average row size for an index scan.
func (coll *HistColl) GetIndexAvgRowSize(cols []*expression.Column, isUnique bool) (size float64) {
size = coll.GetAvgRowSize(cols, true)
// tablePrefix(1) + tableID(8) + indexPrefix(2) + indexID(8)
// Because the cols for an index scan always contain the handle, we don't add the rowID here.
size += 19
if !isUnique {
// add the len("_")
size++
}
return
}
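The two histogram helpers above encode the per-row key overhead of each engine: a TiKV row always pays the 19-byte record key (RecordRowKeyLen) while its 8-byte row_id is already counted among the scanned columns, a TiFlash scan only pays 8 extra bytes when the handle is not among the scanned columns, and an index entry pays the 19-byte index-key prefix plus one byte for non-unique indexes. A numeric sketch of those adjustments under those assumptions (standalone arithmetic, not the real HistColl methods):

```go
package main

import "fmt"

const recordRowKeyLen = 1 /*tablePrefix*/ + 8 /*tableID*/ + 2 /*recordPrefixSep*/ + 8 /*handle*/ // = 19

// tableAvgRowSize applies the GetTableAvgRowSize adjustments to an average
// encoded column size.
func tableAvgRowSize(avgColSize float64, onTiKV, handleInCols bool) float64 {
	size := avgColSize
	if onTiKV {
		size += recordRowKeyLen
		size -= 8 // row_id is already included in the scanned columns
	} else if !handleInCols {
		size += 8 // TiFlash adds the row_id only when it is not scanned anyway
	}
	return size
}

// indexAvgRowSize applies the GetIndexAvgRowSize adjustments.
func indexAvgRowSize(avgKeySize float64, unique bool) float64 {
	size := avgKeySize + 19 // tablePrefix(1) + tableID(8) + indexPrefix(2) + indexID(8)
	if !unique {
		size++ // non-unique indexes pay one extra byte (the diff's len("_") adjustment)
	}
	return size
}

func main() {
	fmt.Println(tableAvgRowSize(40, true, true))   // TiKV:    40 + 19 - 8 = 51
	fmt.Println(tableAvgRowSize(40, false, false)) // TiFlash: 40 + 8      = 48
	fmt.Println(indexAvgRowSize(24, false))        // non-unique index: 24 + 19 + 1 = 44
}
```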
@@ -43,9 +43,10 @@ var (
)

const (
idLen = 8
prefixLen = 1 + idLen /*tableID*/ + 2
recordRowKeyLen = prefixLen + idLen /*handle*/
idLen = 8
prefixLen = 1 + idLen /*tableID*/ + 2
// RecordRowKeyLen is public for calculating average row size.
RecordRowKeyLen = prefixLen + idLen /*handle*/
tablePrefixLength = 1
recordPrefixSepLength = 2
)
@@ -60,7 +61,7 @@ func TablePrefix() []byte {

// EncodeRowKey encodes the table id and record handle into a kv.Key
func EncodeRowKey(tableID int64, encodedHandle []byte) kv.Key {
buf := make([]byte, 0, recordRowKeyLen)
buf := make([]byte, 0, RecordRowKeyLen)
buf = appendTableRecordPrefix(buf, tableID)
buf = append(buf, encodedHandle...)
return buf
@@ -68,7 +69,7 @@ func EncodeRowKey(tableID int64, encodedHandle []byte) kv.Key {

// EncodeRowKeyWithHandle encodes the table id, row handle into a kv.Key
func EncodeRowKeyWithHandle(tableID int64, handle int64) kv.Key {
buf := make([]byte, 0, recordRowKeyLen)
buf := make([]byte, 0, RecordRowKeyLen)
buf = appendTableRecordPrefix(buf, tableID)
buf = codec.EncodeInt(buf, handle)
return buf
@@ -237,7 +238,7 @@ func DecodeTableID(key kv.Key) int64 {

// DecodeRowKey decodes the key and gets the handle.
func DecodeRowKey(key kv.Key) (int64, error) {
if len(key) != recordRowKeyLen || !hasTablePrefix(key) || !hasRecordPrefixSep(key[prefixLen-2:]) {
if len(key) != RecordRowKeyLen || !hasTablePrefix(key) || !hasRecordPrefixSep(key[prefixLen-2:]) {
return 0, errInvalidKey.GenWithStack("invalid key - %q", key)
}
u := binary.BigEndian.Uint64(key[prefixLen:])
@@ -696,8 +697,8 @@ func GenTablePrefix(tableID int64) kv.Key {

// TruncateToRowKeyLen truncates the key to row key length if the key is longer than row key.
func TruncateToRowKeyLen(key kv.Key) kv.Key {
if len(key) > recordRowKeyLen {
return key[:recordRowKeyLen]
if len(key) > RecordRowKeyLen {
return key[:RecordRowKeyLen]
}
return key
}
@@ -390,7 +390,7 @@ func (s *testTableCodecSuite) TestPrefix(c *C) {
prefixKey := GenTableIndexPrefix(tableID)
c.Assert(DecodeTableID(prefixKey), Equals, tableID)

c.Assert(TruncateToRowKeyLen(append(indexPrefix, "xyz"...)), HasLen, recordRowKeyLen)
c.Assert(TruncateToRowKeyLen(append(indexPrefix, "xyz"...)), HasLen, RecordRowKeyLen)
c.Assert(TruncateToRowKeyLen(key), HasLen, len(key))
}