For the following two reasons, we should not push a stream aggregation down to a double read:
- The aggregation would lose the handle information.
- There is no sort operator: the second read returns rows ordered by the primary key, not by the index.
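A minimal sketch of a case this rule covers (the table t2, the idx_a index, and the hints below are illustrative assumptions; the exact plan depends on the TiDB version and statistics):

create table t2(id int primary key, a int, b int, index idx_a(a));
-- Scanning idx_a yields rows in index order, but fetching b needs a second
-- read by handle, which returns rows in primary-key order; the StreamAgg is
-- therefore expected to stay in the root task above the IndexLookUp reader.
explain select /*+ stream_agg() */ a, sum(b) from t2 use index (idx_a) group by a;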
drop table if exists t;
create table t (id int, c1 timestamp);
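As in MySQL, the statements below show that explain t, describe t, and desc t are synonyms for show columns from t, and that desc t <column> restricts the output to the named column.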
show columns from t;
Field  Type       Null  Key  Default  Extra
id     int(11)    YES        NULL
c1     timestamp  YES        NULL
explain t;
Field  Type       Null  Key  Default  Extra
id     int(11)    YES        NULL
c1     timestamp  YES        NULL
describe t;
Field  Type       Null  Key  Default  Extra
id     int(11)    YES        NULL
c1     timestamp  YES        NULL
desc t;
Field  Type       Null  Key  Default  Extra
id     int(11)    YES        NULL
c1     timestamp  YES        NULL
desc t c1;
Field  Type       Null  Key  Default  Extra
c1     timestamp  YES        NULL
desc t id;
Field  Type       Null  Key  Default  Extra
id     int(11)    YES        NULL
drop table if exists t;
create table t(id int primary key, a int, b int);
set session tidb_hashagg_partial_concurrency = 1;
set session tidb_hashagg_final_concurrency = 1;
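Both hash aggregation concurrency variables are pinned to 1 above, presumably so that the chosen plan and its printed form stay deterministic across runs.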
explain select group_concat(a) from t group by id;
id                  count     task       operator info
StreamAgg_8         8000.00   root       group by:Column#7, funcs:group_concat(Column#6, ",")
└─Projection_18     10000.00  root       cast(Column#2), Column#1
  └─TableReader_15  10000.00  root       data:TableScan_14
    └─TableScan_14  10000.00  cop[tikv]  table:t, range:[-inf,+inf], keep order:true, stats:pseudo
explain select group_concat(a, b) from t group by id;
id                  count     task       operator info
StreamAgg_8         8000.00   root       group by:Column#8, funcs:group_concat(Column#6, Column#7, ",")
└─Projection_18     10000.00  root       cast(Column#2), cast(Column#3), Column#1
  └─TableReader_15  10000.00  root       data:TableScan_14
    └─TableScan_14  10000.00  cop[tikv]  table:t, range:[-inf,+inf], keep order:true, stats:pseudo
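In both plans the group_concat arguments are first cast to strings by a Projection in the root task. Because the TableScan keeps primary-key order (keep order:true) and the grouping is on the primary key id, the StreamAgg needs no extra Sort; this is a single read through a TableReader, so the double-read restriction described at the top does not apply.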
drop table t;