infoschema: show information about table partitions in information_schema.PARTITIONS (#14347)
This commit is contained in:
102
infoschema/tables.go
Normal file → Executable file
102
infoschema/tables.go
Normal file → Executable file
@@ -1800,6 +1800,107 @@ func dataForPseudoProfiling() [][]types.Datum {
|
||||
return rows
|
||||
}
|
||||
|
||||
func dataForPartitions(ctx sessionctx.Context, schemas []*model.DBInfo) ([][]types.Datum, error) {
|
||||
tableRowsMap, colLengthMap, err := tableStatsCache.get(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
checker := privilege.GetPrivilegeManager(ctx)
|
||||
var rows [][]types.Datum
|
||||
createTimeTp := partitionsCols[18].tp
|
||||
for _, schema := range schemas {
|
||||
for _, table := range schema.Tables {
|
||||
if checker != nil && !checker.RequestVerification(ctx.GetSessionVars().ActiveRoles, schema.Name.L, table.Name.L, "", mysql.SelectPriv) {
|
||||
continue
|
||||
}
|
||||
createTime := types.NewTime(types.FromGoTime(table.GetUpdateTime()), createTimeTp, types.DefaultFsp)
|
||||
|
||||
var rowCount, dataLength, indexLength uint64
|
||||
if table.GetPartitionInfo() == nil {
|
||||
rowCount = tableRowsMap[table.ID]
|
||||
dataLength, indexLength = getDataAndIndexLength(table, table.ID, rowCount, colLengthMap)
|
||||
avgRowLength := uint64(0)
|
||||
if rowCount != 0 {
|
||||
avgRowLength = dataLength / rowCount
|
||||
}
|
||||
record := types.MakeDatums(
|
||||
catalogVal, // TABLE_CATALOG
|
||||
schema.Name.O, // TABLE_SCHEMA
|
||||
table.Name.O, // TABLE_NAME
|
||||
nil, // PARTITION_NAME
|
||||
nil, // SUBPARTITION_NAME
|
||||
nil, // PARTITION_ORDINAL_POSITION
|
||||
nil, // SUBPARTITION_ORDINAL_POSITION
|
||||
nil, // PARTITION_METHOD
|
||||
nil, // SUBPARTITION_METHOD
|
||||
nil, // PARTITION_EXPRESSION
|
||||
nil, // SUBPARTITION_EXPRESSION
|
||||
nil, // PARTITION_DESCRIPTION
|
||||
rowCount, // TABLE_ROWS
|
||||
avgRowLength, // AVG_ROW_LENGTH
|
||||
dataLength, // DATA_LENGTH
|
||||
nil, // MAX_DATA_LENGTH
|
||||
indexLength, // INDEX_LENGTH
|
||||
nil, // DATA_FREE
|
||||
createTime, // CREATE_TIME
|
||||
nil, // UPDATE_TIME
|
||||
nil, // CHECK_TIME
|
||||
nil, // CHECKSUM
|
||||
nil, // PARTITION_COMMENT
|
||||
nil, // NODEGROUP
|
||||
nil, // TABLESPACE_NAME
|
||||
)
|
||||
rows = append(rows, record)
|
||||
} else {
|
||||
for i, pi := range table.GetPartitionInfo().Definitions {
|
||||
rowCount = tableRowsMap[pi.ID]
|
||||
dataLength, indexLength = getDataAndIndexLength(table, pi.ID, tableRowsMap[pi.ID], colLengthMap)
|
||||
|
||||
avgRowLength := uint64(0)
|
||||
if rowCount != 0 {
|
||||
avgRowLength = dataLength / rowCount
|
||||
}
|
||||
|
||||
var partitionDesc string
|
||||
if table.Partition.Type == model.PartitionTypeRange {
|
||||
partitionDesc = pi.LessThan[0]
|
||||
}
|
||||
|
||||
record := types.MakeDatums(
|
||||
catalogVal, // TABLE_CATALOG
|
||||
schema.Name.O, // TABLE_SCHEMA
|
||||
table.Name.O, // TABLE_NAME
|
||||
pi.Name.O, // PARTITION_NAME
|
||||
nil, // SUBPARTITION_NAME
|
||||
i+1, // PARTITION_ORDINAL_POSITION
|
||||
nil, // SUBPARTITION_ORDINAL_POSITION
|
||||
table.Partition.Type.String(), // PARTITION_METHOD
|
||||
nil, // SUBPARTITION_METHOD
|
||||
table.Partition.Expr, // PARTITION_EXPRESSION
|
||||
nil, // SUBPARTITION_EXPRESSION
|
||||
partitionDesc, // PARTITION_DESCRIPTION
|
||||
rowCount, // TABLE_ROWS
|
||||
avgRowLength, // AVG_ROW_LENGTH
|
||||
dataLength, // DATA_LENGTH
|
||||
uint64(0), // MAX_DATA_LENGTH
|
||||
indexLength, // INDEX_LENGTH
|
||||
uint64(0), // DATA_FREE
|
||||
createTime, // CREATE_TIME
|
||||
nil, // UPDATE_TIME
|
||||
nil, // CHECK_TIME
|
||||
nil, // CHECKSUM
|
||||
pi.Comment, // PARTITION_COMMENT
|
||||
nil, // NODEGROUP
|
||||
nil, // TABLESPACE_NAME
|
||||
)
|
||||
rows = append(rows, record)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return rows, nil
|
||||
}
|
||||
|
||||
func dataForKeyColumnUsage(ctx sessionctx.Context, schemas []*model.DBInfo) [][]types.Datum {
|
||||
checker := privilege.GetPrivilegeManager(ctx)
|
||||
rows := make([][]types.Datum, 0, len(schemas)) // The capacity is not accurate, but it is not a big problem.
|
||||
@@ -2308,6 +2409,7 @@ func (it *infoschemaTable) getRows(ctx sessionctx.Context, cols []*table.Column)
|
||||
fullRows = dataForPseudoProfiling()
|
||||
}
|
||||
case tablePartitions:
|
||||
fullRows, err = dataForPartitions(ctx, dbs)
|
||||
case tableKeyColumn:
|
||||
fullRows = dataForKeyColumnUsage(ctx, dbs)
|
||||
case tableReferConst:
|
||||
|
||||
@@ -1099,3 +1099,55 @@ func (s *testTableSuite) TestSelectHiddenColumn(c *C) {
|
||||
colInfo[2].Hidden = true
|
||||
tk.MustQuery("select count(*) from INFORMATION_SCHEMA.COLUMNS where table_name = 'hidden'").Check(testkit.Rows("0"))
|
||||
}
|
||||
|
||||
// TestPartitionsTable verifies the rows exposed through
// information_schema.PARTITIONS for a range-partitioned table and for a
// table without partitions, both before and after table statistics are
// dumped to the KV store and reloaded.
func (s *testTableSuite) TestPartitionsTable(c *C) {
	// Disable table-stats cache expiry so every query observes fresh
	// statistics; restore the previous expiry when the test finishes.
	oldExpiryTime := infoschema.TableStatsCacheExpiry
	infoschema.TableStatsCacheExpiry = 0
	defer func() { infoschema.TableStatsCacheExpiry = oldExpiryTime }()

	do := s.dom
	h := do.StatsHandle()
	h.Clear()
	is := do.InfoSchema()

	tk := testkit.NewTestKit(c, s.store)

	tk.MustExec("USE test;")
	tk.MustExec("DROP TABLE IF EXISTS `test_partitions`;")
	tk.MustExec(`CREATE TABLE test_partitions (a int, b int, c varchar(5), primary key(a), index idx(c)) PARTITION BY RANGE (a) (PARTITION p0 VALUES LESS THAN (6), PARTITION p1 VALUES LESS THAN (11), PARTITION p2 VALUES LESS THAN (16));`)
	// Let the stats handle consume the DDL event produced by CREATE TABLE.
	err := h.HandleDDLEvent(<-h.DDLEventCh())
	c.Assert(err, IsNil)
	tk.MustExec(`insert into test_partitions(a, b, c) values(1, 2, "c"), (7, 3, "d"), (12, 4, "e");`)

	// Partition names and range upper bounds come straight from the schema.
	tk.MustQuery("select PARTITION_NAME, PARTITION_DESCRIPTION from information_schema.PARTITIONS where table_name='test_partitions';").Check(
		testkit.Rows("" +
			"p0 6]\n" +
			"[p1 11]\n" +
			"[p2 16"))

	// Stats have not been flushed yet, so all size columns are still zero.
	tk.MustQuery("select table_rows, avg_row_length, data_length, index_length from information_schema.PARTITIONS where table_name='test_partitions';").Check(
		testkit.Rows("" +
			"0 0 0 0]\n" +
			"[0 0 0 0]\n" +
			"[0 0 0 0"))
	c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil)
	c.Assert(h.Update(is), IsNil)
	// After dumping and reloading stats, each partition reports its one row.
	tk.MustQuery("select table_rows, avg_row_length, data_length, index_length from information_schema.PARTITIONS where table_name='test_partitions';").Check(
		testkit.Rows("" +
			"1 18 18 2]\n" +
			"[1 18 18 2]\n" +
			"[1 18 18 2"))

	// Test for table has no partitions.
	tk.MustExec("DROP TABLE IF EXISTS `test_partitions_1`;")
	tk.MustExec(`CREATE TABLE test_partitions_1 (a int, b int, c varchar(5), primary key(a), index idx(c));`)
	err = h.HandleDDLEvent(<-h.DDLEventCh())
	c.Assert(err, IsNil)
	tk.MustExec(`insert into test_partitions_1(a, b, c) values(1, 2, "c"), (7, 3, "d"), (12, 4, "e");`)
	c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil)
	c.Assert(h.Update(is), IsNil)
	// A non-partitioned table yields one row with NULL PARTITION_NAME and
	// table-level statistics.
	tk.MustQuery("select PARTITION_NAME, TABLE_ROWS, AVG_ROW_LENGTH, DATA_LENGTH, INDEX_LENGTH from information_schema.PARTITIONS where table_name='test_partitions_1';").Check(
		testkit.Rows("<nil> 3 18 54 6"))

	tk.MustExec("DROP TABLE `test_partitions`;")
}
|
||||
|
||||
Reference in New Issue
Block a user