// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package executor

import (
	"context"
	"fmt"
	"sort"
	"sync"
	"time"

	"github.com/pingcap/parser/charset"
	"github.com/pingcap/parser/model"
	"github.com/pingcap/parser/mysql"
	"github.com/pingcap/tidb/infoschema"
	"github.com/pingcap/tidb/meta/autoid"
	"github.com/pingcap/tidb/privilege"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/sqlexec"
)

// memtableRetriever builds and serves the rows of one information_schema
// memory table. The complete result set is materialized into rows on the
// first call to retrieve and then handed back to the caller in chunks.
type memtableRetriever struct {
	dummyCloser
	table       *model.TableInfo    // definition of the memory table being read
	columns     []*model.ColumnInfo // requested columns; may be a subset of table.Columns
	rows        [][]types.Datum     // cached full rows, built lazily on first retrieve
	rowIdx      int                 // index of the next row to return
	retrieved   bool                // true once every cached row has been returned
	initialized bool                // true once rows has been populated
}

// retrieve implements the infoschemaRetriever interface
func (e *memtableRetriever) retrieve(ctx context.Context, sctx sessionctx.Context) ([][]types.Datum, error) {
	if e.retrieved {
		return nil, nil
	}

	// Cache the ret full rows in schemataRetriever
	if !e.initialized {
		is := infoschema.GetInfoSchema(sctx)
		dbs := is.AllSchemas()
		sort.Sort(infoschema.SchemasSorter(dbs))
		var err error
		// Dispatch on the memory-table name to the matching row builder.
		switch e.table.Name.O {
		case infoschema.TableSchemata:
			e.setDataFromSchemata(sctx, dbs)
		case infoschema.TableTables:
			err = e.dataForTables(sctx, dbs)
		case infoschema.TablePartitions:
			err = e.dataForPartitions(sctx, dbs)
		case infoschema.TableTiDBIndexes:
			e.setDataFromIndexes(sctx, dbs)
		case infoschema.TableViews:
			e.setDataFromViews(sctx, dbs)
		case infoschema.TableEngines:
			e.setDataFromEngines()
		case infoschema.TableCharacterSets:
			e.setDataFromCharacterSets()
		case infoschema.TableCollations:
			e.setDataFromCollations()
		case infoschema.TableKeyColumn:
			e.setDataFromKeyColumnUsage(sctx, dbs)
		case infoschema.TableCollationCharacterSetApplicability:
			e.dataForCollationCharacterSetApplicability()
		case infoschema.TableUserPrivileges:
			e.setDataFromUserPrivileges(sctx)
		}
		if err != nil {
			return nil, err
		}
		e.initialized = true
	}

	// Adjust the amount of each return: hand back at most maxCount cached
	// rows per call; mark retrieved once the tail of the cache is reached.
	maxCount := 1024
	retCount := maxCount
	if e.rowIdx+maxCount > len(e.rows) {
		retCount = len(e.rows) - e.rowIdx
		e.retrieved = true
	}
	ret := make([][]types.Datum, retCount)
	for i := e.rowIdx; i < e.rowIdx+retCount; i++ {
		ret[i-e.rowIdx] = e.rows[i]
	}
	e.rowIdx += retCount
	if len(e.columns) == len(e.table.Columns) {
		return ret, nil
	}
	// A column subset was requested: project each full row down to only the
	// requested columns, picking values by each column's offset.
	rows := make([][]types.Datum, len(ret))
	for i, fullRow := range ret {
		row := make([]types.Datum, len(e.columns))
		for j, col := range e.columns {
			row[j] = fullRow[col.Offset]
		}
		rows[i] = row
	}
	return rows, nil
}

// getRowCountAllTable returns the row count of every table, keyed by table
// ID, read from mysql.stats_meta through the restricted SQL executor.
func getRowCountAllTable(ctx sessionctx.Context) (map[int64]uint64, error) {
	rows, _, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL("select table_id, count from mysql.stats_meta")
	if err != nil {
		return nil, err
	}
	rowCountMap := make(map[int64]uint64, len(rows))
	for _, row := range rows {
		tableID := row.GetInt64(0)
		rowCnt := row.GetUint64(1)
		rowCountMap[tableID] = rowCnt
	}
	return rowCountMap, nil
}

// tableHistID identifies one column histogram: the owning table's ID plus
// the histogram (column) ID.
type tableHistID struct {
	tableID int64
	histID  int64
}

// getColLengthAllTables returns the total stored size of every non-index
// column, keyed by (table ID, histogram ID), read from
// mysql.stats_histograms. Negative sizes are clamped to zero.
func getColLengthAllTables(ctx sessionctx.Context) (map[tableHistID]uint64, error) {
	rows, _, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL("select table_id, hist_id, tot_col_size from mysql.stats_histograms where is_index = 0")
	if err != nil {
		return nil, err
	}
	colLengthMap := make(map[tableHistID]uint64, len(rows))
	for _, row := range rows {
		tableID := row.GetInt64(0)
		histID := row.GetInt64(1)
		totalSize := row.GetInt64(2)
		if totalSize < 0 {
			totalSize = 0
		}
		colLengthMap[tableHistID{tableID: tableID, histID: histID}] = uint64(totalSize)
	}
	return colLengthMap, nil
}

// getDataAndIndexLength estimates the data length and index length (in
// bytes) of one table or partition, given its physical ID, its row count and
// the per-column stored sizes loaded from the stats tables.
func getDataAndIndexLength(info *model.TableInfo, physicalID int64, rowCount uint64,
	columnLengthMap map[tableHistID]uint64) (uint64, uint64) {
	columnLength := make(map[string]uint64, len(info.Columns))
	for _, col := range info.Columns {
		if col.State != model.StatePublic {
			continue
		}
		length := col.FieldType.StorageLength()
		if length != types.VarStorageLen {
			// Fixed-width column: total size is simply rowCount * width.
			columnLength[col.Name.L] = rowCount * uint64(length)
		} else {
			// Variable-width column: use the total size tracked by stats.
			length := columnLengthMap[tableHistID{tableID: physicalID, histID: col.ID}]
			columnLength[col.Name.L] = length
		}
	}
	dataLength, indexLength := uint64(0), uint64(0)
	for _, length := range columnLength {
		dataLength += length
	}
	for _, idx := range info.Indices {
		if idx.State != model.StatePublic {
			continue
		}
		for _, col := range idx.Columns {
			if col.Length == types.UnspecifiedLength {
				// Full-column index entry: reuse the column's data size.
				indexLength += columnLength[col.Name.L]
			} else {
				// Prefix index entry: prefix-length bytes per row.
				indexLength += rowCount * uint64(col.Length)
			}
		}
	}
	return dataLength, indexLength
}

// statsCache caches the row-count and column-length maps loaded from the
// stats tables, refreshed at most once per TableStatsCacheExpiry.
type statsCache struct {
	mu         sync.Mutex
	loading    bool // true while one caller is refreshing the cache
	modifyTime time.Time
	tableRows  map[int64]uint64
	colLength  map[tableHistID]uint64
}

// tableStatsCache is the package-level shared stats cache instance.
var tableStatsCache = &statsCache{}

// TableStatsCacheExpiry is the expiry time for table stats cache.
var TableStatsCacheExpiry = 3 * time.Second

// setLoading sets the loading flag under the cache mutex.
func (c *statsCache) setLoading(loading bool) {
	c.mu.Lock()
	c.loading = loading
	c.mu.Unlock()
}

// get returns the cached row-count and column-length maps, refreshing them
// from the stats tables when the cache has expired. While one caller is
// loading, concurrent callers are served the current (possibly stale) maps
// instead of blocking.
func (c *statsCache) get(ctx sessionctx.Context) (map[int64]uint64, map[tableHistID]uint64, error) {
	c.mu.Lock()
	if time.Since(c.modifyTime) < TableStatsCacheExpiry || c.loading {
		tableRows, colLength := c.tableRows, c.colLength
		c.mu.Unlock()
		return tableRows, colLength, nil
	}
	// This caller takes ownership of the refresh; drop the lock while the
	// two stats queries run.
	c.loading = true
	c.mu.Unlock()

	tableRows, err := getRowCountAllTable(ctx)
	if err != nil {
		c.setLoading(false)
		return nil, nil, err
	}
	colLength, err := getColLengthAllTables(ctx)
	if err != nil {
		c.setLoading(false)
		return nil, nil, err
	}
	c.mu.Lock()
	c.loading = false
	c.tableRows = tableRows
	c.colLength = colLength
	c.modifyTime = time.Now()
	c.mu.Unlock()
	return tableRows, colLength, nil
}

// getAutoIncrementID returns the next auto-increment value of the table:
// the row-ID allocator's current base plus one.
func getAutoIncrementID(ctx sessionctx.Context, schema *model.DBInfo, tblInfo *model.TableInfo) (int64, error) {
	is := ctx.GetSessionVars().TxnCtx.InfoSchema.(infoschema.InfoSchema)
	tbl, err := is.TableByName(schema.Name, tblInfo.Name)
	if err != nil {
		return 0, err
	}
	return tbl.Allocator(ctx, autoid.RowIDAllocType).Base() + 1, nil
}

// setDataFromSchemata builds one row per visible schema for the
// information_schema.SCHEMATA table, skipping schemas the current user has
// no privilege on.
func (e *memtableRetriever) setDataFromSchemata(ctx sessionctx.Context, schemas []*model.DBInfo) {
	checker := privilege.GetPrivilegeManager(ctx)
	rows := make([][]types.Datum, 0, len(schemas))
	for _, schema := range schemas {
		charset := mysql.DefaultCharset
		collation := mysql.DefaultCollationName
		if len(schema.Charset) > 0 {
			charset = schema.Charset // Overwrite default
		}
		if len(schema.Collate) > 0 {
			collation = schema.Collate // Overwrite default
		}
		if checker != nil && !checker.RequestVerification(ctx.GetSessionVars().ActiveRoles, schema.Name.L, "", "", mysql.AllPrivMask) {
			continue
		}
		record := types.MakeDatums(
			infoschema.CatalogVal, // CATALOG_NAME
			schema.Name.O,         // SCHEMA_NAME
			charset,               // DEFAULT_CHARACTER_SET_NAME
			collation,             // DEFAULT_COLLATION_NAME
			nil,
		)
		rows = append(rows, record)
	}
	e.rows = rows
}

// dataForTables builds the rows of information_schema.TABLES: one row per
// visible table or view, with size/row-count figures taken from the shared
// stats cache (summed over partitions for partitioned tables).
func (e *memtableRetriever) dataForTables(ctx sessionctx.Context, schemas []*model.DBInfo) error {
	tableRowsMap, colLengthMap, err := tableStatsCache.get(ctx)
	if err != nil {
		return err
	}
	checker := privilege.GetPrivilegeManager(ctx)
	var rows [][]types.Datum
	createTimeTp := mysql.TypeDatetime
	for _, schema := range schemas {
		for _, table := range schema.Tables {
			collation := table.Collate
			if collation == "" {
				collation = mysql.DefaultCollationName
			}
			createTime := types.NewTime(types.FromGoTime(table.GetUpdateTime()), createTimeTp, types.DefaultFsp)

			createOptions := ""

			if checker != nil && !checker.RequestVerification(ctx.GetSessionVars().ActiveRoles, schema.Name.L, table.Name.L, "", mysql.AllPrivMask) {
				continue
			}

			if !table.IsView() {
				if table.GetPartitionInfo() != nil {
					createOptions = "partitioned"
				}
				var autoIncID interface{}
				hasAutoIncID, _ := infoschema.HasAutoIncrementColumn(table)
				if hasAutoIncID {
					autoIncID, err = getAutoIncrementID(ctx, schema, table)
					if err != nil {
						return err
					}
				}

				var rowCount, dataLength, indexLength uint64
				if table.GetPartitionInfo() == nil {
					rowCount = tableRowsMap[table.ID]
					dataLength, indexLength = getDataAndIndexLength(table, table.ID, rowCount, colLengthMap)
				} else {
					// Partitioned table: aggregate stats over every partition.
					for _, pi := range table.GetPartitionInfo().Definitions {
						rowCount += tableRowsMap[pi.ID]
						parDataLen, parIndexLen := getDataAndIndexLength(table, pi.ID, tableRowsMap[pi.ID], colLengthMap)
						dataLength += parDataLen
						indexLength += parIndexLen
					}
				}
				avgRowLength := uint64(0)
				if rowCount != 0 {
					avgRowLength = dataLength / rowCount
				}
				shardingInfo := infoschema.GetShardingInfo(schema, table)
				record := types.MakeDatums(
					infoschema.CatalogVal, // TABLE_CATALOG
					schema.Name.O,         // TABLE_SCHEMA
					table.Name.O,          // TABLE_NAME
					"BASE TABLE",          // TABLE_TYPE
					"InnoDB",              // ENGINE
					uint64(10),            // VERSION
					"Compact",             // ROW_FORMAT
					rowCount,              // TABLE_ROWS
					avgRowLength,          // AVG_ROW_LENGTH
					dataLength,            // DATA_LENGTH
					uint64(0),             // MAX_DATA_LENGTH
					indexLength,           // INDEX_LENGTH
					uint64(0),             // DATA_FREE
					autoIncID,             // AUTO_INCREMENT
					createTime,            // CREATE_TIME
					nil,                   // UPDATE_TIME
					nil,                   // CHECK_TIME
					collation,             // TABLE_COLLATION
					nil,                   // CHECKSUM
					createOptions,         // CREATE_OPTIONS
					table.Comment,         // TABLE_COMMENT
					table.ID,              // TIDB_TABLE_ID
					shardingInfo,          // TIDB_ROW_ID_SHARDING_INFO
				)
				rows = append(rows, record)
			} else {
				// Views get a mostly-NULL row with TABLE_TYPE "VIEW".
				record := types.MakeDatums(
					infoschema.CatalogVal, // TABLE_CATALOG
					schema.Name.O,         // TABLE_SCHEMA
					table.Name.O,          // TABLE_NAME
					"VIEW",                // TABLE_TYPE
					nil,                   // ENGINE
					nil,                   // VERSION
					nil,                   // ROW_FORMAT
					nil,                   // TABLE_ROWS
					nil,                   // AVG_ROW_LENGTH
					nil,                   // DATA_LENGTH
					nil,                   // MAX_DATA_LENGTH
					nil,                   // INDEX_LENGTH
					nil,                   // DATA_FREE
					nil,                   // AUTO_INCREMENT
					createTime,            // CREATE_TIME
					nil,                   // UPDATE_TIME
					nil,                   // CHECK_TIME
					nil,                   // TABLE_COLLATION
					nil,                   // CHECKSUM
					nil,                   // CREATE_OPTIONS
					"VIEW",                // TABLE_COMMENT
					table.ID,              // TIDB_TABLE_ID
					nil,                   // TIDB_ROW_ID_SHARDING_INFO
				)
				rows = append(rows, record)
			}
		}
	}
	e.rows = rows
	return nil
}

// dataForPartitions builds the rows of information_schema.PARTITIONS: one
// row for an unpartitioned table (NULL partition columns) and one row per
// partition definition otherwise, with sizes from the shared stats cache.
func (e *memtableRetriever) dataForPartitions(ctx sessionctx.Context, schemas []*model.DBInfo) error {
	tableRowsMap, colLengthMap, err := tableStatsCache.get(ctx)
	if err != nil {
		return err
	}
	checker := privilege.GetPrivilegeManager(ctx)
	var rows [][]types.Datum
	createTimeTp := mysql.TypeDatetime
	for _, schema := range schemas {
		for _, table := range schema.Tables {
			if checker != nil && !checker.RequestVerification(ctx.GetSessionVars().ActiveRoles, schema.Name.L, table.Name.L, "", mysql.SelectPriv) {
				continue
			}
			createTime := types.NewTime(types.FromGoTime(table.GetUpdateTime()), createTimeTp, types.DefaultFsp)

			var rowCount, dataLength, indexLength uint64
			if table.GetPartitionInfo() == nil {
				rowCount = tableRowsMap[table.ID]
				dataLength, indexLength = getDataAndIndexLength(table, table.ID, rowCount, colLengthMap)
				avgRowLength := uint64(0)
				if rowCount != 0 {
					avgRowLength = dataLength / rowCount
				}
				record := types.MakeDatums(
					infoschema.CatalogVal, // TABLE_CATALOG
					schema.Name.O,         // TABLE_SCHEMA
					table.Name.O,          // TABLE_NAME
					nil,                   // PARTITION_NAME
					nil,                   // SUBPARTITION_NAME
					nil,                   // PARTITION_ORDINAL_POSITION
					nil,                   // SUBPARTITION_ORDINAL_POSITION
					nil,                   // PARTITION_METHOD
					nil,                   // SUBPARTITION_METHOD
					nil,                   // PARTITION_EXPRESSION
					nil,                   // SUBPARTITION_EXPRESSION
					nil,                   // PARTITION_DESCRIPTION
					rowCount,              // TABLE_ROWS
					avgRowLength,          // AVG_ROW_LENGTH
					dataLength,            // DATA_LENGTH
					nil,                   // MAX_DATA_LENGTH
					indexLength,           // INDEX_LENGTH
					nil,                   // DATA_FREE
					createTime,            // CREATE_TIME
					nil,                   // UPDATE_TIME
					nil,                   // CHECK_TIME
					nil,                   // CHECKSUM
					nil,                   // PARTITION_COMMENT
					nil,                   // NODEGROUP
					nil,                   // TABLESPACE_NAME
				)
				rows = append(rows, record)
			} else {
				// One row per partition definition.
				for i, pi := range table.GetPartitionInfo().Definitions {
					rowCount = tableRowsMap[pi.ID]
					dataLength, indexLength = getDataAndIndexLength(table, pi.ID, tableRowsMap[pi.ID], colLengthMap)

					avgRowLength := uint64(0)
					if rowCount != 0 {
						avgRowLength = dataLength / rowCount
					}

					var partitionDesc string
					if table.Partition.Type == model.PartitionTypeRange {
						// RANGE partitions describe their upper bound.
						partitionDesc = pi.LessThan[0]
					}
					record := types.MakeDatums(
						infoschema.CatalogVal,         // TABLE_CATALOG
						schema.Name.O,                 // TABLE_SCHEMA
						table.Name.O,                  // TABLE_NAME
						pi.Name.O,                     // PARTITION_NAME
						nil,                           // SUBPARTITION_NAME
						i+1,                           // PARTITION_ORDINAL_POSITION
						nil,                           // SUBPARTITION_ORDINAL_POSITION
						table.Partition.Type.String(), // PARTITION_METHOD
						nil,                           // SUBPARTITION_METHOD
						table.Partition.Expr,          // PARTITION_EXPRESSION
						nil,                           // SUBPARTITION_EXPRESSION
						partitionDesc,                 // PARTITION_DESCRIPTION
						rowCount,                      // TABLE_ROWS
						avgRowLength,                  // AVG_ROW_LENGTH
						dataLength,                    // DATA_LENGTH
						uint64(0),                     // MAX_DATA_LENGTH
						indexLength,                   // INDEX_LENGTH
						uint64(0),                     // DATA_FREE
						createTime,                    // CREATE_TIME
						nil,                           // UPDATE_TIME
						nil,                           // CHECK_TIME
						nil,                           // CHECKSUM
						pi.Comment,                    // PARTITION_COMMENT
						nil,                           // NODEGROUP
						nil,                           // TABLESPACE_NAME
					)
					rows = append(rows, record)
				}
			}
		}
	}
	e.rows = rows
	return nil
}

// setDataFromIndexes builds the rows of the TiDB indexes memory table: one
// synthetic PRIMARY row for integer-handle primary keys (PKIsHandle), plus
// one row per column of every public index. Hidden (expression-index)
// columns report a NULL column name and the generated expression instead.
func (e *memtableRetriever) setDataFromIndexes(ctx sessionctx.Context, schemas []*model.DBInfo) {
	checker := privilege.GetPrivilegeManager(ctx)
	var rows [][]types.Datum
	for _, schema := range schemas {
		for _, tb := range schema.Tables {
			if checker != nil && !checker.RequestVerification(ctx.GetSessionVars().ActiveRoles, schema.Name.L, tb.Name.L, "", mysql.AllPrivMask) {
				continue
			}
			if tb.PKIsHandle {
				// The integer primary key is the row handle and has no
				// IndexInfo entry; synthesize its row from the column flag.
				var pkCol *model.ColumnInfo
				for _, col := range tb.Cols() {
					if mysql.HasPriKeyFlag(col.Flag) {
						pkCol = col
						break
					}
				}
				record := types.MakeDatums(
					schema.Name.O, // TABLE_SCHEMA
					tb.Name.O,     // TABLE_NAME
					0,             // NON_UNIQUE
					"PRIMARY",     // KEY_NAME
					1,             // SEQ_IN_INDEX
					pkCol.Name.O,  // COLUMN_NAME
					nil,           // SUB_PART
					"",            // INDEX_COMMENT
					"NULL",        // Expression
					0,             // INDEX_ID
				)
				rows = append(rows, record)
			}
			for _, idxInfo := range tb.Indices {
				if idxInfo.State != model.StatePublic {
					continue
				}
				for i, col := range idxInfo.Columns {
					nonUniq := 1
					if idxInfo.Unique {
						nonUniq = 0
					}
					var subPart interface{}
					if col.Length != types.UnspecifiedLength {
						subPart = col.Length
					}
					colName := col.Name.O
					expression := "NULL"
					tblCol := tb.Columns[col.Offset]
					if tblCol.Hidden {
						// Expression index: no real column, show the expression.
						colName = "NULL"
						expression = fmt.Sprintf("(%s)", tblCol.GeneratedExprString)
					}
					record := types.MakeDatums(
						schema.Name.O,   // TABLE_SCHEMA
						tb.Name.O,       // TABLE_NAME
						nonUniq,         // NON_UNIQUE
						idxInfo.Name.O,  // KEY_NAME
						i+1,             // SEQ_IN_INDEX
						colName,         // COLUMN_NAME
						subPart,         // SUB_PART
						idxInfo.Comment, // INDEX_COMMENT
						expression,      // Expression
						idxInfo.ID,      // INDEX_ID
					)
					rows = append(rows, record)
				}
			}
		}
	}
	e.rows = rows
}

// setDataFromViews builds the rows of information_schema.VIEWS: one row per
// visible view, with charset/collation defaulted when unset.
func (e *memtableRetriever) setDataFromViews(ctx sessionctx.Context, schemas []*model.DBInfo) {
	checker := privilege.GetPrivilegeManager(ctx)
	var rows [][]types.Datum
	for _, schema := range schemas {
		for _, table := range schema.Tables {
			if !table.IsView() {
				continue
			}
			collation := table.Collate
			charset := table.Charset
			if collation == "" {
				collation = mysql.DefaultCollationName
			}
			if charset == "" {
				charset = mysql.DefaultCharset
			}
			if checker != nil && !checker.RequestVerification(ctx.GetSessionVars().ActiveRoles, schema.Name.L, table.Name.L, "", mysql.AllPrivMask) {
				continue
			}
			record := types.MakeDatums(
				infoschema.CatalogVal,           // TABLE_CATALOG
				schema.Name.O,                   // TABLE_SCHEMA
				table.Name.O,                    // TABLE_NAME
				table.View.SelectStmt,           // VIEW_DEFINITION
				table.View.CheckOption.String(), // CHECK_OPTION
				"NO",                            // IS_UPDATABLE
				table.View.Definer.String(),     // DEFINER
				table.View.Security.String(),    // SECURITY_TYPE
				charset,                         // CHARACTER_SET_CLIENT
				collation,                       // COLLATION_CONNECTION
			)
			rows = append(rows, record)
		}
	}
	e.rows = rows
}

// setDataFromEngines builds the single static row of
// information_schema.ENGINES (TiDB reports itself as InnoDB).
func (e *memtableRetriever) setDataFromEngines() {
	var rows [][]types.Datum
	rows = append(rows,
		types.MakeDatums(
			"InnoDB",  // Engine
			"DEFAULT", // Support
			"Supports transactions, row-level locking, and foreign keys", // Comment
			"YES", // Transactions
			"YES", // XA
			"YES", // Savepoints
		),
	)
	e.rows = rows
}

// setDataFromCharacterSets builds the rows of
// information_schema.CHARACTER_SETS from the supported charset list.
func (e *memtableRetriever) setDataFromCharacterSets() {
	var rows [][]types.Datum
	charsets := charset.GetSupportedCharsets()
	for _, charset := range charsets {
		rows = append(rows,
			types.MakeDatums(charset.Name, charset.DefaultCollation, charset.Desc, charset.Maxlen),
		)
	}
	e.rows = rows
}

// setDataFromCollations builds the rows of information_schema.COLLATIONS
// from the supported collation list.
func (e *memtableRetriever) setDataFromCollations() {
	var rows [][]types.Datum
	collations := charset.GetSupportedCollations()
	for _, collation := range collations {
		isDefault := ""
		if collation.IsDefault {
			isDefault = "Yes"
		}
		rows = append(rows,
			types.MakeDatums(collation.Name, collation.CharsetName, collation.ID, isDefault, "Yes", 1),
		)
	}
	e.rows = rows
}

// dataForCollationCharacterSetApplicability builds the rows of
// information_schema.COLLATION_CHARACTER_SET_APPLICABILITY: one
// (collation, charset) pair per supported collation.
func (e *memtableRetriever) dataForCollationCharacterSetApplicability() {
	var rows [][]types.Datum
	collations := charset.GetSupportedCollations()
	for _, collation := range collations {
		rows = append(rows,
			types.MakeDatums(collation.Name, collation.CharsetName),
		)
	}
	e.rows = rows
}

// setDataFromKeyColumnUsage builds the rows of
// information_schema.KEY_COLUMN_USAGE for every visible table.
func (e *memtableRetriever) setDataFromKeyColumnUsage(ctx sessionctx.Context, schemas []*model.DBInfo) {
	checker := privilege.GetPrivilegeManager(ctx)
	rows := make([][]types.Datum, 0, len(schemas)) // The capacity is not accurate, but it is not a big problem.
	for _, schema := range schemas {
		for _, table := range schema.Tables {
			if checker != nil && !checker.RequestVerification(ctx.GetSessionVars().ActiveRoles, schema.Name.L, table.Name.L, "", mysql.AllPrivMask) {
				continue
			}
			rs := keyColumnUsageInTable(schema, table)
			rows = append(rows, rs...)
		}
	}
	e.rows = rows
}

// setDataFromUserPrivileges fills the rows of
// information_schema.USER_PRIVILEGES straight from the privilege manager.
func (e *memtableRetriever) setDataFromUserPrivileges(ctx sessionctx.Context) {
	pm := privilege.GetPrivilegeManager(ctx)
	e.rows = pm.UserPrivilegesTable()
}

// keyColumnUsageInTable builds the KEY_COLUMN_USAGE rows for one table:
// a PRIMARY row for an integer-handle primary key (PKIsHandle), one row per
// column of each primary/unique index, and one row per column of each
// foreign key (non-unique secondary indexes are skipped).
func keyColumnUsageInTable(schema *model.DBInfo, table *model.TableInfo) [][]types.Datum {
	var rows [][]types.Datum
	if table.PKIsHandle {
		for _, col := range table.Columns {
			if mysql.HasPriKeyFlag(col.Flag) {
				record := types.MakeDatums(
					infoschema.CatalogVal,        // CONSTRAINT_CATALOG
					schema.Name.O,                // CONSTRAINT_SCHEMA
					infoschema.PrimaryConstraint, // CONSTRAINT_NAME
					infoschema.CatalogVal,        // TABLE_CATALOG
					schema.Name.O,                // TABLE_SCHEMA
					table.Name.O,                 // TABLE_NAME
					col.Name.O,                   // COLUMN_NAME
					1,                            // ORDINAL_POSITION
					1,                            // POSITION_IN_UNIQUE_CONSTRAINT
					nil,                          // REFERENCED_TABLE_SCHEMA
					nil,                          // REFERENCED_TABLE_NAME
					nil,                          // REFERENCED_COLUMN_NAME
				)
				rows = append(rows, record)
				break
			}
		}
	}
	// Index and FK entries reference columns by name; build a lookup first.
	nameToCol := make(map[string]*model.ColumnInfo, len(table.Columns))
	for _, c := range table.Columns {
		nameToCol[c.Name.L] = c
	}
	for _, index := range table.Indices {
		var idxName string
		if index.Primary {
			idxName = infoschema.PrimaryConstraint
		} else if index.Unique {
			idxName = index.Name.O
		} else {
			// Only handle unique/primary key
			continue
		}
		for i, key := range index.Columns {
			col := nameToCol[key.Name.L]
			record := types.MakeDatums(
				infoschema.CatalogVal, // CONSTRAINT_CATALOG
				schema.Name.O,         // CONSTRAINT_SCHEMA
				idxName,               // CONSTRAINT_NAME
				infoschema.CatalogVal, // TABLE_CATALOG
				schema.Name.O,         // TABLE_SCHEMA
				table.Name.O,          // TABLE_NAME
				col.Name.O,            // COLUMN_NAME
				i+1,                   // ORDINAL_POSITION,
				nil,                   // POSITION_IN_UNIQUE_CONSTRAINT
				nil,                   // REFERENCED_TABLE_SCHEMA
				nil,                   // REFERENCED_TABLE_NAME
				nil,                   // REFERENCED_COLUMN_NAME
			)
			rows = append(rows, record)
		}
	}
	for _, fk := range table.ForeignKeys {
		// NOTE(review): only the first referenced column is reported for
		// every FK column — confirm this matches the intended behavior.
		fkRefCol := ""
		if len(fk.RefCols) > 0 {
			fkRefCol = fk.RefCols[0].O
		}
		for i, key := range fk.Cols {
			col := nameToCol[key.L]
			record := types.MakeDatums(
				infoschema.CatalogVal, // CONSTRAINT_CATALOG
				schema.Name.O,         // CONSTRAINT_SCHEMA
				fk.Name.O,             // CONSTRAINT_NAME
				infoschema.CatalogVal, // TABLE_CATALOG
				schema.Name.O,         // TABLE_SCHEMA
				table.Name.O,          // TABLE_NAME
				col.Name.O,            // COLUMN_NAME
				i+1,                   // ORDINAL_POSITION,
				1,                     // POSITION_IN_UNIQUE_CONSTRAINT
				schema.Name.O,         // REFERENCED_TABLE_SCHEMA
				fk.RefTable.O,         // REFERENCED_TABLE_NAME
				fkRefCol,              // REFERENCED_COLUMN_NAME
			)
			rows = append(rows, record)
		}
	}
	return rows
}