From 7eb6e61b3d97322edbf471b09f0841a0d4e205bb Mon Sep 17 00:00:00 2001
From: Rustin Liu
Date: Thu, 9 Nov 2023 17:54:42 +0800
Subject: [PATCH] statistics: Refactor code to improve readability (#48414)

---
 pkg/statistics/handle/autoanalyze/BUILD.bazel |   1 +
 .../handle/autoanalyze/autoanalyze.go         | 230 +++++++++++++-----
 .../handle/autoanalyze/autoanalyze_test.go    |  11 +-
 pkg/statistics/handle/lockstats/lock_stats.go |   6 +-
 .../handle/lockstats/unlock_stats.go          |   4 +-
 pkg/statistics/handle/util/BUILD.bazel        |   2 +
 pkg/statistics/handle/util/interfaces.go      |   7 +
 7 files changed, 179 insertions(+), 82 deletions(-)

diff --git a/pkg/statistics/handle/autoanalyze/BUILD.bazel b/pkg/statistics/handle/autoanalyze/BUILD.bazel
index c27b9ca8fd..3e7281e0ff 100644
--- a/pkg/statistics/handle/autoanalyze/BUILD.bazel
+++ b/pkg/statistics/handle/autoanalyze/BUILD.bazel
@@ -14,6 +14,7 @@ go_library(
         "//pkg/sessionctx/variable",
         "//pkg/statistics",
         "//pkg/statistics/handle/util",
+        "//pkg/table",
         "//pkg/types",
         "//pkg/util",
         "//pkg/util/chunk",
diff --git a/pkg/statistics/handle/autoanalyze/autoanalyze.go b/pkg/statistics/handle/autoanalyze/autoanalyze.go
index 0592ba2c19..27c3ec48a9 100644
--- a/pkg/statistics/handle/autoanalyze/autoanalyze.go
+++ b/pkg/statistics/handle/autoanalyze/autoanalyze.go
@@ -32,6 +32,7 @@ import (
 	"github.com/pingcap/tidb/pkg/sessionctx/variable"
 	"github.com/pingcap/tidb/pkg/statistics"
 	statsutil "github.com/pingcap/tidb/pkg/statistics/handle/util"
+	"github.com/pingcap/tidb/pkg/table"
 	"github.com/pingcap/tidb/pkg/types"
 	"github.com/pingcap/tidb/pkg/util"
 	"github.com/pingcap/tidb/pkg/util/chunk"
@@ -101,6 +102,8 @@ func parseAutoAnalyzeRatio(ratio string) float64 {
 	return math.Max(autoAnalyzeRatio, 0)
 }
 
+// parseAnalyzePeriod parses the start and end time for auto analyze.
+// It parses the times in the UTC location.
 func parseAnalyzePeriod(start, end string) (time.Time, time.Time, error) {
 	if start == "" {
 		start = variable.DefAutoAnalyzeStartTime
@@ -129,21 +132,51 @@ func getAutoAnalyzeParameters(sctx sessionctx.Context) map[string]string {
 	return parameters
 }
 
+func getAllTidsAndPids(tbls []table.Table) []int64 {
+	tidsAndPids := make([]int64, 0, len(tbls))
+	for _, tbl := range tbls {
+		tidsAndPids = append(tidsAndPids, tbl.Meta().ID)
+		tblInfo := tbl.Meta()
+		pi := tblInfo.GetPartitionInfo()
+		if pi != nil {
+			for _, def := range pi.Definitions {
+				tidsAndPids = append(tidsAndPids, def.ID)
+			}
+		}
+	}
+	return tidsAndPids
+}
+
 // HandleAutoAnalyze analyzes the newly created table or index.
-func HandleAutoAnalyze(sctx sessionctx.Context,
+func HandleAutoAnalyze(
+	sctx sessionctx.Context,
 	statsHandle statsutil.StatsHandle,
-	is infoschema.InfoSchema) (analyzed bool) {
+	is infoschema.InfoSchema,
+) (analyzed bool) {
 	defer func() {
 		if r := recover(); r != nil {
-			logutil.BgLogger().Error("HandleAutoAnalyze panicked", zap.Any("error", r), zap.Stack("stack"))
+			statsutil.StatsLogger.Error(
+				"HandleAutoAnalyze panicked",
+				zap.Any("recover", r),
+				zap.Stack("stack"),
+			)
 		}
 	}()
+
 	dbs := is.AllSchemaNames()
 	parameters := getAutoAnalyzeParameters(sctx)
 	autoAnalyzeRatio := parseAutoAnalyzeRatio(parameters[variable.TiDBAutoAnalyzeRatio])
-	start, end, err := parseAnalyzePeriod(parameters[variable.TiDBAutoAnalyzeStartTime], parameters[variable.TiDBAutoAnalyzeEndTime])
+
+	// Get the available time period for auto analyze and check if the current time is in the period.
+	start, end, err := parseAnalyzePeriod(
+		parameters[variable.TiDBAutoAnalyzeStartTime],
+		parameters[variable.TiDBAutoAnalyzeEndTime],
+	)
 	if err != nil {
-		logutil.BgLogger().Error("parse auto analyze period failed", zap.String("category", "stats"), zap.Error(err))
+		statsutil.StatsLogger.Error(
+			"parse auto analyze period failed",
+			zap.Error(err),
+		)
 		return false
 	}
 	if !timeutil.WithinDayTimePeriod(start, end, time.Now()) {
@@ -151,14 +184,17 @@ func HandleAutoAnalyze(sctx sessionctx.Context,
 	}
 	pruneMode := variable.PartitionPruneMode(sctx.GetSessionVars().PartitionPruneMode.Load())
+
 	// Shuffle the database and table slice to randomize the order of analyzing tables.
 	rd := rand.New(rand.NewSource(time.Now().UnixNano())) // #nosec G404
 	rd.Shuffle(len(dbs), func(i, j int) {
 		dbs[i], dbs[j] = dbs[j], dbs[i]
 	})
+
 	for _, db := range dbs {
+		// Ignore memory and system databases.
 		if util.IsMemOrSysDB(strings.ToLower(db)) {
 			continue
 		}
+
 		tbls := is.SchemaTables(model.NewCIStr(db))
 		// We shuffle dbs and tbls so that the order of iterating tables is random. If the order is fixed and the auto
 		// analyze job of one table fails for some reason, it may always analyze the same table and fail again and again
@@ -168,41 +204,35 @@ func HandleAutoAnalyze(sctx sessionctx.Context,
 			tbls[i], tbls[j] = tbls[j], tbls[i]
 		})
 
-		// We need to check every partition of every table to see if it needs to be analyzed.
-		tidsAndPids := make([]int64, 0, len(tbls))
-		for _, tbl := range tbls {
-			tidsAndPids = append(tidsAndPids, tbl.Meta().ID)
-			tblInfo := tbl.Meta()
-			pi := tblInfo.GetPartitionInfo()
-			if pi != nil {
-				for _, def := range pi.Definitions {
-					tidsAndPids = append(tidsAndPids, def.ID)
-				}
-			}
-		}
-
+		tidsAndPids := getAllTidsAndPids(tbls)
 		lockedTables, err := statsHandle.GetLockedTables(tidsAndPids...)
 		if err != nil {
-			logutil.BgLogger().Error("check table lock failed",
-				zap.String("category", "stats"), zap.Error(err))
+			statsutil.StatsLogger.Error(
+				"check table lock failed",
+				zap.Error(err),
+			)
 			continue
 		}
+
+		// We need to check every partition of every table to see if it needs to be analyzed.
 		for _, tbl := range tbls {
 			// If table locked, skip analyze all partitions of the table.
 			// FIXME: This check is not accurate, because other nodes may change the table lock status at any time.
 			if _, ok := lockedTables[tbl.Meta().ID]; ok {
 				continue
 			}
+
 			tblInfo := tbl.Meta()
 			if tblInfo.IsView() {
 				continue
 			}
+
 			pi := tblInfo.GetPartitionInfo()
+			// No partitions, analyze the whole table.
 			if pi == nil {
 				statsTbl := statsHandle.GetTableStats(tblInfo)
 				sql := "analyze table %n.%n"
-				analyzed := autoAnalyzeTable(sctx, statsHandle, tblInfo, statsTbl, autoAnalyzeRatio, sql, db, tblInfo.Name.O)
+				analyzed := tryAutoAnalyzeTable(sctx, statsHandle, tblInfo, statsTbl, autoAnalyzeRatio, sql, db, tblInfo.Name.O)
 				if analyzed {
 					// analyze one table at a time to let it get the freshest parameters.
 					// others will be analyzed next round which is just 3s later.
@@ -218,7 +248,7 @@
 				}
 			}
 			if pruneMode == variable.Dynamic {
-				analyzed := autoAnalyzePartitionTableInDynamicMode(sctx, statsHandle, tblInfo, partitionDefs, db, autoAnalyzeRatio)
+				analyzed := tryAutoAnalyzePartitionTableInDynamicMode(sctx, statsHandle, tblInfo, partitionDefs, db, autoAnalyzeRatio)
 				if analyzed {
 					return true
 				}
@@ -227,37 +257,61 @@
 			for _, def := range partitionDefs {
 				sql := "analyze table %n.%n partition %n"
 				statsTbl := statsHandle.GetPartitionStats(tblInfo, def.ID)
-				analyzed := autoAnalyzeTable(sctx, statsHandle, tblInfo, statsTbl, autoAnalyzeRatio, sql, db, tblInfo.Name.O, def.Name.O)
+				analyzed := tryAutoAnalyzeTable(sctx, statsHandle, tblInfo, statsTbl, autoAnalyzeRatio, sql, db, tblInfo.Name.O, def.Name.O)
 				if analyzed {
 					return true
 				}
 			}
 		}
 	}
+
 	return false
 }
 
-// AutoAnalyzeMinCnt means if the count of table is less than this value, we needn't do auto analyze.
+// AutoAnalyzeMinCnt means if the row count of a table is less than this value, we don't need to do auto analyze.
+// Exported for testing.
 var AutoAnalyzeMinCnt int64 = 1000
 
-func autoAnalyzeTable(sctx sessionctx.Context,
+// tryAutoAnalyzeTable determines whether the table and its indexes require analysis and triggers the analysis if so.
+func tryAutoAnalyzeTable(
+	sctx sessionctx.Context,
 	statsHandle statsutil.StatsHandle,
-	tblInfo *model.TableInfo, statsTbl *statistics.Table,
-	ratio float64, sql string, params ...interface{}) bool {
+	tblInfo *model.TableInfo,
+	statsTbl *statistics.Table,
+	ratio float64,
+	sql string,
+	params ...interface{},
+) bool {
+	// 1. If the stats are not loaded, we don't need to analyze the table.
+	// 2. If the table is too small, we don't want to waste time analyzing it.
+	//    Leave the opportunity to other, bigger tables.
 	if statsTbl.Pseudo || statsTbl.RealtimeCount < AutoAnalyzeMinCnt {
 		return false
 	}
-	if needAnalyze, reason := NeedAnalyzeTable(statsTbl, 20*statsHandle.Lease(), ratio); needAnalyze {
+
+	// Check if the table needs to be analyzed.
+	if needAnalyze, reason := NeedAnalyzeTable(
+		statsTbl,
+		ratio,
+	); needAnalyze {
 		escaped, err := sqlescape.EscapeSQL(sql, params...)
 		if err != nil {
 			return false
 		}
-		logutil.BgLogger().Info("auto analyze triggered", zap.String("category", "stats"), zap.String("sql", escaped), zap.String("reason", reason))
+		statsutil.StatsLogger.Info(
+			"auto analyze triggered",
+			zap.String("sql", escaped),
+			zap.String("reason", reason),
+		)
+
 		tableStatsVer := sctx.GetSessionVars().AnalyzeVersion
 		statistics.CheckAnalyzeVerOnTable(statsTbl, &tableStatsVer)
 		execAutoAnalyze(sctx, statsHandle, tableStatsVer, sql, params...)
+
 		return true
 	}
+
+	// Even if the table does not need to be analyzed, some of its indexes may still be unanalyzed, so check them as well.
 	for _, idx := range tblInfo.Indices {
 		if _, ok := statsTbl.Indices[idx.ID]; !ok && idx.State == model.StatePublic {
 			sqlWithIdx := sql + " index %n"
@@ -266,7 +320,11 @@
 			if err != nil {
 				return false
 			}
-			logutil.BgLogger().Info("auto analyze for unanalyzed", zap.String("category", "stats"), zap.String("sql", escaped))
+
+			statsutil.StatsLogger.Info(
+				"auto analyze for unanalyzed indexes",
+				zap.String("sql", escaped),
+			)
 			tableStatsVer := sctx.GetSessionVars().AnalyzeVersion
 			statistics.CheckAnalyzeVerOnTable(statsTbl, &tableStatsVer)
 			execAutoAnalyze(sctx, statsHandle, tableStatsVer, sqlWithIdx, paramsWithIdx...)
@@ -277,14 +335,13 @@
 }
 
 // NeedAnalyzeTable checks if we need to analyze the table:
-// 1. If the table has never been analyzed, we need to analyze it when it has
-// not been modified for a while.
+// 1. If the table has never been analyzed, we need to analyze it.
 // 2. If the table had been analyzed before, we need to analyze it when
 // "tbl.ModifyCount/tbl.Count > autoAnalyzeRatio" and the current time is
 // between `start` and `end`.
 //
 // Exposed for test.
-func NeedAnalyzeTable(tbl *statistics.Table, _ time.Duration, autoAnalyzeRatio float64) (bool, string) {
+func NeedAnalyzeTable(tbl *statistics.Table, autoAnalyzeRatio float64) (bool, string) {
 	analyzed := TableAnalyzed(tbl)
 	if !analyzed {
 		return true, "table unanalyzed"
@@ -304,8 +361,7 @@
 	return true, fmt.Sprintf("too many modifications(%v/%v>%v)", tbl.ModifyCount, tblCnt, autoAnalyzeRatio)
 }
 
-// TableAnalyzed checks if the table is analyzed.
-// Exposed for test.
+// TableAnalyzed checks if any column or index of the table has been analyzed.
 func TableAnalyzed(tbl *statistics.Table) bool {
 	for _, col := range tbl.Columns {
 		if col.IsAnalyzed() {
@@ -320,28 +376,43 @@ func TableAnalyzed(tbl *statistics.Table) bool {
 	return false
 }
 
-func autoAnalyzePartitionTableInDynamicMode(sctx sessionctx.Context,
+// tryAutoAnalyzePartitionTableInDynamicMode is very similar to tryAutoAnalyzeTable, but it commits the analyze jobs for partitions in batches.
+func tryAutoAnalyzePartitionTableInDynamicMode(
+	sctx sessionctx.Context,
 	statsHandle statsutil.StatsHandle,
-	tblInfo *model.TableInfo, partitionDefs []model.PartitionDefinition,
-	db string, ratio float64) bool {
+	tblInfo *model.TableInfo,
+	partitionDefs []model.PartitionDefinition,
+	db string,
+	ratio float64,
+) bool {
 	tableStatsVer := sctx.GetSessionVars().AnalyzeVersion
 	analyzePartitionBatchSize := int(variable.AutoAnalyzePartitionBatchSize.Load())
-	partitionNames := make([]interface{}, 0, len(partitionDefs))
+	needAnalyzePartitionNames := make([]interface{}, 0, len(partitionDefs))
+
 	for _, def := range partitionDefs {
 		partitionStatsTbl := statsHandle.GetPartitionStats(tblInfo, def.ID)
+		// 1. If the stats are not loaded, we don't need to analyze the partition.
+		// 2. If the partition is too small, we don't want to waste time analyzing it.
+		//    Leave the opportunity to other, bigger tables.
 		if partitionStatsTbl.Pseudo || partitionStatsTbl.RealtimeCount < AutoAnalyzeMinCnt {
 			continue
 		}
-		if needAnalyze, reason := NeedAnalyzeTable(partitionStatsTbl, 20*statsHandle.Lease(), ratio); needAnalyze {
-			partitionNames = append(partitionNames, def.Name.O)
-			logutil.BgLogger().Info("need to auto analyze", zap.String("category", "stats"),
+		if needAnalyze, reason := NeedAnalyzeTable(
+			partitionStatsTbl,
+			ratio,
+		); needAnalyze {
+			needAnalyzePartitionNames = append(needAnalyzePartitionNames, def.Name.O)
+			statsutil.StatsLogger.Info(
+				"need to auto analyze",
 				zap.String("database", db),
 				zap.String("table", tblInfo.Name.String()),
 				zap.String("partition", def.Name.O),
-				zap.String("reason", reason))
+				zap.String("reason", reason),
+			)
 			statistics.CheckAnalyzeVerOnTable(partitionStatsTbl, &tableStatsVer)
 		}
 	}
+
 	getSQL := func(prefix, suffix string, numPartitions int) string {
 		var sqlBuilder strings.Builder
 		sqlBuilder.WriteString(prefix)
@@ -354,63 +425,79 @@
 		sqlBuilder.WriteString(suffix)
 		return sqlBuilder.String()
 	}
-	if len(partitionNames) > 0 {
-		logutil.BgLogger().Info("start to auto analyze", zap.String("category", "stats"),
+
+	if len(needAnalyzePartitionNames) > 0 {
+		statsutil.StatsLogger.Info("start to auto analyze",
 			zap.String("database", db),
 			zap.String("table", tblInfo.Name.String()),
-			zap.Any("partitions", partitionNames),
-			zap.Int("analyze partition batch size", analyzePartitionBatchSize))
+			zap.Any("partitions", needAnalyzePartitionNames),
+			zap.Int("analyze partition batch size", analyzePartitionBatchSize),
+		)
+
 		statsTbl := statsHandle.GetTableStats(tblInfo)
 		statistics.CheckAnalyzeVerOnTable(statsTbl, &tableStatsVer)
-		for i := 0; i < len(partitionNames); i += analyzePartitionBatchSize {
+		for i := 0; i < len(needAnalyzePartitionNames); i += analyzePartitionBatchSize {
 			start := i
 			end := start + analyzePartitionBatchSize
-			if end >= len(partitionNames) {
-				end = len(partitionNames)
+			if end >= len(needAnalyzePartitionNames) {
+				end = len(needAnalyzePartitionNames)
 			}
+
+			// Do batch analyze for partitions.
 			sql := getSQL("analyze table %n.%n partition", "", end-start)
-			params := append([]interface{}{db, tblInfo.Name.O}, partitionNames[start:end]...)
-			logutil.BgLogger().Info("auto analyze triggered", zap.String("category", "stats"),
+			params := append([]interface{}{db, tblInfo.Name.O}, needAnalyzePartitionNames[start:end]...)
+
+			statsutil.StatsLogger.Info(
+				"auto analyze triggered",
 				zap.String("database", db),
 				zap.String("table", tblInfo.Name.String()),
-				zap.Any("partitions", partitionNames[start:end]))
+				zap.Any("partitions", needAnalyzePartitionNames[start:end]),
+			)
 			execAutoAnalyze(sctx, statsHandle, tableStatsVer, sql, params...)
 		}
+
 		return true
 	}
+
+	// Check if any index of the table needs to be analyzed.
 	for _, idx := range tblInfo.Indices {
 		if idx.State != model.StatePublic {
 			continue
 		}
+		// Collect all the partition names that need to be analyzed.
 		for _, def := range partitionDefs {
 			partitionStatsTbl := statsHandle.GetPartitionStats(tblInfo, def.ID)
 			if _, ok := partitionStatsTbl.Indices[idx.ID]; !ok {
-				partitionNames = append(partitionNames, def.Name.O)
+				needAnalyzePartitionNames = append(needAnalyzePartitionNames, def.Name.O)
 				statistics.CheckAnalyzeVerOnTable(partitionStatsTbl, &tableStatsVer)
 			}
 		}
-		if len(partitionNames) > 0 {
+		if len(needAnalyzePartitionNames) > 0 {
 			statsTbl := statsHandle.GetTableStats(tblInfo)
 			statistics.CheckAnalyzeVerOnTable(statsTbl, &tableStatsVer)
-			for i := 0; i < len(partitionNames); i += analyzePartitionBatchSize {
+
+			for i := 0; i < len(needAnalyzePartitionNames); i += analyzePartitionBatchSize {
 				start := i
 				end := start + analyzePartitionBatchSize
-				if end >= len(partitionNames) {
-					end = len(partitionNames)
+				if end >= len(needAnalyzePartitionNames) {
+					end = len(needAnalyzePartitionNames)
 				}
+
 				sql := getSQL("analyze table %n.%n partition", " index %n", end-start)
-				params := append([]interface{}{db, tblInfo.Name.O}, partitionNames[start:end]...)
+				params := append([]interface{}{db, tblInfo.Name.O}, needAnalyzePartitionNames[start:end]...)
 				params = append(params, idx.Name.O)
-				logutil.BgLogger().Info("auto analyze for unanalyzed", zap.String("category", "stats"),
+				statsutil.StatsLogger.Info("auto analyze for unanalyzed",
 					zap.String("database", db),
 					zap.String("table", tblInfo.Name.String()),
 					zap.String("index", idx.Name.String()),
-					zap.Any("partitions", partitionNames[start:end]))
+					zap.Any("partitions", needAnalyzePartitionNames[start:end]),
+				)
 				execAutoAnalyze(sctx, statsHandle, tableStatsVer, sql, params...)
 			}
+
 			return true
 		}
 	}
+
 	return false
 }
 
@@ -420,10 +507,13 @@ var execOptionForAnalyze = map[int]sqlexec.OptionFuncAlias{
 	statistics.Version2: sqlexec.ExecOptionAnalyzeVer2,
 }
 
-func execAutoAnalyze(sctx sessionctx.Context,
+func execAutoAnalyze(
+	sctx sessionctx.Context,
 	statsHandle statsutil.StatsHandle,
 	statsVer int,
-	sql string, params ...interface{}) {
+	sql string,
+	params ...interface{},
+) {
 	startTime := time.Now()
 	_, _, err := execAnalyzeStmt(sctx, statsHandle, statsVer, sql, params...)
 	dur := time.Since(startTime)
@@ -433,17 +523,25 @@
 	if err != nil {
 		escaped, err1 := sqlescape.EscapeSQL(sql, params...)
 		if err1 != nil {
 			escaped = ""
 		}
-		logutil.BgLogger().Error("auto analyze failed", zap.String("category", "stats"), zap.String("sql", escaped), zap.Duration("cost_time", dur), zap.Error(err))
+		statsutil.StatsLogger.Error(
+			"auto analyze failed",
+			zap.String("sql", escaped),
+			zap.Duration("cost_time", dur),
+			zap.Error(err),
+		)
 		metrics.AutoAnalyzeCounter.WithLabelValues("failed").Inc()
 	} else {
 		metrics.AutoAnalyzeCounter.WithLabelValues("succ").Inc()
 	}
 }
 
-func execAnalyzeStmt(sctx sessionctx.Context,
+func execAnalyzeStmt(
+	sctx sessionctx.Context,
 	statsHandle statsutil.StatsHandle,
 	statsVer int,
-	sql string, params ...interface{}) ([]chunk.Row, []*ast.ResultField, error) {
+	sql string,
+	params ...interface{},
+) ([]chunk.Row, []*ast.ResultField, error) {
 	pruneMode := sctx.GetSessionVars().PartitionPruneMode.Load()
 	analyzeSnapshot := sctx.GetSessionVars().EnableAnalyzeSnapshot
 	optFuncs := []sqlexec.OptionFuncAlias{
diff --git a/pkg/statistics/handle/autoanalyze/autoanalyze_test.go b/pkg/statistics/handle/autoanalyze/autoanalyze_test.go
index 4cada5ca98..0dff953bcf 100644
--- a/pkg/statistics/handle/autoanalyze/autoanalyze_test.go
+++ b/pkg/statistics/handle/autoanalyze/autoanalyze_test.go
@@ -138,14 +138,12 @@ func TestNeedAnalyzeTable(t *testing.T) {
 	tests := []struct {
 		tbl    *statistics.Table
 		ratio  float64
-		limit  time.Duration
 		result bool
 		reason string
 	}{
 		// table was never analyzed and has reach the limit
 		{
 			tbl:    &statistics.Table{Version: oracle.GoTimeToTS(time.Now())},
-			limit:  0,
 			ratio:  0,
 			result: true,
 			reason: "table unanalyzed",
@@ -153,7 +151,6 @@ func TestNeedAnalyzeTable(t *testing.T) {
 		// table was never analyzed but has not reached the limit
 		{
 			tbl:    &statistics.Table{Version: oracle.GoTimeToTS(time.Now())},
-			limit:  time.Hour,
 			ratio:  0,
 			result: true,
 			reason: "table unanalyzed",
@@ -161,7 +158,6 @@ func TestNeedAnalyzeTable(t *testing.T) {
 		// table was already analyzed but auto analyze is disabled
 		{
 			tbl:    &statistics.Table{HistColl: statistics.HistColl{Columns: columns, ModifyCount: 1, RealtimeCount: 1}},
-			limit:  0,
 			ratio:  0,
 			result: false,
 			reason: "",
@@ -169,7 +165,6 @@ func TestNeedAnalyzeTable(t *testing.T) {
 		// table was already analyzed but modify count is small
 		{
 			tbl:    &statistics.Table{HistColl: statistics.HistColl{Columns: columns, ModifyCount: 0, RealtimeCount: 1}},
-			limit:  0,
 			ratio:  0.3,
 			result: false,
 			reason: "",
@@ -177,7 +172,6 @@ func TestNeedAnalyzeTable(t *testing.T) {
 		// table was already analyzed
 		{
 			tbl:    &statistics.Table{HistColl: statistics.HistColl{Columns: columns, ModifyCount: 1, RealtimeCount: 1}},
-			limit:  0,
 			ratio:  0.3,
 			result: true,
 			reason: "too many modifications",
@@ -185,7 +179,6 @@ func TestNeedAnalyzeTable(t *testing.T) {
 		// table was already analyzed
 		{
 			tbl:    &statistics.Table{HistColl: statistics.HistColl{Columns: columns, ModifyCount: 1, RealtimeCount: 1}},
-			limit:  0,
 			ratio:  0.3,
 			result: true,
 			reason: "too many modifications",
@@ -193,7 +186,6 @@ func TestNeedAnalyzeTable(t *testing.T) {
 		// table was already analyzed
 		{
 			tbl:    &statistics.Table{HistColl: statistics.HistColl{Columns: columns, ModifyCount: 1, RealtimeCount: 1}},
-			limit:  0,
 			ratio:  0.3,
 			result: true,
 			reason: "too many modifications",
@@ -201,14 +193,13 @@ func TestNeedAnalyzeTable(t *testing.T) {
 		// table was already analyzed
 		{
 			tbl:    &statistics.Table{HistColl: statistics.HistColl{Columns: columns, ModifyCount: 1, RealtimeCount: 1}},
-			limit:  0,
 			ratio:  0.3,
 			result: true,
 			reason: "too many modifications",
 		},
 	}
 
 	for _, test := range tests {
-		needAnalyze, reason := autoanalyze.NeedAnalyzeTable(test.tbl, test.limit, test.ratio)
+		needAnalyze, reason := autoanalyze.NeedAnalyzeTable(test.tbl, test.ratio)
 		require.Equal(t, test.result, needAnalyze)
 		require.True(t, strings.HasPrefix(reason, test.reason))
 	}
diff --git a/pkg/statistics/handle/lockstats/lock_stats.go b/pkg/statistics/handle/lockstats/lock_stats.go
index e19e5a8493..7a64e846ee 100644
--- a/pkg/statistics/handle/lockstats/lock_stats.go
+++ b/pkg/statistics/handle/lockstats/lock_stats.go
@@ -129,8 +129,6 @@ func (sl *statsLockImpl) GetTableLockedAndClearForTest() (map[int64]struct{}, er
 }
 
 var (
-	// Stats logger.
-	statsLogger = logutil.BgLogger().With(zap.String("category", "stats"))
 	// useCurrentSession to make sure the sql is executed in current session.
 	useCurrentSession = []sqlexec.OptionFuncAlias{sqlexec.ExecOptionUseCurSession}
 )
@@ -157,7 +155,7 @@ func AddLockedTables(
 		ids = append(ids, pid)
 	}
 
-	statsLogger.Info("lock table",
+	util.StatsLogger.Info("lock table",
 		zap.Any("tables", tables),
 	)
@@ -211,7 +209,7 @@ func AddLockedPartitions(
 		pNames = append(pNames, pName)
 	}
 
-	statsLogger.Info("lock partitions",
+	util.StatsLogger.Info("lock partitions",
 		zap.Int64("tableID", tid),
 		zap.String("tableName", tableName),
 		zap.Int64s("partitionIDs", pids),
diff --git a/pkg/statistics/handle/lockstats/unlock_stats.go b/pkg/statistics/handle/lockstats/unlock_stats.go
index 56bf40aa8f..0fe4dee548 100644
--- a/pkg/statistics/handle/lockstats/unlock_stats.go
+++ b/pkg/statistics/handle/lockstats/unlock_stats.go
@@ -52,7 +52,7 @@ func RemoveLockedTables(
 		}
 	}
 
-	statsLogger.Info("unlock table",
+	util.StatsLogger.Info("unlock table",
 		zap.Any("tables", tables),
 	)
@@ -105,7 +105,7 @@ func RemoveLockedPartitions(
 	for pid := range pidNames {
 		pids = append(pids, pid)
 	}
-	statsLogger.Info("unlock partitions",
+	util.StatsLogger.Info("unlock partitions",
 		zap.Int64("tableID", tid),
 		zap.String("tableName", tableName),
 		zap.Int64s("partitionIDs", pids),
diff --git a/pkg/statistics/handle/util/BUILD.bazel b/pkg/statistics/handle/util/BUILD.bazel
index 292161a378..5cf414f02d 100644
--- a/pkg/statistics/handle/util/BUILD.bazel
+++ b/pkg/statistics/handle/util/BUILD.bazel
@@ -24,6 +24,7 @@ go_library(
         "//pkg/util",
         "//pkg/util/chunk",
         "//pkg/util/intest",
+        "//pkg/util/logutil",
         "//pkg/util/sqlexec",
         "//pkg/util/sqlexec/mock",
         "@com_github_ngaut_pools//:pools",
@@ -31,5 +32,6 @@ go_library(
         "@com_github_pingcap_tipb//go-tipb",
         "@com_github_tiancaiamao_gp//:gp",
         "@com_github_tikv_client_go_v2//oracle",
+        "@org_uber_go_zap//:zap",
     ],
 )
diff --git a/pkg/statistics/handle/util/interfaces.go b/pkg/statistics/handle/util/interfaces.go
index e3cd3a2ef8..9aa6b0ab95 100644
--- a/pkg/statistics/handle/util/interfaces.go
+++ b/pkg/statistics/handle/util/interfaces.go
@@ -27,8 +27,15 @@ import (
 	"github.com/pingcap/tidb/pkg/statistics"
 	"github.com/pingcap/tidb/pkg/types"
 	"github.com/pingcap/tidb/pkg/util"
+	"github.com/pingcap/tidb/pkg/util/logutil"
 	"github.com/pingcap/tidb/pkg/util/sqlexec"
 	"github.com/tiancaiamao/gp"
+	"go.uber.org/zap"
+)
+
+var (
+	// StatsLogger with category "stats" is used to log statistics-related messages.
+	StatsLogger = logutil.BgLogger().With(zap.String("category", "stats"))
 )
 
 // StatsGC is used to GC unnecessary stats.
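
Note for reviewers (not part of the patch): tryAutoAnalyzePartitionTableInDynamicMode collects the partitions that need analysis and then issues one ANALYZE statement per batch of at most AutoAnalyzePartitionBatchSize partitions, building the statement text with the getSQL helper. Because getSQL's placeholder-joining loop is elided by a hunk boundary above, the standalone Go sketch below mirrors the batching logic so it can be read in isolation. buildAnalyzeSQL, the sample partition names, and the fixed batch size are assumptions local to this sketch, and the comma-separated " %n" joining is inferred from the visible prefix/suffix calls rather than taken verbatim from the patch.

package main

import (
	"fmt"
	"strings"
)

// buildAnalyzeSQL stands in for the patch's getSQL closure: it writes the
// prefix, then one " %n" placeholder per partition (comma-separated), then
// the suffix ("" for plain partitions, " index %n" for the index variant).
func buildAnalyzeSQL(prefix, suffix string, numPartitions int) string {
	var b strings.Builder
	b.WriteString(prefix)
	for i := 0; i < numPartitions; i++ {
		if i != 0 {
			b.WriteString(",")
		}
		b.WriteString(" %n")
	}
	b.WriteString(suffix)
	return b.String()
}

func main() {
	// Hypothetical partition names standing in for needAnalyzePartitionNames.
	partitions := []string{"p0", "p1", "p2", "p3", "p4"}
	// Stands in for the loaded value of variable.AutoAnalyzePartitionBatchSize.
	const batchSize = 2

	// Chunk the partition list the same way the loop in the patch does: each
	// iteration handles at most batchSize partitions.
	for i := 0; i < len(partitions); i += batchSize {
		end := i + batchSize
		if end > len(partitions) {
			end = len(partitions)
		}
		sql := buildAnalyzeSQL("analyze table %n.%n partition", "", end-i)
		fmt.Printf("batch %v -> %q\n", partitions[i:end], sql)
	}
}

Running the sketch prints one statement skeleton per batch, e.g. "analyze table %n.%n partition %n, %n" for the first two partitions and "analyze table %n.%n partition %n" for the final singleton; in the patch the %n placeholders are then bound to the database, table, and partition names when the statement is executed.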