linter: Recover() must be directly called by defer (#44192)
close pingcap/tidb#44191
This commit is contained in:
@ -125,6 +125,7 @@ nogo(
|
||||
"@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library",
|
||||
"//build/linter/asciicheck",
|
||||
"//build/linter/bodyclose",
|
||||
"//build/linter/deferrecover",
|
||||
"//build/linter/durationcheck",
|
||||
"//build/linter/etcdconfig",
|
||||
"//build/linter/exportloopref",
|
||||
|
||||
14
build/linter/deferrecover/BUILD.bazel
Normal file
14
build/linter/deferrecover/BUILD.bazel
Normal file
@ -0,0 +1,14 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
# Build rule for the deferrecover linter: checks that util.Recover is
# invoked directly by a defer statement (see analyzer.go).
go_library(
    name = "deferrecover",
    srcs = ["analyzer.go"],
    importpath = "github.com/pingcap/tidb/build/linter/deferrecover",
    visibility = ["//visibility:public"],
    deps = [
        "//build/linter/util",
        "@org_golang_x_tools//go/analysis",
        "@org_golang_x_tools//go/analysis/passes/inspect",
        "@org_golang_x_tools//go/ast/inspector",
    ],
)
|
||||
80
build/linter/deferrecover/analyzer.go
Normal file
80
build/linter/deferrecover/analyzer.go
Normal file
@ -0,0 +1,80 @@
|
||||
// Copyright 2023 PingCAP, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package deferrecover
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
|
||||
"github.com/pingcap/tidb/build/linter/util"
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/analysis/passes/inspect"
|
||||
"golang.org/x/tools/go/ast/inspector"
|
||||
)
|
||||
|
||||
// Analyzer reports calls of util.Recover that are not the direct call
// expression of a defer statement. recover() only takes effect when the
// function containing it is itself the deferred call, so wrapping
// util.Recover inside another deferred closure silently disables it.
var Analyzer = &analysis.Analyzer{
	Name:     "recover",
	Doc:      "Check Recover() is directly called by defer",
	Requires: []*analysis.Analyzer{inspect.Analyzer},
	Run:      run,
}
|
||||
|
||||
// Identity of the function the linter looks for: the import path and
// default package name of tidb's util package, and the Recover helper
// defined there.
const (
	packagePath = "github.com/pingcap/tidb/util"
	packageName = "util"
	funcName    = "Recover"
)
|
||||
|
||||
func run(pass *analysis.Pass) (interface{}, error) {
|
||||
for _, file := range pass.Files {
|
||||
packageName := util.GetPackageName(file.Imports, packagePath, packageName)
|
||||
if packageName == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
i := inspector.New([]*ast.File{file})
|
||||
|
||||
i.WithStack([]ast.Node{&ast.CallExpr{}}, func(n ast.Node, push bool, stack []ast.Node) bool {
|
||||
if !push {
|
||||
return true
|
||||
}
|
||||
callExpr := n.(*ast.CallExpr)
|
||||
sel, ok := callExpr.Fun.(*ast.SelectorExpr)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
usedPackage, ok := sel.X.(*ast.Ident)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
if usedPackage.Name != packageName {
|
||||
return true
|
||||
}
|
||||
if sel.Sel.Name != funcName {
|
||||
return true
|
||||
}
|
||||
|
||||
// check defer is directly called
|
||||
parentStmt := stack[len(stack)-2]
|
||||
_, ok = parentStmt.(*ast.DeferStmt)
|
||||
if !ok {
|
||||
pass.Reportf(n.Pos(), "Recover() should be directly called by defer")
|
||||
return true
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
@ -6,6 +6,7 @@ go_library(
|
||||
importpath = "github.com/pingcap/tidb/build/linter/etcdconfig",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//build/linter/util",
|
||||
"@org_golang_x_tools//go/analysis",
|
||||
"@org_golang_x_tools//go/analysis/passes/inspect",
|
||||
],
|
||||
|
||||
@ -17,6 +17,7 @@ package etcdconfig
|
||||
import (
|
||||
"go/ast"
|
||||
|
||||
"github.com/pingcap/tidb/build/linter/util"
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/analysis/passes/inspect"
|
||||
)
|
||||
@ -35,20 +36,9 @@ const (
|
||||
configStructName = "Config"
|
||||
)
|
||||
|
||||
// Adapted from https://github.com/mdempsky/unconvert/blob/beb68d938016d2dec1d1b078054f4d3db25f97be/unconvert.go#L371-L414.
|
||||
func run(pass *analysis.Pass) (interface{}, error) {
|
||||
for _, file := range pass.Files {
|
||||
packageName := ""
|
||||
for _, spec := range file.Imports {
|
||||
if spec.Path.Value != "\""+configPackagePath+"\"" {
|
||||
continue
|
||||
}
|
||||
if spec.Name != nil {
|
||||
packageName = spec.Name.Name
|
||||
} else {
|
||||
packageName = configPackageName
|
||||
}
|
||||
}
|
||||
packageName := util.GetPackageName(file.Imports, configPackagePath, configPackageName)
|
||||
if packageName == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
@ -203,3 +203,17 @@ func FindOffset(fileText string, line, column int) int {
|
||||
}
|
||||
return -1 //not found
|
||||
}
|
||||
|
||||
// GetPackageName returns the package name used in this file.
|
||||
func GetPackageName(imports []*ast.ImportSpec, path, defaultName string) string {
|
||||
quoted := `"` + path + `"`
|
||||
for _, imp := range imports {
|
||||
if imp.Path.Value == quoted {
|
||||
if imp.Name != nil {
|
||||
return imp.Name.Name
|
||||
}
|
||||
return defaultName
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
@ -1175,5 +1175,12 @@
|
||||
".*_test.go": "ignore test code",
|
||||
"external/": "no need to vet third party code"
|
||||
}
|
||||
},
|
||||
"deferrecover": {
|
||||
"exclude_files": {
|
||||
"parser/parser.go": "parser/parser.go code",
|
||||
".*_test.go": "ignore test code",
|
||||
"external/": "no need to vet third party code"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1201,10 +1201,11 @@ func (d *ddl) SetHook(h Callback) {
|
||||
|
||||
func (d *ddl) startCleanDeadTableLock() {
|
||||
defer func() {
|
||||
tidbutil.Recover(metrics.LabelDDL, "startCleanDeadTableLock", nil, false)
|
||||
d.wg.Done()
|
||||
}()
|
||||
|
||||
defer tidbutil.Recover(metrics.LabelDDL, "startCleanDeadTableLock", nil, false)
|
||||
|
||||
ticker := time.NewTicker(time.Second * 10)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
|
||||
@ -662,8 +662,10 @@ func (do *Domain) topNSlowQueryLoop() {
|
||||
func (do *Domain) infoSyncerKeeper() {
|
||||
defer func() {
|
||||
logutil.BgLogger().Info("infoSyncerKeeper exited.")
|
||||
util.Recover(metrics.LabelDomain, "infoSyncerKeeper", nil, false)
|
||||
}()
|
||||
|
||||
defer util.Recover(metrics.LabelDomain, "infoSyncerKeeper", nil, false)
|
||||
|
||||
ticker := time.NewTicker(infosync.ReportInterval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
@ -686,8 +688,10 @@ func (do *Domain) infoSyncerKeeper() {
|
||||
func (do *Domain) globalConfigSyncerKeeper() {
|
||||
defer func() {
|
||||
logutil.BgLogger().Info("globalConfigSyncerKeeper exited.")
|
||||
util.Recover(metrics.LabelDomain, "globalConfigSyncerKeeper", nil, false)
|
||||
}()
|
||||
|
||||
defer util.Recover(metrics.LabelDomain, "globalConfigSyncerKeeper", nil, false)
|
||||
|
||||
for {
|
||||
select {
|
||||
case entry := <-do.globalCfgSyncer.NotifyCh:
|
||||
@ -1568,8 +1572,9 @@ func (do *Domain) LoadPrivilegeLoop(sctx sessionctx.Context) error {
|
||||
do.wg.Run(func() {
|
||||
defer func() {
|
||||
logutil.BgLogger().Info("loadPrivilegeInLoop exited.")
|
||||
util.Recover(metrics.LabelDomain, "loadPrivilegeInLoop", nil, false)
|
||||
}()
|
||||
defer util.Recover(metrics.LabelDomain, "loadPrivilegeInLoop", nil, false)
|
||||
|
||||
var count int
|
||||
for {
|
||||
ok := true
|
||||
@ -1617,8 +1622,9 @@ func (do *Domain) LoadSysVarCacheLoop(ctx sessionctx.Context) error {
|
||||
do.wg.Run(func() {
|
||||
defer func() {
|
||||
logutil.BgLogger().Info("LoadSysVarCacheLoop exited.")
|
||||
util.Recover(metrics.LabelDomain, "LoadSysVarCacheLoop", nil, false)
|
||||
}()
|
||||
defer util.Recover(metrics.LabelDomain, "LoadSysVarCacheLoop", nil, false)
|
||||
|
||||
var count int
|
||||
for {
|
||||
ok := true
|
||||
@ -1675,8 +1681,8 @@ func (do *Domain) WatchTiFlashComputeNodeChange() error {
|
||||
do.wg.Run(func() {
|
||||
defer func() {
|
||||
logutil.BgLogger().Info("WatchTiFlashComputeNodeChange exit")
|
||||
util.Recover(metrics.LabelDomain, "WatchTiFlashComputeNodeChange", nil, false)
|
||||
}()
|
||||
defer util.Recover(metrics.LabelDomain, "WatchTiFlashComputeNodeChange", nil, false)
|
||||
|
||||
var count int
|
||||
var logCount int
|
||||
@ -1752,8 +1758,9 @@ func (do *Domain) globalBindHandleWorkerLoop(owner owner.Manager) {
|
||||
do.wg.Run(func() {
|
||||
defer func() {
|
||||
logutil.BgLogger().Info("globalBindHandleWorkerLoop exited.")
|
||||
util.Recover(metrics.LabelDomain, "globalBindHandleWorkerLoop", nil, false)
|
||||
}()
|
||||
defer util.Recover(metrics.LabelDomain, "globalBindHandleWorkerLoop", nil, false)
|
||||
|
||||
bindWorkerTicker := time.NewTicker(bindinfo.Lease)
|
||||
gcBindTicker := time.NewTicker(100 * bindinfo.Lease)
|
||||
defer func() {
|
||||
@ -1794,8 +1801,9 @@ func (do *Domain) handleEvolvePlanTasksLoop(ctx sessionctx.Context, owner owner.
|
||||
do.wg.Run(func() {
|
||||
defer func() {
|
||||
logutil.BgLogger().Info("handleEvolvePlanTasksLoop exited.")
|
||||
util.Recover(metrics.LabelDomain, "handleEvolvePlanTasksLoop", nil, false)
|
||||
}()
|
||||
defer util.Recover(metrics.LabelDomain, "handleEvolvePlanTasksLoop", nil, false)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-do.exit:
|
||||
@ -1825,8 +1833,9 @@ func (do *Domain) TelemetryReportLoop(ctx sessionctx.Context) {
|
||||
do.wg.Run(func() {
|
||||
defer func() {
|
||||
logutil.BgLogger().Info("TelemetryReportLoop exited.")
|
||||
util.Recover(metrics.LabelDomain, "TelemetryReportLoop", nil, false)
|
||||
}()
|
||||
defer util.Recover(metrics.LabelDomain, "TelemetryReportLoop", nil, false)
|
||||
|
||||
owner := do.newOwnerManager(telemetry.Prompt, telemetry.OwnerKey)
|
||||
for {
|
||||
select {
|
||||
@ -1853,8 +1862,9 @@ func (do *Domain) TelemetryRotateSubWindowLoop(ctx sessionctx.Context) {
|
||||
do.wg.Run(func() {
|
||||
defer func() {
|
||||
logutil.BgLogger().Info("TelemetryRotateSubWindowLoop exited.")
|
||||
util.Recover(metrics.LabelDomain, "TelemetryRotateSubWindowLoop", nil, false)
|
||||
}()
|
||||
defer util.Recover(metrics.LabelDomain, "TelemetryRotateSubWindowLoop", nil, false)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-do.exit:
|
||||
@ -1942,9 +1952,10 @@ func (do *Domain) StartPlanReplayerHandle() {
|
||||
tikcer := time.NewTicker(time.Duration(lease))
|
||||
defer func() {
|
||||
tikcer.Stop()
|
||||
util.Recover(metrics.LabelDomain, "PlanReplayerTaskCollectHandle", nil, false)
|
||||
logutil.BgLogger().Info("PlanReplayerTaskCollectHandle exited.")
|
||||
}()
|
||||
defer util.Recover(metrics.LabelDomain, "PlanReplayerTaskCollectHandle", nil, false)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-do.exit:
|
||||
@ -1961,9 +1972,10 @@ func (do *Domain) StartPlanReplayerHandle() {
|
||||
do.wg.Run(func() {
|
||||
logutil.BgLogger().Info("PlanReplayerTaskDumpHandle started")
|
||||
defer func() {
|
||||
util.Recover(metrics.LabelDomain, "PlanReplayerTaskDumpHandle", nil, false)
|
||||
logutil.BgLogger().Info("PlanReplayerTaskDumpHandle exited.")
|
||||
}()
|
||||
defer util.Recover(metrics.LabelDomain, "PlanReplayerTaskDumpHandle", nil, false)
|
||||
|
||||
for _, worker := range do.planReplayerHandle.planReplayerTaskDumpHandle.workers {
|
||||
go worker.run()
|
||||
}
|
||||
@ -1994,9 +2006,10 @@ func (do *Domain) DumpFileGcCheckerLoop() {
|
||||
gcTicker := time.NewTicker(do.dumpFileGcChecker.gcLease)
|
||||
defer func() {
|
||||
gcTicker.Stop()
|
||||
util.Recover(metrics.LabelDomain, "dumpFileGcCheckerLoop", nil, false)
|
||||
logutil.BgLogger().Info("dumpFileGcChecker exited.")
|
||||
}()
|
||||
defer util.Recover(metrics.LabelDomain, "dumpFileGcCheckerLoop", nil, false)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-do.exit:
|
||||
@ -2024,9 +2037,10 @@ func (do *Domain) StartHistoricalStatsWorker() {
|
||||
do.wg.Run(func() {
|
||||
logutil.BgLogger().Info("HistoricalStatsWorker started")
|
||||
defer func() {
|
||||
util.Recover(metrics.LabelDomain, "HistoricalStatsWorkerLoop", nil, false)
|
||||
logutil.BgLogger().Info("HistoricalStatsWorker exited.")
|
||||
}()
|
||||
defer util.Recover(metrics.LabelDomain, "HistoricalStatsWorkerLoop", nil, false)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-do.exit:
|
||||
@ -2309,9 +2323,10 @@ func (do *Domain) updateStatsWorker(ctx sessionctx.Context, owner owner.Manager)
|
||||
deltaUpdateTicker.Stop()
|
||||
readMemTricker.Stop()
|
||||
do.SetStatsUpdating(false)
|
||||
util.Recover(metrics.LabelDomain, "updateStatsWorker", nil, false)
|
||||
logutil.BgLogger().Info("updateStatsWorker exited.")
|
||||
}()
|
||||
defer util.Recover(metrics.LabelDomain, "updateStatsWorker", nil, false)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-do.exit:
|
||||
@ -2493,8 +2508,9 @@ func (do *Domain) LoadSigningCertLoop(signingCert, signingKey string) {
|
||||
do.wg.Run(func() {
|
||||
defer func() {
|
||||
logutil.BgLogger().Debug("loadSigningCertLoop exited.")
|
||||
util.Recover(metrics.LabelDomain, "LoadSigningCertLoop", nil, false)
|
||||
}()
|
||||
defer util.Recover(metrics.LabelDomain, "LoadSigningCertLoop", nil, false)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-time.After(sessionstates.LoadCertInterval):
|
||||
|
||||
@ -392,7 +392,6 @@ func (w *planReplayerTaskDumpWorker) handleTask(task *PlanReplayerDumpTask) {
|
||||
occupy := true
|
||||
handleTask := true
|
||||
defer func() {
|
||||
util.Recover(metrics.LabelDomain, "PlanReplayerTaskDumpWorker", nil, false)
|
||||
logutil.BgLogger().Debug("[plan-replayer-capture] handle task",
|
||||
zap.String("sql-digest", sqlDigest),
|
||||
zap.String("plan-digest", planDigest),
|
||||
@ -400,6 +399,8 @@ func (w *planReplayerTaskDumpWorker) handleTask(task *PlanReplayerDumpTask) {
|
||||
zap.Bool("occupy", occupy),
|
||||
zap.Bool("handle", handleTask))
|
||||
}()
|
||||
defer util.Recover(metrics.LabelDomain, "PlanReplayerTaskDumpWorker", nil, false)
|
||||
|
||||
if task.IsContinuesCapture {
|
||||
if w.status.checkTaskKeyFinishedBefore(task) {
|
||||
check = false
|
||||
|
||||
@ -97,9 +97,10 @@ func NewWorkerPool[T any](name string, component util.Component, numWorkers int,
|
||||
func (p *WorkerPool[T]) handleTaskWithRecover(w Worker[T], task T) {
|
||||
p.runningTask.Add(1)
|
||||
defer func() {
|
||||
tidbutil.Recover(metrics.LabelWorkerPool, "handleTaskWithRecover", nil, false)
|
||||
p.runningTask.Add(-1)
|
||||
}()
|
||||
defer tidbutil.Recover(metrics.LabelWorkerPool, "handleTaskWithRecover", nil, false)
|
||||
|
||||
w.HandleTask(task)
|
||||
}
|
||||
|
||||
|
||||
@ -104,11 +104,11 @@ func (sp *SQLCPUCollector) collectSQLCPULoop() {
|
||||
profileConsumer := make(cpuprofile.ProfileConsumer, 1)
|
||||
ticker := time.NewTicker(defCollectTickerInterval)
|
||||
defer func() {
|
||||
util.Recover("top-sql", "startAnalyzeProfileWorker", nil, false)
|
||||
sp.wg.Done()
|
||||
sp.doUnregister(profileConsumer)
|
||||
ticker.Stop()
|
||||
}()
|
||||
defer util.Recover("top-sql", "startAnalyzeProfileWorker", nil, false)
|
||||
|
||||
for {
|
||||
if topsqlstate.TopSQLEnabled() {
|
||||
|
||||
Reference in New Issue
Block a user