*: fix cop task runtime information is wrong in the concurrent executor (#19849)
Signed-off-by: crazycs520 <crazycs520@gmail.com>
@@ -798,11 +798,9 @@ func (a *ExecStmt) FinishExecuteStmt(txnTS uint64, succ bool, hasMoreResults bool) {
     execDetail := sessVars.StmtCtx.GetExecDetails()
     // Attach commit/lockKeys runtime stats to executor runtime stats.
     if (execDetail.CommitDetail != nil || execDetail.LockKeysDetail != nil) && sessVars.StmtCtx.RuntimeStatsColl != nil {
-        stats := sessVars.StmtCtx.RuntimeStatsColl.GetRootStats(a.Plan.ID())
         statsWithCommit := &execdetails.RuntimeStatsWithCommit{
-            RuntimeStats: stats,
-            Commit:       execDetail.CommitDetail,
-            LockKeys:     execDetail.LockKeysDetail,
+            Commit:   execDetail.CommitDetail,
+            LockKeys: execDetail.LockKeysDetail,
         }
         sessVars.StmtCtx.RuntimeStatsColl.RegisterStats(a.Plan.ID(), statsWithCommit)
     }

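Note: the pattern repeated across the hunks below is that executor-specific stats wrappers no longer embed the executor's BasicRuntimeStats. Before this change, re-registering such a wrapper under the same plan ID replaced the stats the collector had already gathered for that operator, including the cop task information recorded by concurrent workers; after it, the wrapper carries only the extra details and the collector merges same-typed entries. A minimal, standalone sketch of that register-and-merge-by-type idea (this is not TiDB's execdetails API; every name below is invented for illustration):

package main

import (
	"fmt"
	"strings"
	"sync"
	"time"
)

// statsPiece is a simplified stand-in for a RuntimeStats implementation.
type statsPiece interface {
	Tp() int // kind of stats, used to decide what can be merged
	Merge(other statsPiece)
	String() string
}

// collector keeps every piece registered for a plan ID and merges pieces of
// the same kind instead of letting a later registration overwrite an earlier
// one (the overwrite is what lost the cop task information).
type collector struct {
	mu    sync.Mutex
	stats map[int][]statsPiece // plan ID -> registered pieces
}

func (c *collector) Register(planID int, p statsPiece) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.stats == nil {
		c.stats = make(map[int][]statsPiece)
	}
	for _, old := range c.stats[planID] {
		if old.Tp() == p.Tp() {
			old.Merge(p) // same kind: accumulate rather than replace
			return
		}
	}
	c.stats[planID] = append(c.stats[planID], p)
}

func (c *collector) String(planID int) string {
	c.mu.Lock()
	defer c.mu.Unlock()
	parts := make([]string, 0, len(c.stats[planID]))
	for _, p := range c.stats[planID] {
		parts = append(parts, p.String())
	}
	return strings.Join(parts, ", ")
}

// copTaskStats mimics the basic time stats gathered from cop tasks.
type copTaskStats struct{ total time.Duration }

func (s *copTaskStats) Tp() int            { return 1 }
func (s *copTaskStats) Merge(o statsPiece) { s.total += o.(*copTaskStats).total }
func (s *copTaskStats) String() string     { return fmt.Sprintf("time:%v", s.total) }

// commitStats mimics the commit details attached after execution.
type commitStats struct{ commit time.Duration }

func (s *commitStats) Tp() int            { return 2 }
func (s *commitStats) Merge(o statsPiece) { s.commit += o.(*commitStats).commit }
func (s *commitStats) String() string     { return fmt.Sprintf("commit_txn:%v", s.commit) }

func main() {
	c := &collector{}
	c.Register(42, &copTaskStats{total: 30 * time.Millisecond}) // collected concurrently
	c.Register(42, &commitStats{commit: 5 * time.Millisecond})  // attached afterwards
	fmt.Println(c.String(42)) // time:30ms, commit_txn:5ms
}
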
@@ -246,7 +246,7 @@ func (e *HashAggExec) Close() error {
         }
         partialConcurrencyInfo := execdetails.NewConcurrencyInfo("PartialConcurrency", partialConcurrency)
         finalConcurrencyInfo := execdetails.NewConcurrencyInfo("FinalConcurrency", finalConcurrency)
-        runtimeStats := &execdetails.RuntimeStatsWithConcurrencyInfo{BasicRuntimeStats: e.runtimeStats}
+        runtimeStats := &execdetails.RuntimeStatsWithConcurrencyInfo{}
         runtimeStats.SetConcurrencyInfo(partialConcurrencyInfo, finalConcurrencyInfo)
         e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, runtimeStats)
     }

@@ -105,7 +105,6 @@ func (e *BatchPointGetExec) Open(context.Context) error {
     if e.runtimeStats != nil {
         snapshotStats := &tikv.SnapshotRuntimeStats{}
         e.stats = &runtimeStatsWithSnapshot{
-            BasicRuntimeStats:    e.runtimeStats,
             SnapshotRuntimeStats: snapshotStats,
         }
         snapshot.SetOption(kv.CollectRuntimeStats, snapshotStats)

@@ -6287,6 +6287,11 @@ func (s *testSuite) TestCollectDMLRuntimeStats(c *C) {
     tk.MustQuery("select * from t1 for update").Check(testkit.Rows("5 6", "7 7"))
     c.Assert(getRootStats(), Matches, "time.*lock_keys.*time.* region.* keys.* lock_rpc:.* rpc_count.*")
     tk.MustExec("rollback")
+
+    tk.MustExec("begin pessimistic")
+    tk.MustExec("insert ignore into t1 values (9,9)")
+    c.Assert(getRootStats(), Matches, "time:.*, loops:.*, BatchGet:{num_rpc:.*, total_time:.*}, lock_keys: {time:.*, region:.*, keys:.*, lock_rpc:.*, rpc_count:.*}")
+    tk.MustExec("rollback")
 }

 func (s *testSuite) TestIssue13758(c *C) {

@@ -148,9 +148,7 @@ func (e *IndexNestedLoopHashJoin) Open(ctx context.Context) error {
     e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker)
     e.innerPtrBytes = make([][]byte, 0, 8)
     if e.runtimeStats != nil {
-        e.stats = &indexLookUpJoinRuntimeStats{
-            BasicRuntimeStats: e.runtimeStats,
-        }
+        e.stats = &indexLookUpJoinRuntimeStats{}
         e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats)
     }
     e.startWorkers(ctx)

@@ -178,9 +178,7 @@ func (e *IndexLookUpJoin) Open(ctx context.Context) error {
     e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker)
     e.innerPtrBytes = make([][]byte, 0, 8)
     if e.runtimeStats != nil {
-        e.stats = &indexLookUpJoinRuntimeStats{
-            BasicRuntimeStats: e.runtimeStats,
-        }
+        e.stats = &indexLookUpJoinRuntimeStats{}
         e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats)
     }
     e.startWorkers(ctx)

@@ -738,7 +736,6 @@ func (e *IndexLookUpJoin) Close() error {
 }

 type indexLookUpJoinRuntimeStats struct {
-    *execdetails.BasicRuntimeStats
     concurrency int
     probe       int64
     innerWorker innerWorkerRuntimeStats

@@ -755,11 +752,8 @@ type innerWorkerRuntimeStats struct {

 func (e *indexLookUpJoinRuntimeStats) String() string {
     buf := bytes.NewBuffer(make([]byte, 0, 16))
-    if e.BasicRuntimeStats != nil {
-        buf.WriteString(e.BasicRuntimeStats.String())
-    }
     if e.innerWorker.totalTime > 0 {
-        buf.WriteString(", inner:{total:")
+        buf.WriteString("inner:{total:")
         buf.WriteString(time.Duration(e.innerWorker.totalTime).String())
         buf.WriteString(", concurrency:")
         if e.concurrency > 0 {

@@ -787,3 +781,30 @@ func (e *indexLookUpJoinRuntimeStats) String() string {
     }
     return buf.String()
 }
+
+func (e *indexLookUpJoinRuntimeStats) Clone() execdetails.RuntimeStats {
+    return &indexLookUpJoinRuntimeStats{
+        concurrency: e.concurrency,
+        probe:       e.probe,
+        innerWorker: e.innerWorker,
+    }
+}
+
+func (e *indexLookUpJoinRuntimeStats) Merge(rs execdetails.RuntimeStats) {
+    tmp, ok := rs.(*indexLookUpJoinRuntimeStats)
+    if !ok {
+        return
+    }
+    e.probe += tmp.probe
+    e.innerWorker.totalTime += tmp.innerWorker.totalTime
+    e.innerWorker.task += tmp.innerWorker.task
+    e.innerWorker.construct += tmp.innerWorker.construct
+    e.innerWorker.fetch += tmp.innerWorker.fetch
+    e.innerWorker.build += tmp.innerWorker.build
+    e.innerWorker.join += tmp.innerWorker.join
+}
+
+// Tp implements the RuntimeStats interface.
+func (e *indexLookUpJoinRuntimeStats) Tp() int {
+    return execdetails.TpIndexLookUpJoinRuntimeStats
+}

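The Clone and Merge methods added above let each concurrent inner worker keep its own copy of the join stats so the collector can combine them afterwards, instead of every worker writing through a shared embedded BasicRuntimeStats. A standalone sketch (invented names, not the executor package itself) of per-worker counters accumulated with sync/atomic and the merge contract the tests further down rely on, where merging a clone doubles every counter:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// workerStats mirrors the shape of innerWorkerRuntimeStats: plain int64
// counters that workers bump with atomic adds while the query runs.
type workerStats struct {
	totalTime int64 // nanoseconds
	task      int64
}

type joinStats struct{ inner workerStats }

func (s *joinStats) clone() *joinStats { c := *s; return &c }

func (s *joinStats) merge(o *joinStats) {
	s.inner.totalTime += o.inner.totalTime
	s.inner.task += o.inner.task
}

func (s *joinStats) String() string {
	return fmt.Sprintf("inner:{total:%v, task:%d}",
		time.Duration(atomic.LoadInt64(&s.inner.totalTime)),
		atomic.LoadInt64(&s.inner.task))
}

func main() {
	stats := &joinStats{}
	var wg sync.WaitGroup
	for w := 0; w < 4; w++ { // four inner workers
		wg.Add(1)
		go func() {
			defer wg.Done()
			start := time.Now()
			time.Sleep(time.Millisecond) // pretend to handle one task
			atomic.AddInt64(&stats.inner.task, 1)
			atomic.AddInt64(&stats.inner.totalTime, int64(time.Since(start)))
		}()
	}
	wg.Wait()
	fmt.Println(stats) // e.g. inner:{total:4.3ms, task:4}

	// Merge contract used by TestIndexJoinRuntimeStats below:
	// merging a clone doubles every counter.
	stats.merge(stats.clone())
	fmt.Println(stats)
}
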
@@ -734,7 +734,7 @@ func (e *IndexLookUpMergeJoin) Close() error {
     e.memTracker = nil
     if e.runtimeStats != nil {
         concurrency := cap(e.resultCh)
-        runtimeStats := &execdetails.RuntimeStatsWithConcurrencyInfo{BasicRuntimeStats: e.runtimeStats}
+        runtimeStats := &execdetails.RuntimeStatsWithConcurrencyInfo{}
         runtimeStats.SetConcurrencyInfo(execdetails.NewConcurrencyInfo("Concurrency", concurrency))
         e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, runtimeStats)
     }

@@ -932,7 +932,6 @@ func (e *InsertValues) collectRuntimeStatsEnabled() bool {
         if e.stats == nil {
             snapshotStats := &tikv.SnapshotRuntimeStats{}
             e.stats = &runtimeStatsWithSnapshot{
-                BasicRuntimeStats:    e.runtimeStats,
                 SnapshotRuntimeStats: snapshotStats,
             }
             e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats)

@@ -177,8 +177,7 @@ func (e *HashJoinExec) Open(ctx context.Context) error {
     }
     if e.runtimeStats != nil {
         e.stats = &hashJoinRuntimeStats{
-            BasicRuntimeStats: e.runtimeStats,
-            concurrent:        cap(e.joiners),
+            concurrent: cap(e.joiners),
         }
         e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats)
     }

@@ -805,7 +804,7 @@ func (e *NestedLoopApplyExec) Close() error {
     e.innerRows = nil
     e.memTracker = nil
     if e.runtimeStats != nil {
-        runtimeStats := newJoinRuntimeStats(e.runtimeStats)
+        runtimeStats := newJoinRuntimeStats()
         e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, runtimeStats)
         if e.canUseCache {
             var hitRatio float64

@@ -996,11 +995,9 @@ type joinRuntimeStats struct {
     hashStat hashStatistic
 }

-func newJoinRuntimeStats(basic *execdetails.BasicRuntimeStats) *joinRuntimeStats {
+func newJoinRuntimeStats() *joinRuntimeStats {
     stats := &joinRuntimeStats{
-        RuntimeStatsWithConcurrencyInfo: &execdetails.RuntimeStatsWithConcurrencyInfo{
-            BasicRuntimeStats: basic,
-        },
+        RuntimeStatsWithConcurrencyInfo: &execdetails.RuntimeStatsWithConcurrencyInfo{},
     }
     return stats
 }

@@ -1037,9 +1034,12 @@ func (e *joinRuntimeStats) String() string {
     return buf.String()
 }

-type hashJoinRuntimeStats struct {
-    *execdetails.BasicRuntimeStats
-
+// Tp implements the RuntimeStats interface.
+func (e *joinRuntimeStats) Tp() int {
+    return execdetails.TpJoinRuntimeStats
+}
+
+type hashJoinRuntimeStats struct {
     fetchAndBuildHashTable time.Duration
     hashStat               hashStatistic
     fetchAndProbe          int64

@@ -1060,11 +1060,15 @@ func (e *hashJoinRuntimeStats) setMaxFetchAndProbeTime(t int64) {
     }
 }

+// Tp implements the RuntimeStats interface.
+func (e *hashJoinRuntimeStats) Tp() int {
+    return execdetails.TpHashJoinRuntimeStats
+}
+
 func (e *hashJoinRuntimeStats) String() string {
     buf := bytes.NewBuffer(make([]byte, 0, 128))
-    buf.WriteString(e.BasicRuntimeStats.String())
     if e.fetchAndBuildHashTable > 0 {
-        buf.WriteString(", build_hash_table:{total:")
+        buf.WriteString("build_hash_table:{total:")
         buf.WriteString(e.fetchAndBuildHashTable.String())
         buf.WriteString(", fetch:")
         buf.WriteString((e.fetchAndBuildHashTable - e.hashStat.buildTableElapse).String())

@@ -1091,3 +1095,29 @@ func (e *hashJoinRuntimeStats) String() string {
     }
     return buf.String()
 }
+
+func (e *hashJoinRuntimeStats) Clone() execdetails.RuntimeStats {
+    return &hashJoinRuntimeStats{
+        fetchAndBuildHashTable: e.fetchAndBuildHashTable,
+        hashStat:               e.hashStat,
+        fetchAndProbe:          e.fetchAndProbe,
+        probe:                  e.probe,
+        concurrent:             e.concurrent,
+        maxFetchAndProbe:       e.maxFetchAndProbe,
+    }
+}
+
+func (e *hashJoinRuntimeStats) Merge(rs execdetails.RuntimeStats) {
+    tmp, ok := rs.(*hashJoinRuntimeStats)
+    if !ok {
+        return
+    }
+    e.fetchAndBuildHashTable += tmp.fetchAndBuildHashTable
+    e.hashStat.buildTableElapse += tmp.hashStat.buildTableElapse
+    e.hashStat.probeCollision += tmp.hashStat.probeCollision
+    e.fetchAndProbe += tmp.fetchAndProbe
+    e.probe += tmp.probe
+    if e.maxFetchAndProbe < tmp.maxFetchAndProbe {
+        e.maxFetchAndProbe = tmp.maxFetchAndProbe
+    }
+}

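For reference, String() above reports derived values rather than raw counters: the hash-table fetch time is fetchAndBuildHashTable minus hashStat.buildTableElapse, and the test values further down imply that the probe fetch time is fetchAndProbe minus probe. A small standalone sketch that reproduces the arithmetic behind the expected strings in TestHashJoinRuntimeStats (illustration only, not the formatting code itself):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Inputs taken from TestHashJoinRuntimeStats below.
	fetchAndBuildHashTable := 2 * time.Second
	buildTableElapse := 100 * time.Millisecond
	fetchAndProbe := 5 * time.Second
	probe := 4 * time.Second

	// build_hash_table:{total, fetch, build}: fetch is derived.
	fmt.Println("build fetch:", fetchAndBuildHashTable-buildTableElapse) // 1.9s
	// probe:{total, probe, fetch}: fetch is derived the same way.
	fmt.Println("probe fetch:", fetchAndProbe-probe) // 1s

	// After Merge with a Clone every counter doubles, so the derived
	// values double too: build fetch becomes 3.8s, probe fetch 2s.
	fmt.Println("merged build fetch:", 2*fetchAndBuildHashTable-2*buildTableElapse) // 3.8s
}
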
@@ -15,6 +15,7 @@ package executor

 import (
     "context"
+    "time"

     . "github.com/pingcap/check"
     "github.com/pingcap/failpoint"

@@ -104,3 +105,40 @@ func (s *pkgTestSerialSuite) TestJoinExec(c *C) {
         }
     }
 }
+
+func (s *pkgTestSuite) TestHashJoinRuntimeStats(c *C) {
+    stats := &hashJoinRuntimeStats{
+        fetchAndBuildHashTable: 2 * time.Second,
+        hashStat: hashStatistic{
+            probeCollision:   1,
+            buildTableElapse: time.Millisecond * 100,
+        },
+        fetchAndProbe:    int64(5 * time.Second),
+        probe:            int64(4 * time.Second),
+        concurrent:       4,
+        maxFetchAndProbe: int64(2 * time.Second),
+    }
+    c.Assert(stats.String(), Equals, "build_hash_table:{total:2s, fetch:1.9s, build:100ms}, probe:{concurrency:4, total:5s, max:2s, probe:4s, fetch:1s, probe_collision:1}")
+    c.Assert(stats.String(), Equals, stats.Clone().String())
+    stats.Merge(stats.Clone())
+    c.Assert(stats.String(), Equals, "build_hash_table:{total:4s, fetch:3.8s, build:200ms}, probe:{concurrency:4, total:10s, max:2s, probe:8s, fetch:2s, probe_collision:2}")
+}
+
+func (s *pkgTestSuite) TestIndexJoinRuntimeStats(c *C) {
+    stats := indexLookUpJoinRuntimeStats{
+        concurrency: 5,
+        probe:       int64(time.Second),
+        innerWorker: innerWorkerRuntimeStats{
+            totalTime: int64(time.Second * 5),
+            task:      16,
+            construct: int64(100 * time.Millisecond),
+            fetch:     int64(300 * time.Millisecond),
+            build:     int64(250 * time.Millisecond),
+            join:      int64(150 * time.Millisecond),
+        },
+    }
+    c.Assert(stats.String(), Equals, "inner:{total:5s, concurrency:5, task:16, construct:100ms, fetch:300ms, build:250ms, join:150ms}, probe:1s")
+    c.Assert(stats.String(), Equals, stats.Clone().String())
+    stats.Merge(stats.Clone())
+    c.Assert(stats.String(), Equals, "inner:{total:10s, concurrency:5, task:32, construct:200ms, fetch:600ms, build:500ms, join:300ms}, probe:2s")
+}

@@ -167,7 +167,7 @@ func (e *ParallelNestedLoopApplyExec) Close() error {
     }

     if e.runtimeStats != nil {
-        runtimeStats := newJoinRuntimeStats(e.runtimeStats)
+        runtimeStats := newJoinRuntimeStats()
         e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, runtimeStats)
         if e.useCache {
             var hitRatio float64

@@ -140,7 +140,6 @@ func (e *PointGetExecutor) Open(context.Context) error {
     if e.runtimeStats != nil {
         snapshotStats := &tikv.SnapshotRuntimeStats{}
         e.stats = &runtimeStatsWithSnapshot{
-            BasicRuntimeStats:    e.runtimeStats,
             SnapshotRuntimeStats: snapshotStats,
         }
         e.snapshot.SetOption(kv.CollectRuntimeStats, snapshotStats)

@@ -466,23 +465,43 @@ func getColInfoByID(tbl *model.TableInfo, colID int64) *model.ColumnInfo {
 }

 type runtimeStatsWithSnapshot struct {
-    *execdetails.BasicRuntimeStats
     *tikv.SnapshotRuntimeStats
 }

 func (e *runtimeStatsWithSnapshot) String() string {
-    var basic, rpcStatsStr string
-    if e.BasicRuntimeStats != nil {
-        basic = e.BasicRuntimeStats.String()
-    }
     if e.SnapshotRuntimeStats != nil {
-        rpcStatsStr = e.SnapshotRuntimeStats.String()
+        return e.SnapshotRuntimeStats.String()
     }
-    if rpcStatsStr == "" {
-        return basic
-    }
-    if basic == "" {
-        return rpcStatsStr
-    }
-    return basic + ", " + rpcStatsStr
+    return ""
 }
+
+// Clone implements the RuntimeStats interface.
+func (e *runtimeStatsWithSnapshot) Clone() execdetails.RuntimeStats {
+    newRs := &runtimeStatsWithSnapshot{}
+    if e.SnapshotRuntimeStats != nil {
+        snapshotStats := e.SnapshotRuntimeStats.Clone()
+        newRs.SnapshotRuntimeStats = snapshotStats.(*tikv.SnapshotRuntimeStats)
+    }
+    return newRs
+}
+
+// Merge implements the RuntimeStats interface.
+func (e *runtimeStatsWithSnapshot) Merge(other execdetails.RuntimeStats) {
+    tmp, ok := other.(*runtimeStatsWithSnapshot)
+    if !ok {
+        return
+    }
+    if tmp.SnapshotRuntimeStats != nil {
+        if e.SnapshotRuntimeStats == nil {
+            snapshotStats := tmp.SnapshotRuntimeStats.Clone()
+            e.SnapshotRuntimeStats = snapshotStats.(*tikv.SnapshotRuntimeStats)
+            return
+        }
+        e.SnapshotRuntimeStats.Merge(tmp.SnapshotRuntimeStats)
+    }
+}
+
+// Tp implements the RuntimeStats interface.
+func (e *runtimeStatsWithSnapshot) Tp() int {
+    return execdetails.TpRuntimeStatsWithSnapshot
+}

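With the embedded BasicRuntimeStats gone, runtimeStatsWithSnapshot only forwards to the TiKV snapshot stats, so String, Clone and Merge above all guard against a nil snapshot. A standalone sketch of that nil-guarding wrapper pattern (generic invented types, not the tikv package API):

package main

import "fmt"

// snapshotDetail stands in for *tikv.SnapshotRuntimeStats.
type snapshotDetail struct{ rpcCount int }

func (s *snapshotDetail) clone() *snapshotDetail  { c := *s; return &c }
func (s *snapshotDetail) merge(o *snapshotDetail) { s.rpcCount += o.rpcCount }
func (s *snapshotDetail) String() string          { return fmt.Sprintf("num_rpc:%d", s.rpcCount) }

// statsWithSnapshot mirrors runtimeStatsWithSnapshot after the change:
// it carries only the optional snapshot part.
type statsWithSnapshot struct{ snap *snapshotDetail }

func (e *statsWithSnapshot) String() string {
	if e.snap != nil {
		return e.snap.String()
	}
	return ""
}

func (e *statsWithSnapshot) Clone() *statsWithSnapshot {
	n := &statsWithSnapshot{}
	if e.snap != nil {
		n.snap = e.snap.clone()
	}
	return n
}

func (e *statsWithSnapshot) Merge(other *statsWithSnapshot) {
	if other.snap == nil {
		return
	}
	if e.snap == nil {
		e.snap = other.snap.clone() // adopt a copy instead of sharing
		return
	}
	e.snap.merge(other.snap)
}

func main() {
	a := &statsWithSnapshot{snap: &snapshotDetail{rpcCount: 2}}
	b := a.Clone()
	a.Merge(b)
	fmt.Println(a) // num_rpc:4
}
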
@@ -309,9 +309,7 @@ func (e *ProjectionExec) Close() error {
         }
     }
     if e.baseExecutor.runtimeStats != nil {
-        runtimeStats := &execdetails.RuntimeStatsWithConcurrencyInfo{
-            BasicRuntimeStats: e.runtimeStats,
-        }
+        runtimeStats := &execdetails.RuntimeStatsWithConcurrencyInfo{}
         if e.isUnparallelExec() {
             runtimeStats.SetConcurrencyInfo(execdetails.NewConcurrencyInfo("Concurrency", 0))
         } else {

@@ -145,7 +145,7 @@ func (e *ShuffleExec) Close() error {
     e.executed = false

     if e.runtimeStats != nil {
-        runtimeStats := &execdetails.RuntimeStatsWithConcurrencyInfo{BasicRuntimeStats: e.runtimeStats}
+        runtimeStats := &execdetails.RuntimeStatsWithConcurrencyInfo{}
         runtimeStats.SetConcurrencyInfo(execdetails.NewConcurrencyInfo("ShuffleConcurrency", e.concurrency))
         e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, runtimeStats)
     }

@@ -303,7 +303,6 @@ func (e *UpdateExec) collectRuntimeStatsEnabled() bool {
        if e.stats == nil {
            snapshotStats := &tikv.SnapshotRuntimeStats{}
            e.stats = &runtimeStatsWithSnapshot{
-               BasicRuntimeStats:    e.runtimeStats,
                SnapshotRuntimeStats: snapshotStats,
            }
            e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats)