statistics: improve code for cache (#44748)

ref pingcap/tidb#44861
This commit is contained in:
Weizhen Wang
2023-06-21 15:59:42 +08:00
committed by GitHub
parent c6b4e9935a
commit 49dfef978a
9 changed files with 322 additions and 211 deletions

View File

@ -10,7 +10,6 @@ go_library(
"handle.go",
"handle_hist.go",
"historical_stats_handler.go",
"lru_cache.go",
"statscache.go",
"update.go",
],
@ -31,6 +30,9 @@ go_library(
"//sessionctx/variable",
"//sessiontxn",
"//statistics",
"//statistics/handle/internal/cache",
"//statistics/handle/internal/cache/lru",
"//statistics/handle/internal/cache/mapcache",
"//statistics/handle/metrics",
"//table",
"//types",
@ -65,19 +67,17 @@ go_test(
"dump_test.go",
"gc_test.go",
"handle_hist_test.go",
"lru_cache_test.go",
"main_test.go",
"update_list_test.go",
],
embed = [":handle"],
flaky = True,
race = "on",
shard_count = 34,
shard_count = 26,
deps = [
"//config",
"//domain",
"//parser/model",
"//parser/mysql",
"//sessionctx/stmtctx",
"//sessionctx/variable",
"//statistics",

View File

@ -0,0 +1,9 @@
# Build rules for //statistics/handle/internal/cache: the package that
# declares the StatsCacheInner interface shared by the LRU and map caches.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "cache",
    srcs = ["inner.go"],
    importpath = "github.com/pingcap/tidb/statistics/handle/internal/cache",
    visibility = ["//statistics/handle:__subpackages__"],
    deps = ["//statistics"],
)

View File

@ -0,0 +1,47 @@
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cache
import "github.com/pingcap/tidb/statistics"
// StatsCacheInner is the interface to manage the statsCache, it can be implemented by map, lru cache or other structures.
type StatsCacheInner interface {
// GetByQuery retrieves cache triggered by a query. Usually used in LRU.
GetByQuery(int64) (*statistics.Table, bool)
// Get gets the cache.
Get(int64) (*statistics.Table, bool)
// PutByQuery puts a cache triggered by a query. Usually used in LRU.
PutByQuery(int64, *statistics.Table)
// Put puts a cache.
Put(int64, *statistics.Table)
// Del deletes a cache.
Del(int64)
// Cost returns the memory usage of the cache.
Cost() int64
// Keys returns the keys of the cache.
Keys() []int64
// Values returns the values of the cache.
Values() []*statistics.Table
Map() map[int64]*statistics.Table
// Len returns the length of the cache.
Len() int
FreshMemUsage()
// Copy returns a copy of the cache
Copy() StatsCacheInner
// SetCapacity sets the capacity of the cache
SetCapacity(int64)
// Front returns the front element's owner tableID, only used for test
Front() int64
}

View File

@ -0,0 +1,29 @@
# Build rules for //statistics/handle/internal/cache/lru: the LRU-based
# implementation of cache.StatsCacheInner, plus its unit tests.
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "lru",
    srcs = ["lru_cache.go"],
    importpath = "github.com/pingcap/tidb/statistics/handle/internal/cache/lru",
    visibility = ["//statistics/handle:__subpackages__"],
    deps = [
        "//statistics",
        "//statistics/handle/internal/cache",
        "//statistics/handle/metrics",
    ],
)

go_test(
    name = "lru_test",
    timeout = "short",
    srcs = ["lru_cache_test.go"],
    embed = [":lru"],
    flaky = True,
    shard_count = 8,
    deps = [
        "//parser/model",
        "//parser/mysql",
        "//statistics",
        "//types",
        "@com_github_stretchr_testify//require",
    ],
)

View File

@ -1,4 +1,4 @@
// Copyright 2022 PingCAP, Inc.
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package handle
package lru
import (
"container/list"
@ -20,18 +20,21 @@ import (
"sync"
"github.com/pingcap/tidb/statistics"
handle_metrics "github.com/pingcap/tidb/statistics/handle/metrics"
"github.com/pingcap/tidb/statistics/handle/internal/cache"
"github.com/pingcap/tidb/statistics/handle/metrics"
)
type statsInnerCache struct {
// StatsInnerCache is the LRU cache for statistics.
type StatsInnerCache struct {
elements map[int64]*lruMapElement
// lru maintains item lru cache
lru *innerItemLruCache
sync.RWMutex
}
func newStatsLruCache(c int64) *statsInnerCache {
s := &statsInnerCache{
// NewStatsLruCache creates a new LRU cache for statistics.
func NewStatsLruCache(c int64) *StatsInnerCache {
s := &StatsInnerCache{
elements: make(map[int64]*lruMapElement),
lru: newInnerLruCache(c),
}
@ -52,7 +55,7 @@ func newInnerLruCache(c int64) *innerItemLruCache {
if c < 1 {
c = math.MaxInt64
}
handle_metrics.CapacityGauge.Set(float64(c))
metrics.CapacityGauge.Set(float64(c))
return &innerItemLruCache{
capacity: c,
cache: list.New(),
@ -81,7 +84,7 @@ func (l *lruMapElement) copy() *lruMapElement {
}
// GetByQuery implements statsCacheInner
func (s *statsInnerCache) GetByQuery(tblID int64) (*statistics.Table, bool) {
func (s *StatsInnerCache) GetByQuery(tblID int64) (*statistics.Table, bool) {
s.Lock()
defer s.Unlock()
element, ok := s.elements[tblID]
@ -100,7 +103,7 @@ func (s *statsInnerCache) GetByQuery(tblID int64) (*statistics.Table, bool) {
}
// Get implements statsCacheInner
func (s *statsInnerCache) Get(tblID int64) (*statistics.Table, bool) {
func (s *StatsInnerCache) Get(tblID int64) (*statistics.Table, bool) {
s.RLock()
defer s.RUnlock()
element, ok := s.elements[tblID]
@ -111,20 +114,20 @@ func (s *statsInnerCache) Get(tblID int64) (*statistics.Table, bool) {
}
// PutByQuery implements statsCacheInner
func (s *statsInnerCache) PutByQuery(tblID int64, tbl *statistics.Table) {
func (s *StatsInnerCache) PutByQuery(tblID int64, tbl *statistics.Table) {
s.Lock()
defer s.Unlock()
s.put(tblID, tbl, tbl.MemoryUsage(), true)
}
// Put implements statsCacheInner
func (s *statsInnerCache) Put(tblID int64, tbl *statistics.Table) {
func (s *StatsInnerCache) Put(tblID int64, tbl *statistics.Table) {
s.Lock()
defer s.Unlock()
s.put(tblID, tbl, tbl.MemoryUsage(), false)
}
func (s *statsInnerCache) put(tblID int64, tbl *statistics.Table, tblMemUsage *statistics.TableMemoryUsage, needMove bool) {
func (s *StatsInnerCache) put(tblID int64, tbl *statistics.Table, tblMemUsage *statistics.TableMemoryUsage, needMove bool) {
element, exist := s.elements[tblID]
if exist {
s.updateColumns(tblID, tbl, tblMemUsage, needMove)
@ -143,7 +146,7 @@ func (s *statsInnerCache) put(tblID int64, tbl *statistics.Table, tblMemUsage *s
}
}
func (s *statsInnerCache) updateIndices(tblID int64, tbl *statistics.Table, tblMemUsage *statistics.TableMemoryUsage, needMove bool) {
func (s *StatsInnerCache) updateIndices(tblID int64, tbl *statistics.Table, tblMemUsage *statistics.TableMemoryUsage, needMove bool) {
_, exist := s.elements[tblID]
if exist {
oldIdxs := s.lru.elements[tblID][true]
@ -164,7 +167,7 @@ func (s *statsInnerCache) updateIndices(tblID int64, tbl *statistics.Table, tblM
}
}
func (s *statsInnerCache) updateColumns(tblID int64, tbl *statistics.Table, tblMemUsage *statistics.TableMemoryUsage, needMove bool) {
func (s *StatsInnerCache) updateColumns(tblID int64, tbl *statistics.Table, tblMemUsage *statistics.TableMemoryUsage, needMove bool) {
_, exist := s.elements[tblID]
if exist {
oldCols := s.lru.elements[tblID][false]
@ -186,7 +189,7 @@ func (s *statsInnerCache) updateColumns(tblID int64, tbl *statistics.Table, tblM
}
// Del implements statsCacheInner
func (s *statsInnerCache) Del(tblID int64) {
func (s *StatsInnerCache) Del(tblID int64) {
s.Lock()
defer s.Unlock()
element, exist := s.elements[tblID]
@ -205,13 +208,14 @@ func (s *statsInnerCache) Del(tblID int64) {
}
// Cost implements statsCacheInner
func (s *statsInnerCache) Cost() int64 {
func (s *StatsInnerCache) Cost() int64 {
s.RLock()
defer s.RUnlock()
return s.lru.trackingCost
}
func (s *statsInnerCache) TotalCost() int64 {
// TotalCost returns the total memory usage of all tables.
func (s *StatsInnerCache) TotalCost() int64 {
s.Lock()
defer s.Unlock()
totalCost := int64(0)
@ -223,7 +227,7 @@ func (s *statsInnerCache) TotalCost() int64 {
}
// Keys implements statsCacheInner
func (s *statsInnerCache) Keys() []int64 {
func (s *StatsInnerCache) Keys() []int64 {
s.RLock()
defer s.RUnlock()
r := make([]int64, 0, len(s.elements))
@ -234,7 +238,7 @@ func (s *statsInnerCache) Keys() []int64 {
}
// Values implements statsCacheInner
func (s *statsInnerCache) Values() []*statistics.Table {
func (s *StatsInnerCache) Values() []*statistics.Table {
s.RLock()
defer s.RUnlock()
r := make([]*statistics.Table, 0, len(s.elements))
@ -245,7 +249,7 @@ func (s *statsInnerCache) Values() []*statistics.Table {
}
// Map implements statsCacheInner
func (s *statsInnerCache) Map() map[int64]*statistics.Table {
func (s *StatsInnerCache) Map() map[int64]*statistics.Table {
s.RLock()
defer s.RUnlock()
r := make(map[int64]*statistics.Table, len(s.elements))
@ -256,14 +260,14 @@ func (s *statsInnerCache) Map() map[int64]*statistics.Table {
}
// Len implements statsCacheInner
func (s *statsInnerCache) Len() int {
func (s *StatsInnerCache) Len() int {
s.RLock()
defer s.RUnlock()
return len(s.elements)
}
// FreshMemUsage implements statsCacheInner
func (s *statsInnerCache) FreshMemUsage() {
func (s *StatsInnerCache) FreshMemUsage() {
s.Lock()
defer s.Unlock()
for tblID, element := range s.elements {
@ -272,10 +276,10 @@ func (s *statsInnerCache) FreshMemUsage() {
}
// Copy implements statsCacheInner
func (s *statsInnerCache) Copy() statsCacheInner {
func (s *StatsInnerCache) Copy() cache.StatsCacheInner {
s.RLock()
defer s.RUnlock()
newCache := newStatsLruCache(s.lru.capacity)
newCache := NewStatsLruCache(s.lru.capacity)
newCache.lru = s.lru.copy()
for tblID, element := range s.elements {
newCache.elements[tblID] = element.copy()
@ -285,14 +289,14 @@ func (s *statsInnerCache) Copy() statsCacheInner {
}
// SetCapacity implements statsCacheInner
func (s *statsInnerCache) SetCapacity(c int64) {
func (s *StatsInnerCache) SetCapacity(c int64) {
s.Lock()
defer s.Unlock()
s.lru.setCapacity(c)
}
// Front implements statsCacheInner
func (s *statsInnerCache) Front() int64 {
func (s *StatsInnerCache) Front() int64 {
s.RLock()
defer s.RUnlock()
ele := s.lru.cache.Front()
@ -302,7 +306,7 @@ func (s *statsInnerCache) Front() int64 {
return s.lru.cache.Front().Value.(*lruCacheItem).tblID
}
func (s *statsInnerCache) onEvict(tblID int64) {
func (s *StatsInnerCache) onEvict(tblID int64) {
element, exist := s.elements[tblID]
if !exist {
return
@ -310,32 +314,32 @@ func (s *statsInnerCache) onEvict(tblID int64) {
element.tblMemUsage = element.tbl.MemoryUsage()
}
func (s *statsInnerCache) freshTableCost(tblID int64, element *lruMapElement) {
func (s *StatsInnerCache) freshTableCost(tblID int64, element *lruMapElement) {
element.tblMemUsage = element.tbl.MemoryUsage()
s.put(tblID, element.tbl, element.tblMemUsage, false)
}
func (s *statsInnerCache) capacity() int64 {
func (s *StatsInnerCache) capacity() int64 {
return s.lru.capacity
}
func (c *innerItemLruCache) get(tblID, id int64, isIndex bool) (*lruCacheItem, bool) {
v, ok := c.elements[tblID]
if !ok {
handle_metrics.MissCounter.Inc()
metrics.MissCounter.Inc()
return nil, false
}
isIndexSet, ok := v[isIndex]
if !ok {
handle_metrics.MissCounter.Inc()
metrics.MissCounter.Inc()
return nil, false
}
ele, ok := isIndexSet[id]
if !ok {
handle_metrics.MissCounter.Inc()
metrics.MissCounter.Inc()
return nil, false
}
handle_metrics.HitCounter.Inc()
metrics.HitCounter.Inc()
c.cache.MoveToFront(ele)
return ele.Value.(*lruCacheItem), true
}
@ -353,7 +357,7 @@ func (c *innerItemLruCache) del(tblID, id int64, isIndex bool) {
if !ok {
return
}
handle_metrics.DelCounter.Inc()
metrics.DelCounter.Inc()
memUsage := c.elements[tblID][isIndex][id].Value.(*lruCacheItem).innerMemUsage
delete(c.elements[tblID][isIndex], id)
c.cache.Remove(ele)
@ -367,7 +371,7 @@ func (c *innerItemLruCache) del(tblID, id int64, isIndex bool) {
func (c *innerItemLruCache) put(tblID, id int64, isIndex bool, item statistics.TableCacheItem, itemMem statistics.CacheItemMemoryUsage,
needEvict, needMove bool) {
defer func() {
handle_metrics.UpdateCounter.Inc()
metrics.UpdateCounter.Inc()
if needEvict {
c.evictIfNeeded()
}
@ -416,7 +420,7 @@ func (c *innerItemLruCache) put(tblID, id int64, isIndex bool, item statistics.T
func (c *innerItemLruCache) evictIfNeeded() {
curr := c.cache.Back()
for c.trackingCost > c.capacity && curr != nil {
handle_metrics.EvictCounter.Inc()
metrics.EvictCounter.Inc()
prev := curr.Prev()
item := curr.Value.(*lruCacheItem)
oldMem := item.innerMemUsage
@ -458,6 +462,6 @@ func (c *innerItemLruCache) setCapacity(capacity int64) {
capacity = math.MaxInt64
}
c.capacity = capacity
handle_metrics.CapacityGauge.Set(float64(c.capacity))
metrics.CapacityGauge.Set(float64(c.capacity))
c.evictIfNeeded()
}

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package handle
package lru
import (
"testing"
@ -96,7 +96,7 @@ func mockTableRemoveIndex(t *statistics.Table) {
func TestLRUPutGetDel(t *testing.T) {
capacity := int64(100)
lru := newStatsLruCache(capacity)
lru := NewStatsLruCache(capacity)
require.Equal(t, capacity, lru.capacity())
mockTable := newMockStatisticsTable(1, 1, true, false, false)
mockTableID := int64(1)
@ -122,7 +122,7 @@ func TestLRUPutGetDel(t *testing.T) {
func TestLRUEvict(t *testing.T) {
capacity := int64(24)
lru := newStatsLruCache(capacity)
lru := NewStatsLruCache(capacity)
t1 := newMockStatisticsTable(2, 0, true, false, false)
require.Equal(t, t1.MemoryUsage().TotalIdxTrackingMemUsage(), int64(0))
require.Equal(t, t1.MemoryUsage().TotalColTrackingMemUsage(), 2*mockCMSMemoryUsage)
@ -160,7 +160,7 @@ func TestLRUEvict(t *testing.T) {
}
func TestLRUCopy(t *testing.T) {
lru := newStatsLruCache(1000)
lru := NewStatsLruCache(1000)
tables := make([]*statistics.Table, 0)
for i := 0; i < 5; i++ {
tables = append(tables, newMockStatisticsTable(1, 1, true, false, false))
@ -198,7 +198,7 @@ func TestLRUCopy(t *testing.T) {
}
func TestLRUFreshMemUsage(t *testing.T) {
lru := newStatsLruCache(1000)
lru := NewStatsLruCache(1000)
t1 := newMockStatisticsTable(1, 1, true, false, false)
t2 := newMockStatisticsTable(2, 2, true, false, false)
t3 := newMockStatisticsTable(3, 3, true, false, false)
@ -223,7 +223,7 @@ func TestLRUFreshMemUsage(t *testing.T) {
}
func TestLRUPutTooBig(t *testing.T) {
lru := newStatsLruCache(1)
lru := NewStatsLruCache(1)
mockTable := newMockStatisticsTable(1, 1, true, false, false)
// put mockTable, the index should be evicted
lru.Put(int64(1), mockTable)
@ -235,7 +235,7 @@ func TestLRUPutTooBig(t *testing.T) {
func TestCacheLen(t *testing.T) {
capacity := int64(12)
stats := newStatsLruCache(capacity)
stats := NewStatsLruCache(capacity)
t1 := newMockStatisticsTable(2, 1, true, false, false)
stats.Put(int64(1), t1)
t2 := newMockStatisticsTable(1, 1, true, false, false)
@ -256,7 +256,7 @@ func TestCacheLen(t *testing.T) {
func TestLRUMove(t *testing.T) {
capacity := int64(100)
s := newStatsLruCache(capacity)
s := NewStatsLruCache(capacity)
t1 := newMockStatisticsTable(1, 1, true, false, false)
t1ID := int64(1)
t2 := newMockStatisticsTable(1, 1, true, false, false)
@ -274,7 +274,7 @@ func TestLRUMove(t *testing.T) {
func TestLRUEvictPolicy(t *testing.T) {
capacity := int64(999)
s := newStatsLruCache(capacity)
s := NewStatsLruCache(capacity)
t1 := newMockStatisticsTable(1, 0, true, true, true)
s.Put(1, t1)
require.Equal(t, s.TotalCost(), mockCMSMemoryUsage+mockTopNMemoryUsage+mockHistMemoryUsage)
@ -307,7 +307,7 @@ func TestLRUEvictPolicy(t *testing.T) {
require.True(t, t1.Columns[1].IsTopNEvicted())
require.True(t, t1.Columns[1].IsAllEvicted())
s = newStatsLruCache(capacity)
s = NewStatsLruCache(capacity)
t2 := newMockStatisticsTable(0, 1, true, true, true)
s.Put(2, t2)
require.Equal(t, s.TotalCost(), mockCMSMemoryUsage+mockTopNMemoryUsage+mockHistMemoryUsage)

View File

@ -0,0 +1,12 @@
# Build rules for //statistics/handle/internal/cache/mapcache: the plain
# map-based implementation of cache.StatsCacheInner (no eviction).
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "mapcache",
    srcs = ["map_cache.go"],
    importpath = "github.com/pingcap/tidb/statistics/handle/internal/cache/mapcache",
    visibility = ["//statistics/handle:__subpackages__"],
    deps = [
        "//statistics",
        "//statistics/handle/internal/cache",
    ],
)

View File

@ -0,0 +1,162 @@
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mapcache
import (
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/statistics/handle/internal/cache"
)
// cacheItem bundles a statistics table with the memory cost that was
// recorded for it when it entered the cache.
type cacheItem struct {
	// value is the cached statistics table.
	value *statistics.Table
	// key is the table ID this item is stored under.
	key int64
	// cost is the memory usage recorded for value at insertion/refresh time.
	cost int64
}

// copy returns a shallow copy of the item; the *statistics.Table pointer
// is shared with the original.
func (c cacheItem) copy() cacheItem {
	return cacheItem{
		key:   c.key,
		value: c.value,
		cost:  c.cost,
	}
}
// MapCache is a cache based on map.
type MapCache struct {
	// tables maps table ID to its cached entry.
	tables map[int64]cacheItem
	// memUsage is the sum of the recorded cost of all cached items.
	memUsage int64
}
// NewMapCache creates a new, empty map cache with zero tracked memory usage.
func NewMapCache() *MapCache {
	c := new(MapCache)
	c.tables = make(map[int64]cacheItem)
	return c
}
// GetByQuery implements StatsCacheInner
func (m *MapCache) GetByQuery(k int64) (*statistics.Table, bool) {
	// A map cache keeps no access-order state, so a query-triggered
	// read is just an ordinary lookup.
	tbl, ok := m.Get(k)
	return tbl, ok
}
// Get implements StatsCacheInner
func (m *MapCache) Get(k int64) (*statistics.Table, bool) {
	item, found := m.tables[k]
	if !found {
		return nil, false
	}
	return item.value, true
}
// PutByQuery implements StatsCacheInner
func (m *MapCache) PutByQuery(k int64, v *statistics.Table) {
	// No eviction bookkeeping here, so this behaves exactly like Put.
	m.Put(k, v)
}
// Put implements StatsCacheInner
func (m *MapCache) Put(k int64, v *statistics.Table) {
	newCost := v.MemoryUsage().TotalMemUsage
	item, found := m.tables[k]
	if !found {
		// First insertion of this table ID.
		m.tables[k] = cacheItem{
			key:   k,
			value: v,
			cost:  newCost,
		}
		m.memUsage += newCost
		return
	}
	// Replace the existing entry and account for the cost delta.
	m.memUsage += newCost - item.cost
	item.value = v
	item.cost = newCost
	m.tables[k] = item
}
// Del implements StatsCacheInner
func (m *MapCache) Del(k int64) {
	item, found := m.tables[k]
	if !found {
		return
	}
	// Release the entry and subtract its recorded cost.
	m.memUsage -= item.cost
	delete(m.tables, k)
}
// Cost implements StatsCacheInner
// It returns the tracked memory usage. The value is only updated by
// Put/Del/FreshMemUsage, so it may lag behind the tables' live usage.
func (m *MapCache) Cost() int64 {
	return m.memUsage
}
// Keys implements StatsCacheInner
func (m *MapCache) Keys() []int64 {
	keys := make([]int64, 0, len(m.tables))
	for id := range m.tables {
		keys = append(keys, id)
	}
	return keys
}
// Values implements StatsCacheInner
func (m *MapCache) Values() []*statistics.Table {
	values := make([]*statistics.Table, 0, len(m.tables))
	for _, item := range m.tables {
		values = append(values, item.value)
	}
	return values
}
// Map implements StatsCacheInner
func (m *MapCache) Map() map[int64]*statistics.Table {
	out := make(map[int64]*statistics.Table, len(m.tables))
	for id, item := range m.tables {
		out[id] = item.value
	}
	return out
}
// Len implements StatsCacheInner
// It returns the number of cached tables.
func (m *MapCache) Len() int {
	return len(m.tables)
}
// FreshMemUsage implements StatsCacheInner
// It recomputes the memory usage of every cached table and folds the delta
// into the aggregate memUsage, also persisting the refreshed per-item cost.
func (m *MapCache) FreshMemUsage() {
	for k, item := range m.tables {
		newCost := item.value.MemoryUsage().TotalMemUsage
		m.memUsage += newCost - item.cost
		// The range variable is a copy, so the refreshed cost must be
		// written back into the map. Without this, item.cost stays stale
		// and a later FreshMemUsage/Put computes its delta against the
		// old value, making memUsage drift from the real total.
		item.cost = newCost
		m.tables[k] = item
	}
}
// Copy implements StatsCacheInner
func (m *MapCache) Copy() cache.StatsCacheInner {
	dup := &MapCache{
		tables:   make(map[int64]cacheItem, len(m.tables)),
		memUsage: m.memUsage,
	}
	// Each item is shallow-copied; the statistics tables themselves
	// are shared between the two caches.
	for id, item := range m.tables {
		dup.tables[id] = item.copy()
	}
	return dup
}
// SetCapacity implements StatsCacheInner
// A map cache is unbounded, so capacity changes are a no-op.
func (m *MapCache) SetCapacity(int64) {}
// Front implements StatsCacheInner
// A map has no eviction order, so there is no meaningful front element;
// 0 is returned as a placeholder (the interface notes Front is test-only).
func (m *MapCache) Front() int64 {
	return 0
}

View File

@ -26,50 +26,31 @@ import (
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/statistics"
"github.com/pingcap/tidb/statistics/handle/internal/cache"
"github.com/pingcap/tidb/statistics/handle/internal/cache/lru"
"github.com/pingcap/tidb/statistics/handle/internal/cache/mapcache"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/sqlexec"
"github.com/pingcap/tidb/util/syncutil"
)
// statsCacheInner is the interface to manage the statsCache, it can be implemented by map, lru cache or other structures.
type statsCacheInner interface {
GetByQuery(int64) (*statistics.Table, bool)
Get(int64) (*statistics.Table, bool)
PutByQuery(int64, *statistics.Table)
Put(int64, *statistics.Table)
Del(int64)
Cost() int64
Keys() []int64
Values() []*statistics.Table
Map() map[int64]*statistics.Table
Len() int
FreshMemUsage()
Copy() statsCacheInner
SetCapacity(int64)
// Front returns the front element's owner tableID, only used for test
Front() int64
}
func newStatsCache() statsCache {
enableQuota := config.GetGlobalConfig().Performance.EnableStatsCacheMemQuota
if enableQuota {
capacity := variable.StatsCacheMemQuota.Load()
return statsCache{
statsCacheInner: newStatsLruCache(capacity),
StatsCacheInner: lru.NewStatsLruCache(capacity),
}
}
return statsCache{
statsCacheInner: &mapCache{
tables: make(map[int64]cacheItem),
memUsage: 0,
},
StatsCacheInner: mapcache.NewMapCache(),
}
}
// statsCache caches the tables in memory for Handle.
type statsCache struct {
statsCacheInner
cache.StatsCacheInner
// version is the latest version of cache. It is bumped when new records of `mysql.stats_meta` are loaded into cache.
version uint64
// minorVersion is to differentiate the cache when the version is unchanged while the cache contents are
@ -84,7 +65,7 @@ type statsCache struct {
}
func (sc statsCache) len() int {
return sc.statsCacheInner.Len()
return sc.StatsCacheInner.Len()
}
func (sc statsCache) copy() statsCache {
@ -92,7 +73,7 @@ func (sc statsCache) copy() statsCache {
version: sc.version,
minorVersion: sc.minorVersion,
}
newCache.statsCacheInner = sc.statsCacheInner.Copy()
newCache.StatsCacheInner = sc.StatsCacheInner.Copy()
return newCache
}
@ -123,139 +104,6 @@ func (sc statsCache) update(tables []*statistics.Table, deletedIDs []int64, newV
return newCache
}
type cacheItem struct {
value *statistics.Table
key int64
cost int64
}
func (c cacheItem) copy() cacheItem {
return cacheItem{
key: c.key,
value: c.value,
cost: c.cost,
}
}
type mapCache struct {
tables map[int64]cacheItem
memUsage int64
}
// GetByQuery implements statsCacheInner
func (m *mapCache) GetByQuery(k int64) (*statistics.Table, bool) {
return m.Get(k)
}
// Get implements statsCacheInner
func (m *mapCache) Get(k int64) (*statistics.Table, bool) {
v, ok := m.tables[k]
return v.value, ok
}
// PutByQuery implements statsCacheInner
func (m *mapCache) PutByQuery(k int64, v *statistics.Table) {
m.Put(k, v)
}
// Put implements statsCacheInner
func (m *mapCache) Put(k int64, v *statistics.Table) {
item, ok := m.tables[k]
if ok {
oldCost := item.cost
newCost := v.MemoryUsage().TotalMemUsage
item.value = v
item.cost = newCost
m.tables[k] = item
m.memUsage += newCost - oldCost
return
}
cost := v.MemoryUsage().TotalMemUsage
item = cacheItem{
key: k,
value: v,
cost: cost,
}
m.tables[k] = item
m.memUsage += cost
}
// Del implements statsCacheInner
func (m *mapCache) Del(k int64) {
item, ok := m.tables[k]
if !ok {
return
}
delete(m.tables, k)
m.memUsage -= item.cost
}
// Cost implements statsCacheInner
func (m *mapCache) Cost() int64 {
return m.memUsage
}
// Keys implements statsCacheInner
func (m *mapCache) Keys() []int64 {
ks := make([]int64, 0, len(m.tables))
for k := range m.tables {
ks = append(ks, k)
}
return ks
}
// Values implements statsCacheInner
func (m *mapCache) Values() []*statistics.Table {
vs := make([]*statistics.Table, 0, len(m.tables))
for _, v := range m.tables {
vs = append(vs, v.value)
}
return vs
}
// Map implements statsCacheInner
func (m *mapCache) Map() map[int64]*statistics.Table {
t := make(map[int64]*statistics.Table, len(m.tables))
for k, v := range m.tables {
t[k] = v.value
}
return t
}
// Len implements statsCacheInner
func (m *mapCache) Len() int {
return len(m.tables)
}
// FreshMemUsage implements statsCacheInner
func (m *mapCache) FreshMemUsage() {
for _, v := range m.tables {
oldCost := v.cost
newCost := v.value.MemoryUsage().TotalMemUsage
m.memUsage += newCost - oldCost
}
}
// Copy implements statsCacheInner
func (m *mapCache) Copy() statsCacheInner {
newM := &mapCache{
tables: make(map[int64]cacheItem, len(m.tables)),
memUsage: m.memUsage,
}
for k, v := range m.tables {
newM.tables[k] = v.copy()
}
return newM
}
// SetCapacity implements statsCacheInner
func (m *mapCache) SetCapacity(int64) {}
// Front implements statsCacheInner
func (m *mapCache) Front() int64 {
return 0
}
// TableRowStatsCache is the cache of table row count.
var TableRowStatsCache = &StatsTableRowCache{}