util: improve cgroup to support Vertical Pod Autoscaler (#36901)

ref pingcap/tidb#37707
This commit is contained in:
Weizhen Wang
2022-09-21 21:35:04 +08:00
committed by GitHub
parent 7cbbaac920
commit e016a7d218
10 changed files with 1637 additions and 53 deletions

@@ -206,6 +206,7 @@
"ddl/column.go": "ddl/column.go",
"ddl/index.go": "ddl/index.go",
"ddl/ingest/": "ddl/ingest/",
"util/cgroup": "util/cgroup code",
"server/conn.go": "server/conn.go",
"server/conn_stmt.go": "server/conn_stmt.go",
"server/conn_test.go": "server/conn_test.go",
@@ -733,6 +734,7 @@
"types/json_binary_functions.go": "types/json_binary_functions.go",
"types/json_binary_test.go": "types/json_binary_test.go",
"ddl/": "enable to ddl",
"util/cgroup": "util/cgroup code",
"expression/builtin_cast.go": "enable expression/builtin_cast.go",
"planner/core/plan.go": "planner/core/plan.go",
"server/conn.go": "server/conn.go",

util/cgroup/BUILD.bazel Normal file

@@ -0,0 +1,28 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "cgroup",
srcs = [
"cgroup.go",
"cgroup_cpu.go",
"cgroup_cpu_linux.go",
"cgroup_cpu_unsupport.go",
"cgroup_memory.go",
],
importpath = "github.com/pingcap/tidb/util/cgroup",
visibility = ["//visibility:public"],
deps = [
"@com_github_cockroachdb_errors//:errors",
"@com_github_pingcap_errors//:errors",
"@com_github_pingcap_log//:log",
"@org_uber_go_zap//:zap",
],
)
go_test(
name = "cgroup_test",
srcs = ["cgroup_mock_test.go"],
embed = [":cgroup"],
flaky = True,
deps = ["@com_github_stretchr_testify//require"],
)

util/cgroup/cgroup.go Normal file

@@ -0,0 +1,409 @@
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cgroup
import (
"bufio"
"bytes"
"fmt"
"io"
"math"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"github.com/cockroachdb/errors"
"github.com/pingcap/log"
"go.uber.org/zap"
)
// CPUQuotaStatus presents the status of how CPU quota is used
type CPUQuotaStatus int
const (
// CPUQuotaUndefined is returned when CPU quota is undefined
CPUQuotaUndefined CPUQuotaStatus = iota
// CPUQuotaUsed is returned when a valid CPU quota can be used
CPUQuotaUsed
// CPUQuotaMinUsed is returned when CPU quota is smaller than the min value
CPUQuotaMinUsed
)
const (
_maxProcsKey = "GOMAXPROCS"
// These are the cgroup file names for different kinds of data
cgroupV1MemStat = "memory.stat"
cgroupV2MemStat = "memory.stat"
cgroupV2MemLimit = "memory.max"
cgroupV1MemUsage = "memory.usage_in_bytes"
cgroupV2MemUsage = "memory.current"
cgroupV1CPUQuota = "cpu.cfs_quota_us"
cgroupV1CPUPeriod = "cpu.cfs_period_us"
cgroupV1CPUSysUsage = "cpuacct.usage_sys"
cgroupV1CPUUserUsage = "cpuacct.usage_user"
cgroupV2CPUMax = "cpu.max"
cgroupV2CPUStat = "cpu.stat"
// {memory|cpu}.stat file keys
//
// key for # of bytes of file-backed memory on inactive LRU list in cgroupv1
cgroupV1MemInactiveFileUsageStatKey = "total_inactive_file"
// key for # of bytes of file-backed memory on inactive LRU list in cgroupv2
cgroupV2MemInactiveFileUsageStatKey = "inactive_file"
cgroupV1MemLimitStatKey = "hierarchical_memory_limit"
)
const (
procPathCGroup = "/proc/self/cgroup"
procPathMountInfo = "/proc/self/mountinfo"
)
// CPUUsage holds CPU usage and quotas for an entire cgroup.
type CPUUsage struct {
// System time and user time taken by this cgroup or process. In nanoseconds.
Stime, Utime uint64
// CPU period and quota for this process, in microseconds. This cgroup has
// access to up to (quota/period) proportion of CPU resources on the system.
// For instance, if there are 4 CPUs, quota = 150000, period = 100000,
// this cgroup can use around ~1.5 CPUs, or 37.5% of total scheduler time.
// If quota is -1, it's unlimited.
Period, Quota int64
// NumCPU is the number of CPUs in the system. Always returned even if
// not called from a cgroup.
NumCPU int
}
// SetGOMAXPROCS sets GOMAXPROCS to match the cgroup CPU quota, returning a function that undoes the change.
func SetGOMAXPROCS() (func(), error) {
const minGOMAXPROCS int = 1
undoNoop := func() {
log.Info("maxprocs: No GOMAXPROCS change to reset")
}
if max, exists := os.LookupEnv(_maxProcsKey); exists {
log.Info(fmt.Sprintf("maxprocs: Honoring GOMAXPROCS=%q as set in environment", max))
return undoNoop, nil
}
maxProcs, status, err := CPUQuotaToGOMAXPROCS(minGOMAXPROCS)
if err != nil {
return undoNoop, err
}
if status == CPUQuotaUndefined {
log.Info(fmt.Sprintf("maxprocs: Leaving GOMAXPROCS=%v: CPU quota undefined", runtime.GOMAXPROCS(0)))
return undoNoop, nil
}
prev := runtime.GOMAXPROCS(0)
undo := func() {
log.Info(fmt.Sprintf("maxprocs: Resetting GOMAXPROCS to %v", prev))
runtime.GOMAXPROCS(prev)
}
if prev == maxProcs {
return undoNoop, nil
}
switch status {
case CPUQuotaMinUsed:
log.Info(fmt.Sprintf("maxprocs: Updating GOMAXPROCS=%v: using minimum allowed GOMAXPROCS", maxProcs))
case CPUQuotaUsed:
log.Info(fmt.Sprintf("maxprocs: Updating GOMAXPROCS=%v: determined from CPU quota", maxProcs))
}
runtime.GOMAXPROCS(maxProcs)
return undo, nil
}
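// Example (illustrative sketch, not part of this change): a typical caller
// applies the quota at process startup and keeps the undo function for
// shutdown. undo is non-nil even on error, so the deferred call is safe:
//
//	undo, err := cgroup.SetGOMAXPROCS()
//	if err != nil {
//		log.Warn("failed to adjust GOMAXPROCS from cgroup quota", zap.Error(err))
//	}
//	defer undo()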
func readFile(filepath string) (res []byte, err error) {
var f *os.File
//nolint:gosec
f, err = os.Open(filepath)
if err != nil {
return nil, err
}
defer func() {
err = errors.CombineErrors(err, f.Close())
}()
res, err = io.ReadAll(f)
return res, err
}
// The controller is defined either via the `memory` type for cgroup v1 or via the empty type for cgroup v2,
// where the type is the second field in the /proc/[pid]/cgroup file.
func detectControlPath(cgroupFilePath string, controller string) (string, error) {
//nolint:gosec
cgroup, err := os.Open(cgroupFilePath)
if err != nil {
return "", errors.Wrapf(err, "failed to read %s cgroup from cgroups file: %s", controller, cgroupFilePath)
}
defer func() {
err := cgroup.Close()
if err != nil {
log.Error("close cgroupFilePath", zap.Error(err))
}
}()
scanner := bufio.NewScanner(cgroup)
var unifiedPathIfFound string
for scanner.Scan() {
fields := bytes.Split(scanner.Bytes(), []byte{':'})
if len(fields) != 3 {
// The lines should always have three fields; there's something fishy here.
continue
}
f0, f1 := string(fields[0]), string(fields[1])
// The first case is cgroup v2, the second is v1. We give v2 priority here.
// There is also a `hybrid` mode when both versions are enabled,
// but no known container solutions support it.
if f0 == "0" && f1 == "" {
unifiedPathIfFound = string(fields[2])
} else if f1 == controller {
return string(fields[2]), nil
}
}
return unifiedPathIfFound, nil
}
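// For illustration (not part of this change), /proc/self/cgroup lines look
// like the following:
//
//	cgroup v1:  5:memory:/kubepods/besteffort/pod.../c17eb...
//	cgroup v2:  0::/machine.slice/libpod-....scope
//
// With controller == "memory", the v1 line matches on its second field and its
// third field is returned directly; the v2 line (hierarchy id 0, empty
// controller list) is only remembered as the unified-path fallback.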
// See http://man7.org/linux/man-pages/man5/proc.5.html for `mountinfo` format.
func getCgroupDetails(mountInfoPath string, cRoot string, controller string) (string, int, error) {
//nolint:gosec
info, err := os.Open(mountInfoPath)
if err != nil {
return "", 0, errors.Wrapf(err, "failed to read mounts info from file: %s", mountInfoPath)
}
defer func() {
err := info.Close()
if err != nil {
log.Error("close mountInfoPath", zap.Error(err))
}
}()
scanner := bufio.NewScanner(info)
for scanner.Scan() {
fields := bytes.Fields(scanner.Bytes())
if len(fields) < 10 {
continue
}
ver, ok := detectCgroupVersion(fields, controller)
if ok {
mountPoint := string(fields[4])
if ver == 2 {
return mountPoint, ver, nil
}
// It is possible that the controller mount and the cgroup path are not the same (both are relative to the NS root).
// So start with the mount and construct the relative path of the cgroup.
// To test:
// 1. Start a Docker container to run the unit tests or tidb-server:
//    > docker run -it --cpus=8 --memory=8g --name test --rm ubuntu:18.04 bash
//
// 2. Change the limit while the container is running:
//    docker update --cpus=8 <containers>
nsRelativePath := string(fields[3])
if !strings.Contains(nsRelativePath, "..") {
// We don't ever expect to see an error here, but in case it happens,
// the best action is to ignore the line and hope that the rest of the lines
// will allow us to extract a valid path.
if relPath, err := filepath.Rel(nsRelativePath, cRoot); err == nil {
return filepath.Join(mountPoint, relPath), ver, nil
}
}
}
}
return "", 0, fmt.Errorf("failed to detect cgroup root mount and version")
}
func cgroupFileToUint64(filepath, desc string) (res uint64, err error) {
contents, err := readFile(filepath)
if err != nil {
return 0, errors.Wrapf(err, "error when reading %s from cgroup v1 at %s", desc, filepath)
}
res, err = strconv.ParseUint(string(bytes.TrimSpace(contents)), 10, 64)
if err != nil {
return 0, errors.Wrapf(err, "error when parsing %s from cgroup v1 at %s", desc, filepath)
}
return res, err
}
func cgroupFileToInt64(filepath, desc string) (res int64, err error) {
contents, err := readFile(filepath)
if err != nil {
return 0, errors.Wrapf(err, "error when reading %s from cgroup v1 at %s", desc, filepath)
}
res, err = strconv.ParseInt(string(bytes.TrimSpace(contents)), 10, 64)
if err != nil {
return 0, errors.Wrapf(err, "error when parsing %s from cgroup v1 at %s", desc, filepath)
}
return res, nil
}
// Returns the version of the cgroup mount for the given controller, if found.
func detectCgroupVersion(fields [][]byte, controller string) (_ int, found bool) {
if len(fields) < 10 {
return 0, false
}
// Due to the strange format, there can be optional fields in the middle of the set, starting
// from field #7. The end of the optional fields is marked with a "-" field.
var pos = 6
for pos < len(fields) {
if bytes.Equal(fields[pos], []byte{'-'}) {
break
}
pos++
}
// Either no optional-fields separator was found, or there are fewer than 3 fields after it, which is wrong.
if (len(fields) - pos - 1) < 3 {
return 0, false
}
pos++
// Check for controller specifically in cgroup v1 (it is listed in super
// options field), as the value can't be found if it is not enforced.
if bytes.Equal(fields[pos], []byte("cgroup")) && bytes.Contains(fields[pos+2], []byte(controller)) {
return 1, true
} else if bytes.Equal(fields[pos], []byte("cgroup2")) {
return 2, true
}
return 0, false
}
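// For illustration (not part of this change), a mountinfo line for a v1 memory
// controller looks like:
//
//	733 703 0:28 /kubepods/... /sys/fs/cgroup/memory ro,nosuid,... master:13 - cgroup cgroup rw,memory
//
// Everything between field #7 and the "-" separator is optional. After the
// separator, a filesystem type of "cgroup" with "memory" in the super options
// identifies version 1, while a "cgroup2" type identifies version 2.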
func detectCPUQuotaInV1(cRoot string) (period, quota int64, err error) {
quotaFilePath := filepath.Join(cRoot, cgroupV1CPUQuota)
periodFilePath := filepath.Join(cRoot, cgroupV1CPUPeriod)
quota, err = cgroupFileToInt64(quotaFilePath, "cpu quota")
if err != nil {
return 0, 0, err
}
period, err = cgroupFileToInt64(periodFilePath, "cpu period")
if err != nil {
return 0, 0, err
}
return period, quota, err
}
func detectCPUUsageInV1(cRoot string) (stime, utime uint64, err error) {
sysFilePath := filepath.Join(cRoot, cgroupV1CPUSysUsage)
userFilePath := filepath.Join(cRoot, cgroupV1CPUUserUsage)
stime, err = cgroupFileToUint64(sysFilePath, "cpu system time")
if err != nil {
return 0, 0, err
}
utime, err = cgroupFileToUint64(userFilePath, "cpu user time")
if err != nil {
return 0, 0, err
}
return stime, utime, err
}
func detectCPUQuotaInV2(cRoot string) (period, quota int64, err error) {
maxFilePath := filepath.Join(cRoot, cgroupV2CPUMax)
contents, err := readFile(maxFilePath)
if err != nil {
return 0, 0, errors.Wrapf(err, "error when read cpu quota from cgroup v2 at %s", maxFilePath)
}
fields := strings.Fields(string(contents))
if len(fields) > 2 || len(fields) == 0 {
return 0, 0, errors.Errorf("unexpected format when reading cpu quota from cgroup v2 at %s: %s", maxFilePath, contents)
}
if fields[0] == "max" {
// Negative quota denotes no limit.
quota = -1
} else {
quota, err = strconv.ParseInt(fields[0], 10, 64)
if err != nil {
return 0, 0, errors.Wrapf(err, "error when reading cpu quota from cgroup v2 at %s", maxFilePath)
}
}
if len(fields) == 2 {
period, err = strconv.ParseInt(fields[1], 10, 64)
if err != nil {
return 0, 0, errors.Wrapf(err, "error when reading cpu period from cgroup v2 at %s", maxFilePath)
}
}
return period, quota, nil
}
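// For illustration (not part of this change), cgroup v2 encodes both values in
// a single cpu.max file:
//
//	"100000 100000" -> quota=100000, period=100000 (one full CPU)
//	"max 100000"    -> quota=-1 (unlimited), period=100000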
func detectCPUUsageInV2(cRoot string) (stime, utime uint64, err error) {
statFilePath := filepath.Join(cRoot, cgroupV2CPUStat)
var stat *os.File
//nolint:gosec
stat, err = os.Open(statFilePath)
if err != nil {
return 0, 0, errors.Wrapf(err, "can't read cpu usage from cgroup v2 at %s", statFilePath)
}
defer func() {
err = errors.CombineErrors(err, stat.Close())
}()
scanner := bufio.NewScanner(stat)
for scanner.Scan() {
fields := bytes.Fields(scanner.Bytes())
if len(fields) != 2 || (string(fields[0]) != "user_usec" && string(fields[0]) != "system_usec") {
continue
}
keyField := string(fields[0])
trimmed := string(bytes.TrimSpace(fields[1]))
usageVar := &stime
if keyField == "user_usec" {
usageVar = &utime
}
*usageVar, err = strconv.ParseUint(trimmed, 10, 64)
if err != nil {
return 0, 0, errors.Wrapf(err, "can't read cpu usage %s from cgroup v1 at %s", keyField, statFilePath)
}
}
return stime, utime, err
}
func readInt64Value(root, filename string, cgVersion int) (value uint64, err error) {
filePath := filepath.Join(root, filename)
//nolint:gosec
file, err := os.Open(filePath)
if err != nil {
return 0, errors.Wrapf(err, "can't read %s from cgroup v%d", filename, cgVersion)
}
defer file.Close()
scanner := bufio.NewScanner(file)
present := scanner.Scan()
if !present {
return 0, errors.Wrapf(err, "no value found in %s from cgroup v%d", filename, cgVersion)
}
data := scanner.Bytes()
trimmed := string(bytes.TrimSpace(data))
// cgroupv2 has certain control files that default to "max", so handle here.
if trimmed == "max" {
return math.MaxInt64, nil
}
value, err = strconv.ParseUint(trimmed, 10, 64)
if err != nil {
return 0, errors.Wrapf(err, "failed to parse value in %s from cgroup v%d", filename, cgVersion)
}
return value, nil
}
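// For illustration (not part of this change), cgroup v2 limit files such as
// memory.max contain either a plain integer or the literal "max":
//
//	"1073741824" -> readInt64Value returns 1073741824
//	"max"        -> readInt64Value returns math.MaxInt64 (effectively unlimited)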

util/cgroup/cgroup_cpu.go Normal file

@@ -0,0 +1,69 @@
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cgroup
import (
"fmt"
"path/filepath"
"github.com/pingcap/errors"
)
// getCgroupCPU is a helper for GetCgroupCPU. root is always "/", except in tests.
func getCgroupCPU(root string) (CPUUsage, error) {
path, err := detectControlPath(filepath.Join(root, procPathCGroup), "cpu,cpuacct")
if err != nil {
return CPUUsage{}, err
}
// No CPU controller detected
if path == "" {
return CPUUsage{}, errors.New("no cpu controller detected")
}
mount, ver, err := getCgroupDetails(filepath.Join(root, procPathMountInfo), path, "cpu,cpuacct")
if err != nil {
return CPUUsage{}, err
}
var res CPUUsage
switch ver {
case 1:
cgroupRoot := filepath.Join(root, mount)
res.Period, res.Quota, err = detectCPUQuotaInV1(cgroupRoot)
if err != nil {
return res, err
}
res.Stime, res.Utime, err = detectCPUUsageInV1(cgroupRoot)
if err != nil {
return res, err
}
case 2:
cgroupRoot := filepath.Join(root, mount, path)
res.Period, res.Quota, err = detectCPUQuotaInV2(cgroupRoot)
if err != nil {
return res, err
}
res.Stime, res.Utime, err = detectCPUUsageInV2(cgroupRoot)
if err != nil {
return res, err
}
default:
return CPUUsage{}, fmt.Errorf("detected unknown cgroup version index: %d", ver)
}
return res, nil
}

util/cgroup/cgroup_cpu_linux.go Normal file

@@ -0,0 +1,69 @@
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build linux
// +build linux
package cgroup
import (
"math"
"os"
"runtime"
"strings"
)
// CPUShares returns the number of CPUs this cgroup can be expected to
// max out. If there's no limit, NumCPU is returned.
func (c CPUUsage) CPUShares() float64 {
if c.Period <= 0 || c.Quota <= 0 {
return float64(c.NumCPU)
}
return float64(c.Quota) / float64(c.Period)
}
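// For example (illustrative, not part of this change): Quota=150000 with
// Period=100000 yields CPUShares()=1.5, while an unlimited quota (Quota=-1)
// falls back to float64(c.NumCPU).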
// GetCgroupCPU returns the CPU usage and quota for the current cgroup.
func GetCgroupCPU() (CPUUsage, error) {
cpuusage, err := getCgroupCPU("/")
cpuusage.NumCPU = runtime.NumCPU()
return cpuusage, err
}
// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
// to a valid GOMAXPROCS value.
func CPUQuotaToGOMAXPROCS(minValue int) (int, CPUQuotaStatus, error) {
quota, err := GetCgroupCPU()
if err != nil {
return -1, CPUQuotaUndefined, err
}
maxProcs := int(math.Ceil(quota.CPUShares()))
if minValue > 0 && maxProcs < minValue {
return minValue, CPUQuotaMinUsed, nil
}
return maxProcs, CPUQuotaUsed, nil
}
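// Example (illustrative sketch, not part of this change): deriving GOMAXPROCS
// by hand instead of calling SetGOMAXPROCS:
//
//	maxProcs, status, err := cgroup.CPUQuotaToGOMAXPROCS(1)
//	if err == nil && status != cgroup.CPUQuotaUndefined {
//		runtime.GOMAXPROCS(maxProcs)
//	}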
// InContainer returns true if the process is running in a container.
func InContainer() bool {
v, err := os.ReadFile(procPathCGroup)
if err != nil {
return false
}
if strings.Contains(string(v), "docker") ||
strings.Contains(string(v), "kubepods") ||
strings.Contains(string(v), "containerd") {
return true
}
return false
}

util/cgroup/cgroup_cpu_unsupport.go Normal file

@@ -0,0 +1,41 @@
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !linux
// +build !linux
package cgroup
import (
"runtime"
)
// GetCgroupCPU returns the CPU usage and quota for the current cgroup.
func GetCgroupCPU() (CPUUsage, error) {
var cpuUsage CPUUsage
cpuUsage.NumCPU = runtime.NumCPU()
return cpuUsage, nil
}
// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
// to a valid GOMAXPROCS value. This is Linux-specific and not supported in the
// current OS.
func CPUQuotaToGOMAXPROCS(_ int) (int, CPUQuotaStatus, error) {
return -1, CPUQuotaUndefined, nil
}
// InContainer returns true if the process is running in a container.
func InContainer() bool {
return false
}

util/cgroup/cgroup_memory.go Normal file

@@ -0,0 +1,196 @@
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cgroup
import (
"bufio"
"bytes"
"fmt"
"os"
"path/filepath"
"strconv"
"github.com/pingcap/errors"
"github.com/pingcap/log"
)
// GetMemoryLimit attempts to retrieve the cgroup memory limit for the current
// process.
func GetMemoryLimit() (limit uint64, err error) {
return getCgroupMemLimit("/")
}
// GetMemoryUsage attempts to retrieve the cgroup memory usage value (in bytes)
// for the current process.
func GetMemoryUsage() (usage uint64, err error) {
return getCgroupMemUsage("/")
}
// GetMemoryInactiveFileUsage attempts to retrieve the cgroup memory usage value (in bytes)
// for the current process.
func GetMemoryInactiveFileUsage() (usage uint64, err error) {
return getCgroupMemInactiveFileUsage("/")
}
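// Example (illustrative sketch, not part of this change): a caller can combine
// the three getters to estimate memory headroom, since inactive_file pages are
// easily reclaimed by the kernel:
//
//	limit, _ := cgroup.GetMemoryLimit()
//	usage, _ := cgroup.GetMemoryUsage()
//	inactive, _ := cgroup.GetMemoryInactiveFileUsage()
//	if usage > inactive {
//		usage -= inactive
//	}
//	headroom := limit - usage // assumes usage <= limit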
// root is always "/" in production. It is changed only for testing.
func getCgroupMemInactiveFileUsage(root string) (usage uint64, err error) {
path, err := detectControlPath(filepath.Join(root, procPathCGroup), "memory")
if err != nil {
return 0, err
}
if path == "" {
log.Warn("no cgroup memory controller detected")
return 0, nil
}
mount, ver, err := getCgroupDetails(filepath.Join(root, procPathMountInfo), path, "memory")
if err != nil {
return 0, err
}
switch ver {
case 1:
usage, err = detectMemInactiveFileUsageInV1(filepath.Join(root, mount))
case 2:
usage, err = detectMemInactiveFileUsageInV2(filepath.Join(root, mount, path))
default:
usage, err = 0, fmt.Errorf("detected unknown cgroup version index: %d", ver)
}
return usage, err
}
// getCgroupMemUsage reads the memory cgroup's current memory usage (in bytes).
func getCgroupMemUsage(root string) (usage uint64, err error) {
path, err := detectControlPath(filepath.Join(root, procPathCGroup), "memory")
if err != nil {
return 0, err
}
if path == "" {
log.Warn("no cgroup memory controller detected")
return 0, nil
}
mount, ver, err := getCgroupDetails(filepath.Join(root, procPathMountInfo), path, "memory")
if err != nil {
return 0, err
}
switch ver {
case 1:
// cgroupv1
usage, err = detectMemUsageInV1(filepath.Join(root, mount))
case 2:
// cgroupv2
usage, err = detectMemUsageInV2(filepath.Join(root, mount, path))
default:
usage, err = 0, fmt.Errorf("detected unknown cgroup version index: %d", ver)
}
return usage, err
}
// root is always "/" in production. It is changed only for testing.
func getCgroupMemLimit(root string) (limit uint64, err error) {
path, err := detectControlPath(filepath.Join(root, procPathCGroup), "memory")
if err != nil {
return 0, err
}
if path == "" {
log.Warn("no cgroup memory controller detected")
return 0, nil
}
mount, ver, err := getCgroupDetails(filepath.Join(root, procPathMountInfo), path, "memory")
if err != nil {
return 0, err
}
switch ver {
case 1:
// cgroupv1
limit, err = detectMemLimitInV1(filepath.Join(root, mount))
case 2:
// cgroupv2
limit, err = detectMemLimitInV2(filepath.Join(root, mount, path))
default:
limit, err = 0, fmt.Errorf("detected unknown cgroup version index: %d", ver)
}
return limit, err
}
func detectMemLimitInV1(cRoot string) (limit uint64, err error) {
return detectMemStatValue(cRoot, cgroupV1MemStat, cgroupV1MemLimitStatKey, 1)
}
// TODO(hawkingrei): this implementation was based on a podman+criu environment.
// It may not cover all cases as cgroup v2 becomes more widely used in the
// container world.
func detectMemLimitInV2(cRoot string) (limit uint64, err error) {
return readInt64Value(cRoot, cgroupV2MemLimit, 2)
}
func detectMemInactiveFileUsageInV1(root string) (uint64, error) {
return detectMemStatValue(root, cgroupV1MemStat, cgroupV1MemInactiveFileUsageStatKey, 1)
}
func detectMemInactiveFileUsageInV2(root string) (uint64, error) {
return detectMemStatValue(root, cgroupV2MemStat, cgroupV2MemInactiveFileUsageStatKey, 2)
}
func detectMemStatValue(cRoot, filename, key string, cgVersion int) (value uint64, err error) {
statFilePath := filepath.Join(cRoot, filename)
//nolint:gosec
stat, err := os.Open(statFilePath)
if err != nil {
return 0, errors.Wrapf(err, "can't read file %s from cgroup v%d", filename, cgVersion)
}
defer func() {
_ = stat.Close()
}()
scanner := bufio.NewScanner(stat)
for scanner.Scan() {
fields := bytes.Fields(scanner.Bytes())
if len(fields) != 2 || string(fields[0]) != key {
continue
}
trimmed := string(bytes.TrimSpace(fields[1]))
value, err = strconv.ParseUint(trimmed, 10, 64)
if err != nil {
return 0, errors.Wrapf(err, "can't read %q memory stat from cgroup v%d in %s", key, cgVersion, filename)
}
return value, nil
}
return 0, fmt.Errorf("failed to find expected memory stat %q for cgroup v%d in %s", key, cgVersion, filename)
}
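// For illustration (not part of this change), memory.stat is a sequence of
// "key value" lines, e.g.:
//
//	inactive_file 1363746816
//	hierarchical_memory_limit 2936016896
//
// detectMemStatValue scans for the requested key and parses its integer value.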
func detectMemUsageInV1(cRoot string) (memUsage uint64, err error) {
return readInt64Value(cRoot, cgroupV1MemUsage, 1)
}
// TODO(hawkingrei): this implementation was based on a podman+criu environment.
// It may not cover all cases as cgroup v2 becomes more widely used in the
// container world.
func detectMemUsageInV2(cRoot string) (memUsage uint64, err error) {
return readInt64Value(cRoot, cgroupV2MemUsage, 2)
}

util/cgroup/cgroup_mock_test.go Normal file

@@ -0,0 +1,818 @@
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cgroup
import (
"os"
"path/filepath"
"regexp"
"testing"
"github.com/stretchr/testify/require"
)
func isError(err error, re string) bool {
if err == nil && re == "" {
return true
}
if err == nil || re == "" {
return false
}
matched, merr := regexp.MatchString(re, err.Error())
if merr != nil {
return false
}
return matched
}
func TestCgroupsGetMemoryUsage(t *testing.T) {
for _, tc := range []struct {
name string
paths map[string]string
errMsg string
value uint64
warn string
}{
{
errMsg: "failed to read memory cgroup from cgroups file:",
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithoutMemoryController,
"/proc/self/mountinfo": v1MountsWithoutMemController,
},
warn: "no cgroup memory controller detected",
value: 0,
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithMemoryController,
},
errMsg: "failed to read mounts info from file:",
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithMemoryController,
"/proc/self/mountinfo": v1MountsWithoutMemController,
},
errMsg: "failed to detect cgroup root mount and version",
value: 0,
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithMemoryController,
"/proc/self/mountinfo": v1MountsWithMemController,
"/sys/fs/cgroup/memory/memory.usage_in_bytes": v1MemoryUsageInBytes,
},
value: 276328448,
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithMemoryControllerNS,
"/proc/self/mountinfo": v1MountsWithMemControllerNS,
"/sys/fs/cgroup/memory/cgroup_test/memory.usage_in_bytes": v1MemoryUsageInBytes,
},
value: 276328448,
},
{
paths: map[string]string{
"/proc/self/cgroup": v2CgroupWithMemoryController,
"/proc/self/mountinfo": v2Mounts,
},
errMsg: "can't read memory.current from cgroup v2",
},
{
paths: map[string]string{
"/proc/self/cgroup": v2CgroupWithMemoryController,
"/proc/self/mountinfo": v2Mounts,
"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/memory.current": "unparsable\n",
},
errMsg: "failed to parse value in memory.current from cgroup v2",
},
{
paths: map[string]string{
"/proc/self/cgroup": v2CgroupWithMemoryController,
"/proc/self/mountinfo": v2Mounts,
"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/memory.current": "276328448",
},
value: 276328448,
},
} {
dir := createFiles(t, tc.paths)
limit, err := getCgroupMemUsage(dir)
require.True(t, isError(err, tc.errMsg),
"%v %v", err, tc.errMsg)
require.Equal(t, tc.value, limit)
}
}
func TestCgroupsGetMemoryInactiveFileUsage(t *testing.T) {
for _, tc := range []struct {
name string
paths map[string]string
errMsg string
value uint64
warn string
}{
{
errMsg: "failed to read memory cgroup from cgroups file:",
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithoutMemoryController,
"/proc/self/mountinfo": v1MountsWithoutMemController,
},
warn: "no cgroup memory controller detected",
value: 0,
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithMemoryController,
},
errMsg: "failed to read mounts info from file:",
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithMemoryController,
"/proc/self/mountinfo": v1MountsWithoutMemController,
},
errMsg: "failed to detect cgroup root mount and version",
value: 0,
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithMemoryController,
"/proc/self/mountinfo": v1MountsWithMemController,
"/sys/fs/cgroup/memory/memory.stat": v1MemoryStat,
},
value: 1363746816,
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithMemoryControllerNS,
"/proc/self/mountinfo": v1MountsWithMemControllerNS,
"/sys/fs/cgroup/memory/cgroup_test/memory.stat": v1MemoryStat,
},
value: 1363746816,
},
{
paths: map[string]string{
"/proc/self/cgroup": v2CgroupWithMemoryController,
"/proc/self/mountinfo": v2Mounts,
},
errMsg: "can't read file memory.stat from cgroup v2",
},
{
paths: map[string]string{
"/proc/self/cgroup": v2CgroupWithMemoryController,
"/proc/self/mountinfo": v2Mounts,
"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/memory.stat": "inactive_file unparsable\n",
},
errMsg: "can't read \"inactive_file\" memory stat from cgroup v2 in memory.stat",
},
{
paths: map[string]string{
"/proc/self/cgroup": v2CgroupWithMemoryController,
"/proc/self/mountinfo": v2Mounts,
"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/memory.stat": v2MemoryStat,
},
value: 1363746816,
},
} {
dir := createFiles(t, tc.paths)
limit, err := getCgroupMemInactiveFileUsage(dir)
require.True(t, isError(err, tc.errMsg),
"%v %v", err, tc.errMsg)
require.Equal(t, tc.value, limit)
}
}
func TestCgroupsGetMemoryLimit(t *testing.T) {
for _, tc := range []struct {
name string
paths map[string]string
errMsg string
limit uint64
warn string
}{
{
errMsg: "failed to read memory cgroup from cgroups file:",
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithoutMemoryController,
"/proc/self/mountinfo": v1MountsWithoutMemController,
},
limit: 0,
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithMemoryController,
},
errMsg: "failed to read mounts info from file:",
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithMemoryController,
"/proc/self/mountinfo": v1MountsWithoutMemController,
},
errMsg: "failed to detect cgroup root mount and version",
limit: 0,
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithMemoryController,
"/proc/self/mountinfo": v1MountsWithMemController,
"/sys/fs/cgroup/memory/memory.stat": v1MemoryStat,
},
limit: 2936016896,
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithMemoryControllerNS,
"/proc/self/mountinfo": v1MountsWithMemControllerNS,
"/sys/fs/cgroup/memory/cgroup_test/memory.stat": v1MemoryStat,
},
limit: 2936016896,
},
{
paths: map[string]string{
"/proc/self/cgroup": v2CgroupWithMemoryController,
"/proc/self/mountinfo": v2Mounts,
},
errMsg: "can't read memory.max from cgroup v2",
},
{
paths: map[string]string{
"/proc/self/cgroup": v2CgroupWithMemoryController,
"/proc/self/mountinfo": v2Mounts,
"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/memory.max": "unparsable\n",
},
errMsg: "failed to parse value in memory.max from cgroup v2",
},
{
paths: map[string]string{
"/proc/self/cgroup": v2CgroupWithMemoryController,
"/proc/self/mountinfo": v2Mounts,
"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/memory.max": "1073741824\n",
},
limit: 1073741824,
},
{
paths: map[string]string{
"/proc/self/cgroup": v2CgroupWithMemoryController,
"/proc/self/mountinfo": v2Mounts,
"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/memory.max": "max\n",
},
limit: 9223372036854775807,
},
} {
dir := createFiles(t, tc.paths)
limit, err := getCgroupMemLimit(dir)
require.True(t, isError(err, tc.errMsg),
"%v %v", err, tc.errMsg)
require.Equal(t, tc.limit, limit)
}
}
func TestCgroupsGetCPU(t *testing.T) {
for _, tc := range []struct {
name string
paths map[string]string
errMsg string
period int64
quota int64
user uint64
system uint64
}{
{
errMsg: "failed to read cpu,cpuacct cgroup from cgroups file:",
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithoutCPUController,
"/proc/self/mountinfo": v1MountsWithoutCPUController,
},
errMsg: "no cpu controller detected",
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithCPUController,
},
errMsg: "failed to read mounts info from file:",
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithCPUController,
"/proc/self/mountinfo": v1MountsWithoutCPUController,
},
errMsg: "failed to detect cgroup root mount and version",
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithCPUController,
"/proc/self/mountinfo": v1MountsWithCPUController,
"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us": "12345",
"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us": "67890",
"/sys/fs/cgroup/cpu,cpuacct/cpuacct.usage_sys": "123",
"/sys/fs/cgroup/cpu,cpuacct/cpuacct.usage_user": "456",
},
quota: int64(12345),
period: int64(67890),
system: uint64(123),
user: uint64(456),
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithCPUControllerNS,
"/proc/self/mountinfo": v1MountsWithCPUControllerNS,
"/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpu.cfs_quota_us": "12345",
"/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpu.cfs_period_us": "67890",
"/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpuacct.usage_sys": "123",
"/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpuacct.usage_user": "456",
},
quota: int64(12345),
period: int64(67890),
system: uint64(123),
user: uint64(456),
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithCPUControllerNSMountRel,
"/proc/self/mountinfo": v1MountsWithCPUControllerNSMountRel,
},
errMsg: "failed to detect cgroup root mount and version",
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithCPUControllerNSMountRelRemount,
"/proc/self/mountinfo": v1MountsWithCPUControllerNSMountRelRemount,
"/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpu.cfs_quota_us": "12345",
"/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpu.cfs_period_us": "67890",
"/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpuacct.usage_sys": "123",
"/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpuacct.usage_user": "456",
},
quota: int64(12345),
period: int64(67890),
system: uint64(123),
user: uint64(456),
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithCPUControllerNS2,
"/proc/self/mountinfo": v1MountsWithCPUControllerNS2,
"/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpu.cfs_quota_us": "12345",
"/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpu.cfs_period_us": "67890",
"/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpuacct.usage_sys": "123",
"/sys/fs/cgroup/cpu,cpuacct/crdb_test/cpuacct.usage_user": "456",
},
quota: int64(12345),
period: int64(67890),
system: uint64(123),
user: uint64(456),
},
{
paths: map[string]string{
"/proc/self/cgroup": v1CgroupWithCPUController,
"/proc/self/mountinfo": v1MountsWithCPUController,
"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us": "-1",
"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us": "67890",
},
quota: int64(-1),
period: int64(67890),
errMsg: "error when reading cpu system time from cgroup v1",
},
{
paths: map[string]string{
"/proc/self/cgroup": v2CgroupWithMemoryController,
"/proc/self/mountinfo": v2Mounts,
},
errMsg: "error when read cpu quota from cgroup v2",
},
{
paths: map[string]string{
"/proc/self/cgroup": v2CgroupWithMemoryController,
"/proc/self/mountinfo": v2Mounts,
"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.max": "foo bar\n",
},
errMsg: "error when reading cpu quota from cgroup v2 at",
},
{
paths: map[string]string{
"/proc/self/cgroup": v2CgroupWithMemoryController,
"/proc/self/mountinfo": v2Mounts,
"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.max": "100 1000\n",
"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.stat": "user_usec 100\nsystem_usec 200",
},
quota: int64(100),
period: int64(1000),
user: uint64(100),
system: uint64(200),
},
{
paths: map[string]string{
"/proc/self/cgroup": v2CgroupWithMemoryController,
"/proc/self/mountinfo": v2Mounts,
"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.max": "max 1000\n",
"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.stat": "user_usec 100\nsystem_usec 200",
},
quota: int64(-1),
period: int64(1000),
user: uint64(100),
system: uint64(200),
},
{
paths: map[string]string{
"/proc/self/cgroup": v2CgroupWithMemoryController,
"/proc/self/mountinfo": v2Mounts,
"/sys/fs/cgroup/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope/cpu.max": "100 1000\n",
},
quota: int64(100),
period: int64(1000),
errMsg: "can't read cpu usage from cgroup v2",
},
} {
dir := createFiles(t, tc.paths)
cpuusage, err := getCgroupCPU(dir)
require.True(t, isError(err, tc.errMsg),
"%v %v", err, tc.errMsg)
require.Equal(t, tc.quota, cpuusage.Quota)
require.Equal(t, tc.period, cpuusage.Period)
require.Equal(t, tc.system, cpuusage.Stime)
require.Equal(t, tc.user, cpuusage.Utime)
}
}
func createFiles(t *testing.T, paths map[string]string) (dir string) {
dir = t.TempDir()
for path, data := range paths {
path = filepath.Join(dir, path)
require.NoError(t, os.MkdirAll(filepath.Dir(path), 0755))
require.NoError(t, os.WriteFile(path, []byte(data), 0755))
}
return dir
}
const (
v1CgroupWithMemoryController = `11:blkio:/kubepods/besteffort/pod1bf924dd-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
10:devices:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
9:perf_event:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
8:cpu,cpuacct:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
7:pids:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
6:cpuset:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
5:memory:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
4:net_cls,net_prio:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
3:hugetlb:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
2:freezer:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
1:name=systemd:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
`
v1CgroupWithoutMemoryController = `10:blkio:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
9:devices:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
8:perf_event:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
7:cpu,cpuacct:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
6:pids:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
5:cpuset:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
4:net_cls,net_prio:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
3:hugetlb:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
2:freezer:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
1:name=systemd:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
`
v1CgroupWithCPUController = `11:blkio:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
10:devices:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
9:perf_event:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
8:cpu,cpuacct:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
7:pids:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
6:cpuset:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
5:memory:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
4:net_cls,net_prio:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
3:hugetlb:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
2:freezer:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
1:name=systemd:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
`
v1CgroupWithoutCPUController = `10:blkio:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
9:devices:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
8:perf_event:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
7:pids:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
6:cpuset:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
5:memory:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
4:net_cls,net_prio:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
3:hugetlb:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
2:freezer:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
1:name=systemd:/kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3
`
v2CgroupWithMemoryController = `0::/machine.slice/libpod-f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810.scope
`
v1MountsWithMemController = `625 367 0:71 / / rw,relatime master:85 - overlay overlay rw,lowerdir=/var/lib/docker/overlay2/l/DOLSFLPSKANL4GJ7XKF3OG6PKN:/var/lib/docker/overlay2/l/P7UJPLDFEUSRQ7CZILB7L4T5OP:/var/lib/docker/overlay2/l/FSKO5FFFNQ6XOSVF7T6R2DWZVZ:/var/lib/docker/overlay2/l/YNE4EZZE2GW2DIXRBUP47LB3GU:/var/lib/docker/overlay2/l/F2JNS7YWT5CU7FUXHNV5JUJWQY,upperdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/diff,workdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/work
626 625 0:79 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
687 625 0:75 / /dev rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
691 687 0:82 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666
702 625 0:159 / /sys ro,nosuid,nodev,noexec,relatime - sysfs sysfs ro
703 702 0:99 / /sys/fs/cgroup ro,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755
705 703 0:23 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/systemd ro,nosuid,nodev,noexec,relatime master:9 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
711 703 0:25 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/freezer ro,nosuid,nodev,noexec,relatime master:10 - cgroup cgroup rw,freezer
726 703 0:26 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/hugetlb ro,nosuid,nodev,noexec,relatime master:11 - cgroup cgroup rw,hugetlb
727 703 0:27 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/net_cls,net_prio ro,nosuid,nodev,noexec,relatime master:12 - cgroup cgroup rw,net_cls,net_prio
733 703 0:28 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/memory ro,nosuid,nodev,noexec,relatime master:13 - cgroup cgroup rw,memory
734 703 0:29 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/cpuset ro,nosuid,nodev,noexec,relatime master:14 - cgroup cgroup rw,cpuset
735 703 0:30 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/pids ro,nosuid,nodev,noexec,relatime master:15 - cgroup cgroup rw,pids
736 703 0:31 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/cpu,cpuacct ro,nosuid,nodev,noexec,relatime master:16 - cgroup cgroup rw,cpu,cpuacct
737 703 0:32 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/perf_event ro,nosuid,nodev,noexec,relatime master:17 - cgroup cgroup rw,perf_event
740 703 0:33 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/devices ro,nosuid,nodev,noexec,relatime master:18 - cgroup cgroup rw,devices
742 703 0:34 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/blkio ro,nosuid,nodev,noexec,relatime master:19 - cgroup cgroup rw,blkio
744 687 0:78 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw
746 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/volumes/kubernetes.io~empty-dir/cockroach-env /etc/cockroach-env ro,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
760 687 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/containers/cockroachdb/3e868c1f /dev/termination-log rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
776 625 259:3 / /cockroach/cockroach-data rw,relatime - ext4 /dev/nvme2n1 rw,data=ordered
814 625 0:68 / /cockroach/cockroach-certs ro,relatime - tmpfs tmpfs rw
815 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/resolv.conf /etc/resolv.conf rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
816 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/hostname /etc/hostname rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
817 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/etc-hosts /etc/hosts rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
818 687 0:77 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k
819 625 0:69 / /run/secrets/kubernetes.io/serviceaccount ro,relatime - tmpfs tmpfs rw
368 626 0:79 /bus /proc/bus ro,relatime - proc proc rw
375 626 0:79 /fs /proc/fs ro,relatime - proc proc rw
376 626 0:79 /irq /proc/irq ro,relatime - proc proc rw
381 626 0:79 /sys /proc/sys ro,relatime - proc proc rw
397 626 0:79 /sysrq-trigger /proc/sysrq-trigger ro,relatime - proc proc rw
213 626 0:70 / /proc/acpi ro,relatime - tmpfs tmpfs ro
216 626 0:75 /null /proc/kcore rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
217 626 0:75 /null /proc/keys rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
218 626 0:75 /null /proc/latency_stats rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
222 626 0:75 /null /proc/timer_list rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
223 626 0:75 /null /proc/sched_debug rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
224 702 0:101 / /sys/firmware ro,relatime - tmpfs tmpfs ro
`
v1MountsWithoutMemController = `625 367 0:71 / / rw,relatime master:85 - overlay overlay rw,lowerdir=/var/lib/docker/overlay2/l/DOLSFLPSKANL4GJ7XKF3OG6PKN:/var/lib/docker/overlay2/l/P7UJPLDFEUSRQ7CZILB7L4T5OP:/var/lib/docker/overlay2/l/FSKO5FFFNQ6XOSVF7T6R2DWZVZ:/var/lib/docker/overlay2/l/YNE4EZZE2GW2DIXRBUP47LB3GU:/var/lib/docker/overlay2/l/F2JNS7YWT5CU7FUXHNV5JUJWQY,upperdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/diff,workdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/work
626 625 0:79 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
687 625 0:75 / /dev rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
691 687 0:82 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666
702 625 0:159 / /sys ro,nosuid,nodev,noexec,relatime - sysfs sysfs ro
703 702 0:99 / /sys/fs/cgroup ro,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755
705 703 0:23 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/systemd ro,nosuid,nodev,noexec,relatime master:9 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
711 703 0:25 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/freezer ro,nosuid,nodev,noexec,relatime master:10 - cgroup cgroup rw,freezer
726 703 0:26 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/hugetlb ro,nosuid,nodev,noexec,relatime master:11 - cgroup cgroup rw,hugetlb
727 703 0:27 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/net_cls,net_prio ro,nosuid,nodev,noexec,relatime master:12 - cgroup cgroup rw,net_cls,net_prio
734 703 0:29 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/cpuset ro,nosuid,nodev,noexec,relatime master:14 - cgroup cgroup rw,cpuset
735 703 0:30 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/pids ro,nosuid,nodev,noexec,relatime master:15 - cgroup cgroup rw,pids
736 703 0:31 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/cpu,cpuacct ro,nosuid,nodev,noexec,relatime master:16 - cgroup cgroup rw,cpu,cpuacct
737 703 0:32 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/perf_event ro,nosuid,nodev,noexec,relatime master:17 - cgroup cgroup rw,perf_event
740 703 0:33 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/devices ro,nosuid,nodev,noexec,relatime master:18 - cgroup cgroup rw,devices
742 703 0:34 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/blkio ro,nosuid,nodev,noexec,relatime master:19 - cgroup cgroup rw,blkio
744 687 0:78 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw
746 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/volumes/kubernetes.io~empty-dir/cockroach-env /etc/cockroach-env ro,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
760 687 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/containers/cockroachdb/3e868c1f /dev/termination-log rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
776 625 259:3 / /cockroach/cockroach-data rw,relatime - ext4 /dev/nvme2n1 rw,data=ordered
814 625 0:68 / /cockroach/cockroach-certs ro,relatime - tmpfs tmpfs rw
815 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/resolv.conf /etc/resolv.conf rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
816 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/hostname /etc/hostname rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
817 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/etc-hosts /etc/hosts rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
818 687 0:77 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k
819 625 0:69 / /run/secrets/kubernetes.io/serviceaccount ro,relatime - tmpfs tmpfs rw
368 626 0:79 /bus /proc/bus ro,relatime - proc proc rw
375 626 0:79 /fs /proc/fs ro,relatime - proc proc rw
376 626 0:79 /irq /proc/irq ro,relatime - proc proc rw
381 626 0:79 /sys /proc/sys ro,relatime - proc proc rw
397 626 0:79 /sysrq-trigger /proc/sysrq-trigger ro,relatime - proc proc rw
213 626 0:70 / /proc/acpi ro,relatime - tmpfs tmpfs ro
216 626 0:75 /null /proc/kcore rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
217 626 0:75 /null /proc/keys rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
218 626 0:75 /null /proc/latency_stats rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
222 626 0:75 /null /proc/timer_list rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
223 626 0:75 /null /proc/sched_debug rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
224 702 0:101 / /sys/firmware ro,relatime - tmpfs tmpfs ro
`
v1MountsWithCPUController = `625 367 0:71 / / rw,relatime master:85 - overlay overlay rw,lowerdir=/var/lib/docker/overlay2/l/DOLSFLPSKANL4GJ7XKF3OG6PKN:/var/lib/docker/overlay2/l/P7UJPLDFEUSRQ7CZILB7L4T5OP:/var/lib/docker/overlay2/l/FSKO5FFFNQ6XOSVF7T6R2DWZVZ:/var/lib/docker/overlay2/l/YNE4EZZE2GW2DIXRBUP47LB3GU:/var/lib/docker/overlay2/l/F2JNS7YWT5CU7FUXHNV5JUJWQY,upperdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/diff,workdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/work
626 625 0:79 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
687 625 0:75 / /dev rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
691 687 0:82 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666
702 625 0:159 / /sys ro,nosuid,nodev,noexec,relatime - sysfs sysfs ro
703 702 0:99 / /sys/fs/cgroup ro,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755
705 703 0:23 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/systemd ro,nosuid,nodev,noexec,relatime master:9 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
711 703 0:25 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/freezer ro,nosuid,nodev,noexec,relatime master:10 - cgroup cgroup rw,freezer
726 703 0:26 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/hugetlb ro,nosuid,nodev,noexec,relatime master:11 - cgroup cgroup rw,hugetlb
727 703 0:27 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/net_cls,net_prio ro,nosuid,nodev,noexec,relatime master:12 - cgroup cgroup rw,net_cls,net_prio
733 703 0:28 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/memory ro,nosuid,nodev,noexec,relatime master:13 - cgroup cgroup rw,memory
734 703 0:29 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/cpuset ro,nosuid,nodev,noexec,relatime master:14 - cgroup cgroup rw,cpuset
735 703 0:30 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/pids ro,nosuid,nodev,noexec,relatime master:15 - cgroup cgroup rw,pids
736 703 0:31 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/cpu,cpuacct ro,nosuid,nodev,noexec,relatime master:16 - cgroup cgroup rw,cpu,cpuacct
737 703 0:32 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/perf_event ro,nosuid,nodev,noexec,relatime master:17 - cgroup cgroup rw,perf_event
740 703 0:33 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/devices ro,nosuid,nodev,noexec,relatime master:18 - cgroup cgroup rw,devices
742 703 0:34 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40bbda777ee72e81471272a5b8ebffd51fdf7f624e3 /sys/fs/cgroup/blkio ro,nosuid,nodev,noexec,relatime master:19 - cgroup cgroup rw,blkio
744 687 0:78 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw
746 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/volumes/kubernetes.io~empty-dir/cockroach-env /etc/cockroach-env ro,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
760 687 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/containers/cockroachdb/3e868c1f /dev/termination-log rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
776 625 259:3 / /cockroach/cockroach-data rw,relatime - ext4 /dev/nvme2n1 rw,data=ordered
814 625 0:68 / /cockroach/cockroach-certs ro,relatime - tmpfs tmpfs rw
815 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/resolv.conf /etc/resolv.conf rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
816 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/hostname /etc/hostname rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
817 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/etc-hosts /etc/hosts rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
818 687 0:77 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k
819 625 0:69 / /run/secrets/kubernetes.io/serviceaccount ro,relatime - tmpfs tmpfs rw
368 626 0:79 /bus /proc/bus ro,relatime - proc proc rw
375 626 0:79 /fs /proc/fs ro,relatime - proc proc rw
376 626 0:79 /irq /proc/irq ro,relatime - proc proc rw
381 626 0:79 /sys /proc/sys ro,relatime - proc proc rw
397 626 0:79 /sysrq-trigger /proc/sysrq-trigger ro,relatime - proc proc rw
213 626 0:70 / /proc/acpi ro,relatime - tmpfs tmpfs ro
216 626 0:75 /null /proc/kcore rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
217 626 0:75 /null /proc/keys rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
218 626 0:75 /null /proc/latency_stats rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
222 626 0:75 /null /proc/timer_list rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
223 626 0:75 /null /proc/sched_debug rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
224 702 0:101 / /sys/firmware ro,relatime - tmpfs tmpfs ro
`
v1MountsWithoutCPUController = `625 367 0:71 / / rw,relatime master:85 - overlay overlay rw,lowerdir=/var/lib/docker/overlay2/l/DOLSFLPSKANL4GJ7XKF3OG6PKN:/var/lib/docker/overlay2/l/P7UJPLDFEUSRQ7CZILB7L4T5OP:/var/lib/docker/overlay2/l/FSKO5FFFNQ6XOSVF7T6R2DWZVZ:/var/lib/docker/overlay2/l/YNE4EZZE2GW2DIXRBUP47LB3GU:/var/lib/docker/overlay2/l/F2JNS7YWT5CU7FUXHNV5JUJWQY,upperdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/diff,workdir=/var/lib/docker/overlay2/b12d4d510f3eaf4552a749f9d4f6da182d55bfcdc75755f1972fd8ca33f51278/work
626 625 0:79 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
687 625 0:75 / /dev rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
691 687 0:82 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666
702 625 0:159 / /sys ro,nosuid,nodev,noexec,relatime - sysfs sysfs ro
703 702 0:99 / /sys/fs/cgroup ro,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755
705 703 0:23 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/systemd ro,nosuid,nodev,noexec,relatime master:9 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
711 703 0:25 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/freezer ro,nosuid,nodev,noexec,relatime master:10 - cgroup cgroup rw,freezer
726 703 0:26 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/hugetlb ro,nosuid,nodev,noexec,relatime master:11 - cgroup cgroup rw,hugetlb
727 703 0:27 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/net_cls,net_prio ro,nosuid,nodev,noexec,relatime master:12 - cgroup cgroup rw,net_cls,net_prio
734 703 0:29 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/cpuset ro,nosuid,nodev,noexec,relatime master:14 - cgroup cgroup rw,cpuset
735 703 0:30 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/pids ro,nosuid,nodev,noexec,relatime master:15 - cgroup cgroup rw,pids
737 703 0:32 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/perf_event ro,nosuid,nodev,noexec,relatime master:17 - cgroup cgroup rw,perf_event
740 703 0:33 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/devices ro,nosuid,nodev,noexec,relatime master:18 - cgroup cgroup rw,devices
742 703 0:34 /kubepods/besteffort/podcbfx2j5d-3f6f-11ea-983d-0abc95f90166/c17eb535a47774285717e40scha6577eedze81g7227xa518dbffd51fdf7f624e3 /sys/fs/cgroup/blkio ro,nosuid,nodev,noexec,relatime master:19 - cgroup cgroup rw,blkio
744 687 0:78 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw
746 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/volumes/kubernetes.io~empty-dir/cockroach-env /etc/cockroach-env ro,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
760 687 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/containers/cockroachdb/3e868c1f /dev/termination-log rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
776 625 259:3 / /cockroach/cockroach-data rw,relatime - ext4 /dev/nvme2n1 rw,data=ordered
814 625 0:68 / /cockroach/cockroach-certs ro,relatime - tmpfs tmpfs rw
815 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/resolv.conf /etc/resolv.conf rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
816 625 259:1 /var/lib/docker/containers/b7d4d62b68384b4adb9b76bbe156e7a7bcd469c6d40cdd0e70f1949184260683/hostname /etc/hostname rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
817 625 259:1 /var/lib/kubelet/pods/1bf924dd-3f6f-11ea-983d-0abc95f90166/etc-hosts /etc/hosts rw,noatime - xfs /dev/nvme0n1p1 rw,attr2,inode64,noquota
818 687 0:77 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k
819 625 0:69 / /run/secrets/kubernetes.io/serviceaccount ro,relatime - tmpfs tmpfs rw
368 626 0:79 /bus /proc/bus ro,relatime - proc proc rw
375 626 0:79 /fs /proc/fs ro,relatime - proc proc rw
376 626 0:79 /irq /proc/irq ro,relatime - proc proc rw
381 626 0:79 /sys /proc/sys ro,relatime - proc proc rw
397 626 0:79 /sysrq-trigger /proc/sysrq-trigger ro,relatime - proc proc rw
213 626 0:70 / /proc/acpi ro,relatime - tmpfs tmpfs ro
216 626 0:75 /null /proc/kcore rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
217 626 0:75 /null /proc/keys rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
218 626 0:75 /null /proc/latency_stats rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
222 626 0:75 /null /proc/timer_list rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
223 626 0:75 /null /proc/sched_debug rw,nosuid - tmpfs tmpfs rw,size=65536k,mode=755
224 702 0:101 / /sys/firmware ro,relatime - tmpfs tmpfs ro
`
v2Mounts = `371 344 0:35 / / rw,relatime - overlay overlay rw,context="system_u:object_r:container_file_t:s0:c200,c321",lowerdir=/var/lib/containers/storage/overlay/l/SPNDOAU3AZNJMNKU3F5THCA36R,upperdir=/var/lib/containers/storage/overlay/7dcd88f815bded7b833fb5dc0f25de897250bcfa828624c0d78393689d0bc312/diff,workdir=/var/lib/containers/storage/overlay/7dcd88f815bded7b833fb5dc0f25de897250bcfa828624c0d78393689d0bc312/work
372 371 0:37 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
373 371 0:38 / /dev rw,nosuid - tmpfs tmpfs rw,context="system_u:object_r:container_file_t:s0:c200,c321",size=65536k,mode=755
374 371 0:39 / /sys ro,nosuid,nodev,noexec,relatime - sysfs sysfs rw,seclabel
375 373 0:40 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,context="system_u:object_r:container_file_t:s0:c200,c321",gid=5,mode=620,ptmxmode=666
376 373 0:36 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw,seclabel
377 371 0:24 /containers/storage/overlay-containers/f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810/userdata/hostname /etc/hostname rw,nosuid,nodev - tmpfs tmpfs rw,seclabel,mode=755
378 371 0:24 /containers/storage/overlay-containers/f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810/userdata/.containerenv /run/.containerenv rw,nosuid,nodev - tmpfs tmpfs rw,seclabel,mode=755
379 371 0:24 /containers/storage/overlay-containers/f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810/userdata/run/secrets /run/secrets rw,nosuid,nodev - tmpfs tmpfs rw,seclabel,mode=755
380 371 0:24 /containers/storage/overlay-containers/f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810/userdata/resolv.conf /etc/resolv.conf rw,nosuid,nodev - tmpfs tmpfs rw,seclabel,mode=755
381 371 0:24 /containers/storage/overlay-containers/f1c6b44c0d61f273952b8daecf154cee1be2d503b7e9184ebf7fcaf48e139810/userdata/hosts /etc/hosts rw,nosuid,nodev - tmpfs tmpfs rw,seclabel,mode=755
382 373 0:33 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,context="system_u:object_r:container_file_t:s0:c200,c321",size=64000k
383 374 0:25 / /sys/fs/cgroup ro,nosuid,nodev,noexec,relatime - cgroup2 cgroup2 rw,seclabel
384 372 0:41 / /proc/acpi ro,relatime - tmpfs tmpfs rw,context="system_u:object_r:container_file_t:s0:c200,c321",size=0k
385 372 0:6 /null /proc/kcore rw,nosuid - devtmpfs devtmpfs rw,seclabel,size=1869464k,nr_inodes=467366,mode=755
386 372 0:6 /null /proc/keys rw,nosuid - devtmpfs devtmpfs rw,seclabel,size=1869464k,nr_inodes=467366,mode=755
387 372 0:6 /null /proc/timer_list rw,nosuid - devtmpfs devtmpfs rw,seclabel,size=1869464k,nr_inodes=467366,mode=755
388 372 0:6 /null /proc/sched_debug rw,nosuid - devtmpfs devtmpfs rw,seclabel,size=1869464k,nr_inodes=467366,mode=755
389 372 0:42 / /proc/scsi ro,relatime - tmpfs tmpfs rw,context="system_u:object_r:container_file_t:s0:c200,c321",size=0k
390 374 0:43 / /sys/firmware ro,relatime - tmpfs tmpfs rw,context="system_u:object_r:container_file_t:s0:c200,c321",size=0k
391 374 0:44 / /sys/fs/selinux ro,relatime - tmpfs tmpfs rw,context="system_u:object_r:container_file_t:s0:c200,c321",size=0k
392 372 0:37 /bus /proc/bus ro,relatime - proc proc rw
393 372 0:37 /fs /proc/fs ro,relatime - proc proc rw
394 372 0:37 /irq /proc/irq ro,relatime - proc proc rw
395 372 0:37 /sys /proc/sys ro,relatime - proc proc rw
396 372 0:37 /sysrq-trigger /proc/sysrq-trigger ro,relatime - proc proc rw
345 373 0:40 /0 /dev/console rw,nosuid,noexec,relatime - devpts devpts rw,context="system_u:object_r:container_file_t:s0:c200,c321",gid=5,mode=620,ptmxmode=666
`
v1MemoryStat = `cache 784113664
rss 1703952384
rss_huge 27262976
shmem 0
mapped_file 14520320
dirty 4096
writeback 0
swap 0
pgpgin 35979039
pgpgout 35447229
pgfault 24002539
pgmajfault 3871
inactive_anon 0
active_anon 815435776
inactive_file 1363746816
active_file 308867072
unevictable 0
hierarchical_memory_limit 2936016896
hierarchical_memsw_limit 9223372036854771712
total_cache 784113664
total_rss 1703952384
total_rss_huge 27262976
total_shmem 0
total_mapped_file 14520320
total_dirty 4096
total_writeback 0
total_swap 0
total_pgpgin 35979039
total_pgpgout 35447229
total_pgfault 24002539
total_pgmajfault 3871
total_inactive_anon 0
total_active_anon 815435776
total_inactive_file 1363746816
total_active_file 308867072
total_unevictable 0
`
v2MemoryStat = `anon 784113664
file 1703952384
kernel_stack 27262976
pagetables 0
percpu 14520320
sock 4096
shmem 0
file_mapped 0
file_dirty 35979039
file_writeback 35447229
swapcached 24002539
anon_thp 3871
file_thp 0
shmem_thp 815435776
inactive_anon 1363746816
active_anon 308867072
inactive_file 1363746816
active_file 2936016896
unevictable 9223372036854771712
slab_reclaimable 784113664
slab_unreclaimable 1703952384
slab 27262976
workingset_refault_anon 0
workingset_refault_file 14520320
workingset_activate_anon 4096
workingset_activate_file 0
workingset_restore_anon 0
workingset_restore_file 35979039
workingset_nodereclaim 35447229
pgfault 24002539
pgmajfault 3871
pgrefill 0
pgscan 815435776
pgsteal 1363746816
pgactivate 308867072
pgdeactivate 0
pglazyfree 0
pglazyfreed 0
thp_fault_alloc 0
thp_collapse_alloc 0
`
v1MemoryUsageInBytes = "276328448"
	// Both /proc/<pid>/mountinfo and /proc/<pid>/cgroup show the mount and the cgroup relative to the cgroup NS root.
	// This tests the case where the memory controller mount and the cgroup are not exactly the same (as is the case with k8s pods).
v1CgroupWithMemoryControllerNS = "12:memory:/cgroup_test"
v1MountsWithMemControllerNS = "50 35 0:44 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:25 - cgroup cgroup rw,memory"
// Example where the paths in /proc/self/mountinfo and /proc/self/cgroup are not the same for the cpu controller
//
// sudo cgcreate -t $USER:$USER -a $USER:$USER -g cpu:crdb_test
// echo 100000 > /sys/fs/cgroup/cpu/crdb_test/cpu.cfs_period_us
// echo 33300 > /sys/fs/cgroup/cpu/crdb_test/cpu.cfs_quota_us
// cgexec -g cpu:crdb_test ./cockroach ...
v1CgroupWithCPUControllerNS = "5:cpu,cpuacct:/crdb_test"
v1MountsWithCPUControllerNS = "43 35 0:37 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,cpu,cpuacct"
	// Same as above, but run under unshare -C.
	// In that case the location of the mount cannot be determined.
v1CgroupWithCPUControllerNSMountRel = "5:cpu,cpuacct:/"
v1MountsWithCPUControllerNSMountRel = "43 35 0:37 /.. /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,cpu,cpuacct"
	// Same as above, but with the cgroup fs mounted one more time inside the NS:
// sudo mount -t cgroup -o cpu,cpuacct none /sys/fs/cgroup/cpu,cpuacct/crdb_test
v1CgroupWithCPUControllerNSMountRelRemount = "5:cpu,cpuacct:/"
v1MountsWithCPUControllerNSMountRelRemount = `
43 35 0:37 /.. /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,cpu,cpuacct
161 43 0:37 / /sys/fs/cgroup/cpu,cpuacct/crdb_test rw,relatime shared:95 - cgroup none rw,cpu,cpuacct
`
	// Same as above, but after exiting the NS without unmounting.
v1CgroupWithCPUControllerNS2 = "5:cpu,cpuacct:/crdb_test"
v1MountsWithCPUControllerNS2 = "161 43 0:37 /crdb_test /sys/fs/cgroup/cpu,cpuacct/crdb_test rw,relatime shared:95 - cgroup none rw,cpu,cpuacct"
)
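
The fixtures above are verbatim /proc/<pid>/mountinfo contents. Per proc(5), field 5 of each line is the mount point, a variable number of optional fields is terminated by a lone "-", and the filesystem type comes immediately after that separator; cgroup mounts can be located by scanning exactly those positions. The following is a minimal sketch of that parsing; findCgroupV2Mount is an illustrative helper, not the package's actual API:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// findCgroupV2Mount returns the mount point of the first cgroup2 filesystem
// found in the given mountinfo content.
func findCgroupV2Mount(mountinfo string) (string, bool) {
	scanner := bufio.NewScanner(strings.NewReader(mountinfo))
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		// Optional fields end at a lone "-"; the fstype follows it.
		// Field 5 (index 4) is the mount point.
		for i, f := range fields {
			if f != "-" {
				continue
			}
			if i+1 < len(fields) && fields[i+1] == "cgroup2" && len(fields) > 4 {
				return fields[4], true
			}
			break
		}
	}
	return "", false
}

func main() {
	line := "383 374 0:25 / /sys/fs/cgroup ro,nosuid,nodev,noexec,relatime - cgroup2 cgroup2 rw,seclabel"
	fmt.Println(findCgroupV2Mount(line)) // /sys/fs/cgroup true
}

Run against the v2Mounts fixture above, this finds the same cgroup2 entry (mount ID 383) and returns /sys/fs/cgroup.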


@@ -13,6 +13,7 @@ go_library(
"//errno",
"//metrics",
"//parser/terror",
"//util/cgroup",
"//util/dbterror",
"//util/logutil",
"@com_github_shirou_gopsutil_v3//mem",


@@ -15,14 +15,12 @@
package memory
import (
"os"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/util/cgroup"
"github.com/shirou/gopsutil/v3/mem"
)
@@ -60,12 +58,6 @@ func MemUsedNormal() (uint64, error) {
return v.Used, nil
}
const (
cGroupMemLimitPath = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
cGroupMemUsagePath = "/sys/fs/cgroup/memory/memory.usage_in_bytes"
selfCGroupPath = "/proc/self/cgroup"
)
type memInfoCache struct {
updateTime time.Time
mu *sync.RWMutex
@@ -101,7 +93,7 @@ func MemTotalCGroup() (uint64, error) {
if time.Since(t) < 60*time.Second {
return memo, nil
}
memo, err := readUint(cGroupMemLimitPath)
memo, err := cgroup.GetMemoryLimit()
if err != nil {
return memo, err
}
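
cgroup.GetMemoryLimit itself is not part of this hunk. A plausible sketch of what such a helper has to do, assuming the conventional mount points rather than the mountinfo-based discovery the real package performs (readMemLimit and the hard-coded paths below are illustrative): under cgroup v2 the limit lives in memory.max, where the literal string "max" means unlimited, and under cgroup v1 in the memory controller's memory.limit_in_bytes.

package main

import (
	"fmt"
	"math"
	"os"
	"strconv"
	"strings"
)

// readMemLimit is an illustrative stand-in for cgroup.GetMemoryLimit; the
// real implementation discovers the mount points instead of assuming them.
func readMemLimit() (uint64, error) {
	// cgroup v2 unified hierarchy: memory.max holds the limit, and the
	// literal "max" means no limit.
	if b, err := os.ReadFile("/sys/fs/cgroup/memory.max"); err == nil {
		s := strings.TrimSpace(string(b))
		if s == "max" {
			return math.MaxUint64, nil
		}
		return strconv.ParseUint(s, 10, 64)
	}
	// cgroup v1: fall back to the memory controller's limit file.
	b, err := os.ReadFile("/sys/fs/cgroup/memory/memory.limit_in_bytes")
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimSpace(string(b)), 10, 64)
}

func main() {
	limit, err := readMemLimit()
	fmt.Println(limit, err)
}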
@@ -115,7 +107,7 @@ func MemUsedCGroup() (uint64, error) {
if time.Since(t) < 500*time.Millisecond {
return memo, nil
}
memo, err := readUint(cGroupMemUsagePath)
memo, err := cgroup.GetMemoryUsage()
if err != nil {
return memo, err
}
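
Both hunks above share one caching idiom: a value guarded by a sync.RWMutex is refreshed only when it is older than a TTL, 60 seconds for the total and 500 milliseconds for the usage, so hot paths rarely touch the cgroup filesystem. A minimal sketch of that pattern follows; ttlCache and cached are illustrative names, not the memInfoCache type itself.

package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlCache guards a value with an RWMutex, mirroring the memInfoCache
// pattern visible in the hunks above.
type ttlCache struct {
	mu         sync.RWMutex
	updateTime time.Time
	value      uint64
}

func (c *ttlCache) get() (uint64, time.Time) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.value, c.updateTime
}

func (c *ttlCache) set(v uint64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.value, c.updateTime = v, time.Now()
}

// cached serves the stored value while it is younger than ttl and
// otherwise refreshes it via read.
func cached(c *ttlCache, ttl time.Duration, read func() (uint64, error)) (uint64, error) {
	if v, t := c.get(); time.Since(t) < ttl {
		return v, nil
	}
	v, err := read()
	if err != nil {
		return 0, err
	}
	c.set(v)
	return v, nil
}

func main() {
	var c ttlCache
	read := func() (uint64, error) { return 42, nil } // stand-in for a cgroup file read
	fmt.Println(cached(&c, 60*time.Second, read))
}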
@@ -124,7 +116,7 @@ func MemUsedCGroup() (uint64, error) {
}
func init() {
if inContainer() {
if cgroup.InContainer() {
MemTotal = MemTotalCGroup
MemUsed = MemUsedCGroup
} else {
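
This init hunk works because MemTotal and MemUsed are package-level function variables, so the data source is chosen once at startup instead of being branched on at every call. A self-contained sketch of the same dispatch, with stand-in reader bodies:

package main

import "fmt"

// Stand-ins for the cgroup-aware and host-level readers.
func memTotalCGroup() (uint64, error) { return 2 << 30, nil }
func memUsedCGroup() (uint64, error)  { return 1 << 30, nil }
func memTotalNormal() (uint64, error) { return 16 << 30, nil }
func memUsedNormal() (uint64, error)  { return 4 << 30, nil }

// inContainer is a stand-in for cgroup.InContainer.
func inContainer() bool { return false }

// Function variables let init pick the implementation exactly once.
var (
	memTotal func() (uint64, error)
	memUsed  func() (uint64, error)
)

func init() {
	if inContainer() {
		memTotal, memUsed = memTotalCGroup, memUsedCGroup
	} else {
		memTotal, memUsed = memTotalNormal, memUsedNormal
	}
}

func main() {
	t, _ := memTotal()
	u, _ := memUsed()
	fmt.Println(t, u)
}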
@@ -146,47 +138,6 @@ func init() {
terror.MustNil(err)
}
func inContainer() bool {
v, err := os.ReadFile(selfCGroupPath)
if err != nil {
return false
}
if strings.Contains(string(v), "docker") ||
strings.Contains(string(v), "kubepods") ||
strings.Contains(string(v), "containerd") {
return true
}
return false
}
// refer to https://github.com/containerd/cgroups/blob/318312a373405e5e91134d8063d04d59768a1bff/utils.go#L251
func parseUint(s string, base, bitSize int) (uint64, error) {
v, err := strconv.ParseUint(s, base, bitSize)
if err != nil {
intValue, intErr := strconv.ParseInt(s, base, bitSize)
// 1. Handle negative values greater than MinInt64 (and)
// 2. Handle negative values lesser than MinInt64
if intErr == nil && intValue < 0 {
return 0, nil
} else if intErr != nil &&
intErr.(*strconv.NumError).Err == strconv.ErrRange &&
intValue < 0 {
return 0, nil
}
return 0, err
}
return v, nil
}
// refer to https://github.com/containerd/cgroups/blob/318312a373405e5e91134d8063d04d59768a1bff/utils.go#L243
func readUint(path string) (uint64, error) {
v, err := os.ReadFile(path)
if err != nil {
return 0, err
}
return parseUint(strings.TrimSpace(string(v)), 10, 64)
}
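
One contract of the removed helpers is easy to miss: parseUint deliberately maps negative inputs to 0 instead of surfacing an error, because cgroup v1 reports "no limit" as -1 in files such as cpu.cfs_quota_us. A compact, runnable restatement of that behavior, with the function body copied from the removed lines above:

package main

import (
	"fmt"
	"strconv"
)

// parseUint mirrors the removed helper: negative values, including those
// that underflow int64, are clamped to 0 rather than treated as errors.
func parseUint(s string, base, bitSize int) (uint64, error) {
	v, err := strconv.ParseUint(s, base, bitSize)
	if err != nil {
		intValue, intErr := strconv.ParseInt(s, base, bitSize)
		// Negative values above MinInt64 parse cleanly; values below
		// MinInt64 fail with ErrRange but are still negative.
		if intErr == nil && intValue < 0 {
			return 0, nil
		} else if intErr != nil &&
			intErr.(*strconv.NumError).Err == strconv.ErrRange &&
			intValue < 0 {
			return 0, nil
		}
		return 0, err
	}
	return v, nil
}

func main() {
	fmt.Println(parseUint("-1", 10, 64))        // 0 <nil>  (cgroup v1 "unlimited")
	fmt.Println(parseUint("276328448", 10, 64)) // 276328448 <nil>
}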
// InstanceMemUsed returns the memory usage of this TiDB server
func InstanceMemUsed() (uint64, error) {
used, t := serverMemUsage.get()