om仓迁移 (OM repository migration)
198
other/transfer.py
Normal file
@@ -0,0 +1,198 @@
#!/usr/bin/env python3
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# -------------------------------------------------------------------------
#
# transfer.py
#     transfer a C function library file to cluster nodes.
#
# IDENTIFICATION
#     src/manager/om/other/transfer.py
#
# -------------------------------------------------------------------------

import os
import sys
import pwd
import getopt

from script.gspylib.common.DbClusterInfo import dbClusterInfo
from script.gspylib.common.Common import DefaultValue

GPPATH = os.getenv("GPHOME")
DefaultValue.checkPathVaild(GPPATH)
sys.path.insert(0, GPPATH)
from script.gspylib.common.GaussLog import GaussLog
from script.gspylib.common.ErrorCode import ErrorCode
from script.gspylib.threads.SshTool import SshTool

# source file path
SRCFILEPATH = ""
DRCPATH = ""
DNINSTANCEID = []
ISALLHOSTS = False
g_logger = None
g_clusterUser = ""
g_clusterInfo = None
g_sshTool = None


def usage():
    """
transfer.py is a utility to transfer a C function library file to all nodes or to a standby node.

Usage:
  transfer.py -? | --help
  transfer.py 1 sourcefile destinationpath    copy sourcefile to all cluster nodes.
  transfer.py 2 sourcefile pgxc_node_name     copy sourcefile to the same path on the node
                                              that contains the standby instance of pgxc_node_name.
    """

    print(usage.__doc__)
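
# Illustrative invocations of the two modes described above (the paths and the
# pgxc_node_name value are hypothetical, shown only to make the usage concrete):
#   python3 transfer.py 1 /tmp/fenced_udf.so $GAUSSHOME/lib/postgresql/   # copy to every node
#   python3 transfer.py 2 /tmp/fenced_udf.so dn_6001_6002                 # copy to the peer of that DN group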


def initGlobals():
    global g_logger
    global g_clusterUser
    global g_clusterInfo
    global g_sshTool

    if os.getuid() == 0:
        GaussLog.exitWithError(ErrorCode.GAUSS_501["GAUSS_50105"])
        sys.exit(1)
    # Init user
    g_clusterUser = pwd.getpwuid(os.getuid()).pw_name
    # Init logger
    logFile = DefaultValue.getOMLogPath(DefaultValue.LOCAL_LOG_FILE, g_clusterUser, "", "")
    g_logger = GaussLog(logFile, "Transfer_C_function_file")
    # Init ClusterInfo
    g_clusterInfo = dbClusterInfo()
    g_clusterInfo.initFromStaticConfig(g_clusterUser)
    # Init sshtool
    g_sshTool = SshTool(g_clusterInfo.getClusterNodeNames(), g_logger.logFile)


def checkSrcFile(srcFile):
    g_logger.log("Check whether the source file exists.")
    if not os.path.isfile(srcFile):
        g_logger.debug("The %s does not exist. " % srcFile)
        return False
    else:
        g_logger.log("The source file exists.")
        return True


def parseCommandLine():
    g_logger.log("Start parsing parameters.")
    try:
        opts, args = getopt.getopt(sys.argv[1:], "")
        if len(args) != 3:
            raise getopt.GetoptError("The number of parameters is not equal to 3.")
    except getopt.GetoptError as e:
        g_logger.logExit("Parameter error, Error:\n%s" % str(e))

    global SRCFILEPATH
    global DRCPATH
    global DNINSTANCEID
    global ISALLHOSTS

    if args[0] not in ['1', '2']:
        g_logger.logExit("Parameter error.")
    if args[0] == "1":
        ISALLHOSTS = True
        if not checkSrcFile(args[1]):
            g_logger.logExit("Parameter error.")
        SRCFILEPATH = args[1]
        DRCPATH = args[2]
    elif args[0] == "2":
        if not checkSrcFile(args[1]):
            g_logger.logExit("Parameter error.")
        SRCFILEPATH = args[1]
        nodenamelst = args[2].split("_")
        # When the cluster type is primary-standby-dummy, the standby DN instance ID
        # is the third element of "nodenamelst".
        if len(nodenamelst) == 3:
            DNINSTANCEID.append(nodenamelst[2])
            return
        # When the cluster type is primary-multi-standby, the standby DN instance IDs
        # are the elements from the third one onwards.
        for dnId in nodenamelst[2:]:
            DNINSTANCEID.append(dnId)
    else:
        g_logger.logExit("Parameter error.")
    g_logger.log("Successfully parsed parameters.")


def scpFileToAllHost(srcFile, drcpath):
    try:
        g_logger.log("Transfer C function file to all hosts.")
        g_sshTool.scpFiles(srcFile, drcpath, g_clusterInfo.getClusterNodeNames())
        cmd = "chmod 600 '%s'" % drcpath
        g_sshTool.executeCommand(cmd,
                                 "Transfer C function file to all hosts.",
                                 DefaultValue.SUCCESS,
                                 g_clusterInfo.getClusterNodeNames())
    except Exception as e:
        raise Exception(ErrorCode.GAUSS_536["GAUSS_53611"] % str(e))


def scpFileToStandy(srcFile, InstanceID):
    try:
        g_logger.log("Transfer C function file to standby node.")

        mirrorID = 0
        peerNode = []
        # Get the instance mirrorID by InstanceID
        for dbNode in g_clusterInfo.dbNodes:
            for dbInst in dbNode.datanodes:
                if str(dbInst.instanceId) == InstanceID:
                    mirrorID = dbInst.mirrorId

        if mirrorID == 0:
            g_logger.logExit("Failed to find primary instance mirrorId.")

        # Get the standby instances
        for node in g_clusterInfo.dbNodes:
            for instance in node.datanodes:
                if instance.mirrorId == mirrorID and (instance.instanceType == 1 or instance.instanceType == 0):
                    peerNode.append(node.name)

        # send the .so file to the peer instances
        (despath, sofile) = os.path.split(srcFile)
        for deshost in peerNode:
            status = g_sshTool.checkRemoteFileExist(deshost, srcFile, "")
            if not status:
                g_sshTool.scpFiles(srcFile, despath, [deshost])
    except Exception as e:
        raise Exception(ErrorCode.GAUSS_536["GAUSS_53611"] % str(e))
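
# Note on the lookup above: datanode instances sharing a mirrorId are treated as one
# HA group, and instanceType values 0/1 are assumed here to denote primary/standby
# instances (dummy standbys are excluded), so peerNode ends up holding every node that
# hosts a member of the group identified by InstanceID.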


if __name__ == '__main__':
    # help info
    if "-?" in sys.argv[1:] or "--help" in sys.argv[1:]:
        usage()
        sys.exit(0)

    # Init globals
    initGlobals()
    g_logger.log("Start transferring the C function file.")

    # parse command line
    parseCommandLine()
    # start sending the .so file
    try:
        if ISALLHOSTS:
            scpFileToAllHost(SRCFILEPATH, DRCPATH)
        else:
            for dnInstanceId in DNINSTANCEID:
                scpFileToStandy(SRCFILEPATH, dnInstanceId)
    except Exception as e:
        g_logger.logExit("Failed to transfer C function file. Error:%s" % str(e))
    g_logger.log("Successfully transferred the C function file.")
    sys.exit(0)
0
script/__init__.py
Normal file
250
script/config/leap-seconds.conf
Normal file
@@ -0,0 +1,250 @@
#
# In the following text, the symbol '#' introduces
# a comment, which continues from that symbol until
# the end of the line. A plain comment line has a
# whitespace character following the comment indicator.
# There are also special comment lines defined below.
# A special comment will always have a non-whitespace
# character in column 2.
#
# A blank line should be ignored.
#
# The following table shows the corrections that must
# be applied to compute International Atomic Time (TAI)
# from the Coordinated Universal Time (UTC) values that
# are transmitted by almost all time services.
#
# The first column shows an epoch as a number of seconds
# since 1 January 1900, 00:00:00 (1900.0 is also used to
# indicate the same epoch.) Both of these time stamp formats
# ignore the complexities of the time scales that were
# used before the current definition of UTC at the start
# of 1972. (See note 3 below.)
# The second column shows the number of seconds that
# must be added to UTC to compute TAI for any timestamp
# at or after that epoch. The value on each line is
# valid from the indicated initial instant until the
# epoch given on the next one or indefinitely into the
# future if there is no next line.
# (The comment on each line shows the representation of
# the corresponding initial epoch in the usual
# day-month-year format. The epoch always begins at
# 00:00:00 UTC on the indicated day. See Note 5 below.)
#
# Important notes:
#
# 1. Coordinated Universal Time (UTC) is often referred to
# as Greenwich Mean Time (GMT). The GMT time scale is no
# longer used, and the use of GMT to designate UTC is
# discouraged.
#
# 2. The UTC time scale is realized by many national
# laboratories and timing centers. Each laboratory
# identifies its realization with its name: Thus
# UTC(NIST), UTC(USNO), etc. The differences among
# these different realizations are typically on the
# order of a few nanoseconds (i.e., 0.000 000 00x s)
# and can be ignored for many purposes. These differences
# are tabulated in Circular T, which is published monthly
# by the International Bureau of Weights and Measures
# (BIPM). See www.bipm.org for more information.
#
# 3. The current definition of the relationship between UTC
# and TAI dates from 1 January 1972. A number of different
# time scales were in use before that epoch, and it can be
# quite difficult to compute precise timestamps and time
# intervals in those "prehistoric" days. For more information,
# consult:
#
#     The Explanatory Supplement to the Astronomical
#     Ephemeris.
# or
#     Terry Quinn, "The BIPM and the Accurate Measurement
#     of Time," Proc. of the IEEE, Vol. 79, pp. 894-905,
#     July, 1991.
#
# 4. The decision to insert a leap second into UTC is currently
# the responsibility of the International Earth Rotation and
# Reference Systems Service. (The name was changed from the
# International Earth Rotation Service, but the acronym IERS
# is still used.)
#
# Leap seconds are announced by the IERS in its Bulletin C.
#
# See www.iers.org for more details.
#
# Every national laboratory and timing center uses the
# data from the BIPM and the IERS to construct UTC(lab),
# their local realization of UTC.
#
# Although the definition also includes the possibility
# of dropping seconds ("negative" leap seconds), this has
# never been done and is unlikely to be necessary in the
# foreseeable future.
#
# 5. If your system keeps time as the number of seconds since
# some epoch (e.g., NTP timestamps), then the algorithm for
# assigning a UTC time stamp to an event that happens during a positive
# leap second is not well defined. The official name of that leap
# second is 23:59:60, but there is no way of representing that time
# in these systems.
# Many systems of this type effectively stop the system clock for
# one second during the leap second and use a time that is equivalent
# to 23:59:59 UTC twice. For these systems, the corresponding TAI
# timestamp would be obtained by advancing to the next entry in the
# following table when the time equivalent to 23:59:59 UTC
# is used for the second time. Thus the leap second which
# occurred on 30 June 1972 at 23:59:59 UTC would have TAI
# timestamps computed as follows:
#
# ...
# 30 June 1972 23:59:59 (2287785599, first time): TAI= UTC + 10 seconds
# 30 June 1972 23:59:60 (2287785599,second time): TAI= UTC + 11 seconds
# 1  July 1972 00:00:00 (2287785600)              TAI= UTC + 11 seconds
# ...
#
# If your system realizes the leap second by repeating 00:00:00 UTC twice
# (this is possible but not usual), then the advance to the next entry
# in the table must occur the second time that a time equivalent to
# 00:00:00 UTC is used. Thus, using the same example as above:
#
# ...
# 30 June 1972 23:59:59 (2287785599):             TAI= UTC + 10 seconds
# 30 June 1972 23:59:60 (2287785600, first time): TAI= UTC + 10 seconds
# 1  July 1972 00:00:00 (2287785600,second time): TAI= UTC + 11 seconds
# ...
#
# in both cases the use of timestamps based on TAI produces a smooth
# time scale with no discontinuity in the time interval. However,
# although the long-term behavior of the time scale is correct in both
# methods, the second method is technically not correct because it adds
# the extra second to the wrong day.
#
# This complexity would not be needed for negative leap seconds (if they
# are ever used). The UTC time would skip 23:59:59 and advance from
# 23:59:58 to 00:00:00 in that case. The TAI offset would decrease by
# 1 second at the same instant. This is a much easier situation to deal
# with, since the difficulty of unambiguously representing the epoch
# during the leap second does not arise.
#
# Some systems implement leap seconds by amortizing the leap second
# over the last few minutes of the day. The frequency of the local
# clock is decreased (or increased) to realize the positive (or
# negative) leap second. This method removes the time step described
# above. Although the long-term behavior of the time scale is correct
# in this case, this method introduces an error during the adjustment
# period both in time and in frequency with respect to the official
# definition of UTC.
#
# Questions or comments to:
#     Judah Levine
#     Time and Frequency Division
#     NIST
#     Boulder, Colorado
#     Judah.Levine@nist.gov
#
# Last Update of leap second values: 8 July 2016
#
# The following line shows this last update date in NTP timestamp
# format. This is the date on which the most recent change to
# the leap second data was added to the file. This line can
# be identified by the unique pair of characters in the first two
# columns as shown below.
#
#$ 3676924800
#
# The NTP timestamps are in units of seconds since the NTP epoch,
# which is 1 January 1900, 00:00:00. The Modified Julian Day number
# corresponding to the NTP time stamp, X, can be computed as
#
# X/86400 + 15020
#
# where the first term converts seconds to days and the second
# term adds the MJD corresponding to the time origin defined above.
# The integer portion of the result is the integer MJD for that
# day, and any remainder is the time of day, expressed as the
# fraction of the day since 0 hours UTC. The conversion from day
# fraction to seconds or to hours, minutes, and seconds may involve
# rounding or truncation, depending on the method used in the
# computation.
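#
# Worked example (added for clarity): for the update time stamp 3676924800
# given above, 3676924800/86400 + 15020 = 42557 + 15020 = 57577, i.e. MJD 57577,
# which corresponds to 8 July 2016.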
#
# The data in this file will be updated periodically as new leap
# seconds are announced. In addition to being entered on the line
# above, the update time (in NTP format) will be added to the basic
# file name leap-seconds to form the name leap-seconds.<NTP TIME>.
# In addition, the generic name leap-seconds.list will always point to
# the most recent version of the file.
#
# This update procedure will be performed only when a new leap second
# is announced.
#
# The following entry specifies the expiration date of the data
# in this file in units of seconds since the origin at the instant
# 1 January 1900, 00:00:00. This expiration date will be changed
# at least twice per year whether or not a new leap second is
# announced. These semi-annual changes will be made no later
# than 1 June and 1 December of each year to indicate what
# action (if any) is to be taken on 30 June and 31 December,
# respectively. (These are the customary effective dates for new
# leap seconds.) This expiration date will be identified by a
# unique pair of characters in columns 1 and 2 as shown below.
# In the unlikely event that a leap second is announced with an
# effective date other than 30 June or 31 December, then this
# file will be edited to include that leap second as soon as it is
# announced or at least one month before the effective date
# (whichever is later).
# If an announcement by the IERS specifies that no leap second is
# scheduled, then only the expiration date of the file will
# be advanced to show that the information in the file is still
# current -- the update time stamp, the data and the name of the file
# will not change.
#
# Updated through IERS Bulletin C54
# File expires on: 28 June 2018
#
#@ 3739132800
#
2272060800 10 # 1 Jan 1972
2287785600 11 # 1 Jul 1972
2303683200 12 # 1 Jan 1973
2335219200 13 # 1 Jan 1974
2366755200 14 # 1 Jan 1975
2398291200 15 # 1 Jan 1976
2429913600 16 # 1 Jan 1977
2461449600 17 # 1 Jan 1978
2492985600 18 # 1 Jan 1979
2524521600 19 # 1 Jan 1980
2571782400 20 # 1 Jul 1981
2603318400 21 # 1 Jul 1982
2634854400 22 # 1 Jul 1983
2698012800 23 # 1 Jul 1985
2776982400 24 # 1 Jan 1988
2840140800 25 # 1 Jan 1990
2871676800 26 # 1 Jan 1991
2918937600 27 # 1 Jul 1992
2950473600 28 # 1 Jul 1993
2982009600 29 # 1 Jul 1994
3029443200 30 # 1 Jan 1996
3076704000 31 # 1 Jul 1997
3124137600 32 # 1 Jan 1999
3345062400 33 # 1 Jan 2006
3439756800 34 # 1 Jan 2009
3550089600 35 # 1 Jul 2012
3644697600 36 # 1 Jul 2015
3692217600 37 # 1 Jan 2017
#
# the following special comment contains the
# hash value of the data in this file computed
# use the secure hash algorithm as specified
# by FIPS 180-1. See the files in ~/pub/sha for
# the details of how this hash value is
# computed. Note that the hash computation
# ignores comments and whitespace characters
# in data lines. It includes the NTP values
# of both the last modification time and the
# expiration time of the file, but not the
# white space on those lines.
# the hash line is also ignored in the
# computation.
#
#h 5101445a 69948b51 9153e2b 2086e3d8 d54561a3
56
script/config/logrotate.conf
Normal file
@@ -0,0 +1,56 @@
compress
/var/log/gaussdb/cm/cm_agent/*.log
{
    dateext
    dateformat -%Y-%m-%d
    extension .log
    missingok
    copytruncate
    rotate 16
    size 16M
    noolddir
}
/var/log/gaussdb/cm/cm_server/*.log
{
    dateext
    dateformat -%Y-%m-%d
    extension .log
    missingok
    copytruncate
    rotate 16
    size 16M
    noolddir
}
/var/log/gaussdb/cm/*.log
{
    dateext
    dateformat -%Y-%m-%d
    extension .log
    missingok
    copytruncate
    rotate 16
    size 16M
    noolddir
}
/var/log/gaussdb/om/*.log
{
    dateext
    dateformat -%Y-%m-%d
    extension .log
    missingok
    copytruncate
    rotate 16
    size 16M
    noolddir
}
/var/log/gaussdb/alarm/CM/*.log
{
    dateext
    dateformat -%Y-%m-%d
    extension .log
    missingok
    nocopytruncate
    rotate 16
    size 16M
    noolddir
}
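# Illustrative effect of the directives above (file name and date are examples):
# with "dateext", "dateformat -%Y-%m-%d" and "extension .log", a rotation performed
# on 2020-06-30 renames cm_agent-xxx.log to cm_agent-xxx-2020-06-30.log; rotation is
# triggered once a file exceeds 16 MB, and at most 16 rotated copies are kept.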
243
script/gs_backup
Normal file
@@ -0,0 +1,243 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#############################################################################
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
# Description : gs_backup is a utility to back up or restore binary files and parameter files.
#############################################################################

import os
import sys
import pwd
from gspylib.common.GaussLog import GaussLog
from gspylib.common.Common import DefaultValue
from gspylib.common.ParallelBaseOM import ParallelBaseOM
from gspylib.common.ErrorCode import ErrorCode
from gspylib.common.ParameterParsecheck import Parameter
from impl.backup.OLAP.BackupImplOLAP import BackupImplOLAP

ACTION_BACKUP = "backup"
ACTION_RESTORE = "restore"


class Backup(ParallelBaseOM):
    '''
    classdocs
    input : NA
    output: NA
    '''

    def __init__(self):
        '''
        function: Constructor
        input : NA
        output: NA
        '''
        ParallelBaseOM.__init__(self)
        self.nodename = ""
        self.backupDir = ""
        self.isParameter = False
        self.isBinary = False

    ####################################################################################
    # Help context. U:R:oC:v:
    ####################################################################################
    def usage(self):
        """
gs_backup is a utility to back up or restore binary files and parameter files.

Usage:
  gs_backup -? | --help
  gs_backup -V | --version
  gs_backup -t backup --backup-dir=BACKUPDIR [-h HOSTNAME] [--parameter]
            [--binary] [--all] [-l LOGFILE]
  gs_backup -t restore --backup-dir=BACKUPDIR [-h HOSTNAME] [--parameter]
            [--binary] [--all] [-l LOGFILE]

General options:
  -t                       Operation type. It can be backup or restore.
      --backup-dir=BACKUPDIR
                           Backup or restore directory.
  -h                       The node where the backup files are stored;
                           the node must be specified when restoring.
                           If the node name is not specified,
                           the backup sets are stored on each node.
      --parameter          Back up or restore parameter files only.
                           (This option is used by default.)
      --binary             Back up or restore binary files only.
      --all                Back up or restore both parameter files and binary files.
  -l                       Path of log file.
  -?, --help               Show help information for this utility,
                           and exit the command line mode.
  -V, --version            Show version information.
        """

        print(self.usage.__doc__)
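
    # Illustrative invocations of the usage shown above (the directory and node
    # name are hypothetical):
    #   gs_backup -t backup --backup-dir=/opt/backup --parameter
    #   gs_backup -t restore --backup-dir=/opt/backup -h node1 --all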

    def checkAction(self):
        """
        function: check action
                  if action is null, throw an error
        input : NA
        output: NA
        """
        if (self.action == ""):
            GaussLog.exitWithError(
                ErrorCode.GAUSS_500["GAUSS_50001"] % 't' + ".")

    def checkUserParameter(self):
        """
        function: check user parameter
                  if clusterUser is null, check the user name;
                  if the user name is null, throw an error
        input : NA
        output: NA
        """
        if (self.user == ""):
            self.user = pwd.getpwuid(os.getuid()).pw_name
        if (self.user == ""):
            GaussLog.exitWithError(
                ErrorCode.GAUSS_500["GAUSS_50001"] % 'U' + ".")
        # check if the user exists and is the right user
        DefaultValue.checkUser(self.user)

    def checkBackupPara(self):
        """
        function: check backup parameters
                  if backupDir is null, throw an error;
                  if backupDir is not absolute, throw an error
        input : NA
        output: NA
        """
        if (self.backupDir == ""):
            GaussLog.exitWithError(
                ErrorCode.GAUSS_500["GAUSS_50001"] % '-backup-dir' + ".")
        if (not os.path.isabs(self.backupDir)):
            GaussLog.exitWithError(
                ErrorCode.GAUSS_502["GAUSS_50213"] % self.backupDir)

    def checkLogFilePara(self):
        """
        function: check log file parameter
        input : NA
        output: NA
        """
        # check log file
        if (self.logFile == ""):
            self.logFile = DefaultValue.getOMLogPath(
                DefaultValue.GS_BACKUP_LOG_FILE, self.user, "")
        if (not os.path.isabs(self.logFile)):
            GaussLog.exitWithError(
                ErrorCode.GAUSS_502["GAUSS_50213"] % "log file")

    def parseCommandLine(self):
        """
        function: Parse the command line and save to global variables
        input : NA
        output: NA
        """
        ParaObj = Parameter()
        # get the dict of parameters
        ParaDict = ParaObj.ParameterCommandLine("backup")
        # check whether '--help' was specified
        if (ParaDict.__contains__("helpFlag")):
            self.usage()
            sys.exit(0)
        # parse the --all parameter
        backupAll = False
        parameter_map = {"action": self.action,
                         "backupDir": self.backupDir,
                         "isBinary": self.isBinary,
                         "isParameter": self.isParameter,
                         "logFile": self.logFile,
                         "all": backupAll}
        parameter_keys = parameter_map.keys()

        for key in parameter_keys:
            if (ParaDict.__contains__(key)):
                parameter_map[key] = ParaDict.get(key)

        self.action = parameter_map["action"]
        self.backupDir = parameter_map["backupDir"]
        self.isBinary = parameter_map["isBinary"]
        self.isParameter = parameter_map["isParameter"]
        self.logFile = parameter_map["logFile"]

        if (parameter_map["all"]):
            self.isBinary = True
            self.isParameter = True

        if (ParaDict.__contains__("nodename")):
            nodename = ParaDict.get("nodename")
            if (len(nodename) != 1):
                GaussLog.exitWithError(
                    ErrorCode.GAUSS_500["GAUSS_50004"] % "h" +
                    " The number of nodes must be equal to 1. Please check it.")
            self.nodename = nodename[0]

    def checkParameter(self):
        """
        function: Check parameters from the command line
        input : NA
        output: NA
        """
        self.checkAction()
        # check if the user exists and is the right user
        self.checkUserParameter()
        self.checkLogFilePara()
        # check backupDir
        self.checkBackupPara()
        # check backup content parameters
        if (self.isParameter == False and self.isBinary == False):
            GaussLog.printMessage("Hint: Parameters '--parameter', '--binary'"
                                  " and '--all' were not specified." +
                                  " Only parameter files will be backed up.")
            self.isParameter = True
        if self.action not in (ACTION_BACKUP, ACTION_RESTORE):
            GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"] % 't'
                                   + " The value of the '-t' parameter"
                                     " must be backup or restore.")


if __name__ == '__main__':
    """
    function: main
    input : NA
    output: NA
    """
    # check whether running as root
    if (os.getuid() == 0):
        GaussLog.exitWithError(ErrorCode.GAUSS_501["GAUSS_50105"])
    try:
        # Instantiate the class
        backupObj = Backup()
        # Initialize self, parse the command line and save to global variables
        backupObj.parseCommandLine()

        # check the parameters
        backupObj.checkParameter()

        # set the action flag file
        DefaultValue.setActionFlagFile("gs_backup")

        # get the cluster type
        impl = BackupImplOLAP(backupObj)
        impl.run()
    except Exception as e:
        GaussLog.exitWithError(str(e))
    finally:
        DefaultValue.setActionFlagFile("gs_backup", None, False)
    sys.exit(0)
1754
script/gs_check
Normal file
File diff suppressed because it is too large
1571
script/gs_checkos
Normal file
File diff suppressed because it is too large
307
script/gs_checkperf
Normal file
@@ -0,0 +1,307 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#############################################################################
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
# Description : gs_checkperf is a utility to check the Gauss200 cluster
#               performance and SSD performance.
#
# PMK: database performance collecting and displaying, which can only be run
#      as the cluster user. It depends on many PL/SQL procedures and
#      tables/views (installed in the pmk schema of the postgres database).
#      PMK supports both brief and detailed output.
#
# SSD: SSD disk performance checking,
#      which can only be run as a user with root permission.
#      It depends on the binary tools of the SSD disk.
#############################################################################

import subprocess
import os
import sys
import pwd
import grp
import time
import threading
import glob
import shutil

from gspylib.common.GaussLog import GaussLog
from gspylib.common.DbClusterInfo import dbClusterInfo
from gspylib.threads.SshTool import SshTool
from gspylib.common.Common import ClusterCommand, DefaultValue
from gspylib.common.OMCommand import OMCommand
from gspylib.common.ErrorCode import ErrorCode
from gspylib.common.ParameterParsecheck import Parameter
from gspylib.os.gsfile import g_file
from gspylib.os.gsOSlib import g_OSlib
from impl.checkperf.OLAP.CheckperfImplOLAP import CheckperfImplOLAP
from multiprocessing.dummy import Pool as ThreadPool

#############################################################################
# Global variables
#   g_opts: global options
#   g_logger: global logger
#   g_sshTool: global ssh interface
#############################################################################
g_opts = None
g_logger = None
g_sshTool = None


class CmdOptions():
    """
    init the command options
    """

    def __init__(self):
        # initialize variables
        self.show_detail = False
        self.outFile = ""
        self.outFile_tmp = ""
        self.logFile = ""
        self.localLog = ""
        self.user = ""
        self.mpprcFile = ""
        self.checkItem = []
        self.databaseSizeFile = ""
        self.databaseSize = 0


class Checkperf():
    """
    """

    def __init__(self):
        self.clusterInfo = dbClusterInfo()
        self.DWS_mode = False

    def usage(self):
        """
gs_checkperf is a utility to check the cluster performance and SSD performance.

Usage:
  gs_checkperf -? | --help
  gs_checkperf -V | --version
  gs_checkperf [-U USER] [-o OUTPUT] [-i ITEM] [--detail] [-l LOGFILE]

General options:
  -U                 Cluster user.
  -o                 Save the result to the specified file.
  -i                 PMK or SSD performance check items.
                     Example: -i PMK -i SSD.
      --detail       Show detailed information about the PMK check.
  -l                 Path of log files.
  -?, --help         Show help information for this utility,
                     and exit the command line mode.
  -V, --version      Show version information.
        """

        print(self.usage.__doc__)
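
    # Illustrative invocations matching the rules enforced in checkParameter()
    # below (the user name is hypothetical):
    #   gs_checkperf -i PMK --detail          # run as the cluster user
    #   gs_checkperf -U omm -i SSD            # run as root; -U names the cluster user
    #   gs_checkperf -i PMK -o /tmp/perf.out  # save the report to a file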

    def parseItem(self, value):
        """
        function: parse items by value
        input : value
        output: NA
        """
        # parse the parameter '-i' value
        items = value
        for val in items:
            # remove spaces
            val = val.strip()
            # convert val to uppercase
            item = val.upper()
            if item in ("PMK", "SSD"):
                if item not in g_opts.checkItem:
                    g_opts.checkItem.append(item)
            else:
                GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"]
                                       % "i" + " Error: %s." % value)

    def parseCommandLine(self):
        """
        function: do parse command line
                  get user input and save it to variables
        input : NA
        output: NA
        """
        global g_opts
        g_opts = CmdOptions()
        ParaObj = Parameter()
        ParaDict = ParaObj.ParameterCommandLine("checkperf")
        if ("helpFlag" in ParaDict.keys()):
            self.usage()
            sys.exit(0)
        # get parameter values
        if ("logFile" in list(ParaDict.keys())):
            g_opts.logFile = ParaDict.get("logFile")
        if ("user" in list(ParaDict.keys())):
            g_opts.user = ParaDict.get("user")
        if ("outFile" in list(ParaDict.keys())):
            g_opts.outFile = ParaDict.get("outFile")
        if ("itemstr" in list(ParaDict.keys())):
            self.parseItem(ParaDict.get("itemstr"))
        if ("show_detail" in list(ParaDict.keys())):
            g_opts.show_detail = ParaDict.get("show_detail")

    def checkParameter(self):
        """
        function: Check parameters from the command line
        input : NA
        output: NA
        """
        # check outputFile if the parameter '-o' value is not none
        if (g_opts.outFile != ''):
            DefaultValue.checkOutputFile(g_opts.outFile)
        # check mpprc file path
        g_opts.mpprcFile = DefaultValue.getMpprcFile()

        # SSD cannot be checked by the cluster user,
        # and PMK cannot be checked by a user with root permission.
        if (os.getuid() != 0):
            # check if SSD exists
            if ('SSD' in g_opts.checkItem):
                GaussLog.exitWithError(ErrorCode.GAUSS_501["GAUSS_50109"])
        else:
            if ("PMK" in g_opts.checkItem):
                GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50011"]
                                       % ("-i", "PMK") +
                                       " Only cluster user can check the PMK.")

        # By default, check PMK for the cluster user or SSD for the root user
        # if the parameter '-i' value is empty
        if (not g_opts.checkItem):
            if (os.getuid() == 0):
                g_opts.checkItem.append('SSD')
            else:
                g_opts.checkItem.append('PMK')

        # check that the user is the right user
        if (g_opts.user == ""):
            # the parameter '-U' is required when
            # the script is executed by the root user
            if (os.getuid() == 0):
                GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"]
                                       % "U" +
                                       " for a user with the root permission.")
            else:
                # get user
                g_opts.user = pwd.getpwuid(os.getuid()).pw_name

        # check if the user exists and is the right user
        DefaultValue.checkUser(g_opts.user)

        # Get the temporary directory from PGHOST
        tmpDir = DefaultValue.getTmpDirFromEnv(g_opts.user)

        # check if tmpDir exists
        if (not os.path.exists(tmpDir)):
            raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"]
                            % ("temporary directory[" + tmpDir + "]"))

        # check log file
        if (g_opts.logFile == ""):
            g_opts.logFile = DefaultValue.getOMLogPath(
                DefaultValue.GS_CHECKPERF_LOG_FILE, g_opts.user, "")

        # PMK is required if the parameter '--detail' exists
        if (g_opts.show_detail and "PMK" not in g_opts.checkItem):
            GaussLog.exitWithError(
                ErrorCode.GAUSS_500["GAUSS_50002"] % "-detail" + ".")

    def initGlobal(self):
        """
        function: Init logger
        input : NA
        output: NA
        """
        # declare global variables
        try:
            global g_logger
            global g_sshTool
            # initialize log
            g_logger = GaussLog(g_opts.logFile, "gs_checkperf")
            # modify the file's owner
            if (os.getuid() == 0):
                g_file.changeOwner(g_opts.user, g_logger.logFile)
            # Init cluster from the static configuration file
            self.clusterInfo.initFromStaticConfig(g_opts.user)
            # get directory name
            dirName = os.path.dirname(g_opts.logFile)
            g_opts.localLog = os.path.join(dirName,
                                           DefaultValue.LOCAL_LOG_FILE)
            # check if appPath exists
            if (not os.path.exists(self.clusterInfo.appPath)):
                raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"]
                                % ("local install path[" +
                                   self.clusterInfo.appPath + "]"))
            # initialize sshTool
            g_sshTool = SshTool(self.clusterInfo.getClusterNodeNames(),
                                g_logger.logFile,
                                DefaultValue.TIMEOUT_PSSH_CHECK)

            binPath = os.path.join(self.clusterInfo.appPath, "bin")
            g_opts.databaseSizeFile = os.path.join(binPath,
                                                   DefaultValue.DB_SIZE_FILE)
        except Exception as e:
            g_logger.logExit(str(e))

    def checkUserInfo(self):
        """
        function: Check user information
        input : NA
        output: NA
        """
        # get user and group
        (user, group) = g_file.getfileUser(self.clusterInfo.appPath)
        # check if the user is right
        if (user != g_opts.user):
            g_logger.logExit(
                ErrorCode.GAUSS_503["GAUSS_50304"] % (g_opts.user, user))


if __name__ == '__main__':
    # main function
    try:
        checkperf = Checkperf()
        # do parse command line
        checkperf.parseCommandLine()
        # Check parameters from the command line
        checkperf.checkParameter()
        # Init logger
        checkperf.initGlobal()
    except Exception as e:
        GaussLog.exitWithError(str(e))

    try:
        # Check user information
        checkperf.checkUserInfo()

        impl = CheckperfImplOLAP()
        impl.opts = g_opts
        impl.logger = g_logger
        impl.clusterInfo = checkperf.clusterInfo
        impl.sshTool = g_sshTool
        impl.DWS_mode = checkperf.DWS_mode
        # Perform the whole check process
        impl.run()
    except Exception as e:
        g_logger.error(str(e))
        sys.exit(1)

    sys.exit(0)
397
script/gs_collector
Normal file
@@ -0,0 +1,397 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#############################################################################
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
# Description : gs_collector is a utility
#               to collect information about the cluster.
#############################################################################

import os
import sys
import pwd
import time
import json
from datetime import datetime
from datetime import timedelta

from gspylib.common.GaussLog import GaussLog
from gspylib.common.Common import DefaultValue
from gspylib.common.ParallelBaseOM import ParallelBaseOM
from gspylib.common.ErrorCode import ErrorCode
from gspylib.common.ParameterParsecheck import Parameter
from impl.collect.OLAP.CollectImplOLAP import CollectImplOLAP


def my_obj_pairs_hook(lst):
    result = {}
    count = {}
    for key, val in lst:
        if key in count:
            count[key] = 1 + count[key]
        else:
            count[key] = 1
        if key in result:
            if count[key] >= 2:
                GaussLog.exitWithError(
                    ErrorCode.GAUSS_512["GAUSS_51245"] % key)
            else:
                result[key] = [result[key], val]
        else:
            result[key] = val
    return result
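
# Minimal sketch of what the hook above does when json.loads() feeds it the raw
# key/value pairs of one JSON object (values are illustrative):
#   my_obj_pairs_hook([("TypeName", "Log"), ("Content", "DataNode")])
#       -> {"TypeName": "Log", "Content": "DataNode"}
#   my_obj_pairs_hook([("TypeName", "Log"), ("TypeName", "XLog")])
#       -> exits with the GAUSS_51245 error, i.e. duplicate keys in
#          gs_collector.json are rejected rather than silently merged.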


class Collect(ParallelBaseOM):
    """
    define options
    """

    def __init__(self):
        ParallelBaseOM.__init__(self)
        # initialize variables
        self.host = ""
        self.inFile = ""
        self.outFile = ""
        self.nodeName = []
        self.config = {}
        self.appPath = ""

        self.begintime = ""
        self.endtime = ""
        self.keyword = ""
        # speed limit for copy/scp of files, in MB/s
        self.speedLimit = 1024
        self.speedLimitFlag = 0

        # config file
        self.configFile = ""

        # Our products may generate about 200 MB per DN per day,
        # so the max log size is (8 DN * (1 primary + 7 standbys) + 1 CN) * 200 MB = 13 GB/node.
        # Other logs, such as OM/CM/Audit, are ignored here because they are too small.
        self.LOG_SIZE_PER_DAY_ONE_NODE = 1024 * 13

        # As tested, the speed for packaging logs into a compressed tar file is 45 MB/s.
        self.TAR_SPEED = 45

        # endtime - begintime, in days, rounded up.
        self.duration = 0
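
        # Arithmetic behind the two constants above: 8 DN groups x (1 primary +
        # 7 standbys) + 1 CN = 65 instances, and 65 x 200 MB = 13000 MB, hence
        # LOG_SIZE_PER_DAY_ONE_NODE = 1024 * 13 MB; at TAR_SPEED = 45 MB/s,
        # packing one such day of logs takes roughly 13 * 1024 / 45 ~ 296 seconds
        # per node (an illustrative estimate, not a measured value).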

    #############################################################################
    # Parse and check parameters
    #############################################################################
    def usage(self):
        """
gs_collector is a utility to collect information about the cluster.

Usage:
  gs_collector -? | --help
  gs_collector -V | --version
  gs_collector --begin-time="BEGINTIME" --end-time="ENDTIME" [-h HOSTNAME | -f HOSTFILE]
               [--keyword=KEYWORD] [--speed-limit=SPEED] [-o OUTPUT] [-l LOGFILE]

General options:
      --begin-time=BEGINTIME  Time to start log file collection. Pattern: yyyymmdd hh:mm.
      --end-time=ENDTIME      Time to end log file collection. Pattern: yyyymmdd hh:mm.
      --speed-limit=SPEED     Bandwidth to copy files, a nonnegative integer, in MByte/s.
                              0 means unlimited. Only supported if the rsync command exists.
  -h                          Names of hosts whose information is to be collected.
                              Example: host1,host2.
  -f                          File listing the names of all the hosts to connect to.
      --keyword=KEYWORD       Save log files containing the keyword.
  -o                          Save the result to the specified file.
  -l                          Path of log file.
  -?, --help                  Show help information for this utility, and exit the command line mode.
  -V, --version               Show version information.
  -C                          gs_collector config file, listing which info to collect.

# gs_collector.json example
{
    "Collect":
    [
        {"TypeName": "name", "Content": "value", "Interval": "seconds", "Count": "counts"}  # Interval is in seconds
    ]
}

# TypeName : content
COLLECT_INFO_MAP
{
    "System"   : "HardWareInfo,RunTimeInfo",
    "Database" : "pg_locks,pg_stat_activity,pg_thread_wait_status",
    "Log"      : "DataNode,ClusterManager",
    "XLog"     : "DataNode",
    "Config"   : "DataNode",
    "Gstack"   : "DataNode",
    "CoreDump" : "gaussdb,GaussMaster,gs_ctl",
    "Trace"    : "Dump",
    "Plan"     : "*"  # Any database name or the character "*"
}
        """
        print(self.usage.__doc__)
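
    # Illustrative invocation of the usage shown above (the times, hosts and
    # output path are hypothetical):
    #   gs_collector --begin-time="20200801 09:00" --end-time="20200801 12:00" \
    #                -h node1,node2 --keyword="ERROR" -o /tmp/collector_result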

    def dateCheck(self, datestr):
        """
        function: check whether the date format is correct
        input : timedate
        output: bool
        """
        # Check the time format
        try:
            time.strptime(datestr, "%Y%m%d %H:%M")
            if (len(datestr.split(" ")[0]) != 8
                    or len(datestr.split(" ")[1]) != 5):
                return False
            return True
        except Exception:
            return False

    def parseCommandLine(self):
        """
        function: do parse command line
        input : cmdCommand
        output: help/version information
        """
        # Parse command
        ParaObj = Parameter()
        ParaDict = ParaObj.ParameterCommandLine("collector")

        # If help is included in the parameters,
        # the help message is printed and the program exits
        if (ParaDict.__contains__("helpFlag")):
            self.usage()
            sys.exit(0)
        # Save parameter
        if (ParaDict.__contains__("nodename")):
            self.nodeName = ParaDict.get("nodename")
        # Save parameter hostfile
        if (ParaDict.__contains__("hostfile")):
            self.inFile = ParaDict.get("hostfile")
        # Save parameter begintime
        if (ParaDict.__contains__("begintime")):
            self.begintime = ParaDict.get("begintime")
            # Check that the begin time parameter format is correct
            if (not self.dateCheck(self.begintime)):
                GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50003"] %
                                       ('-begin-time',
                                        "date") + " Pattern: yyyymmdd hh:mm.")
        # Save parameter endtime
        if (ParaDict.__contains__("endtime")):
            self.endtime = ParaDict.get("endtime")
            # Check that the end time parameter format is correct
            if (not self.dateCheck(self.endtime)):
                GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50003"] %
                                       ('-end-time',
                                        "date") + " Pattern: yyyymmdd hh:mm.")
        # Save parameter keyword
        if (ParaDict.__contains__("keyword")):
            self.keyword = ParaDict.get("keyword")
        # Save parameter outFile
        if (ParaDict.__contains__("outFile")):
            self.outFile = ParaDict.get("outFile")
        # Save parameter logFile
        if (ParaDict.__contains__("logFile")):
            self.logFile = ParaDict.get("logFile")

        # Get the speed limit for copying/remote copying files.
        if (ParaDict.__contains__("speedLimit")):
            self.speedLimit = str(ParaDict.get("speedLimit"))
            if (not self.speedLimit.isdigit() or int(self.speedLimit) < 0):
                GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50003"] %
                                       ('-speed-limit',
                                        'a nonnegative integer'))
            self.speedLimit = int(self.speedLimit)
            self.speedLimitFlag = 1

        # Save parameter configFile
        if (ParaDict.__contains__("configFile")):
            self.configFile = ParaDict.get("configFile")

    def checkParameter(self):
        """
        function: do parameter checking
        input : check parameters
        output: ErrorCode
        """

        # The -h and -f parameters cannot be specified at the same time
        if (len(self.nodeName) != 0 and self.inFile != ""):
            GaussLog.exitWithError(
                ErrorCode.GAUSS_500["GAUSS_50005"] % ('h', 'f'))

        if (self.inFile != ""):
            # Check if the hostname file exists
            if (not os.path.exists(self.inFile)):
                GaussLog.exitWithError(
                    ErrorCode.GAUSS_502["GAUSS_50201"] % self.inFile)
            # Get the values in the hostname file
            with open(self.inFile, "r") as fp:
                for line in fp:
                    node = line.strip().split("\n")[0]
                    if node is not None and node != "" \
                            and (node not in self.nodeName):
                        self.nodeName.append(node)
            # An error exit if no node name is available
            if len(self.nodeName) == 0:
                GaussLog.exitWithError(
                    ErrorCode.GAUSS_502["GAUSS_50203"] % self.inFile)
        # check configFile
        if self.configFile == "":
            self.configFile = "%s/%s" % (
                os.path.dirname(os.path.realpath(__file__)),
                DefaultValue.GS_COLLECTOR_CONFIG_FILE)

        if self.configFile != "":
            # Check if the config file exists
            if not os.path.exists(self.configFile):
                GaussLog.exitWithError(
                    ErrorCode.GAUSS_502["GAUSS_50201"] % self.configFile)
            # Get the values in the configFile file
            try:
                with open(self.configFile, "r") as fp:
                    config_json = json.loads(fp.read(),
                                             object_pairs_hook=my_obj_pairs_hook)
                items = config_json.items()

                for key, value in items:
                    if str(key) != "Collect":
                        GaussLog.exitWithError(
                            ErrorCode.GAUSS_512["GAUSS_51242"] % (
                                self.configFile, str(key)))
                    for it in value:
                        d_c = ""
                        u_c = ""
                        for k, v in it.items():
                            if k not in DefaultValue.COLLECT_CONF_JSON_KEY_LIST:
                                GaussLog.exitWithError(
                                    ErrorCode.GAUSS_512["GAUSS_51242"]
                                    % (self.configFile, str(k)))
                            if k == "TypeName":
                                d_c = DefaultValue.COLLECT_CONF_MAP[v]
                            elif k == "Content":
                                u_c = v
                            elif k == "Interval" or k == "Count":
                                if (not v.replace(" ", "").isdigit()
                                        or int(v.replace(" ", "")) < 0):
                                    GaussLog.exitWithError(
                                        ErrorCode.GAUSS_512["GAUSS_51241"]
                                        % (k, v))

                        if len(u_c) > 0 and len(d_c) > 0:
                            T_Name = it["TypeName"]
                            it["Content"] = ""
                            if T_Name in "Plan,Database":
                                it["Content"] = u_c
                            else:
                                uc = u_c.replace(" ", "").split(",")
                                for c in uc:
                                    if c not in d_c:
                                        GaussLog.exitWithError(
                                            ErrorCode.GAUSS_512["GAUSS_51243"]
                                            % (c, it['TypeName'],
                                               self.configFile))
                                    elif DefaultValue.COLLECT_CONF_CONTENT_MAP.__contains__(c):
                                        it["Content"] += \
                                            DefaultValue.COLLECT_CONF_CONTENT_MAP[c] + ","
                                    else:
                                        GaussLog.exitWithError(
                                            ErrorCode.GAUSS_512["GAUSS_51244"]
                                            % c)
                            if self.config.__contains__(T_Name):
                                self.config[T_Name].append(it)
                            else:
                                contentList = [it]
                                self.config[T_Name] = contentList
                        else:
                            GaussLog.exitWithError(
                                ErrorCode.GAUSS_512["GAUSS_51240"])

            except Exception as e:
                GaussLog.exitWithError(
                    ErrorCode.GAUSS_512["GAUSS_51239"] % self.configFile)

        if len(self.config) == 0:
            GaussLog.exitWithError(ErrorCode.GAUSS_535["GAUSS_53516"])

        # An error exit if the begin time parameter is not entered
        if (not self.begintime):
            GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"]
                                   % '-begin-time' + " for [gs_collector].")
        else:
            # Extract the time in --begin-time according to the format
            self.begintime = self.begintime.replace(" ", "").replace(":", "")

        # An error exit if the end time parameter is not entered
        if (not self.endtime):
            GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"]
                                   % '-end-time' + " for [gs_collector].")
        else:
            # Extract the time in --end-time according to the format
            self.endtime = self.endtime.replace(" ", "").replace(":", "")

        if self.endtime and self.begintime:
            # The start time must be earlier than the end time,
            # notice: using string comparison !!!
            if (self.endtime < self.begintime):
                GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"]
                                       % "-end-time or --begin-time" +
                                       "The value of '--end-time' must"
                                       " be greater than the value "
                                       "of '--begin-time'.")
            datebegin = datetime.strptime(self.begintime, "%Y%m%d%H%M")
            dateend = datetime.strptime(self.endtime, "%Y%m%d%H%M")
            diff = dateend - datebegin
            self.duration = diff.days + 1

        # check mpprc file path
        self.mpprcFile = DefaultValue.getMpprcFile()
        # check if the user exists and is the right user
        try:
            self.user = pwd.getpwuid(os.getuid()).pw_name
            DefaultValue.checkUser(self.user)
        except Exception as e:
            GaussLog.exitWithError(str(e))

        # check log file
        if (self.logFile == ""):
            self.logFile = DefaultValue.getOMLogPath(
                DefaultValue.GS_COLLECTOR_LOG_FILE, self.user, "")

        if (self.speedLimit == 0):
            self.speedLimit = 1024


if __name__ == '__main__':
    """
    function: main
    input : NA
    output: NA
    """
    # check whether running as root
    if (os.getuid() == 0):
        GaussLog.exitWithError(ErrorCode.GAUSS_501["GAUSS_50105"])
    try:
        # Instantiate the class
        collectObj = Collect()

        # Initialize self, parse the command line and save to global variables
        collectObj.parseCommandLine()
        # check the parameters
        collectObj.checkParameter()
        impl = CollectImplOLAP(collectObj)
        impl.run()
    except Exception as e:
        GaussLog.exitWithError(str(e))
    sys.exit(0)
329
script/gs_dropnode
Normal file
@@ -0,0 +1,329 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : gs_dropnode is a utility to drop a standby node from the cluster
|
||||
#############################################################################
|
||||
|
||||
import datetime
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import pwd
|
||||
import grp
|
||||
|
||||
sys.path.append(sys.path[0])
|
||||
from gspylib.common.DbClusterInfo import dbClusterInfo
|
||||
from gspylib.common.DbClusterStatus import DbClusterStatus
|
||||
from gspylib.common.GaussLog import GaussLog
|
||||
from gspylib.common.Common import DefaultValue, ClusterCommand
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.common.ParallelBaseOM import ParallelBaseOM
|
||||
from gspylib.common.ParameterParsecheck import Parameter
|
||||
from gspylib.threads.SshTool import SshTool
|
||||
from impl.dropnode.DropnodeImpl import DropnodeImpl
|
||||
|
||||
ENV_LIST = ["MPPDB_ENV_SEPARATE_PATH", "GPHOME", "PATH",
|
||||
"LD_LIBRARY_PATH", "PYTHONPATH", "GAUSS_WARNING_TYPE",
|
||||
"GAUSSHOME", "PATH", "LD_LIBRARY_PATH",
|
||||
"S3_CLIENT_CRT_FILE", "GAUSS_VERSION", "PGHOST",
|
||||
"GS_CLUSTER_NAME", "GAUSSLOG", "GAUSS_ENV", "umask"]
|
||||
|
||||
|
||||
class Dropnode(ParallelBaseOM):
|
||||
"""
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
"""
|
||||
ParallelBaseOM.__init__(self)
|
||||
# Add the standby node backip list which need to be deleted
|
||||
self.hostIpListForDel = []
|
||||
self.hostMapForDel = {}
|
||||
self.hostMapForExist = {}
|
||||
self.clusterInfo = dbClusterInfo()
|
||||
self.backIpNameMap = {}
|
||||
self.failureHosts = []
|
||||
self.flagOnlyPrimary = False
|
||||
envFile = DefaultValue.getEnv("MPPDB_ENV_SEPARATE_PATH")
|
||||
if envFile:
|
||||
self.envFile = envFile
|
||||
self.userProfile = envFile
|
||||
else:
|
||||
self.envFile = "/etc/profile"
|
||||
cmd = "echo ~%s" % self.user
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
self.userProfile = os.path.join(output, ".bashrc")
|
||||
|
||||
def usage(self):
|
||||
"""
|
||||
gs_dropnode is a utility to delete a standby node from a cluster.
|
||||
|
||||
Usage:
|
||||
gs_dropnode -? | --help
|
||||
gs_dropnode -V | --version
|
||||
gs_dropnode -U USER -G GROUP -h nodeList
|
||||
General options:
|
||||
-U Cluster user.
|
||||
-G Group of the cluster user.
|
||||
-h Back IP list of the standby nodes to be deleted.
|
||||
Separate multiple nodes with commas (,).
|
||||
such as '-h 192.168.0.1,192.168.0.2'
|
||||
-?, --help Show help information for this
|
||||
utility, and exit the command line mode.
|
||||
-V, --version Show version information.
|
||||
"""
|
||||
print(self.usage.__doc__)
|
||||
|
||||
def parseCommandLine(self):
|
||||
"""
|
||||
parse parameter from command line
|
||||
"""
|
||||
ParaObj = Parameter()
|
||||
ParaDict = ParaObj.ParameterCommandLine("dropnode")
|
||||
|
||||
# parameter -h or -?
|
||||
if (ParaDict.__contains__("helpFlag")):
|
||||
self.usage()
|
||||
sys.exit(0)
|
||||
# Resolves command line arguments
|
||||
# parameter -U
|
||||
if (ParaDict.__contains__("user")):
|
||||
self.user = ParaDict.get("user")
|
||||
DefaultValue.checkPathVaild(self.user)
|
||||
# parameter -G
|
||||
if (ParaDict.__contains__("group")):
|
||||
self.group = ParaDict.get("group")
|
||||
# parameter -h
|
||||
if (ParaDict.__contains__("nodename")):
|
||||
self.hostIpListForDel = ParaDict.get("nodename")
|
||||
|
||||
def checkParameters(self):
|
||||
"""
|
||||
function: Check parameter from command line
|
||||
input: NA
|
||||
output: NA
|
||||
"""
|
||||
|
||||
# check user | group | node
|
||||
if len(self.user) == 0:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_358["GAUSS_35801"] % "-U")
|
||||
if len(self.group) == 0:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_358["GAUSS_35801"] % "-G")
|
||||
if len(self.hostIpListForDel) == 0:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_358["GAUSS_35801"] % "-h")
|
||||
|
||||
try:
|
||||
pw_user = pwd.getpwnam(self.user)
|
||||
gr_group = grp.getgrnam(self.group)
|
||||
except KeyError as e:
|
||||
if self.user in e.args[0]:
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_503["GAUSS_50300"] % self.user)
|
||||
if self.group in e.args[0]:
|
||||
self.logger.log("Group %s not exist." % self.group)
|
||||
sys.exit(1)
|
||||
|
||||
# get dbcluster info from static config file
|
||||
self.clusterInfo.initFromStaticConfig(self.user)
|
||||
appPath = self.clusterInfo.appPath
|
||||
db_uid = os.stat(appPath).st_uid
|
||||
db_gid = os.stat(appPath).st_gid
|
||||
if db_uid != pw_user.pw_uid or db_gid != gr_group.gr_gid:
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_503["GAUSS_50323"] % self.user)
|
||||
self.backIpNameMap = {}
|
||||
for node in self.clusterInfo.dbNodes:
|
||||
self.backIpNameMap[node.name] = node.backIps[0]
|
||||
if node.backIps[0] in self.hostIpListForDel:
|
||||
self.hostMapForDel[node.name] = {'ipaddr': node.backIps[0],
|
||||
'datadir': [], 'dn_id': [],
|
||||
'port': []}
|
||||
for i in node.datanodes:
|
||||
self.hostMapForDel[node.name]['datadir'].append(i.datadir)
|
||||
self.hostMapForDel[node.name]['dn_id'].append(
|
||||
'dn_' + str(i.instanceId))
|
||||
self.hostMapForDel[node.name]['port'].append(str(i.port))
|
||||
else:
|
||||
self.hostMapForExist[node.name] = {'ipaddr': node.backIps[0],
|
||||
'datadir': [], 'dn_id': [],
|
||||
'port': [],
|
||||
'replToBeDel': [],
|
||||
'syncStandbyDel': [],
|
||||
'pghbaDel': []}
|
||||
for i in node.datanodes:
|
||||
self.hostMapForExist[node.name]['datadir'].append(i.datadir)
|
||||
self.hostMapForExist[node.name]['dn_id'].append(
|
||||
'dn_' + str(i.instanceId))
|
||||
self.hostMapForExist[node.name]['port'].append(str(i.port))
|
||||
localIp = self.backIpNameMap[DefaultValue.GetHostIpOrName()]
|
||||
if localIp in self.hostIpListForDel:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_358["GAUSS_35803"] % \
|
||||
localIp)
|
||||
|
||||
for ipLoop in self.hostIpListForDel:
|
||||
if ipLoop not in self.backIpNameMap.values():
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_358["GAUSS_35802"] % \
|
||||
self.hostIpListForDel)
|
||||
|
||||
if not self.hostMapForDel:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_358["GAUSS_35802"] % \
|
||||
self.hostIpListForDel)
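# For reference, a minimal sketch of the per-node record built by checkParameters()
# above; host names, IPs and ports are made-up placeholders, not values from this commit.
example_hostMapForDel = {
    "node2": {
        "ipaddr": "192.168.0.2",           # back IP of the node to be dropped
        "datadir": ["/gaussdb/data/dn1"],  # data directories of its datanodes
        "dn_id": ["dn_6002"],              # instance ids prefixed with 'dn_'
        "port": ["5432"],                  # datanode ports kept as strings
    }
}
# hostMapForExist carries the same keys plus empty 'replToBeDel', 'syncStandbyDel'
# and 'pghbaDel' lists that are filled in later by the drop-node implementation.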
|
||||
|
||||
def check_repeat_process(self):
|
||||
"""
|
||||
function: Check whether another gs_dropnode process is already running for the same user and group
|
||||
exit with an error if a duplicate process is found
|
||||
"""
|
||||
cmd = "ps -ef | grep 'gs_dropnode -U %s -G %s' | grep -v grep" \
|
||||
% (self.user, self.group)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
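# the grep pattern also matches this very process, so one matching line is expected;
# more than one line indicates another gs_dropnode run for the same user and group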
|
||||
if status == 0 and len(output.split('\n')) > 1:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_358["GAUSS_35810"])
|
||||
|
||||
def flagForOnlyPrimaryLeft(self):
|
||||
"""
|
||||
function: Check whether only one node will be left in the cluster after the drop
|
||||
set self.flagOnlyPrimary after the user confirms
|
||||
"""
|
||||
countClusterNodes = len(self.backIpNameMap.values())
|
||||
if (countClusterNodes - len(self.hostIpListForDel)) == 1:
|
||||
flag = input(
|
||||
"The cluster will have only one standalone node left after the operation!"
|
||||
"\nDo you want to continue to drop the target node (yes/no)? ")
|
||||
count_f = 2
|
||||
while count_f:
|
||||
if (
|
||||
flag.upper() != "YES"
|
||||
and flag.upper() != "NO"
|
||||
and flag.upper() != "Y" and flag.upper() != "N"):
|
||||
count_f -= 1
|
||||
flag = input("Please type 'yes' or 'no': ")
|
||||
continue
|
||||
break
|
||||
if flag.upper() != "YES" and flag.upper() != "Y":
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_358["GAUSS_35805"] % flag.upper())
|
||||
self.flagOnlyPrimary = True
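# The two-retry confirmation loop above reappears almost verbatim in
# check_cluster_status() below. A hypothetical helper capturing that logic
# (a sketch only, not part of this change) could look like this:
def confirm_or_exit(prompt, retries=2):
    # ask a yes/no question, re-prompting a limited number of times
    answer = input(prompt)
    while retries and answer.upper() not in ("YES", "Y", "NO", "N"):
        retries -= 1
        answer = input("Please type 'yes' or 'no': ")
    if answer.upper() not in ("YES", "Y"):
        GaussLog.exitWithError(
            ErrorCode.GAUSS_358["GAUSS_35805"] % answer.upper())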
|
||||
|
||||
def check_cluster_status(self):
|
||||
"""
|
||||
function: Check whether the status of cluster is normal
|
||||
input: NA
|
||||
output: NA
|
||||
"""
|
||||
tmpDir = DefaultValue.getTmpDirFromEnv()
|
||||
tmpFile = os.path.join(tmpDir, "gauss_cluster_status.dat_" + \
|
||||
str(datetime.datetime.now().strftime(
|
||||
'%Y%m%d%H%M%S')) + "_" + str(os.getpid()))
|
||||
cmd = ClusterCommand.getQueryStatusCmd(self.user, "", tmpFile, False)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if status != 0:
|
||||
self.logger.debug("The cmd is %s " % cmd)
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % \
|
||||
cmd + "Error: \n%s" % output)
|
||||
|
||||
# Initialize cluster status information for the temporary file
|
||||
clusterStatus = DbClusterStatus()
|
||||
clusterStatus.initFromFile(tmpFile)
|
||||
|
||||
clsStatus = clusterStatus.clusterStatusDetail
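# str(dict.keys()) renders as "dict_keys([...])"; the [9:] slice below drops the
# 'dict_keys' prefix so that only the bracketed list of node names is printed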
|
||||
statusDelHost = "The target node to be dropped is %s \n" % str(
|
||||
self.hostMapForDel.keys())[9:]
|
||||
if clsStatus in ["Unknown", "Unavailable"]:
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_358["GAUSS_35806"] % clsStatus)
|
||||
|
||||
for dndir_loop in \
|
||||
self.hostMapForExist[DefaultValue.GetHostIpOrName()]['datadir']:
|
||||
cmd = "gs_ctl query -D %s|grep '\<local_role\>'| " \
|
||||
"awk -F ':' '{print $2}'" % dndir_loop
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if 'Primary' not in output:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_358["GAUSS_35804"])
|
||||
|
||||
flag = input(
|
||||
statusDelHost + "Do you want to continue "
|
||||
"to drop the target node (yes/no)? ")
|
||||
count_f = 2
|
||||
while count_f:
|
||||
if (
|
||||
flag.upper() != "YES"
|
||||
and flag.upper() != "NO"
|
||||
and flag.upper() != "Y" and flag.upper() != "N"):
|
||||
count_f -= 1
|
||||
flag = input("Please type 'yes' or 'no': ")
|
||||
continue
|
||||
break
|
||||
if flag.upper() != "YES" and flag.upper() != "Y":
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_358["GAUSS_35805"] % flag.upper())
|
||||
|
||||
def checkConnection(self, hostnames, env):
|
||||
"""
|
||||
check the node connections; the timeout is reduced to 30s because the default 330s is too long
|
||||
if a node that will not be deleted cannot be connected, report an error
|
||||
else continue
|
||||
"""
|
||||
command = "echo 1"
|
||||
sshTool = SshTool(hostnames, None, 30)
|
||||
resultMap, outputCollect = sshTool.getSshStatusOutput(command,
|
||||
hostnames, env)
|
||||
self.logger.debug(outputCollect)
|
||||
self.failureHosts = '.'.join(re.findall(r"\[FAILURE\] .*:.*\n",
|
||||
outputCollect))
|
||||
for host in list(self.hostMapForExist.keys()):
|
||||
if host in self.failureHosts:
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_358["GAUSS_35807"] % host)
|
||||
|
||||
def initLogs(self):
|
||||
"""
|
||||
init log file
|
||||
"""
|
||||
if not os.path.isfile(self.userProfile):
|
||||
raise Exception(
|
||||
ErrorCode.GAUSS_502["GAUSS_50210"] % self.userProfile)
|
||||
log_path = DefaultValue.getEnvironmentParameterValue("GAUSSLOG",
|
||||
self.user,
|
||||
self.userProfile)
|
||||
self.logFile = os.path.realpath(
|
||||
"%s/om/%s" % (log_path, DefaultValue.DROPNODE_LOG_FILE))
|
||||
# if not absolute path
|
||||
if not os.path.isabs(self.logFile):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_502["GAUSS_50213"] % "log")
|
||||
self.initLogger("gs_dropnode")
|
||||
self.logger.ignoreErr = True
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# check if user is root
|
||||
if (os.getuid() == 0):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_501["GAUSS_50105"])
|
||||
dropNode = Dropnode()
|
||||
dropNode.parseCommandLine()
|
||||
dropNode.initLogs()
|
||||
dropNode.check_repeat_process()
|
||||
dropNode.checkParameters()
|
||||
dropNode.check_cluster_status()
|
||||
dropNode.flagForOnlyPrimaryLeft()
|
||||
dropNode.checkConnection(list(dropNode.backIpNameMap.keys()),
|
||||
dropNode.envFile)
|
||||
dropNodeImpl = DropnodeImpl(dropNode)
|
||||
dropNodeImpl.run()
|
||||
249
script/gs_expansion
Normal file
249
script/gs_expansion
Normal file
@ -0,0 +1,249 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : gs_expansion is a utility to expand standby node databases
|
||||
#############################################################################
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
sys.path.append(sys.path[0])
|
||||
from gspylib.common.DbClusterInfo import dbClusterInfo, \
|
||||
readOneClusterConfigItem, initParserXMLFile, dbNodeInfo, checkPathVaild
|
||||
from gspylib.common.GaussLog import GaussLog
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.common.ParallelBaseOM import ParallelBaseOM
|
||||
from gspylib.common.ParameterParsecheck import Parameter
|
||||
from impl.preinstall.OLAP.PreinstallImplOLAP import PreinstallImplOLAP
|
||||
from gspylib.threads.SshTool import SshTool
|
||||
from impl.expansion.ExpansionImpl import ExpansionImpl
|
||||
|
||||
ENV_LIST = ["MPPDB_ENV_SEPARATE_PATH", "GPHOME", "PATH",
|
||||
"LD_LIBRARY_PATH", "PYTHONPATH", "GAUSS_WARNING_TYPE",
|
||||
"GAUSSHOME", "PATH", "LD_LIBRARY_PATH",
|
||||
"S3_CLIENT_CRT_FILE", "GAUSS_VERSION", "PGHOST",
|
||||
"GS_CLUSTER_NAME", "GAUSSLOG", "GAUSS_ENV", "umask"]
|
||||
|
||||
class Expansion(ParallelBaseOM):
|
||||
"""
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
"""
|
||||
ParallelBaseOM.__init__(self)
|
||||
# new added standby node backip list
|
||||
self.newHostList = []
|
||||
self.clusterInfoDict = {}
|
||||
self.backIpNameMap = {}
|
||||
self.packagepath = os.path.realpath(
|
||||
os.path.join(os.path.realpath(__file__), "../../"))
|
||||
|
||||
self.standbyLocalMode = False
|
||||
self.envFile = DefaultValue.getEnv("MPPDB_ENV_SEPARATE_PATH")
|
||||
|
||||
def usage(self):
|
||||
"""
|
||||
gs_expansion is a utility to expand standby nodes for a cluster.
|
||||
|
||||
Usage:
|
||||
gs_expansion -? | --help
|
||||
gs_expansion -V | --version
|
||||
gs_expansion -U USER -G GROUP -X XMLFILE -h nodeList [-L]
|
||||
General options:
|
||||
-U Cluster user.
|
||||
-G Group of the cluster user.
|
||||
-X Path of the XML configuration file.
|
||||
-h Back IP list of the new standby nodes.
|
||||
Separate multiple nodes with commas (,).
|
||||
such as '-h 192.168.0.1,192.168.0.2'
|
||||
-L Install the standby database in
|
||||
local mode.
|
||||
-?, --help Show help information for this
|
||||
utility, and exit the command line mode.
|
||||
-V, --version Show version information.
|
||||
"""
|
||||
print(self.usage.__doc__)
|
||||
|
||||
def parseCommandLine(self):
|
||||
"""
|
||||
parse parameter from command line
|
||||
"""
|
||||
ParaObj = Parameter()
|
||||
ParaDict = ParaObj.ParameterCommandLine("expansion")
|
||||
|
||||
# parameter -h or -?
|
||||
if (ParaDict.__contains__("helpFlag")):
|
||||
self.usage()
|
||||
sys.exit(0)
|
||||
# Resolves command line arguments
|
||||
# parameter -U
|
||||
if (ParaDict.__contains__("user")):
|
||||
self.user = ParaDict.get("user")
|
||||
DefaultValue.checkPathVaild(self.user)
|
||||
# parameter -G
|
||||
if (ParaDict.__contains__("group")):
|
||||
self.group = ParaDict.get("group")
|
||||
# parameter -X
|
||||
if (ParaDict.__contains__("confFile")):
|
||||
self.xmlFile = ParaDict.get("confFile")
|
||||
# parameter -L
|
||||
if (ParaDict.__contains__("localMode")):
|
||||
self.localMode = ParaDict.get("localMode")
|
||||
self.standbyLocalMode = ParaDict.get("localMode")
|
||||
# parameter -l
|
||||
if (ParaDict.__contains__("logFile")):
|
||||
self.logFile = ParaDict.get("logFile")
|
||||
#parameter -h
|
||||
if (ParaDict.__contains__("nodename")):
|
||||
self.newHostList = ParaDict.get("nodename")
|
||||
|
||||
|
||||
def checkParameters(self):
|
||||
"""
|
||||
function: Check parameter from command line
|
||||
input: NA
|
||||
output: NA
|
||||
"""
|
||||
|
||||
# check user | group | xmlfile | node
|
||||
if len(self.user) == 0:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35701"] % "-U")
|
||||
if len(self.group) == 0:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35701"] % "-G")
|
||||
if len(self.xmlFile) == 0:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35701"] % "-X")
|
||||
if len(self.newHostList) == 0:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35701"] % "-h")
|
||||
|
||||
clusterInfo = ExpansipnClusterInfo()
|
||||
hostNameIpDict = clusterInfo.initFromXml(self.xmlFile)
|
||||
clusterDict = clusterInfo.getClusterDirectorys()
|
||||
backIpList = clusterInfo.getClusterBackIps()
|
||||
nodeNameList = clusterInfo.getClusterNodeNames()
|
||||
|
||||
# Only a single AZ is supported for now.
|
||||
azNames = clusterInfo.getazNames()
|
||||
self.azName = "AZ1"
|
||||
if len(azNames) > 0:
|
||||
self.azName = azNames[0]
|
||||
|
||||
self.localIp = backIpList[0]
|
||||
self.nodeNameList = nodeNameList
|
||||
self.backIpNameMap = {}
|
||||
for backip in backIpList:
|
||||
self.backIpNameMap[backip] = clusterInfo.getNodeNameByBackIp(backip)
|
||||
|
||||
# check parameter node must in xml config file
|
||||
for nodeid in self.newHostList:
|
||||
if nodeid not in backIpList:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35702"] % \
|
||||
nodeid)
|
||||
|
||||
# get corepath and toolpath from xml file
|
||||
corePath = clusterInfo.readClustercorePath(self.xmlFile)
|
||||
toolPath = clusterInfo.getToolPath(self.xmlFile)
|
||||
# parse xml file and cache node info
|
||||
clusterInfoDict = {}
|
||||
clusterInfoDict["appPath"] = clusterDict["appPath"][0]
|
||||
clusterInfoDict["logPath"] = clusterDict["logPath"][0]
|
||||
clusterInfoDict["corePath"] = corePath
|
||||
clusterInfoDict["toolPath"] = toolPath
|
||||
for nodeName in nodeNameList:
|
||||
hostInfo = hostNameIpDict[nodeName]
|
||||
ipList = hostInfo[0]
|
||||
portList = hostInfo[1]
|
||||
backIp = ""
|
||||
sshIp = ""
|
||||
if len(ipList) == 1:
|
||||
backIp = sshIp = ipList[0]
|
||||
elif len(ipList) == 2:
|
||||
backIp = ipList[0]
|
||||
sshIp = ipList[1]
|
||||
port = portList[0]
|
||||
cluster = clusterDict[nodeName]
|
||||
dataNode = cluster[2]
|
||||
clusterInfoDict[nodeName] = {
|
||||
"backIp": backIp,
|
||||
"sshIp": sshIp,
|
||||
"port": port,
|
||||
"localport": int(port) + 1,
|
||||
"localservice": int(port) + 4,
|
||||
"heartBeatPort": int(port) + 3,
|
||||
"dataNode": dataNode,
|
||||
"instanceType": -1
|
||||
}
|
||||
|
||||
nodeIdList = clusterInfo.getClusterNodeIds()
|
||||
for id in nodeIdList:
|
||||
insType = clusterInfo.getdataNodeInstanceType(id)
|
||||
hostName = clusterInfo.getHostNameByNodeId(id)
|
||||
clusterInfoDict[hostName]["instanceType"] = insType
|
||||
self.clusterInfoDict = clusterInfoDict
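# A sketch of one node's entry in clusterInfoDict as built above, with placeholder
# values; note how the auxiliary ports are derived from the datanode port.
example_entry = {
    "backIp": "192.168.0.1",
    "sshIp": "192.168.0.1",
    "port": 25400,              # datanode port read from the XML file
    "localport": 25401,         # port + 1
    "localservice": 25404,      # port + 4
    "heartBeatPort": 25403,     # port + 3
    "dataNode": "/gaussdb/data/dn1",
    "instanceType": -1,         # overwritten later from the XML node ids
}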
|
||||
|
||||
|
||||
def initLogs(self):
|
||||
"""
|
||||
init log file
|
||||
"""
|
||||
# if no log file
|
||||
if (self.logFile == ""):
|
||||
self.logFile = DefaultValue.getOMLogPath(
|
||||
DefaultValue.EXPANSION_LOG_FILE, self.user, "",
|
||||
self.xmlFile)
|
||||
# if not absolute path
|
||||
if (not os.path.isabs(self.logFile)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_502["GAUSS_50213"] % "log")
|
||||
|
||||
self.initLogger("gs_expansion")
|
||||
self.logger.ignoreErr = True
|
||||
|
||||
class ExpansipnClusterInfo(dbClusterInfo):
|
||||
|
||||
def __init__(self):
|
||||
dbClusterInfo.__init__(self)
|
||||
|
||||
def getToolPath(self, xmlFile):
|
||||
"""
|
||||
function : Read tool path from default xml file
|
||||
input : String
|
||||
output : String
|
||||
"""
|
||||
self.setDefaultXmlFile(xmlFile)
|
||||
# read gaussdb tool path from xml file
|
||||
(retStatus, retValue) = readOneClusterConfigItem(
|
||||
initParserXMLFile(xmlFile), "gaussdbToolPath", "cluster")
|
||||
if retStatus != 0:
|
||||
raise Exception(ErrorCode.GAUSS_512["GAUSS_51200"]
|
||||
% "gaussdbToolPath" + " Error: \n%s" % retValue)
|
||||
toolPath = os.path.normpath(retValue)
|
||||
checkPathVaild(toolPath)
|
||||
return toolPath
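# A hedged usage example for the helper above. The XML path and the PARAM element
# layout are assumptions about the cluster configuration file, not part of this commit.
info = ExpansipnClusterInfo()
# the configuration file is expected to define the tool path roughly like:
#   <PARAM name="gaussdbToolPath" value="/opt/huawei/wisequery"/>
tool_path = info.getToolPath("/opt/software/openGauss/cluster_config.xml")
print(tool_path)  # e.g. /opt/huawei/wisequery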
|
||||
|
||||
if __name__ == "__main__":
|
||||
"""
|
||||
"""
|
||||
expansion = Expansion()
|
||||
expansion.parseCommandLine()
|
||||
expansion.checkParameters()
|
||||
expansion.initLogs()
|
||||
expImpl = ExpansionImpl(expansion)
|
||||
expImpl.run()
|
||||
|
||||
308
script/gs_install
Normal file
308
script/gs_install
Normal file
@ -0,0 +1,308 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : gs_install is a utility to deploy a Gauss200 server.
|
||||
#############################################################################
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
sys.path.append(sys.path[0])
|
||||
from gspylib.common.GaussLog import GaussLog
|
||||
from gspylib.common.Common import DefaultValue, ClusterCommand
|
||||
from gspylib.common.DbClusterInfo import dbClusterInfo
|
||||
from gspylib.common.ParallelBaseOM import ParallelBaseOM
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.common.ParameterParsecheck import Parameter
|
||||
from gspylib.os.gsOSlib import g_OSlib
|
||||
from impl.install.OLAP.InstallImplOLAP import InstallImplOLAP
|
||||
|
||||
# exit code
|
||||
EXEC_SUCCESS = 0
|
||||
ROLLBACK_FAILED = 3
|
||||
|
||||
|
||||
class Install(ParallelBaseOM):
|
||||
"""
|
||||
The class is used to perform the installation
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
function: initialize the parameters
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
ParallelBaseOM.__init__(self)
|
||||
self.time_out = None
|
||||
self.alarm_component = ""
|
||||
self.dbInitParam = []
|
||||
self.dataGucParam = []
|
||||
self.action = "gs_install"
|
||||
self.initStep = "Init Install"
|
||||
|
||||
def usage(self):
|
||||
"""
|
||||
gs_install is a utility to deploy a cluster server.
|
||||
|
||||
Usage:
|
||||
gs_install -? | --help
|
||||
gs_install -V | --version
|
||||
gs_install -X XMLFILE [--gsinit-parameter="PARAMETER" [...]]
|
||||
[--dn-guc="PARAMETER" [...]] [--alarm-component=ALARMCOMPONENT]
|
||||
[--time-out=SECS] [-l LOGFILE]
|
||||
|
||||
General options:
|
||||
-X Path of the XML configuration file.
|
||||
-l Path of log file.
|
||||
-?, --help Show help information for this utility, and exit the command line mode.
|
||||
-V, --version Show version information.
|
||||
|
||||
--gsinit-parameter="PARAMETER" Parameters to initialize DN and CN.
|
||||
For more information, see \"gs_initdb --help\".
|
||||
--dn-guc="PARAMETER" Parameters to set the configuration of DN.
|
||||
For more information, see \"gs_guc --help\".
|
||||
--alarm-component=ALARMCOMPONENT Path of the alarm component.
|
||||
--time-out=SECS Maximum waiting time when starting the cluster.
|
||||
"""
|
||||
print(self.usage.__doc__)
|
||||
|
||||
def initGlobals(self):
|
||||
"""
|
||||
function: Init logger
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
try:
|
||||
self.initLogger(self.action)
|
||||
self.logger.debug(
|
||||
"gs_install execution takes %s steps in total" % ClusterCommand.countTotalSteps(
|
||||
self.action, "", self.readOperateStep()))
|
||||
self.logger.log("Parsing the configuration file.", "addStep")
|
||||
# parsing the configuration file, Parameter [refreshCN] does not refresh the CN number
|
||||
self.initClusterInfo(refreshCN=False)
|
||||
self.initComponent()
|
||||
# Initialize self.sshTool
|
||||
self.initSshTool(self.clusterInfo.getClusterNodeNames(),
|
||||
DefaultValue.TIMEOUT_PSSH_INSTALL)
|
||||
if (len(self.clusterInfo.getClusterNodeNames()) == 1 and
|
||||
self.clusterInfo.getClusterNodeNames()[0]
|
||||
== DefaultValue.GetHostIpOrName()):
|
||||
self.isSingle = True
|
||||
self.localMode = True
|
||||
except Exception as e:
|
||||
# failed to parse cluster config file
|
||||
raise Exception(str(e))
|
||||
# Successfully parsed the configuration file
|
||||
self.logger.debug("Successfully parsed the configuration file.",
|
||||
"constant")
|
||||
|
||||
def checkParaList(self, specialStr):
|
||||
"""
|
||||
function:
|
||||
input:
|
||||
output:
|
||||
"""
|
||||
VALUE_CHECK_LIST = ["|", ";", "&", "$", "<", ">", "`", "\\", "{", "}",
|
||||
"(", ")", "[", "]", "~", "*", "?", "!", "\n"]
|
||||
VALUE_CHECK_GUC_PARA_LIST = ["client_encoding", "--encoding"]
|
||||
VALUE_CHECK_ENCODING_LIST = ["LATIN5", "ISO_8859_7", "KOI8U",
|
||||
"LATIN7", "EUC_TW", "WIN1251", "LATIN8",
|
||||
"KOI8R", "UTF8",
|
||||
"ISO_8859_5", "ISO_8859_8", "LATIN9",
|
||||
"LATIN6", "EUC_JP", "EUC_KR", "WIN1255",
|
||||
"EUC_CN",
|
||||
"LATIN3", "LATIN1", "ISO_8859_6", "GBK"]
|
||||
gs_checkStr = specialStr[0]
|
||||
if (gs_checkStr.strip() == ""):
|
||||
return
|
||||
for rac in VALUE_CHECK_LIST:
|
||||
flag = gs_checkStr.find(rac)
|
||||
if flag >= 0:
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50219"]
|
||||
% specialStr + " There are illegal "
|
||||
"characters in the parameter.")
|
||||
if (len(gs_checkStr.split("=")) != 2):
|
||||
return
|
||||
if (gs_checkStr.split("=")[1].strip().startswith("\'") is True and
|
||||
gs_checkStr.split("=")[1].strip().endswith("\'") is False) or \
|
||||
(gs_checkStr.split("=")[1].strip().startswith("\'") is False
|
||||
and gs_checkStr.split("=")[1].strip().endswith(
|
||||
"\'") is True):
|
||||
raise Exception(
|
||||
ErrorCode.GAUSS_502["GAUSS_50219"]
|
||||
% specialStr + " Lack of paired single "
|
||||
"quotation marks. Value: %s" % gs_checkStr)
|
||||
if (gs_checkStr.split("=")[1].strip().startswith("\"") is True and
|
||||
gs_checkStr.split("=")[1].strip().endswith("\"") is False) \
|
||||
or (
|
||||
gs_checkStr.split("=")[1].strip().startswith("\"") is False
|
||||
and gs_checkStr.split("=")[1].strip().endswith("\"") is True):
|
||||
raise Exception(
|
||||
ErrorCode.GAUSS_502["GAUSS_50219"] % specialStr
|
||||
+ " Lack of paired double quotation marks. Value: %s" % gs_checkStr)
|
||||
if gs_checkStr.split("=")[0].strip() in VALUE_CHECK_GUC_PARA_LIST and \
|
||||
(gs_checkStr.split("=")[1].strip().strip("\'").strip(
|
||||
"\"").strip() not in VALUE_CHECK_ENCODING_LIST):
|
||||
raise Exception(
|
||||
ErrorCode.GAUSS_500["GAUSS_50011"] % (
|
||||
gs_checkStr.split("=")[0],
|
||||
gs_checkStr.split("=")[1].strip("\'").strip("\"").strip())
|
||||
+ " Please check parameter '--dn-guc' or '--gsinit-parameter'.")
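# To make the quoting rules above concrete, a few hypothetical parameter strings and
# how checkParaList() treats them (assuming an Install instance named install, as
# created in this script's __main__ block):
ok_params = ["--locale=en_US.UTF-8", "client_encoding='UTF8'"]  # accepted
bad_params = ["client_encoding='UTF8",        # unpaired quote
              "--nodename=dn1;rm -rf /",      # illegal character
              "client_encoding='FOO'"]        # unsupported encoding
for p in ok_params + bad_params:
    try:
        install.checkParaList([p])   # checkParaList inspects the first list element
        print("accepted:", p)
    except Exception as err:
        print("rejected:", p, "-", err)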
|
||||
|
||||
def parseCommandLine(self):
|
||||
"""
|
||||
function: Parse command line and save to global variable
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
# init the ParaObj
|
||||
ParaObj = Parameter()
|
||||
ParaDict = ParaObj.ParameterCommandLine("install")
|
||||
# parameter -h or -?
|
||||
if (ParaDict.__contains__("helpFlag")):
|
||||
self.usage()
|
||||
sys.exit(EXEC_SUCCESS)
|
||||
|
||||
# parameter -X
|
||||
if (ParaDict.__contains__("confFile")):
|
||||
self.xmlFile = ParaDict.get("confFile")
|
||||
# parameter -l
|
||||
if (ParaDict.__contains__("logFile")):
|
||||
self.logFile = ParaDict.get("logFile")
|
||||
# parameter --gsinit-parameter
|
||||
if (ParaDict.__contains__("dbInitParams")):
|
||||
self.dbInitParam = ParaDict.get("dbInitParams")
|
||||
self.checkParaList(self.dbInitParam)
|
||||
# parameter --dn-guc
|
||||
if (ParaDict.__contains__("dataGucParams")):
|
||||
self.dataGucParam = ParaDict.get("dataGucParams")
|
||||
self.checkParaList(self.dataGucParam)
|
||||
# parameter --alarm-component
|
||||
if (ParaDict.__contains__("alarm_component")):
|
||||
self.alarm_component = ParaDict.get("alarm_component")
|
||||
# parameter --time-out
|
||||
if (ParaDict.__contains__("time_out")):
|
||||
self.time_out = ParaDict.get("time_out")
|
||||
|
||||
def checkUser(self):
|
||||
"""
|
||||
"""
|
||||
# get user info
|
||||
self.user = g_OSlib.getUserInfo()['name']
|
||||
# get the group info
|
||||
self.group = g_OSlib.getUserInfo()['g_name']
|
||||
# check the user and group
|
||||
if (self.user == "" or self.group == ""):
|
||||
raise Exception(ErrorCode.GAUSS_503["GAUSS_50308"])
|
||||
if (self.user == "root" or self.group == "root"):
|
||||
raise Exception(ErrorCode.GAUSS_501["GAUSS_50105"])
|
||||
|
||||
def checkConfigFile(self):
|
||||
"""
|
||||
"""
|
||||
if (self.xmlFile == ""):
|
||||
# there is no -X parameter
|
||||
raise Exception(ErrorCode.GAUSS_500["GAUSS_50001"] % 'X'
|
||||
+ ' for the installation.')
|
||||
if (not os.path.exists(self.xmlFile)):
|
||||
# -X parameter value is not exists
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] % self.xmlFile)
|
||||
if (not os.path.isabs(self.xmlFile)):
|
||||
# -X parameter value is not absolute path
|
||||
raise Exception(
|
||||
ErrorCode.GAUSS_502["GAUSS_50213"] % "configuration file")
|
||||
|
||||
def checkDNPara(self):
|
||||
"""
|
||||
"""
|
||||
dnUnsupportedParameters = DefaultValue.findUnsupportedParameters(
|
||||
self.dataGucParam)
|
||||
if (len(dnUnsupportedParameters) != 0):
|
||||
GaussLog.printMessage("The following parameters set for database node will"
|
||||
" not take effect:\n%s"
|
||||
% str(dnUnsupportedParameters))
|
||||
for param in dnUnsupportedParameters:
|
||||
self.dataGucParam.remove(param)
|
||||
|
||||
def checkAlarm(self):
|
||||
"""
|
||||
"""
|
||||
if (self.alarm_component == ""):
|
||||
self.alarm_component = DefaultValue.ALARM_COMPONENT_PATH
|
||||
if (not os.path.isabs(self.alarm_component)):
|
||||
raise Exception(
|
||||
ErrorCode.GAUSS_502["GAUSS_50213"] % "alarm component")
|
||||
|
||||
def checkLogFile(self):
|
||||
"""
|
||||
"""
|
||||
if (self.logFile == ""):
|
||||
# if -l parameter is null
|
||||
self.logFile = DefaultValue.getOMLogPath(
|
||||
DefaultValue.DEPLOY_LOG_FILE, self.user, "", self.xmlFile)
|
||||
if (not os.path.isabs(self.logFile)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50213"] % "log")
|
||||
|
||||
def checkParameter(self):
|
||||
"""
|
||||
function: Check parameter from command line
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
# check required parameters
|
||||
self.checkUser()
|
||||
# check mpprc file path
|
||||
self.mpprcFile = DefaultValue.getMpprcFile()
|
||||
# check config file
|
||||
self.checkConfigFile()
|
||||
# check unsupported -D parameter
|
||||
self.checkDNPara()
|
||||
# check alarm component
|
||||
self.checkAlarm()
|
||||
# check logFile
|
||||
self.checkLogFile()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
"""
|
||||
main function
|
||||
"""
|
||||
# check if user is root
|
||||
if (os.getuid() == 0):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_501["GAUSS_50105"])
|
||||
try:
|
||||
REPEAT = False
|
||||
# Objectize class
|
||||
install = Install()
|
||||
# Initialize self, parse the command line, and save to global variables
|
||||
install.parseCommandLine()
|
||||
# check that the parameters are OK
|
||||
install.checkParameter()
|
||||
# Initialize globals parameters
|
||||
install.initGlobals()
|
||||
# set action flag file
|
||||
DefaultValue.setActionFlagFile("gs_install")
|
||||
|
||||
impl = InstallImplOLAP(install)
|
||||
# Perform the whole install process
|
||||
impl.run()
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
|
||||
finally:
|
||||
DefaultValue.setActionFlagFile("gs_install", None, False)
|
||||
762
script/gs_om
Normal file
762
script/gs_om
Normal file
@ -0,0 +1,762 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : gs_om is a utility to manage a Gauss200 cluster.
|
||||
#############################################################################
|
||||
|
||||
import subprocess
|
||||
import os
|
||||
import sys
|
||||
import pwd
|
||||
|
||||
from gspylib.common.DbClusterInfo import dbClusterInfo
|
||||
from gspylib.common.GaussLog import GaussLog
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.common.ParameterParsecheck import Parameter
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.common.ParallelBaseOM import ParallelBaseOM
|
||||
from gspylib.os.gsOSlib import g_OSlib
|
||||
from gspylib.threads.SshTool import SshTool
|
||||
from impl.om.OLAP.OmImplOLAP import OmImplOLAP
|
||||
from gspylib.common.VersionInfo import VersionInfo
|
||||
|
||||
# action type
|
||||
ACTION_START = "start"
|
||||
ACTION_STOP = "stop"
|
||||
ACTION_STATUS = "status"
|
||||
ACTION_REBUID = "generateconf"
|
||||
ACTION_CERT = "cert"
|
||||
STOP_MODE_FAST = "fast"
|
||||
STOP_MODE_IMMEDIATE = "immediate"
|
||||
ACTION_VIEW = "view"
|
||||
ACTION_QUERY = "query"
|
||||
ACTION_KERBEROS = "kerberos"
|
||||
ACTION_REFRESHCONF = "refreshconf"
|
||||
|
||||
# postgis
|
||||
ACTION_DEL_POSTGIs = "rmlib"
|
||||
|
||||
|
||||
class CmdOptions():
|
||||
"""
|
||||
define option
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.action = ""
|
||||
# if action is "express", use this parameter to store the list of
|
||||
# cluster node names passed by the
|
||||
# command line option "-h".
|
||||
self.nodeName = ""
|
||||
self.time_out = None
|
||||
# if action is "express", use this parameter to store whether to
|
||||
# show the detail message of cluster
|
||||
# node state.
|
||||
self.show_detail = False
|
||||
self.showAll = False
|
||||
self.dataDir = ""
|
||||
self.outFile = ""
|
||||
self.logFile = ""
|
||||
self.localLog = ""
|
||||
self.reset = False
|
||||
self.distribute = False
|
||||
self.certFile = ""
|
||||
self.certRollback = False
|
||||
self.NormalCNNode = []
|
||||
self.mode = ""
|
||||
|
||||
self.user = ""
|
||||
self.group = ""
|
||||
self.userInfo = ""
|
||||
self.mpprcFile = ""
|
||||
self.confFile = ""
|
||||
self.localMode = False
|
||||
self.instanceName = ""
|
||||
self.azName = ""
|
||||
self.nodeId = -1
|
||||
self.clusterInfo = None
|
||||
self.security_mode = "off"
|
||||
|
||||
# kerberos
|
||||
self.kerberosMode = ""
|
||||
self.clusterUser = ""
|
||||
self.kerberosType = ""
|
||||
self.clusterToolPath = ""
|
||||
|
||||
|
||||
###########################################
|
||||
class OperationManager(ParallelBaseOM):
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
init the command options
|
||||
save command line parameter values
|
||||
"""
|
||||
ParallelBaseOM.__init__(self)
|
||||
# command line parameter collection
|
||||
self.g_opts = CmdOptions()
|
||||
self.OM_PARAMETER_DIR = ""
|
||||
|
||||
def usage(self):
|
||||
"""
|
||||
gs_om is a utility to manage a cluster.
|
||||
|
||||
Usage:
|
||||
gs_om -? | --help
|
||||
gs_om -V | --version
|
||||
OLAP scene:
|
||||
gs_om -t start [-h HOSTNAME] [-D dataDir] [--time-out=SECS]
|
||||
[--security-mode=MODE] [-l LOGFILE]
|
||||
gs_om -t stop [-h HOSTNAME] [-D dataDir] [--time-out=SECS] [-m MODE]
|
||||
[-l LOGFILE]
|
||||
gs_om -t status [-h HOSTNAME] [-o OUTPUT] [--detail] [--all] [-l LOGFILE]
|
||||
gs_om -t generateconf -X XMLFILE [--distribute] [-l LOGFILE]
|
||||
gs_om -t cert [--cert-file=CERTFILE | --rollback] [-L] [-l LOGFILE]
|
||||
gs_om -t kerberos -m [install|uninstall] -U USER [-l LOGFILE]
|
||||
[--krb-server|--krb-client]
|
||||
gs_om -t view [-o OUTPUT]
|
||||
gs_om -t query [-o OUTPUT]
|
||||
gs_om -t refreshconf
|
||||
|
||||
General options:
|
||||
-t Type of the OM command.
|
||||
-l Path of log file.
|
||||
-?, --help Show help information for this utility,
|
||||
and exit the command line mode.
|
||||
-V, --version Show version information.
|
||||
|
||||
Options for start
|
||||
-h Name of the host to be started.
|
||||
-D Path of the DN data directory.
|
||||
--time-out=SECS Maximum waiting time when starting the
|
||||
cluster or node.
|
||||
--security-mode=MODE Start the database with security mode: on or off
|
||||
on: start with security mode
|
||||
off: start without security mode
|
||||
|
||||
Options for stop
|
||||
-h Name of the host to be shut down.
|
||||
-m, --mode=MODE Shutdown mode. It can be f (fast),
|
||||
or i (immediate).
|
||||
-D Path of the DN data directory.
|
||||
--time-out=SECS Maximum waiting time when stopping the cluster
|
||||
or node.
|
||||
Options for status
|
||||
-h Name of the host whose status is to be
|
||||
queried.
|
||||
--az Name of the single az whose status is to
|
||||
be queried.
|
||||
-o Save the result to the specified file.
|
||||
--detail Show detailed status information.
|
||||
--all Show all database node status information.
|
||||
|
||||
Options for generating configuration files
|
||||
-X Path of the XML configuration file.
|
||||
--distribute Distribute the static configuration file
|
||||
to the installation directory of the cluster nodes.
|
||||
|
||||
Options for cert
|
||||
--cert-file Path of cert file.
|
||||
--rollback Roll back the SSL certificate files.
|
||||
-L local mode.
|
||||
|
||||
Options for kerberos
|
||||
-m Kerberos management mode. It can be
|
||||
install or uninstall.
|
||||
-U Cluster user.
|
||||
Install options:
|
||||
--krb-server Execute install for server. This parameter
|
||||
only works for install.
|
||||
--krb-client Execute install for client. This parameter
|
||||
only works for install.
|
||||
|
||||
"""
|
||||
|
||||
print(self.usage.__doc__)
|
||||
|
||||
def initGlobal(self):
|
||||
"""
|
||||
function:Init logger
|
||||
input:NA
|
||||
output:NA
|
||||
"""
|
||||
try:
|
||||
# Init logger
|
||||
self.xmlFile = self.g_opts.confFile
|
||||
self.logFile = self.g_opts.logFile
|
||||
self.initLogger(self.g_opts.action)
|
||||
if (os.getuid() != 0):
|
||||
DefaultValue.modifyFileOwnerFromGPHOME(self.logger.logFile)
|
||||
|
||||
dirName = os.path.dirname(self.g_opts.logFile)
|
||||
self.g_opts.localLog = os.path.join(dirName,
|
||||
DefaultValue.LOCAL_LOG_FILE)
|
||||
|
||||
if (self.g_opts.action == ACTION_REBUID):
|
||||
self.initClusterInfo()
|
||||
else:
|
||||
# Initialize the self.clusterInfo variable
|
||||
if (self.g_opts.action == ACTION_STATUS):
|
||||
self.initClusterInfoFromStaticFile(self.g_opts.user, False)
|
||||
else:
|
||||
self.initClusterInfoFromStaticFile(self.g_opts.user)
|
||||
|
||||
# Check --az with single-primary-multi-standby
|
||||
if (self.g_opts.azName):
|
||||
raise Exception(
|
||||
ErrorCode.GAUSS_500["GAUSS_50002"] % '-az' +
|
||||
". This parameter is used in single primary multi "
|
||||
"standby.")
|
||||
|
||||
# Obtain the owner and group of the cluster installation directory
|
||||
if self.g_opts.action != ACTION_KERBEROS:
|
||||
(self.g_opts.user, self.g_opts.group) = g_OSlib.getPathOwner(
|
||||
self.clusterInfo.appPath)
|
||||
if self.g_opts.user == "" or self.g_opts.group == "":
|
||||
raise Exception(ErrorCode.GAUSS_503["GAUSS_50308"])
|
||||
# Init base member
|
||||
self.user = self.g_opts.user
|
||||
self.group = self.g_opts.group
|
||||
|
||||
# init components
|
||||
if self.g_opts.action != ACTION_STATUS:
|
||||
self.initComponent()
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
|
||||
|
||||
def checkAction(self, ParaDict):
|
||||
"""
|
||||
"""
|
||||
if (ParaDict.__contains__("action")):
|
||||
self.g_opts.action = ParaDict.get("action")
|
||||
if (len(self.g_opts.action) == 0):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"]
|
||||
% "t" + ".")
|
||||
|
||||
def parseNode(self, ParaDict):
|
||||
"""
|
||||
"""
|
||||
if (self.g_opts.action == ACTION_START or
|
||||
self.g_opts.action == ACTION_STOP or
|
||||
self.g_opts.action == ACTION_STATUS):
|
||||
if (ParaDict.__contains__("nodename")):
|
||||
nodename = ParaDict.get("nodename")
|
||||
self.g_opts.estimateNodeName = nodename
|
||||
# Only one -h parameter can be entered
|
||||
if (len(nodename) != 1):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50006"]
|
||||
% nodename[0] + " Please check it.")
|
||||
self.g_opts.nodeName = nodename[0]
|
||||
|
||||
if "nodeId" in ParaDict.keys():
|
||||
nodeId = int(ParaDict.get("nodeId"))
|
||||
if nodeId < 1:
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_500["GAUSS_50004"] %
|
||||
ParaDict.get("nodeId") + " Please check it.")
|
||||
self.g_opts.nodeId = nodeId
|
||||
|
||||
if ("dataDir" in ParaDict.keys()):
|
||||
self.g_opts.dataDir = ParaDict.get("dataDir")
|
||||
|
||||
def parseTimeOut(self, ParaDict):
|
||||
"""
|
||||
"""
|
||||
if self.g_opts.action == ACTION_START or self.g_opts.action == \
|
||||
ACTION_STOP:
|
||||
if (ParaDict.__contains__("time_out")):
|
||||
self.g_opts.time_out = ParaDict.get("time_out")
|
||||
|
||||
def parseMode(self, ParaDict):
|
||||
"""
|
||||
"""
|
||||
if (self.g_opts.action == ACTION_STOP):
|
||||
if (ParaDict.__contains__("Mode")):
|
||||
self.g_opts.mode = ParaDict.get("Mode")
|
||||
|
||||
def parseKerberosMode(self, ParaDict):
|
||||
"""
|
||||
"""
|
||||
if (self.g_opts.action == ACTION_KERBEROS):
|
||||
if ("Mode" in ParaDict):
|
||||
self.g_opts.kerberosMode = ParaDict.get("Mode")
|
||||
|
||||
def parseStatus(self, ParaDict):
|
||||
"""
|
||||
"""
|
||||
if (self.g_opts.action == ACTION_STATUS):
|
||||
# A status query can specify an out file
|
||||
if (ParaDict.__contains__("outFile")):
|
||||
self.g_opts.outFile = ParaDict.get("outFile")
|
||||
# The status query can display detailed information
|
||||
if (ParaDict.__contains__("show_detail")):
|
||||
self.g_opts.show_detail = ParaDict.get("show_detail")
|
||||
if (ParaDict.__contains__("all")):
|
||||
self.g_opts.showAll = ParaDict.get("all")
|
||||
|
||||
def parseView(self, ParaDict):
|
||||
"""
|
||||
"""
|
||||
if (self.g_opts.action == ACTION_VIEW):
|
||||
# A view can specify an out file
|
||||
if ("outFile" in ParaDict.keys()):
|
||||
self.g_opts.outFile = ParaDict.get("outFile")
|
||||
|
||||
def parseQuery(self, ParaDict):
|
||||
"""
|
||||
"""
|
||||
if (self.g_opts.action == ACTION_QUERY):
|
||||
# A view can specify an out file
|
||||
if ("outFile" in ParaDict.keys()):
|
||||
self.g_opts.outFile = ParaDict.get("outFile")
|
||||
|
||||
def parseStart(self, ParaDict):
|
||||
"""
|
||||
"""
|
||||
if (self.g_opts.action == ACTION_START):
|
||||
# The start query can specify az name
|
||||
if ParaDict.__contains__("az_name"):
|
||||
self.g_opts.azName = ParaDict.get("az_name")
|
||||
if ParaDict.__contains__("security_mode"):
|
||||
self.g_opts.security_mode = ParaDict.get("security_mode")
|
||||
|
||||
def parseStop(self, ParaDict):
|
||||
"""
|
||||
"""
|
||||
if (self.g_opts.action == ACTION_STOP):
|
||||
# The stop action can specify an az name for OLAP
|
||||
if (ParaDict.__contains__("az_name")):
|
||||
self.g_opts.azName = ParaDict.get("az_name")
|
||||
|
||||
def parseConFile(self, ParaDict):
|
||||
"""
|
||||
"""
|
||||
if (self.g_opts.action == ACTION_REBUID):
|
||||
# The generateconf action requires the -X parameter
|
||||
if (ParaDict.__contains__("confFile")):
|
||||
self.g_opts.confFile = ParaDict.get("confFile")
|
||||
|
||||
def parseGenerateconf(self, ParaDict):
|
||||
"""
|
||||
"""
|
||||
if (self.g_opts.action == ACTION_REBUID):
|
||||
# Generateconf can specify the distribution file
|
||||
if (ParaDict.__contains__("distribute")):
|
||||
self.g_opts.distribute = ParaDict.get("distribute")
|
||||
|
||||
def parseCert(self, ParaDict):
|
||||
"""
|
||||
"""
|
||||
if (self.g_opts.action == ACTION_CERT):
|
||||
# cert can change cert file
|
||||
if (ParaDict.__contains__("cert-file")):
|
||||
self.g_opts.certFile = ParaDict.get("cert-file")
|
||||
if (ParaDict.__contains__("rollback")):
|
||||
self.g_opts.certRollback = ParaDict.get("rollback")
|
||||
if (ParaDict.__contains__("localMode")):
|
||||
self.g_opts.localMode = ParaDict.get("localMode")
|
||||
if (ParaDict.__contains__("cert-file") and ParaDict.__contains__(
|
||||
"rollback")):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50005"]
|
||||
% ("-cert-file", "-rollback"))
|
||||
|
||||
def parseKerberos(self, ParaDict):
|
||||
"""
|
||||
"""
|
||||
if self.g_opts.action == ACTION_KERBEROS:
|
||||
if "user" in ParaDict:
|
||||
self.g_opts.clusterUser = ParaDict.get("user")
|
||||
if self.g_opts.kerberosMode == "install":
|
||||
if "krb-server" in ParaDict and "krb-client" in ParaDict:
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_500["GAUSS_50005"]
|
||||
% ("-krb-server", "-krb-client"))
|
||||
if (("krb-server" not in ParaDict) and (
|
||||
"krb-client" not in ParaDict)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"]
|
||||
% "-krb-server' or '--krb-client")
|
||||
if "krb-server" in ParaDict:
|
||||
self.g_opts.kerberosType = "krb-server"
|
||||
if "krb-client" in ParaDict:
|
||||
self.g_opts.kerberosType = "krb-client"
|
||||
if self.g_opts.kerberosMode == "uninstall":
|
||||
if "krb-server" in ParaDict:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50002"]
|
||||
% "-krb-server")
|
||||
if "krb-client" in ParaDict:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50002"]
|
||||
% "-krb-client")
|
||||
|
||||
|
||||
def parseLog(self, ParaDict):
|
||||
"""
|
||||
"""
|
||||
if (ParaDict.__contains__("logFile")):
|
||||
self.g_opts.logFile = ParaDict.get("logFile")
|
||||
|
||||
def parseCommandLine(self):
|
||||
"""
|
||||
function:Parse command line and save to global variable
|
||||
input:NA
|
||||
output:NA
|
||||
"""
|
||||
# Parse command line
|
||||
ParaObj = Parameter()
|
||||
ParaDict = ParaObj.ParameterCommandLine("gs_om")
|
||||
# If help is included in the parameter,
|
||||
# the help message is printed and exited
|
||||
if (ParaDict.__contains__("helpFlag")):
|
||||
self.usage()
|
||||
sys.exit(0)
|
||||
# The -t parameter is required
|
||||
self.checkAction(ParaDict)
|
||||
# Starting a cluster, stopping a cluster, querying a state,
|
||||
# and switching between active and standby devices require obtaining
|
||||
# node information
|
||||
self.parseNode(ParaDict)
|
||||
# Starting a Cluster and Stopping a Cluster
|
||||
# can specify a timeout period
|
||||
self.parseTimeOut(ParaDict)
|
||||
# Stop the cluster and managecn to specify the mode
|
||||
self.parseMode(ParaDict)
|
||||
# Kerberos to specify the mode
|
||||
self.parseKerberosMode(ParaDict)
|
||||
# Parse start parameter
|
||||
self.parseStart(ParaDict)
|
||||
# Parse stop parameter
|
||||
self.parseStop(ParaDict)
|
||||
# Parse status parameter
|
||||
self.parseStatus(ParaDict)
|
||||
# Parse view parameter
|
||||
self.parseView(ParaDict)
|
||||
# Parse query parameter
|
||||
self.parseQuery(ParaDict)
|
||||
# Parse -X parameter
|
||||
self.parseConFile(ParaDict)
|
||||
# Parse generateconf parameter
|
||||
self.parseGenerateconf(ParaDict)
|
||||
# Parse cert parameter
|
||||
self.parseCert(ParaDict)
|
||||
# Parse kerberos parameter
|
||||
self.parseKerberos(ParaDict)
|
||||
# Parse log parameter
|
||||
self.parseLog(ParaDict)
|
||||
|
||||
###########################################################################
|
||||
# Check parameters for all operations
|
||||
###########################################################################
|
||||
def checkParameter(self):
|
||||
"""
|
||||
function:Check parameter from command line
|
||||
input:NA
|
||||
output:NA
|
||||
"""
|
||||
if (os.getuid() == 0):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_501["GAUSS_50105"] + \
|
||||
" When the parameter '-t' value is not "
|
||||
"dailyAlarm or not estimate install "
|
||||
"consume or not extension connector with "
|
||||
"add, delete, restart, upgrade in -m "
|
||||
"parameter.")
|
||||
|
||||
if (self.g_opts.action == ACTION_START):
|
||||
self.checkStartParameter()
|
||||
elif (self.g_opts.action == ACTION_STOP):
|
||||
self.checkStopParameter()
|
||||
elif (self.g_opts.action == ACTION_STATUS):
|
||||
self.checkOutFileParameter()
|
||||
elif (self.g_opts.action == ACTION_REBUID):
|
||||
self.checkGenerateConfParameter()
|
||||
elif (self.g_opts.action == ACTION_CERT):
|
||||
self.checkCertParameter()
|
||||
elif (self.g_opts.action == ACTION_KERBEROS):
|
||||
self.checkKerberosParameter()
|
||||
elif (self.g_opts.action == ACTION_VIEW):
|
||||
self.checkOutFileParameter()
|
||||
elif (self.g_opts.action == ACTION_QUERY):
|
||||
self.checkOutFileParameter()
|
||||
elif (self.g_opts.action == ACTION_REFRESHCONF):
|
||||
pass
|
||||
else:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"] % "t")
|
||||
|
||||
# check mpprc file path
|
||||
self.g_opts.mpprcFile = DefaultValue.getMpprcFile()
|
||||
|
||||
# check if user exist and is the right user
|
||||
if (self.g_opts.user == ""):
|
||||
self.g_opts.user = pwd.getpwuid(os.getuid()).pw_name
|
||||
if (self.g_opts.user == ""):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"] %
|
||||
"U" + ".")
|
||||
|
||||
# Check user on installed cluster
|
||||
DefaultValue.checkUser(self.g_opts.user)
|
||||
# Check whether the current user is consistent with -U if no root
|
||||
if (os.getuid() != 0):
|
||||
cmd = "id -un"
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (output != self.g_opts.user):
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_530["GAUSS_53033"] % self.g_opts.user)
|
||||
|
||||
self.OM_PARAMETER_DIR = "%s/om_parameter_dir" % \
|
||||
DefaultValue.getTmpDirFromEnv(self.g_opts.user)
|
||||
|
||||
# check log file
|
||||
if (self.g_opts.logFile == ""):
|
||||
self.g_opts.logFile = DefaultValue.getOMLogPath(
|
||||
DefaultValue.OM_LOG_FILE, self.g_opts.user, "",
|
||||
action=self.g_opts.action)
|
||||
if (not os.path.isabs(self.g_opts.logFile)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_502["GAUSS_50213"] % "log")
|
||||
|
||||
def checkStartParameter(self):
|
||||
"""
|
||||
Check parameter for starting the cluster and node
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
# if the parameter -I exists, then the -h parameter is required.
|
||||
if (self.g_opts.instanceName and (not self.g_opts.nodeName)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"] % "h")
|
||||
# If the cluster does not specify a time-out period, the default is
|
||||
# 300 seconds
|
||||
if (self.g_opts.time_out is None):
|
||||
self.g_opts.time_out = DefaultValue.TIMEOUT_CLUSTER_START
|
||||
else:
|
||||
# The timeout parameter must be a pure number
|
||||
if (not str(self.g_opts.time_out).isdigit()):
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_500["GAUSS_50003"] %
|
||||
("-time-out", "a nonnegative integer"))
|
||||
self.g_opts.time_out = int(self.g_opts.time_out)
|
||||
# The timeout parameter must be greater than 0
|
||||
# The timeout parameter must be less than the integer maximum
|
||||
if (self.g_opts.time_out <= 0 or self.g_opts.time_out
|
||||
>= 2147483647):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"]
|
||||
% "-time-out")
|
||||
|
||||
if self.g_opts.security_mode != "off" and self.g_opts.security_mode \
|
||||
!= "on":
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"]
|
||||
% "-security-mode")
|
||||
|
||||
def checkStopParameter(self):
|
||||
"""
|
||||
Check parameter for stopping the cluster and node
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
# if the parameter -I exists, then the -h parameter is required.
|
||||
if (self.g_opts.instanceName and (not self.g_opts.nodeName)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"] % "h")
|
||||
# If no stop type is specified, the default is fast
|
||||
if (self.g_opts.mode == ""):
|
||||
self.g_opts.mode = STOP_MODE_FAST
|
||||
# The stop mode must be fast, immediate, f, or i
|
||||
if (self.g_opts.mode not in [STOP_MODE_FAST, STOP_MODE_IMMEDIATE,
|
||||
"f", "i"]):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"] % "m")
|
||||
|
||||
def checkOutFileParameter(self):
|
||||
"""
|
||||
Check parameter for status
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
# Check the status query for the specified output file
|
||||
if (self.g_opts.outFile != ''):
|
||||
DefaultValue.checkOutputFile(self.g_opts.outFile)
|
||||
|
||||
def checkGenerateConfParameter(self):
|
||||
"""
|
||||
Check parameter for generate config
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
|
||||
# check xml file
|
||||
if (self.g_opts.confFile == ""):
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_500["GAUSS_50001"] % 'X' + ".")
|
||||
if (not os.path.isfile(self.g_opts.confFile)):
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_502["GAUSS_50210"] % self.g_opts.confFile)
|
||||
|
||||
def checkCertParameter(self):
|
||||
"""
|
||||
Check parameter for cert
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
# cert must specify either the --cert-file or the --rollback parameter
|
||||
if (self.g_opts.certFile == "" and not self.g_opts.certRollback):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"] %
|
||||
'-cert-file or --rollback')
|
||||
# certFile must exist and must be a zip package
|
||||
if (self.g_opts.certFile != "" and self.g_opts.certFile[-4:]
|
||||
!= ".zip"):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"]
|
||||
% "-cert-file type is not 'zip'")
|
||||
if (not os.path.isfile(self.g_opts.certFile)
|
||||
and not self.g_opts.certRollback):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_502["GAUSS_50210"]
|
||||
% self.g_opts.certFile)
|
||||
|
||||
def checkKerberosParameter(self):
|
||||
"""
|
||||
Check parameter for kerberos
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
if (self.g_opts.kerberosMode != "install" and
|
||||
self.g_opts.kerberosMode != "uninstall"):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"]
|
||||
% 'm' + "Value: %s"
|
||||
% self.g_opts.kerberosMode)
|
||||
|
||||
# get user info
|
||||
self.user = g_OSlib.getUserInfo()['name']
|
||||
if (self.g_opts.clusterUser == ""):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"] % 'U')
|
||||
if self.g_opts.clusterUser != self.user:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_503["GAUSS_50323"]
|
||||
% self.g_opts.clusterUser)
|
||||
|
||||
if (self.g_opts.kerberosMode == "install" and
|
||||
self.g_opts.kerberosType == ""):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"]
|
||||
% "-krb-server' or '--krb-client")
|
||||
|
||||
def checkDSN(self, dsnName):
|
||||
"""
|
||||
function: Check the DSN name:
|
||||
the name must be composed of letters, digits,
|
||||
and underscores, and must not exceed 64 characters
|
||||
input : dsnName
|
||||
output: True if the name is valid, otherwise the first invalid character
|
||||
"""
|
||||
nameLen = len(dsnName)
|
||||
if (nameLen > 64):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_502["GAUSS_50219"]
|
||||
% dsnName
|
||||
+ "Error:\nThe DSN name is too long.")
|
||||
wrongChar = None
|
||||
i = 0
|
||||
a_ascii = ord('a')
|
||||
z_ascii = ord('z')
|
||||
A_ascii = ord('A')
|
||||
Z_ascii = ord('Z')
|
||||
num0_ascii = ord('0')
|
||||
num9_ascii = ord('9')
|
||||
sep_ascii = ord('_')
|
||||
for i in range(0, nameLen):
|
||||
char_check = ord(dsnName[i])
|
||||
if (not (a_ascii <= char_check <= z_ascii or A_ascii <=
|
||||
char_check <= Z_ascii or num0_ascii <= char_check <=
|
||||
num9_ascii or char_check == sep_ascii)):
|
||||
wrongChar = dsnName[i]
|
||||
break
|
||||
if (wrongChar != None):
|
||||
return wrongChar
|
||||
else:
|
||||
return True
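# A brief sketch of checkDSN's slightly unusual return contract (values illustrative):
manager = OperationManager()
print(manager.checkDSN("my_datasource_01"))  # True: only letters, digits and underscores
print(manager.checkDSN("my-datasource"))     # '-': the first character outside the allowed set
# names longer than 64 characters exit immediately via GaussLog.exitWithError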
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
main function
|
||||
"""
|
||||
try:
|
||||
REPEAT = False
|
||||
# Objectize class
|
||||
manager = OperationManager()
|
||||
# parse cmd lines
|
||||
manager.parseCommandLine()
|
||||
# check parameters
|
||||
manager.checkParameter()
|
||||
|
||||
# init global variables
|
||||
manager.initGlobal()
|
||||
# set action flag file
|
||||
DefaultValue.setActionFlagFile(manager.g_opts.action, manager.logger)
|
||||
except Exception as e:
|
||||
if (manager.g_opts.action in (ACTION_STATUS, ACTION_STOP,
|
||||
ACTION_START)):
|
||||
actionDict = {ACTION_STATUS: DefaultValue.TASK_QUERY_STATUS,
|
||||
ACTION_STOP: DefaultValue.TASK_STOP,
|
||||
ACTION_START: DefaultValue.TASK_START}
|
||||
if REPEAT:
|
||||
manager.sshTool = SshTool(
|
||||
manager.clusterInfo.getClusterNodeNames(), manager.logFile,
|
||||
timeout=DefaultValue.TIMEOUT_PSSH_COMMON)
|
||||
manager.logger.logExit(str(e))
|
||||
|
||||
try:
|
||||
impl = OmImplOLAP(manager)
|
||||
|
||||
if (manager.g_opts.action not in [ACTION_START,
|
||||
ACTION_STOP,
|
||||
ACTION_STATUS,
|
||||
ACTION_REBUID,
|
||||
ACTION_CERT,
|
||||
ACTION_KERBEROS,
|
||||
ACTION_VIEW,
|
||||
ACTION_QUERY,
|
||||
ACTION_REFRESHCONF
|
||||
]):
|
||||
raise Exception(ErrorCode.GAUSS_531['GAUSS_53104']
|
||||
% ("gs_om -t " + manager.g_opts.action))
|
||||
elif (manager.g_opts.action == ACTION_CERT and
|
||||
manager.g_opts.certRollback):
|
||||
impl.doDNSSLCertRollback()
|
||||
|
||||
# Depending on the function, different operations are performed
|
||||
if (manager.g_opts.action == ACTION_START):
|
||||
impl.doStart()
|
||||
elif (manager.g_opts.action == ACTION_STOP):
|
||||
impl.doStop()
|
||||
elif (manager.g_opts.action == ACTION_STATUS):
|
||||
impl.doStatus()
|
||||
elif (manager.g_opts.action == ACTION_REBUID):
|
||||
impl.doRebuildConf()
|
||||
elif (manager.g_opts.action == ACTION_KERBEROS):
|
||||
if DefaultValue.isUnderUpgrade(manager.user):
|
||||
raise Exception(ErrorCode.GAUSS_529["GAUSS_52936"])
|
||||
impl.doKerberos()
|
||||
elif (manager.g_opts.action == ACTION_CERT
|
||||
and not manager.g_opts.certRollback):
|
||||
impl.doReplaceSSLCert()
|
||||
elif (manager.g_opts.action == ACTION_VIEW):
|
||||
impl.doView()
|
||||
elif (manager.g_opts.action == ACTION_QUERY):
|
||||
impl.doQuery()
|
||||
elif (manager.g_opts.action == ACTION_REFRESHCONF):
|
||||
impl.doRefreshConf()
|
||||
|
||||
manager.logger.closeLog()
|
||||
except Exception as e:
|
||||
manager.logger.logExit(str(e))
|
||||
finally:
|
||||
DefaultValue.setActionFlagFile("gs_om", None, False)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
416
script/gs_postuninstall
Normal file
@ -0,0 +1,416 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : gs_postuninstall is a utility to clean up the environment
|
||||
# after uninstalling a Gauss200 server.
|
||||
#############################################################################
|
||||
|
||||
import os, sys
|
||||
import subprocess
|
||||
import grp
|
||||
import pwd
|
||||
import platform
|
||||
|
||||
package_path = os.path.dirname(os.path.realpath(__file__))
|
||||
ld_path = package_path + "/gspylib/clib"
|
||||
if 'LD_LIBRARY_PATH' not in os.environ:
|
||||
os.environ['LD_LIBRARY_PATH'] = ld_path
|
||||
os.execve(os.path.realpath(__file__), sys.argv, os.environ)
|
||||
if ld_path not in os.environ.get('LD_LIBRARY_PATH'):
|
||||
os.environ['LD_LIBRARY_PATH'] = \
|
||||
ld_path + ":" + os.environ['LD_LIBRARY_PATH']
|
||||
os.execve(os.path.realpath(__file__), sys.argv, os.environ)
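The block above re-executes the script via os.execve so that the dynamic linker sees the clib directory on LD_LIBRARY_PATH; exporting the variable alone is not enough because the linker only reads it at process start. A reduced sketch of the same pattern (illustrative only, not part of this commit; the helper name is hypothetical):

# Illustrative sketch (not part of this commit): ensure a private lib
# directory is on LD_LIBRARY_PATH, re-executing the interpreter if it was
# added, so the dynamic linker picks up the new value.
import os
import sys

def ensure_ld_library_path_sketch(lib_dir):
    current = os.environ.get('LD_LIBRARY_PATH', '')
    if lib_dir in current.split(':'):
        return  # already visible to the dynamic linker
    os.environ['LD_LIBRARY_PATH'] = \
        lib_dir if not current else lib_dir + ':' + current
    # execve does not return: the script restarts with the updated environment
    os.execve(os.path.realpath(__file__), sys.argv, os.environ)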
|
||||
|
||||
|
||||
from gspylib.common.GaussLog import GaussLog
|
||||
from gspylib.os.gsfile import g_Platform
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.common.ParallelBaseOM import ParallelBaseOM
|
||||
from gspylib.common.ParameterParsecheck import Parameter
|
||||
from impl.postuninstall.OLAP.PostUninstallImplOLAP import \
|
||||
PostUninstallImplOLAP
|
||||
|
||||
|
||||
class Postuninstall(ParallelBaseOM):
|
||||
"""
|
||||
init the command options
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
function: init parameters
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
ParallelBaseOM.__init__(self)
|
||||
self.deleteUser = False
|
||||
self.deleteGroup = False
|
||||
self.clean_gphome = False
|
||||
self.clean_host = []
|
||||
self.sshpwd = ""
|
||||
self.nodeList = []
|
||||
self.clusterToolPath = ""
|
||||
self.userHome = ""
|
||||
|
||||
def usage(self):
|
||||
"""
|
||||
gs_postuninstall is a utility to clean up the environment
|
||||
after uninstalling the cluster.
|
||||
|
||||
Usage:
|
||||
gs_postuninstall -? |--help
|
||||
gs_postuninstall -V |--version
|
||||
gs_postuninstall -U USER -X XMLFILE [-L] [--delete-user] [--delete-group]
|
||||
[-l LOGFILE] [--clean-gphome]
|
||||
|
||||
General options:
|
||||
-U Cluster user.
|
||||
-X Path of the XML configuration file.
|
||||
-L Only clean up local nodes.
|
||||
--delete-user Delete the OS user.
|
||||
--delete-group Delete the group of the OS user.
|
||||
--clean-gphome Only execute the gphome directory
|
||||
cleanup task.
|
||||
-l Path of log file.
|
||||
-?, --help Show help information for this utility,
|
||||
and exit the command line mode.
|
||||
-V, --version Show version information.
|
||||
"""
|
||||
print(self.usage.__doc__)
|
||||
|
||||
def parseCommandLine(self):
|
||||
"""
|
||||
function: Parse command line and save to global variable
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
ParaObj = Parameter()
|
||||
ParaDict = ParaObj.ParameterCommandLine("postuninstall")
|
||||
# check if has '--help' parameter
|
||||
if (ParaDict.__contains__("helpFlag")):
|
||||
self.usage()
|
||||
sys.exit(0)
|
||||
|
||||
# check the parameters of postuninstall command
|
||||
if (ParaDict.__contains__("user")):
|
||||
self.user = ParaDict.get("user")
|
||||
if (ParaDict.__contains__("confFile")):
|
||||
self.xmlFile = ParaDict.get("confFile")
|
||||
if (ParaDict.__contains__("logFile")):
|
||||
self.logFile = ParaDict.get("logFile")
|
||||
|
||||
if (ParaDict.__contains__("delete-user")):
|
||||
self.deleteUser = ParaDict.get("delete-user")
|
||||
if (ParaDict.__contains__("delete-group")):
|
||||
self.deleteGroup = ParaDict.get("delete-group")
|
||||
if (ParaDict.__contains__("clean-gphome")):
|
||||
self.clean_gphome = ParaDict.get("clean-gphome")
|
||||
if (ParaDict.__contains__("nodename")):
|
||||
if not "HOST_IP" in os.environ.keys():
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_518["GAUSS_51801"] % "HOST_IP doesn't" +
|
||||
" so -h parameter is not needed.")
|
||||
self.clean_host = ParaDict.get("nodename")
|
||||
if len(self.clean_host) == 0:
|
||||
raise Exception(ErrorCode.GAUSS_500["GAUSS_50004"] % 'h')
|
||||
|
||||
if (ParaDict.__contains__("localMode")):
|
||||
self.localMode = ParaDict.get("localMode")
|
||||
|
||||
if "HOST_IP" in os.environ.keys():
|
||||
if not ParaDict.__contains__("localMode"):
|
||||
if not (ParaDict.__contains__("clean-gphome")
|
||||
and ParaDict.__contains__("nodename")):
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_518["GAUSS_51801"] % "HOST_IP" +
|
||||
" so you must specify the -L parameter or (-h and "
|
||||
"--clean-gphome) parameters.")
|
||||
if ParaDict.__contains__("clean-gphome"):
|
||||
if ParaDict.__contains__("localMode") and\
|
||||
ParaDict.__contains__("nodename"):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50005"]
|
||||
% ("-L", "-h"))
|
||||
|
||||
if (self.deleteGroup == True and self.deleteUser != True):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"]
|
||||
% "-delete-user" + ".")
|
||||
|
||||
def checkParameter(self):
|
||||
"""
|
||||
function: check parameter
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
# check user
|
||||
self.checkUser()
|
||||
# check config file
|
||||
self.checkConfigFile()
|
||||
# check log file
|
||||
self.checkLogFile()
|
||||
# check mpprc file if needed, should be done
|
||||
# before check preinstall step
|
||||
self.checkMpprcFile()
|
||||
# check preInstall
|
||||
self.checkPreInstall()
|
||||
# check group for redhat
|
||||
self.checkGroup()
|
||||
|
||||
def checkUser(self):
|
||||
"""
|
||||
function: check the user
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
# check if no user
|
||||
if (self.user == ""):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"] % "U"
|
||||
+ ".")
|
||||
# check if is root user
|
||||
if (self.user == "root"):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_503["GAUSS_50301"])
|
||||
|
||||
try:
|
||||
DefaultValue.checkUser(self.user, False)
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
|
||||
|
||||
if (pwd.getpwnam(self.user).pw_uid == 0):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_503["GAUSS_50302"])
|
||||
|
||||
def checkConfigFile(self):
|
||||
"""
|
||||
function: check Config File
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
# if no config file
|
||||
if (self.xmlFile == ""):
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_500["GAUSS_50001"] % "X" + ".")
|
||||
# if path not exists
|
||||
if (not os.path.exists(self.xmlFile)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_502["GAUSS_50201"]
|
||||
% "configuration file" + " %s."
|
||||
% self.xmlFile)
|
||||
# if not absolute path
|
||||
if (not os.path.isabs(self.xmlFile)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_502["GAUSS_50213"]
|
||||
% "configure file")
|
||||
|
||||
def checkLogFile(self):
|
||||
"""
|
||||
function: check log File
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
# if no log file
|
||||
if (self.logFile == ""):
|
||||
self.logFile = DefaultValue.getOMLogPath(
|
||||
DefaultValue.UNPREINSTALL_LOG_FILE, self.user, "",
|
||||
self.xmlFile)
|
||||
# if not absolute path
|
||||
if (not os.path.isabs(self.logFile)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_502["GAUSS_50213"] % "log")
|
||||
|
||||
def checkMpprcFile(self):
|
||||
"""
|
||||
function: check MpprcFile
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
# get path of MpprcFile
|
||||
self.mpprcFile = DefaultValue.getEnv(DefaultValue.MPPRC_FILE_ENV)
|
||||
|
||||
try:
|
||||
# get tool path
|
||||
self.clusterToolPath = DefaultValue.getPreClusterToolPath(
|
||||
self.user, self.xmlFile)
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
|
||||
# if MpprcFile is null
|
||||
if (self.mpprcFile == None):
|
||||
self.mpprcFile = ""
|
||||
# if MpprcFile is not null
|
||||
if (self.mpprcFile != ""):
|
||||
# if no MpprcFile
|
||||
if (not os.path.exists(self.mpprcFile)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_502["GAUSS_50201"]
|
||||
% "MPPRC file" + " %s."
|
||||
% self.mpprcFile)
|
||||
# if is not absolute path
|
||||
if (not os.path.isabs(self.mpprcFile)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_512["GAUSS_51206"]
|
||||
% self.mpprcFile)
|
||||
|
||||
def checkPreInstall(self):
|
||||
"""
|
||||
function: check preInstall
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
# check if agent-mode
|
||||
if "HOST_IP" in os.environ.keys():
|
||||
# get om_agent path
|
||||
agent_path_cmd = "ps aux | grep 'om_agent.py' | grep %s | grep " \
|
||||
"-v grep | head -n 1 | awk '{print $NF}'" % \
|
||||
self.user
|
||||
(status, output) = subprocess.getstatusoutput(agent_path_cmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_535["GAUSS_53507"]
|
||||
% agent_path_cmd)
|
||||
agent_path = os.path.dirname(output.strip())
|
||||
agent_conf_file = os.path.join(agent_path, 'om_agent.conf')
|
||||
if not os.path.exists(agent_conf_file):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"]
|
||||
% agent_conf_file)
|
||||
|
||||
# get agent sep_env_file
|
||||
with open(agent_conf_file) as fp:
|
||||
recordLines = fp.readlines()
|
||||
sep_env_file = ""
|
||||
for tmp in recordLines:
|
||||
if 'sep_env_file' in tmp:
|
||||
sep_env_file = tmp.split("=")[-1].strip()
|
||||
if not os.path.exists(sep_env_file):
|
||||
raise Exception(
|
||||
ErrorCode.GAUSS_502["GAUSS_50201"] % sep_env_file)
|
||||
|
||||
cmd = "su - %s -c 'source %s && echo $GAUSS_ENV' 2>/dev/null"\
|
||||
% (self.user, sep_env_file)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_518["GAUSS_51802"]
|
||||
% "$GAUSS_ENV" + "Error: \n%s." % output
|
||||
+ "The cmd is %s" % cmd)
|
||||
gaussEnv = output.strip()
|
||||
else:
|
||||
# check if has mpprcFile
|
||||
if (self.mpprcFile != ""):
|
||||
userprofile = self.mpprcFile
|
||||
else:
|
||||
userprofile = "/home/%s/.bashrc" % self.user
|
||||
|
||||
cmd = "su - %s -c 'source %s && echo $GAUSS_ENV' 2>/dev/null"\
|
||||
% (self.user, userprofile)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_518["GAUSS_51802"]
|
||||
% "$GAUSS_ENV" + "Error: \n%s." % output
|
||||
+ "The cmd is %s" % cmd)
|
||||
gaussEnv = output.strip()
|
||||
|
||||
# if gaussEnv is 2, the user has not run gs_uninstall yet
|
||||
if (str(gaussEnv) == "2"):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_525["GAUSS_52501"]
|
||||
% "gs_uninstall")
|
||||
# if gaussEnv is not 1, the user has not run gs_preinstall yet
|
||||
elif (str(gaussEnv) != "1" and not self.clean_gphome):
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_525["GAUSS_52501"] % "gs_preinstall" +
|
||||
"If you do preinstall with seperate file mode, please input "
|
||||
"sep-env-file before postuninstall. ")
|
||||
elif (str(gaussEnv) == "1" or str(gaussEnv) == "2")\
|
||||
and self.clean_gphome:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_525["GAUSS_52501"]
|
||||
% "'gs_uninstall' or 'gs_postuninstall"
|
||||
" no clean gphome'")
|
||||
|
||||
def checkGroup(self):
|
||||
"""
|
||||
function: check user group
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
try:
|
||||
# get user group
|
||||
group = grp.getgrgid(pwd.getpwnam(self.user).pw_gid).gr_name
|
||||
distname, version, idnum = g_Platform.dist()
|
||||
# check if OS version is redhat or Euler
|
||||
if (distname in ("redhat", "euleros", "centos", "openEuler")):
|
||||
if (self.deleteGroup != True and self.deleteUser == True
|
||||
and self.user == group):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"] %
|
||||
"-delete-group" + ". You must "
|
||||
"delete the "
|
||||
"group when you "
|
||||
"delete the "
|
||||
"user which has "
|
||||
"the same name "
|
||||
"with the group "
|
||||
"in redhat.")
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_503["GAUSS_50308"]
|
||||
+ "Failed to obtain the group for %s" %
|
||||
self.user + "Error:\n%s" % str(e))
|
||||
|
||||
def initGlobals(self):
|
||||
"""
|
||||
function: init logger and global variables
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
self.initLogger("gs_postuninstall")
|
||||
self.logger.ignoreErr = True
|
||||
|
||||
try:
|
||||
self.logger.log("Parsing the configuration file.", "addStep")
|
||||
# get cluster info from xml file
|
||||
# Initialize the self.clusterInfo variable
|
||||
self.initClusterInfo()
|
||||
# Initialize the self.sshTool variable
|
||||
self.initSshTool(self.clusterInfo.getClusterNodeNames(),
|
||||
DefaultValue.TIMEOUT_PSSH_POSTPREINSTALL)
|
||||
self.logger.debug("The cluster's information:\n%s."
|
||||
% str(self.clusterInfo))
|
||||
self.logger.log("Successfully parsed the configuration file.",
|
||||
"constant")
|
||||
except Exception as e:
|
||||
self.logger.logExit(str(e))
|
||||
|
||||
dirName = os.path.dirname(self.logFile)
|
||||
self.localLog = os.path.join(dirName, DefaultValue.LOCAL_LOG_FILE)
|
||||
self.userHome = DefaultValue.getUserHome(self.user)
|
||||
|
||||
if __name__ == '__main__':
|
||||
"""
|
||||
main function
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
if (os.getuid() != 0):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_501["GAUSS_50104"])
|
||||
|
||||
try:
|
||||
postuninstall = Postuninstall()
|
||||
postuninstall.parseCommandLine()
|
||||
postuninstall.checkParameter()
|
||||
postuninstall.initGlobals()
|
||||
|
||||
if len(postuninstall.clusterInfo.getClusterNodeNames()) == 1 and \
|
||||
postuninstall.clusterInfo.getClusterNodeNames()[
|
||||
0] == DefaultValue.GetHostIpOrName():
|
||||
postuninstall.localMode = True
|
||||
impl = PostUninstallImplOLAP(postuninstall)
|
||||
|
||||
# Perform the whole post-uninstall process
|
||||
impl.run()
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
|
||||
479
script/gs_preinstall
Normal file
@ -0,0 +1,479 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : gs_preinstall is a utility to create an installation
|
||||
# environment for a cluster.
|
||||
#############################################################################
|
||||
|
||||
import os
|
||||
import pwd
|
||||
import sys
|
||||
import grp
|
||||
import subprocess
|
||||
from gspylib.common.CheckPythonVersion import checkPythonVersion
|
||||
checkPythonVersion()
|
||||
from subprocess import Popen, PIPE
|
||||
|
||||
from gspylib.common.DbClusterInfo import dbClusterInfo, \
|
||||
readOneClusterConfigItem, initParserXMLFile
|
||||
from gspylib.common.GaussLog import GaussLog
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.common.ParallelBaseOM import ParallelBaseOM
|
||||
from gspylib.common.ParameterParsecheck import Parameter
|
||||
from impl.preinstall.OLAP.PreinstallImplOLAP import PreinstallImplOLAP
|
||||
from gspylib.threads.SshTool import SshTool
|
||||
|
||||
#############################################################################
|
||||
# Global variables
|
||||
#############################################################################
|
||||
userNameFirtChar = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']
|
||||
|
||||
|
||||
class Preinstall(ParallelBaseOM):
|
||||
def __init__(self):
|
||||
ParallelBaseOM.__init__(self)
|
||||
self.password = ""
|
||||
self.envParams = []
|
||||
self.rootUser = ""
|
||||
self.rootPasswd = ""
|
||||
self.createUserSshTrust = True
|
||||
self.clusterToolPath = ""
|
||||
self.needFixOwnerPaths = []
|
||||
self.preMode = False
|
||||
self.skipOSSet = False
|
||||
self.skipHostnameSet = False
|
||||
self.passwordsec = ""
|
||||
self.corePath = ""
|
||||
self.is_new_root_path = False
|
||||
|
||||
def usage(self):
|
||||
"""
|
||||
gs_preinstall is a utility to create an installation environment for a cluster.
|
||||
|
||||
Usage:
|
||||
gs_preinstall -? | --help
|
||||
gs_preinstall -V | --version
|
||||
gs_preinstall -U USER -G GROUP -X XMLFILE
|
||||
[-L] [--skip-os-set] [--env-var="ENVVAR" [...]]
|
||||
[--sep-env-file=MPPRCFILE] [--skip-hostname-set] [-l LOGFILE]
|
||||
[--non-interactive] [-M MOUNTDIR]
|
||||
|
||||
General options:
|
||||
-U Cluster user.
|
||||
-G Group of the cluster user.
|
||||
-X Path of the XML configuration file.
|
||||
-L Only perform preinstallation on local
|
||||
nodes.
|
||||
--skip-os-set Whether to skip OS parameter setting.
|
||||
(The default value is set.)
|
||||
--env-var="ENVVAR" OS user environment variables.
|
||||
--sep-env-file=MPPRCFILE Path of the MPP environment file.
|
||||
--skip-hostname-set Whether to skip hostname setting.
|
||||
(The default value is set.)
|
||||
-l Path of log file.
|
||||
-?, --help Show help information for this
|
||||
utility, and exit the command line mode.
|
||||
-V, --version Show version information.
|
||||
--non-interactive Pre-execution of non-secure mode.
|
||||
If it is not specified, you can choose
|
||||
whether to create the SSH trust for root
|
||||
user or cluster user.
|
||||
If it is specified, you must ensure the
|
||||
SSH trust for root user and cluster
|
||||
user have been created.
|
||||
"""
|
||||
print(self.usage.__doc__)
|
||||
|
||||
# get parameter from command
|
||||
def parseCommandLine(self):
|
||||
"""
|
||||
function: Parse command line and save to global variable
|
||||
input: NA
|
||||
output: NA
|
||||
"""
|
||||
# init the ParaObj
|
||||
ParaObj = Parameter()
|
||||
ParaDict = ParaObj.ParameterCommandLine("preinstall")
|
||||
# parameter -h or -?
|
||||
if (ParaDict.__contains__("helpFlag")):
|
||||
self.usage()
|
||||
sys.exit(0)
|
||||
|
||||
# Resolves command line arguments
|
||||
# parameter -U
|
||||
if (ParaDict.__contains__("user")):
|
||||
self.user = ParaDict.get("user")
|
||||
DefaultValue.checkPathVaild(self.user)
|
||||
# parameter -G
|
||||
if (ParaDict.__contains__("group")):
|
||||
self.group = ParaDict.get("group")
|
||||
# parameter -X
|
||||
if (ParaDict.__contains__("confFile")):
|
||||
self.xmlFile = ParaDict.get("confFile")
|
||||
# parameter -L
|
||||
if (ParaDict.__contains__("localMode")):
|
||||
self.localMode = ParaDict.get("localMode")
|
||||
# parameter -l
|
||||
if (ParaDict.__contains__("logFile")):
|
||||
self.logFile = ParaDict.get("logFile")
|
||||
# parameter --env-var
|
||||
if (ParaDict.__contains__("envparams")):
|
||||
self.envParams = ParaDict.get("envparams")
|
||||
# parameter --sep-env-file
|
||||
if (ParaDict.__contains__("mpprcFile")):
|
||||
self.mpprcFile = ParaDict.get("mpprcFile")
|
||||
DefaultValue.checkPathVaild(self.mpprcFile)
|
||||
# parameter --skip-hostname-set
|
||||
if (ParaDict.__contains__("skipHostnameSet")):
|
||||
self.skipHostnameSet = ParaDict.get("skipHostnameSet")
|
||||
# parameter --skip-os-set
|
||||
if (ParaDict.__contains__("skipOSSet")):
|
||||
self.skipOSSet = ParaDict.get("skipOSSet")
|
||||
# parameter --non-interactive
|
||||
if (ParaDict.__contains__("preMode")):
|
||||
self.preMode = ParaDict.get("preMode")
|
||||
|
||||
def checkUserParameter(self, check_clusterInfo):
|
||||
"""
|
||||
"""
|
||||
if (self.user == ""):
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_500["GAUSS_50001"] % 'U' + ".")
|
||||
elif (":" in self.user):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"] % 'U')
|
||||
|
||||
# check group info
|
||||
self.checkGroupParameter()
|
||||
|
||||
# check if user exists
|
||||
cmd = "cat /etc/passwd|grep -v nologin|grep -v halt|" \
|
||||
"grep -v shutdown|awk -F: '{ print $1 }'|" \
|
||||
" grep '^%s$' 2>/dev/null" % self.user
|
||||
status = subprocess.getstatusoutput(cmd)[0]
|
||||
if status == 0:
|
||||
if pwd.getpwnam(self.user).pw_uid == 0:
|
||||
# user exists and uid is 0, exit.
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_503["GAUSS_50302"])
|
||||
|
||||
# check the local user in local mode;
# if the user does not exist, exit with an error
|
||||
if (self.localMode):
|
||||
try:
|
||||
DefaultValue.getUserId(self.user)
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
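The shell pipeline above filters /etc/passwd to see whether the cluster user already exists; the pwd module gives the same answer without spawning a shell. A small sketch (illustrative only, not part of this commit; the helper name is made up):

# Illustrative sketch (not part of this commit): check that an OS user exists
# and is not root, using the pwd module instead of grepping /etc/passwd.
import pwd

def user_exists_and_not_root_sketch(name):
    try:
        entry = pwd.getpwnam(name)
    except KeyError:
        return False          # user does not exist
    return entry.pw_uid != 0  # True only for a non-root user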
|
||||
|
||||
def checkGroupParameter(self):
|
||||
"""
|
||||
"""
|
||||
if (self.group == ""):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"]
|
||||
% 'G' + ".")
|
||||
if (self.user == "root" or self.group == "root"):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_503["GAUSS_50301"]
|
||||
+ "User:Group[%s:%s]."
|
||||
% (self.user, self.group))
|
||||
|
||||
def checkUserAndGroup(self):
|
||||
"""
|
||||
"""
|
||||
if (self.localMode):
|
||||
usergroup = grp.getgrgid(pwd.getpwnam(self.user).pw_gid).gr_name
|
||||
if (self.group != usergroup):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_503["GAUSS_50305"]
|
||||
+ "User:Group[%s:%s]"
|
||||
% (self.user, self.group))
|
||||
|
||||
def checkConfigFile(self):
|
||||
"""
|
||||
"""
|
||||
if (self.xmlFile == ""):
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_500["GAUSS_50001"] % 'X' + ".")
|
||||
if (not os.path.exists(self.xmlFile)):
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_502["GAUSS_50201"] % self.xmlFile)
|
||||
if (not os.path.isabs(self.xmlFile)):
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_502["GAUSS_50213"] % "configuration file")
|
||||
|
||||
def checkEnvValueParameter(self):
|
||||
"""
|
||||
"""
|
||||
for param in self.envParams:
|
||||
# check that the environment variable values are valid
|
||||
if (param.find("\'") >= 0 or param.find(";") >= 0):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"]
|
||||
% "-env-var" + " There are illegal"
|
||||
" characters in the"
|
||||
" parameter.")
|
||||
|
||||
def checkLogFile(self):
|
||||
"""
|
||||
"""
|
||||
if (self.logFile == ""):
|
||||
self.logFile = self.getPreOMLogPath(
|
||||
DefaultValue.PREINSTALL_LOG_FILE, self.xmlFile)
|
||||
if (not os.path.isabs(self.logFile)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_502["GAUSS_50213"]
|
||||
% self.logFile)
|
||||
|
||||
def checkMpprcFile(self):
|
||||
"""
|
||||
"""
|
||||
if (self.mpprcFile == ""):
|
||||
return
|
||||
|
||||
if (not os.path.isabs(self.mpprcFile)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_512["GAUSS_51206"]
|
||||
% self.mpprcFile)
|
||||
|
||||
# check mpprc file path
|
||||
mpprcFilePath = os.path.normpath(self.mpprcFile)
|
||||
if (mpprcFilePath == "/home/%s" % self.user):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"] % \
|
||||
'-sep-env-file' + " The file [%s] can not"
|
||||
" be a reserved home "
|
||||
"directory."
|
||||
% self.mpprcFile)
|
||||
if (os.path.isdir(self.mpprcFile)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"] % \
|
||||
'-sep-env-file' + " The file [%s] can not "
|
||||
"be a directory."
|
||||
% self.mpprcFile)
|
||||
|
||||
try:
|
||||
# check the user if exist
|
||||
DefaultValue.getUserId(self.user)
|
||||
except Exception as e:
|
||||
mpprcFileTopPath = os.path.dirname(self.mpprcFile)
|
||||
# the mpprc file can not be specified in the /home/user directory
|
||||
userpath = "/home/%s/" % self.user
|
||||
if (mpprcFilePath.startswith(userpath)):
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_500["GAUSS_50004"] % '-sep-env-file' + \
|
||||
" Environment variable separation file can not be "
|
||||
"created under %s." % mpprcFileTopPath)
|
||||
|
||||
DefaultValue.checkMpprcFileChange(self.mpprcFile, "", self.mpprcFile)
|
||||
(checkstatus, checkoutput) = DefaultValue.checkEnvFile(self.mpprcFile)
|
||||
if (not checkstatus):
|
||||
if (self.mpprcFile != ""):
|
||||
envfile = self.mpprcFile + " and /etc/profile"
|
||||
else:
|
||||
envfile = "/etc/profile and ~/.bashrc"
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_518["GAUSS_51808"] % \
|
||||
checkoutput + "Please check %s." % envfile)
|
||||
|
||||
def checkParameter(self):
|
||||
"""
|
||||
function: Check parameter from command line
|
||||
input: NA
|
||||
output: NA
|
||||
"""
|
||||
# remove HOST_IP info with /etc/profile and environ
|
||||
cmd = "sed -i '/^export[ ]*HOST_IP=/d' /etc/profile"
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if status != 0:
|
||||
self.logger.logExit(ErrorCode.GAUSS_502["GAUSS_50205"]
|
||||
% "/etc/profile" + "The cmd is %s" % cmd)
|
||||
if "HOST_IP" in os.environ.keys():
|
||||
os.environ.pop("HOST_IP")
|
||||
|
||||
# check config file
|
||||
self.checkConfigFile()
|
||||
check_clusterInfo = dbClusterInfo()
|
||||
check_clusterInfo.initFromXml(self.xmlFile)
|
||||
# check user info
|
||||
self.checkUserParameter(check_clusterInfo)
|
||||
# check user group match
|
||||
self.checkUserAndGroup()
|
||||
# check env-val
|
||||
self.checkEnvValueParameter()
|
||||
# check mpprc file
|
||||
self.checkMpprcFile()
|
||||
|
||||
# check log file
|
||||
self.checkLogFile()
|
||||
|
||||
# set LD_LIBRARY_PATH add local lib
|
||||
def setLibPath(self):
|
||||
package_path = os.path.dirname(os.path.realpath(__file__))
|
||||
ld_path = package_path + "/gspylib/clib"
|
||||
rerun = True
|
||||
|
||||
if not 'LD_LIBRARY_PATH' in os.environ:
|
||||
os.environ['LD_LIBRARY_PATH'] = ld_path
|
||||
elif not ld_path in os.environ.get('LD_LIBRARY_PATH'):
|
||||
os.environ['LD_LIBRARY_PATH'] = \
|
||||
ld_path + ":" + os.environ['LD_LIBRARY_PATH']
|
||||
else:
|
||||
rerun = False
|
||||
|
||||
if rerun:
|
||||
try:
|
||||
os.execve(os.path.realpath(__file__), sys.argv, os.environ)
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
|
||||
|
||||
# init global variables
|
||||
def initGlobals(self):
|
||||
"""
|
||||
function: init global parameters
|
||||
input: NA
|
||||
output: NA
|
||||
"""
|
||||
# init the log file
|
||||
self.initLogger("gs_preinstall")
|
||||
|
||||
# get the clusterToolPath
|
||||
self.clusterToolPath = self.getPreClusterToolPath(self.xmlFile)
|
||||
os.environ[DefaultValue.TOOL_PATH_ENV] = self.clusterToolPath
|
||||
|
||||
self.logger.log("Parsing the configuration file.", "addStep")
|
||||
try:
|
||||
# parse the configuration file
|
||||
self.initClusterInfo()
|
||||
self.sshTool = SshTool(self.clusterInfo.getClusterNodeNames(),
|
||||
self.logFile,
|
||||
DefaultValue.TIMEOUT_PSSH_PREINSTALL)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.logExit(str(e))
|
||||
|
||||
# check the local hostname
|
||||
if DefaultValue.GetHostIpOrName() not in \
|
||||
self.clusterInfo.getClusterNodeNames():
|
||||
self.logger.logExit(ErrorCode.GAUSS_516["GAUSS_51619"]
|
||||
% DefaultValue.GetHostIpOrName())
|
||||
self.logger.log("Successfully parsed the configuration file.",
|
||||
"constant")
|
||||
|
||||
def getPreOMLogPath(self, logName, xml):
|
||||
"""
|
||||
function: get the OM log path
|
||||
input: logName, xml
|
||||
output: fullLogPath
|
||||
"""
|
||||
try:
|
||||
fullLogPath = ""
|
||||
# get the log path
|
||||
configedLogPath = self.getOneClusterConfigItem("gaussdbLogPath",
|
||||
xml)
|
||||
DefaultValue.checkPathVaild(configedLogPath)
|
||||
# check gaussdbLogPath is not null
|
||||
if configedLogPath == "":
|
||||
fullLogPath = "%s/%s/om/%s" % (
|
||||
DefaultValue.GAUSSDB_DIR, self.user, logName)
|
||||
else:
|
||||
fullLogPath = "%s/%s/om/%s" % (
|
||||
os.path.normpath(configedLogPath), self.user, logName)
|
||||
|
||||
return fullLogPath
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
|
||||
|
||||
def getOneClusterConfigItem(self, item_name, xml):
|
||||
"""
|
||||
function: get the item_name's value in xml file
|
||||
input: item_name, xml
|
||||
output: item_name's value in the xml
|
||||
"""
|
||||
try:
|
||||
# set the environment variable
|
||||
os.putenv("CLUSTERCONFIGFILE", xml)
|
||||
# get the item_name's value in the xml
|
||||
(retStatus, retValue) = readOneClusterConfigItem(
|
||||
initParserXMLFile(xml), item_name, "cluster")
|
||||
if (retStatus == 0):
|
||||
return os.path.normpath(retValue)
|
||||
elif (retStatus == 2):
|
||||
return ""
|
||||
else:
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50204"]
|
||||
% "the cluster configuration item file"
|
||||
+ " Error: \n%s." % retValue)
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
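getOneClusterConfigItem above delegates to the internal readOneClusterConfigItem helper to pull a single cluster-level value from the XML configuration. For orientation only, a generic ElementTree sketch (illustrative, not part of this commit; the simplified <PARAM name="..." value="..."/> layout is an assumption):

# Illustrative sketch (not part of this commit): read one cluster-level
# <PARAM name="..." value="..."/> entry from an XML file with ElementTree.
# The XML layout assumed here is a simplification of the real config file.
import xml.etree.ElementTree as ET

def read_cluster_param_sketch(xml_path, item_name):
    root = ET.parse(xml_path).getroot()
    for param in root.iter('PARAM'):
        if param.get('name') == item_name:
            return param.get('value')
    return ""  # mirror the "not configured" case above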
|
||||
|
||||
def getPreClusterToolPath(self, xml):
|
||||
"""
|
||||
function: get the cluster tool path
|
||||
input: xml
|
||||
output: configedPath
|
||||
"""
|
||||
try:
|
||||
# get the cluster tool path in the xml file
|
||||
configedPath = self.getOneClusterConfigItem("gaussdbToolPath", xml)
|
||||
DefaultValue.checkPathVaild(configedPath)
|
||||
# check the gaussdbToolPath is not null
|
||||
if configedPath == "":
|
||||
configedPath = DefaultValue.CLUSTER_TOOL_PATH
|
||||
return configedPath
|
||||
except Exception as e:
|
||||
self.context.logger.logExit(str(e))
|
||||
|
||||
def change_lib_path(self):
|
||||
"""
|
||||
if the current gs_preinstall path is /root/gauss_om/username,
change its lib path accordingly
|
||||
:return:
|
||||
"""
|
||||
gsom_path = os.path.realpath(
|
||||
os.path.join(os.path.realpath(__file__), "../../../"))
|
||||
package_path = os.path.dirname(os.path.realpath(__file__))
|
||||
lib_path = os.path.join(package_path, "lib")
|
||||
sys.path.insert(0, lib_path)
|
||||
if gsom_path == DefaultValue.ROOT_SCRIPTS_PATH:
|
||||
self.is_new_root_path = True
|
||||
|
||||
|
||||
def clearHistTimeFormat():
|
||||
cmd = "sed -i '/HISTTIMEFORMAT=/d' /etc/profile"
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if status != 0:
|
||||
GaussLog.exitWithError("Clear HISTTIMEFORMAT from /etc/profile "
|
||||
"failed.\nError: %s\nThe cmd is: %s\n" %
|
||||
(output,cmd))
|
||||
|
||||
if __name__ == '__main__':
|
||||
"""
|
||||
main function
|
||||
"""
|
||||
# check if user is root
|
||||
if os.getuid() != 0:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_501["GAUSS_50104"])
|
||||
clearHistTimeFormat()
|
||||
try:
|
||||
# Objectize class
|
||||
preinstall = Preinstall()
|
||||
# set LD_LIBRARY_PATH
|
||||
preinstall.setLibPath()
|
||||
# parse cmd lines
|
||||
preinstall.parseCommandLine()
|
||||
# check parameters
|
||||
preinstall.checkParameter()
|
||||
# init global variables
|
||||
preinstall.initGlobals()
|
||||
preinstall.change_lib_path()
|
||||
impl = PreinstallImplOLAP(preinstall)
|
||||
# Perform the whole preinstall process
|
||||
impl.run()
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
|
||||
215
script/gs_ssh
Normal file
@ -0,0 +1,215 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : gs_ssh is a utility to execute one command on all nodes.
|
||||
#############################################################################
|
||||
import os
|
||||
import sys
|
||||
|
||||
from gspylib.common.GaussLog import GaussLog
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.common.DbClusterInfo import dbClusterInfo
|
||||
from gspylib.threads.SshTool import SshTool
|
||||
from gspylib.common.ParameterParsecheck import Parameter
|
||||
from gspylib.common.ParallelBaseOM import ParallelBaseOM
|
||||
from gspylib.os.gsOSlib import g_OSlib
|
||||
from gspylib.os.gsfile import g_file
|
||||
from gspylib.common.VersionInfo import VersionInfo
|
||||
|
||||
class ParallelSsh(ParallelBaseOM):
|
||||
"""
|
||||
The class is used to execute one command on all nodes.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
function: initialize the parameters
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
ParallelBaseOM.__init__(self)
|
||||
self.userInfo = ""
|
||||
self.cmd = ""
|
||||
|
||||
def usage(self):
|
||||
"""
|
||||
gs_ssh is a utility to execute one command on all %s cluster nodes.
|
||||
|
||||
Usage:
|
||||
gs_ssh -? | --help
|
||||
gs_ssh -V | --version
|
||||
gs_ssh -c COMMAND
|
||||
|
||||
General options:
|
||||
-c Command to be executed in cluster.
|
||||
-?, --help Show help information for this utility,
|
||||
and exit the command line mode.
|
||||
-V, --version Show version information.
|
||||
"""
|
||||
print(self.usage.__doc__ % VersionInfo.PRODUCT_NAME)
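The usage text above is the method's own docstring, filled in with the product name by %-formatting at print time. A tiny standalone sketch of that pattern (illustrative only, not part of this commit):

# Illustrative sketch (not part of this commit): use a method's docstring as
# a usage template and substitute values only when printing it.
class ToolSketch:
    def usage(self):
        """
        %s_tool runs one command on every cluster node.
        """
        print(self.usage.__doc__ % "demo")

# ToolSketch().usage() prints the docstring with "demo" in place of %s.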
|
||||
|
||||
def parseCommandLine(self):
|
||||
"""
|
||||
function: parse command line
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
##Parse command
|
||||
ParaObj = Parameter()
|
||||
ParaDict = ParaObj.ParameterCommandLine("ssh")
|
||||
# If help is included in the parameter,
|
||||
# the help message is printed and exited
|
||||
if (ParaDict.__contains__("helpFlag")):
|
||||
self.usage()
|
||||
sys.exit(0)
|
||||
# Gets the cmd parameter
|
||||
if (ParaDict.__contains__("cmd")):
|
||||
self.cmd = ParaDict.get("cmd")
|
||||
# The cmd parameter is required
|
||||
if (self.cmd == ""):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"]
|
||||
% 'c' + ".")
|
||||
|
||||
def initGlobal(self):
|
||||
"""
|
||||
function: Init global parameter
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
try:
|
||||
# Get user information
|
||||
self.user = g_OSlib.getUserInfo()["name"]
|
||||
self.clusterInfo = dbClusterInfo()
|
||||
self.clusterInfo.initFromStaticConfig(self.user)
|
||||
|
||||
nodeNames = self.clusterInfo.getClusterNodeNames()
|
||||
self.sshTool = SshTool(nodeNames)
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
|
||||
|
||||
def executeCommand(self):
|
||||
"""
|
||||
function: execute command
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
failedNodes = ""
|
||||
succeedNodes = ""
|
||||
try:
|
||||
# Check that the command to be executed exists on all nodes
|
||||
command = (self.cmd.strip()).split(" ")
|
||||
checkCmd = g_file.SHELL_CMD_DICT["getFullPathForShellCmd"] % \
|
||||
command[0]
|
||||
(status, output) = self.sshTool.getSshStatusOutput(checkCmd)
|
||||
# Resolve all node execution results
|
||||
for node in status.keys():
|
||||
if (status[node] != DefaultValue.SUCCESS):
|
||||
failedNodes += "%s " % node
|
||||
else:
|
||||
succeedNodes += "%s " % node
|
||||
if (failedNodes != ""):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_524["GAUSS_52403"]
|
||||
% (command[0], failedNodes))
|
||||
failedNodes = ""
|
||||
succeedNodes = ""
|
||||
executeCmd = self.cmd
|
||||
#############################################################
|
||||
# Create a temporary shell file
|
||||
cmdFile = "%s/ClusterCall_%d.sh"\
|
||||
% (DefaultValue.getTmpDirFromEnv(), os.getpid())
|
||||
|
||||
g_file.createFile(cmdFile, True, DefaultValue.FILE_MODE)
|
||||
|
||||
# Writes the cmd command to the shell
|
||||
with open(cmdFile, "a") as fp:
|
||||
fp.write("#!/bin/sh")
|
||||
fp.write(os.linesep)
|
||||
fp.write("%s" % executeCmd)
|
||||
fp.write(os.linesep)
|
||||
fp.flush()
|
||||
|
||||
##############################################################
|
||||
cmdDir = DefaultValue.getTmpDirFromEnv() + '/'
|
||||
# Distribute the shell file to the temporary directory
|
||||
# for each node
|
||||
self.sshTool.scpFiles(cmdFile, cmdDir)
|
||||
# Execute the shell file on all nodes
|
||||
cmdExecute = g_file.SHELL_CMD_DICT["execShellFile"] % cmdFile
|
||||
(status, output) = self.sshTool.getSshStatusOutput(cmdExecute)
|
||||
# Resolve the execution results of all nodes
|
||||
for node in status.keys():
|
||||
if (status[node] != DefaultValue.SUCCESS):
|
||||
failedNodes += "%s " % node
|
||||
else:
|
||||
succeedNodes += "%s " % node
|
||||
# Some nodes fail to execute
|
||||
if (failedNodes != "" and succeedNodes != ""):
|
||||
GaussLog.printMessage(
|
||||
"Failed to execute command on %s." % failedNodes)
|
||||
GaussLog.printMessage(
|
||||
"Successfully execute command on %s.\n" % succeedNodes)
|
||||
# All nodes execute successfully
|
||||
elif (failedNodes == ""):
|
||||
GaussLog.printMessage(
|
||||
"Successfully execute command on all nodes.\n")
|
||||
# All nodes fail to execute
|
||||
elif (succeedNodes == ""):
|
||||
GaussLog.printMessage(
|
||||
"Failed to execute command on all nodes.\n")
|
||||
# Output Execution result
|
||||
GaussLog.printMessage("Output:\n%s" % output)
|
||||
# Delete the temporary shell file at all nodes
|
||||
cmdFileRm = g_file.SHELL_CMD_DICT["deleteFile"]\
|
||||
% (cmdFile, cmdFile)
|
||||
self.sshTool.executeCommand(cmdFileRm, "remove files")
|
||||
|
||||
except Exception as e:
|
||||
cmdFileRm = g_file.SHELL_CMD_DICT["deleteFile"]\
|
||||
% (cmdFile, cmdFile)
|
||||
self.sshTool.executeCommand(cmdFileRm, "remove files")
|
||||
GaussLog.exitWithError(str(e))
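executeCommand above follows a four-step flow: verify the command exists on every node, write it into a temporary shell file, distribute and run that file with SshTool, then classify nodes as succeeded or failed and clean up. A reduced sketch of the same idea using plain ssh (illustrative only, not part of this commit; SshTool adds parallelism, timeouts and file distribution on top of this):

# Illustrative sketch (not part of this commit): run one command on several
# hosts over ssh and split the hosts into succeeded / failed groups.
import subprocess

def run_on_nodes_sketch(nodes, command):
    succeeded, failed = [], []
    for node in nodes:
        status, _output = subprocess.getstatusoutput(
            "ssh %s %s" % (node, command))
        (succeeded if status == 0 else failed).append(node)
    return succeeded, failed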
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
function: Perform the whole process
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
# parse cmd lines
|
||||
self.parseCommandLine()
|
||||
# init globals
|
||||
self.initGlobal()
|
||||
# execute command
|
||||
self.executeCommand()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
# main function
|
||||
# Can not run as root
|
||||
if (os.getuid() == 0):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_501["GAUSS_50105"])
|
||||
|
||||
try:
|
||||
parallelSsh = ParallelSsh()
|
||||
parallelSsh.run()
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
|
||||
|
||||
sys.exit(0)
|
||||
1149
script/gs_sshexkey
Normal file
File diff suppressed because it is too large
185
script/gs_uninstall
Normal file
@ -0,0 +1,185 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : gs_uninstall is a utility to uninstall a Gauss200 server.
|
||||
#############################################################################
|
||||
|
||||
import subprocess
|
||||
import os
|
||||
import sys
|
||||
import pwd
|
||||
|
||||
sys.path.append(sys.path[0] + "/../")
|
||||
|
||||
from gspylib.common.GaussLog import GaussLog
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.common.OMCommand import OMCommand
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.common.ParallelBaseOM import ParallelBaseOM
|
||||
from gspylib.common.ParameterParsecheck import Parameter
|
||||
from gspylib.os.gsfile import g_file
|
||||
from impl.uninstall.OLAP.UninstallImplOLAP import UninstallImplOLAP
|
||||
|
||||
#####################################################
|
||||
# Action type
|
||||
#####################################################
|
||||
ACTION_CLEAN_TEMP_DIR = "clean_tmp_dir"
|
||||
|
||||
|
||||
class Uninstall(ParallelBaseOM):
|
||||
"""
|
||||
class about cmd options
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
function: init function
|
||||
"""
|
||||
ParallelBaseOM.__init__(self)
|
||||
self.cleanInstance = False
|
||||
|
||||
def usage(self):
|
||||
"""
|
||||
gs_uninstall is a utility to uninstall a cluster.
|
||||
|
||||
Usage:
|
||||
gs_uninstall -? | --help
|
||||
gs_uninstall -V | --version
|
||||
gs_uninstall [--delete-data] [-L] [-l LOGFILE]
|
||||
|
||||
General options:
|
||||
--delete-data Clean up instance data files.
|
||||
-L Only uninstall local nodes.
|
||||
-l Path of log file.
|
||||
-?, --help Show help information for this utility,
|
||||
and exit the command line mode.
|
||||
-V, --version Show version information.
|
||||
"""
|
||||
print(self.usage.__doc__)
|
||||
|
||||
def parseCommandLine(self):
|
||||
"""
|
||||
function: Parse command line and save to global variable
|
||||
"""
|
||||
ParaObj = Parameter()
|
||||
# Parse the parameter with uninstall
|
||||
ParaDict = ParaObj.ParameterCommandLine("uninstall")
|
||||
|
||||
# check if helpFlag exists
|
||||
if (ParaDict.__contains__("helpFlag")):
|
||||
self.usage()
|
||||
sys.exit(0)
|
||||
# check if -l parameter exists
|
||||
if (ParaDict.__contains__("logFile")):
|
||||
self.logFile = ParaDict.get("logFile")
|
||||
# check if -L parameter exists
|
||||
if (ParaDict.__contains__("localMode")):
|
||||
self.localMode = ParaDict.get("localMode")
|
||||
# check if need clean instances
|
||||
if (ParaDict.__contains__("cleanInstance")):
|
||||
self.cleanInstance = True
|
||||
|
||||
def checkParameter(self):
|
||||
"""
|
||||
function: Check parameter from command line
|
||||
"""
|
||||
# check user
|
||||
self.user = pwd.getpwuid(os.getuid()).pw_name
|
||||
# if no user info, throw error
|
||||
if (self.user == ""):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_503["GAUSS_50308"])
|
||||
# else check user
|
||||
try:
|
||||
DefaultValue.checkUser(self.user, False)
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
|
||||
|
||||
# check log file
|
||||
if (self.logFile == ""):
|
||||
self.logFile = DefaultValue.getOMLogPath(
|
||||
DefaultValue.UNINSTALL_LOG_FILE, self.user, "")
|
||||
if (not os.path.isabs(self.logFile)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_502["GAUSS_50213"] % "log")
|
||||
|
||||
# get user env info
|
||||
self.mpprcFile = DefaultValue.getMpprcFile()
|
||||
|
||||
def initGlobals(self):
|
||||
"""
|
||||
function: init global parameters
|
||||
"""
|
||||
# init log file for uninstall
|
||||
self.initLogger("gs_uninstall")
|
||||
try:
|
||||
# OLAP
|
||||
self.initClusterInfoFromStaticFile(self.user)
|
||||
# Initialize the self.sshTool variable
|
||||
self.initSshTool(self.clusterInfo.getClusterNodeNames(),
|
||||
DefaultValue.TIMEOUT_PSSH_UNINSTALL)
|
||||
except Exception as e:
|
||||
self.logger.logExit(str(e))
|
||||
|
||||
def checkLogFilePath(self):
|
||||
"""
|
||||
function: Check log file path
|
||||
"""
|
||||
clusterPath = []
|
||||
try:
|
||||
# get tool path
|
||||
clusterPath.append(DefaultValue.getClusterToolPath())
|
||||
# get tmp path
|
||||
tmpDir = DefaultValue.getTmpDirFromEnv()
|
||||
clusterPath.append(tmpDir)
|
||||
# get cluster path
|
||||
hostName = DefaultValue.GetHostIpOrName()
|
||||
dirs = self.clusterInfo.getClusterDirectorys(hostName, False)
|
||||
# loop all cluster path
|
||||
for checkdir in dirs.values():
|
||||
clusterPath.extend(checkdir)
|
||||
self.logger.debug("Cluster paths %s." % clusterPath)
|
||||
|
||||
# check directory
|
||||
g_file.checkIsInDirectory(self.logFile, clusterPath)
|
||||
except Exception as e:
|
||||
self.logger.logExit(str(e))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
"""
|
||||
main function
|
||||
"""
|
||||
# check if user is root
|
||||
if (os.getuid() == 0):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_501["GAUSS_50105"])
|
||||
|
||||
try:
|
||||
# Objectize class
|
||||
uninstall = Uninstall()
|
||||
uninstall.parseCommandLine()
|
||||
uninstall.checkParameter()
|
||||
uninstall.initGlobals()
|
||||
|
||||
if (uninstall.xmlFile):
|
||||
pass
|
||||
impl = UninstallImplOLAP(uninstall)
|
||||
|
||||
# Perform the whole uninstall process
|
||||
impl.run()
|
||||
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
|
||||
249
script/gs_upgradectl
Normal file
@ -0,0 +1,249 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : gs_upgradectl is a utility to upgrade a Gauss200 application.
|
||||
#
|
||||
# gs_upgradectl is an upgrade framework, which controls the upgrade process.
# It covers binary upgrade, in-place upgrade and on-line binary upgrade.
|
||||
#
|
||||
# binary upgrade: stops the old cluster, replaces the binaries and starts the
# new cluster; only used when no database objects changed between the old
# cluster and the new cluster.
|
||||
#
|
||||
# on-line binary upgrade: rolling upgrade; upgrades the standby instances
# first, switches over, and then upgrades the master instances. Currently
# only used when no database objects changed between the old cluster and the
# new cluster.
|
||||
#
|
||||
# in-place upgrade: includes the binary upgrade and updates the database
# meta-data (system tables, system views, functions, and so on); used when
# some database objects have changed between the old cluster and the new
# cluster.
|
||||
#############################################################################
|
||||
|
||||
import os
|
||||
import sys
|
||||
import pwd
|
||||
import grp
|
||||
import socket
|
||||
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.common.GaussLog import GaussLog
|
||||
from gspylib.common.ParallelBaseOM import ParallelBaseOM
|
||||
from gspylib.threads.SshTool import SshTool
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.common.ParameterParsecheck import Parameter
|
||||
import impl.upgrade.UpgradeConst as Const
|
||||
from impl.upgrade.OLAP.UpgradeImplOLAP import UpgradeImplOLAP
|
||||
|
||||
|
||||
class Upgrade(ParallelBaseOM):
|
||||
"""
|
||||
The class about upgrade
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
ParallelBaseOM.__init__(self)
|
||||
self.oldClusterInfo = ""
|
||||
self.oldVersion = ""
|
||||
# the directory where information is stored during a binary upgrade
|
||||
self.upgradeBackupPath = ""
|
||||
self.userProfile = ""
|
||||
self.tmpDir = ""
|
||||
self.newClusterAppPath = ""
|
||||
self.oldClusterAppPath = ""
|
||||
self.clusterNodes = []
|
||||
##static parameter
|
||||
self.binTarName = "binary_%s.tar" % DefaultValue.GetHostIpOrName()
|
||||
self.rollback = False
|
||||
self.is_inplace_upgrade = True
|
||||
self.guc_paras = {}
|
||||
self.newClusterVersion = None
|
||||
self.newClusterNumber = None
|
||||
self.oldclusterVersion = None
|
||||
self.oldClusterNumber = None
|
||||
self.forceRollback = False
|
||||
|
||||
def usage(self):
|
||||
"""
|
||||
gs_upgradectl is a utility to upgrade a cluster.
|
||||
|
||||
Usage:
|
||||
gs_upgradectl -? | --help
|
||||
gs_upgradectl -V | --version
|
||||
gs_upgradectl -t chose-strategy [-l LOGFILE]
|
||||
gs_upgradectl -t commit-upgrade -X XMLFILE [-l LOGFILE]
|
||||
|
||||
gs_upgradectl -t auto-upgrade -X XMLFILE [-l LOGFILE]
|
||||
gs_upgradectl -t auto-rollback -X XMLFILE [-l LOGFILE] [--force]
|
||||
|
||||
General options:
|
||||
-?, --help Show help information for this utility,
|
||||
and exit the command line mode.
|
||||
-V, --version Show version information.
|
||||
-t Subcommand for upgrade. It can be
|
||||
chose-strategy, auto-upgrade, auto-rollback,
|
||||
commit-upgrade.
|
||||
-X Path of the XML configuration file of the
|
||||
later version cluster.
|
||||
--force Force to rollback when cluster status is
|
||||
not normal
|
||||
"""
|
||||
|
||||
print(self.usage.__doc__)
|
||||
|
||||
def parseCommandLine(self):
|
||||
"""
|
||||
Parse command line and save to global variable
|
||||
"""
|
||||
# Resolves incoming parameters
|
||||
ParaObj = Parameter()
|
||||
ParaDict = ParaObj.ParameterCommandLine("upgradectl")
|
||||
if "helpFlag" in ParaDict.keys():
|
||||
self.usage()
|
||||
sys.exit(0)
|
||||
|
||||
# get action information
|
||||
if "action" in ParaDict.keys():
|
||||
self.action = ParaDict.get("action")
|
||||
if "confFile" in ParaDict.keys():
|
||||
self.xmlFile = ParaDict.get("confFile")
|
||||
# get logFile information
|
||||
if "logFile" in ParaDict.keys():
|
||||
self.logFile = ParaDict.get("logFile")
|
||||
|
||||
def checkUser(self):
|
||||
"""
|
||||
function: check user
|
||||
"""
|
||||
# check user
|
||||
# more checks on the user will be done after getting the cluster config info
|
||||
# get user information
|
||||
self.user = pwd.getpwuid(os.getuid()).pw_name
|
||||
# get group information
|
||||
self.group = grp.getgrgid(pwd.getpwnam(self.user).pw_gid).gr_name
|
||||
# if the user or group is null, exit
|
||||
if (self.user == "" or self.group == ""):
|
||||
raise Exception(ErrorCode.GAUSS_503["GAUSS_50308"])
|
||||
# if the user or group is 'root', exit
|
||||
if (self.user == "root" or self.group == "root"):
|
||||
raise Exception(ErrorCode.GAUSS_501["GAUSS_50105"])
|
||||
|
||||
# we must make sure the env 'GAUSSHOME', 'GS_CLUSTER_NAME',
|
||||
# 'GAUSS_ENV' exists
|
||||
if (DefaultValue.getEnvironmentParameterValue("GAUSSHOME",
|
||||
self.user) == ""):
|
||||
raise Exception(ErrorCode.GAUSS_518["GAUSS_51800"] % "$GAUSSHOME")
|
||||
if (DefaultValue.getEnvironmentParameterValue("GS_CLUSTER_NAME",
|
||||
self.user) == ""):
|
||||
raise Exception(
|
||||
ErrorCode.GAUSS_518["GAUSS_51800"] % "$GS_CLUSTER_NAME")
|
||||
if (DefaultValue.getEnvironmentParameterValue("GAUSS_ENV",
|
||||
self.user) == ""):
|
||||
raise Exception(ErrorCode.GAUSS_518["GAUSS_51800"] % "$GAUSS_ENV")
|
||||
|
||||
# the environment variable GPHOME is needed to access the python tools
|
||||
GPHOME = DefaultValue.getEnv(DefaultValue.TOOL_PATH_ENV)
|
||||
if (GPHOME == None or GPHOME == ""):
|
||||
raise Exception(ErrorCode.GAUSS_518["GAUSS_51800"] % "$GPHOME")
|
||||
|
||||
def checkParameter(self):
|
||||
"""
|
||||
function: Check parameter from command line
|
||||
"""
|
||||
if (self.action == ""):
|
||||
raise Exception(ErrorCode.GAUSS_500["GAUSS_50001"] % "t" + ".")
|
||||
# when we do auto-upgrade, auto-rollback or commit-upgrade,
|
||||
# the '-X' option must be provided and the xml file must exist.
|
||||
if (self.action != Const.ACTION_CHOSE_STRATEGY):
|
||||
if (self.xmlFile == ""):
|
||||
raise Exception(ErrorCode.GAUSS_500["GAUSS_50001"] % 'X' + ".")
|
||||
if (not os.path.exists(self.xmlFile)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] %
|
||||
self.xmlFile)
|
||||
|
||||
# check mpprc file path
|
||||
# get mpprcFile by MPPDB_ENV_SEPARATE_PATH. Even if the return value
|
||||
# is "" or None, no need to pay attention
|
||||
self.mpprcFile = DefaultValue.getEnv(DefaultValue.MPPRC_FILE_ENV)
|
||||
|
||||
# make sure which env file we use
|
||||
# If self.mpprcFile is not "" and None, return self.mpprcFile; else
|
||||
# return '~/.bashrc'
|
||||
self.userProfile = DefaultValue.getMpprcFile()
|
||||
self.checkUser()
|
||||
|
||||
# check log file
|
||||
if (self.logFile == ""):
|
||||
self.logFile = DefaultValue.getOMLogPath(
|
||||
DefaultValue.UPGRADE_LOG_FILE, self.user, "", "")
|
||||
if (not os.path.isabs(self.logFile)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50213"] % "log")
|
||||
|
||||
self.initLogger(self.action)
|
||||
|
||||
def initGlobalInfos(self):
|
||||
"""
|
||||
function: init global infos
|
||||
"""
|
||||
self.logger.debug("Init global infos")
|
||||
|
||||
# init cluster info
|
||||
if (self.xmlFile):
|
||||
self.initClusterInfo()
|
||||
else:
|
||||
self.initClusterInfoFromStaticFile(self.user)
|
||||
self.logger.debug("Successfully init global infos")
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
"""
|
||||
main function
|
||||
"""
|
||||
if os.getuid() == 0:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_501["GAUSS_50105"])
|
||||
|
||||
try:
|
||||
REPEAT = False
|
||||
upgrade = Upgrade()
|
||||
upgrade.parseCommandLine()
|
||||
upgrade.checkParameter()
|
||||
|
||||
# set action flag file
|
||||
DefaultValue.setActionFlagFile("gs_upgradectl", upgrade.logger)
|
||||
|
||||
upgrade.initGlobalInfos()
|
||||
|
||||
# Obtain clusterNodes from the cluster information
|
||||
for dbNode in upgrade.clusterInfo.dbNodes:
|
||||
upgrade.clusterNodes.append(dbNode.name)
|
||||
if len(upgrade.clusterNodes) == 0:
|
||||
raise Exception(ErrorCode.GAUSS_512["GAUSS_51201"])
|
||||
|
||||
impl = UpgradeImplOLAP(upgrade)
|
||||
impl.run()
|
||||
except Exception as e:
|
||||
if REPEAT:
|
||||
upgrade.sshTool = SshTool(upgrade.clusterNodes, upgrade.localLog,
|
||||
DefaultValue.TIMEOUT_PSSH_COMMON)
|
||||
GaussLog.exitWithError(str(e))
|
||||
finally:
|
||||
DefaultValue.setActionFlagFile("gs_upgradectl", None, False)
|
||||
0
script/gspylib/__init__.py
Normal file
43
script/gspylib/common/CheckPythonVersion.py
Normal file
@ -0,0 +1,43 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding:utf-8 -*-
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
import sys
|
||||
import platform
|
||||
import re
|
||||
|
||||
|
||||
def checkPythonVersion():
|
||||
pythonVersion = sys.version_info[0:2]
|
||||
distName = platform.platform()
|
||||
if re.search("oe1", distName) is not None:
|
||||
if not pythonVersion == (3, 7):
|
||||
raise Exception("[GAUSS-52200] : version of python"
|
||||
" is not correct: %s." %
|
||||
distName + " should use Python 3.7.*")
|
||||
else:
|
||||
if not pythonVersion >= (3, 6):
|
||||
raise Exception("[GAUSS-52200] : version of python"
|
||||
" is not correct: %s." %
|
||||
distName + " should use Python 3.6.*")
|
||||
return True
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
try:
|
||||
checkPythonVersion()
|
||||
except Exception as e:
|
||||
raise Exception(e)
|
||||
407
script/gspylib/common/CommandLineParser.py
Normal file
407
script/gspylib/common/CommandLineParser.py
Normal file
@ -0,0 +1,407 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
"""
|
||||
@brief The command line parser module.
|
||||
@details Parse command line parameters and store them as variables with
|
||||
the same name.
|
||||
"""
|
||||
|
||||
# export module
|
||||
__all__ = ["CommandLineParser", "Option"]
|
||||
|
||||
# system import.
|
||||
import sys as _sys
|
||||
import optparse as _optparse
|
||||
|
||||
# import typing for comment.
|
||||
try:
|
||||
from typing import Dict
|
||||
from typing import List
|
||||
from typing import Tuple
|
||||
from typing import Any
|
||||
except ImportError:
|
||||
Dict = dict
|
||||
List = list
|
||||
Tuple = tuple
|
||||
Any = str or int or complex or list or dict
|
||||
|
||||
# local import
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
|
||||
|
||||
class Option(_optparse.Option, object):
|
||||
"""
|
||||
The command line option class used by the "OptionParser" instance.
|
||||
But this class does not accept the "dest"
|
||||
parameter.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
"""
|
||||
Initialize the command line option instance.
|
||||
|
||||
:param args: The command line option strings (at most two,
|
||||
e.g. the short and long form).
|
||||
:param kwargs: The command line command parameters.
|
||||
:param action: The named parameter, specified command line
|
||||
parameter action allowed by option parser.
|
||||
:param type: The named parameter, specified command line
|
||||
parameter type of current option.
|
||||
:param default: The named parameter, specified command line
|
||||
parameter default value.
|
||||
:param nargs: The named parameter, specified the number of
|
||||
the command line parameter value.
|
||||
:param const: The named parameter, specified the const
|
||||
value of the command line parameter.
|
||||
:param choices: The named parameter, specified the choice
|
||||
range of the command line parameter, the item
|
||||
in the choices list must be string type,
|
||||
and must not set the "type" parameter.
|
||||
Otherwise, it will lead to unexpected errors.
|
||||
:param callback: The named parameter, specified the handler
|
||||
function for the command line parameter.
|
||||
:param callback_args: The named parameter, specified the unnamed
|
||||
parameters of the handler function for the
|
||||
command line parameter.
|
||||
:param callback_kwargs: The named parameter, specified the named
|
||||
parameters of the handler function for the
|
||||
command line parameter.
|
||||
:param help: The named parameter, the help string for the
|
||||
command line parameter.
|
||||
:param metavar: The named parameter, the display string for
|
||||
the command line parameter value.
|
||||
|
||||
:type args: str
|
||||
:type kwargs: *
|
||||
:type action: str
|
||||
:type type: str
|
||||
:type default: *
|
||||
:type nargs: int
|
||||
:type const: *
|
||||
:type choices: List[str]
|
||||
:type callback: function
|
||||
:type callback_args: tuple
|
||||
:type callback_kwargs: dict
|
||||
:type help: str
|
||||
:type metavar: str
|
||||
"""
|
||||
# Remove the "dest" parameter.
|
||||
if "dest" in kwargs:
|
||||
kwargs.pop("dest")
|
||||
# Initialize the command line option instance.
|
||||
_optparse.Option.__init__(self, *args, **kwargs)
|
||||
|
||||
|
||||
class OptionParser(_optparse.OptionParser, object):
|
||||
"""
|
||||
The command line option parser.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
"""
|
||||
Initialize the internal command line parser.
|
||||
|
||||
:param args: The additional unnamed parameter of the
|
||||
command line parser.
|
||||
:param kwargs: The additional named parameter of the
|
||||
command line parser.
|
||||
:param usage: A usage string for your program. Before
|
||||
it is displayed to the user, "%prog" will
|
||||
be expanded to the name of your program
|
||||
(prog or os.path.basename(sys.argv[0])).
|
||||
:param option_list: A list of option instance for this parser.
|
||||
:param option_class: The command line option class type,
|
||||
default is "optparse.Option".
|
||||
:param version: The version string for this scripts.
|
||||
:param conflict_handler: The solutions after command line options
|
||||
conflict, "resolve" will override before
|
||||
option, "errors" will raise error,
|
||||
default is "error".
|
||||
:param description: A paragraph of text giving a brief
|
||||
overview of your program. optparse re-formats
|
||||
this paragraph to fit the current
|
||||
terminal width and prints it when the user
|
||||
requests help (after usage, but before
|
||||
the list of options).
|
||||
:param formatter: The formatter instance for the
|
||||
description information.
|
||||
:param add_help_option: Whether to add the help option
|
||||
automatically.
|
||||
:param prog: The name of the current program (to
|
||||
override os.path.basename(sys.argv[0])).
|
||||
:param epilog: A paragraph of help text to print after
|
||||
option help.
|
||||
|
||||
:type args: str | list | bool | type |
|
||||
_optparse.IndentedHelpFormatter
|
||||
:type kwargs: str | list | bool | type |
|
||||
_optparse.IndentedHelpFormatter
|
||||
:type usage: str
|
||||
:type option_list: List[Option]
|
||||
:type option_class: type
|
||||
:type version: str
|
||||
:type conflict_handler: str
|
||||
:type description: str
|
||||
:type formatter: _optparse.IndentedHelpFormatter
|
||||
:type add_help_option: bool
|
||||
:type prog: str
|
||||
:type epilog: str
|
||||
"""
|
||||
# Call the parent init function.
|
||||
_optparse.OptionParser.__init__(self, *args, **kwargs)
|
||||
|
||||
def print_help(self, _file=_sys.stderr):
|
||||
"""
|
||||
print_help(file : file = stderr)
|
||||
|
||||
Print an extended help message, listing all options and any help
|
||||
text provided with them, to 'file' (default
|
||||
stderr).
|
||||
|
||||
:param _file: The file descriptor instance.
|
||||
:type _file: file
|
||||
|
||||
:rtype: None
|
||||
"""
|
||||
_optparse.OptionParser.print_help(self, _file)
|
||||
|
||||
def print_usage(self, _file=_sys.stderr):
|
||||
"""
|
||||
print_usage(file : file = stderr)
|
||||
|
||||
Print the usage message for the current program (self.usage) to
|
||||
'file' (default stderr). Any occurrence of the
|
||||
string "%prog" in self.usage is replaced with the name of the
|
||||
current program (basename of sys.argv[0]).
|
||||
Does nothing if self.usage is empty or not defined.
|
||||
|
||||
:param _file: The file descriptor instance.
|
||||
:type _file: file
|
||||
|
||||
:rtype: None
|
||||
"""
|
||||
_optparse.OptionParser.print_usage(self, _file)
|
||||
|
||||
def print_version(self, _file=_sys.stderr):
|
||||
"""
|
||||
print_version(file : file = stderr)
|
||||
|
||||
Print the version message for this program (self.version) to 'file'
|
||||
(default stderr). As with print_usage(),
|
||||
any occurrence of "%prog" in self.version is replaced by the
|
||||
current program's name. Does nothing if
|
||||
self.version is empty or undefined.
|
||||
|
||||
:param _file: The file descriptor instance.
|
||||
:type _file: file
|
||||
|
||||
:rtype: None
|
||||
"""
|
||||
_optparse.OptionParser.print_version(self, _file)
|
||||
|
||||
def error(self, _msg):
|
||||
"""
|
||||
error(msg : string)
|
||||
|
||||
Print a usage message incorporating 'msg' to stderr and exit. If you
|
||||
override this in a subclass, it should not
|
||||
return -- it should either exit or raise an exception.
|
||||
|
||||
:param _msg: The error message.
|
||||
:type _msg: str
|
||||
|
||||
:rtype: None
|
||||
"""
|
||||
raise Exception(ErrorCode.GAUSS_500["GAUSS_50015"] % _msg)
|
||||
|
||||
|
||||
class CommandLineMetaClass(type):
|
||||
"""
|
||||
The command line parser metaclass.
|
||||
|
||||
Used to magically save command line parsing options instances.
|
||||
"""
|
||||
|
||||
def __new__(mcs, name, bases, attrs):
|
||||
"""
|
||||
Create a new command line parser class.
|
||||
|
||||
:param name: The name of the current class.
|
||||
:param bases: The parent instances of the current class.
|
||||
:param attrs: The attribute dict of the current class.
|
||||
|
||||
:type name: str
|
||||
:type bases: Tuple[type]
|
||||
:type attrs: Dict[str, Any]
|
||||
:return: The newly created class object.
|
||||
"""
|
||||
# If it is the base command line parser class, we will do nothing.
|
||||
if name == "CommandLineOption":
|
||||
return type.__new__(mcs, name, bases, attrs)
|
||||
|
||||
# Store the command line option instance mapping.
|
||||
mappings = {}
|
||||
# Store the attribute key-value pair list of the current class.
|
||||
items = list(attrs.items())
|
||||
|
||||
# Store the command line option instance to the mapping, and remove
|
||||
# it from current class attribute.
|
||||
for key, value in items:
|
||||
if isinstance(value, Option):
|
||||
mappings.setdefault(key, value)
|
||||
attrs.pop(key)
|
||||
|
||||
# Add the additional function.
|
||||
if value.action in ["append", "append_const", "count"]:
|
||||
def ensure_value(_self, _attr, _value):
|
||||
"""
|
||||
Ensure the object attribute exists: if it is missing or None,
|
||||
set it to the given value, then return it.
|
||||
|
||||
:param _self: The object instance.
|
||||
:param _attr: The object attribute name.
|
||||
:param _value: The object attribute value.
|
||||
|
||||
:type _self: Option
|
||||
:type _attr: str
|
||||
:type _value: *
|
||||
|
||||
:return: Return the object attribute value.
|
||||
:rtype: *
|
||||
"""
|
||||
if not hasattr(_self, _attr) or getattr(_self,
|
||||
_attr) is None:
|
||||
setattr(_self, _attr, _value)
|
||||
return getattr(_self, _attr)
|
||||
|
||||
# Add function.
|
||||
attrs["ensure_value"] = ensure_value
|
||||
|
||||
# Store the mapping into a named parameter of current class.
|
||||
attrs["__mappings__"] = mappings
|
||||
|
||||
return type.__new__(mcs, name, bases, attrs)
|
||||
|
||||
|
||||
class CommandLineParser(object):
|
||||
"""
|
||||
The base class of the command line parser.
|
||||
"""
|
||||
# Set the metaclass type; this declaration style is not supported in Python 3.x.
|
||||
__metaclass__ = CommandLineMetaClass
|
||||
|
||||
def __init__(self, _parameters=None, *args, **kwargs):
|
||||
"""
|
||||
Initialize the command line parser.
|
||||
|
||||
:param _parameters: The command line parameters list,
|
||||
default is sys.argv.
|
||||
:param args: The additional unnamed parameter of the
|
||||
command line parser.
|
||||
:param kwargs: The additional named parameter of the
|
||||
command line parser.
|
||||
:param usage: A usage string for your program. Before
|
||||
it is displayed to the user, "%prog" will
|
||||
be expanded to the name of your program
|
||||
(prog or os.path.basename(sys.argv[0])).
|
||||
:param option_list: A list of option instance for this parser.
|
||||
:param option_class: The command line option class type,
|
||||
default is "optparse.Option".
|
||||
:param version: The version string for this scripts.
|
||||
:param conflict_handler: The solutions after command line options
|
||||
conflict, "resolve" will override before
|
||||
option, "errors" will raise error,
|
||||
default is "error".
|
||||
:param description: A paragraph of text giving a brief
|
||||
overview of your program. optparse re-formats
|
||||
this paragraph to fit the current
|
||||
terminal width and prints it when the user
|
||||
requests help (after usage, but before
|
||||
the list of options).
|
||||
:param formatter: The formatter instance for the
|
||||
description information.
|
||||
:param add_help_option: Whether to add the help option instance
|
||||
automatically.
|
||||
:param prog: The name of the current program (to
|
||||
override os.path.basename(sys.argv[0])).
|
||||
:param epilog: A paragraph of help text to print after
|
||||
option help.
|
||||
|
||||
:type _parameters: List[str] | None
|
||||
:type args: str | list | bool | type |
|
||||
_optparse.IndentedHelpFormatter
|
||||
:type kwargs: str | list | bool | type |
|
||||
_optparse.IndentedHelpFormatter
|
||||
:type usage: str
|
||||
:type option_list: List[Option]
|
||||
:type option_class: type
|
||||
:type version: str
|
||||
:type conflict_handler: str
|
||||
:type description: str
|
||||
:type formatter: _optparse.IndentedHelpFormatter
|
||||
:type add_help_option: bool
|
||||
:type prog: str
|
||||
:type epilog: str
|
||||
"""
|
||||
# Create a new command line parser.
|
||||
opt = OptionParser(*args, **kwargs)
|
||||
|
||||
# Add the "dest" attribute to the command line option instance,
|
||||
# and add the option instance to the parser.
|
||||
# noinspection PyUnresolvedReferences
|
||||
for key, value in list(self.__mappings__.items()):
|
||||
setattr(value, "dest", key)
|
||||
opt.add_option(value)
|
||||
|
||||
# Parse the command line parameter.
|
||||
if not _parameters:
|
||||
_parameters = _sys.argv[1:]
|
||||
_, unknown_args = opt.parse_args(_parameters, self)
|
||||
|
||||
# If a command line parameter is not supplied by the user, we will
|
||||
# set it to "None".
|
||||
# noinspection PyUnresolvedReferences
|
||||
for key in list(self.__mappings__.keys()):
|
||||
if not hasattr(self, key):
|
||||
# noinspection PyUnresolvedReferences
|
||||
value = self.__mappings__.get(key)
|
||||
if hasattr(value, "default") and getattr(value,
|
||||
"default") != \
|
||||
_optparse.NO_DEFAULT:
|
||||
setattr(self, key, getattr(value, "default"))
|
||||
elif hasattr(value, "const") and getattr(value,
|
||||
"const") != \
|
||||
_optparse.NO_DEFAULT:
|
||||
setattr(self, key, getattr(value, "const"))
|
||||
else:
|
||||
setattr(self, key, None)
|
||||
|
||||
# If there are arguments that could not be parsed, save them.
|
||||
if unknown_args:
|
||||
setattr(self, "unknown_args", unknown_args)
|
||||
|
||||
|
||||
class ExecuteCommand(object):
|
||||
"""
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
|
||||
"""
|
||||
pass
|
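# Illustrative usage sketch (an assumption, not part of the original module):
# options declared as class attributes are parsed into instance attributes of
# the same name. "DemoOptions", "-U/--user" and "--verbose" are hypothetical
# names used only for this example. Under Python 3 the metaclass must be
# passed explicitly, because the "__metaclass__" attribute above is the
# Python 2 style declaration.
if __name__ == "__main__":
    class DemoOptions(CommandLineParser, metaclass=CommandLineMetaClass):
        user = Option("-U", "--user", type="string", help="cluster user name")
        verbose = Option("--verbose", action="count", help="verbosity level")

    demo = DemoOptions(["-U", "omm", "--verbose"])
    # demo.user == "omm"; demo.verbose == 1; unsupplied options default to None
    print(demo.user, demo.verbose)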
||||
5411
script/gspylib/common/Common.py
Normal file
5411
script/gspylib/common/Common.py
Normal file
File diff suppressed because it is too large
6392
script/gspylib/common/DbClusterInfo.py
Normal file
6392
script/gspylib/common/DbClusterInfo.py
Normal file
File diff suppressed because it is too large
1009
script/gspylib/common/DbClusterStatus.py
Normal file
1009
script/gspylib/common/DbClusterStatus.py
Normal file
File diff suppressed because it is too large
1280
script/gspylib/common/ErrorCode.py
Normal file
1280
script/gspylib/common/ErrorCode.py
Normal file
File diff suppressed because it is too large
1851
script/gspylib/common/GaussLog.py
Normal file
1851
script/gspylib/common/GaussLog.py
Normal file
File diff suppressed because it is too large
3045
script/gspylib/common/GaussStat.py
Normal file
3045
script/gspylib/common/GaussStat.py
Normal file
File diff suppressed because it is too large
166
script/gspylib/common/LocalBaseOM.py
Normal file
166
script/gspylib/common/LocalBaseOM.py
Normal file
@ -0,0 +1,166 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
#############################################################################
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.append(sys.path[0] + "/../../")
|
||||
from gspylib.common.GaussLog import GaussLog
|
||||
from gspylib.common.DbClusterInfo import dbClusterInfo
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.common.VersionInfo import VersionInfo
|
||||
from gspylib.os.gsOSlib import g_OSlib
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.component.Kernel.DN_OLAP.DN_OLAP import DN_OLAP
|
||||
|
||||
|
||||
class LocalBaseOM(object):
|
||||
"""
|
||||
Base class for local command
|
||||
"""
|
||||
|
||||
def __init__(self, logFile=None, user=None, clusterConf=None,
|
||||
dwsMode=False, initParas=None, gtmInitParas=None):
|
||||
'''
|
||||
Constructor
|
||||
'''
|
||||
if (logFile is not None):
|
||||
self.logger = GaussLog(logFile, self.__class__.__name__)
|
||||
else:
|
||||
self.logger = None
|
||||
self.clusterInfo = None
|
||||
self.dbNodeInfo = None
|
||||
self.clusterConfig = clusterConf
|
||||
self.user = user
|
||||
self.group = ""
|
||||
self.dws_mode = dwsMode
|
||||
if initParas is None:
|
||||
initParas = []
|
||||
self.initParas = initParas
|
||||
if gtmInitParas is None:
|
||||
gtmInitParas = []
|
||||
self.gtmInitParas = gtmInitParas
|
||||
self.etcdCons = []
|
||||
self.cmCons = []
|
||||
self.gtmCons = []
|
||||
self.cnCons = []
|
||||
self.dnCons = []
|
||||
self.gtsCons = []
|
||||
|
||||
def initComponent(self):
|
||||
"""
|
||||
function: Init component
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
self.initKernelComponent()
|
||||
|
||||
def initComponentAttributes(self, component):
|
||||
"""
|
||||
function: Init component attributes on current node
|
||||
input : Object component
|
||||
output: NA
|
||||
"""
|
||||
component.logger = self.logger
|
||||
component.binPath = "%s/bin" % self.clusterInfo.appPath
|
||||
component.dwsMode = self.dws_mode
|
||||
|
||||
def initKernelComponent(self):
|
||||
"""
|
||||
function: Init kernel component on current node
|
||||
input : Object nodeInfo
|
||||
output: NA
|
||||
"""
|
||||
for inst in self.dbNodeInfo.datanodes:
|
||||
component = DN_OLAP()
|
||||
# init component cluster type
|
||||
component.clusterType = self.clusterInfo.clusterType
|
||||
component.instInfo = inst
|
||||
component.instInfo.peerInstanceInfos = \
|
||||
self.clusterInfo.getPeerInstance(component.instInfo)
|
||||
self.initComponentAttributes(component)
|
||||
component.initParas = self.initParas
|
||||
self.dnCons.append(component)
|
||||
|
||||
def readConfigInfo(self):
|
||||
"""
|
||||
function: Read config from static config file
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
try:
|
||||
self.clusterInfo = dbClusterInfo()
|
||||
hostName = DefaultValue.GetHostIpOrName()
|
||||
dynamicFileExist = False
|
||||
if self.__class__.__name__ == "Start":
|
||||
dynamicFileExist = \
|
||||
self.clusterInfo.dynamicConfigExists(self.user)
|
||||
if dynamicFileExist:
|
||||
self.clusterInfo.readDynamicConfig(self.user)
|
||||
self.dbNodeInfo = self.clusterInfo.getDbNodeByName(hostName)
|
||||
else:
|
||||
self.clusterInfo.initFromStaticConfig(self.user)
|
||||
self.dbNodeInfo = self.clusterInfo.getDbNodeByName(hostName)
|
||||
if self.dbNodeInfo is None:
|
||||
self.logger.logExit(ErrorCode.GAUSS_516["GAUSS_51619"] %
|
||||
hostName)
|
||||
except Exception as e:
|
||||
self.logger.logExit(str(e))
|
||||
|
||||
self.logger.debug("Instance information on local node:\n%s" %
|
||||
str(self.dbNodeInfo))
|
||||
|
||||
def readConfigInfoByXML(self):
|
||||
"""
|
||||
function: Read config from xml config file
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
try:
|
||||
if (self.clusterConfig is None):
|
||||
self.logger.logExit(ErrorCode.GAUSS_502["GAUSS_50201"] %
|
||||
"XML configuration file")
|
||||
static_config_file = "%s/bin/cluster_static_config" % \
|
||||
DefaultValue.getInstallDir(self.user)
|
||||
self.clusterInfo = dbClusterInfo()
|
||||
self.clusterInfo.initFromXml(self.clusterConfig,
|
||||
static_config_file)
|
||||
hostName = DefaultValue.GetHostIpOrName()
|
||||
self.dbNodeInfo = self.clusterInfo.getDbNodeByName(hostName)
|
||||
if (self.dbNodeInfo is None):
|
||||
self.logger.logExit(ErrorCode.GAUSS_516["GAUSS_51619"] %
|
||||
hostName)
|
||||
except Exception as e:
|
||||
self.logger.logExit(str(e))
|
||||
self.logger.debug("Instance information on local node:\n%s" %
|
||||
str(self.dbNodeInfo))
|
||||
|
||||
def getUserInfo(self):
|
||||
"""
|
||||
Get user and group
|
||||
"""
|
||||
if os.path.islink(self.clusterInfo.appPath):
|
||||
appPath = os.path.realpath(self.clusterInfo.appPath)
|
||||
elif os.path.exists(self.clusterInfo.appPath):
|
||||
appPath = self.clusterInfo.appPath
|
||||
else:
|
||||
commitid = VersionInfo.getCommitid()
|
||||
appPath = self.clusterInfo.appPath + "_" + commitid
|
||||
self.logger.debug("Get the install path %s user info." % appPath)
|
||||
(self.user, self.group) = g_OSlib.getPathOwner(appPath)
|
||||
if (self.user == "" or self.group == ""):
|
||||
self.logger.logExit(ErrorCode.GAUSS_503["GAUSS_50308"])
|
||||
322
script/gspylib/common/OMCommand.py
Normal file
322
script/gspylib/common/OMCommand.py
Normal file
@ -0,0 +1,322 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : OMCommand.py is a utility to execute OM commands
|
||||
#############################################################################
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import re
|
||||
import subprocess
|
||||
from multiprocessing.dummy import Pool as ThreadPool
|
||||
|
||||
sys.path.append(sys.path[0] + "/../../")
|
||||
from gspylib.common.DbClusterInfo import dbClusterInfo
|
||||
from gspylib.common.Common import DefaultValue, ClusterCommand, \
|
||||
TempfileManagement
|
||||
from gspylib.common.DbClusterStatus import DbClusterStatus
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.os.gsplatform import g_Platform
|
||||
|
||||
|
||||
class OMCommand():
|
||||
"""
|
||||
OM command utility class
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
'''
|
||||
Constructor
|
||||
'''
|
||||
|
||||
@staticmethod
|
||||
def getLocalScript(script):
|
||||
"""
|
||||
function: get local script by GPHOME
|
||||
input : script, path
|
||||
output: path
|
||||
"""
|
||||
Current_Path = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if os.getgid() != 0:
|
||||
gp_home = DefaultValue.getEnv("GPHOME")
|
||||
Current_Path = os.path.join(gp_home, "script/gspylib/common")
|
||||
|
||||
LocalScript = {
|
||||
"Local_Backup": os.path.normpath(
|
||||
Current_Path + "/../../local/Backup.py"),
|
||||
"Local_Check_Config": os.path.normpath(
|
||||
Current_Path + "/../../local/CheckConfig.py"),
|
||||
"Local_Check_Install": os.path.normpath(
|
||||
Current_Path + "/../../local/CheckInstall.py"),
|
||||
"Local_Check_Uninstall": os.path.normpath(
|
||||
Current_Path + "/../../local/CheckUninstall.py"),
|
||||
"Local_Clean_Instance": os.path.normpath(
|
||||
Current_Path + "/../../local/CleanInstance.py"),
|
||||
"Local_Clean_OsUser": os.path.normpath(
|
||||
Current_Path + "/../../local/CleanOsUser.py"),
|
||||
"Local_Config_Hba": os.path.normpath(
|
||||
Current_Path + "/../../local/ConfigHba.py"),
|
||||
"Local_Config_Instance": os.path.normpath(
|
||||
Current_Path + "/../../local/ConfigInstance.py"),
|
||||
"Local_Init_Instance": os.path.normpath(
|
||||
Current_Path + "/../../local/InitInstance.py"),
|
||||
"Local_Install": os.path.normpath(
|
||||
Current_Path + "/../../local/Install.py"),
|
||||
"Local_Restore": os.path.normpath(
|
||||
Current_Path + "/../../local/Restore.py"),
|
||||
"Local_Uninstall": os.path.normpath(
|
||||
Current_Path + "/../../local/Uninstall.py"),
|
||||
"Local_PreInstall": os.path.normpath(
|
||||
Current_Path + "/../../local/PreInstallUtility.py"),
|
||||
"Local_Check_PreInstall": os.path.normpath(
|
||||
Current_Path + "/../../local/CheckPreInstall.py"),
|
||||
"Local_UnPreInstall": os.path.normpath(
|
||||
Current_Path + "/../../local/UnPreInstallUtility.py"),
|
||||
"Local_Roach": os.path.normpath(
|
||||
Current_Path + "/../../local/LocalRoach.py"),
|
||||
"Gauss_UnInstall": os.path.normpath(
|
||||
Current_Path + "/../../gs_uninstall"),
|
||||
"Gauss_Backup": os.path.normpath(
|
||||
Current_Path + "/../../gs_backup"),
|
||||
"Local_CheckOS": os.path.normpath(
|
||||
Current_Path + "/../../local/LocalCheckOS.py"),
|
||||
"Local_Check": os.path.normpath(
|
||||
Current_Path + "/../../local/LocalCheck.py"),
|
||||
"LOCAL_PERFORMANCE_CHECK": os.path.normpath(
|
||||
Current_Path + "/../../local/LocalPerformanceCheck.py"),
|
||||
"Gauss_CheckOS": os.path.normpath(
|
||||
Current_Path + "/../../gs_checkos"),
|
||||
"Gauss_PreInstall": os.path.normpath(
|
||||
Current_Path + "/../../gs_preinstall"),
|
||||
"Gauss_Replace": os.path.normpath(
|
||||
Current_Path + "/../../gs_replace"),
|
||||
"Gauss_Om": os.path.normpath(Current_Path + "/../../gs_om"),
|
||||
"UTIL_GAUSS_STAT": os.path.normpath(
|
||||
Current_Path + "/../../gspylib/common/GaussStat.py"),
|
||||
"Gauss_Check": os.path.normpath(Current_Path + "/../../gs_check"),
|
||||
"Local_Collect": os.path.normpath(
|
||||
Current_Path + "/../../local/LocalCollect.py"),
|
||||
"Local_Kerberos": os.path.normpath(
|
||||
Current_Path + "/../../local/KerberosUtility.py"),
|
||||
"Local_Execute_Sql": os.path.normpath(
|
||||
Current_Path + "/../../local/ExecuteSql.py"),
|
||||
"Local_StartInstance": os.path.normpath(
|
||||
Current_Path + "/../../local/StartInstance.py"),
|
||||
"Local_StopInstance": os.path.normpath(
|
||||
Current_Path + "/../../local/StopInstance.py"),
|
||||
"Local_Check_Upgrade": os.path.normpath(
|
||||
Current_Path + "/../../local/CheckUpgrade.py"),
|
||||
"Local_Upgrade_Utility": os.path.normpath(
|
||||
Current_Path + "/../../local/UpgradeUtility.py")
|
||||
}
|
||||
|
||||
return "python3 '%s'" % LocalScript[script]
|
||||
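# Illustrative result (the path shown is hypothetical):
#   OMCommand.getLocalScript("Local_Install")
# would return a string such as
#   python3 '/opt/.../script/local/Install.py'
# which callers embed in a larger shell command line.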
|
||||
@staticmethod
|
||||
def getSetCronCmd(user, appPath):
|
||||
"""
|
||||
function: Set the crontab
|
||||
input : user, appPath
|
||||
output: cmd
|
||||
"""
|
||||
log_path = DefaultValue.getOMLogPath(DefaultValue.OM_MONITOR_DIR_FILE,
|
||||
"", appPath)
|
||||
cronFile = "%s/gauss_cron_%d" % (
|
||||
DefaultValue.getTmpDirFromEnv(), os.getpid())
|
||||
cmd = "crontab -l > %s;" % cronFile
|
||||
cmd += "sed -i '/\\/bin\\/om_monitor/d' %s; " % cronFile
|
||||
cmd += "echo \"*/1 * * * * source /etc/profile;(if [ -f ~/.profile " \
|
||||
"];then source ~/.profile;fi);source ~/.bashrc;nohup " \
|
||||
"%s/bin/om_monitor -L %s >>/dev/null 2>&1 &\" >> %s;" % (
|
||||
appPath, log_path, cronFile)
|
||||
cmd += "crontab -u %s %s;service cron restart;" % (user, cronFile)
|
||||
cmd += "rm -f %s" % cronFile
|
||||
|
||||
return cmd
|
||||
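# Sketch of the resulting crontab entry (illustrative, with appPath=/opt/app
# and log_path shortened), i.e. cron re-launches om_monitor every minute:
#   */1 * * * * source /etc/profile;(if [ -f ~/.profile ];then source
#   ~/.profile;fi);source ~/.bashrc;nohup /opt/app/bin/om_monitor -L <log_path>
#   >>/dev/null 2>&1 &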
|
||||
@staticmethod
|
||||
def getRemoveCronCmd(user):
|
||||
"""
|
||||
function: get remove crontab command
|
||||
input : user
|
||||
output: cmd
|
||||
"""
|
||||
cmd = "crontab -u %s -r;service cron restart" % user
|
||||
|
||||
return cmd
|
||||
|
||||
@staticmethod
|
||||
def adaptArchiveCommand(localInstDataDir, similarInstDataDir):
|
||||
"""
|
||||
function: Adapt guc parameter 'archive_command' for each new instance.
|
||||
It will be invoked by GaussReplace.py and GaussDilatation.py
|
||||
input : localInstDataDir, similarInstDataDir
|
||||
output: NA
|
||||
"""
|
||||
GUC_PARAM_PATTERN = "^\\s*archive_command.*=.*$"
|
||||
pattern = re.compile(GUC_PARAM_PATTERN)
|
||||
archiveParaLine = ""
|
||||
archiveDir = "%s/pg_xlog/archive" % localInstDataDir
|
||||
archiveCommand = ""
|
||||
try:
|
||||
configFile = os.path.join(localInstDataDir, "postgresql.conf")
|
||||
|
||||
with open(configFile, 'r') as fp:
|
||||
resList = fp.readlines()
|
||||
lineNum = 0
|
||||
for oneLine in resList:
|
||||
lineNum += 1
|
||||
# skip blank line
|
||||
if (oneLine.strip() == ""):
|
||||
continue
|
||||
# skip comment line
|
||||
if ((oneLine.strip()).startswith('#')):
|
||||
continue
|
||||
# search valid line
|
||||
result = pattern.match(oneLine)
|
||||
if (result is not None):
|
||||
# found the archive_command parameter; adapt its path
|
||||
archiveParaLine = oneLine.replace(similarInstDataDir,
|
||||
localInstDataDir)
|
||||
archiveList = archiveParaLine.split('#')
|
||||
if (len(archiveList) > 0):
|
||||
archiveCommand = archiveList[0]
|
||||
break
|
||||
|
||||
if (archiveParaLine != ""):
|
||||
if (archiveParaLine.find("%f") < 0):
|
||||
raise Exception(ErrorCode.GAUSS_500["GAUSS_50009"]
|
||||
+ " The parameter archive command should "
|
||||
"be set with %%f : %s." % archiveCommand)
|
||||
|
||||
if (archiveParaLine.find("%p") < 0):
|
||||
raise Exception(ErrorCode.GAUSS_500["GAUSS_50009"]
|
||||
+ " The parameter archive command should"
|
||||
" be set with %%p: %s." % archiveCommand)
|
||||
|
||||
setCmd = "sed -i \"%dc%s\" %s" % (lineNum, archiveParaLine,
|
||||
configFile)
|
||||
(status, output) = subprocess.getstatusoutput(setCmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"]
|
||||
% setCmd + " Error: \n%s" % output)
|
||||
|
||||
if (os.path.exists(archiveDir) and os.path.isdir(archiveDir)):
|
||||
return
|
||||
|
||||
mkDirCmd = "mkdir -p '%s' -m %s" % (
|
||||
archiveDir, DefaultValue.KEY_DIRECTORY_MODE)
|
||||
(status, output) = subprocess.getstatusoutput(mkDirCmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50208"]
|
||||
% archiveDir + " Error: \n%s." % output
|
||||
+ "The cmd is %s" % mkDirCmd)
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
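# Illustrative before/after of the line being adapted (paths are hypothetical;
# the %f/%p placeholders follow standard PostgreSQL archive_command syntax):
#   archive_command = 'cp %p /old_inst/data/pg_xlog/archive/%f'
# becomes, after replacing similarInstDataDir with localInstDataDir,
#   archive_command = 'cp %p /new_inst/data/pg_xlog/archive/%f'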
|
||||
@staticmethod
|
||||
def getClusterStatus(user, isExpandScene=False):
|
||||
"""
|
||||
function: get cluster status
|
||||
input : user
|
||||
output: clusterStatus
|
||||
"""
|
||||
userAbsolutePath = g_Platform.getUserHomePath()
|
||||
statusFile = "%s/gauss_check_status_%d.dat" % (
|
||||
userAbsolutePath, os.getpid())
|
||||
TempfileManagement.removeTempFile(statusFile)
|
||||
cmd = ClusterCommand.getQueryStatusCmd(user, "", statusFile)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
TempfileManagement.removeTempFile(statusFile)
|
||||
return None
|
||||
clusterStatus = DbClusterStatus()
|
||||
clusterStatus.initFromFile(statusFile, isExpandScene)
|
||||
TempfileManagement.removeTempFile(statusFile)
|
||||
return clusterStatus
|
||||
|
||||
@staticmethod
|
||||
def getClusterDbNodeInfo(clusterUser, xmlFile=""):
|
||||
"""
|
||||
function: get cluster and database node info from static config file
|
||||
input : clusterUser, xmlFile
|
||||
output: NA
|
||||
"""
|
||||
try:
|
||||
clusterInfo = dbClusterInfo()
|
||||
if (os.getuid() == 0):
|
||||
clusterInfo.initFromXml(xmlFile)
|
||||
else:
|
||||
clusterInfo.initFromStaticConfig(clusterUser)
|
||||
hostName = DefaultValue.GetHostIpOrName()
|
||||
dbNodeInfo = clusterInfo.getDbNodeByName(hostName)
|
||||
if (dbNodeInfo is None):
|
||||
raise Exception(ErrorCode.GAUSS_516["GAUSS_51619"] % hostName)
|
||||
return clusterInfo, dbNodeInfo
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
|
||||
@staticmethod
|
||||
def checkHostname(nodename):
|
||||
"""
|
||||
function: check host name
|
||||
input : nodename
|
||||
output: NA
|
||||
"""
|
||||
try:
|
||||
retry = 1
|
||||
cmd = "pssh -s -H %s hostname" % (nodename)
|
||||
while True:
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if status == 0 and output.find("%s" % nodename) >= 0:
|
||||
break
|
||||
if retry >= 3:
|
||||
raise Exception(ErrorCode.GAUSS_512["GAUSS_51222"]
|
||||
+ " Command: \"%s\". Error: \n%s"
|
||||
% (cmd, output))
|
||||
retry += 1
|
||||
time.sleep(1)
|
||||
|
||||
hostnameCmd = "pssh -s -H %s 'cat /etc/hostname'" % (nodename)
|
||||
(status, output) = subprocess.getstatusoutput(hostnameCmd)
|
||||
if status == 0 and output.strip() == nodename:
|
||||
pass
|
||||
else:
|
||||
raise Exception(ErrorCode.GAUSS_512["GAUSS_51248"] % nodename
|
||||
+ " Command: \"%s\". Error: \n%s"
|
||||
% (hostnameCmd, output))
|
||||
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
|
||||
@staticmethod
|
||||
def checkHostnameMapping(clusterInfo, logFile):
|
||||
"""
|
||||
function: check host name mapping
|
||||
input: clusterInfo, logFile
|
||||
output: NA
|
||||
"""
|
||||
nodes = clusterInfo.getClusterNodeNames()
|
||||
if (len(nodes) > 0):
|
||||
try:
|
||||
pool = ThreadPool(DefaultValue.getCpuSet())
|
||||
results = pool.map(OMCommand.checkHostname, nodes)
|
||||
pool.close()
|
||||
pool.join()
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
937
script/gspylib/common/ParallelBaseOM.py
Normal file
937
script/gspylib/common/ParallelBaseOM.py
Normal file
@ -0,0 +1,937 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import signal
|
||||
import copy
|
||||
import subprocess
|
||||
import re
|
||||
import getpass
|
||||
from datetime import datetime, timedelta
|
||||
from multiprocessing.dummy import Pool as ThreadPool
|
||||
|
||||
sys.path.append(sys.path[0] + "/../../")
|
||||
from gspylib.common.GaussLog import GaussLog
|
||||
from gspylib.common.DbClusterInfo import dbClusterInfo
|
||||
from gspylib.common.Common import DefaultValue, ClusterCommand, \
|
||||
TempfileManagement
|
||||
from gspylib.common.DbClusterStatus import DbClusterStatus
|
||||
from gspylib.common.OMCommand import OMCommand
|
||||
from gspylib.os.gsfile import g_file
|
||||
from gspylib.os.gsplatform import g_Platform
|
||||
from gspylib.threads.SshTool import SshTool
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.component.Kernel.DN_OLAP.DN_OLAP import DN_OLAP
|
||||
|
||||
SPACE_USAGE_DBUSER = 80
|
||||
|
||||
|
||||
class ParallelBaseOM(object):
|
||||
"""
|
||||
Base class of parallel command
|
||||
"""
|
||||
ACTION_INSTALL = "install"
|
||||
ACTION_CONFIG = "config"
|
||||
ACTION_START = "start"
|
||||
ACTION_REDISTRIBUTE = "redistribute"
|
||||
ACTION_HEALTHCHECK = "healthcheck"
|
||||
|
||||
HEALTH_CHECK_BEFORE = "before"
|
||||
HEALTH_CHECK_AFTER = "after"
|
||||
"""
|
||||
Base class for parallel command
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
'''
|
||||
Constructor
|
||||
'''
|
||||
self.logger = None
|
||||
self.clusterInfo = None
|
||||
self.oldClusterInfo = None
|
||||
self.sshTool = None
|
||||
self.action = ""
|
||||
|
||||
# Cluster config file.
|
||||
self.xmlFile = ""
|
||||
self.oldXmlFile = ""
|
||||
|
||||
self.logType = DefaultValue.LOCAL_LOG_FILE
|
||||
self.logFile = ""
|
||||
self.localLog = ""
|
||||
self.user = ""
|
||||
self.group = ""
|
||||
self.mpprcFile = ""
|
||||
# Temporary directory for install steps
|
||||
self.operateStepDir = TempfileManagement.getTempDir(
|
||||
"%s_step" % self.__class__.__name__.lower())
|
||||
# Temporary files for install step
|
||||
self.operateStepFile = "%s/%s_step.dat" % (
|
||||
self.operateStepDir, self.__class__.__name__.lower())
|
||||
self.initStep = ""
|
||||
self.dws_mode = False
|
||||
self.rollbackCommands = []
|
||||
self.etcdCons = []
|
||||
self.cmCons = []
|
||||
self.gtmCons = []
|
||||
self.cnCons = []
|
||||
self.dnCons = []
|
||||
# localMode is the same as isSingle in all OM scripts, except for
|
||||
# gs_preinstall.
|
||||
# in gs_preinstall, localMode means local mode for master-standby
|
||||
# cluster.
|
||||
# in gs_preinstall, localMode also means local mode for single
|
||||
# cluster(will not create os user).
|
||||
# in gs_preinstall, isSingle means single cluster, it will create
|
||||
# os user.
|
||||
# not isSingle and not localMode : master-standby cluster global
|
||||
# mode(will create os user).
|
||||
# not isSingle and localMode : master-standby cluster local
|
||||
# mode(will not create os user).
|
||||
# isSingle and not localMode : single cluster(will create os user).
|
||||
# isSingle and localMode : single cluster(will not create os user).
|
||||
self.localMode = False
|
||||
self.isSingle = False
|
||||
# Indicates whether there is a logical cluster.
|
||||
# If elastic_group exists, the current cluster is a logical cluster.
|
||||
# Otherwise, it is a large physical cluster.
|
||||
self.isElasticGroup = False
|
||||
self.isAddElasticGroup = False
|
||||
self.lcGroup_name = ""
|
||||
# Cluster lock mode: there are two modes, exclusive lock and
|
||||
# wait lock mode,
|
||||
# the default is exclusive lock.
|
||||
self.lockMode = "exclusiveLock"
|
||||
|
||||
# SinglePrimaryMultiStandby supports binary upgrade and inplace upgrade
|
||||
self.isSinglePrimaryMultiStandby = False
|
||||
|
||||
# Adapt to 200 and 300
|
||||
self.productVersion = None
|
||||
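# Minimal sketch (an assumption drawn from the localMode/isSingle comment in
# __init__ above; "_modes_create_os_user" is a hypothetical helper, not part
# of the original class): for gs_preinstall, whether the OS user is created
# depends only on localMode, while isSingle only distinguishes a single
# cluster from a master-standby cluster.
@staticmethod
def _modes_create_os_user(localMode, isSingle):
    """Return True when gs_preinstall would create the OS user."""
    # isSingle is accepted for symmetry with the comment block but does not
    # change the outcome.
    _ = isSingle
    return not localMode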
|
||||
def initComponent(self):
|
||||
"""
|
||||
function: Init component
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
for nodeInfo in self.clusterInfo.dbNodes:
|
||||
self.initKernelComponent(nodeInfo)
|
||||
|
||||
def initComponentAttributes(self, component):
|
||||
"""
|
||||
function: Init component attributes on current node
|
||||
input : Object component
|
||||
output: NA
|
||||
"""
|
||||
component.logger = self.logger
|
||||
component.binPath = "%s/bin" % self.clusterInfo.appPath
|
||||
component.dwsMode = self.dws_mode
|
||||
|
||||
def initKernelComponent(self, nodeInfo):
|
||||
"""
|
||||
function: Init kernel component
|
||||
input : Object nodeInfo
|
||||
output: NA
|
||||
"""
|
||||
for inst in nodeInfo.datanodes:
|
||||
component = DN_OLAP()
|
||||
# init component cluster type
|
||||
component.clusterType = self.clusterInfo.clusterType
|
||||
component.instInfo = inst
|
||||
self.initComponentAttributes(component)
|
||||
self.dnCons.append(component)
|
||||
|
||||
def initLogger(self, module=""):
|
||||
"""
|
||||
function: Init logger
|
||||
input : module
|
||||
output: NA
|
||||
"""
|
||||
# log level
|
||||
LOG_DEBUG = 1
|
||||
self.logger = GaussLog(self.logFile, module, LOG_DEBUG)
|
||||
|
||||
dirName = os.path.dirname(self.logFile)
|
||||
self.localLog = os.path.join(dirName, DefaultValue.LOCAL_LOG_FILE)
|
||||
|
||||
def initClusterInfo(self, refreshCN=True):
|
||||
"""
|
||||
function: Init cluster info
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
try:
|
||||
self.clusterInfo = dbClusterInfo()
|
||||
if (refreshCN):
|
||||
static_config_file = "%s/bin/cluster_static_config" % \
|
||||
DefaultValue.getInstallDir(self.user)
|
||||
self.clusterInfo.initFromXml(self.xmlFile, static_config_file)
|
||||
else:
|
||||
self.clusterInfo.initFromXml(self.xmlFile)
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
self.logger.debug("Instance information of cluster:\n%s." %
|
||||
str(self.clusterInfo))
|
||||
|
||||
def initClusterInfoFromStaticFile(self, user, flag=True):
|
||||
"""
|
||||
function: Function to init clusterInfo from static file
|
||||
input : user
|
||||
output: NA
|
||||
"""
|
||||
try:
|
||||
self.clusterInfo = dbClusterInfo()
|
||||
self.clusterInfo.initFromStaticConfig(user)
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
if flag:
|
||||
self.logger.debug("Instance information of cluster:\n%s." %
|
||||
str(self.clusterInfo))
|
||||
|
||||
def initSshTool(self, nodeNames, timeout=0):
|
||||
"""
|
||||
function: Init ssh tool
|
||||
input : nodeNames, timeout
|
||||
output: NA
|
||||
"""
|
||||
self.sshTool = SshTool(nodeNames, self.logger.logFile, timeout)
|
||||
|
||||
def check_cluster_version_consistency(self, clusterNodes, newNodes=None):
|
||||
"""
|
||||
"""
|
||||
self.logger.log("Check cluster version consistency.")
|
||||
if newNodes is None:
|
||||
newNodes = []
|
||||
dic_version_info = {}
|
||||
# check version.cfg on every node.
|
||||
gp_home = DefaultValue.getEnv("GPHOME")
|
||||
gauss_home = DefaultValue.getEnv("GAUSSHOME")
|
||||
if not (os.path.exists(gp_home) and os.path.exists(gauss_home)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_502["GAUSS_50201"] %
|
||||
("%s", "or %s") % (gp_home, gauss_home))
|
||||
for ip in clusterNodes:
|
||||
if ip in newNodes:
|
||||
cmd = "pssh -s -H %s 'cat %s/version.cfg'" % \
|
||||
(ip, DefaultValue.getEnv("GPHOME"))
|
||||
else:
|
||||
cmd = "pssh -s -H %s 'cat %s/bin/upgrade_version'" % \
|
||||
(ip, DefaultValue.getEnv("GAUSSHOME"))
|
||||
status, output = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd +
|
||||
" Error:\n%s" % str(output))
|
||||
if len(output.strip().split()) < 3:
|
||||
raise Exception(ErrorCode.GAUSS_516["GAUSS_51623"])
|
||||
dic_version_info[ip] = ",".join(output.strip().split()[1:])
|
||||
|
||||
self.logger.debug("The cluster version on every node.")
|
||||
for check_ip, version_info in dic_version_info.items():
|
||||
self.logger.debug("%s : %s" % (check_ip, version_info))
|
||||
if len(set(dic_version_info.values())) != 1:
|
||||
L_inconsistent = list(set(dic_version_info.values()))
|
||||
self.logger.debug("The package version on some nodes are "
|
||||
"inconsistent\n%s" % str(L_inconsistent))
|
||||
raise Exception("The package version on some nodes are "
|
||||
"inconsistent,%s" % str(L_inconsistent))
|
||||
self.logger.log("Successfully checked cluster version.")
|
||||
|
||||
def checkBaseFile(self, checkXml=True):
|
||||
"""
|
||||
function: Check xml file and log file
|
||||
input : checkXml
|
||||
output: NA
|
||||
"""
|
||||
if (checkXml):
|
||||
if (self.xmlFile == ""):
|
||||
raise Exception(ErrorCode.GAUSS_500["GAUSS_50001"] % 'X' + ".")
|
||||
|
||||
if (not os.path.exists(self.xmlFile)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] %
|
||||
("configuration file [%s]" % self.xmlFile))
|
||||
|
||||
if (not os.path.isabs(self.xmlFile)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50213"] %
|
||||
("configuration file [%s]" % self.xmlFile))
|
||||
else:
|
||||
self.xmlFile = ""
|
||||
|
||||
if (self.logFile == ""):
|
||||
self.logFile = DefaultValue.getOMLogPath(self.logType,
|
||||
self.user, "",
|
||||
self.xmlFile)
|
||||
|
||||
if (not os.path.isabs(self.logFile)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50213"] % "log")
|
||||
|
||||
def initSignalHandler(self):
|
||||
"""
|
||||
function: Function to init signal handler
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGQUIT, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGTERM, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGALRM, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGHUP, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGUSR1, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
|
||||
|
||||
def print_signal_stack(self, frame):
|
||||
"""
|
||||
function: Function to print signal stack
|
||||
input : frame
|
||||
output: NA
|
||||
"""
|
||||
if (self.logger is None):
|
||||
return
|
||||
try:
|
||||
import inspect
|
||||
stacks = inspect.getouterframes(frame)
|
||||
for curr in range(len(stacks)):
|
||||
stack = stacks[curr]
|
||||
self.logger.debug("Stack level: %d. File: %s. Function: "
|
||||
"%s. LineNo: %d." % (
|
||||
curr, stack[1], stack[3],
|
||||
stack[2]))
|
||||
self.logger.debug("Code: %s." % (
|
||||
stack[4][0].strip().strip("\n")))
|
||||
except Exception as e:
|
||||
self.logger.debug("Failed to print signal stack. Error: \n%s" %
|
||||
str(e))
|
||||
|
||||
def raise_handler(self, signal_num, frame):
|
||||
"""
|
||||
function: Function to raise handler
|
||||
input : signal_num, frame
|
||||
output: NA
|
||||
"""
|
||||
if (self.logger is not None):
|
||||
self.logger.debug("Received signal[%d]." % (signal_num))
|
||||
self.print_signal_stack(frame)
|
||||
raise Exception(ErrorCode.GAUSS_516["GAUSS_51614"] % (signal_num))
|
||||
|
||||
def setupTimeoutHandler(self):
|
||||
"""
|
||||
function: Function to set up time out handler
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
signal.signal(signal.SIGALRM, self.timeout_handler)
|
||||
|
||||
def setTimer(self, timeout):
|
||||
"""
|
||||
function: Function to set timer
|
||||
input : timeout
|
||||
output: NA
|
||||
"""
|
||||
self.logger.debug("Set timer. The timeout: %d." % timeout)
|
||||
signal.signal(signal.SIGALRM, self.timeout_handler)
|
||||
signal.alarm(timeout)
|
||||
|
||||
def resetTimer(self):
|
||||
"""
|
||||
function: Reset timer
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
signal.signal(signal.SIGALRM, signal.SIG_IGN)
|
||||
self.logger.debug("Reset timer. Left time: %d." % signal.alarm(0))
|
||||
|
||||
def timeout_handler(self, signal_num, frame):
|
||||
"""
|
||||
function: Received the timeout signal
|
||||
input : signal_num, frame
|
||||
output: NA
|
||||
"""
|
||||
if (self.logger is not None):
|
||||
self.logger.debug("Received the timeout signal: [%d]." %
|
||||
(signal_num))
|
||||
self.print_signal_stack(frame)
|
||||
raise Timeout("Time out.")
|
||||
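# Intended usage pattern of the timer helpers above (an illustrative sketch
# inferred from setTimer/resetTimer/timeout_handler; Timeout is expected to
# be defined elsewhere in this module):
#   self.setTimer(300)
#   try:
#       ...  # long-running step; SIGALRM raises Timeout after 300 seconds
#   finally:
#       self.resetTimer()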
|
||||
def waitProcessStop(self, processKeywords, hostname):
|
||||
"""
|
||||
function: Wait the process stop
|
||||
input : process name
|
||||
output: NA
|
||||
"""
|
||||
count = 0
|
||||
while (True):
|
||||
psCmd = "ps ux|grep -v grep |awk '{print \$11}'|grep '%s' " % \
|
||||
processKeywords.strip()
|
||||
(status, output) = self.sshTool.getSshStatusOutput(
|
||||
psCmd, [hostname])
|
||||
# Determine whether the process can be found.
|
||||
if (status[hostname] != DefaultValue.SUCCESS):
|
||||
self.logger.debug("The %s process stopped." % processKeywords)
|
||||
break
|
||||
|
||||
count += 1
|
||||
if (count % 20 == 0):
|
||||
self.logger.debug("The %s process exists." % processKeywords)
|
||||
time.sleep(3)
|
||||
|
||||
def managerOperateStepDir(self, action='create', nodes=None):
|
||||
"""
|
||||
function: manage (create or remove) the operate step directory
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
if nodes is None:
|
||||
nodes = []
|
||||
try:
|
||||
# Creating the backup directory
|
||||
if (action == "create"):
|
||||
cmd = "(if [ ! -d '%s' ];then mkdir -p '%s' -m %s;fi)" % (
|
||||
self.operateStepDir, self.operateStepDir,
|
||||
DefaultValue.KEY_DIRECTORY_MODE)
|
||||
else:
|
||||
cmd = "(if [ -d '%s' ];then rm -rf '%s';fi)" % (
|
||||
self.operateStepDir, self.operateStepDir)
|
||||
DefaultValue.execCommandWithMode(cmd,
|
||||
"%s temporary directory" % action,
|
||||
self.sshTool,
|
||||
self.localMode or self.isSingle,
|
||||
"",
|
||||
nodes)
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
|
||||
def readOperateStep(self):
|
||||
"""
|
||||
function: read operate step signal
|
||||
input : NA
|
||||
output: currentStep
|
||||
"""
|
||||
currentStep = self.initStep
|
||||
|
||||
if not os.path.exists(self.operateStepFile):
|
||||
self.logger.debug("The %s does not exits." % self.operateStepFile)
|
||||
return currentStep
|
||||
|
||||
if not os.path.isfile(self.operateStepFile):
|
||||
self.logger.debug("The %s must be a file." % self.operateStepFile)
|
||||
return currentStep
|
||||
|
||||
with open(self.operateStepFile, "r") as fp:
|
||||
line = fp.readline().strip()
|
||||
if line is not None and line != "":
|
||||
currentStep = line
|
||||
|
||||
return currentStep
|
||||
|
||||
def writeOperateStep(self, stepName, nodes=None):
|
||||
"""
|
||||
function: write operate step signal
|
||||
input : step
|
||||
output: NA
|
||||
"""
|
||||
if nodes is None:
|
||||
nodes = []
|
||||
try:
|
||||
# write the step into INSTALL_STEP
|
||||
# open the INSTALL_STEP
|
||||
with open(self.operateStepFile, "w") as g_DB:
|
||||
# write the INSTALL_STEP
|
||||
g_DB.write(stepName)
|
||||
g_DB.write(os.linesep)
|
||||
g_DB.flush()
|
||||
# change the INSTALL_STEP permissions
|
||||
g_file.changeMode(DefaultValue.KEY_FILE_MODE, self.operateStepFile)
|
||||
|
||||
# distribute file to all nodes
|
||||
cmd = "mkdir -p -m %s '%s'" % (DefaultValue.KEY_DIRECTORY_MODE,
|
||||
self.operateStepDir)
|
||||
DefaultValue.execCommandWithMode(cmd,
|
||||
"create backup directory "
|
||||
"on all nodes",
|
||||
self.sshTool,
|
||||
self.localMode or self.isSingle,
|
||||
"",
|
||||
nodes)
|
||||
|
||||
if not self.localMode and not self.isSingle:
|
||||
self.sshTool.scpFiles(self.operateStepFile,
|
||||
self.operateStepDir, nodes)
|
||||
except Exception as e:
|
||||
# failed to write the step into INSTALL_STEP
|
||||
raise Exception(str(e))
|
||||
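# Illustrative usage of the step checkpointing above (the step names are
# hypothetical, not taken from the source): a long operation records its
# progress so that a re-run can resume from the last completed step.
#   step = self.readOperateStep()
#   if step == "init_cluster":
#       ...  # perform the step
#       self.writeOperateStep("install_app", nodes)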
|
||||
def distributeFiles(self):
|
||||
"""
|
||||
function: distribute package to every host
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
self.logger.debug("Distributing files.")
|
||||
try:
|
||||
# get the all nodes
|
||||
hosts = self.clusterInfo.getClusterNodeNames()
|
||||
if DefaultValue.GetHostIpOrName() not in hosts:
|
||||
raise Exception(ErrorCode.GAUSS_516["GAUSS_51619"] %
|
||||
DefaultValue.GetHostIpOrName())
|
||||
hosts.remove(DefaultValue.GetHostIpOrName())
|
||||
# Send xml file to every host
|
||||
DefaultValue.distributeXmlConfFile(self.sshTool, self.xmlFile,
|
||||
hosts, self.mpprcFile)
|
||||
# Successfully distributed files
|
||||
self.logger.debug("Successfully distributed files.")
|
||||
except Exception as e:
|
||||
# failed to distribute package to every host
|
||||
raise Exception(str(e))
|
||||
|
||||
def checkPreInstall(self, user, flag, nodes=None):
|
||||
"""
|
||||
function: check whether preinstall has been done on the given nodes
|
||||
input : user, nodes
|
||||
output: NA
|
||||
"""
|
||||
if nodes is None:
|
||||
nodes = []
|
||||
try:
|
||||
cmd = "%s -U %s -t %s" % (
|
||||
OMCommand.getLocalScript("Local_Check_PreInstall"), user, flag)
|
||||
DefaultValue.execCommandWithMode(
|
||||
cmd, "check preinstall", self.sshTool,
|
||||
self.localMode or self.isSingle, "", nodes)
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
|
||||
def checkNodeInstall(self, nodes=None, checkParams=None,
|
||||
strictUserCheck=True):
|
||||
"""
|
||||
function: Check node install
|
||||
input : nodes, checkParams, strictUserCheck
|
||||
output: NA
|
||||
"""
|
||||
if nodes is None:
|
||||
nodes = []
|
||||
if checkParams is None:
|
||||
checkParams = []
|
||||
validParam = ["shared_buffers", "max_connections"]
|
||||
cooGucParam = ""
|
||||
for param in checkParams:
|
||||
entry = param.split("=")
|
||||
if (len(entry) != 2):
|
||||
raise Exception(ErrorCode.GAUSS_500["GAUSS_50009"])
|
||||
if (entry[0].strip() in validParam):
|
||||
cooGucParam += " -C \\\"%s\\\"" % param
|
||||
self.logger.log("Checking installation environment on all nodes.")
|
||||
cmd = "%s -U %s:%s -R %s %s -l %s -X '%s'" % (
|
||||
OMCommand.getLocalScript("Local_Check_Install"), self.user,
|
||||
self.group, self.clusterInfo.appPath, cooGucParam, self.localLog,
|
||||
self.xmlFile)
|
||||
if (not strictUserCheck):
|
||||
cmd += " -O"
|
||||
self.logger.debug("Checking the install command: %s." % cmd)
|
||||
DefaultValue.execCommandWithMode(cmd,
|
||||
"check installation environment",
|
||||
self.sshTool,
|
||||
self.localMode or self.isSingle,
|
||||
"",
|
||||
nodes)
|
||||
|
||||
def cleanNodeConfig(self, nodes=None, datadirs=None):
|
||||
"""
|
||||
function: Clean instance
|
||||
input : nodes, datadirs
|
||||
output: NA
|
||||
"""
|
||||
self.logger.log("Deleting instances from all nodes.")
|
||||
if nodes is None:
|
||||
nodes = []
|
||||
if datadirs is None:
|
||||
datadirs = []
|
||||
cmdParam = ""
|
||||
for datadir in datadirs:
|
||||
cmdParam += " -D %s " % datadir
|
||||
cmd = "%s -U %s %s -l %s" % (
|
||||
OMCommand.getLocalScript("Local_Clean_Instance"),
|
||||
self.user, cmdParam, self.localLog)
|
||||
DefaultValue.execCommandWithMode(
|
||||
cmd, "clean instance", self.sshTool,
|
||||
self.localMode or self.isSingle, "", nodes)
|
||||
self.logger.log("Successfully deleted instances from all nodes.")
|
||||
|
||||
@staticmethod
|
||||
def getPrepareKeysCmd(key_file, user, confFile, destPath, logfile,
|
||||
userProfile="", localMode=False):
|
||||
"""
|
||||
function: get etcd communication keys command
|
||||
input: key_file, user, confFile, destPath, localMode:do not scp keys
|
||||
output: NA
|
||||
"""
|
||||
if (not os.path.exists(key_file)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] % key_file)
|
||||
if (not userProfile):
|
||||
userProfile = DefaultValue.getMpprcFile()
|
||||
# create the directory on all nodes
|
||||
cmd = "source %s; %s -U %s -X %s --src-file=%s --dest-path=%s -l %s" \
|
||||
% (userProfile, OMCommand.getLocalScript("Local_PrepareKeys"),
|
||||
user, confFile, key_file, destPath, logfile)
|
||||
# if local mode, only prepare keys, do not scp keys to cluster nodes
|
||||
if (localMode):
|
||||
cmd += " -L"
|
||||
return cmd
|
||||
|
||||
def getClusterRings(self, clusterInfo):
|
||||
"""
|
||||
function: get clusterRings from cluster info
|
||||
input: dbClusterInfo() instance
|
||||
output: list
|
||||
"""
|
||||
hostPerNodeList = self.getDNHostnamesPerNode(clusterInfo)
|
||||
# Loop over the per-node hostname lists that contain the master and
|
||||
# standby DB instances of each node.
|
||||
for i in range(len(hostPerNodeList)):
|
||||
# Loop the list after the i-th list
|
||||
for perNodelist in hostPerNodeList[i + 1:len(hostPerNodeList)]:
|
||||
# Define a tag
|
||||
flag = 0
|
||||
# Loop the elements of each perNodelist
|
||||
for hostNameElement in perNodelist:
|
||||
# If any element of perNodelist already appears in the i-th list,
|
||||
# merge the remaining elements into hostPerNodeList[i].
|
||||
if hostNameElement in hostPerNodeList[i]:
|
||||
flag = 1
|
||||
for element in perNodelist:
|
||||
if element not in hostPerNodeList[i]:
|
||||
hostPerNodeList[i].append(element)
|
||||
if (flag == 1):
|
||||
hostPerNodeList.remove(perNodelist)
|
||||
|
||||
return hostPerNodeList
|
||||
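# Illustrative example of the merge above (hostnames are hypothetical): with
# per-node DN host lists
#   [["nodeA", "nodeB"], ["nodeB", "nodeC"], ["nodeD", "nodeE"]]
# the first two lists overlap on "nodeB" and are merged, so the returned
# rings are
#   [["nodeA", "nodeB", "nodeC"], ["nodeD", "nodeE"]].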
|
||||
def getDNHostnamesPerNode(self, clusterInfo):
|
||||
"""
|
||||
function: get DB hostnames per node
|
||||
input: dbClusterInfo() instance
|
||||
output: list
|
||||
"""
|
||||
hostPerNodeList = []
|
||||
for dbNode in clusterInfo.dbNodes:
|
||||
nodeDnlist = []
|
||||
# loop per node
|
||||
for dnInst in dbNode.datanodes:
|
||||
if (dnInst.instanceType == DefaultValue.MASTER_INSTANCE):
|
||||
if dnInst.hostname not in nodeDnlist:
|
||||
nodeDnlist.append(dnInst.hostname)
|
||||
# get other standby and dummy hostname
|
||||
instances = clusterInfo.getPeerInstance(dnInst)
|
||||
for inst in instances:
|
||||
if inst.hostname not in nodeDnlist:
|
||||
nodeDnlist.append(inst.hostname)
|
||||
if nodeDnlist != []:
|
||||
hostPerNodeList.append(nodeDnlist)
|
||||
return hostPerNodeList
|
||||
|
||||
# for olap function
|
||||
def checkIsElasticGroupExist(self, dbNodes):
|
||||
"""
|
||||
function: Check if elastic_group exists.
|
||||
input : dbNodes
|
||||
output: NA
|
||||
"""
|
||||
self.logger.debug("Checking if elastic group exists.")
|
||||
|
||||
self.isElasticGroup = False
|
||||
coorNode = []
|
||||
# traverse old nodes
|
||||
for dbNode in dbNodes:
|
||||
if (len(dbNode.coordinators) >= 1):
|
||||
coorNode.append(dbNode.coordinators[0])
|
||||
break
|
||||
|
||||
# check elastic group
|
||||
CHECK_GROUP_SQL = "SELECT count(*) FROM pg_catalog.pgxc_group " \
|
||||
"WHERE group_name='elastic_group' " \
|
||||
"and group_kind='e'; "
|
||||
(checkstatus, checkoutput) = ClusterCommand.remoteSQLCommand(
|
||||
CHECK_GROUP_SQL, self.user, coorNode[0].hostname, coorNode[0].port)
|
||||
if (checkstatus != 0 or not checkoutput.isdigit()):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50219"] %
|
||||
"node group" + " Error:\n%s" % str(checkoutput))
|
||||
elif (checkoutput.strip() == '1'):
|
||||
self.isElasticGroup = True
|
||||
elif (checkoutput.strip() == '0'):
|
||||
self.isElasticGroup = False
|
||||
else:
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50219"] %
|
||||
"the number of node group")
|
||||
|
||||
self.logger.debug("Successfully checked if elastic group exists.")
|
||||
|
||||
def checkHostnameIsLoop(self, nodenameList):
|
||||
"""
|
||||
function: check if hostname is looped
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
isRing = True
|
||||
# 1.get ring information in the cluster
|
||||
clusterRings = self.getClusterRings(self.clusterInfo)
|
||||
nodeRing = ""
|
||||
nodenameRings = []
|
||||
# 2.Check if the node is in the ring
|
||||
for num in iter(clusterRings):
|
||||
ringNodeList = []
|
||||
for nodename in nodenameList:
|
||||
if (nodename in num):
|
||||
ringNodeList.append(nodename)
|
||||
if (len(ringNodeList) != 0 and len(ringNodeList) ==
|
||||
len(num)):
|
||||
nodenameRings.append(ringNodeList)
|
||||
if (len(ringNodeList) != 0 and len(ringNodeList) !=
|
||||
len(num)):
|
||||
isRing = False
|
||||
break
|
||||
else:
|
||||
continue
|
||||
if not isRing:
|
||||
raise Exception(ErrorCode.GAUSS_500["GAUSS_50004"] % "h" +
|
||||
" The hostname (%s) specified by the -h parameter "
|
||||
"must be looped." % nodeRing)
|
||||
return (clusterRings, nodenameRings)
|
||||
|
||||
def getDNinstanceByNodeName(self, hostname, isMaster=True):
|
||||
"""
|
||||
function: Get the DB instance of the node based on the node name.
|
||||
input : hostname
|
||||
isMaster: get master DB instance
|
||||
output: list of master or standby DB instances
|
||||
"""
|
||||
masterdnInsts = []
|
||||
standbydnInsts = []
|
||||
# notice
|
||||
for dbNode in self.clusterInfo.dbNodes:
|
||||
if (dbNode.name == hostname):
|
||||
for dbInst in dbNode.datanodes:
|
||||
# get master DB instance
|
||||
if (dbInst.instanceType == DefaultValue.MASTER_INSTANCE):
|
||||
masterdnInsts.append(dbInst)
|
||||
# get standby or dummy DB instance
|
||||
else:
|
||||
standbydnInsts.append(dbInst)
|
||||
|
||||
if (isMaster):
|
||||
return masterdnInsts
|
||||
else:
|
||||
return standbydnInsts
|
||||
|
||||
def getSQLResultList(self, sql, user, hostname, port,
|
||||
database="postgres"):
|
||||
"""
|
||||
"""
|
||||
(status, output) = ClusterCommand.remoteSQLCommand(sql, user,
|
||||
hostname, port,
|
||||
False, database)
|
||||
if status != 0 or ClusterCommand.findErrorInSql(output):
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % sql +
|
||||
" Error:\n%s" % str(output))
|
||||
# split the output string with '\n'
|
||||
resultList = output.split("\n")
|
||||
return resultList
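# A usage sketch with made-up connection details: remoteSQLCommand returns the
# raw query output, which is then split on newlines.
#   rows = self.getSQLResultList("SELECT node_name FROM pgxc_node;",
#                                self.user, "cn_host_1", 25308)
#   # rows might look like ["cn_5001", "dn_6001", "dn_6002"]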
|
||||
|
||||
def getCooInst(self):
|
||||
"""
|
||||
function: get CN instance
|
||||
input : NA
|
||||
output: CN instance
|
||||
"""
|
||||
coorInst = []
|
||||
# get CN on nodes
|
||||
for dbNode in self.clusterInfo.dbNodes:
|
||||
if (len(dbNode.coordinators) >= 1):
|
||||
coorInst.append(dbNode.coordinators[0])
|
||||
# check if contain CN on nodes
|
||||
if (len(coorInst) == 0):
|
||||
raise Exception(ErrorCode.GAUSS_526["GAUSS_52602"])
|
||||
else:
|
||||
return coorInst
|
||||
|
||||
def getGroupName(self, fieldName, fieldValue):
|
||||
"""
|
||||
function: Get the node group name by field name and field value.
|
||||
input : field name and field value
|
||||
output: node group name
|
||||
"""
|
||||
# 1.get CN instance info from cluster
|
||||
cooInst = self.getCooInst()
|
||||
|
||||
# 2.obtain the node group
|
||||
OBTAIN_SQL = "select group_name from pgxc_group where %s = %s; " % \
|
||||
(fieldName, fieldValue)
|
||||
# execute the sql command
|
||||
(status, output) = ClusterCommand.remoteSQLCommand(OBTAIN_SQL,
|
||||
self.user,
|
||||
cooInst[0].hostname,
|
||||
cooInst[0].port,
|
||||
ignoreError=False)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] %
|
||||
OBTAIN_SQL + " Error:\n%s" % str(output))
|
||||
|
||||
return output.strip()
|
||||
|
||||
def killKernalSnapshotThread(self, coorInst):
|
||||
"""
|
||||
function: kill snapshot thread in Kernel,
|
||||
to avoid deadlock with redistribution
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
self.logger.debug("Stopping snapshot thread in database node Kernel.")
|
||||
killSnapshotSQL = "select * from kill_snapshot();"
|
||||
|
||||
(status, output) = ClusterCommand.remoteSQLCommand(
|
||||
killSnapshotSQL, self.user, coorInst.hostname, coorInst.port,
|
||||
False, DefaultValue.DEFAULT_DB_NAME)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] %
|
||||
killSnapshotSQL + " Error:\n%s" % str(output))
|
||||
self.logger.debug("Successfully stopped snapshot "
|
||||
"thread in database node Kernel.")
|
||||
|
||||
def createServerCa(self, hostList=None):
|
||||
"""
|
||||
function: create server CA files
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
self.logger.debug("Generating CA files.")
|
||||
if hostList is None:
|
||||
hostList = []
|
||||
appPath = DefaultValue.getInstallDir(self.user)
|
||||
caPath = os.path.join(appPath, "share/sslcert/om")
|
||||
self.logger.debug("The ca file dir is: %s." % caPath)
|
||||
if (len(hostList) == 0):
|
||||
for dbNode in self.clusterInfo.dbNodes:
|
||||
hostList.append(dbNode.name)
|
||||
# Create CA dir and prepare files for using.
|
||||
self.logger.debug("Create CA file directory.")
|
||||
try:
|
||||
DefaultValue.createCADir(self.sshTool, caPath, hostList)
|
||||
self.logger.debug("Add hostname to config file.")
|
||||
DefaultValue.createServerCA(DefaultValue.SERVER_CA, caPath,
|
||||
self.logger)
|
||||
# Clean useless files, and change permission of ca file to 600.
|
||||
DefaultValue.cleanServerCaDir(caPath)
|
||||
self.logger.debug("Scp CA files to all nodes.")
|
||||
except Exception as e:
|
||||
certFile = caPath + "/demoCA/cacert.pem"
|
||||
if os.path.exists(certFile):
|
||||
g_file.removeFile(certFile)
|
||||
DefaultValue.cleanServerCaDir(caPath)
|
||||
raise Exception(str(e))
|
||||
for certFile in DefaultValue.SERVER_CERT_LIST:
|
||||
scpFile = os.path.join(caPath, "%s" % certFile)
|
||||
self.sshTool.scpFiles(scpFile, caPath, hostList)
|
||||
self.logger.debug("Successfully generated server CA files.")
|
||||
|
||||
def createGrpcCa(self, hostList=None):
|
||||
"""
|
||||
function: create grpc ca file
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
self.logger.debug("Generating grpc CA files.")
|
||||
if hostList is None:
|
||||
hostList = []
|
||||
appPath = DefaultValue.getInstallDir(self.user)
|
||||
caPath = os.path.join(appPath, "share/sslcert/grpc")
|
||||
self.logger.debug("The ca file dir is: %s." % caPath)
|
||||
if (len(hostList) == 0):
|
||||
for dbNode in self.clusterInfo.dbNodes:
|
||||
hostList.append(dbNode.name)
|
||||
# Create CA dir and prepare files for using.
|
||||
self.logger.debug("Create CA file directory.")
|
||||
try:
|
||||
DefaultValue.createCADir(self.sshTool, caPath, hostList)
|
||||
self.logger.debug("Add hostname to config file.")
|
||||
configPath = os.path.join(appPath,
|
||||
"share/sslcert/grpc/openssl.cnf")
|
||||
self.logger.debug("The ca file dir is: %s." % caPath)
|
||||
# Add hostname to openssl.cnf file.
|
||||
DefaultValue.changeOpenSslConf(configPath, hostList)
|
||||
self.logger.debug("Generate CA files.")
|
||||
DefaultValue.createCA(DefaultValue.GRPC_CA, caPath)
|
||||
# Clean useless files, and change permission of ca file to 600.
|
||||
DefaultValue.cleanCaDir(caPath)
|
||||
self.logger.debug("Scp CA files to all nodes.")
|
||||
except Exception as e:
|
||||
certFile = caPath + "/demoCA/cacertnew.pem"
|
||||
if os.path.exists(certFile):
|
||||
g_file.removeFile(certFile)
|
||||
DefaultValue.cleanCaDir(caPath)
|
||||
raise Exception(str(e))
|
||||
for certFile in DefaultValue.GRPC_CERT_LIST:
|
||||
scpFile = os.path.join(caPath, "%s" % certFile)
|
||||
self.sshTool.scpFiles(scpFile, caPath, hostList)
|
||||
self.logger.debug("Successfully generated grpc CA files.")
|
||||
|
||||
def genCipherAndRandFile(self, hostList=None):
|
||||
self.logger.debug("Encrypting cipher and rand files.")
|
||||
if hostList is None:
|
||||
hostList = []
|
||||
appPath = DefaultValue.getInstallDir(self.user)
|
||||
binPath = os.path.join(appPath, "bin")
|
||||
retry = 0
|
||||
while True:
|
||||
sshpwd = getpass.getpass("Please enter password for database:")
|
||||
sshpwd_check = getpass.getpass("Please repeat the password for database:")
|
||||
if sshpwd_check != sshpwd:
|
||||
sshpwd = ""
|
||||
sshpwd_check = ""
|
||||
self.logger.error(
|
||||
ErrorCode.GAUSS_503["GAUSS_50306"] % "database"
|
||||
+ "The two passwords are different, "
|
||||
"please enter password again.")
|
||||
else:
|
||||
cmd = "%s/gs_guc encrypt -M server -K %s -D %s " % (binPath,
|
||||
sshpwd,
|
||||
binPath)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
sshpwd = ""
|
||||
sshpwd_check = ""
|
||||
if status != 0:
|
||||
self.logger.error(
|
||||
ErrorCode.GAUSS_503["GAUSS_50322"] % "database"
|
||||
+ "Error:\n %s" % output)
|
||||
else:
|
||||
break
|
||||
if retry >= 2:
|
||||
raise Exception(
|
||||
ErrorCode.GAUSS_503["GAUSS_50322"] % "database")
|
||||
retry += 1
|
||||
g_file.changeMode(DefaultValue.KEY_FILE_MODE,
|
||||
"'%s'/server.key.cipher" % binPath)
|
||||
g_file.changeMode(DefaultValue.KEY_FILE_MODE,
|
||||
"'%s'/server.key.rand" % binPath)
|
||||
if len(hostList) == 0:
|
||||
for dbNode in self.clusterInfo.dbNodes:
|
||||
hostList.append(dbNode.name)
|
||||
for certFile in DefaultValue.BIN_CERT_LIST:
|
||||
scpFile = os.path.join(binPath, "%s" % certFile)
|
||||
self.sshTool.scpFiles(scpFile, binPath, hostList)
|
||||
self.logger.debug("Successfully encrypted cipher and rand files.")
|
||||
|
||||
|
||||
class Timeout(Exception):
|
||||
pass
|
||||
674
script/gspylib/common/ParameterParsecheck.py
Normal file
674
script/gspylib/common/ParameterParsecheck.py
Normal file
@ -0,0 +1,674 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : ParameterParsecheck.py is a utility to get Parameter
|
||||
# information and check it.
|
||||
#############################################################################
|
||||
import os
|
||||
import getopt
|
||||
import sys
|
||||
|
||||
sys.path.append(sys.path[0] + "/../../")
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.common.GaussLog import GaussLog
|
||||
from gspylib.os.gsfile import g_file
|
||||
from gspylib.common.VersionInfo import VersionInfo
|
||||
|
||||
PARAMETER_VALUEDICT = {}
|
||||
PARAMETER_KEYLIST = []
|
||||
ParameterDict = {}
|
||||
Itemstr = []
|
||||
skipItems = []
|
||||
user_passwd = []
|
||||
EnvParams = []
|
||||
DbInitParam = []
|
||||
DataGucParam = []
|
||||
NODE_NAME = []
|
||||
|
||||
# Add parameter: the logic cluster name
|
||||
PARA_CHECK_LIST = ["-t", "-h", "-m", "--mode",
|
||||
"-i", "-j", "-U", "-u", "-G", "-g", "--alarm-type",
|
||||
"-n", "-g",
|
||||
"-N", "--time-out", "--alarm-component",
|
||||
"--parallel-jobs", '--redis-mode', "--ring-num",
|
||||
"--virtual-ip",
|
||||
"--nodeName", "--name", "--failure-limit"]
|
||||
PATH_CHEKC_LIST = ["-M", "-o", "-f", "-X", "-P", "-s", "-R", "-Q",
|
||||
"--position", "-B",
|
||||
"--backupdir", "--sep-env-file", "-l", "--logpath",
|
||||
"--backup-dir",
|
||||
"--priority-tables", "--exclude-tables"]
|
||||
VALUE_CHECK_LIST = ["|", ";", "&", "$", "<", ">", "`", "\\", "'", "\"", "{",
|
||||
"}", "(", ")",
|
||||
"[", "]", "~", "*", "?", "!", "\n"]
|
||||
|
||||
# append ':' after a short option if it requires a parameter
|
||||
# append '=' after a long option if it requires a parameter
|
||||
# no child branch
|
||||
gs_preinstall = ["-?", "--help", "-V", "--version", "-U:", "-G:", "-L",
|
||||
"--skip-os-set", "-X:",
|
||||
"--env-var=", "--sep-env-file=", "--skip-hostname-set",
|
||||
"-l:", "--non-interactive"]
|
||||
gs_install = ["-?", "--help", "-V", "--version", "-X:", "-l:",
|
||||
"--gsinit-parameter=", "--dn-guc=",
|
||||
"--time-out=", "--alarm-component="]
|
||||
gs_uninstall = ["-?", "--help", "-V", "--version", "-l:", "-L",
|
||||
"--delete-data"]
|
||||
gs_postuninstall = ["-?", "--help", "-V", "--version", "--delete-user",
|
||||
"--delete-group", "--clean-gphome",
|
||||
"-U:", "-X:", "-l:", "-L"]
|
||||
gs_check = ["-?", "--help", "-V", "--version", "-e:", "-i:",
|
||||
"-U:", "-o:", "-l:", "-L", "--hosts=",
|
||||
"--format=", "--cid=", "--disk-threshold=",
|
||||
"--time-out=", "--routing=", "--skip-items=",
|
||||
"--ShrinkNodes=", "--nodegroup-name=",
|
||||
"--skip-root-items", "--set"]
|
||||
gs_sshexkey = ["-?", "--help", "-V", "--version",
|
||||
"-f:", "--skip-hostname-set", "-l:"]
|
||||
gs_backup = ["-?", "--help", "-V", "--version", "--backup-dir=",
|
||||
"--parameter",
|
||||
"--binary", "--all", "-l:", "-h:", "-t:", "-X:"]
|
||||
gs_collector = ["-?", "--help", "-V", "--version", "--begin-time=",
|
||||
"--end-time=",
|
||||
"--keyword=", "--speed-limit=", "-h:", "-f:", "-o:",
|
||||
"-l:", "-C:"]
|
||||
gs_checkperf = ["-?", "--help", "-V", "--version", "--detail", "-o:",
|
||||
"-i:", "-l:", "-U:"]
|
||||
gs_ssh = ["-?", "--help", "-V", "--version", "-c:"]
|
||||
gs_checkos = ["-?", "--help", "-V", "--version", "-h:", "-f:", "-o:",
|
||||
"-i:", "--detail",
|
||||
"-l:", "-X:"]
|
||||
gs_expansion = ["-?", "--help", "-V", "--version", "-U:", "-G:", "-L",
|
||||
"-X:", "-h:", "--sep-env-file="]
|
||||
gs_dropnode = ["-?", "--help", "-V", "--version", "-U:", "-G:",
|
||||
"-h:", "--sep-env-file="]
|
||||
|
||||
# gs_om child branch
|
||||
gs_om_start = ["-t:", "-?", "--help", "-V", "--version", "-h:", "-I:",
|
||||
"--time-out=", "--az=", "-l:", "--nodeId=", "-D:",
|
||||
"--security-mode="]
|
||||
gs_om_stop = ["-t:", "-?", "--help", "-V", "--version", "-h:", "-I:", "-m:",
|
||||
"--az=", "-l:", "--mode=", "--nodeId=", "--time-out=", "-D:"]
|
||||
gs_om_view = ["-t:", "-?", "--help", "-V", "--version", "-o:", "-l:"]
|
||||
gs_om_query = ["-t:", "-?", "--help", "-V", "--version", "-o:", "-l:"]
|
||||
gs_om_status = ["-t:", "-?", "--help", "-V", "--version", "-h:", "-o:",
|
||||
"--detail", "--all", "-l:"]
|
||||
gs_om_generateconf = ["-t:", "-?", "--help", "-V", "--version", "-X:",
|
||||
"--distribute", "-l:"]
|
||||
gs_om_cert = ["-t:", "-?", "--help", "-V", "--version", "-L", "-l:",
|
||||
"--cert-file=", "--rollback"]
|
||||
gs_om_kerberos = ["-t:", "-?", "--help", "-V", "--version", "-m:", "-U:",
|
||||
"-X:", "-l:", "--krb-server", "--krb-client"]
|
||||
gs_sql_list = ["-t:", "-?", "--help", "-V", "--version", "-c:",
|
||||
"--dbname=", "--dbuser=", "-W:"]
|
||||
gs_start = ["-n:", "-?", "--help", "-V", "--version", "-t:",
|
||||
"-D:"]
|
||||
gs_stop = ["-n:", "-?", "--help", "-V", "--version", "-t:",
|
||||
"-D:", "-m:"]
|
||||
gs_om_refreshconf = ["-t:", "-?", "--help", "-V", "--version", "-l:"]
|
||||
# gs_upgradectl child branch
|
||||
# AP and TP are same
|
||||
gs_upgradectl_chose_strategy = ["-t:", "-?", "--help", "-V", "--version",
|
||||
"-l:"]
|
||||
# auto-upgrade parameter lists
|
||||
gs_upgradectl_auto_upgrade = ["-t:", "-?", "--help", "-V", "--version", "-l:",
|
||||
"-X:"]
|
||||
# auto-rollback parameter lists
|
||||
gs_upgradectl_auto_rollback = ["-t:", "-?", "--help", "-V", "--version",
|
||||
"-l:", "-X:", "--force"]
|
||||
# commit-upgrade parameter lists
|
||||
gs_upgradectl_commit = ["-t:", "-?", "--help", "-V", "--version", "-l:", "-X:"]
|
||||
|
||||
ParameterDict = {"preinstall": gs_preinstall,
|
||||
"install": gs_install,
|
||||
"uninstall": gs_uninstall,
|
||||
"checkos": gs_checkos,
|
||||
"checkperf": gs_checkperf,
|
||||
"check": gs_check,
|
||||
"auto_upgrade": gs_upgradectl_auto_upgrade,
|
||||
"chose_strategy": gs_upgradectl_chose_strategy,
|
||||
"commit_upgrade": gs_upgradectl_commit,
|
||||
"auto_rollback": gs_upgradectl_auto_rollback,
|
||||
"start": gs_om_start,
|
||||
"stop": gs_om_stop,
|
||||
"status": gs_om_status,
|
||||
"generateconf": gs_om_generateconf,
|
||||
"cert": gs_om_cert,
|
||||
"kerberos": gs_om_kerberos,
|
||||
"sshexkey": gs_sshexkey,
|
||||
"backup": gs_backup,
|
||||
"collector": gs_collector,
|
||||
"ssh": gs_ssh,
|
||||
"postuninstall": gs_postuninstall,
|
||||
"view": gs_om_view,
|
||||
"query": gs_om_query,
|
||||
"refreshconf": gs_om_refreshconf,
|
||||
"expansion": gs_expansion,
|
||||
"dropnode": gs_dropnode
|
||||
}
|
||||
|
||||
# List of scripts with the -t parameter
|
||||
special_list = ["gs_om", "backup", "upgradectl"]
|
||||
|
||||
# The -t parameter list
|
||||
action_om = ["start", "stop", "status", "generateconf", "kerberos",
|
||||
"cert", "view", "query", "refreshconf"]
|
||||
action_upgradectl = ["chose-strategy", "auto-upgrade", "auto-rollback",
|
||||
"commit-upgrade"]
|
||||
|
||||
|
||||
class Parameter():
|
||||
'''
|
||||
get Parameter information and check it.
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
'''
|
||||
'''
|
||||
self.action = ""
|
||||
self.mode = ""
|
||||
self.helpflag = False
|
||||
self.versionflag = False
|
||||
|
||||
def ParseParameterValue(self, module):
|
||||
"""
|
||||
function: parse the parameter value
|
||||
input : parameter_list
|
||||
output: options
|
||||
"""
|
||||
# get the parameter list
|
||||
(shortParameter, longParameter) = self.getParseParameter(module)
|
||||
|
||||
try:
|
||||
paraList = sys.argv[1:]
|
||||
for paraInfo in paraList:
|
||||
if (paraInfo.startswith('--')):
|
||||
isFlag = False
|
||||
for longPara in longParameter:
|
||||
if (paraInfo[2:].startswith(longPara.strip("="))):
|
||||
isFlag = True
|
||||
if (not isFlag):
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_500["GAUSS_50000"] % paraInfo)
|
||||
# check delete parameter -h and -f, if specified lcname,
|
||||
# not required -h or -f.
|
||||
check_delete_name = False
|
||||
for check_i in sys.argv[1:]:
|
||||
if ("--name" in check_i):
|
||||
check_delete_name = True
|
||||
break
|
||||
(opts, args) = getopt.getopt(sys.argv[1:], shortParameter,
|
||||
longParameter)
|
||||
except Exception as e:
|
||||
s1 = str(e).split(" ")
|
||||
option = s1[1]
|
||||
if ("requires argument" in str(e)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"] % \
|
||||
option[1:] + " Error:\n%s" % str(e))
|
||||
elif ("not recognized" in str(e)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50000"]
|
||||
% option)
|
||||
elif ("not a unique prefix" in str(e)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50006"]
|
||||
% option)
|
||||
else:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50000"]
|
||||
% str(e))
|
||||
|
||||
if (len(args) > 0):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50000"]
|
||||
% str(args[0]))
|
||||
|
||||
return opts
|
||||
|
||||
def moveCrypto(self, module):
|
||||
"""
|
||||
function: configure the crypto library used by paramiko for modules that need ssh
|
||||
input : module
|
||||
output: NA
|
||||
"""
|
||||
if (module in ("preinstall", "sshexkey")):
|
||||
DefaultValue.doConfigForParamiko()
|
||||
|
||||
def printVersionInfo(self):
|
||||
"""
|
||||
"""
|
||||
if (self.versionflag):
|
||||
print("%s %s" % (sys.argv[0].split("/")[-1],
|
||||
VersionInfo.COMMON_VERSION))
|
||||
sys.exit(0)
|
||||
|
||||
def ParameterCommandLine(self, module):
|
||||
"""
|
||||
function: Parse the parameter
|
||||
input : parameter_list
|
||||
output: PARAMETER_VALUEDICT
|
||||
"""
|
||||
# copy crypto
|
||||
self.moveCrypto(module)
|
||||
|
||||
# Determines whether help and version information is output
|
||||
self.helpflag, self.versionflag = self.getHelpAndVersionStatus()
|
||||
if (self.helpflag):
|
||||
PARAMETER_VALUEDICT['helpFlag'] = self.helpflag
|
||||
return PARAMETER_VALUEDICT
|
||||
|
||||
# print version information
|
||||
self.printVersionInfo()
|
||||
|
||||
# Special handling of the -t parameter
|
||||
self.getActionParameterValue(module)
|
||||
|
||||
# get the parameter list
|
||||
opts = self.ParseParameterValue(module)
|
||||
|
||||
parameterNeedValue = {"-t": "action",
|
||||
"-c": "cmd",
|
||||
"-m": "Mode",
|
||||
"--mode": "Mode",
|
||||
# hotpatch name
|
||||
"-n": "patch_name",
|
||||
"-d": "destPath",
|
||||
"-s": "sourcePath",
|
||||
"-j": "jobs",
|
||||
"-U": "user",
|
||||
"-G": "group",
|
||||
"-I": "instance_name",
|
||||
"-e": "scenes",
|
||||
"-C": "configFile",
|
||||
"--format": "format",
|
||||
"--cid": "cid",
|
||||
"--routing": "routing",
|
||||
"--ShrinkNodes": "shrinkNodes",
|
||||
"--az": "az_name",
|
||||
"--root-passwd": "rootPasswd",
|
||||
"--alarm-type": "warningType",
|
||||
"--alarm-server-addr": "warningserverip",
|
||||
"--time-out": "time_out", "": "",
|
||||
"--alarm-component": "alarm_component",
|
||||
"--SSD-fault-time": "SSDFaultTime",
|
||||
"--begin-time": "begintime",
|
||||
"--end-time": "endtime",
|
||||
"--keyword": "keyword",
|
||||
"--redis-mode": "redismode",
|
||||
"--failure-limit": "failure_limit",
|
||||
"--virtual-ip": "virtual-ip",
|
||||
"--master": "master",
|
||||
"--standby": "standby",
|
||||
"--disk-threshold": "disk-threshold",
|
||||
"--target": "target",
|
||||
"--name": "name",
|
||||
"-N": "DSN",
|
||||
"--type": "type",
|
||||
"--remote-host": "remote_host",
|
||||
"--remote-env-file": "remote_mpprcfile",
|
||||
"--dilatation-mode": "dilatation_mode",
|
||||
"--nodegroup-name": "nodegroup_name",
|
||||
"--speed-limit": "speedLimit",
|
||||
# add "resourcectl" for resource control
|
||||
# in data redistribution
|
||||
"--resource-level": "resource_level",
|
||||
"-p": "port",
|
||||
"--dn-port": "dn-port",
|
||||
"--dn-ip": "dn-ip",
|
||||
"--interval": "interval",
|
||||
"--threshold": "threshold",
|
||||
"--check-count": "check_count",
|
||||
"--wait-count": "wait_count",
|
||||
"--option": "option",
|
||||
"--dbname": "dbname",
|
||||
"--dbuser": "dbuser",
|
||||
"--nodeId": "nodeId",
|
||||
"--security-mode": "security_mode"
|
||||
}
|
||||
parameterNeedValue_keys = parameterNeedValue.keys()
|
||||
|
||||
parameterIsBool = {"-L": "localMode",
|
||||
"--set": "set",
|
||||
"--skip-root-items": "skipRootItems",
|
||||
"--non-interactive": "preMode",
|
||||
"--skip-os-set": "skipOSSet",
|
||||
"--skip-hostname-set": "skipHostnameSet",
|
||||
"--reset": "reset",
|
||||
"--parameter": "isParameter",
|
||||
"--binary": "isBinary",
|
||||
"--delete-data": "cleanInstance",
|
||||
"--delete-user": "delete-user",
|
||||
"--delete-group": "delete-group",
|
||||
"--dws-mode": "dws-mode",
|
||||
"--detail": "show_detail",
|
||||
"--detail-all": "show_detail_all",
|
||||
"--rollback": "rollback",
|
||||
"--vacuum-full": "enable_vacuum",
|
||||
"--fast-redis": "enable_fast",
|
||||
"--distribute": "distribute",
|
||||
"--build-redistb": "buildTable",
|
||||
"--key-files": "key-files",
|
||||
"--all": "all",
|
||||
"--upgrade": "upgrade",
|
||||
"--lcname-only": "lcname-only",
|
||||
"--high-perform": "high-perform",
|
||||
"--elastic-group": "elastic-group",
|
||||
"--addto-elastic-group": "isAddElasticGroup",
|
||||
"--express": "express",
|
||||
"--checkdisk": "checkdisk",
|
||||
"--inplace": "inplace",
|
||||
"--continue": "continue",
|
||||
"--force": "force",
|
||||
"--agent-mode": "agentMode",
|
||||
"--krb-server": "krb-server",
|
||||
"--krb-client": "krb-client",
|
||||
}
|
||||
parameterIsBool_keys = parameterIsBool.keys()
|
||||
|
||||
# Parameter assignment and return
|
||||
for (key, value) in opts:
|
||||
if (key in parameterNeedValue_keys):
|
||||
PARAMETER_VALUEDICT[parameterNeedValue[key]] = value
|
||||
elif (key in parameterIsBool_keys):
|
||||
PARAMETER_VALUEDICT[parameterIsBool[key]] = True
|
||||
elif (key == "-h"):
|
||||
# Only obtain the last value of hostname
|
||||
del NODE_NAME[:]
|
||||
for node in value.strip().split(","):
|
||||
if (node is not None and node != "" and (
|
||||
node not in NODE_NAME)):
|
||||
NODE_NAME.append(node.strip())
|
||||
elif (key == "-W" or key == "--password"):
|
||||
user_passwd.append(value)
|
||||
elif (key == "-D"):
|
||||
PARAMETER_VALUEDICT['dataDir'] = os.path.normpath(value)
|
||||
elif (key == "-M"):
|
||||
PARAMETER_VALUEDICT['cgroupMountDir'] = \
|
||||
os.path.realpath(value.strip())
|
||||
elif (key == "-o"):
|
||||
PARAMETER_VALUEDICT['outFile'] = os.path.realpath(value)
|
||||
if (module not in ["collector", "check"]):
|
||||
self.createOutputDir(os.path.realpath(value))
|
||||
elif (key == "-i"):
|
||||
for item in value.strip().split(","):
|
||||
if item is not None and item != "" \
|
||||
and (item not in Itemstr):
|
||||
Itemstr.append(item)
|
||||
elif (key == "--skip-items"):
|
||||
for item in value.strip().split(","):
|
||||
if (item is not None and item != "" and (
|
||||
item not in skipItems)):
|
||||
skipItems.append(item)
|
||||
elif self.action != "license" and (
|
||||
key == "-f" or key == "--hosts"):
|
||||
hostFile = self.checkPath(key, value)
|
||||
PARAMETER_VALUEDICT['hostfile'] = os.path.realpath(hostFile)
|
||||
elif (key == "-X"):
|
||||
if (module != "uninstall"):
|
||||
xmlFile = self.checkPath(key, value)
|
||||
PARAMETER_VALUEDICT['confFile'] = os.path.realpath(xmlFile)
|
||||
else:
|
||||
xmlFile = str(value)
|
||||
PARAMETER_VALUEDICT['confFile'] = os.path.realpath(xmlFile)
|
||||
elif (key == "--env-var"):
|
||||
EnvParams.append(value)
|
||||
elif (key == "--sep-env-file"):
|
||||
PARAMETER_VALUEDICT['mpprcFile'] = os.path.realpath(value)
|
||||
elif (key == "--gsinit-parameter"):
|
||||
DbInitParam.append(value)
|
||||
elif (key == "--dn-guc"):
|
||||
DataGucParam.append(value)
|
||||
elif (key == "-l"):
|
||||
PARAMETER_VALUEDICT['logFile'] = os.path.realpath(value)
|
||||
elif (key == "--backup-dir"):
|
||||
PARAMETER_VALUEDICT['backupDir'] = \
|
||||
os.path.realpath(value.strip())
|
||||
elif (key == "--all"):
|
||||
PARAMETER_VALUEDICT['isParameter'] = True
|
||||
PARAMETER_VALUEDICT['isBinary'] = True
|
||||
elif (key == "--parallel-jobs"):
|
||||
paralleljobs = self.checkParamternum(key, value)
|
||||
PARAMETER_VALUEDICT['paralleljobs'] = paralleljobs
|
||||
elif (key == "-g"):
|
||||
nodesNum = self.checkParamternum(key, value)
|
||||
PARAMETER_VALUEDICT['nodesNum'] = nodesNum
|
||||
elif (key == "--ring-num"):
|
||||
ringNumbers = self.checkParamternum(key, value)
|
||||
PARAMETER_VALUEDICT['ringNumbers'] = ringNumbers
|
||||
elif (key == "--cert-file"):
|
||||
PARAMETER_VALUEDICT['cert-file'] = \
|
||||
os.path.realpath(value.strip())
|
||||
elif (key == "--priority-tables"):
|
||||
PARAMETER_VALUEDICT['priority-tables'] = \
|
||||
os.path.realpath(value.strip())
|
||||
elif key == "--role":
|
||||
PARAMETER_VALUEDICT['role'] = value.strip()
|
||||
elif (key == "--exclude-tables"):
|
||||
PARAMETER_VALUEDICT['exclude-tables'] = \
|
||||
os.path.realpath(value.strip())
|
||||
|
||||
# Only check / symbol for gs_lcct.
|
||||
if key in ("--name", "--nodegroup-name"):
|
||||
self.checkLcGroupName(key, value)
|
||||
Parameter.checkParaVaild(key, value)
|
||||
|
||||
parameterIsList = {"passwords": user_passwd,
|
||||
"envparams": EnvParams,
|
||||
"dbInitParams": DbInitParam,
|
||||
"dataGucParams": DataGucParam,
|
||||
"itemstr": Itemstr,
|
||||
"skipItems": skipItems,
|
||||
"nodename": NODE_NAME
|
||||
}
|
||||
parameterlenkeys = parameterIsList.keys()
|
||||
for key in parameterlenkeys:
|
||||
if (len(parameterIsList[key]) > 0):
|
||||
PARAMETER_VALUEDICT[key] = parameterIsList[key]
|
||||
return PARAMETER_VALUEDICT
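# A minimal usage sketch with a hypothetical command line for gs_om; the paths
# and option values are made-up examples.
#   sys.argv = ["gs_om", "-t", "status", "--detail", "-l", "/tmp/om.log"]
#   paraDict = Parameter().ParameterCommandLine("gs_om")
#   # paraDict would contain roughly:
#   #   {'action': 'status', 'show_detail': True, 'logFile': '/tmp/om.log'}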
|
||||
|
||||
@staticmethod
|
||||
def checkParaVaild(para, value):
|
||||
"""
|
||||
function: check whether the parameter value is valid
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
for role in VALUE_CHECK_LIST:
|
||||
if PARA_CHECK_LIST.__contains__(para):
|
||||
if value.find(role) >= 0:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500[
|
||||
"GAUSS_50011"] % \
|
||||
(para,
|
||||
value) + " Invaild value: %s." %
|
||||
role)
|
||||
if PATH_CHEKC_LIST.__contains__(para):
|
||||
if os.path.realpath(value).find(role) >= 0:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500[
|
||||
"GAUSS_50011"] % \
|
||||
(para, value) +
|
||||
" Invaild value: %s." % role)
|
||||
|
||||
def checkLcGroupName(self, lcPara, lcGroupName):
|
||||
"""
|
||||
function: Check whether the logical cluster name is legal.
|
||||
input : lcGroupName
|
||||
output: NA
|
||||
"""
|
||||
import re
|
||||
PATTERN = "^[a-zA-Z0-9_]{1,63}$"
|
||||
pattern = re.compile(PATTERN)
|
||||
result = pattern.match(lcGroupName)
|
||||
if (result is None):
|
||||
raise Exception(ErrorCode.GAUSS_500["GAUSS_50004"] % lcPara[1:]
|
||||
+ " The name of the logical cluster does not "
|
||||
"exceed 63 characters and can only contain "
|
||||
"letters, numbers, and underscores.")
|
||||
if (lcGroupName in ["group_version1", "group_version2",
|
||||
"group_version3",
|
||||
"elastic_group"]):
|
||||
raise Exception(ErrorCode.GAUSS_500["GAUSS_50004"] % lcPara[1:]
|
||||
+ " The name of the logical cluster cannot be "
|
||||
"'group_version1' or 'group_version2' or "
|
||||
"'group_version3' or 'elastic_group'.")
|
||||
|
||||
def getHelpAndVersionStatus(self):
|
||||
"""
|
||||
function: get help and version information status
|
||||
input : NA
|
||||
output: helpflag, versionflag
|
||||
"""
|
||||
helpflag = False
|
||||
versionflag = False
|
||||
for parameter in sys.argv[1:]:
|
||||
if (parameter == "-?" or parameter == "--help"):
|
||||
helpflag = True
|
||||
if (parameter == "-V" or parameter == "--version"):
|
||||
versionflag = True
|
||||
return helpflag, versionflag
|
||||
|
||||
def getActionParameterValue(self, module):
|
||||
"""
|
||||
function: get the action value
|
||||
input : parameter_list
|
||||
output: NA
|
||||
"""
|
||||
actions = []
|
||||
getMode = False
|
||||
if (module in special_list):
|
||||
if (sys.argv[1:] == []):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50014"]
|
||||
% module)
|
||||
if (sys.argv[1:][-1] == "-t"):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"] % \
|
||||
"t" + " option -t requires argument.")
|
||||
|
||||
for n, value in enumerate(sys.argv[1:]):
|
||||
if (sys.argv[1:][n - 1] == "-t"):
|
||||
actions.append(value)
|
||||
if (len(actions) != 1):
|
||||
GaussLog.exitWithError(
|
||||
ErrorCode.GAUSS_500["GAUSS_50006"] % actions[0])
|
||||
self.action = value
|
||||
|
||||
if self.action == "":
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50001"]
|
||||
% "t" + ".")
|
||||
|
||||
if ((module == "gsom" and not self.action in action_om)
|
||||
or (module == "upgradectl"
|
||||
and not self.action in action_upgradectl)):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"]
|
||||
% "t")
|
||||
|
||||
def createOutputDir(self, path):
|
||||
"""
|
||||
function: create output directory
|
||||
input : path
|
||||
output: NA
|
||||
"""
|
||||
try:
|
||||
DefaultValue.checkOutputFile(path)
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
|
||||
dirName = os.path.dirname(os.path.realpath(path))
|
||||
if (not os.path.isdir(dirName)):
|
||||
try:
|
||||
os.makedirs(dirName, DefaultValue.DIRECTORY_PERMISSION)
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_502["GAUSS_50206"] % \
|
||||
("outputfile[%s]" % path)
|
||||
+ "Error:\n%s" % str(e))
|
||||
|
||||
def checkParamternum(self, key, value):
|
||||
"""
|
||||
function: Check some number parameters
|
||||
input : key, value
|
||||
output: numvalue
|
||||
"""
|
||||
try:
|
||||
numvalue = int(value)
|
||||
if (numvalue <= 0):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"] % \
|
||||
key[1:]
|
||||
+ " Parameter '%s' must be greater"
|
||||
" than or equal to 1." % key)
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50003"] % \
|
||||
(key[1:], "integer")
|
||||
+ " Error:\n%s" % str(e))
|
||||
|
||||
return numvalue
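# Behaviour sketch of the numeric check above, with hypothetical inputs:
#   self.checkParamternum("-g", "3")    -> returns 3
#   self.checkParamternum("-g", "0")    -> exits with GAUSS_50004 (must be >= 1)
#   self.checkParamternum("-g", "abc")  -> exits with GAUSS_50003 (not an integer)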
|
||||
|
||||
def checkPath(self, key, value):
|
||||
"""
|
||||
function: Check some path parameters
|
||||
input : key, value
|
||||
output: path
|
||||
"""
|
||||
# Check that the path parameter is a file
|
||||
try:
|
||||
if (not value):
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"]
|
||||
% key[1:] +
|
||||
"Error:\noption %s requires argument"
|
||||
% key)
|
||||
path = str(value)
|
||||
g_file.checkFilePermission(path, True)
|
||||
return path
|
||||
except Exception as e:
|
||||
GaussLog.exitWithError(str(e))
|
||||
|
||||
def getParseParameter(self, module):
|
||||
"""
|
||||
function: get parse parameters
|
||||
input : parameter_list
|
||||
output: shortPara,longPara
|
||||
"""
|
||||
|
||||
shortPara = ""
|
||||
longPara = []
|
||||
var = "--"
|
||||
|
||||
ParameterList = ""
|
||||
if (module == "upgradectl"):
|
||||
if (self.action == "chose-strategy"):
|
||||
ParameterList = ParameterDict.get("chose_strategy")
|
||||
elif (self.action == "auto-rollback"):
|
||||
ParameterList = ParameterDict.get("auto_rollback")
|
||||
elif (self.action == "auto-upgrade"):
|
||||
ParameterList = ParameterDict.get("auto_upgrade")
|
||||
elif (self.action == "commit-upgrade"):
|
||||
ParameterList = ParameterDict.get("commit_upgrade")
|
||||
else:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"]
|
||||
% "t")
|
||||
|
||||
elif (module == "gs_om"):
|
||||
if (self.action in action_om):
|
||||
ParameterList = ParameterDict.get(self.action)
|
||||
else:
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"]
|
||||
% "t")
|
||||
else:
|
||||
ParameterList = ParameterDict.get(module)
|
||||
|
||||
for para in ParameterList:
|
||||
if var in para:
|
||||
varlong = para.strip("--")
|
||||
longPara.append(varlong)
|
||||
else:
|
||||
varshort = para.strip("-")
|
||||
shortPara += varshort
|
||||
|
||||
return shortPara, longPara
|
||||
204
script/gspylib/common/SSDFaultInfo.py
Normal file
204
script/gspylib/common/SSDFaultInfo.py
Normal file
@ -0,0 +1,204 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : SSDFaultInfo.py is a utility for SSD fault info
|
||||
#############################################################################
|
||||
import sys
|
||||
|
||||
|
||||
class SSDFaultInfo():
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
Failurelibs = {
|
||||
"0x11": ["hioerr", "Data recovery failure",
|
||||
"GC and CM read, RAID recovery failed",
|
||||
"data error and failed to recover",
|
||||
"contact support for repairs"],
|
||||
"0x2b": ["hioerr", "Read back failure after programming failure",
|
||||
"write failure read back failure",
|
||||
"4KB data error and failed to recover",
|
||||
"contact support for repairs"],
|
||||
"0x2e": ["hioerr", "No available reserved blocks",
|
||||
"No available blocks", "result in timeout",
|
||||
"contact support for repairs"],
|
||||
"0x300": ["hioerr", "CMD timeout", "CMD timeout",
|
||||
"CMD executive timeout", "contact support for repairs"],
|
||||
"0x307": ["hioerr", "I/O error", "IO error",
|
||||
"Read/write error, return by read and write return value",
|
||||
"contact support for repairs"],
|
||||
"0x30a": ["hioerr", "DDR init failed", "DDR initialization failure",
|
||||
"Drive loading failed", "contact support for repairs"],
|
||||
"0x30c": ["hioerr", "Controller reset sync hioerr",
|
||||
"Controller reset status out of sync ",
|
||||
"Drive loading failed", "contact support for repairs"],
|
||||
"0x30d": ["hioerr", "Clock fault", "abnormals found in clock testing",
|
||||
"If happened during the process of loading, it may cause "
|
||||
"drive loading failed, during operation may cause SSD work "
|
||||
"abnormally",
|
||||
"contact support for repairs"],
|
||||
"0x312": ["hiowarn", "CAP: voltage fault",
|
||||
"Capacitance voltage alarming",
|
||||
"cause power-fail protection failure, without affecting IO "
|
||||
"functions",
|
||||
"contact support for repairs"],
|
||||
"0x313": ["hiowarn", "CAP: learn fault",
|
||||
"Capacitance self learning error.",
|
||||
"Unable to get accurate capacitance, may cause power fail "
|
||||
"protection failure; without affecting IO functions",
|
||||
"contact support for repairs"],
|
||||
"0x314": ["hionote", "CAP status",
|
||||
"Capacitance is in self learning status", "None", "Ignore"],
|
||||
"0x31a": ["hiowarn", "CAP: short circuit",
|
||||
"Capacitance voltage is zero, possibly short circuit",
|
||||
"may cause power-fail protection failure; without "
|
||||
"affecting IO functions",
|
||||
"contact support for repairs"],
|
||||
"0x31b": ["hiowarn", "Sensor fault", "Sensor access error",
|
||||
"If occurred during the process of loading ,it may cause "
|
||||
"drive loading failed; capacitor voltage could not be "
|
||||
"monitored during operation",
|
||||
"contact support for repairs"],
|
||||
"0x39": ["hioerr", "Init: PBMT scan failure",
|
||||
"initialization scanning PBMT read error",
|
||||
"part of data mapping relationship missing, 64MB at most "
|
||||
"can not find",
|
||||
"contact support for repairs"],
|
||||
"0x3b": ["hioerr", "Init: first page scan failure",
|
||||
"initialization scan home page read error",
|
||||
"part of data mapping relationship missing, 64MB at most "
|
||||
"can not find (occur during initialization)",
|
||||
"contact support for repairs"],
|
||||
"0x3c": ["hioerr", "Init: scan unclosed block failure",
|
||||
"Init:reset pointer, data page read error",
|
||||
"4KB data mapping relationship missing, the 4KB data can "
|
||||
"not be found",
|
||||
"contact support for repairs"],
|
||||
"0x40": ["hioerr", "Init: PMT recovery: data page read failure",
|
||||
"Init: PMT recovery, data page read error",
|
||||
"4KB data mapping relationship missing, the 4KB data can "
|
||||
"not be found",
|
||||
"contact support for repairs"],
|
||||
"0x43": ["hioerr", "too many unclosed blocks",
|
||||
"scan to the third unfulfilled block ",
|
||||
"Split from original 0x3c scenario. Part of data mapping "
|
||||
"relationship missing, 64MB at most can not find (occur "
|
||||
"when initialization)",
|
||||
"contact support for repairs"],
|
||||
"0x45": ["hioerr", "Init: more than one PDW block found",
|
||||
"PDW Initialization abnormal: found two and more than two "
|
||||
"PWD",
|
||||
"abnormal, may cause data missing",
|
||||
"contact support for repairs"],
|
||||
"0x47": ["hionote", "Init: PDW block not found",
|
||||
"initialization abnormal: PDW is not found when "
|
||||
"initialization",
|
||||
"data may be incomplete", "contact support for repairs"],
|
||||
"0x50": ["hioerr", "Cache: hit error data", "Cache hit data error",
|
||||
"4KB data error and failed to recover",
|
||||
"contact support for repairs"],
|
||||
"0x51": ["hioerr", "Cache: read back failure",
|
||||
"Cache completion and reading back error",
|
||||
"4KB data error and failed to recover",
|
||||
"contact support for repairs"],
|
||||
"0x53": ["hioerr", "GC/WL read back failure",
|
||||
"GC and WL read, data error",
|
||||
"4KB data error and failed to recover",
|
||||
"contact support for repairs"],
|
||||
"0x7": ["hioerr", "No available blocks",
|
||||
"no available block, free list is empty",
|
||||
"data failed to write normally",
|
||||
"contact support for repairs"],
|
||||
"0x7e": ["hionote", "Read blank page", "read blank page",
|
||||
"IO return successfully, but read wrong data",
|
||||
"contact support for repairs"],
|
||||
"0x7f": ["hiowarn", "Access flash timeout", "access flash timeout",
|
||||
"without affecting data correctness, but access Flash "
|
||||
"timeout",
|
||||
"Ignore"],
|
||||
"0x8a": ["hiowarn", "Warning: Bad Block close to limit",
|
||||
"bad block level 1 alarming (exceed 11%)",
|
||||
"bad block level 1 alarming (exceed 11%)", "Ignore"],
|
||||
"0x8b": ["hioerr", "Error: Bad Block over limit",
|
||||
"bad block level 2 alarming (exceed 14%)",
|
||||
"bad block level 2 alarming (exceed 14%)",
|
||||
"contact support for repairs"],
|
||||
"0x8c": ["hiowarn", "Warning: P/E cycles close to limit",
|
||||
"P/E cycles Level 1 alarming", "P/E cycles Level 1 alarming",
|
||||
"Ignore"],
|
||||
"0x8d": ["hioerr", "Error: P/E cycles over limit",
|
||||
"P/E cycles Level 2 alarming", "P/E cycles Level 2 alarming",
|
||||
"Scrapped"],
|
||||
"0x90": ["hionote", "Over temperature",
|
||||
"temperature value exceed limitation: current defined 90 "
|
||||
"centi degrees",
|
||||
"High temperature may cause SSD abnormal, if found this "
|
||||
"alarm should test server fan speed etc. then drive will "
|
||||
"run protection mechanism, limit IO speed (shut down this "
|
||||
"function by API)",
|
||||
"Suggest to check radiator"],
|
||||
"0x91": ["hionote", "Temperature is OK",
|
||||
"Temperature goes back to normal", "None", "Ignore"],
|
||||
"0x92": ["hiowarn", "Battery fault", "Super-capacitor status alarming",
|
||||
"Super-capacitor working status is abnormal",
|
||||
"contact support for repairs"],
|
||||
"0x93": ["hioerr", "SEU fault", "logical found SEU fault",
|
||||
"May cause logical working abnormally",
|
||||
"Power up and down on the SSD"],
|
||||
"0x94": ["hioerr", "DDR error",
|
||||
"data error found in controller plug-in DDR",
|
||||
"May cause controller work abnormally (data may have been "
|
||||
"in disorder status)",
|
||||
"contact support for repairs"],
|
||||
"0x95": ["hioerr", "Controller serdes error",
|
||||
"Controller serdes test transmission error",
|
||||
"May cause controller work abnormally(data may have been in "
|
||||
"disorder status)",
|
||||
"contact support for repairs"],
|
||||
"0x96": ["hioerr", "Bridge serdes 1 error",
|
||||
"Bridge controller serdes 1 test transmission error",
|
||||
"May cause controller work abnormally(data may have been in "
|
||||
"disorder status)",
|
||||
"contact support for repairs"],
|
||||
"0x97": ["hioerr", "Bridge serdes 2 error",
|
||||
"Bridge controller serdes 2 test transmission error",
|
||||
"May cause controller work abnormally(data may have been in "
|
||||
"disorder status)",
|
||||
"contact support for repairs"],
|
||||
"0x98": ["hioerr", "SEU fault (corrected)",
|
||||
"SEU fault (correctable error)",
|
||||
"Split from original 0x3c scenario. May cause logical "
|
||||
"working abnormally (10 seconds time-delay from error to "
|
||||
"correct process)",
|
||||
"Reset SSD"],
|
||||
"0x9a": ["hionote", "Over temperature",
|
||||
"temperature value exceed limitation: current defined 90 "
|
||||
"centi degrees",
|
||||
"High temperature may cause SSD abnormal, if found this "
|
||||
"alarm should test server fan speed etc. then drive will "
|
||||
"run protection mechanism, limit IO speed (shut down this "
|
||||
"function by API)",
|
||||
"Suggest to check radiator"],
|
||||
"0xf1": ["hioerr", "Read failure without recovery",
|
||||
"IOR read can not recover",
|
||||
"4KB data error and failed to recover",
|
||||
"contact support for repairs"],
|
||||
"0xf7": ["hioerr", "Init: RAID not complete",
|
||||
"Init: RAID not complete",
|
||||
"RAID line data not complete before power failed",
|
||||
"contact support for repairs"]
|
||||
}
|
||||
133
script/gspylib/common/Signal.py
Normal file
133
script/gspylib/common/Signal.py
Normal file
@ -0,0 +1,133 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : Signal.py is a utility to process signals
|
||||
#############################################################################
|
||||
import sys
|
||||
import signal
|
||||
|
||||
sys.path.append(sys.path[0] + "/../../")
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
|
||||
|
||||
class Signal(object):
|
||||
|
||||
##########################################################################
|
||||
# init signal handler
|
||||
##########################################################################
|
||||
|
||||
def __init__(self, logger):
|
||||
"""
|
||||
function: initialize signal handler
|
||||
input : object logger
|
||||
output: NA
|
||||
"""
|
||||
self.logger = logger
|
||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGQUIT, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGTERM, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGALRM, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGHUP, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGUSR1, signal.SIG_IGN)
|
||||
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
|
||||
|
||||
def setSignalEvent(self, functionName=None):
|
||||
"""
|
||||
function: initialize signal handler
|
||||
input : function
|
||||
output: NA
|
||||
"""
|
||||
if (functionName is not None):
|
||||
signal.signal(signal.SIGINT, functionName)
|
||||
else:
|
||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
|
||||
def print_signal_stack(self, frame):
|
||||
"""
|
||||
function: Function to print signal stack
|
||||
input : frame
|
||||
output: NA
|
||||
"""
|
||||
if (self.logger is None):
|
||||
return
|
||||
try:
|
||||
import inspect
|
||||
stacks = inspect.getouterframes(frame)
|
||||
for curr in range(len(stacks)):
|
||||
stack = stacks[curr]
|
||||
self.logger.debug("Stack level: %d. File: %s. Function: %s. "
|
||||
"LineNo: %d." % (curr, stack[1], stack[3],
|
||||
stack[2]))
|
||||
self.logger.debug("Code: %s." %
|
||||
(stack[4][0].strip().strip("\n")))
|
||||
except Exception as e:
|
||||
self.logger.debug("Failed to print signal stack. Error: \n%s"
|
||||
% str(e))
|
||||
|
||||
def raise_handler(self, signal_num, frame):
|
||||
"""
|
||||
function: Function to raise handler
|
||||
input : signal_num, frame
|
||||
output: NA
|
||||
"""
|
||||
if (self.logger is not None):
|
||||
self.logger.debug("Received signal[%d]." % (signal_num))
|
||||
self.print_signal_stack(frame)
|
||||
raise Exception(ErrorCode.GAUSS_516["GAUSS_51614"] % (signal_num))
|
||||
|
||||
def setupTimeoutHandler(self):
|
||||
"""
|
||||
function: Function to set up time out handler
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
signal.signal(signal.SIGALRM, self.timeout_handler)
|
||||
|
||||
def setTimer(self, timeout):
|
||||
"""
|
||||
function: Function to set timer
|
||||
input : timeout
|
||||
output: NA
|
||||
"""
|
||||
self.logger.debug("Set timer. The timeout: %d." % timeout)
|
||||
signal.signal(signal.SIGALRM, self.timeout_handler)
|
||||
signal.alarm(timeout)
|
||||
|
||||
def resetTimer(self):
|
||||
"""
|
||||
function: Reset timer
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
signal.signal(signal.SIGALRM, signal.SIG_IGN)
|
||||
self.logger.debug("Reset timer. Left time: %d." % signal.alarm(0))
|
||||
|
||||
def timeout_handler(self, signal_num, frame):
|
||||
"""
|
||||
function: Received the timeout signal
|
||||
input : signal_num, frame
|
||||
output: NA
|
||||
"""
|
||||
if (self.logger is not None):
|
||||
self.logger.debug("Received the timeout signal: [%d]."
|
||||
% (signal_num))
|
||||
self.print_signal_stack(frame)
|
||||
raise Timeout("Time out.")
|
||||
|
||||
|
||||
class Timeout(Exception):
|
||||
pass
|
||||
5653
script/gspylib/common/Sql.py
Normal file
5653
script/gspylib/common/Sql.py
Normal file
File diff suppressed because it is too large
70
script/gspylib/common/SqlResult.py
Normal file
70
script/gspylib/common/SqlResult.py
Normal file
@ -0,0 +1,70 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : SqlResult.py is a utility to store search results from the database
|
||||
#############################################################################
|
||||
import os
|
||||
import sys
|
||||
from ctypes import *
|
||||
|
||||
sys.path.append(sys.path[0] + "/../../")
|
||||
from gspylib.common.Common import DefaultValue
|
||||
|
||||
|
||||
class sqlResult():
|
||||
"""
|
||||
Class for storing search result from database
|
||||
"""
|
||||
|
||||
def __init__(self, result):
|
||||
"""
|
||||
Constructor
|
||||
"""
|
||||
self.resCount = 0
|
||||
self.resSet = []
|
||||
self.result = result
|
||||
|
||||
def parseResult(self):
|
||||
"""
|
||||
function : get resCount and resSet from result
|
||||
input:NA
|
||||
output:NA
|
||||
"""
|
||||
try:
|
||||
libpath = os.path.join(DefaultValue.getEnv("GAUSSHOME"), "lib")
|
||||
sys.path.append(libpath)
|
||||
libc = cdll.LoadLibrary("libpq.so.5.5")
|
||||
libc.PQntuples.argtypes = [c_void_p]
|
||||
libc.PQntuples.restype = c_int
|
||||
libc.PQnfields.argtypes = [c_void_p]
|
||||
libc.PQnfields.restype = c_int
|
||||
libc.PQgetvalue.restype = c_char_p
|
||||
ntups = libc.PQntuples(self.result)
|
||||
nfields = libc.PQnfields(self.result)
|
||||
libc.PQgetvalue.argtypes = [c_void_p, c_int, c_int]
|
||||
self.resCount = ntups
|
||||
for i in range(ntups):
|
||||
tmpString = []
|
||||
for j in range(nfields):
|
||||
paramValue = libc.PQgetvalue(self.result, i, j)
|
||||
if (paramValue is not None):
|
||||
tmpString.append(string_at(paramValue).decode())
|
||||
else:
|
||||
tmpString.append("")
|
||||
self.resSet.append(tmpString)
|
||||
except Exception as e:
|
||||
raise Exception("%s" % str(e))
|
||||
126
script/gspylib/common/VersionInfo.py
Normal file
126
script/gspylib/common/VersionInfo.py
Normal file
@ -0,0 +1,126 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
#############################################################################
|
||||
"""
|
||||
This file handles Gauss version information.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
|
||||
sys.path.append(sys.path[0] + "/../../")
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
|
||||
|
||||
class VersionInfo():
|
||||
"""
|
||||
Info about current version
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
# package version
|
||||
__PACKAGE_VERSION = ""
|
||||
# OM version string
|
||||
COMMON_VERSION = "Gauss200 OM VERSION"
|
||||
# It will be replaced with the product version, such as "Gauss200",
|
||||
# while being packaged by mpp_package.sh
|
||||
PRODUCT_NAME = "__GAUSS_PRODUCT_STRING__"
|
||||
PRODUCT_NAME_PACKAGE = "-".join(PRODUCT_NAME.split())
|
||||
__COMPATIBLE_VERSION = []
|
||||
COMMITID = ""
|
||||
|
||||
@staticmethod
|
||||
def getPackageVersion():
|
||||
"""
|
||||
function: Get the current version from version.cfg
|
||||
input : NA
|
||||
output: String
|
||||
"""
|
||||
if (VersionInfo.__PACKAGE_VERSION != ""):
|
||||
return VersionInfo.__PACKAGE_VERSION
|
||||
# obtain version file
|
||||
versionFile = VersionInfo.get_version_file()
|
||||
version, number, commitid = VersionInfo.get_version_info(versionFile)
|
||||
# the first returned value is the package version
|
||||
VersionInfo.__PACKAGE_VERSION = version
|
||||
return VersionInfo.__PACKAGE_VERSION
|
||||
|
||||
@staticmethod
|
||||
def getCommitid():
|
||||
if VersionInfo.COMMITID != "":
|
||||
return VersionInfo.COMMITID
|
||||
versionFile = VersionInfo.get_version_file()
|
||||
version, number, commitid = VersionInfo.get_version_info(versionFile)
|
||||
# the third returned value is the commit id
|
||||
VersionInfo.COMMITID = commitid
|
||||
return VersionInfo.COMMITID
|
||||
|
||||
@staticmethod
|
||||
def get_version_file():
|
||||
"""
|
||||
function: Get version.cfg file
|
||||
input : NA
|
||||
output: String
|
||||
"""
|
||||
# obtain version file
|
||||
dirName = os.path.dirname(os.path.realpath(__file__))
|
||||
versionFile = os.path.join(dirName, "./../../../", "version.cfg")
|
||||
versionFile = os.path.realpath(versionFile)
|
||||
if (not os.path.exists(versionFile)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] % versionFile)
|
||||
if (not os.path.isfile(versionFile)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50210"] % versionFile)
|
||||
return versionFile
|
||||
|
||||
@staticmethod
|
||||
def get_version_info(versionFile):
|
||||
|
||||
# the information in versionFile looks like this:
|
||||
# openGauss-1.0
|
||||
# XX.0
|
||||
# ae45cfgt
|
||||
if not os.path.exists(versionFile):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] % versionFile)
|
||||
if not os.path.isfile(versionFile):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50210"] % versionFile)
|
||||
with open(versionFile, 'r') as fp:
|
||||
retLines = fp.readlines()
|
||||
if len(retLines) < 3:
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50222"] % versionFile)
|
||||
|
||||
versionMatch = re.compile(r'[0-9]+\.[0-9]+\.[0-9]+').search(
|
||||
retLines[0].strip())
|
||||
number = retLines[1].strip()
|
||||
commitId = retLines[2].strip()
|
||||
|
||||
if versionMatch is None:
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50222"] %
|
||||
"version.cfg" + " It does not contain a version "
|
||||
"such as openGauss-1.0")
|
||||
version = versionMatch.group()
|
||||
try:
|
||||
float(number)
|
||||
except Exception as e:
|
||||
raise Exception(str(e) + ErrorCode.GAUSS_516["GAUSS_51628"]
|
||||
% number)
|
||||
|
||||
if not (commitId.isalnum() and len(commitId) == 8):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50222"] % versionFile
|
||||
+ " Commit id is wrong.")
|
||||
return version, number, commitId
|
||||
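For readers unfamiliar with the three-line version.cfg layout described in get_version_info above, here is a minimal standalone sketch of the same parsing rule. The sample contents below are hypothetical; the real file is produced by the packaging script.

import re

# Hypothetical version.cfg contents, following the layout the comments above
# describe: product/version string, package number, 8-character commit id.
sample_lines = ["openGauss-1.0.0\n", "92.298\n", "ae45cfgt\n"]

match = re.search(r'[0-9]+\.[0-9]+\.[0-9]+', sample_lines[0].strip())
version = match.group() if match else None   # the regex expects an X.Y.Z version
number = sample_lines[1].strip()
commit_id = sample_lines[2].strip()

assert version == "1.0.0"
assert commit_id.isalnum() and len(commit_id) == 8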
0
script/gspylib/common/__init__.py
Normal file
201
script/gspylib/component/BaseComponent.py
Normal file
@ -0,0 +1,201 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
#############################################################################
|
||||
import sys
|
||||
import os
|
||||
import socket
|
||||
import time
|
||||
|
||||
sys.path.append(sys.path[0] + "/../../")
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.os.gsfile import g_file
|
||||
from gspylib.os.gsOSlib import g_OSlib
|
||||
from gspylib.os.gsnetwork import g_network
|
||||
|
||||
TIME_OUT = 2
|
||||
RETRY_TIMES = 100
|
||||
|
||||
|
||||
class BaseComponent(object):
|
||||
'''
|
||||
The class is used to define base component.
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
'''
|
||||
function: initialize the parameters
|
||||
input : NA
|
||||
output: NA
|
||||
'''
|
||||
self.logger = None
|
||||
self.instInfo = None
|
||||
self.version = ""
|
||||
self.pkgName = ""
|
||||
self.initParas = {}
|
||||
self.binPath = ""
|
||||
self.dwsMode = False
|
||||
self.level = 1
|
||||
self.clusterType = DefaultValue.CLUSTER_TYPE_SINGLE_INST
|
||||
|
||||
def install(self):
|
||||
pass
|
||||
|
||||
def setGucConfig(self, setMode='set', paraDict=None):
|
||||
pass
|
||||
|
||||
def getGucConfig(self, paraList):
|
||||
pass
|
||||
|
||||
def setPghbaConfig(self):
|
||||
pass
|
||||
|
||||
def start(self):
|
||||
pass
|
||||
|
||||
def stop(self):
|
||||
pass
|
||||
|
||||
def uninstall(self):
|
||||
pass
|
||||
|
||||
def killProcess(self):
|
||||
"""
|
||||
function: kill process
|
||||
input: NA
|
||||
output: NA
|
||||
"""
|
||||
pass
|
||||
|
||||
def fixPermission(self):
|
||||
pass
|
||||
|
||||
def upgrade(self):
|
||||
pass
|
||||
|
||||
def createPath(self):
|
||||
pass
|
||||
|
||||
def perCheck(self):
|
||||
"""
|
||||
function: 1.Check instance port
|
||||
2.Check instance IP
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
ipList = self.instInfo.listenIps
|
||||
ipList.extend(self.instInfo.haIps)
|
||||
portList = []
|
||||
portList.append(self.instInfo.port)
|
||||
portList.append(self.instInfo.haPort)
|
||||
|
||||
ipList = DefaultValue.Deduplication(ipList)
|
||||
portList = DefaultValue.Deduplication(portList)
|
||||
# check port
|
||||
for port in portList:
|
||||
self.__checkport(port, ipList)
|
||||
# check ip
|
||||
failIps = g_network.checkIpAddressList(ipList)
|
||||
if (len(failIps) > 0):
|
||||
raise Exception(ErrorCode.GAUSS_506["GAUSS_50600"] +
|
||||
" The IP is %s." % ",".join(failIps))
|
||||
|
||||
def __checkport(self, port, ipList):
|
||||
"""
|
||||
function: check Port
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
tmpDir = DefaultValue.getTmpDirFromEnv()
|
||||
if (not os.path.exists(tmpDir)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] %
|
||||
tmpDir + " Please create it first.")
|
||||
pgsqlFiles = os.listdir(tmpDir)
|
||||
|
||||
self.__checkRandomPortRange(port)
|
||||
|
||||
pgsql = ".s.PGSQL.%d" % port
|
||||
pgsql_lock = ".s.PGSQL.%d.lock" % port
|
||||
if (pgsql in pgsqlFiles):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50200"] %
|
||||
"socket file" + " Port:%s." % port)
|
||||
|
||||
if (pgsql_lock in pgsqlFiles):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50200"] %
|
||||
"socket lock file" + " Port:%s." % port)
|
||||
|
||||
# Check whether the port is occupied
|
||||
for ip in ipList:
|
||||
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
sk.settimeout(TIME_OUT)
|
||||
|
||||
# retry binding the port, up to RETRY_TIMES attempts
|
||||
retryFlag = True
|
||||
retryTime = 0
|
||||
while (retryFlag):
|
||||
try:
|
||||
sk.bind((ip, port))
|
||||
sk.close()
|
||||
break
|
||||
except socket.error as e:
|
||||
retryTime += 1
|
||||
time.sleep(1)
|
||||
if (retryTime > RETRY_TIMES):
|
||||
retryFlag = False
|
||||
try:
|
||||
portProcessInfo = g_OSlib.getPortProcessInfo(port)
|
||||
self.logger.debug("The ip [%s] port [%s] is "
|
||||
"occupied. \nBind error "
|
||||
"msg:\n%s\nDetail msg:\n%s" % \
|
||||
(ip, port, str(e),
|
||||
portProcessInfo))
|
||||
except Exception as e:
|
||||
self.logger.debug("Failed to get the process "
|
||||
"information of the port [%s], "
|
||||
"output:%s." % (port, str(e)))
|
||||
raise Exception(ErrorCode.GAUSS_506["GAUSS_50601"] %
|
||||
port)
|
||||
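The bind-and-retry loop above can be reduced to a single-attempt helper for illustration; this sketch is not part of the OM code and the function name is made up.

import socket

def port_is_free(ip, port, timeout=2):
    """Return True if we can bind (ip, port), i.e. nothing is listening on it."""
    sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sk.settimeout(timeout)
    try:
        sk.bind((ip, port))
        return True
    except socket.error:
        return False
    finally:
        sk.close()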
|
||||
def __checkRandomPortRange(self, port):
|
||||
"""
|
||||
function: Check if port is in the range of random port
|
||||
input : port
|
||||
output: NA
|
||||
"""
|
||||
res = []
|
||||
try:
|
||||
rangeFile = "/proc/sys/net/ipv4/ip_local_port_range"
|
||||
output = g_file.readFile(rangeFile)
|
||||
res = output[0].split()
|
||||
except Exception as e:
|
||||
self.logger.debug(
|
||||
"Warning: Failed to get the range of random port."
|
||||
" Detail: \n%s" % str(e))
|
||||
return
|
||||
if (len(res) != 2):
|
||||
self.logger.debug("Warning: The range of random port is invalid. "
|
||||
"Detail: \n%s" % str(output))
|
||||
return
|
||||
minPort = int(res[0])
|
||||
maxPort = int(res[1])
|
||||
if (port >= minPort and port <= maxPort):
|
||||
self.logger.debug("Warning: Current instance port is in the "
|
||||
"range of random port(%d - %d)." % (minPort,
|
||||
maxPort))
|
||||
|
||||
def postCheck(self):
|
||||
pass
|
||||
454
script/gspylib/component/Kernel/DN_OLAP/DN_OLAP.py
Normal file
@ -0,0 +1,454 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
#############################################################################
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.append(sys.path[0] + "/../../../../")
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.common.Common import DefaultValue, ClusterInstanceConfig
|
||||
from gspylib.component.Kernel.Kernel import Kernel
|
||||
from gspylib.common.DbClusterInfo import dbClusterInfo
|
||||
from gspylib.os.gsfile import g_file
|
||||
from gspylib.os.gsOSlib import g_OSlib
|
||||
|
||||
METHOD_TRUST = "trust"
|
||||
METHOD_SHA = "sha256"
|
||||
MAX_PARA_NUMBER = 1000
|
||||
INSTANCE_TYPE_UNDEFINED = -1
|
||||
MASTER_INSTANCE = 0
|
||||
STANDBY_INSTANCE = 1
|
||||
DUMMY_STANDBY_INSTANCE = 2
|
||||
CASCADE_STANDBY_INSTANCE = 3
|
||||
|
||||
|
||||
class DN_OLAP(Kernel):
|
||||
'''
|
||||
The class is used to define the OLAP data node (DN) component.
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
'''
|
||||
Constructor
|
||||
'''
|
||||
super(DN_OLAP, self).__init__()
|
||||
|
||||
def getDnGUCDict(self):
|
||||
"""
|
||||
function : get the GUC parameters used when initializing the DB instance
|
||||
input : NA
|
||||
output : dict
|
||||
"""
|
||||
tmpDict = {}
|
||||
tmpDict["ssl"] = "on"
|
||||
tmpDict["ssl_cert_file"] = "'server.crt'"
|
||||
tmpDict["ssl_key_file"] = "'server.key'"
|
||||
tmpDict["ssl_ca_file"] = "'cacert.pem'"
|
||||
return tmpDict
|
||||
|
||||
def copyAndModCertFiles(self):
|
||||
"""
|
||||
function : copy cert files and change their permissions
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
user = g_OSlib.getUserInfo()["name"]
|
||||
appPath = DefaultValue.getInstallDir(user)
|
||||
caPath = os.path.join(appPath, "share/sslcert/om")
|
||||
# cp cert files
|
||||
g_file.cpFile("%s/server.crt" % caPath, "%s/" %
|
||||
self.instInfo.datadir)
|
||||
g_file.cpFile("%s/server.key" % caPath, "%s/" %
|
||||
self.instInfo.datadir)
|
||||
g_file.cpFile("%s/cacert.pem" % caPath, "%s/" %
|
||||
self.instInfo.datadir)
|
||||
g_file.cpFile("%s/server.key.cipher" % caPath, "%s/" %
|
||||
self.instInfo.datadir)
|
||||
g_file.cpFile("%s/server.key.rand" % caPath, "%s/" %
|
||||
self.instInfo.datadir)
|
||||
# change mode
|
||||
g_file.changeMode(DefaultValue.KEY_FILE_MODE, "%s/server.crt" %
|
||||
self.instInfo.datadir)
|
||||
g_file.changeMode(DefaultValue.KEY_FILE_MODE, "%s/server.key" %
|
||||
self.instInfo.datadir)
|
||||
g_file.changeMode(DefaultValue.KEY_FILE_MODE, "%s/cacert.pem" %
|
||||
self.instInfo.datadir)
|
||||
g_file.changeMode(DefaultValue.KEY_FILE_MODE, "%s/server.key.cipher" %
|
||||
self.instInfo.datadir)
|
||||
g_file.changeMode(DefaultValue.KEY_FILE_MODE, "%s/server.key.rand" %
|
||||
self.instInfo.datadir)
|
||||
|
||||
def initInstance(self):
|
||||
"""
|
||||
function:
|
||||
init DB instance
|
||||
input : NA
|
||||
output: NA
|
||||
"""
|
||||
if (not os.path.exists(self.instInfo.datadir)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] %
|
||||
("data directory [%s]" % self.instInfo.datadir))
|
||||
|
||||
nodename = self.getInstanceNodeName()
|
||||
# if nodename is too long, keep only the first 22 characters
|
||||
nodename = nodename[:22]
|
||||
if (self.dwsMode):
|
||||
image_path = DefaultValue.DWS_IMAGE_PATH
|
||||
# decompress package to files
|
||||
packageName = "%s/datanode.tar.gz" % image_path
|
||||
g_file.decompressFiles(packageName, self.instInfo.datadir)
|
||||
# set GUC parameter
|
||||
tmpDict = {}
|
||||
tmpDict["pgxc_node_name"] = "'%s'" % nodename
|
||||
self.setGucConfig(tmpDict)
|
||||
else:
|
||||
# If xlogdir is set in xmlfile, an independent xlog
|
||||
# path will be created.
|
||||
if (self.instInfo.xlogdir != ''):
|
||||
cmd = "%s/gs_initdb --locale=C -D %s -X %s " \
|
||||
"--nodename=%s %s -C %s" % (
|
||||
self.binPath, self.instInfo.datadir,
|
||||
self.instInfo.xlogdir, nodename,
|
||||
" ".join(self.initParas), self.binPath)
|
||||
else:
|
||||
cmd = "%s/gs_initdb --locale=C -D %s --nodename=%s %s -C %s" \
|
||||
% \
|
||||
(self.binPath, self.instInfo.datadir, nodename,
|
||||
" ".join(self.initParas), self.binPath)
|
||||
self.logger.debug("Command for initializing database "
|
||||
"node instance: %s" % cmd)
|
||||
(status, output) = DefaultValue.retryGetstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_516["GAUSS_51615"] +
|
||||
" Command:%s. Error:\n%s" % (cmd, output))
|
||||
# set ssl to DB nodes.
|
||||
dnGucParas = self.getDnGUCDict()
|
||||
self.setGucConfig(dnGucParas)
|
||||
self.copyAndModCertFiles()
|
||||
|
||||
def getInstanceNodeName(self):
|
||||
"""
|
||||
function: Get Instance Node Name
|
||||
input : NA
|
||||
output: instance node name
|
||||
"""
|
||||
user = g_OSlib.getUserInfo()["name"]
|
||||
clusterInfo = dbClusterInfo()
|
||||
clusterInfo.initFromStaticConfig(user)
|
||||
peerInsts = clusterInfo.getPeerInstance(self.instInfo)
|
||||
nodename = "dn_%d" % self.instInfo.instanceId
|
||||
if len(peerInsts) == 0:
|
||||
return nodename
|
||||
nodename = ClusterInstanceConfig. \
|
||||
setReplConninfoForSinglePrimaryMultiStandbyCluster(
|
||||
self.instInfo, peerInsts, clusterInfo)[1]
|
||||
return nodename
|
||||
|
||||
|
||||
def getDNDict(self, user, configItemType=None, peerInsts=None,
|
||||
azNames=None, syncNum=-1):
|
||||
"""
|
||||
function: Get database node configuration
|
||||
input : user, configItemType=None, peerInsts,
|
||||
azNames=None, syncNum
|
||||
output: NA
|
||||
"""
|
||||
if peerInsts is None:
|
||||
peerInsts = []
|
||||
if azNames is None:
|
||||
azNames = []
|
||||
tmpDNDict = {}
|
||||
tmpDNDict["listen_addresses"] = "'%s'" % ",".join(
|
||||
self.instInfo.listenIps)
|
||||
tmpDNDict["local_bind_address"] = "'%s'" % self.instInfo.listenIps[0]
|
||||
tmpDNDict["port"] = self.instInfo.port
|
||||
|
||||
if (configItemType == "ConfigInstance"):
|
||||
tmpDNDict["cstore_buffers"] = "1GB"
|
||||
tmpDNDict["max_connections"] = "3000"
|
||||
tmpDNDict["shared_buffers"] = "1GB"
|
||||
tmpDNDict["work_mem"] = "64MB"
|
||||
tmpDNDict["maintenance_work_mem"] = "128MB"
|
||||
tmpDNDict["data_replicate_buffer_size"] = "128MB"
|
||||
if (self.clusterType ==
|
||||
DefaultValue.CLUSTER_TYPE_SINGLE_PRIMARY_MULTI_STANDBY or
|
||||
self.clusterType == DefaultValue.CLUSTER_TYPE_SINGLE_INST):
|
||||
tmpDNDict["enable_data_replicate"] = "off"
|
||||
tmpDNDict["replication_type"] = "1"
|
||||
tmpDNDict["max_wal_senders"] = "16"
|
||||
totalnum = len(peerInsts)
|
||||
for inst in peerInsts:
|
||||
if inst.instanceType == CASCADE_STANDBY_INSTANCE:
|
||||
totalnum = totalnum - 1
|
||||
tmpDNDict["application_name"] = "'dn_%s'" % \
|
||||
self.instInfo.instanceId
|
||||
if len(azNames) == 1 and totalnum > 0:
|
||||
if syncNum == -1 and totalnum > 1:
|
||||
num = totalnum - 1
|
||||
tmpDNDict["synchronous_standby_names"] = \
|
||||
"'ANY %d(%s)'" % (num, azNames[0])
|
||||
elif syncNum > 0:
|
||||
tmpDNDict["synchronous_standby_names"] = \
|
||||
"'ANY %d(%s)'" % (syncNum, azNames[0])
|
||||
elif syncNum == 0:
|
||||
tmpDNDict["synchronous_standby_names"] = \
|
||||
"'ANY 1(%s)'" % (azNames[0])
|
||||
elif len(azNames) == 2 and totalnum in (3, 4):
|
||||
tmpDNDict["synchronous_standby_names"] = \
|
||||
"'ANY 2(%s,%s)'" % (azNames[0], azNames[1])
|
||||
elif len(azNames) == 2 and totalnum in (5, 6, 7):
|
||||
tmpDNDict["synchronous_standby_names"] = \
|
||||
"'ANY 3(%s,%s)'" % (azNames[0], azNames[1])
|
||||
elif len(azNames) == 3 and totalnum in (3, 4):
|
||||
tmpDNDict["synchronous_standby_names"] = \
|
||||
"'ANY 2(%s,%s,%s)'" % (azNames[0], azNames[1], azNames[2])
|
||||
elif len(azNames) == 3 and totalnum in (5, 6, 7):
|
||||
tmpDNDict["synchronous_standby_names"] = \
|
||||
"'ANY 3(%s,%s,%s)'" % (azNames[0], azNames[1], azNames[2])
|
||||
if len(peerInsts) > 4:
|
||||
if "synchronous_standby_names" in tmpDNDict:
|
||||
del tmpDNDict['synchronous_standby_names']
|
||||
|
||||
if (self.clusterType == DefaultValue.CLUSTER_TYPE_SINGLE):
|
||||
tmpDNDict["replication_type"] = "2"
|
||||
|
||||
if (configItemType != "ChangeIPUtility"):
|
||||
tmpDNDict["log_directory"] = "'%s/pg_log/dn_%d'" % (
|
||||
DefaultValue.getUserLogDirWithUser(user),
|
||||
self.instInfo.instanceId)
|
||||
tmpDNDict["audit_directory"] = "'%s/pg_audit/dn_%d'" % (
|
||||
DefaultValue.getUserLogDirWithUser(user),
|
||||
self.instInfo.instanceId)
|
||||
|
||||
if (len(self.instInfo.ssdDir) != 0 and configItemType !=
|
||||
"ChangeIPUtility"):
|
||||
tmpDNDict["ssd_cache_dir"] = "'%s'" % (self.instInfo.ssdDir)
|
||||
tmpDNDict["enable_adio_function"] = "on"
|
||||
tmpDNDict["enable_cstore_ssd_cache"] = "on"
|
||||
return tmpDNDict
|
||||
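As a worked trace of the single-AZ branch above (the topology is hypothetical): with one availability zone, three non-cascade standbys and syncNum left at -1, the code keeps the quorum one below the standby count.

# Hypothetical inputs: one AZ, three standby peers, syncNum unset (-1).
az_names = ["AZ1"]
totalnum = 3
sync_num = -1

if len(az_names) == 1 and totalnum > 0:
    if sync_num == -1 and totalnum > 1:
        value = "'ANY %d(%s)'" % (totalnum - 1, az_names[0])
    elif sync_num > 0:
        value = "'ANY %d(%s)'" % (sync_num, az_names[0])
    elif sync_num == 0:
        value = "'ANY 1(%s)'" % az_names[0]

print(value)  # -> 'ANY 2(AZ1)'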
|
||||
def getPrivateGucParamList(self):
|
||||
"""
|
||||
function : Get the private guc parameter list.
|
||||
input : NA
|
||||
output : dict of private GUC parameters
|
||||
"""
|
||||
# only used by dummy standby instance
|
||||
# max_connections value is 100
|
||||
# memorypool_enable value is false
|
||||
# shared_buffers value is 32MB
|
||||
# bulk_write_ring_size value is 32MB
|
||||
# max_prepared_transactions value is 10
|
||||
# cstore_buffers value is 16MB
|
||||
# autovacuum_max_workers value is 0
|
||||
# max_pool_size value is 50
|
||||
# wal_buffers value is -1
|
||||
|
||||
# add the parameter content to the dictionary list
|
||||
priavetGucParamDict = {}
|
||||
priavetGucParamDict["max_connections"] = "100"
|
||||
priavetGucParamDict["memorypool_enable"] = "false"
|
||||
priavetGucParamDict["shared_buffers"] = "32MB"
|
||||
priavetGucParamDict["bulk_write_ring_size"] = "32MB"
|
||||
priavetGucParamDict["max_prepared_transactions"] = "10"
|
||||
priavetGucParamDict["cstore_buffers"] = "16MB"
|
||||
priavetGucParamDict["autovacuum_max_workers"] = "0"
|
||||
priavetGucParamDict["wal_buffers"] = "-1"
|
||||
priavetGucParamDict["max_locks_per_transaction"] = "64"
|
||||
priavetGucParamDict["sysadmin_reserved_connections"] = "3"
|
||||
priavetGucParamDict["max_wal_senders"] = "4"
|
||||
return priavetGucParamDict
|
||||
|
||||
def modifyDummpyStandbyConfigItem(self):
|
||||
"""
|
||||
function: Modify the parameter at dummyStandby instance.
|
||||
It is only used by DB instances.
|
||||
input : Inst, configFile
|
||||
output: NA
|
||||
"""
|
||||
# only modify config items for the dummy standby instance
|
||||
if (self.instInfo.instanceType != DefaultValue.DUMMY_STANDBY_INSTANCE):
|
||||
return
|
||||
tmpDNDict = self.getPrivateGucParamList()
|
||||
self.setGucConfig(tmpDNDict)
|
||||
|
||||
def setPrimaryStandyConnInfo(self, peerInsts):
|
||||
"""
|
||||
function: Modify replconninfo for datanode
|
||||
input : peerInsts
|
||||
output: NA
|
||||
"""
|
||||
connInfo1 = None
|
||||
connInfo2 = None
|
||||
dummyStandbyInst = None
|
||||
nodename = None
|
||||
user = g_OSlib.getUserInfo()["name"]
|
||||
clusterInfo = dbClusterInfo()
|
||||
clusterInfo.initFromStaticConfig(user)
|
||||
if (self.clusterType ==
|
||||
DefaultValue.CLUSTER_TYPE_SINGLE_PRIMARY_MULTI_STANDBY or
|
||||
self.clusterType == DefaultValue.CLUSTER_TYPE_SINGLE_INST):
|
||||
(connInfo1, nodename) = ClusterInstanceConfig. \
|
||||
setReplConninfoForSinglePrimaryMultiStandbyCluster(
|
||||
self.instInfo, peerInsts, clusterInfo)
|
||||
for i in range(len(connInfo1)):
|
||||
connInfo = "replconninfo" + "%d" % (i + 1)
|
||||
tmpDict1 = {}
|
||||
tmpDict1[connInfo] = "'%s'" % connInfo1[i]
|
||||
self.setGucConfig(tmpDict1)
|
||||
if "availablezone" in tmpDict1[connInfo]:
|
||||
tempazname = tmpDict1[connInfo].split("=")[-1].strip("'")
|
||||
#if "availablezone" in str(connInfo1):
|
||||
self.setGucConfig({"available_zone": "'%s'" %
|
||||
self.instInfo.azName})
|
||||
else:
|
||||
(connInfo1, connInfo2, dummyStandbyInst, nodename) = \
|
||||
ClusterInstanceConfig.setReplConninfo(self.instInfo,
|
||||
peerInsts, clusterInfo)
|
||||
connInfo = "replconninfo1"
|
||||
tmpDict1 = {}
|
||||
tmpDict1[connInfo] = "'%s'" % connInfo1
|
||||
self.setGucConfig(tmpDict1)
|
||||
|
||||
if (dummyStandbyInst is not None):
|
||||
tmpDict2 = {}
|
||||
tmpDict2["replconninfo2"] = "'%s'" % connInfo2
|
||||
self.setGucConfig(tmpDict2)
|
||||
|
||||
def configInstance(self, user, dataConfig, peerInsts,
|
||||
configItemType=None, alarm_component=None,
|
||||
azNames=None, gucXml=False, clusterInfo=None):
|
||||
"""
|
||||
peerInsts : an empty peerInsts means this is a single-node cluster.
|
||||
"""
|
||||
if azNames is None:
|
||||
azNames = []
|
||||
syncNum = self.instInfo.syncNum
|
||||
tmpDNDict = self.getDNDict(user, configItemType, peerInsts,
|
||||
azNames, syncNum)
|
||||
|
||||
commonDict = self.setCommonItems()
|
||||
self.setGucConfig(commonDict)
|
||||
|
||||
self.logger.debug("Check if tmp_guc file exists.")
|
||||
tmpGucFile = ""
|
||||
tmpGucPath = DefaultValue.getTmpDirFromEnv(user)
|
||||
tmpGucFile = "%s/tmp_guc" % tmpGucPath
|
||||
if (os.path.exists(tmpGucFile)):
|
||||
dynamicDict = {}
|
||||
dynamicDict = DefaultValue.dynamicGuc(user, self.logger,
|
||||
"dn", tmpGucFile,
|
||||
gucXml)
|
||||
if gucXml:
|
||||
dynamicDict["log_line_prefix"] = "'%s'" % \
|
||||
dynamicDict["log_line_prefix"]
|
||||
dynamicDict["thread_pool_attr"] = "'%s'" % \
|
||||
dynamicDict[
|
||||
"thread_pool_attr"]
|
||||
if (len(dynamicDict) != 0):
|
||||
self.logger.debug("set dynamic guc parameters "
|
||||
"for database node instances.")
|
||||
if (self.instInfo.instanceType ==
|
||||
DefaultValue.DUMMY_STANDBY_INSTANCE):
|
||||
self.logger.debug("remove max_process_memory if "
|
||||
"current datanode is dummy one.")
|
||||
dummydynamicDict = dynamicDict
|
||||
dummydynamicDict.pop("max_process_memory")
|
||||
tmpDNDict.update(dummydynamicDict)
|
||||
else:
|
||||
tmpDNDict.update(dynamicDict)
|
||||
else:
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50219"] %
|
||||
"guc_list.conf")
|
||||
tmpDNDict.update(dataConfig)
|
||||
tmpDNDict["alarm_component"] = "'%s'" % alarm_component
|
||||
self.setGucConfig(tmpDNDict)
|
||||
|
||||
if (len(peerInsts)):
|
||||
self.setPrimaryStandyConnInfo(peerInsts)
|
||||
else:
|
||||
tmpDict1 = {}
|
||||
tmpDict1["synchronous_commit"] = "off"
|
||||
self.setGucConfig(tmpDict1)
|
||||
|
||||
if syncNum == 0 or (syncNum == -1 and len(peerInsts) == 1):
|
||||
tmpDict1 = {}
|
||||
tmpDict1["synchronous_commit"] = "off"
|
||||
self.setGucConfig(tmpDict1)
|
||||
|
||||
|
||||
self.modifyDummpyStandbyConfigItem()
|
||||
|
||||
def setPghbaConfig(self, clusterAllIpList, user='all'):
|
||||
"""
|
||||
"""
|
||||
principal = None
|
||||
if (DefaultValue.checkKerberos(DefaultValue.getMpprcFile())):
|
||||
|
||||
(status, output) = \
|
||||
g_OSlib.getGrepValue("-Er", "^default_realm",
|
||||
os.path.join(os.path.dirname(
|
||||
DefaultValue.getMpprcFile()),
|
||||
DefaultValue.FI_KRB_CONF))
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50222"] %
|
||||
"krb5.conf" + "Error:\n%s" % output)
|
||||
principal = output.split("=")[1].strip()
|
||||
|
||||
# build ip string list
|
||||
# Every 1000 records merged into one
|
||||
i = 0
|
||||
GUCParasStr = ""
|
||||
GUCParasStrList = []
|
||||
for ipAddress in clusterAllIpList:
|
||||
i += 1
|
||||
# Set the initial user and initial database access permissions
|
||||
if principal is None:
|
||||
GUCParasStr += "-h \"host all %s %s/32 %s\" " % \
|
||||
(user, ipAddress, METHOD_TRUST)
|
||||
else:
|
||||
GUCParasStr += "-h \"host all %s %s/32 gss " \
|
||||
"include_realm=1 krb_realm=%s\" "\
|
||||
% (user, ipAddress, principal)
|
||||
if (i % MAX_PARA_NUMBER == 0):
|
||||
GUCParasStrList.append(GUCParasStr)
|
||||
i = 0
|
||||
GUCParasStr = ""
|
||||
|
||||
if (GUCParasStr != ""):
|
||||
GUCParasStrList.append(GUCParasStr)
|
||||
|
||||
for parasStr in GUCParasStrList:
|
||||
self.doGUCConfig("set", parasStr, True)
|
||||
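The "merge every 1000 records into one" rule used above (and again in setGucConfig in Kernel.py) amounts to chunking the argument list so that each gs_guc call carries at most MAX_PARA_NUMBER entries. A small illustrative helper, not part of the OM code:

def chunk_args(items, max_per_call=1000):
    """Split items into lists of at most max_per_call entries, preserving order."""
    batches, current = [], []
    for item in items:
        current.append(item)
        if len(current) == max_per_call:
            batches.append(current)
            current = []
    if current:
        batches.append(current)
    return batches

# e.g. 2500 host entries -> 3 gs_guc invocations
assert [len(b) for b in chunk_args(range(2500))] == [1000, 1000, 500]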
|
||||
"""
|
||||
Desc:
|
||||
Under the AP branch, we don't need to run
|
||||
uninstall/postcheck for every component.
|
||||
"""
|
||||
|
||||
def fixPermission(self):
|
||||
pass
|
||||
|
||||
def upgrade(self):
|
||||
pass
|
||||
|
||||
def createPath(self):
|
||||
pass
|
||||
|
||||
def postCheck(self):
|
||||
pass
|
||||
0
script/gspylib/component/Kernel/DN_OLAP/__init__.py
Normal file
463
script/gspylib/component/Kernel/Kernel.py
Normal file
@ -0,0 +1,463 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
#############################################################################
|
||||
import sys
|
||||
import os
|
||||
import subprocess
|
||||
import grp
|
||||
import pwd
|
||||
import base64
|
||||
import re
|
||||
import time
|
||||
|
||||
sys.path.append(sys.path[0] + "/../../../")
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.component.BaseComponent import BaseComponent
|
||||
from gspylib.os.gsfile import g_file
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.threads.parallelTool import parallelTool, CommandThread
|
||||
from gspylib.os.gsfile import g_file, g_Platform
|
||||
|
||||
RETRY_COUNT = 3
|
||||
MAX_PARA_NUMBER = 1000
|
||||
|
||||
|
||||
class Kernel(BaseComponent):
|
||||
'''
|
||||
The class is used to define the kernel (database instance) component.
|
||||
'''
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
"""
|
||||
super(Kernel, self).__init__()
|
||||
# init parameter schemaCoordinatorFile,
|
||||
# schemaJobFile and schemaDatanodeFile
|
||||
tmpDir = DefaultValue.getTmpDirFromEnv()
|
||||
self.schemaCoordinatorFile = "%s/%s" % (
|
||||
tmpDir, DefaultValue.SCHEMA_COORDINATOR)
|
||||
self.coordinatorJobDataFile = "%s/%s" % (
|
||||
tmpDir, DefaultValue.COORDINATOR_JOB_DATA)
|
||||
self.schemaDatanodeFile = "%s/%s" % (tmpDir,
|
||||
DefaultValue.SCHEMA_DATANODE)
|
||||
self.dumpTableFile = "%s/%s" % (tmpDir,
|
||||
DefaultValue.DUMP_TABLES_DATANODE)
|
||||
self.dumpOutputFile = "%s/%s" % (tmpDir,
|
||||
DefaultValue.DUMP_Output_DATANODE)
|
||||
self.coordinatorStatisticsDataFile = "%s/%s" % (
|
||||
tmpDir, DefaultValue.COORDINATOR_STAT_DATA)
|
||||
|
||||
"""
|
||||
Desc:
|
||||
start/stop/query single instance
|
||||
"""
|
||||
|
||||
def start(self, time_out=DefaultValue.TIMEOUT_CLUSTER_START,
|
||||
security_mode="off"):
|
||||
"""
|
||||
"""
|
||||
cmd = "%s/gs_ctl start -D %s " % (self.binPath, self.instInfo.datadir)
|
||||
if self.instInfo.instanceType == DefaultValue.MASTER_INSTANCE:
|
||||
if len(self.instInfo.peerInstanceInfos) > 0:
|
||||
cmd += "-M primary"
|
||||
elif self.instInfo.instanceType == DefaultValue.CASCADE_STANDBY:
|
||||
cmd += "-M cascade_standby"
|
||||
elif self.instInfo.instanceType == DefaultValue.STANDBY_INSTANCE:
|
||||
cmd += "-M standby"
|
||||
if time_out is not None:
|
||||
cmd += " -t %s" % time_out
|
||||
if security_mode == "on":
|
||||
cmd += " -o \'--securitymode\'"
|
||||
self.logger.debug("start cmd = %s" % cmd)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if status != 0 or re.search("start failed", output):
|
||||
raise Exception(ErrorCode.GAUSS_516["GAUSS_51607"] % "instance"
|
||||
+ " Error: Please check the gs_ctl log for "
|
||||
"failure details." + "\n" + output)
|
||||
if re.search("another server might be running", output):
|
||||
self.logger.log(output)
|
||||
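For illustration, the command string assembled by start() for a hypothetical standby instance; the paths and timeout below are made up, not taken from a real deployment.

# Hypothetical values only.
bin_path = "/opt/huawei/install/app/bin"
data_dir = "/opt/huawei/install/data/dn"
time_out = 300

cmd = "%s/gs_ctl start -D %s " % (bin_path, data_dir)
cmd += "-M standby"
cmd += " -t %s" % time_out
print(cmd)
# /opt/huawei/install/app/bin/gs_ctl start -D /opt/huawei/install/data/dn -M standby -t 300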
|
||||
def stop(self, stopMode="", time_out=300):
|
||||
"""
|
||||
"""
|
||||
cmd = "%s/gs_ctl stop -D %s " % (
|
||||
self.binPath, self.instInfo.datadir)
|
||||
if not self.isPidFileExist():
|
||||
cmd += " -m immediate"
|
||||
else:
|
||||
# check stop mode
|
||||
if stopMode != "":
|
||||
cmd += " -m %s" % stopMode
|
||||
cmd += " -t %s" % time_out
|
||||
self.logger.debug("stop cmd = %s" % cmd)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if status != 0:
|
||||
raise Exception(ErrorCode.GAUSS_516["GAUSS_51610"] %
|
||||
"instance" + " Error: \n%s." % output)
|
||||
|
||||
def isPidFileExist(self):
|
||||
pidFile = "%s/postmaster.pid" % self.instInfo.datadir
|
||||
return os.path.isfile(pidFile)
|
||||
|
||||
def query(self):
|
||||
"""
|
||||
"""
|
||||
cmd = "%s/gs_ctl query -D %s" % (self.binPath, self.instInfo.datadir)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd +
|
||||
" Error: \n%s " % output)
|
||||
return (status, output)
|
||||
|
||||
def build(self, buidMode="full", standByBuildTimeout=300):
|
||||
"""
|
||||
"""
|
||||
cmd = "%s/gs_ctl build -D %s -M standby -b %s -r %d " % (
|
||||
self.binPath, self.instInfo.datadir, buidMode, standByBuildTimeout)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd +
|
||||
" Error: \n%s " % output)
|
||||
def build_cascade(self, buidMode="full", standByBuildTimeout=300):
|
||||
"""
|
||||
"""
|
||||
cmd = "%s/gs_ctl build -D %s -M cascade_standby -b %s -r %d " % (
|
||||
self.binPath, self.instInfo.datadir, buidMode, standByBuildTimeout)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd +
|
||||
" Error: \n%s " % output)
|
||||
|
||||
def queryBuild(self):
|
||||
"""
|
||||
"""
|
||||
cmd = "%s/gs_ctl querybuild -D %s" % (self.binPath,
|
||||
self.instInfo.datadir)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd +
|
||||
" Error: \n%s " % output)
|
||||
|
||||
"""
|
||||
Desc:
|
||||
Under the AP branch, the installation package of each
|
||||
component is not distinguished.
|
||||
After checking, unzip the public installation package and
|
||||
complete the installation.
|
||||
"""
|
||||
|
||||
def install(self, nodeName="", dbInitParams=""):
|
||||
"""
|
||||
"""
|
||||
pass
|
||||
|
||||
def getInstanceTblspcDirs(self, nodeName=""):
|
||||
"""
|
||||
function: Get the instance tablespace directories
|
||||
input : nodeName
|
||||
output: list of tablespace directories
|
||||
"""
|
||||
tbsDirList = []
|
||||
|
||||
if (not os.path.exists("%s/pg_tblspc" % self.instInfo.datadir)):
|
||||
self.logger.debug("%s/pg_tblspc does not exists." %
|
||||
self.instInfo.datadir)
|
||||
return tbsDirList
|
||||
|
||||
fileList = os.listdir("%s/pg_tblspc" % self.instInfo.datadir)
|
||||
if (len(fileList)):
|
||||
for filename in fileList:
|
||||
if (os.path.islink("%s/pg_tblspc/%s" % (self.instInfo.datadir,
|
||||
filename))):
|
||||
linkDir = os.readlink("%s/pg_tblspc/%s" % (
|
||||
self.instInfo.datadir, filename))
|
||||
if (os.path.isdir(linkDir)):
|
||||
tblspcDir = "%s/%s_%s" % (
|
||||
linkDir, DefaultValue.TABLESPACE_VERSION_DIRECTORY,
|
||||
nodeName)
|
||||
self.logger.debug("Table space directories is %s." %
|
||||
tblspcDir)
|
||||
tbsDirList.append(tblspcDir)
|
||||
else:
|
||||
self.logger.debug(
|
||||
"%s is not link directory." % linkDir)
|
||||
else:
|
||||
self.logger.debug("%s is not a link file." % filename)
|
||||
else:
|
||||
self.logger.debug("%s/pg_tblspc is empty." % self.instInfo.datadir)
|
||||
|
||||
return tbsDirList
|
||||
|
||||
def getLockFiles(self):
|
||||
"""
|
||||
function: Get lock files
|
||||
input : NA
|
||||
output: list of socket and lock file paths
|
||||
"""
|
||||
fileList = []
|
||||
# the static file must exist
|
||||
tmpDir = os.path.realpath(DefaultValue.getTmpDirFromEnv())
|
||||
|
||||
pgsql = ".s.PGSQL.%d" % self.instInfo.port
|
||||
pgsqlLock = ".s.PGSQL.%d.lock" % self.instInfo.port
|
||||
fileList.append(os.path.join(tmpDir, pgsql))
|
||||
fileList.append(os.path.join(tmpDir, pgsqlLock))
|
||||
return fileList
|
||||
|
||||
def removeSocketFile(self, fileName):
|
||||
"""
|
||||
"""
|
||||
g_file.removeFile(fileName, "shell")
|
||||
|
||||
def removeTbsDir(self, tbsDir):
|
||||
"""
|
||||
"""
|
||||
g_file.removeDirectory(tbsDir)
|
||||
|
||||
def cleanDir(self, instDir):
|
||||
"""
|
||||
function: Clean the dirs
|
||||
input : instDir
|
||||
output: NA
|
||||
"""
|
||||
if (not os.path.exists(instDir)):
|
||||
return
|
||||
|
||||
dataDir = []
|
||||
dataDir = os.listdir(instDir)
|
||||
if (os.getuid() == 0):
|
||||
pglDir = '%s/pg_location' % instDir
|
||||
isPglDirEmpty = False
|
||||
if (os.path.exists(pglDir) and len(os.listdir(pglDir)) == 0):
|
||||
isPglDirEmpty = True
|
||||
if (len(dataDir) == 0 or isPglDirEmpty):
|
||||
g_file.cleanDirectoryContent(instDir)
|
||||
else:
|
||||
for info in dataDir:
|
||||
if (str(info) == "pg_location"):
|
||||
resultMount = []
|
||||
resultFile = []
|
||||
resultDir = []
|
||||
pglDir = '%s/pg_location' % instDir
|
||||
|
||||
# delete all files in the mount point
|
||||
cmd = "%s | %s '%s' | %s '{printf $3}'" % \
|
||||
(g_Platform.getMountCmd(), g_Platform.getGrepCmd(),
|
||||
pglDir, g_Platform.getAwkCmd())
|
||||
(status, outputMount) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50207"] %
|
||||
instDir + " Error:\n%s." %
|
||||
str(outputMount) +
|
||||
"The cmd is %s" % cmd)
|
||||
else:
|
||||
if (len(outputMount) > 0):
|
||||
resultMount = str(outputMount).split()
|
||||
for infoMount in resultMount:
|
||||
g_file.cleanDirectoryContent(infoMount)
|
||||
else:
|
||||
g_file.cleanDirectoryContent(instDir)
|
||||
continue
|
||||
|
||||
# delete file in the pg_location directory
|
||||
if (not os.path.exists(pglDir)):
|
||||
continue
|
||||
cmd = "cd '%s'" % pglDir
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] %
|
||||
cmd + " Error: \n%s " % output)
|
||||
|
||||
outputFile = g_file.findFile(".", "f", "type")
|
||||
if (len(outputFile) > 0):
|
||||
for infoFile in outputFile:
|
||||
tmpinfoFile = pglDir + infoFile[1:]
|
||||
for infoMount in resultMount:
|
||||
if (tmpinfoFile.find(infoMount) < 0 and
|
||||
infoMount.find(tmpinfoFile) < 0):
|
||||
realFile = "'%s/%s'" % (pglDir, infoFile)
|
||||
g_file.removeFile(realFile, "shell")
|
||||
|
||||
# delete directory in the pg_location directory
|
||||
cmd = "if [ -d '%s' ]; then cd '%s' && find -type d; fi" \
|
||||
% \
|
||||
(pglDir, pglDir)
|
||||
(status, outputDir) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50207"] %
|
||||
instDir + " Error:\n%s." %
|
||||
str(outputDir) + "The cmd is %s" % cmd)
|
||||
else:
|
||||
resultDir = g_file.findFile(".", "d", "type")
|
||||
resultDir.remove(".")
|
||||
if (len(resultDir) > 0):
|
||||
for infoDir in resultDir:
|
||||
tmpinfoDir = pglDir + infoDir[1:]
|
||||
for infoMount in resultMount:
|
||||
if (tmpinfoDir.find(infoMount) < 0 and
|
||||
infoMount.find(tmpinfoDir) < 0):
|
||||
realPath = "'%s/%s'" % (
|
||||
pglDir, infoDir)
|
||||
g_file.removeDirectory(realPath)
|
||||
cmd = "if [ -d '%s' ];then cd '%s' && find . ! -name " \
|
||||
"'pg_location' " \
|
||||
"! -name '..' ! -name '.' -print0 |xargs -r -0 -n100 rm " \
|
||||
"-rf; " \
|
||||
"fi" % (instDir, instDir)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50207"] %
|
||||
instDir + " Error:\n%s." % str(output) +
|
||||
"The cmd is %s" % cmd)
|
||||
|
||||
def uninstall(self, instNodeName):
|
||||
"""
|
||||
function: Clean node instances.
|
||||
1.get the data dirs, tablespaces, socket files
|
||||
2.use threads to delete the dirs or files
|
||||
input : instNodeName
|
||||
output: NA
|
||||
"""
|
||||
self.logger.log("Cleaning instance.")
|
||||
|
||||
# tablespace data directory
|
||||
tbsDirList = self.getInstanceTblspcDirs(instNodeName)
|
||||
|
||||
# socket files
|
||||
socketFiles = self.getLockFiles()
|
||||
|
||||
# clean tablespace dir
|
||||
if (len(tbsDirList) != 0):
|
||||
try:
|
||||
self.logger.debug("Deleting instances tablespace directories.")
|
||||
for tbsDir in tbsDirList:
|
||||
self.removeTbsDir(tbsDir)
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
self.logger.log("Successfully cleaned instance tablespace.")
|
||||
|
||||
if (len(self.instInfo.datadir) != 0):
|
||||
try:
|
||||
self.logger.debug("Deleting instances directories.")
|
||||
self.cleanDir(self.instInfo.datadir)
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
self.logger.log("Successfully cleaned instances.")
|
||||
|
||||
if (len(self.instInfo.xlogdir) != 0):
|
||||
try:
|
||||
self.logger.debug("Deleting instances xlog directories.")
|
||||
self.cleanDir(self.instInfo.xlogdir)
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
self.logger.log("Successfully cleaned instances.")
|
||||
|
||||
if (len(socketFiles) != 0):
|
||||
try:
|
||||
self.logger.debug("Deleting socket files.")
|
||||
for socketFile in socketFiles:
|
||||
self.removeSocketFile(socketFile)
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
self.logger.log("Successfully cleaned socket files.")
|
||||
|
||||
def setCommonItems(self):
|
||||
"""
|
||||
function: set common items
|
||||
input : tmpDir
|
||||
output: tempCommonDict
|
||||
"""
|
||||
tempCommonDict = {}
|
||||
tmpDir = DefaultValue.getTmpDirFromEnv()
|
||||
tempCommonDict["unix_socket_directory"] = "'%s'" % tmpDir
|
||||
tempCommonDict["unix_socket_permissions"] = "0700"
|
||||
tempCommonDict["log_file_mode"] = "0600"
|
||||
tempCommonDict["enable_nestloop"] = "off"
|
||||
tempCommonDict["enable_mergejoin"] = "off"
|
||||
tempCommonDict["explain_perf_mode"] = "pretty"
|
||||
tempCommonDict["log_line_prefix"] = "'%m %c %d %p %a %x %n %e '"
|
||||
tempCommonDict["modify_initial_password"] = "true"
|
||||
|
||||
return tempCommonDict
|
||||
|
||||
def doGUCConfig(self, action, GUCParasStr, isHab=False):
|
||||
"""
|
||||
"""
|
||||
# check instance data directory
|
||||
if (self.instInfo.datadir == "" or not os.path.exists(
|
||||
self.instInfo.datadir)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50219"] %
|
||||
("data directory of the instance[%s]" %
|
||||
str(self.instInfo)))
|
||||
|
||||
if (GUCParasStr == ""):
|
||||
return
|
||||
|
||||
# check conf file
|
||||
if (isHab == True):
|
||||
configFile = "%s/pg_hba.conf" % self.instInfo.datadir
|
||||
else:
|
||||
configFile = "%s/postgresql.conf" % self.instInfo.datadir
|
||||
if (not os.path.exists(configFile)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] % configFile)
|
||||
|
||||
cmd = "%s/gs_guc %s -D %s %s " % (self.binPath, action,
|
||||
self.instInfo.datadir, GUCParasStr)
|
||||
(status, output) = DefaultValue.retryGetstatusoutput(cmd, 3, 3)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_500["GAUSS_50007"] % "GUC" +
|
||||
" Command: %s. Error:\n%s" % (cmd, output))
|
||||
|
||||
def setGucConfig(self, paraDict=None, setMode='set'):
|
||||
"""
|
||||
"""
|
||||
i = 0
|
||||
GUCParasStr = ""
|
||||
GUCParasStrList = []
|
||||
if paraDict is None:
|
||||
paraDict = {}
|
||||
for paras in paraDict:
|
||||
i += 1
|
||||
GUCParasStr += " -c \"%s=%s\" " % (paras, paraDict[paras])
|
||||
if (i % MAX_PARA_NUMBER == 0):
|
||||
GUCParasStrList.append(GUCParasStr)
|
||||
i = 0
|
||||
GUCParasStr = ""
|
||||
if (GUCParasStr != ""):
|
||||
GUCParasStrList.append(GUCParasStr)
|
||||
|
||||
for parasStr in GUCParasStrList:
|
||||
self.doGUCConfig(setMode, parasStr, False)
|
||||
|
||||
def removeIpInfoOnPghbaConfig(self, ipAddressList):
|
||||
"""
|
||||
"""
|
||||
i = 0
|
||||
GUCParasStr = ""
|
||||
GUCParasStrList = []
|
||||
for ipAddress in ipAddressList:
|
||||
i += 1
|
||||
GUCParasStr += " -h \"host all all %s/32\"" % (ipAddress)
|
||||
if (i % MAX_PARA_NUMBER == 0):
|
||||
GUCParasStrList.append(GUCParasStr)
|
||||
i = 0
|
||||
GUCParasStr = ""
|
||||
if (GUCParasStr != ""):
|
||||
GUCParasStrList.append(GUCParasStr)
|
||||
|
||||
for parasStr in GUCParasStrList:
|
||||
self.doGUCConfig("set", parasStr, True)
|
||||
0
script/gspylib/component/Kernel/__init__.py
Normal file
0
script/gspylib/component/__init__.py
Normal file
67
script/gspylib/etc/conf/check_list.conf
Normal file
@ -0,0 +1,67 @@
|
||||
#The file(check_list.conf) is the gs_check and gs_checkos configuration file.
|
||||
#The file is placed in $GPHOME/script/util
|
||||
|
||||
# the system control parameter
|
||||
[/etc/sysctl.conf]
|
||||
net.ipv4.tcp_max_tw_buckets = 10000
|
||||
net.ipv4.tcp_tw_reuse = 1
|
||||
net.ipv4.tcp_tw_recycle = 1
|
||||
net.ipv4.tcp_keepalive_time = 30
|
||||
net.ipv4.tcp_keepalive_intvl = 30
|
||||
net.ipv4.tcp_keepalive_probes = 9
|
||||
net.ipv4.tcp_retries2 = 12
|
||||
net.sctp.addip_enable = 0
|
||||
net.core.wmem_max = 21299200
|
||||
net.core.rmem_max = 21299200
|
||||
net.core.wmem_default = 21299200
|
||||
net.core.rmem_default = 21299200
|
||||
net.sctp.sctp_mem = 94500000 915000000 927000000
|
||||
net.sctp.sctp_rmem = 8192 250000 16777216
|
||||
net.sctp.sctp_wmem = 8192 250000 16777216
|
||||
kernel.sem = 250 6400000 1000 25600
|
||||
net.ipv4.tcp_rmem = 8192 250000 16777216
|
||||
net.ipv4.tcp_wmem = 8192 250000 16777216
|
||||
# vm.min_free_kbytes should be set to 5% of total system memory at runtime; total system memory can be obtained with: free -k|grep Mem|awk '{print $2}' (a small computation sketch follows this file).
|
||||
vm.min_free_kbytes = total_system_memory*5%
|
||||
net.core.netdev_max_backlog = 65535
|
||||
net.ipv4.tcp_max_syn_backlog = 65535
|
||||
net.core.somaxconn = 65535
|
||||
net.ipv4.tcp_syncookies = 1
|
||||
vm.overcommit_memory = 0
|
||||
kernel.shmall = 1152921504606846720
|
||||
kernel.shmmax = 18446744073709551615
|
||||
|
||||
# if a parameter value is not equal to the OS's value, print a warning rather than an error
|
||||
[SUGGEST:/etc/sysctl.conf]
|
||||
net.sctp.sndbuf_policy = 0
|
||||
net.sctp.rcvbuf_policy = 0
|
||||
net.ipv4.ip_local_port_range = 26000 65535
|
||||
net.ipv4.tcp_fin_timeout = 60
|
||||
net.ipv4.tcp_sack = 1
|
||||
net.ipv4.tcp_timestamps = 1
|
||||
net.ipv4.tcp_retries1 = 5
|
||||
net.ipv4.tcp_syn_retries = 5
|
||||
net.ipv4.tcp_synack_retries = 5
|
||||
net.sctp.path_max_retrans = 10
|
||||
net.sctp.max_init_retransmits = 10
|
||||
net.sctp.association_max_retrans = 10
|
||||
net.sctp.hb_interval = 30000
|
||||
vm.extfrag_threshold = 500
|
||||
vm.overcommit_ratio = 90
|
||||
SctpChecksumErrors = 0
|
||||
|
||||
# open file number; please set it to '1000000'
|
||||
[/etc/security/limits.conf]
|
||||
open files = 1000000
|
||||
stack size = 3072
|
||||
virtual memory = unlimited
|
||||
max user processes = unlimited
|
||||
# network parameter
|
||||
# if the network speed is greater than or equal to 10000Mb/s, please set RX/TX to 4096;
|
||||
# we will also check that the MTU is greater than or equal to 1500, but gs_checkos does not set it;
|
||||
# otherwise, skip it.
|
||||
[/sbin/ifconfig]
|
||||
MTU = 8192
|
||||
RX = 4096
|
||||
TX = 4096
|
||||
|
||||
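A minimal sketch of the vm.min_free_kbytes rule mentioned in the sysctl section above, reading MemTotal from /proc/meminfo instead of calling free; illustrative only, not part of the OM scripts.

def suggested_min_free_kbytes(meminfo_path="/proc/meminfo"):
    """Return 5% of total system memory in kB, per the comment in check_list.conf."""
    with open(meminfo_path) as fp:
        for line in fp:
            if line.startswith("MemTotal:"):
                total_kb = int(line.split()[1])
                return total_kb * 5 // 100
    raise ValueError("MemTotal not found in %s" % meminfo_path)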
65
script/gspylib/etc/conf/check_list_dws.conf
Normal file
@ -0,0 +1,65 @@
|
||||
#The file(check_list_dws.conf) is the gs_check and gs_checkos configuration file.
|
||||
#The file is placed in $GPHOME/script/util
|
||||
|
||||
# the system control parameter
|
||||
[/etc/sysctl.conf]
|
||||
net.ipv4.tcp_max_tw_buckets = 10000
|
||||
net.ipv4.tcp_tw_reuse = 1
|
||||
net.ipv4.tcp_tw_recycle = 1
|
||||
net.ipv4.tcp_keepalive_time = 30
|
||||
net.ipv4.tcp_keepalive_intvl = 30
|
||||
net.ipv4.tcp_keepalive_probes = 9
|
||||
net.ipv4.tcp_retries2 = 12
|
||||
net.sctp.addip_enable = 0
|
||||
net.core.wmem_max = 21299200
|
||||
net.core.rmem_max = 21299200
|
||||
net.core.wmem_default = 21299200
|
||||
net.core.rmem_default = 21299200
|
||||
net.sctp.sctp_mem = 94500000 915000000 927000000
|
||||
net.sctp.sctp_rmem = 8192 250000 16777216
|
||||
net.sctp.sctp_wmem = 8192 250000 16777216
|
||||
kernel.sem = 250 6400000 1000 25600
|
||||
net.ipv4.tcp_rmem = 8192 250000 16777216
|
||||
net.ipv4.tcp_wmem = 8192 250000 16777216
|
||||
# vm.min_free_kbytes should be set to 5% of total system memory at runtime; total system memory can be obtained with: free -k|grep Mem|awk '{print $2}'.
|
||||
vm.min_free_kbytes = total_system_memory*5%
|
||||
net.core.netdev_max_backlog = 65535
|
||||
net.ipv4.tcp_max_syn_backlog = 65535
|
||||
net.core.somaxconn = 65535
|
||||
net.ipv4.tcp_syncookies = 1
|
||||
vm.overcommit_memory = 0
|
||||
net.sctp.sndbuf_policy = 0
|
||||
net.sctp.rcvbuf_policy = 0
|
||||
net.ipv4.tcp_fin_timeout = 60
|
||||
kernel.shmall = 1152921504606846720
|
||||
kernel.shmmax = 18446744073709551615
|
||||
net.ipv4.tcp_sack = 1
|
||||
net.ipv4.tcp_timestamps = 1
|
||||
net.ipv4.tcp_retries1 = 10
|
||||
net.ipv4.tcp_syn_retries = 10
|
||||
net.ipv4.tcp_synack_retries = 10
|
||||
net.sctp.path_max_retrans = 10
|
||||
net.sctp.max_init_retransmits = 10
|
||||
net.sctp.association_max_retrans = 10
|
||||
net.sctp.hb_interval = 30000
|
||||
vm.extfrag_threshold = 500
|
||||
vm.overcommit_ratio = 90
|
||||
|
||||
# if a parameter value is not equal to the OS's value, print a warning rather than an error
|
||||
[SUGGEST:/etc/sysctl.conf]
|
||||
SctpChecksumErrors = 0
|
||||
|
||||
# open file number; please set it to '1000000'
|
||||
[/etc/security/limits.conf]
|
||||
open files = 1000000
|
||||
stack size = 3072
|
||||
|
||||
# network parameter
|
||||
# if the network speed is greater than or equal to 10000Mb/s, please set RX/TX to 4096;
|
||||
# we will also check that the MTU is greater than or equal to 1500, but gs_checkos does not set it;
|
||||
# otherwise, skip it.
|
||||
[/sbin/ifconfig]
|
||||
MTU = 8192
|
||||
RX = 4096
|
||||
TX = 4096
|
||||
|
||||
36
script/gspylib/etc/conf/cluster_config_template.xml
Normal file
@ -0,0 +1,36 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<ROOT>
|
||||
<CLUSTER>
|
||||
<PARAM name="clusterName" value="Cluster_template" />
|
||||
<PARAM name="nodeNames" value="node1_hostname,node2_hostname"/>
|
||||
<PARAM name="gaussdbAppPath" value="/opt/huawei/install/app" />
|
||||
<PARAM name="gaussdbLogPath" value="/var/log/omm" />
|
||||
<PARAM name="tmpMppdbPath" value="/opt/huawei/tmp"/>
|
||||
<PARAM name="gaussdbToolPath" value="/opt/huawei/install/om" />
|
||||
<PARAM name="corePath" value="/opt/huawei/corefile"/>
|
||||
<PARAM name="backIp1s" value="192.168.0.1,192.168.0.2"/>
|
||||
</CLUSTER>
|
||||
|
||||
<DEVICELIST>
|
||||
<DEVICE sn="node1_hostname">
|
||||
<PARAM name="name" value="node1_hostname"/>
|
||||
<PARAM name="azName" value="AZ1"/>
|
||||
<PARAM name="azPriority" value="1"/>
|
||||
<PARAM name="backIp1" value="192.168.0.1"/>
|
||||
<PARAM name="sshIp1" value="192.168.0.1"/>
|
||||
<!-- dn -->
|
||||
<PARAM name="dataNum" value="1"/>
|
||||
<PARAM name="dataPortBase" value="15400"/>
|
||||
<PARAM name="dataNode1" value="/opt/huawei/install/data/dn,node2_hostname,/opt/huawei/install/data/dn"/>
|
||||
<PARAM name="dataNode1_syncNum" value="0"/>
|
||||
</DEVICE>
|
||||
|
||||
<DEVICE sn="node2_hostname">
|
||||
<PARAM name="name" value="node2_hostname"/>
|
||||
<PARAM name="azName" value="AZ1"/>
|
||||
<PARAM name="azPriority" value="1"/>
|
||||
<PARAM name="backIp1" value="192.168.0.2"/>
|
||||
<PARAM name="sshIp1" value="192.168.0.2"/>
|
||||
</DEVICE>
|
||||
</DEVICELIST>
|
||||
</ROOT>
|
||||
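A small sketch of how the template above could be read with the Python standard library; the path is hypothetical and this is not how the OM scripts themselves parse it.

import xml.etree.ElementTree as ET

tree = ET.parse("script/gspylib/etc/conf/cluster_config_template.xml")
root = tree.getroot()
for param in root.find("CLUSTER"):
    print(param.get("name"), "=", param.get("value"))
for device in root.find("DEVICELIST"):
    print("device:", device.get("sn"))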
12
script/gspylib/etc/conf/gs-OS-set.service
Normal file
@ -0,0 +1,12 @@
|
||||
[Unit]
|
||||
Description=OS Optimize Service
|
||||
After=local-fs.target
|
||||
|
||||
[Service]
|
||||
Type=idle
|
||||
ExecStart=/usr/local/gauss/script/gauss-OS-set.sh
|
||||
Delegate=yes
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
9
script/gspylib/etc/conf/gs_collector.json
Normal file
@ -0,0 +1,9 @@
|
||||
{
|
||||
"Collect":
|
||||
[
|
||||
{"TypeName": "System", "Content":"RunTimeInfo, HardWareInfo","Interval":"0", "Count":"1"},
|
||||
{"TypeName": "Log", "Content" : "DataNode,ClusterManager", "Interval":"0", "Count":"1"},
|
||||
{"TypeName": "Database", "Content": "pg_locks,pg_stat_activity,pg_thread_wait_status","Interval":"0", "Count":"1"},
|
||||
{"TypeName": "Config", "Content": "DataNode", "Interval":"0", "Count":"1"}
|
||||
]
|
||||
}
|
||||
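The collector configuration above is plain JSON; a sketch of reading it follows (the path is hypothetical).

import json

with open("script/gspylib/etc/conf/gs_collector.json") as fp:
    conf = json.load(fp)
for item in conf["Collect"]:
    print(item["TypeName"], "->", item["Content"],
          "interval:", item["Interval"], "count:", item["Count"])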
39
script/gspylib/etc/conf/guc_cloud_list.xml
Normal file
@ -0,0 +1,39 @@
|
||||
<?xml version="1.0" encoding="utf-8" ?>
|
||||
|
||||
<root>
|
||||
<dn>
|
||||
<PARAM KEY='comm_tcp_mode' VALUE='on' />
|
||||
<PARAM KEY='comm_quota_size' VALUE='1024kB' />
|
||||
<!-- Do not set max_process_memory in dummy datanodes -->
|
||||
<PARAM KEY='log_line_prefix' VALUE='%x %a %m %u %d %h %p %S' />
|
||||
<PARAM KEY='enable_wdr_snapshot' VALUE='on' />
|
||||
<PARAM KEY='use_workload_manager' VALUE='off' />
|
||||
<PARAM KEY='bypass_workload_manager' VALUE='on' />
|
||||
<PARAM KEY='enable_dynamic_workload' VALUE='off' />
|
||||
<PARAM KEY='enable_gtm_free' VALUE='on' />
|
||||
<PARAM KEY='checkpoint_segments' VALUE='1024' />
|
||||
<PARAM KEY='wal_buffers' VALUE='1GB' />
|
||||
<PARAM KEY='xloginsert_locks' VALUE='16' />
|
||||
<PARAM KEY='max_process_memory' VALUE='80GB' />
|
||||
<PARAM KEY='shared_buffers' VALUE='40GB' />
|
||||
<PARAM KEY='max_prepared_transactions' VALUE='2048' />
|
||||
<PARAM KEY='work_mem' VALUE='1GB' />
|
||||
<PARAM KEY='maintenance_work_mem' VALUE='512MB' />
|
||||
<PARAM KEY='standby_shared_buffers_fraction' VALUE='1' />
|
||||
<PARAM KEY='max_files_per_process' VALUE='1024' />
|
||||
<PARAM KEY='recovery_max_workers' VALUE='4' />
|
||||
<PARAM KEY='enable_data_replicate' VALUE='off' />
|
||||
<PARAM KEY='recovery_time_target' VALUE='60' />
|
||||
<PARAM KEY='ssl' VALUE='off' />
|
||||
<PARAM KEY='ssl_renegotiation_limit' VALUE='0' />
|
||||
<PARAM KEY='session_timeout' VALUE='30min' />
|
||||
<PARAM KEY='password_effect_time' VALUE='0' />
|
||||
<PARAM KEY='password_encryption_type' VALUE='2' />
|
||||
<PARAM KEY='enable_stream_operator' VALUE='off' />
|
||||
<PARAM KEY='max_connections' VALUE='4096' />
|
||||
<PARAM KEY='enable_thread_pool' VALUE='on' />
|
||||
<PARAM KEY='thread_pool_attr' VALUE='1024,2,(nobind)' />
|
||||
<PARAM KEY='log_min_duration_statement' VALUE='60s' />
|
||||
<PARAM KEY='vacuum_cost_delay' VALUE='20ms' />
|
||||
</dn>
|
||||
</root>
|
||||
26
script/gspylib/etc/conf/guc_list.xml
Normal file
@ -0,0 +1,26 @@
|
||||
<?xml version="1.0" encoding="utf-8" ?>
|
||||
|
||||
<root>
|
||||
<dn>
|
||||
<PARAM KEY='checkpoint_segments' VALUE='64' />
|
||||
<PARAM KEY='comm_tcp_mode' VALUE='on' />
|
||||
<PARAM KEY='comm_quota_size' VALUE='1024kB' />
|
||||
<PARAM KEY='max_connections' VALUE='5000' />
|
||||
<PARAM KEY='max_prepared_transactions' VALUE='800' />
|
||||
<PARAM KEY='work_mem' VALUE='64MB' />
|
||||
<!-- Do not set max_process_memory in dummy datanodes -->
|
||||
<PARAM KEY='max_process_memory' VALUE='PHYSIC_MEMORY*0.665/(N+MAX_MASTER_DATANUM_IN_ONENODE)' />
|
||||
<PARAM KEY='shared_buffers' VALUE='1GB' />
|
||||
|
||||
<PARAM KEY='audit_enabled' VALUE='on' />
|
||||
<PARAM KEY='wal_keep_segments' VALUE='16' />
|
||||
|
||||
<!-- parameters for parallel redo -->
|
||||
<PARAM KEY='recovery_max_workers' VALUE='4' />
|
||||
|
||||
<!-- parameters for incremental checkpoint -->
|
||||
<PARAM KEY='enable_incremental_checkpoint' VALUE='on' />
|
||||
<PARAM KEY='enable_double_write' VALUE='on' />
|
||||
<PARAM KEY='use_workload_manager' VALUE='off' />
|
||||
</dn>
|
||||
</root>
|
||||
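The max_process_memory entry above is a formula rather than a literal value. A worked example under assumed meanings (N is taken here as the number of datanodes on the host and MAX_MASTER_DATANUM_IN_ONENODE as 9; both are assumptions for illustration, not definitions from the source):

physic_memory_gb = 256                 # hypothetical host memory
n = 4                                  # hypothetical datanodes on the host
max_master_datanum_in_onenode = 9      # assumed constant

max_process_memory_gb = physic_memory_gb * 0.665 / (n + max_master_datanum_in_onenode)
print(round(max_process_memory_gb, 1))  # -> 13.1 (GB per datanode)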
0
script/gspylib/etc/controller/gs_install.xml
Normal file
75
script/gspylib/etc/controller/gs_preinstall.xml
Normal file
@ -0,0 +1,75 @@
|
||||
<?xml version="1.0" encoding="utf-8" ?>
|
||||
|
||||
<gs_preinstall desc="prepare installation environment">
|
||||
<scene name="OLAP" desc="business flow of LibrA/Elk/DWS">
|
||||
<configuration>
|
||||
<reentry>ture/false</reentry>
|
||||
<auto_rollback>ture/false</auto_rollback>
|
||||
<concurrency_list></concurrency_list>
|
||||
</configuration>
|
||||
|
||||
<flow_list>
|
||||
<flow id="0001">
|
||||
<module>gspylib.common.ParameterParsecheck</module>
|
||||
<function>ParameterCommandLine</function>
|
||||
<scope>current</scope>
|
||||
<expect>0</expect>
|
||||
<block>block</block>
|
||||
<rollback_module>None</rollback_module>
|
||||
<rollback_function>None</rollback_function>
|
||||
<rollback_scope>all</rollback_scope>
|
||||
<rollback_expect>0</rollback_expect>
|
||||
<rollback_block>non-block</rollback_block>
|
||||
</flow>
|
||||
|
||||
<flow id="0002">
|
||||
<module>gspylib.common.ParameterParsecheck</module>
|
||||
<function>ParameterCommandLine</function>
|
||||
<scope>current</scope>
|
||||
<expect>0</expect>
|
||||
<block>block</block>
|
||||
<rollback_module>None</rollback_module>
|
||||
<rollback_function>None</rollback_function>
|
||||
<rollback_scope>all</rollback_scope>
|
||||
<rollback_expect>0</rollback_expect>
|
||||
<rollback_block>non-block</rollback_block>
|
||||
</flow>
|
||||
|
||||
<flow id="0003">
|
||||
<module></module>
|
||||
<function></function>
|
||||
<scope></scope>
|
||||
<expect></expect>
|
||||
<block></block>
|
||||
<rollback_module></rollback_module>
|
||||
<rollback_function></rollback_function>
|
||||
<rollback_scope></rollback_scope>
|
||||
<rollback_expect></rollback_expect>
|
||||
<rollback_block></rollback_block>
|
||||
</flow>
|
||||
</flow_list>
|
||||
</scene>
|
||||
|
||||
<scene name="single" desc="business flow of single cluster">
|
||||
<configuration>
|
||||
<reentry>ture/false</reentry>
|
||||
<auto_rollback>ture/false</auto_rollback>
|
||||
<concurrency_list></concurrency_list>
|
||||
</configuration>
|
||||
|
||||
<flow_list>
|
||||
<flow id="">
|
||||
<module></module>
|
||||
<function></function>
|
||||
<scope></scope>
|
||||
<expect></expect>
|
||||
<block></block>
|
||||
<rollback_module></rollback_module>
|
||||
<rollback_function></rollback_function>
|
||||
<rollback_scope></rollback_scope>
|
||||
<rollback_expect></rollback_expect>
|
||||
<rollback_block></rollback_block>
|
||||
</flow>
|
||||
</flow_list>
|
||||
</scene>
|
||||
</gs_preinstall>
|
||||
3217
script/gspylib/etc/sql/pmk_schema.sql
Normal file
File diff suppressed because it is too large
2682
script/gspylib/etc/sql/pmk_schema_single_inst.sql
Normal file
File diff suppressed because it is too large
2
script/gspylib/etc/sql/test_data_node.sql
Normal file
@ -0,0 +1,2 @@
|
||||
--test the data node
|
||||
SELECT * FROM pgxc_node WHERE node_type = 'D';
|
||||
17
script/gspylib/etc/sql/test_pmk.sql
Normal file
@ -0,0 +1,17 @@
|
||||
--
|
||||
--test the pmk schema
|
||||
--
|
||||
DECLARE
|
||||
pmk_oid oid;
|
||||
class_count int;
|
||||
proc_count int;
|
||||
BEGIN
|
||||
--if the pmk schema does not exist, this will raise an error.
|
||||
SELECT oid FROM pg_namespace WHERE nspname='pmk' INTO pmk_oid;
|
||||
--select the count of class_count
|
||||
SELECT COUNT(*) FROM pg_class WHERE relnamespace=pmk_oid INTO class_count;
|
||||
--select the count of proc_count
|
||||
SELECT COUNT(*) FROM pg_proc WHERE pronamespace=pmk_oid INTO proc_count;
|
||||
RAISE INFO 'pmk schema exist. class count is %, proc count is %.', class_count , proc_count;
|
||||
END;
|
||||
/
|
||||
33
script/gspylib/etc/sql/unlock_cluster.sql
Normal file
@ -0,0 +1,33 @@
|
||||
--
|
||||
--unlock the cluster
|
||||
--The query content must be the same as the values of LOCK_CLUSTER_SQL and WAITLOCK_CLUSTER_SQL in the local/LocalQuery.py file.
|
||||
--Keep the two definitions in sync.
|
||||
--
|
||||
DECLARE
|
||||
result BOOL;
|
||||
--begin unlock the cluster sql
|
||||
BEGIN
|
||||
FOR i in (select * from pg_stat_activity where query like 'select case (select pgxc_lock_for_backup()) when true then (select pg_sleep(%)::text) end;' or query like 'select case (select count(*) from pg_advisory_lock(65535,65535)) when true then (select pg_sleep(%)::text) end;')
|
||||
LOOP
|
||||
--set info datid datname pid
|
||||
RAISE INFO 'datid: %, datname: %, pid: %', i.datid, i.datname, i.pid;
|
||||
--set info usesysid usename application_name
|
||||
RAISE INFO 'usesysid: %, usename: %, application_name: %', i.usesysid, i.usename, i.application_name;
|
||||
--set info client_addr client_hostname client_port
|
||||
RAISE INFO 'client_addr: %, client_hostname: %, client_port: %', i.client_addr, i.client_hostname, i.client_port;
|
||||
--set info backend_start xact_start
|
||||
RAISE INFO 'backend_start: %, xact_start: %', i.backend_start, i.xact_start;
|
||||
--set info query_start state_change
|
||||
RAISE INFO 'query_start: %, state_change: %', i.query_start, i.state_change;
|
||||
--set info waiting state
|
||||
RAISE INFO 'waiting: %, state: %', i.waiting, i.state;
|
||||
--set info query
|
||||
RAISE INFO 'query: %', i.query;
|
||||
--set result false
|
||||
result := false;
|
||||
--SELECT pg_cancel_backend
|
||||
SET xc_maintenance_mode = on; SELECT pg_cancel_backend(i.pid) INTO result; RESET xc_maintenance_mode;
|
||||
RAISE INFO 'cancel command result: %', result;
|
||||
END LOOP;
|
||||
END;
|
||||
/
|
||||
0
script/gspylib/hardware/__init__.py
Normal file
0
script/gspylib/hardware/__init__.py
Normal file
86
script/gspylib/hardware/gscpu.py
Normal file
86
script/gspylib/hardware/gscpu.py
Normal file
@ -0,0 +1,86 @@
# -*- coding:utf-8 -*-
#############################################################################
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
# Description : gscpu.py is a utility to do something for cpu information.
#############################################################################
import os
import subprocess
import sys
import multiprocessing

sys.path.append(sys.path[0] + "/../../")
from gspylib.common.ErrorCode import ErrorCode

"""
Requirements:
1. getCpuNum() -> get real cpu number.
2. getCpuOnlineOfflineInfo(isOnlineCpu) -> get cpu online/offline information
"""


class CpuInfo(object):
    """
    function: Init the CpuInfo options
    """

    def __init__(self):
        """
        function: Init the CpuInfo options
        """

    @staticmethod
    def getCpuNum():
        """
        function : get cpu set of current board
        input : null
        output : total CPU count
        """
        total = 0
        try:
            total = multiprocessing.cpu_count()
        except Exception as e:
            raise Exception(ErrorCode.GAUSS_523["GAUSS_52301"] + str(e))
        return total

    @staticmethod
    def getCpuOnlineOfflineInfo(isOnlineCpu=True):
        """
        cat /sys/devices/system/cpu/online or /sys/devices/system/cpu/offline
        """
        onlineFileName = "/sys/devices/system/cpu/online"
        offlineFileName = "/sys/devices/system/cpu/offline"

        if (isOnlineCpu):
            fileName = onlineFileName
        else:
            fileName = offlineFileName

        if (not os.path.exists(fileName)):
            raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] % fileName)
        if (not os.path.isfile(fileName)):
            raise Exception(ErrorCode.GAUSS_502["GAUSS_50210"] % fileName)

        cmd = "cat '%s' 2>/dev/null" % fileName
        status, output = subprocess.getstatusoutput(cmd)
        if (status == 0):
            return output
        else:
            raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd +
                            " Error: \n%s" % str(output))


g_cpu = CpuInfo()
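A minimal usage sketch for the module above, assuming the gspylib package directory is on sys.path (the import path and script layout are assumptions, not part of the diff):

# Hypothetical caller for gscpu.py.
from gspylib.hardware.gscpu import g_cpu

def printCpuSummary():
    # getCpuNum() wraps multiprocessing.cpu_count() and raises GAUSS_52301 on failure.
    print("cpu count: %d" % g_cpu.getCpuNum())
    # getCpuOnlineOfflineInfo() reads /sys/devices/system/cpu/online (or .../offline).
    print("online cpus: %s" % g_cpu.getCpuOnlineOfflineInfo(isOnlineCpu=True))

if __name__ == "__main__":
    printCpuSummary()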
298
script/gspylib/hardware/gsdisk.py
Normal file
298
script/gspylib/hardware/gsdisk.py
Normal file
@ -0,0 +1,298 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : gsdisk.py is a utility to do something for disk.
|
||||
#############################################################################
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import psutil
|
||||
import math
|
||||
|
||||
sys.path.append(sys.path[0] + "/../../")
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.os.gsplatform import g_Platform
|
||||
|
||||
"""
|
||||
Requirements:
|
||||
1. getUsageSize(directory) -> get directory or file real size. Unit is byte.
|
||||
2. getMountPathByDataDir(directory) -> get the input directory of the mount
|
||||
disk
|
||||
3. getMountPathAvailSize(directory) -> get the avail size about the input
|
||||
directory of the mount disk. Unit MB
|
||||
4. getDiskSpaceUsage(directory) -> get directory or file space size. Unit is
|
||||
byte.
|
||||
5. getDiskInodeUsage(directory) -> get directory or file inode usage. Unit is
|
||||
byte.
|
||||
6. getDiskMountType(directory) -> get the type about the input directory of
|
||||
the mount disk.
|
||||
7. getDiskReadWritespeed(inputFile, outputFile, bs, count, iflag = '',
|
||||
oflag = '') -> get disk read/write speed
|
||||
"""
|
||||
|
||||
|
||||
class diskInfo():
|
||||
"""
|
||||
function: Init the DiskUsage options
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.mtabFile = g_Platform.getMtablFile()
|
||||
|
||||
def getMountInfo(self, allInfo=False):
|
||||
"""
|
||||
get mount disk information: device mountpoint fstype opts
|
||||
input: bool (physical devices and all others)
|
||||
output: list
|
||||
"""
|
||||
return psutil.disk_partitions(allInfo)
|
||||
|
||||
def getUsageSize(self, directory):
|
||||
"""
|
||||
get directory or file real size. Unit is byte
|
||||
"""
|
||||
cmd = ""
|
||||
try:
|
||||
cmd = "%s -l -R %s | %s ^- | %s '{t+=$5;} END {print t}'" % (
|
||||
g_Platform.getListCmd(), directory, g_Platform.getGrepCmd(),
|
||||
g_Platform.getAwkCmd())
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status == 0):
|
||||
return output.split('\t')[0].strip()
|
||||
else:
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd
|
||||
+ " Error: \n%s" % str(output))
|
||||
except Exception as e:
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd)
|
||||
|
||||
# Mtab always keeps the partition information already mounted in the
|
||||
# current system.
|
||||
# For programs like fdisk and df,
|
||||
# you must read the mtab file to get the partition mounting status in
|
||||
# the current system.
|
||||
def getMountPathByDataDir(self, datadir):
|
||||
"""
|
||||
function : Get the disk by the file path
|
||||
input : datadir the file path
|
||||
output : device disk
|
||||
"""
|
||||
device = ""
|
||||
mountDisk = {}
|
||||
if not os.path.exists(datadir):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50228"] % datadir)
|
||||
try:
|
||||
datadir = os.path.realpath(datadir)
|
||||
with open(self.mtabFile, "r") as fp:
|
||||
for line in fp.readlines():
|
||||
if line.startswith('none'):
|
||||
continue
|
||||
i_fields = line.split()
|
||||
if len(i_fields) < 3:
|
||||
continue
|
||||
i_device = i_fields[0].strip()
|
||||
i_mountpoint = i_fields[1].strip()
|
||||
mountDisk[i_mountpoint] = [i_device, i_mountpoint]
|
||||
|
||||
            mountList = sorted(mountDisk.keys(), reverse=True)
|
||||
for mount in mountList:
|
||||
i_mountpoint = mountDisk[mount][1]
|
||||
if (i_mountpoint == '/'):
|
||||
i_mount_dirlst = ['']
|
||||
else:
|
||||
i_mount_dirlst = i_mountpoint.split('/')
|
||||
data_dirlst = datadir.split('/')
|
||||
if len(i_mount_dirlst) > len(data_dirlst):
|
||||
continue
|
||||
if (i_mount_dirlst == data_dirlst[:len(i_mount_dirlst)]):
|
||||
device = mountDisk[mount][0]
|
||||
break
|
||||
|
||||
except Exception as e:
|
||||
raise Exception(ErrorCode.GAUSS_530["GAUSS_53011"] +
|
||||
" disk mount." + "Error: %s" % str(e))
|
||||
return device
|
||||
|
||||
# Mtab always keeps the partition information already mounted in the
|
||||
# current system.
|
||||
# For programs like fdisk and df,
|
||||
# you must read the mtab file to get the partition mounting status in
|
||||
# the current system.
|
||||
def getMountPathAvailSize(self, device, sizeUnit='MB'):
|
||||
"""
|
||||
function : Get the disk size by the file path
|
||||
input : device the file path
|
||||
: sizeUnit byte, GB, MB, KB
|
||||
output : total disk size
|
||||
"""
|
||||
if (not os.path.exists(device)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50228"] % device)
|
||||
try:
|
||||
dev_info = os.statvfs(device)
|
||||
if (sizeUnit == 'GB'):
|
||||
total = dev_info.f_bavail * dev_info.f_frsize // (
|
||||
1024 * 1024 * 1024)
|
||||
elif (sizeUnit == 'MB'):
|
||||
total = dev_info.f_bavail * dev_info.f_frsize // (1024 * 1024)
|
||||
elif (sizeUnit == 'KB'):
|
||||
total = dev_info.f_bavail * dev_info.f_frsize // 1024
|
||||
else:
|
||||
total = dev_info.f_bavail * dev_info.f_frsize
|
||||
except Exception as e:
|
||||
raise Exception(ErrorCode.GAUSS_530["GAUSS_53011"] + " disk size."
|
||||
+ "Error: %s" % str(e))
|
||||
return total
|
||||
|
||||
# Mtab always keeps the partition information already mounted in the
|
||||
# current system.
|
||||
# For programs like fdisk and df,
|
||||
# you must read the mtab file to get the partition mounting status in
|
||||
# the current system.
|
||||
def getDiskSpaceUsage(self, path):
|
||||
"""
|
||||
function : Get the disk usage by the file path
|
||||
method of calculation:
|
||||
Total capacity (KB)=f_bsize*f_blocks/1024 [1k-blocks]
|
||||
Usage (KB)= f_bsize*(f_blocks-f_bfree)/1024 [Used]
|
||||
Valid capacity (KB) = f_bsize*f_bavail/1024 [Available]
|
||||
Usage (%) = Usage/(Usage + Valid capacity) *100 [Use%]
|
||||
input : path the file path
|
||||
output : percent
|
||||
"""
|
||||
percent = 0
|
||||
if (not os.path.exists(path)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50228"] % path)
|
||||
try:
|
||||
dev_info = os.statvfs(path)
|
||||
used = dev_info.f_blocks - dev_info.f_bfree
|
||||
valueable = dev_info.f_bavail + used
|
||||
percent = math.ceil((float(used) / valueable) * 100)
|
||||
except Exception as e:
|
||||
raise Exception(ErrorCode.GAUSS_530["GAUSS_53011"] + " disk space."
|
||||
+ "Error: %s" % str(e))
|
||||
return float(percent)
|
||||
|
||||
def getDiskSpaceForShrink(self, path, delta):
|
||||
"""
|
||||
function : Get the disk usage by the file path for Shrink
|
||||
input : path the file path and deltasize
|
||||
output : percent
|
||||
"""
|
||||
percent = 0
|
||||
if (not os.path.exists(path)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50228"] % path)
|
||||
try:
|
||||
dev_info = os.statvfs(path)
|
||||
used = (dev_info.f_blocks - dev_info.f_bfree) * dev_info.f_bsize
|
||||
valueable = dev_info.f_bavail * dev_info.f_bsize + used + delta
|
||||
            percent = math.ceil((float(used) / valueable) * 100)
|
||||
except Exception as e:
|
||||
raise Exception(ErrorCode.GAUSS_530["GAUSS_53011"] + " disk space."
|
||||
+ "Error: %s" % str(e))
|
||||
return float(percent)
|
||||
|
||||
def getDiskInodeUsage(self, Path):
|
||||
"""
|
||||
function : Get the inode by the file path
|
||||
input : Path the file path
|
||||
output : percent
|
||||
"""
|
||||
percent = 0
|
||||
if (not os.path.exists(Path)):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50228"] % Path)
|
||||
try:
|
||||
dev_info = os.statvfs(Path)
|
||||
used = dev_info.f_files - dev_info.f_ffree
|
||||
valueable = dev_info.f_favail + used
|
||||
            percent = math.ceil((float(used) / valueable) * 100)
|
||||
except Exception as e:
|
||||
raise Exception(ErrorCode.GAUSS_530["GAUSS_53011"] + " disk Inode."
|
||||
+ "Error: %s" % str(e))
|
||||
return float(percent)
|
||||
|
||||
def getDiskMountType(self, device):
|
||||
"""
|
||||
function : Get the mount type by device
|
||||
input : device eg:/dev/pts
|
||||
output : fstype device type
|
||||
"""
|
||||
fstype = ""
|
||||
try:
|
||||
|
||||
with open(self.mtabFile, "r") as fp:
|
||||
for line in fp.readlines():
|
||||
if line.startswith('#'):
|
||||
continue
|
||||
i_fields = line.split()
|
||||
if len(i_fields) < 3:
|
||||
continue
|
||||
i_device = i_fields[0].strip()
|
||||
i_fstype = i_fields[2].strip()
|
||||
if i_device == device:
|
||||
fstype = i_fstype
|
||||
break
|
||||
except Exception as e:
|
||||
raise Exception(ErrorCode.GAUSS_530["GAUSS_53011"]
|
||||
+ " disk mount type." + "Error: %s" % str(e))
|
||||
return fstype
|
||||
|
||||
def getDiskReadWritespeed(self, inputFile, outputFile, bs, count, iflag='',
|
||||
oflag=''):
|
||||
"""
|
||||
function : Get the disk read or write rate
|
||||
input : inputFile
|
||||
: outputFile
|
||||
: bs
|
||||
: count
|
||||
: iflag
|
||||
: oflag
|
||||
output : speed
|
||||
"""
|
||||
try:
|
||||
cmd = "%s if=%s of=%s bs=%s count=%s " % (
|
||||
g_Platform.getDdCmd(), inputFile, outputFile, bs, count)
|
||||
if iflag:
|
||||
cmd += "iflag=%s " % iflag
|
||||
if oflag:
|
||||
cmd += "oflag=%s " % oflag
|
||||
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status == 0):
|
||||
output = output.split("\n")
|
||||
resultInfolist = output[2].strip().split(",")
|
||||
if ((resultInfolist[2]).split()[1] == "KB/s"):
|
||||
speed = float((resultInfolist[2]).split()[0]) * 1024
|
||||
elif ((resultInfolist[2]).split()[1] == "MB/s"):
|
||||
speed = float((resultInfolist[2]).split()[0]) * 1024 * 1024
|
||||
elif ((resultInfolist[2]).split()[1] == "GB/s"):
|
||||
speed = float(
|
||||
(resultInfolist[2]).split()[0]) * 1024 * 1024 * 1024
|
||||
elif ((resultInfolist[2]).split()[1] == "TB/s"):
|
||||
speed = float((resultInfolist[2]).split()[
|
||||
0]) * 1024 * 1024 * 1024 * 1024
|
||||
else:
|
||||
speed = float((resultInfolist[2]).split()[0])
|
||||
return speed
|
||||
else:
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd
|
||||
+ " Error: \n%s" % str(output))
|
||||
except Exception as e:
|
||||
raise Exception(
|
||||
ErrorCode.GAUSS_504["GAUSS_50406"] + "Error:\n%s" % str(e))
|
||||
|
||||
|
||||
g_disk = diskInfo()
|
||||
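A minimal usage sketch for the disk helpers above, assuming the same gspylib import path; the data directory is a placeholder:

# Hypothetical caller for gsdisk.py; the directory below is illustrative only.
from gspylib.hardware.gsdisk import g_disk

def reportDataDir(datadir="/opt/gaussdb/data"):
    # Resolve the mount device for the directory via /etc/mtab.
    device = g_disk.getMountPathByDataDir(datadir)
    print("device: %s, fstype: %s" % (device, g_disk.getDiskMountType(device)))
    # Available size in MB, plus space/inode usage as percentages.
    print("avail: %s MB" % g_disk.getMountPathAvailSize(datadir, sizeUnit='MB'))
    print("space used: %s%%, inodes used: %s%%"
          % (g_disk.getDiskSpaceUsage(datadir), g_disk.getDiskInodeUsage(datadir)))

if __name__ == "__main__":
    reportDataDir()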
101
script/gspylib/hardware/gsmemory.py
Normal file
101
script/gspylib/hardware/gsmemory.py
Normal file
@ -0,0 +1,101 @@
# -*- coding:utf-8 -*-
#############################################################################
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
# Description : gsmemory.py is a utility to do something for memory information.
#############################################################################
import sys
import psutil

sys.path.append(sys.path[0] + "/../../")
from gspylib.common.ErrorCode import ErrorCode

"""
Requirements:
get memory and swap size
"""


class memoryInfo(object):
    """
    function: Init the MemInfo options
    """

    def __init__(self):
        """
        function: Init the MemInfo options
        """

    @staticmethod
    def getMemUsedSize():
        """
        get used memory size
        """
        return psutil.virtual_memory().used

    @staticmethod
    def getMemFreeSize():
        """
        get free memory size
        """
        return psutil.virtual_memory().free

    @staticmethod
    def getSwapUsedSize():
        """
        get used swap size
        """
        return psutil.swap_memory().used

    @staticmethod
    def getSwapFreeSize():
        """
        get free swap size
        """
        return psutil.swap_memory().free

    @staticmethod
    def getSwapTotalSize():
        """
        function : Get swap memory total size
        input : null
        output : total memory size (byte)
        """
        total = 0
        try:
            total = psutil.swap_memory().total
        except Exception as e:
            raise Exception(ErrorCode.GAUSS_505["GAUSS_50502"]
                            + "Error: %s" % str(e))
        return total

    @staticmethod
    def getMemTotalSize():
        """
        function : Get system virtual memory total size
        input : null
        output : total virtual memory(byte)
        """
        total = 0
        try:
            total = psutil.virtual_memory().total
        except Exception as e:
            raise Exception(ErrorCode.GAUSS_505["GAUSS_50502"]
                            + "Error: %s" % str(e))
        return total


g_memory = memoryInfo()
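A short usage sketch for the memory helpers above (import path assumed, as with the other hardware modules):

# Hypothetical caller for gsmemory.py; all values are bytes as reported by psutil.
from gspylib.hardware.gsmemory import g_memory

def printMemorySummary():
    print("mem total/used/free: %s/%s/%s"
          % (g_memory.getMemTotalSize(), g_memory.getMemUsedSize(),
             g_memory.getMemFreeSize()))
    print("swap total/used/free: %s/%s/%s"
          % (g_memory.getSwapTotalSize(), g_memory.getSwapUsedSize(),
             g_memory.getSwapFreeSize()))

if __name__ == "__main__":
    printMemorySummary()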
0
script/gspylib/inspection/__init__.py
Normal file
0
script/gspylib/inspection/__init__.py
Normal file
472
script/gspylib/inspection/common/CheckItem.py
Normal file
472
script/gspylib/inspection/common/CheckItem.py
Normal file
@ -0,0 +1,472 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
#############################################################################
|
||||
from gspylib.inspection.common import SharedFuncs
|
||||
|
||||
import json
|
||||
import imp
|
||||
import types
|
||||
from abc import abstractmethod
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.inspection.common.Log import LoggerFactory
|
||||
from gspylib.inspection.common.CheckResult import LocalItemResult, \
|
||||
ResultStatus
|
||||
from gspylib.inspection.common.Exception import CheckNAException
|
||||
|
||||
|
||||
def defaultAnalysis(self, itemResult):
|
||||
# Get the item result information
|
||||
itemResult.standard = self.standard
|
||||
itemResult.suggestion = self.suggestion
|
||||
itemResult.category = self.category
|
||||
itemResult.title = self.title
|
||||
|
||||
errors = []
|
||||
ngs = []
|
||||
warnings = []
|
||||
vals = {}
|
||||
for i in itemResult.getLocalItems():
|
||||
if (i.rst == ResultStatus.OK or i.rst == ResultStatus.NA):
|
||||
if (i.val):
|
||||
vals[i.host] = i.val
|
||||
continue
|
||||
elif (i.rst == ResultStatus.ERROR):
|
||||
errors.append("%s : %s" % (i.host, i.val))
|
||||
elif (i.rst == ResultStatus.WARNING):
|
||||
warnings.append("%s : %s" % (i.host, i.val))
|
||||
else:
|
||||
ngs.append("%s : %s" % (i.host, i.val))
|
||||
# Analysis results
|
||||
if (len(ngs) > 0 or len(errors) > 0 or len(warnings) > 0):
|
||||
rst = ResultStatus.WARNING
|
||||
if len(errors) > 0:
|
||||
rst = ResultStatus.ERROR
|
||||
elif len(ngs) > 0:
|
||||
rst = ResultStatus.NG
|
||||
itemResult.rst = rst
|
||||
itemResult.analysis = "\n".join(ngs + errors + warnings)
|
||||
else:
|
||||
itemResult.rst = ResultStatus.OK
|
||||
itemResult.analysis = ""
|
||||
|
||||
analysisStrList = []
|
||||
nas, oks, ngs, warnings, errors = classifyItemResult(itemResult)
|
||||
total = len(oks) + len(ngs) + len(warnings) + len(errors)
|
||||
|
||||
rst = ResultStatus.OK
|
||||
okMsg, okAnalysisList = countItemResult(oks)
|
||||
warningMsg, warningAnalysisList = countItemResult(warnings)
|
||||
failedMsg, failedAnalysisList = countItemResult(ngs)
|
||||
errorMsg, errorAnalysisList = countItemResult(errors)
|
||||
if (len(warnings) > 0):
|
||||
rst = ResultStatus.WARNING
|
||||
if (len(ngs) > 0):
|
||||
rst = ResultStatus.NG
|
||||
if (len(errors) > 0):
|
||||
rst = ResultStatus.ERROR
|
||||
countMsg = "The item run on %s nodes. %s%s%s%s" % (
|
||||
total, okMsg, warningMsg, failedMsg, errorMsg)
|
||||
analysisStrList.append(countMsg)
|
||||
if (errorAnalysisList):
|
||||
analysisStrList.extend(errorAnalysisList)
|
||||
if (failedAnalysisList):
|
||||
analysisStrList.extend(failedAnalysisList)
|
||||
if (warningAnalysisList):
|
||||
analysisStrList.extend(warningAnalysisList)
|
||||
if (itemResult.name == 'CheckSysTable'):
|
||||
value = [vals[key] for key in sorted(vals.keys())]
|
||||
analysisStrList.extend(value)
|
||||
itemResult.rst = rst
|
||||
itemResult.analysis = "\n".join(analysisStrList)
|
||||
return itemResult
|
||||
|
||||
|
||||
def consistentAnalysis(self, itemResult):
|
||||
# check the rst in each node and make sure the var is consistence
|
||||
itemResult.standard = self.standard
|
||||
itemResult.suggestion = self.suggestion
|
||||
itemResult.category = self.category
|
||||
itemResult.title = self.title
|
||||
|
||||
analysisStrList = []
|
||||
nas, oks, ngs, warnings, errors = classifyItemResult(itemResult)
|
||||
total = len(oks) + len(ngs) + len(warnings) + len(errors)
|
||||
|
||||
# The item run on %s nodes. success: %s warning: %s ng:%s error:%
|
||||
rst = ResultStatus.OK
|
||||
if (len(oks) == total):
|
||||
okMsg, okAnalysisList = countItemResult(oks, True)
|
||||
else:
|
||||
okMsg, okAnalysisList = countItemResult(oks)
|
||||
warningMsg, warningAnalysisList = countItemResult(warnings)
|
||||
failedMsg, failedAnalysisList = countItemResult(ngs)
|
||||
errorMsg, errorAnalysisList = countItemResult(errors)
|
||||
if (len(okAnalysisList) > 0):
|
||||
okMsg += " (consistent) " if (
|
||||
len(okAnalysisList) == 1) else " (not consistent) "
|
||||
|
||||
if (len(warnings) > 0 and rst == ResultStatus.OK):
|
||||
rst = ResultStatus.WARNING
|
||||
if (len(okAnalysisList) > 1):
|
||||
rst = ResultStatus.NG
|
||||
if (itemResult.name in ["CheckDiskConfig", "CheckCpuCount",
|
||||
"CheckMemInfo", "CheckStack",
|
||||
"CheckKernelVer"]):
|
||||
rst = ResultStatus.WARNING
|
||||
if (len(ngs) > 0):
|
||||
rst = ResultStatus.NG
|
||||
if (len(errors) > 0):
|
||||
rst = ResultStatus.ERROR
|
||||
|
||||
countMsg = "The item run on %s nodes. %s%s%s%s" % (
|
||||
total, okMsg, warningMsg, failedMsg, errorMsg)
|
||||
analysisStrList.append(countMsg)
|
||||
if (errorAnalysisList):
|
||||
analysisStrList.extend(errorAnalysisList)
|
||||
if (failedAnalysisList):
|
||||
analysisStrList.extend(failedAnalysisList)
|
||||
if (warningAnalysisList):
|
||||
analysisStrList.extend(warningAnalysisList)
|
||||
if (okAnalysisList):
|
||||
analysisStrList.extend(okAnalysisList)
|
||||
itemResult.rst = rst
|
||||
itemResult.analysis = "\n".join(analysisStrList)
|
||||
return itemResult
|
||||
|
||||
|
||||
def getValsItems(vals):
|
||||
"""
|
||||
|
||||
:param vals:
|
||||
:return:
|
||||
"""
|
||||
ret = {}
|
||||
for i_key, i_val in list(vals.items()):
|
||||
try:
|
||||
i_val = eval(i_val)
|
||||
except Exception:
|
||||
i_val = i_val
|
||||
if isinstance(i_val, dict):
|
||||
for j_key, j_val in list(i_val.items()):
|
||||
ret[j_key] = j_val
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def getCheckType(category):
|
||||
'''
|
||||
function : get check type
|
||||
input : category
|
||||
output : 1,2,3
|
||||
'''
|
||||
if not category:
|
||||
return 0
|
||||
if category == "cluster":
|
||||
return 1
|
||||
elif category == "database":
|
||||
return 3
|
||||
else:
|
||||
return 2
|
||||
|
||||
|
||||
def classifyItemResult(itemResult):
|
||||
nas = []
|
||||
oks = []
|
||||
ngs = []
|
||||
wns = []
|
||||
ers = []
|
||||
# Summary results
|
||||
for i in itemResult.getLocalItems():
|
||||
if (i.rst == ResultStatus.OK):
|
||||
oks.append(i)
|
||||
if (i.rst == ResultStatus.NA):
|
||||
nas.append(i)
|
||||
if (i.rst == ResultStatus.NG):
|
||||
ngs.append(i)
|
||||
if (i.rst == ResultStatus.WARNING):
|
||||
wns.append(i)
|
||||
if (i.rst == ResultStatus.ERROR):
|
||||
ers.append(i)
|
||||
return (nas, oks, ngs, wns, ers)
|
||||
|
||||
|
||||
def countItemResult(itemList, allNode=False):
|
||||
if (itemList is None or len(itemList) == 0):
|
||||
return ("", [])
|
||||
first = itemList[0]
|
||||
msgTitle = "default"
|
||||
if (first.rst == ResultStatus.WARNING):
|
||||
msgTitle = "warning"
|
||||
if (first.rst == ResultStatus.NG):
|
||||
msgTitle = "ng"
|
||||
if (first.rst == ResultStatus.ERROR):
|
||||
msgTitle = "error"
|
||||
if (first.rst == ResultStatus.OK):
|
||||
msgTitle = "success"
|
||||
countMsg = " %s: %s " % (msgTitle, len(itemList))
|
||||
|
||||
defaultHosts = [first.host]
|
||||
diffs = []
|
||||
for i in itemList[1:]:
|
||||
if i.val == first.val:
|
||||
defaultHosts.append(i.host)
|
||||
continue
|
||||
else:
|
||||
diffs.append("The different[%s] value:\n%s" % (i.host, i.val))
|
||||
if (allNode):
|
||||
analysisStrList = [
|
||||
"The %s on all nodes value:\n%s" % (msgTitle, first.val)]
|
||||
else:
|
||||
analysisStrList = ["The %s%s value:\n%s" % (
|
||||
msgTitle, '[' + ",".join(defaultHosts) + ']', first.val)]
|
||||
if (len(diffs) > 0):
|
||||
analysisStrList.extend(diffs)
|
||||
return (countMsg, analysisStrList)
|
||||
|
||||
|
||||
class BaseItem(object):
|
||||
'''
|
||||
base class of check item
|
||||
'''
|
||||
|
||||
def __init__(self, name):
|
||||
'''
|
||||
Constructor
|
||||
'''
|
||||
self.name = name
|
||||
self.title = None
|
||||
self.set = False
|
||||
self.log = None
|
||||
self.suggestion = None
|
||||
self.standard = None
|
||||
self.threshold = {}
|
||||
self.category = 'other'
|
||||
self.permission = 'user'
|
||||
self.analysis = 'default'
|
||||
self.scope = 'all'
|
||||
self.cluster = None
|
||||
self.port = None
|
||||
self.user = None
|
||||
self.nodes = None
|
||||
self.mpprcFile = None
|
||||
self.thresholdDn = None
|
||||
self.context = None
|
||||
self.tmpPath = None
|
||||
self.outPath = None
|
||||
self.host = DefaultValue.GetHostIpOrName()
|
||||
self.result = LocalItemResult(name, self.host)
|
||||
self.routing = None
|
||||
self.skipSetItem = []
|
||||
self.ipAddr = None
|
||||
# self cluster name not only lc
|
||||
self.LCName = None
|
||||
self.ShrinkNodes = None
|
||||
|
||||
@abstractmethod
|
||||
def preCheck(self):
|
||||
'''
|
||||
abstract precheck for check item
|
||||
'''
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def doCheck(self):
|
||||
'''
|
||||
check script for each item
|
||||
'''
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def postAnalysis(self, itemResult, category="", name=""):
|
||||
'''
|
||||
analysis the item result got from each node
|
||||
'''
|
||||
pass
|
||||
|
||||
def initFrom(self, context):
|
||||
'''
|
||||
initialize the check item from context
|
||||
'''
|
||||
item = next(i for i in context.items if i['name'] == self.name)
|
||||
if item:
|
||||
self.title = self.__getLocaleAttr(item, 'title')
|
||||
self.suggestion = self.__getLocaleAttr(item, 'suggestion')
|
||||
self.standard = self.__getLocaleAttr(item, 'standard')
|
||||
            if (item.__contains__('category')):
|
||||
self.category = item['category']
|
||||
if (item.__contains__('threshold')):
|
||||
self.threshold = item['threshold']
|
||||
# set pre check method
|
||||
self.setScope(item['scope'])
|
||||
# set post analysis method
|
||||
self.setAnalysis(item['analysis'])
|
||||
|
||||
self.context = context
|
||||
self.cluster = context.cluster
|
||||
self.user = context.user
|
||||
self.nodes = context.nodes
|
||||
self.mpprcFile = context.mpprc
|
||||
self.result.checkID = context.checkID
|
||||
self.result.user = context.user
|
||||
self.tmpPath = context.tmpPath
|
||||
self.outPath = context.outPath
|
||||
self.set = context.set
|
||||
self.log = context.log
|
||||
self.routing = context.routing
|
||||
self.skipSetItem = context.skipSetItem
|
||||
self.__getLocalIP(context.nodes)
|
||||
self.LCName = context.LCName
|
||||
self.ShrinkNodes = context.ShrinkNodes
|
||||
if not context.thresholdDn:
|
||||
self.thresholdDn = 90
|
||||
else:
|
||||
self.thresholdDn = context.thresholdDn
|
||||
# new host without cluster installed
|
||||
if (not self.user):
|
||||
self.host = DefaultValue.GetHostIpOrName()
|
||||
self.result.host = DefaultValue.GetHostIpOrName()
|
||||
|
||||
def __getLocalIP(self, nodeList):
|
||||
for node in nodeList:
|
||||
if (SharedFuncs.is_local_node(node) and SharedFuncs.validate_ipv4(
|
||||
node)):
|
||||
self.ipAddr = node
|
||||
return
|
||||
|
||||
def __getLocaleAttr(self, obj, attr, language='zh'):
|
||||
'''
|
||||
get attribute value for different language
|
||||
'''
|
||||
locAttr = str(attr) + '_' + language
|
||||
if (not obj.__contains__(locAttr) or obj[locAttr] == ""):
|
||||
return obj[str(attr) + '_' + 'zh']
|
||||
else:
|
||||
return obj[locAttr]
|
||||
|
||||
def setScope(self, scope):
|
||||
# Choose execution node
|
||||
self.scope = scope
|
||||
# cn node to perform the check
|
||||
if (scope == 'cn'):
|
||||
self.preCheck = self.__cnPreCheck(self.preCheck)
|
||||
# Local implementation of the inspection
|
||||
elif (scope == 'local'):
|
||||
self.preCheck = self.__localPreCheck(self.preCheck)
|
||||
|
||||
def setAnalysis(self, analysis):
|
||||
# Analyze the test results
|
||||
self.analysis = analysis
|
||||
# Consistency analysis for ap
|
||||
if (analysis == 'consistent'):
|
||||
self.postAnalysis = types.MethodType(consistentAnalysis, self)
|
||||
# Default analysis for ap
|
||||
elif (analysis == 'default'):
|
||||
self.postAnalysis = types.MethodType(defaultAnalysis, self)
|
||||
|
||||
def runCheck(self, context, g_logger):
|
||||
'''
|
||||
main process for checking
|
||||
'''
|
||||
try:
|
||||
g_logger.debug("Start to run %s" % self.name)
|
||||
# initialization
|
||||
self.initFrom(context)
|
||||
self.preCheck()
|
||||
# Perform the inspection
|
||||
self.doCheck()
|
||||
if (self.set and (
|
||||
self.result.rst == ResultStatus.NG
|
||||
or self.result.rst == ResultStatus.WARNING)
|
||||
and self.name not in self.skipSetItem):
|
||||
self.doSet()
|
||||
self.doCheck()
|
||||
g_logger.debug("Finish to run %s" % self.name)
|
||||
except CheckNAException:
|
||||
self.result.rst = ResultStatus.NA
|
||||
# An internal error occurred while executing code
|
||||
except Exception as e:
|
||||
self.result.rst = ResultStatus.ERROR
|
||||
self.result.val = str(e)
|
||||
g_logger.debug(
|
||||
"Exception occur when running %s:\n%s" % (self.name, str(e)))
|
||||
finally:
|
||||
# output result
|
||||
self.result.output(context.tmpPath)
|
||||
|
||||
def __cnPreCheck(self, func):
|
||||
# cn Pre-check node
|
||||
def wrapper():
|
||||
if (not hasattr(self, 'cluster')):
|
||||
raise Exception(ErrorCode.GAUSS_530["GAUSS_53030"]
|
||||
% "cluster attribute")
|
||||
if (not hasattr(self, 'host')):
|
||||
raise Exception(ErrorCode.GAUSS_530["GAUSS_53030"]
|
||||
% "host attribute")
|
||||
if (not self.cluster):
|
||||
raise Exception(ErrorCode.GAUSS_530["GAUSS_53031"])
|
||||
dbNode = self.cluster.getDbNodeByName(self.host)
|
||||
# The specified node does not exist or is empty
|
||||
if (dbNode is None or dbNode == ""):
|
||||
raise Exception(ErrorCode.GAUSS_530["GAUSS_53013"]
|
||||
% "The dbNode")
|
||||
if self.cluster.isSingleInstCluster():
|
||||
masterDn = SharedFuncs.getMasterDnNum(self.user,
|
||||
self.mpprcFile)
|
||||
if len(dbNode.datanodes) < 1 or dbNode.datanodes[
|
||||
0].instanceId not in masterDn:
|
||||
raise CheckNAException(
|
||||
"The node does not contains materDn instance")
|
||||
self.port = dbNode.datanodes[0].port
|
||||
else:
|
||||
# The specified CN node does not exist
|
||||
if (len(dbNode.coordinators) == 0):
|
||||
raise CheckNAException(
|
||||
"The node does not contains cn instance")
|
||||
# get cn port
|
||||
self.port = dbNode.coordinators[0].port
|
||||
self.cntype = dbNode.coordinators[0].instanceType
|
||||
return func()
|
||||
|
||||
return wrapper
|
||||
|
||||
def __localPreCheck(self, func):
|
||||
def wrapper():
|
||||
return func()
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
class CheckItemFactory(object):
|
||||
@staticmethod
|
||||
def createItem(name, path, scope='all', analysis='default'):
|
||||
mod = imp.load_source(name, path)
|
||||
clazz = getattr(mod, name)
|
||||
checker = clazz()
|
||||
# set pre check method
|
||||
checker.setScope(scope)
|
||||
# set post analysis method
|
||||
checker.setAnalysis(analysis)
|
||||
return checker
|
||||
|
||||
@staticmethod
|
||||
def createFrom(name, path, context):
|
||||
mod = imp.load_source(name, path)
|
||||
clazz = getattr(mod, name)
|
||||
checker = clazz()
|
||||
checker.initFrom(context)
|
||||
return checker
|
||||
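To make the BaseItem contract above concrete, here is a hedged sketch of a custom check item; the class name and the CPU threshold are invented for illustration, and only the hooks shown in the listing (preCheck, doCheck, self.result) are used:

# Hypothetical check item built on BaseItem (illustration only).
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus
from gspylib.hardware.gscpu import g_cpu

class CheckCpuCountExample(BaseItem):
    def __init__(self):
        super(CheckCpuCountExample, self).__init__(name="CheckCpuCountExample")

    def preCheck(self):
        # Nothing to prepare for this illustrative item.
        pass

    def doCheck(self):
        # runCheck() later writes self.result to the tmp path via output().
        count = g_cpu.getCpuNum()
        self.result.val = "cpu count: %d" % count
        self.result.rst = ResultStatus.OK if count >= 2 else ResultStatus.WARNING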
251
script/gspylib/inspection/common/CheckResult.py
Normal file
251
script/gspylib/inspection/common/CheckResult.py
Normal file
@ -0,0 +1,251 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
#############################################################################
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import pwd
|
||||
from gspylib.inspection.common import SharedFuncs
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.inspection.common.Log import LoggerFactory
|
||||
|
||||
class GsCheckEncoder(json.JSONEncoder):
|
||||
def default(self, obj):
|
||||
if isinstance(obj, bytes):
|
||||
return str(obj, encoding='utf-8')
|
||||
return json.JSONEncoder.default(self, obj)
|
||||
|
||||
|
||||
class ResultStatus(object):
|
||||
OK = "OK"
|
||||
NA = "NA"
|
||||
WARNING = "WARNING"
|
||||
NG = "NG"
|
||||
ERROR = "ERROR"
|
||||
|
||||
|
||||
class LocalItemResult(object):
|
||||
'''
|
||||
the check result running on one host
|
||||
'''
|
||||
|
||||
def __init__(self, name, host):
|
||||
self.name = name
|
||||
self.host = host
|
||||
self.raw = ""
|
||||
self.rst = ResultStatus.NA
|
||||
self.val = ""
|
||||
self.checkID = None
|
||||
self.user = None
|
||||
|
||||
def output(self, outPath):
|
||||
u"""
|
||||
[HOST] {host}
|
||||
[NAM] {name}
|
||||
[RST] {rst}
|
||||
[VAL]
|
||||
{val}
|
||||
[RAW]
|
||||
{raw}
|
||||
"""
|
||||
|
||||
val = self.val if self.val else ""
|
||||
raw = self.raw if self.raw else ""
|
||||
try:
|
||||
content = self.output.__doc__.format(name=self.name, rst=self.rst,
|
||||
host=self.host, val=val,
|
||||
raw=raw)
|
||||
except Exception:
|
||||
content = self.output.__doc__.encode('utf-8').format(
|
||||
name=self.name, rst=self.rst, host=self.host, val=val,
|
||||
raw=raw).decode('utf-8', 'ignore')
|
||||
fileName = "%s_%s_%s.out" % (self.name, self.host, self.checkID)
|
||||
# output the result to local path
|
||||
SharedFuncs.writeFile(fileName, content, outPath,
|
||||
DefaultValue.KEY_FILE_MODE, self.user)
|
||||
|
||||
|
||||
class ItemResult(object):
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
self._items = []
|
||||
self.rst = ResultStatus.NA
|
||||
self.standard = ""
|
||||
self.suggestion = ""
|
||||
self.category = 'other'
|
||||
self.analysis = ""
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self._items)
|
||||
|
||||
def __getitem__(self, idx):
|
||||
return self._items[idx]
|
||||
|
||||
def append(self, val):
|
||||
self._items.append(val)
|
||||
|
||||
def formatOutput(self, detail=False):
|
||||
result = u"{name:.<25}...............{rst:.>6}".format(name=self.name,
|
||||
rst=self.rst)
|
||||
result += u"\r\n%s\r\n" % self.analysis
|
||||
return result
|
||||
|
||||
def getLocalItems(self):
|
||||
return self._items
|
||||
|
||||
@staticmethod
|
||||
def parse(output):
|
||||
itemResult = None
|
||||
localItemResult = None
|
||||
host = None
|
||||
idx = 0
|
||||
for line in output.splitlines():
|
||||
idx += 1
|
||||
if (idx == len(
|
||||
output.splitlines()) and localItemResult is not None):
|
||||
itemResult.append(localItemResult)
|
||||
current = line.strip()
|
||||
if (not current):
|
||||
continue
|
||||
if (current.startswith('[HOST]')):
|
||||
host = current.split()[1].strip()
|
||||
if (current.startswith('[NAM]')):
|
||||
name = current.split()[1].strip()
|
||||
if (itemResult is None):
|
||||
itemResult = ItemResult(name)
|
||||
if (localItemResult is not None):
|
||||
itemResult.append(localItemResult)
|
||||
localItemResult = LocalItemResult(current.split()[1].strip(),
|
||||
host)
|
||||
if (current.startswith('[RST]')):
|
||||
localItemResult.rst = current.split()[1].strip()
|
||||
if (current.startswith('[VAL]')):
|
||||
localItemResult.val = ItemResult.__parseMultiLine(
|
||||
output.splitlines()[idx:])
|
||||
if (current.startswith('[RAW]')):
|
||||
localItemResult.raw = ItemResult.__parseMultiLine(
|
||||
output.splitlines()[idx:])
|
||||
return itemResult
|
||||
|
||||
@staticmethod
|
||||
def __parseMultiLine(lines):
|
||||
vals = []
|
||||
starter = ('[HOST]', '[NAM]', '[RST]', '[VAL]', '[RAW]')
|
||||
for line in lines:
|
||||
current = line.strip()
|
||||
if (current.startswith(starter)):
|
||||
break
|
||||
else:
|
||||
vals.append(current)
|
||||
return "\n".join(vals)
|
||||
|
||||
|
||||
class CheckResult(object):
|
||||
def __init__(self):
|
||||
self._items = []
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self._items)
|
||||
|
||||
def __getitem__(self, idx):
|
||||
return self._items[idx]
|
||||
|
||||
def append(self, val):
|
||||
self._items.append(val)
|
||||
|
||||
def outputStatistic(self):
|
||||
ok = 0
|
||||
warning = 0
|
||||
ng = 0
|
||||
error = 0
|
||||
for i in self._items:
|
||||
if (i.rst == ResultStatus.ERROR):
|
||||
error += 1
|
||||
elif (i.rst == ResultStatus.NG):
|
||||
ng += 1
|
||||
elif (i.rst == ResultStatus.WARNING):
|
||||
warning += 1
|
||||
else:
|
||||
ok += 1
|
||||
okMsg = " Success:%s " % ok if ok > 0 else ""
|
||||
warningMsg = " Warning:%s " % warning if warning > 0 else ""
|
||||
ngMsg = " NG:%s " % ng if ng > 0 else ""
|
||||
errorMsg = " Error:%s " % error if error > 0 else ""
|
||||
result = ""
|
||||
result += "Failed." if (ng + error) > 0 else "Success."
|
||||
result += "\tAll check items run completed. Total:%s %s %s %s %s" % (
|
||||
ok + warning + ng + error, okMsg, warningMsg, ngMsg, errorMsg)
|
||||
return result
|
||||
|
||||
def outputRaw(self):
|
||||
u"""
|
||||
{date} [NAM] {name}
|
||||
{date} [STD] {standard}
|
||||
{date} [RST] {rst}
|
||||
{val}
|
||||
{date} [RAW]
|
||||
{raw}
|
||||
"""
|
||||
|
||||
result = ""
|
||||
for i in self._items:
|
||||
for j in i._items:
|
||||
t = time.localtime(time.time())
|
||||
dateString = time.strftime("%Y-%m-%d %H:%M:%S", t)
|
||||
rst = j.rst
|
||||
if (j.rst == ResultStatus.NA):
|
||||
rst = "NONE"
|
||||
elif (
|
||||
j.rst == ResultStatus.WARNING
|
||||
or j.rst == ResultStatus.ERROR):
|
||||
rst = "NG"
|
||||
result += self.outputRaw.__doc__.format(date=dateString,
|
||||
name=j.name,
|
||||
standard=i.standard,
|
||||
rst=rst,
|
||||
val=j.val, raw=j.raw)
|
||||
result += "\r\n"
|
||||
return result
|
||||
|
||||
def outputResult(self):
|
||||
result = ""
|
||||
for i in self._items:
|
||||
result += i.formatOutput()
|
||||
result += "\r\n"
|
||||
result += self.outputStatistic()
|
||||
return result
|
||||
|
||||
def outputJson(self):
|
||||
resultDic = {}
|
||||
for itemResult in self._items:
|
||||
resultDic['name'] = itemResult.name
|
||||
resultDic['category'] = itemResult.category
|
||||
resultDic['std'] = itemResult.standard.decode('utf-8', 'ignore')
|
||||
resultDic['rst'] = itemResult.rst
|
||||
resultDic['analysis'] = itemResult.analysis
|
||||
resultDic['suggestion'] = itemResult.suggestion
|
||||
localList = []
|
||||
for localitem in itemResult:
|
||||
local = {}
|
||||
local['host'] = localitem.host
|
||||
local['rstd'] = localitem.val
|
||||
local['raw'] = localitem.raw
|
||||
localList.append(local)
|
||||
resultDic['hosts'] = localList
|
||||
return json.dumps(resultDic, cls=GsCheckEncoder, indent=2)
|
||||
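The [HOST]/[NAM]/[RST]/[VAL]/[RAW] layout emitted by LocalItemResult.output() can be parsed back with ItemResult.parse(); a small sketch with invented sample text:

# Hypothetical round trip for the result format (the sample text is made up).
from gspylib.inspection.common.CheckResult import ItemResult

sampleOutput = """
[HOST] node1
[NAM] CheckCpuCountExample
[RST] OK
[VAL]
cpu count: 8
[RAW]
"""

item = ItemResult.parse(sampleOutput)
# Prints the item name plus the single per-host record parsed from the text.
print(item.name, item[0].host, item[0].rst, item[0].val)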
184
script/gspylib/inspection/common/Exception.py
Normal file
184
script/gspylib/inspection/common/Exception.py
Normal file
@ -0,0 +1,184 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
#############################################################################
|
||||
import sys
|
||||
|
||||
|
||||
class CheckException(Exception):
|
||||
def __init__(self, content):
|
||||
self.code = "GAUSS-53000"
|
||||
self.content = content
|
||||
|
||||
def __str__(self):
|
||||
return "[%s]: ERROR: " % self.code + self.content
|
||||
|
||||
|
||||
class ParameterException(CheckException):
|
||||
def __init__(self, content):
|
||||
self.code = "GAUSS-53012"
|
||||
self.content = "Errors occurred when parsing parameters: %s." % content
|
||||
|
||||
|
||||
class UnknownParameterException(CheckException):
|
||||
def __init__(self, param):
|
||||
self.code = "GAUSS-53013"
|
||||
self.content = "Unknown parameters were set: %s." % param
|
||||
|
||||
|
||||
class EmptyParameterException(CheckException):
|
||||
def __init__(self):
|
||||
self.code = "GAUSS-53014"
|
||||
self.content = "The parameters cannot be empty."
|
||||
|
||||
|
||||
class UseBothParameterException(CheckException):
|
||||
def __init__(self, params):
|
||||
self.code = "GAUSS-53015"
|
||||
self.content = \
|
||||
" The parameter '-%s' and '-%s' can not be used together." % (
|
||||
params[0], params[1])
|
||||
|
||||
|
||||
class AvailableParameterException(CheckException):
|
||||
def __init__(self, parent, subs):
|
||||
self.code = "GAUSS-53016"
|
||||
self.content = " The parameter '%s' were not available for '%s'." % (
|
||||
",".join(subs), parent)
|
||||
|
||||
|
||||
class SceneNotFoundException(CheckException):
|
||||
def __init__(self, scene, supportScenes):
|
||||
self.code = "GAUSS-53017"
|
||||
self.content = \
|
||||
"The scene %s and its configuaration file scene_%s.xml " \
|
||||
"were not found in config folder." % (
|
||||
scene, scene) + "\nThe support scenes is: [%s]" % ",".join(
|
||||
supportScenes)
|
||||
|
||||
|
||||
class ParseItemException(CheckException):
|
||||
def __init__(self, items):
|
||||
self.code = "GAUSS-53017"
|
||||
self.content = \
|
||||
"There were errors when parsing these items: %s." % ",".join(
|
||||
items) + \
|
||||
" maybe items name is incorrect."
|
||||
|
||||
|
||||
class NotEmptyException(CheckException):
|
||||
def __init__(self, elem, detail=""):
|
||||
self.code = "GAUSS-53018"
|
||||
self.content = "The %s cannot be empty. %s" % (elem, detail)
|
||||
|
||||
|
||||
class NotExistException(CheckException):
|
||||
def __init__(self, elem, List):
|
||||
self.code = "GAUSS-53019"
|
||||
self.content = "The %s does not exist in %s." % (elem, List)
|
||||
|
||||
|
||||
class InterruptException(CheckException):
|
||||
def __init__(self):
|
||||
self.code = "GAUSS-53020"
|
||||
self.content = \
|
||||
"The checking process was interrupted by user with Ctrl+C command"
|
||||
|
||||
|
||||
class TrustException(CheckException):
|
||||
def __init__(self, hosts):
|
||||
self.code = "GAUSS-53021"
|
||||
self.content = "Faild to verified SSH trust on hosts: %s" % hosts
|
||||
|
||||
|
||||
class ShellCommandException(CheckException):
|
||||
def __init__(self, cmd, output):
|
||||
self.code = "GAUSS-53025"
|
||||
self.cmd = cmd
|
||||
self.output = output
|
||||
self.content = \
|
||||
"Execute Shell command faild: %s , the exception is: %s" % (
|
||||
self.cmd, self.output)
|
||||
|
||||
|
||||
class SshCommandException(CheckException):
|
||||
def __init__(self, host, cmd, output):
|
||||
self.code = "GAUSS-53026"
|
||||
self.cmd = cmd
|
||||
self.host = host
|
||||
self.output = output
|
||||
self.content = \
|
||||
"Execute SSH command on host %s faild. The exception is: %s" % (
|
||||
self.host, self.output)
|
||||
|
||||
|
||||
class SQLCommandException(CheckException):
|
||||
def __init__(self, sql, output):
|
||||
self.code = "GAUSS-53027"
|
||||
self.sql = sql
|
||||
self.output = output
|
||||
self.content = \
|
||||
"Execute SQL command faild: %s , the exception is: %s" % (
|
||||
self.sql, self.output)
|
||||
|
||||
|
||||
class TimeoutException(CheckException):
|
||||
def __init__(self, nodes):
|
||||
self.code = "GAUSS-53028"
|
||||
self.content = "The node[%s] execute timeout." % ",".join(nodes)
|
||||
|
||||
|
||||
class ThreadCheckException(CheckException):
|
||||
def __init__(self, thread, exception):
|
||||
self.code = "GAUSS-53020"
|
||||
if (isinstance(exception, ShellCommandException)
|
||||
or isinstance(exception, SQLCommandException)
|
||||
or isinstance(exception, SshCommandException)):
|
||||
output = exception.output
|
||||
elif (isinstance(exception, TimeoutException)):
|
||||
output = exception.content
|
||||
elif (isinstance(exception, CheckException)):
|
||||
output = exception.content
|
||||
else:
|
||||
output = str(exception)
|
||||
self.content = \
|
||||
"The thread %s running checking item but occurs errors: %s" % (
|
||||
thread, output)
|
||||
|
||||
|
||||
class ContextDumpException(CheckException):
|
||||
def __init__(self, errors):
|
||||
self.code = "GAUSS-53030"
|
||||
self.content = "Dumping context has errors: %s." % str(errors)
|
||||
|
||||
|
||||
class ContextLoadException(CheckException):
|
||||
def __init__(self, errors):
|
||||
self.code = "GAUSS-53031"
|
||||
self.content = "Loading context has errors: %s." % str(errors)
|
||||
|
||||
|
||||
class CheckErrorException(CheckException):
|
||||
def __init__(self):
|
||||
self.code = "GAUSS-53032"
|
||||
self.content = "An internal error occurred during the checking process"
|
||||
|
||||
|
||||
class CheckNAException(CheckException):
|
||||
def __init__(self, item):
|
||||
self.code = "GAUSS-53033"
|
||||
self.content = \
|
||||
"Check item %s are not needed at the current node" % item
|
||||
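A brief sketch of how these exception classes are meant to be raised and handled; the shell command is a placeholder:

# Hypothetical error-handling example for the inspection exceptions.
import subprocess
from gspylib.inspection.common.Exception import (CheckException,
                                                 ShellCommandException)

def runShell(cmd):
    status, output = subprocess.getstatusoutput(cmd)
    if status != 0:
        # Carries code GAUSS-53025 plus the failing command and its output.
        raise ShellCommandException(cmd, output)
    return output

try:
    runShell("ls /path/that/does/not/exist")
except CheckException as e:
    # __str__ renders "[GAUSS-53025]: ERROR: ..." for any CheckException subclass.
    print(str(e))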
181
script/gspylib/inspection/common/Log.py
Normal file
181
script/gspylib/inspection/common/Log.py
Normal file
@ -0,0 +1,181 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
#############################################################################
|
||||
import sys
|
||||
import pwd
|
||||
import time
|
||||
import subprocess
|
||||
import logging.handlers
|
||||
import os
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
|
||||
# max log file size
|
||||
# 16M
|
||||
MAXLOGFILESIZE = 16 * 1024 * 1024
|
||||
KEY_FILE_MODE = 600
|
||||
|
||||
|
||||
class LoggerFactory():
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def getLogger(module, logFile, user=""):
|
||||
"""
|
||||
function : config log handler
|
||||
input : module, logFileName, logLevel
|
||||
output : log
|
||||
"""
|
||||
afilename = LoggerFactory.getLogFileName(os.path.realpath(logFile))
|
||||
if (not os.path.exists(afilename)):
|
||||
dirName = os.path.dirname(afilename)
|
||||
cmd = "if [ ! -d %s ]; then mkdir %s -p -m %s;fi" % (
|
||||
dirName, dirName, DefaultValue.KEY_DIRECTORY_MODE)
|
||||
cmd += ";touch %s && chmod %s %s" % (
|
||||
afilename, KEY_FILE_MODE, afilename)
|
||||
# The user exists and is not the current user
|
||||
if (user and pwd.getpwnam(user).pw_uid != os.getuid()):
|
||||
cmd = "su - %s -c \"%s\" " % (user, cmd)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50206"]
|
||||
% ("log file [%s]" % afilename) +
|
||||
"Error:\n%s" % output +
|
||||
"The cmd is %s " % cmd)
|
||||
log = logging.getLogger(module)
|
||||
LoggerFactory._addFileHandle(log, afilename)
|
||||
LoggerFactory._addConsoleHandle(log)
|
||||
return (log, afilename)
|
||||
|
||||
@staticmethod
|
||||
def getLogFileName(oldLogFile):
|
||||
"""
|
||||
function : Increase the time stamp and check the file size
|
||||
input : logFileName
|
||||
output : String
|
||||
"""
|
||||
# get current time
|
||||
currentTime = time.strftime("%Y-%m-%d_%H%M%S")
|
||||
# Check log file correctness
|
||||
dirName = os.path.dirname(oldLogFile)
|
||||
originalFileName = os.path.basename(oldLogFile)
|
||||
resList = originalFileName.split(".")
|
||||
if (len(resList) > 2):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50235"] % oldLogFile)
|
||||
(prefix, suffix) = os.path.splitext(originalFileName)
|
||||
if (suffix != ".log"):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50212"]
|
||||
% (oldLogFile, ".log"))
|
||||
# The log file have time stamped in -L mode
|
||||
if (len(originalFileName) > 21):
|
||||
timeStamp = originalFileName[-21:-4]
|
||||
if (LoggerFactory.is_valid_date(timeStamp)):
|
||||
return oldLogFile
|
||||
|
||||
# Defaults log file
|
||||
newLogFile = dirName + "/" + prefix + "-" + currentTime + suffix
|
||||
if (os.path.isdir(dirName)):
|
||||
# Check old log file list
|
||||
cmd = "ls %s | grep '^%s-' | grep '%s$'" % (
|
||||
dirName, prefix, suffix)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status == 0):
|
||||
filenameList = []
|
||||
for echoLine in output.split("\n"):
|
||||
filename = echoLine.strip()
|
||||
existedResList = filename.split(".")
|
||||
if (len(existedResList) > 2):
|
||||
continue
|
||||
existedSuffix = os.path.splitext(filename)[1]
|
||||
if (existedSuffix != ".log"):
|
||||
continue
|
||||
if (len(originalFileName) + 18 != len(filename)):
|
||||
continue
|
||||
timeStamp = filename[-21:-4]
|
||||
# check log file name
|
||||
if (LoggerFactory.is_valid_date(timeStamp)):
|
||||
pass
|
||||
else:
|
||||
continue
|
||||
# Add the valid log file
|
||||
filenameList.append(filename)
|
||||
|
||||
if (len(filenameList)):
|
||||
fileName = max(filenameList)
|
||||
logFile = dirName + "/" + fileName.strip()
|
||||
# check if need switch to an new log file
|
||||
size = os.path.getsize(logFile)
|
||||
if (size <= MAXLOGFILESIZE):
|
||||
newLogFile = logFile
|
||||
return newLogFile
|
||||
|
||||
@staticmethod
|
||||
def is_valid_date(datastr):
|
||||
'''
|
||||
Judge if date valid
|
||||
'''
|
||||
try:
|
||||
time.strptime(datastr, "%Y-%m-%d_%H%M%S")
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def getScriptLogger():
|
||||
filePath = os.path.split(os.path.realpath(__file__))[0]
|
||||
afilename = "%s/../output/log/script_%s.log" % (
|
||||
filePath, DefaultValue.GetHostIpOrName())
|
||||
|
||||
log = logging.getLogger()
|
||||
LoggerFactory._addFileHandle(log, afilename)
|
||||
return log
|
||||
|
||||
@staticmethod
|
||||
def _addFileHandle(log, fileName):
|
||||
# create log file
|
||||
if not os.path.exists(os.path.dirname(fileName)):
|
||||
dir_permission = 0o700
|
||||
os.makedirs(os.path.dirname(fileName), mode=dir_permission)
|
||||
else:
|
||||
if oct(os.stat(fileName).st_mode)[-3:] != '600':
|
||||
os.chmod(fileName, DefaultValue.KEY_FILE_PERMISSION)
|
||||
|
||||
fmt = logging.Formatter(
|
||||
'[%(asctime)s][%(filename)s][line:%(lineno)d][%(levelname)s] '
|
||||
'%(message)s',
|
||||
'%Y-%m-%d %H:%M:%S')
|
||||
# output the log to a file
|
||||
# 16M takes precedence over 20M, Here cut the file does not trigger
|
||||
rthandler = logging.handlers.RotatingFileHandler(
|
||||
fileName,
|
||||
maxBytes=20 * 1024 * 1024,
|
||||
backupCount=2)
|
||||
rthandler.setFormatter(fmt)
|
||||
rthandler.setLevel(logging.DEBUG)
|
||||
log.handlers = []
|
||||
log.addHandler(rthandler)
|
||||
|
||||
@staticmethod
|
||||
def _addConsoleHandle(log):
|
||||
fmt = logging.Formatter('%(message)s')
|
||||
# output the log to screen the same time
|
||||
console = logging.StreamHandler()
|
||||
console.setFormatter(fmt)
|
||||
console.setLevel(logging.INFO)
|
||||
log.addHandler(console)
|
||||
log.setLevel(logging.DEBUG)
|
||||
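A minimal usage sketch for LoggerFactory, assuming a writable log path (the path below is invented; callers in the tool normally derive it from DefaultValue):

# Hypothetical caller for Log.LoggerFactory.
from gspylib.inspection.common.Log import LoggerFactory

log, logFile = LoggerFactory.getLogger("gs_check_demo",
                                       "/tmp/gs_check/gs_check_demo.log")
log.info("checking started, log file: %s" % logFile)    # console + file
log.debug("details recorded only by the file handler")  # file only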
112
script/gspylib/inspection/common/ProgressBar.py
Normal file
112
script/gspylib/inspection/common/ProgressBar.py
Normal file
@ -0,0 +1,112 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
#############################################################################
|
||||
import re
|
||||
import sys
|
||||
import threading
|
||||
|
||||
CLEAR_TO_END = "\033[K"
|
||||
UP_ONE_LINE = "\033[F"
|
||||
|
||||
|
||||
class ProgressBar(object):
|
||||
def __init__(self, width=25, title=''):
|
||||
self.width = width
|
||||
self.title = ProgressBar.filter_str(title)
|
||||
self._lock = threading.Lock()
|
||||
|
||||
@property
|
||||
def lock(self):
|
||||
return self._lock
|
||||
|
||||
def update(self, progress=0):
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def filter_str(pending_str):
|
||||
"""remove strings like \r \t \n"""
|
||||
return re.sub(pattern=r'\r|\t|\n', repl='', string=pending_str)
|
||||
|
||||
|
||||
class LineProgress(ProgressBar):
|
||||
def __init__(self, total=100, symbol='#', width=25, title=''):
|
||||
"""
|
||||
@param total : count of progress bar
|
||||
@param symbol : symbol to show
|
||||
@param width : width of progress bar
|
||||
@param title : text before progress bar
|
||||
"""
|
||||
super(LineProgress, self).__init__(width=width, title=title)
|
||||
self.total = total
|
||||
self.symbol = symbol
|
||||
self._current_progress = 0
|
||||
|
||||
def update(self, progress=0):
|
||||
"""
|
||||
@param progress : current value
|
||||
"""
|
||||
with self.lock:
|
||||
if progress > 0:
|
||||
self._current_progress = float(progress)
|
||||
sys.stdout.write('\r' + CLEAR_TO_END)
|
||||
hashes = '=' * int(
|
||||
self._current_progress // self.total * self.width)
|
||||
spaces = ' ' * (self.width - len(hashes))
|
||||
sys.stdout.write("\r%-25s [%s] %d/%d" % (
|
||||
self.title, hashes + spaces, self._current_progress,
|
||||
self.total))
|
||||
|
||||
|
||||
class MultiProgressManager(object):
|
||||
def __new__(cls, *args, **kwargs):
|
||||
"""singleton"""
|
||||
if not hasattr(cls, '_instance'):
|
||||
cls._instance = super(MultiProgressManager, cls).__new__(cls)
|
||||
return cls._instance
|
||||
|
||||
def __init__(self):
|
||||
self._progress_dict = {}
|
||||
self._lock = threading.Lock()
|
||||
|
||||
def put(self, key, progress_bar):
|
||||
with self._lock:
|
||||
if key and progress_bar:
|
||||
self._progress_dict[key] = progress_bar
|
||||
progress_bar.index = len(self._progress_dict) - 1
|
||||
|
||||
def clear(self):
|
||||
with self._lock:
|
||||
self._progress_dict.clear()
|
||||
|
||||
def update(self, key, progress):
|
||||
"""
|
||||
@param key : progress bar key
|
||||
@param progress : value
|
||||
"""
|
||||
with self._lock:
|
||||
if not key:
|
||||
return
|
||||
delta_line = len(self._progress_dict)
|
||||
sys.stdout.write(
|
||||
UP_ONE_LINE * delta_line if delta_line > 0 else '')
|
||||
for tmp_key in self._progress_dict.keys():
|
||||
progress_bar = self._progress_dict.get(tmp_key)
|
||||
tmp_progress = 0
|
||||
if key == tmp_key:
|
||||
tmp_progress = progress
|
||||
progress_bar.update(tmp_progress)
|
||||
sys.stdout.write('\n')
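# --- Illustrative usage sketch (not part of the original file). A minimal
# demo that drives two LineProgress bars through the MultiProgressManager
# singleton; it assumes stdout is a terminal that honours the ANSI codes
# defined above, and the node names are placeholders.
def _demo_progress_bars():
    """Hedged demo only; not invoked by the inspection framework."""
    import time
    manager = MultiProgressManager()
    manager.put('node1', LineProgress(total=100, title='node1'))
    manager.put('node2', LineProgress(total=100, title='node2'))
    for value in range(0, 101, 20):
        manager.update('node1', value)
        manager.update('node2', max(0, value - 10))
        time.sleep(0.2)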
|
||||
974
script/gspylib/inspection/common/SharedFuncs.py
Normal file
@ -0,0 +1,974 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
#############################################################################
|
||||
import sys
|
||||
import subprocess
|
||||
import os
|
||||
import pwd
|
||||
import time
|
||||
import re
|
||||
import multiprocessing
|
||||
from datetime import datetime, timedelta
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.common.VersionInfo import VersionInfo
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from multiprocessing.pool import ThreadPool
|
||||
from gspylib.os.gsfile import g_file
|
||||
from gspylib.os.gsfile import g_Platform
|
||||
from gspylib.os.gsnetwork import g_network
|
||||
from gspylib.inspection.common.Exception import TrustException, \
|
||||
ShellCommandException, SshCommandException, SQLCommandException
|
||||
|
||||
localPath = os.path.dirname(__file__)
|
||||
sys.path.insert(0, localPath + "/../lib")
|
||||
|
||||
FILE_MODE = 640
|
||||
FILE_WRITE_MODE = 220
|
||||
DIRECTORY_MODE = 750
|
||||
KEY_FILE_MODE = 600
|
||||
KEY_DIRECTORY_MODE = 700
|
||||
MAX_FILE_NODE = 755
|
||||
MAX_DIRECTORY_NODE = 755
|
||||
INIT_FILE_SUSE = "/etc/init.d/boot.local"
|
||||
INIT_FILE_REDHAT = "/etc/rc.d/rc.local"
|
||||
|
||||
|
||||
def runShellCmd(cmd, user=None, mpprcFile=""):
|
||||
"""
|
||||
function: run shell cmd
|
||||
input : cmd, user, mpprcFile
|
||||
output : str
|
||||
"""
|
||||
if (mpprcFile):
|
||||
cmd = "source '%s'; %s" % (mpprcFile, cmd)
|
||||
# Set the output LANG to English
|
||||
cmd = "export LC_ALL=C; %s" % cmd
|
||||
# change user but can not be root user
|
||||
if (user and user != getCurrentUser()):
|
||||
cmd = "su - %s -c \"source /etc/profile 2>/dev/null; %s\"" % (
|
||||
user, cmd)
|
||||
cmd = cmd.replace("$", "\$")
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0 and DefaultValue.checkDockerEnv()):
|
||||
return output
|
||||
if (status != 0):
|
||||
raise ShellCommandException(cmd, output)
|
||||
return output
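# --- Illustrative sketch (not part of the original file): a typical call to
# runShellCmd; the command below is a harmless placeholder.
def _example_run_shell_cmd():
    """Hedged usage example for runShellCmd."""
    try:
        return runShellCmd("echo hello").strip()
    except ShellCommandException as err:
        # the exception carries the failed command and its output
        return "command failed: %s" % str(err)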
|
||||
|
||||
|
||||
def runSshCmd(cmd, host, user="", mpprcFile="", timeout=""):
|
||||
"""
|
||||
function: run ssh cmd
|
||||
input : cmd, host, user, mpprcFile, timeout
|
||||
output : str
|
||||
"""
|
||||
if (timeout):
|
||||
timeout = "-o ConnectTimeout=%s" % timeout
|
||||
if (mpprcFile):
|
||||
cmd = "source '%s'; %s" % (mpprcFile, cmd)
|
||||
# Set the output LANG to English
|
||||
cmd = "export LC_ALL=C; %s" % cmd
|
||||
# RedHat does not automatically source /etc/profile
|
||||
# but SuSE does source it when commands are executed remotely over ssh
|
||||
# Some environment variables are written in /etc/profile
|
||||
# when there is no separation of environment variables
|
||||
if (host == DefaultValue.GetHostIpOrName()):
|
||||
sshCmd = cmd
|
||||
else:
|
||||
sshCmd = "pssh -s -H %s %s 'source /etc/profile 2>/dev/null;%s'" % (
|
||||
host, timeout, cmd)
|
||||
if (user and user != getCurrentUser()):
|
||||
sshCmd = "su - %s -c \"%s\"" % (user, sshCmd)
|
||||
(status, output) = subprocess.getstatusoutput(sshCmd)
|
||||
if (status != 0):
|
||||
raise SshCommandException(host, sshCmd, output)
|
||||
return output
|
||||
|
||||
|
||||
def runSshCmdWithPwd(cmd, host, user="", passwd="", mpprcFile=""):
|
||||
"""
|
||||
function: run ssh cmd with password
|
||||
input : cmd, host, user, passwd, mpprcFile
|
||||
output : str
|
||||
"""
|
||||
# Environment variables separation
|
||||
if (mpprcFile):
|
||||
cmd = "source '%s'; %s" % (mpprcFile, cmd)
|
||||
ssh = None
|
||||
try:
|
||||
if (passwd):
|
||||
import paramiko
|
||||
cmd = "export LC_ALL=C; source /etc/profile 2>/dev/null; %s" % cmd
|
||||
ssh = paramiko.SSHClient()
|
||||
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
||||
# Remote Connection
|
||||
ssh.connect(host, 22, user, passwd)
|
||||
stdout, stderr = ssh.exec_command(cmd)[1:3]
|
||||
output = stdout.read()
|
||||
error = stderr.read()
|
||||
if error:
|
||||
raise SshCommandException(host, cmd, error)
|
||||
return output.decode()
|
||||
else:
|
||||
cmd = \
|
||||
"pssh -s -H %s \"export LC_ALL=C; " \
|
||||
"source /etc/profile 2>/dev/null; %s\"" % (
|
||||
host, cmd)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
raise SshCommandException(host, cmd, output)
|
||||
return output
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
finally:
|
||||
if (ssh):
|
||||
ssh.close()
|
||||
|
||||
|
||||
def runRootCmd(cmd, rootuser, passwd, mpprcFile=''):
|
||||
"""
|
||||
function: run root cmd
|
||||
input : cmd, rootuser, passwd, mpprcFile
|
||||
output : str
|
||||
"""
|
||||
if (mpprcFile):
|
||||
cmd = "source '%s'; %s" % (mpprcFile, cmd)
|
||||
ssh = None
|
||||
try:
|
||||
import paramiko
|
||||
cmd = "export LC_ALL=C; source /etc/profile 2>/dev/null; %s" % cmd
|
||||
ssh = paramiko.SSHClient()
|
||||
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
||||
ssh.connect('localhost', 22, rootuser, passwd)
|
||||
stdout, stderr = ssh.exec_command(cmd, get_pty=True)[1:3]
|
||||
output = stdout.read()
|
||||
error = stderr.read()
|
||||
if error:
|
||||
raise SshCommandException("localhost", cmd, error)
|
||||
return output
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
finally:
|
||||
if ssh:
|
||||
ssh.close()
|
||||
|
||||
|
||||
def verifyPasswd(host, user, pswd=None):
|
||||
"""
|
||||
function: verify password
|
||||
Connect to the remote node
|
||||
input : host, user, pswd
|
||||
output : bool
|
||||
"""
|
||||
import paramiko
|
||||
ssh = paramiko.Transport((host, 22))
|
||||
try:
|
||||
ssh.connect(username=user, password=pswd)
|
||||
return True
|
||||
except paramiko.AuthenticationException:
|
||||
return False
|
||||
finally:
|
||||
ssh.close()
|
||||
|
||||
|
||||
def cleanOutput(output):
|
||||
"""
|
||||
function: clean output
|
||||
clean warning or password message
|
||||
input : output
|
||||
output : str
|
||||
"""
|
||||
lines = output.splitlines()
|
||||
if (len(lines) == 0):
|
||||
return ''
|
||||
idx = 1
|
||||
for line in lines:
|
||||
if (line.lower().find('password:') != -1):
|
||||
break
|
||||
idx += 1
|
||||
return output if idx == len(lines) + 1 else "\n".join(lines[idx:])
|
||||
|
||||
|
||||
def runSqlCmdWithTimeOut(sql, user, host, port, tmpPath, database="postgres",
|
||||
mpprcFile="", needmpara=False, timeout=60):
|
||||
"""
|
||||
function: run sql cmd with timeout
|
||||
input : sql, user, host, port, tmpPath, database
|
||||
mpprcFile, needmpara, timeout
|
||||
output : str
|
||||
"""
|
||||
infoList = [
|
||||
[sql, user, host, port, tmpPath, database, mpprcFile, needmpara]]
|
||||
endTime = datetime.now() + timedelta(seconds=timeout)
|
||||
pool = ThreadPool(1)
|
||||
result = pool.map_async(executeSql, infoList)
|
||||
while datetime.now() < endTime:
|
||||
if (result._ready):
|
||||
pool.close()
|
||||
if (result._value[0] == "NO RESULT"):
|
||||
return ""
|
||||
elif (result._value[0].startswith("ERROR")):
|
||||
raise SQLCommandException(sql, result._value[0])
|
||||
else:
|
||||
return result._value[0]
|
||||
else:
|
||||
time.sleep(1)
|
||||
pool.close()
|
||||
raise SQLCommandException(
|
||||
sql,
|
||||
"Running timeout, exceed the limit %s seconds" % timeout)
|
||||
|
||||
|
||||
def executeSql(paraList):
|
||||
"""
|
||||
function: execute sql
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
sql = paraList[0]
|
||||
user = paraList[1]
|
||||
host = paraList[2]
|
||||
port = paraList[3]
|
||||
tmpPath = paraList[4]
|
||||
database = paraList[5]
|
||||
mpprcFile = paraList[6]
|
||||
needmpara = paraList[7]
|
||||
try:
|
||||
output = runSqlCmd(sql, user, host, port, tmpPath, database, mpprcFile,
|
||||
needmpara)
|
||||
if (not output):
|
||||
output = "NO RESULT"
|
||||
except Exception as e:
|
||||
output = "ERROR:%s" % (str(e))
|
||||
return output
|
||||
|
||||
|
||||
def runSqlCmd(sql, user, host, port, tmpPath, database="postgres",
|
||||
mpprcFile="", maintenance=False):
|
||||
"""
|
||||
function : Execute sql command
|
||||
input : String,String,String,int
|
||||
output : String
|
||||
"""
|
||||
database = database.replace('$', '\$')
|
||||
# Get the current time
|
||||
currentTime = time.strftime("%Y-%m-%d_%H%M%S")
|
||||
# Get the process ID
|
||||
pid = os.getpid()
|
||||
# init SQL query file
|
||||
sqlFile = os.path.join(tmpPath,
|
||||
"check_query.sql_%s_%s_%s" % (
|
||||
str(port), str(currentTime), str(pid)))
|
||||
# init SQL result file
|
||||
queryResultFile = os.path.join(tmpPath,
|
||||
"check_result.sql_%s_%s_%s" % (
|
||||
str(port), str(currentTime), str(pid)))
|
||||
# Clean up the file
|
||||
cleanFile("%s,%s" % (queryResultFile, sqlFile))
|
||||
|
||||
# create an empty sql query file
|
||||
try:
|
||||
cmd = "touch %s && chmod %s %s" % \
|
||||
(sqlFile, DefaultValue.MAX_DIRECTORY_MODE, sqlFile)
|
||||
runShellCmd(cmd, user, mpprcFile)
|
||||
except ShellCommandException as e:
|
||||
raise SQLCommandException(sql,
|
||||
"create sql query file failed." + e.output)
|
||||
|
||||
# write the SQL command into sql query file
|
||||
try:
|
||||
with open(sqlFile, 'w') as fp:
|
||||
fp.writelines(sql)
|
||||
except Exception as e:
|
||||
# Clean up the file
|
||||
cleanFile(sqlFile)
|
||||
raise SQLCommandException(sql,
|
||||
"write into sql query file failed. " + str(
|
||||
e))
|
||||
|
||||
# read the content of query result file.
|
||||
try:
|
||||
# init host
|
||||
hostPara = (
|
||||
"-h %s" % host) \
|
||||
if host != "" and host != "localhost" \
|
||||
and host != DefaultValue.GetHostIpOrName() else ""
|
||||
# build shell command
|
||||
cmd = "gsql %s -p %s -d %s -f %s --output %s -t -A -X" % (
|
||||
hostPara, port, database, sqlFile, queryResultFile)
|
||||
if (maintenance):
|
||||
cmd += ' -m'
|
||||
# Environment variables separation
|
||||
if mpprcFile != "":
|
||||
cmd = "source '%s' && " % mpprcFile + cmd
|
||||
# Execute the shell command
|
||||
output = runShellCmd(cmd, user)
|
||||
if findErrorInSqlFile(sqlFile, output):
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd
|
||||
+ "Error:\n%s" % output)
|
||||
|
||||
# Read the query result file
|
||||
fp = None
|
||||
with open(queryResultFile, 'r') as fp:
|
||||
rowList = fp.readlines()
|
||||
except Exception as e:
|
||||
cleanFile("%s,%s" % (queryResultFile, sqlFile))
|
||||
if isinstance(e, ShellCommandException):
|
||||
output = e.output
|
||||
else:
|
||||
output = str(e)
|
||||
raise SQLCommandException(sql, output)
|
||||
|
||||
# remove local sqlFile
|
||||
cleanFile("%s,%s" % (queryResultFile, sqlFile))
|
||||
|
||||
return "".join(rowList)[:-1]
|
||||
|
||||
|
||||
def runSqlSimplely(sql, user, host, port, tmpPath, database="postgres",
|
||||
mpprcFile="", needmpara=False):
|
||||
"""
|
||||
function : Execute sql command
|
||||
input : String,String,String,int
|
||||
output : String
|
||||
"""
|
||||
# Get the current time
|
||||
currentTime = time.strftime("%Y-%m-%d_%H%M%S")
|
||||
# Get the process ID
|
||||
pid = os.getpid()
|
||||
# init SQL query file
|
||||
sqlFile = os.path.join(tmpPath,
|
||||
"check_query.sql_%s_%s_%s" % (
|
||||
str(port), str(currentTime), str(pid)))
|
||||
|
||||
# Clean up the file
|
||||
if (os.path.exists(sqlFile)):
|
||||
cleanFile("%s" % (sqlFile))
|
||||
|
||||
# create an empty sql query file
|
||||
try:
|
||||
cmd = "touch %s && chmod %s %s" % \
|
||||
(sqlFile, DefaultValue.MAX_DIRECTORY_MODE, sqlFile)
|
||||
runShellCmd(cmd, user, mpprcFile)
|
||||
except ShellCommandException as e:
|
||||
raise SQLCommandException(sql, "create sql query file failed.")
|
||||
|
||||
# write the SQL command into sql query file
|
||||
try:
|
||||
with open(sqlFile, 'w') as fp:
|
||||
fp.writelines(sql)
|
||||
except Exception as e:
|
||||
# Clean up the file
|
||||
cleanFile(sqlFile)
|
||||
raise SQLCommandException(sql,
|
||||
"write into sql query file failed. " + str(
|
||||
e))
|
||||
|
||||
# read the content of query result file.
|
||||
try:
|
||||
# init host
|
||||
hostPara = (
|
||||
"-h %s" % host) \
|
||||
if host != "" and host != "localhost" else ""
|
||||
# build shell command
|
||||
if (needmpara):
|
||||
cmd = "gsql %s -p %s -d %s -f %s -m" % (
|
||||
hostPara, port, database, sqlFile)
|
||||
else:
|
||||
cmd = "gsql %s -p %s -d %s -f %s" % (
|
||||
hostPara, port, database, sqlFile)
|
||||
# Environment variables separation
|
||||
if mpprcFile != "":
|
||||
cmd = "source '%s' && " % mpprcFile + cmd
|
||||
# Execute the shell command
|
||||
output = runShellCmd(cmd, user)
|
||||
if findErrorInSqlFile(sqlFile, output):
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51400"] % cmd
|
||||
+ "Error:\n%s" % output)
|
||||
|
||||
# no separate result file is read here; the command output is returned
|
||||
except Exception as e:
|
||||
cleanFile("%s" % (sqlFile))
|
||||
if isinstance(e, ShellCommandException):
|
||||
output = e.output
|
||||
else:
|
||||
output = str(e)
|
||||
raise SQLCommandException(sql, output)
|
||||
|
||||
# remove local sqlFile
|
||||
cleanFile("%s" % (sqlFile))
|
||||
|
||||
return output
|
||||
|
||||
|
||||
def findErrorInSqlFile(sqlFile, output):
|
||||
"""
|
||||
function : Find error in the sql file
|
||||
input : String,String
|
||||
output : String
|
||||
"""
|
||||
GSQL_BIN_FILE = "gsql"
|
||||
# init flag
|
||||
ERROR_MSG_FLAG = "(ERROR|FATAL|PANIC)"
|
||||
GSQL_ERROR_PATTERN = "^%s:%s:(\d*): %s:.*" % (
|
||||
GSQL_BIN_FILE, sqlFile, ERROR_MSG_FLAG)
|
||||
pattern = re.compile(GSQL_ERROR_PATTERN)
|
||||
for line in output.split("\n"):
|
||||
line = line.strip()
|
||||
result = pattern.match(line)
|
||||
if (result is not None):
|
||||
return True
|
||||
return False
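# Illustrative note (not part of the original file): given the pattern above,
# an output line such as
#   gsql:/tmp/check_query.sql_5432_x_1:3: ERROR:  relation "t" does not exist
# makes findErrorInSqlFile() return True, while plain query results do not.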
|
||||
|
||||
|
||||
def cleanFile(fileName, hostname=""):
|
||||
"""
|
||||
function : remove file
|
||||
input : String,hostname
|
||||
output : NA
|
||||
"""
|
||||
fileList = fileName.split(",")
|
||||
cmd = ""
|
||||
for fileStr in fileList:
|
||||
if cmd != "":
|
||||
cmd += ';(if [ -f %s ];then rm -f %s;fi)' % (fileStr, fileStr)
|
||||
else:
|
||||
cmd = '(if [ -f %s ];then rm -f %s;fi)' % (fileStr, fileStr)
|
||||
if hostname == "":
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS-50207"] % "file"
|
||||
+ " Error: \n%s." % output
|
||||
+ "The cmd is %s " % cmd)
|
||||
else:
|
||||
sshCmd = "pssh -s -H %s '%s'" % (hostname, cmd)
|
||||
(status, output) = subprocess.getstatusoutput(sshCmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS-50207"] % "file"
|
||||
+ " Error: \n%s." % output
|
||||
+ "The cmd is %s " % sshCmd)
|
||||
|
||||
|
||||
def checkComplete(checkId, host, hostname, user, tmpPath, passwd=None):
|
||||
"""
|
||||
function: check whether has completed or not
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
cmd = "cd %s && ls -l |grep %s_%s.out|wc -l" % (tmpPath, hostname, checkId)
|
||||
if (is_local_node(host)):
|
||||
output = runShellCmd(cmd, user)
|
||||
elif (passwd):
|
||||
output = runSshCmdWithPwd(cmd, host, user, passwd)
|
||||
else:
|
||||
output = runSshCmd(cmd, host, user)
|
||||
if (len(output.splitlines()) > 1):
|
||||
output = output.splitlines()[-1]
|
||||
return output
|
||||
|
||||
|
||||
def getVersion():
|
||||
"""
|
||||
Get current file version by VersionInfo
|
||||
|
||||
"""
|
||||
return ("%s %s" % (sys.argv[0].split("/")[-1], VersionInfo.COMMON_VERSION))
|
||||
|
||||
|
||||
def createFolder(folderName, path, permission=DIRECTORY_MODE, user=""):
|
||||
# Folder path
|
||||
folderName = os.path.join(path, folderName)
|
||||
# Create a folder
|
||||
g_file.createDirectory(folderName, True, permission)
|
||||
# change owner
|
||||
if (user):
|
||||
g_file.changeOwner(user, folderName)
|
||||
return folderName
|
||||
|
||||
|
||||
def createFile(fileName, path, permission=FILE_MODE, user=""):
|
||||
# file path
|
||||
fileName = os.path.join(path, fileName)
|
||||
# Create a file
|
||||
g_file.createFile(fileName, True, permission)
|
||||
# change owner
|
||||
if (user):
|
||||
g_file.changeOwner(user, fileName)
|
||||
return fileName
|
||||
|
||||
|
||||
def chmodFile(fileName, permission=FILE_MODE, user=""):
|
||||
# Modify the file permissions
|
||||
g_file.changeMode(permission, fileName)
|
||||
if (user):
|
||||
g_file.changeOwner(user, fileName)
|
||||
|
||||
|
||||
def writeFile(fileName, content, path, permission=FILE_MODE, user=""):
|
||||
"""
|
||||
function: write file
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
filePath = os.path.join(path, fileName)
|
||||
# Create a file
|
||||
g_file.createFile(filePath, True, permission)
|
||||
# Modify the file permissions
|
||||
if (user):
|
||||
g_file.changeOwner(user, filePath)
|
||||
g_file.writeFile(filePath, [content])
|
||||
|
||||
|
||||
def readFile(fileName):
|
||||
# Get the contents of the file
|
||||
text = g_file.readFile(fileName)
|
||||
return "\n".join(text)
|
||||
|
||||
|
||||
def sendFile(fileName, host, user, path, passwd=None):
|
||||
# Copy files remotely
|
||||
t = None
|
||||
if (passwd):
|
||||
try:
|
||||
import paramiko
|
||||
t = paramiko.Transport((host, 22))
|
||||
t.connect(username=user, password=passwd)
|
||||
sftp = paramiko.SFTPClient.from_transport(t)
|
||||
sftp.put(fileName, os.path.join(path, os.path.basename(fileName)))
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
finally:
|
||||
if (t):
|
||||
t.close()
|
||||
else:
|
||||
if "HOST_IP" not in list(os.environ.keys()):
|
||||
host = "%s@%s" % (user, host)
|
||||
cmd = "pscp -H %s '%s' %s" % (host, fileName, path)
|
||||
if (os.getuid() == 0):
|
||||
cmd = "su - %s -c \"%s\"" % (user, cmd)
|
||||
runShellCmd(cmd)
|
||||
|
||||
|
||||
def receiveFile(fileName, host, user, path, passwd=None):
|
||||
# Receive remote files
|
||||
t = None
|
||||
if (passwd):
|
||||
try:
|
||||
import paramiko
|
||||
t = paramiko.Transport((host, 22))
|
||||
t.connect(username=user, password=passwd)
|
||||
sftp = paramiko.SFTPClient.from_transport(t)
|
||||
if (type(fileName) == list):
|
||||
for fname in fileName:
|
||||
sftp.get(fname,
|
||||
os.path.join(path, os.path.basename(fname)))
|
||||
else:
|
||||
sftp.get(fileName, os.path.join(path, fileName))
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
finally:
|
||||
if (t):
|
||||
t.close()
|
||||
else:
|
||||
if "HOST_IP" not in list(os.environ.keys()):
|
||||
host = "%s@%s" % (user, host)
|
||||
cmd = "pssh -s -H %s 'pscp -H %s %s %s' " % (
|
||||
host, DefaultValue.GetHostIpOrName(), fileName, path)
|
||||
if (os.getuid() == 0):
|
||||
cmd = "su - %s -c \"%s\"" % (user, cmd)
|
||||
runShellCmd(cmd)
|
||||
|
||||
|
||||
def getCurrentUser():
|
||||
return pwd.getpwuid(os.getuid())[0]
|
||||
|
||||
|
||||
def verifyTrust(hosts, user):
|
||||
"""
|
||||
function: Ensure the proper password-less access to the remote host.
|
||||
input : hostname
|
||||
output: True/False
|
||||
"""
|
||||
try:
|
||||
pool = ThreadPool(multiprocessing.cpu_count())
|
||||
params = zip(hosts, [user] * len(hosts))
|
||||
results = pool.map(lambda x: checkAuthentication(x[0], x[1]), params)
|
||||
pool.close()
|
||||
pool.join()
|
||||
hostnames = ""
|
||||
for (key, value) in results:
|
||||
if (not key):
|
||||
hostnames = hostnames + ',' + value
|
||||
if (hostnames != ""):
|
||||
raise TrustException(hostnames)
|
||||
except Exception:
|
||||
raise TrustException(",".join(hosts))
|
||||
return True
|
||||
|
||||
|
||||
def checkAuthentication(host, user):
|
||||
"""
|
||||
function: check authentication
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
cmd = 'pssh -s -H %s true' % host
|
||||
try:
|
||||
runSshCmd(cmd, host, user)
|
||||
except Exception:
|
||||
return (False, host)
|
||||
return (True, host)
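# --- Illustrative sketch (not part of the original file): verifying
# passwordless ssh to a hypothetical host list before running remote checks.
def _example_verify_trust():
    """Hedged usage example; the host names below are placeholders."""
    try:
        return verifyTrust(["node1", "node2"], "omm")
    except TrustException as err:
        return "trust not established: %s" % str(err)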
|
||||
|
||||
|
||||
def checkClusterUser(username, mpprcFile=''):
|
||||
"""
|
||||
function: check cluster user
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
try:
|
||||
pwd.getpwnam(username).pw_gid
|
||||
except Exception:
|
||||
return False
|
||||
mpprc = mpprcFile if mpprcFile else '~/.bashrc'
|
||||
cmd = "echo \"%s$GAUSS_ENV\" 2>/dev/null" % (
|
||||
"\\" if (username and username != getCurrentUser()) else "")
|
||||
try:
|
||||
output = runShellCmd(cmd, username, mpprc)
|
||||
gaussEnv = output.split("\n")[0]
|
||||
if not gaussEnv:
|
||||
return False
|
||||
except Exception:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def getMasterDnNum(user, mpprcFile):
|
||||
"""
|
||||
function : get cluster master DB number
|
||||
input : string, string
|
||||
output : List
|
||||
"""
|
||||
masterDnList = []
|
||||
cmd = "gs_om -t query |grep Primary"
|
||||
output = runShellCmd(cmd, user, mpprcFile)
|
||||
line = output.splitlines()[0]
|
||||
instanceinfo = line.split()
|
||||
for idx in range(len(instanceinfo)):
|
||||
if (instanceinfo[idx] == "Primary"):
|
||||
if (idx > 2 and instanceinfo[idx - 2].isdigit()):
|
||||
masterDnList.append(int(instanceinfo[idx - 2]))
|
||||
return masterDnList
|
||||
|
||||
|
||||
def checkBondMode(bondingConfFile):
|
||||
"""
|
||||
function : Check Bond mode
|
||||
input : String, bool
|
||||
output : List
|
||||
"""
|
||||
|
||||
netNameList = []
|
||||
cmd = "grep -w 'Bonding Mode' %s | awk -F ':' '{print $NF}'" \
|
||||
% bondingConfFile
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0 or output.strip() == ""):
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51403"] % "Bonding Mode" +
|
||||
"The cmd is %s " % cmd)
|
||||
cmd = "grep -w 'Slave Interface' %s | awk -F ':' '{print $NF}'" \
|
||||
% bondingConfFile
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
raise Exception(ErrorCode.GAUSS_514["GAUSS_51403"] %
|
||||
"Slave Interface" + "The cmd is %s " % cmd)
|
||||
|
||||
for networkname in output.split('\n'):
|
||||
netNameList.append(networkname.strip())
|
||||
return netNameList
|
||||
|
||||
|
||||
def is_local_node(host):
|
||||
"""
|
||||
function: check whether is or not local node
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
if (host == DefaultValue.GetHostIpOrName()):
|
||||
return True
|
||||
allNetworkInfo = g_network.getAllNetworkIp()
|
||||
for network in allNetworkInfo:
|
||||
if (host == network.ipAddress):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def validate_ipv4(ip_str):
|
||||
"""
|
||||
function: check whether is or not validate ipv4
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
sep = ip_str.split('.')
|
||||
if len(sep) != 4:
|
||||
return False
|
||||
for i, x in enumerate(sep):
|
||||
try:
|
||||
int_x = int(x)
|
||||
if int_x < 0 or int_x > 255:
|
||||
return False
|
||||
except ValueError:
|
||||
return False
|
||||
return True
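# Illustrative note (not part of the original file):
#   validate_ipv4("10.11.12.13")   -> True
#   validate_ipv4("10.11.12.256")  -> False  (octet out of range)
#   validate_ipv4("10.11.12")      -> False  (too few octets)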
|
||||
|
||||
|
||||
def SetLimitsConf(typeList, item, value, limitFile):
|
||||
"""
|
||||
function: set limits conf
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
for typeName in typeList:
|
||||
cmd = """sed -i '/^.* %s *%s .*$/d' %s &&
|
||||
echo "* %s %s %s" >> %s""" % (
|
||||
typeName, item, limitFile, typeName, item, value, limitFile)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
return "Failed to set variable '%s %s'. Error: \n%s." % (
|
||||
typeName, item, output) + "The cmd is %s " % cmd
|
||||
return "Success"
|
||||
|
||||
|
||||
def isSupportSystemOs():
|
||||
"""
|
||||
function: check whether is or not redhat
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
osName = g_Platform.dist()[0]
|
||||
if osName in ["redhat", "centos", "euleros", "openEuler"]:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def getInitFile():
|
||||
"""
|
||||
function: get init file
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
if isSupportSystemOs():
|
||||
return INIT_FILE_REDHAT
|
||||
else:
|
||||
return INIT_FILE_SUSE
|
||||
|
||||
|
||||
def getNICNum(ipAddress):
|
||||
"""
|
||||
function: get nic num
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
if g_Platform.isPlatFormEulerOSOrRHEL7X():
|
||||
cmd = "/sbin/ifconfig -a | grep -B1 \"inet %s \" | " \
|
||||
"grep -v \"inet %s \" | awk '{print $1}'" % (
|
||||
ipAddress, ipAddress)
|
||||
else:
|
||||
cmd = "/sbin/ifconfig -a | grep -B1 \"addr:%s \" | " \
|
||||
"grep -v \"addr:%s \" | awk '{print $1}'" % (
|
||||
ipAddress, ipAddress)
|
||||
output = runShellCmd(cmd)
|
||||
if g_Platform.isPlatFormEulerOSOrRHEL7X():
|
||||
return output.strip()[:-1]
|
||||
else:
|
||||
return output.strip()
|
||||
|
||||
|
||||
def getIpByHostName(host):
|
||||
"""
|
||||
function: get ip by hostname
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
ipList = g_file.readFile("/etc/hosts", host)
|
||||
pattern = re.compile(
|
||||
r'^[1-9 \t].*%s[ \t]*#Gauss.* IP Hosts Mapping' % host)
|
||||
for ipInfo in ipList:
|
||||
match = pattern.match(ipInfo.strip())
|
||||
if (match):
|
||||
return match.group().split(' ')[0].strip()
|
||||
# get local host by os function
|
||||
# Replace host with the IP address.
|
||||
hostIp = host
|
||||
return hostIp
|
||||
|
||||
|
||||
def isBond(netWorkNum):
|
||||
"""
|
||||
function: check whether is or not bond
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
bondingConfFile = "/proc/net/bonding/%s" % netWorkNum
|
||||
if g_Platform.isPlatFormEulerOSOrRHEL7X():
|
||||
cmd = "/sbin/ifconfig %s " \
|
||||
"| grep -E '\<ether\>' | awk -F ' ' '{print $2}'" % netWorkNum
|
||||
else:
|
||||
cmd = "/sbin/ifconfig %s " \
|
||||
"| grep -E '\<HWaddr\>' | awk -F ' ' '{print $NF}'" % netWorkNum
|
||||
MacAddr = runShellCmd(cmd)
|
||||
cmd = "/sbin/ifconfig -a | grep '\<%s\>' | wc -l" % MacAddr
|
||||
output = runShellCmd(cmd)
|
||||
MacAddrNum = int(output)
|
||||
if (MacAddrNum > 2 and os.path.exists(bondingConfFile)):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def getNetWorkConfFile(networkCardNum):
|
||||
"""
|
||||
function: get network conf file
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
SuSENetWorkConfPath = "/etc/sysconfig/network"
|
||||
RedHatNetWorkConfPath = "/etc/sysconfig/network-scripts"
|
||||
if isSupportSystemOs():
|
||||
NetWorkConfFile = "%s/ifcfg-%s" % (
|
||||
RedHatNetWorkConfPath, networkCardNum)
|
||||
else:
|
||||
NetWorkConfFile = "%s/ifcfg-%s" % (SuSENetWorkConfPath, networkCardNum)
|
||||
|
||||
if (not os.path.exists(NetWorkConfFile)):
|
||||
if isSupportSystemOs():
|
||||
cmd = "find %s -iname 'ifcfg-*-%s' -print" % (
|
||||
RedHatNetWorkConfPath, networkCardNum)
|
||||
else:
|
||||
cmd = "find %s -iname 'ifcfg-*-%s' -print" % (
|
||||
SuSENetWorkConfPath, networkCardNum)
|
||||
output = runShellCmd(cmd)
|
||||
if (DefaultValue.checkDockerEnv() and
|
||||
output.find("No such file or directory") >= 0):
|
||||
return output.strip()
|
||||
if (output.strip() == "" or len(output.split('\n')) != 1):
|
||||
if DefaultValue.checkDockerEnv():
|
||||
return ""
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"]
|
||||
% NetWorkConfFile)
|
||||
NetWorkConfFile = output.strip()
|
||||
return NetWorkConfFile
|
||||
|
||||
|
||||
def CheckNetWorkBonding(serviceIP):
|
||||
"""
|
||||
function: check network bonding
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
networkCardNum = getNICNum(serviceIP)
|
||||
NetWorkConfFile = getNetWorkConfFile(networkCardNum)
|
||||
if ((NetWorkConfFile.find("No such file or directory") >= 0
|
||||
or NetWorkConfFile == "") and DefaultValue.checkDockerEnv()):
|
||||
return "Shell command faild"
|
||||
bondingConfFile = "/proc/net/bonding/%s" % networkCardNum
|
||||
networkCardNumList = [networkCardNum]
|
||||
cmd = "grep -i 'BONDING_OPTS\|BONDING_MODULE_OPTS' %s" % NetWorkConfFile
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if ((status == 0) and (output.strip() != "")):
|
||||
if ((output.find("mode") > 0) and os.path.exists(bondingConfFile)):
|
||||
networkCardNumList = networkCardNumList + checkBondMode(
|
||||
bondingConfFile)
|
||||
else:
|
||||
raise Exception(ErrorCode.GAUSS_506["GAUSS_50611"] +
|
||||
"The cmd is %s " % cmd)
|
||||
return networkCardNumList
|
||||
|
||||
|
||||
|
||||
def getTHPandOSInitFile():
|
||||
"""
|
||||
function : We know that CentOS has the same init file
|
||||
and THP file as RedHat.
|
||||
input : NA
|
||||
output : String, String
|
||||
"""
|
||||
THPFile = "/sys/kernel/mm/transparent_hugepage/enabled"
|
||||
initFile = getOSInitFile()
|
||||
if (initFile == ""):
|
||||
raise Exception(ErrorCode.GAUSS_506["GAUSS_50618"]
|
||||
% "startup file of current OS")
|
||||
return (THPFile, initFile)
|
||||
|
||||
|
||||
def getOSInitFile():
|
||||
"""
|
||||
function : Get the OS initialization file
|
||||
input : NA
|
||||
output : String
|
||||
"""
|
||||
distname = g_Platform.dist()[0]
|
||||
systemd_system_dir = "/usr/lib/systemd/system/"
|
||||
systemd_system_file = "/usr/lib/systemd/system/gs-OS-set.service"
|
||||
# OS init file
|
||||
# now we only support SuSE and RHEL
|
||||
initFileSuse = "/etc/init.d/boot.local"
|
||||
initFileRedhat = "/etc/rc.d/rc.local"
|
||||
# system init file
|
||||
initSystemFile = "/usr/local/gauss/script/gauss-OS-set.sh"
|
||||
dirName = os.path.dirname(os.path.realpath(__file__))
|
||||
# Get the startup file of suse or redhat os
|
||||
if (os.path.isdir(systemd_system_dir)):
|
||||
if (not os.path.exists(systemd_system_file)):
|
||||
cmd = "cp '%s'/gs-OS-set.service '%s'; chmod %s '%s'" % (
|
||||
dirName, systemd_system_file, DefaultValue.KEY_FILE_MODE,
|
||||
systemd_system_file)
|
||||
runShellCmd(cmd)
|
||||
cmd = "systemctl enable gs-OS-set.service"
|
||||
runShellCmd(cmd)
|
||||
if (not os.path.exists(initSystemFile)):
|
||||
cmd = "mkdir -p '%s'" % os.path.dirname(initSystemFile)
|
||||
runShellCmd(cmd)
|
||||
g_file.createFileInSafeMode(initSystemFile)
|
||||
with open(initSystemFile, "w") as fp:
|
||||
fp.write("#!/bin/bash\n")
|
||||
cmd = "chmod %s '%s'" % (DefaultValue.KEY_FILE_MODE, initSystemFile)
|
||||
runShellCmd(cmd)
|
||||
return initSystemFile
|
||||
if (distname == "SuSE" and os.path.isfile(initFileSuse)):
|
||||
initFile = initFileSuse
|
||||
elif (distname in (
|
||||
"redhat", "centos", "euleros", "openEuler") and os.path.isfile(
|
||||
initFileRedhat)):
|
||||
initFile = initFileRedhat
|
||||
else:
|
||||
initFile = ""
|
||||
return initFile
|
||||
|
||||
|
||||
def getMaskByIP(IPAddr):
|
||||
"""
|
||||
function: get netMask by ip addr
|
||||
"""
|
||||
if g_Platform.isPlatFormEulerOSOrRHEL7X():
|
||||
cmd = "/sbin/ifconfig -a |grep -E '\<%s\>'| awk '{print $4}'" % IPAddr
|
||||
else:
|
||||
cmd = \
|
||||
"/sbin/ifconfig -a |grep -E '\<%s\>'| awk -F ':' '{print $NF}'" \
|
||||
% IPAddr
|
||||
netMask = runShellCmd(cmd)
|
||||
return netMask
|
||||
204
script/gspylib/inspection/common/TaskPool.py
Normal file
@ -0,0 +1,204 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
import sys
|
||||
import os
|
||||
import signal
|
||||
import threading
|
||||
from queue import Queue
|
||||
from gspylib.inspection.common.Exception import InterruptException
|
||||
|
||||
|
||||
class TaskThread(threading.Thread):
|
||||
def __init__(self, queWork, queResult, iTimeout):
|
||||
"""
|
||||
function: constructor
|
||||
"""
|
||||
threading.Thread.__init__(self)
|
||||
# timeout for fetching task
|
||||
self.m_iTimeout = iTimeout
|
||||
self.m_bRunning = True
|
||||
self.setDaemon(True)
|
||||
self.m_queWork = queWork
|
||||
self.m_queResult = queResult
|
||||
self.start()
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
function: run method
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
while self.m_bRunning:
|
||||
if Queue is None:
|
||||
break
|
||||
try:
|
||||
# fetch a task from the queue,
|
||||
# here the timeout parameter MUST be assigned,
|
||||
# otherwise get() will wait forever
|
||||
callableFun, args = self.m_queWork.get(timeout=self.m_iTimeout)
|
||||
# run the task
|
||||
Ret = callableFun(args[0])
|
||||
self.m_queResult.put(Ret)
|
||||
# if task queue is empty
|
||||
except Exception:
|
||||
self.m_bRunning = False
|
||||
continue
|
||||
|
||||
|
||||
class TaskPool:
|
||||
def __init__(self, iNumOfThreads, iTimeOut=1):
|
||||
"""
|
||||
function: constructor
|
||||
"""
|
||||
self.m_queWork = Queue()
|
||||
self.m_queResult = Queue()
|
||||
self.m_lstThreads = []
|
||||
self.m_iTimeOut = iTimeOut
|
||||
self.__createThreadPool(iNumOfThreads)
|
||||
|
||||
def __createThreadPool(self, iNumOfThreads):
|
||||
"""
|
||||
function: create thread pool
|
||||
input : iNumOfThreads
|
||||
output : NA
|
||||
"""
|
||||
for i in range(iNumOfThreads):
|
||||
aThread = TaskThread(self.m_queWork, self.m_queResult,
|
||||
self.m_iTimeOut)
|
||||
self.m_lstThreads.append(aThread)
|
||||
|
||||
# add a task into the thread pool
|
||||
def addTask(self, callableFunc, *args):
|
||||
"""
|
||||
function: add task
|
||||
input : callableFunc, *args
|
||||
output : NA
|
||||
"""
|
||||
self.m_queWork.put((callableFunc, list(args)))
|
||||
|
||||
# get one task executing result
|
||||
def getOneResult(self):
|
||||
"""
|
||||
function: get one result
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
try:
|
||||
# get a result from the queue,
|
||||
# get() will not return until a result is available
|
||||
aItem = self.m_queResult.get()
|
||||
return aItem
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
# notify all threads in the thread pool to exit
|
||||
def notifyStop(self):
|
||||
"""
|
||||
function: notify stop
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
for aThread in self.m_lstThreads:
|
||||
aThread.m_bRunning = False
|
||||
|
||||
# Wait for all threads in the thread pool to exit
|
||||
def waitForComplete(self):
|
||||
# wait all threads terminate
|
||||
while len(self.m_lstThreads):
|
||||
aThread = self.m_lstThreads.pop()
|
||||
# wait until the thread terminates
|
||||
if aThread.is_alive():
|
||||
aThread.join()
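# --- Illustrative usage sketch (not part of the original file): running a few
# independent tasks through the pool and collecting their results.
def _example_task_pool():
    """Hedged demo of TaskPool.addTask / getOneResult / waitForComplete."""
    def square(value):
        return value * value

    pool = TaskPool(2)
    for number in (1, 2, 3):
        pool.addTask(square, number)
    results = [pool.getOneResult() for _ in range(3)]
    pool.waitForComplete()
    return results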
|
||||
|
||||
|
||||
class Watcher:
|
||||
"""
|
||||
this class solves two problems with multithreaded
|
||||
programs in Python, (1) a signal might be delivered
|
||||
to any thread (which is just a malfeature) and (2) if
|
||||
the thread that gets the signal is waiting, the signal
|
||||
is ignored (which is a bug).
|
||||
|
||||
The watcher is a concurrent process (not thread) that
|
||||
waits for a signal and the process that contains the
|
||||
threads.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
Creates a child thread, which returns.
|
||||
The parent thread waits for a KeyboardInterrupt
|
||||
and then kills the child thread.
|
||||
"""
|
||||
self.child = os.fork()
|
||||
if self.child == 0:
|
||||
return
|
||||
else:
|
||||
self.watch()
|
||||
|
||||
def watch(self):
|
||||
"""
|
||||
function: watch
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
try:
|
||||
os.wait()
|
||||
except KeyboardInterrupt:
|
||||
# I put the capital B in KeyBoardInterrupt so I can
|
||||
# tell when the Watcher gets the SIGINT
|
||||
self.kill()
|
||||
raise InterruptException()
|
||||
sys.exit()
|
||||
|
||||
def kill(self):
|
||||
"""
|
||||
function: kill
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
os.kill(self.child, signal.SIGKILL)
|
||||
|
||||
|
||||
class CheckThread(threading.Thread):
|
||||
def __init__(self, name, func, *args):
|
||||
"""
|
||||
function: constructor
|
||||
"""
|
||||
super(CheckThread, self).__init__(name=name, target=func, args=args)
|
||||
self._stop_event = threading.Event()
|
||||
self.setDaemon(True)
|
||||
self.exitcode = 0
|
||||
self.exception = None
|
||||
self.name = name
|
||||
self.func = func
|
||||
self.args = args
|
||||
self.start()
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
function: run
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
try:
|
||||
self.func(*self.args)
|
||||
except Exception as e:
|
||||
self.exitcode = 1
|
||||
self.exception = e
|
||||
|
||||
def stop(self):
|
||||
"""
|
||||
function: stop
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
self._stop_event.set()
|
||||
|
||||
def stopped(self):
|
||||
"""
|
||||
function: stopped
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
return self._stop_event.is_set()
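# --- Illustrative sketch (not part of the original file): running one check
# function on its own thread and inspecting the outcome afterwards.
def _example_check_thread():
    """Hedged demo of CheckThread; the check function is a placeholder."""
    def fake_check(node_name):
        if not node_name:
            raise ValueError("empty node name")

    worker = CheckThread("check-node1", fake_check, "node1")
    worker.join()
    return worker.exitcode, worker.exception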
|
||||
0
script/gspylib/inspection/common/__init__.py
Normal file
62
script/gspylib/inspection/config/check_list_V1R6C10.conf
Normal file
@ -0,0 +1,62 @@
|
||||
#The file(check_list.conf) is the gs_check and gs_checkos configuration file.
|
||||
#The file is placed in $GPHOME/script/util
|
||||
|
||||
# the system control parameter
|
||||
[/etc/sysctl.conf]
|
||||
net.ipv4.tcp_max_tw_buckets = 10000
|
||||
net.ipv4.tcp_tw_reuse = 1
|
||||
net.ipv4.tcp_tw_recycle = 1
|
||||
net.ipv4.tcp_keepalive_time = 30
|
||||
net.ipv4.tcp_keepalive_intvl = 30
|
||||
net.ipv4.tcp_keepalive_probes = 9
|
||||
net.ipv4.tcp_retries2 = 80
|
||||
net.sctp.addip_enable = 0
|
||||
net.core.wmem_max = 21299200
|
||||
net.core.rmem_max = 21299200
|
||||
net.core.wmem_default = 21299200
|
||||
net.core.rmem_default = 21299200
|
||||
net.sctp.sctp_mem = 94500000 915000000 927000000
|
||||
net.sctp.sctp_rmem = 8192 250000 16777216
|
||||
net.sctp.sctp_wmem = 8192 250000 16777216
|
||||
kernel.sem = 250 6400000 1000 25600
|
||||
net.ipv4.tcp_rmem = 8192 250000 16777216
|
||||
net.ipv4.tcp_wmem = 8192 250000 16777216
|
||||
# vm.min_free_kbytes should be set to 5% of total system memory in real time; total system memory is obtained with: free -k|grep Mem|awk '{print $2}'.
|
||||
vm.min_free_kbytes = total_system_memory*5%
|
||||
net.core.netdev_max_backlog = 65535
|
||||
net.ipv4.tcp_max_syn_backlog = 65535
|
||||
net.core.somaxconn = 65535
|
||||
net.ipv4.tcp_syncookies = 1
|
||||
vm.overcommit_memory = 0
|
||||
vm.panic_on_oom = 0
|
||||
vm.oom_kill_allocating_task = 0
|
||||
net.sctp.sndbuf_policy = 0
|
||||
net.sctp.rcvbuf_policy = 0
|
||||
|
||||
# if a parameter value is not equal to the OS's value, print a warning instead of an error
|
||||
[SUGGEST:/etc/sysctl.conf]
|
||||
net.ipv4.tcp_fin_timeout = 60
|
||||
net.ipv4.tcp_sack = 1
|
||||
net.ipv4.tcp_timestamps = 1
|
||||
net.ipv4.tcp_retries1 = 5
|
||||
net.ipv4.tcp_syn_retries = 5
|
||||
net.ipv4.tcp_synack_retries = 5
|
||||
net.sctp.path_max_retrans = 10
|
||||
net.sctp.max_init_retransmits = 10
|
||||
net.sctp.association_max_retrans = 10
|
||||
net.sctp.hb_interval = 30000
|
||||
|
||||
# open file number, please set it to '1000000'
|
||||
[/etc/security/limits.conf]
|
||||
open files = 1000000
|
||||
stack size = 3072
|
||||
|
||||
# network parameter
|
||||
# if the level of network is greater or equal to 10000Mb/s, please set RX/TX to 4096;
|
||||
# we will check if the MTU is greater or equal to 1500, but gs_checkos does not set it.
|
||||
# else, skip it.
|
||||
[/sbin/ifconfig]
|
||||
MTU = 1500
|
||||
RX = 4096
|
||||
TX = 4096
|
||||
|
||||
181
script/gspylib/inspection/config/check_list_V1R7C00.conf
Normal file
@ -0,0 +1,181 @@
|
||||
#The file(check_list.conf) is the gs_check and gs_checkos configuration file.
|
||||
#The file is placed in $GPHOME/script/util
|
||||
|
||||
# the system control parameter
|
||||
[/etc/sysctl.conf]
|
||||
net.ipv4.tcp_max_tw_buckets = 10000
|
||||
net.ipv4.tcp_tw_reuse = 1
|
||||
net.ipv4.tcp_tw_recycle = 1
|
||||
net.ipv4.tcp_keepalive_time = 30
|
||||
net.ipv4.tcp_keepalive_intvl = 30
|
||||
net.ipv4.tcp_keepalive_probes = 9
|
||||
net.ipv4.tcp_retries2 = 80
|
||||
net.sctp.addip_enable = 0
|
||||
net.core.wmem_max = 21299200
|
||||
net.core.rmem_max = 21299200
|
||||
net.core.wmem_default = 21299200
|
||||
net.core.rmem_default = 21299200
|
||||
net.sctp.sctp_mem = 94500000 915000000 927000000
|
||||
net.sctp.sctp_rmem = 8192 250000 16777216
|
||||
net.sctp.sctp_wmem = 8192 250000 16777216
|
||||
kernel.sem = 250 6400000 1000 25600
|
||||
net.ipv4.tcp_rmem = 8192 250000 16777216
|
||||
net.ipv4.tcp_wmem = 8192 250000 16777216
|
||||
# vm.min_free_kbytes should be set to 5% of total system memory in real time; total system memory is obtained with: free -k|grep Mem|awk '{print $2}'.
|
||||
vm.min_free_kbytes = total_system_memory*5%
|
||||
net.core.netdev_max_backlog = 65535
|
||||
net.ipv4.tcp_max_syn_backlog = 65535
|
||||
net.core.somaxconn = 65535
|
||||
net.ipv4.tcp_syncookies = 1
|
||||
vm.overcommit_memory = 0
|
||||
vm.panic_on_oom = 0
|
||||
vm.oom_kill_allocating_task = 0
|
||||
net.sctp.sndbuf_policy = 0
|
||||
net.sctp.rcvbuf_policy = 0
|
||||
kernel.shmall = 1152921504606846720
|
||||
kernel.shmmax = 18446744073709551615
|
||||
|
||||
# if a parameter value is not equal to the OS's value, print a warning instead of an error
|
||||
[SUGGEST:/etc/sysctl.conf]
|
||||
net.ipv4.tcp_fin_timeout = 60
|
||||
net.ipv4.tcp_sack = 1
|
||||
net.ipv4.tcp_timestamps = 1
|
||||
net.ipv4.tcp_retries1 = 5
|
||||
net.ipv4.tcp_syn_retries = 5
|
||||
net.ipv4.tcp_synack_retries = 5
|
||||
net.sctp.path_max_retrans = 10
|
||||
net.sctp.max_init_retransmits = 10
|
||||
net.sctp.association_max_retrans = 10
|
||||
net.sctp.hb_interval = 30000
|
||||
vm.extfrag_threshold = 500
|
||||
vm.overcommit_ratio = 90
|
||||
SctpChecksumErrors = 0
|
||||
|
||||
# open file number, please set it to '1000000'
|
||||
[/etc/security/limits.conf]
|
||||
open files = 1000000
|
||||
stack size = 3072
|
||||
|
||||
# network parameter
|
||||
# if the level of network is greater or equal to 10000Mb/s, please set RX/TX to 4096;
|
||||
# we will check if the MTU is greater or equal to 1500, but gs_checkos does not set it.
|
||||
# else, skip it.
|
||||
[/sbin/ifconfig]
|
||||
MTU = 8192
|
||||
RX = 4096
|
||||
TX = 4096
|
||||
|
||||
#gs_check CheckGucConsistent guc parameter ignore list
|
||||
[guc_ignore]
|
||||
listen_addresses = 0
|
||||
local_bind_address = 0
|
||||
port = 0
|
||||
cstore_buffers = 0
|
||||
max_connections = 0
|
||||
shared_buffers = 0
|
||||
work_mem = 0
|
||||
maintenance_work_mem = 0
|
||||
data_replicate_buffer_size = 0
|
||||
pooler_port = 0
|
||||
log_directory = 0
|
||||
audit_directory = 0
|
||||
pgxc_node_name = 0
|
||||
ssd_cache_dir = 0
|
||||
enable_adio_function = 0
|
||||
enable_cstore_ssd_cache = 0
|
||||
unix_socket_directory = 0
|
||||
unix_socket_permissions = 0
|
||||
log_file_mode = 0
|
||||
max_coordinators = 0
|
||||
max_datanodes = 0
|
||||
enable_nestloop = 0
|
||||
enable_mergejoin = 0
|
||||
comm_tcp_mode = 0
|
||||
explain_perf_mode = 0
|
||||
log_line_prefix = 0
|
||||
max_active_statements = 0
|
||||
# Ip and port related
|
||||
comm_control_port = 0
|
||||
comm_sctp_port = 0
|
||||
replconninfo2 = 0
|
||||
replconninfo1 = 0
|
||||
# Instance directory related
|
||||
ident_file = 0
|
||||
config_file = 0
|
||||
hba_file = 0
|
||||
data_directory = 0
|
||||
archive_command = 0
|
||||
xc_maintenance_mode = 0
|
||||
|
||||
[guc_logic]
|
||||
allow_concurrent_tuple_update = 0
|
||||
prefetch_quantity = 0
|
||||
backwrite_quantity = 0
|
||||
cstore_prefetch_quantity = 0
|
||||
cstore_backwrite_max_threshold = 0
|
||||
cstore_backwrite_quantity = 0
|
||||
fast_extend_file_size = 0
|
||||
bgwriter_delay = 0
|
||||
bgwriter_lru_maxpages = 0
|
||||
bgwriter_flush_after = 0
|
||||
autovacuum_naptime = 0
|
||||
autovacuum_vacuum_threshold = 0
|
||||
autovacuum_analyze_threshold = 0
|
||||
autovacuum_vacuum_scale_factor = 0
|
||||
autovacuum_analyze_scale_factor = 0
|
||||
enable_stream_operator = 0
|
||||
enable_data_replicate = 0
|
||||
wal_keep_segments = 0
|
||||
wal_sender_timeout = 0
|
||||
wal_writer_delay = 0
|
||||
checkpoint_segments = 0
|
||||
checkpoint_timeout = 0
|
||||
checkpoint_warning = 0
|
||||
checkpoint_flush_after = 0
|
||||
checkpoint_wait_timeout = 0
|
||||
vacuum_cost_page_hit = 0
|
||||
vacuum_cost_page_miss = 0
|
||||
vacuum_cost_page_dirty = 0
|
||||
vacuum_cost_limit = 0
|
||||
vacuum_cost_delay = 0
|
||||
autovacuum_vacuum_cost_delay = 0
|
||||
autovacuum_vacuum_cost_limit = 0
|
||||
full_page_writes = 0
|
||||
fsync = 0
|
||||
io_limits = 0
|
||||
io_priority = 0
|
||||
bulk_write_ring_size = 0
|
||||
bulk_read_ring_size = 0
|
||||
partition_mem_batch = 0
|
||||
partition_max_cache_size = 0
|
||||
temp_file_limit = 0
|
||||
query_mem = 0
|
||||
maintenance_work_mem = 0
|
||||
synchronous_commit = 0
|
||||
work_mem = 0
|
||||
dynamic_memory_quota = 0
|
||||
temp_buffers = 0
|
||||
max_loaded_cudesc = 0
|
||||
wal_receiver_status_interval = 0
|
||||
wal_receiver_timeout = 0
|
||||
wal_receiver_connect_timeout = 0
|
||||
wal_receiver_connect_retries = 0
|
||||
wal_receiver_buffer_size = 0
|
||||
data_replicate_buffer_size = 0
|
||||
max_connections = 0
|
||||
max_files_per_process = 0
|
||||
shared_buffers = 0
|
||||
memorypool_size = 0
|
||||
cstore_buffers = 0
|
||||
UDFWorkerMemHardLimit = 0
|
||||
walsender_max_send_size = 0
|
||||
wal_buffers = 0
|
||||
max_wal_senders = 0
|
||||
autovacuum_freeze_max_age = 0
|
||||
autovacuum_max_workers = 0
|
||||
track_activity_query_size = 0
|
||||
event_source = 0
|
||||
zhparser_dict_in_memory = 0
|
||||
memorypool_enable = 0
|
||||
enable_memory_limit = 0
|
||||
|
||||
1982
script/gspylib/inspection/config/items.xml
Normal file
File diff suppressed because it is too large
@ -0,0 +1,9 @@
|
||||
<?xml version="1.0" encoding="utf-8" ?>
|
||||
<scene name="binary_upgrade" desc="check cluster parameters before binary upgrade.">
|
||||
<configuration/>
|
||||
|
||||
<allowitems>
|
||||
<item name="CheckSysTabSize"/>
|
||||
<item name="CheckXid"/>
|
||||
</allowitems>
|
||||
</scene>
|
||||
22
script/gspylib/inspection/config/scene_health.xml
Normal file
@ -0,0 +1,22 @@
|
||||
<?xml version="1.0" encoding="utf-8" ?>
|
||||
<scene name="default" desc="The tool performs an inspection for the cluster.">
|
||||
<configuration/>
|
||||
|
||||
<allowitems>
|
||||
<item name="CheckClusterState"/>
|
||||
<item name="CheckDirPermissions"/>
|
||||
<item name="CheckGaussVer"/>
|
||||
<item name="CheckIntegrity"/>
|
||||
<item name="CheckDebugSwitch"/>
|
||||
<item name="CheckEnvProfile"/>
|
||||
<item name="CheckDBParams"/>
|
||||
<item name="CheckDBConnection"/>
|
||||
<item name="CheckLockNum"/>
|
||||
<item name="CheckCursorNum"/>
|
||||
<item name="CheckCurConnCount"/>
|
||||
<item name="CheckInstallDiskUsage"/>
|
||||
<item name="CheckLogDiskUsage"/>
|
||||
<item name="CheckDataDiskUsage"/>
|
||||
<item name="CheckTmpDiskUsage"/>
|
||||
</allowitems>
|
||||
</scene>
|
||||
66
script/gspylib/inspection/config/scene_inspect.xml
Normal file
@ -0,0 +1,66 @@
|
||||
<?xml version="1.0" encoding="utf-8" ?>
|
||||
<scene name="default" desc="The tool performs an inspection for the cluster.">
|
||||
<configuration/>
|
||||
|
||||
<allowitems>
|
||||
<item name="CheckClusterState"/>
|
||||
<item name="CheckDBParams"/>
|
||||
<item name="CheckDebugSwitch"/>
|
||||
<item name="CheckDirPermissions"/>
|
||||
<item name="CheckReadonlyMode"/>
|
||||
<item name="CheckEnvProfile"/>
|
||||
<item name="CheckBlockdev"/>
|
||||
<item name="CheckCurConnCount"/>
|
||||
<item name="CheckCursorNum"/>
|
||||
<item name="CheckPgxcgroup"/>
|
||||
<item name="CheckDiskFormat"/>
|
||||
<item name="CheckSpaceUsage"/>
|
||||
<item name="CheckInodeUsage"/>
|
||||
<item name="CheckSwapMemory"/>
|
||||
<item name="CheckLogicalBlock"/>
|
||||
<item name="CheckIOrequestqueue"/>
|
||||
<item name="CheckMaxAsyIOrequests"/>
|
||||
<item name="CheckIOConfigure"/>
|
||||
<item name="CheckMTU"/>
|
||||
<item name="CheckPing"/>
|
||||
<item name="CheckRXTX"/>
|
||||
<item name="CheckNetWorkDrop"/>
|
||||
<item name="CheckMultiQueue"/>
|
||||
<item name="CheckEncoding"/>
|
||||
<item name="CheckFirewall"/>
|
||||
<item name="CheckKernelVer"/>
|
||||
<item name="CheckMaxHandle"/>
|
||||
<item name="CheckNTPD"/>
|
||||
<item name="CheckOSVer"/>
|
||||
<item name="CheckSysParams"/>
|
||||
<item name="CheckTHP"/>
|
||||
<item name="CheckTimeZone"/>
|
||||
<item name="CheckCPU"/>
|
||||
<item name="CheckSshdService"/>
|
||||
<item name="CheckSshdConfig"/>
|
||||
<item name="CheckCrondService"/>
|
||||
<item name="CheckStack"/>
|
||||
<item name="CheckNoCheckSum"/>
|
||||
<item name="CheckSysPortRange"/>
|
||||
<item name="CheckMemInfo"/>
|
||||
<item name="CheckHyperThread"/>
|
||||
<item name="CheckTableSpace"/>
|
||||
<item name="CheckSctpService"/>
|
||||
<item name="CheckSysadminUser"/>
|
||||
<item name="CheckGUCConsistent"/>
|
||||
<item name="CheckMaxProcMemory"/>
|
||||
<item name="CheckBootItems"/>
|
||||
<item name="CheckHashIndex"/>
|
||||
<item name="CheckPgxcRedistb"/>
|
||||
<item name="CheckNodeGroupName"/>
|
||||
<item name="CheckTDDate"/>
|
||||
<item name="CheckDilateSysTab"/>
|
||||
<item name="CheckKeyProAdj"/>
|
||||
<item name="CheckProStartTime"/>
|
||||
<item name="CheckFilehandle"/>
|
||||
<item name="CheckRouting"/>
|
||||
<item name="CheckNICModel"/>
|
||||
<item name="CheckDropCache"/>
|
||||
<item name="CheckMpprcFile"/>
|
||||
</allowitems>
|
||||
</scene>
|
||||
56
script/gspylib/inspection/config/scene_install.xml
Normal file
@ -0,0 +1,56 @@
|
||||
<?xml version="1.0" encoding="utf-8" ?>
|
||||
<scene name="install" desc="check cluster parameters before install.">
|
||||
<configuration/>
|
||||
|
||||
<allowitems>
|
||||
<item name="CheckDirLeft"/>
|
||||
<item name="CheckProcessLeft"/>
|
||||
<item name="CheckPortConflict"/>
|
||||
<item name="CheckOmmUserExist"/>
|
||||
<item name="CheckCrontabLeft"/>
|
||||
<item name="CheckSysPortRange"/>
|
||||
<item name="CheckStack"/>
|
||||
<item name="CheckCrondService"/>
|
||||
<item name="CheckSshdService"/>
|
||||
<item name="CheckSctpService"/>
|
||||
<item name="CheckSysParams">
|
||||
<threshold>
|
||||
version=V1R7C00
|
||||
</threshold>
|
||||
</item>
|
||||
<item name="CheckNoCheckSum"/>
|
||||
<item name="CheckDiskFormat"/>
|
||||
<item name="CheckEtcHosts"/>
|
||||
<item name="CheckHyperThread"/>
|
||||
<item name="CheckCpuCount"/>
|
||||
|
||||
<!--check os items-->
|
||||
<item name="CheckTimeZone"/>
|
||||
<item name="CheckEncoding"/>
|
||||
<item name="CheckKernelVer"/>
|
||||
<item name="CheckMaxHandle"/>
|
||||
<item name="CheckNTPD"/>
|
||||
<item name="CheckOSVer"/>
|
||||
<item name="CheckTHP"/>
|
||||
<item name="CheckSwapMemory"/>
|
||||
<item name="CheckBlockdev"/>
|
||||
<item name="CheckLogicalBlock"/>
|
||||
<item name="CheckIOrequestqueue"/>
|
||||
<item name="CheckMaxAsyIOrequests"/>
|
||||
<item name="CheckIOConfigure"/>
|
||||
<item name="CheckFirewall"/>
|
||||
<item name="CheckSpaceUsage"/>
|
||||
<item name="CheckInodeUsage"/>
|
||||
<item name="CheckDiskConfig"/>
|
||||
<item name="CheckMemInfo"/>
|
||||
<item name="CheckBootItems"/>
|
||||
|
||||
<item name="CheckMTU"/>
|
||||
<item name="CheckRXTX"/>
|
||||
<item name="CheckMultiQueue"/>
|
||||
<item name="CheckBond"/>
|
||||
<item name="CheckNetSpeed"/>
|
||||
<item name="CheckRouting"/>
|
||||
<item name="CheckNICModel"/>
|
||||
</allowitems>
|
||||
</scene>
|
||||
12
script/gspylib/inspection/config/scene_longtime.xml
Normal file
@ -0,0 +1,12 @@
|
||||
<?xml version="1.0" encoding="utf-8" ?>
|
||||
<scene name="longtime" desc="Check items will run for a long time.">
|
||||
<configuration/>
|
||||
|
||||
<allowitems>
|
||||
<item name="CheckDiskFailure"/>
|
||||
<item name="CheckUnAnalyzeTable"/>
|
||||
<item name="CheckTableSkew"/>
|
||||
<item name="CheckSpecialFile"/>
|
||||
<item name="CheckLargeFile"/>
|
||||
</allowitems>
|
||||
</scene>
|
||||
19
script/gspylib/inspection/config/scene_slow_node.xml
Normal file
@ -0,0 +1,19 @@
|
||||
<?xml version="1.0" encoding="utf-8" ?>
|
||||
<scene name="default" desc="The tool performs an inspection for the cluster.">
|
||||
<configuration/>
|
||||
|
||||
<allowitems>
|
||||
<item name="CheckProcMem"/>
|
||||
<item name="CheckCPU"/>
|
||||
<threshold>
|
||||
StandardCPUIdle=10;
|
||||
</threshold>
|
||||
<item name="CheckSlowDisk"/>
|
||||
<item name="CheckNetWorkDrop"/>
|
||||
<threshold>
|
||||
dropRate=1;
|
||||
</threshold>
|
||||
<item name="CheckClusterState"/>
|
||||
<item name="CheckDBConnection"/>
|
||||
</allowitems>
|
||||
</scene>
|
||||
60
script/gspylib/inspection/config/scene_upgrade.xml
Normal file
@ -0,0 +1,60 @@
<?xml version="1.0" encoding="utf-8" ?>
<scene name="upgrade" desc="check cluster parameters before upgrade.">
<configuration/>

<allowitems>
<item name="CheckClusterState"/>
<item name="CheckProcessStatus"/>
<item name="CheckGaussVer"/>
<item name="CheckDirPermissions"/>
<item name="CheckEnvProfile"/>
<item name="CheckCrondService"/>
<item name="CheckSshdService"/>
<item name="CheckPortRange"/>
<item name="CheckCatchup"/>
<item name="CheckArchiveParameter"/>
<item name="CheckReadonlyMode"/>
<item name="CheckIdleSession"/>
<item name="CheckPgxcgroup"/>
<item name="CheckPgPreparedXacts"/>
<item name="CheckTableSpace"/>
<item name="CheckSysParams">
<threshold>
version=V1R7C00
</threshold>
</item>
<item name="CheckNoCheckSum"/>
<item name="CheckGUCValue"/>
<item name="CheckStack"/>
<item name="CheckDiskFormat"/>
<item name="CheckPMKData"/>
<item name="CheckTimeZone"/>
<item name="CheckEncoding"/>
<item name="CheckKernelVer"/>
<item name="CheckMaxHandle"/>
<item name="CheckNTPD"/>
<item name="CheckOSVer"/>
<item name="CheckTHP"/>

<item name="CheckSpaceUsage"/>
<item name="CheckInodeUsage"/>
<item name="CheckSwapMemory"/>
<item name="CheckLogicalBlock"/>
<item name="CheckIOrequestqueue"/>
<item name="CheckIOConfigure"/>
<item name="CheckMaxAsyIOrequests"/>
<item name="CheckBlockdev"/>
<item name="CheckMemInfo"/>
<item name="CheckMTU"/>
<item name="CheckBond"/>
<item name="CheckMultiQueue"/>
<item name="CheckGUCConsistent"/>
<item name="CheckBootItems"/>
<item name="CheckDilateSysTab"/>
<item name="CheckHashIndex"/>
<item name="CheckTDDate"/>
<item name="CheckReturnType"/>
<item name="CheckCreateView"/>
<item name="CheckMpprcFile"/>
</allowitems>
</scene>
39
script/gspylib/inspection/items/cluster/CheckCatchup.py
Normal file
@ -0,0 +1,39 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
from gspylib.inspection.common import SharedFuncs
|
||||
from gspylib.inspection.common.CheckItem import BaseItem
|
||||
from gspylib.inspection.common.CheckResult import ResultStatus
|
||||
|
||||
|
||||
class CheckCatchup(BaseItem):
|
||||
def __init__(self):
|
||||
super(CheckCatchup, self).__init__(self.__class__.__name__)
|
||||
|
||||
def doCheck(self):
|
||||
cmd = "ps -ef |grep '^<%s\>' | grep '\<gaussdb\>' | grep -v grep |" \
|
||||
" awk '{print $2}' |(while read arg; do gstack $arg |" \
|
||||
" grep CatchupMain; done) 2>/dev/null" % self.user
|
||||
output = SharedFuncs.runShellCmd(cmd)
|
||||
if (output != ""):
|
||||
self.result.rst = ResultStatus.NG
|
||||
self.result.val = "The gatchdb process stack contains the" \
|
||||
" CatchupMain function."
|
||||
else:
|
||||
self.result.rst = ResultStatus.OK
|
||||
self.result.val = "The gatchdb process stack not contains" \
|
||||
" the CatchupMain function."
|
||||
self.result.raw = cmd
|
||||
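Note: every inspection item added in this commit follows the BaseItem pattern shown above: subclass BaseItem, pass the class name to the parent constructor, and fill self.result (rst, val, raw) inside doCheck(). A minimal sketch of a new item under that assumption (CheckExample and its command are illustrative only, not part of this commit; the framework runner that instantiates items is not shown here):

# Minimal sketch of a new inspection item, assuming the BaseItem contract above.
from gspylib.inspection.common import SharedFuncs
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus


class CheckExample(BaseItem):
    def __init__(self):
        super(CheckExample, self).__init__(self.__class__.__name__)

    def doCheck(self):
        # Run a shell command and map its output onto the check result.
        cmd = "echo ok"
        output = SharedFuncs.runShellCmd(cmd)
        self.result.raw = cmd
        if output.strip() == "ok":
            self.result.rst = ResultStatus.OK
            self.result.val = "Example check passed."
        else:
            self.result.rst = ResultStatus.NG
            self.result.val = "Example check failed: %s" % output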
81
script/gspylib/inspection/items/cluster/CheckClusterState.py
Normal file
@ -0,0 +1,81 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
import os
|
||||
import subprocess
|
||||
from gspylib.inspection.common import SharedFuncs
|
||||
from gspylib.inspection.common.CheckItem import BaseItem
|
||||
from gspylib.inspection.common.CheckResult import ResultStatus
|
||||
from gspylib.common.DbClusterStatus import DbClusterStatus
|
||||
from gspylib.common.Common import ClusterCommand
|
||||
from gspylib.os.gsfile import g_file
|
||||
|
||||
KEY_FILE_MODE = 600
|
||||
|
||||
|
||||
class CheckClusterState(BaseItem):
|
||||
def __init__(self):
|
||||
super(CheckClusterState, self).__init__(self.__class__.__name__)
|
||||
|
||||
def doCheck(self):
|
||||
tmpFile = os.path.join(self.tmpPath, "gauss_cluster_status.dat")
|
||||
tmpFileName = os.path.join(self.tmpPath, "abnormal_node_status.dat")
|
||||
try:
|
||||
self.result.val = ""
|
||||
self.result.raw = ""
|
||||
# Check the cluster status with cm_ctl
|
||||
cmd = ClusterCommand.getQueryStatusCmd(self.user, "", tmpFile)
|
||||
output = SharedFuncs.runShellCmd(cmd, self.user, self.mpprcFile)
|
||||
self.result.raw += output
|
||||
# Check whether the cluster needs to be balanced
|
||||
# Check whether redistribution is required
|
||||
# Initialize cluster status information for temporary file
|
||||
clusterStatus = DbClusterStatus()
|
||||
clusterStatus.initFromFile(tmpFile)
|
||||
# Get the status of cluster
|
||||
statusInfo = clusterStatus.getClusterStauts(self.user)
|
||||
self.result.val = statusInfo
|
||||
if clusterStatus.isAllHealthy():
|
||||
self.result.rst = ResultStatus.OK
|
||||
if os.path.exists(tmpFile):
|
||||
os.remove(tmpFile)
|
||||
return
|
||||
# If the abnormal node is present, create a temporary file
|
||||
# and print out the details
|
||||
g_file.createFile(tmpFileName, True, KEY_FILE_MODE)
|
||||
with open(tmpFileName, "w+") as tmpFileFp:
|
||||
for dbNode in clusterStatus.dbNodes:
|
||||
if not dbNode.isNodeHealthy():
|
||||
dbNode.outputNodeStatus(tmpFileFp, self.user, True)
|
||||
tmpFileFp.flush()
|
||||
tmpFileFp.seek(0)
|
||||
self.result.raw = tmpFileFp.read()
|
||||
if self.result.raw == "":
|
||||
self.result.raw = "Failed to obtain the cluster status."
|
||||
self.result.rst = ResultStatus.NG
|
||||
# Delete the temporary file
|
||||
if os.path.exists(tmpFileName):
|
||||
os.remove(tmpFileName)
|
||||
if os.path.exists(tmpFile):
|
||||
os.remove(tmpFile)
|
||||
except Exception as e:
|
||||
if os.path.exists(tmpFile):
|
||||
os.remove(tmpFile)
|
||||
if os.path.exists(tmpFileName):
|
||||
os.remove(tmpFileName)
|
||||
raise Exception(str(e))
|
||||
|
||||
|
||||
99
script/gspylib/inspection/items/cluster/CheckCollector.py
Normal file
@ -0,0 +1,99 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
import os
|
||||
from gspylib.inspection.common.CheckItem import BaseItem
|
||||
from gspylib.inspection.common import SharedFuncs
|
||||
from gspylib.inspection.common.CheckResult import ResultStatus
|
||||
from gspylib.os.gsfile import g_file
|
||||
from gspylib.common.Common import DefaultValue
|
||||
|
||||
SHELLPATH = os.path.realpath(
|
||||
os.path.join(os.path.split(os.path.realpath(__file__))[0],
|
||||
"../../lib/checkcollector/"))
|
||||
# file permission
|
||||
FILE_MODE = 700
|
||||
|
||||
|
||||
class CheckCollector(BaseItem):
|
||||
def __init__(self):
|
||||
super(CheckCollector, self).__init__(self.__class__.__name__)
|
||||
|
||||
def checkFilePermission(self, filename):
|
||||
"""
|
||||
Function : check file: 1.exist 2. isfile 3. permission
|
||||
Note : 1.You must check that the file exist and is a file.
|
||||
2.You can choose whether to check the file's
|
||||
permission:executable.
|
||||
"""
|
||||
# Check if the file exists
|
||||
if (not os.path.exists(filename)):
|
||||
raise Exception("The file %s does not exist." % filename)
|
||||
# Check whether the path is a regular file
|
||||
if (not os.path.isfile(filename)):
|
||||
raise Exception("%s is not file." % filename)
|
||||
# Check the file permissions
|
||||
# Modify the file permissions
|
||||
if (not os.access(filename, os.X_OK)):
|
||||
g_file.changeMode(DefaultValue.KEY_DIRECTORY_MODE, filename)
|
||||
|
||||
def genhostfile(self, nodenames):
|
||||
"""
|
||||
Function : generate host file
|
||||
"""
|
||||
iphostInfo = ""
|
||||
nodenameFile = "hostfile"
|
||||
# the path of script
|
||||
recordFile = os.path.join(SHELLPATH, nodenameFile)
|
||||
for nodename in nodenames:
|
||||
iphostInfo += '%s\n' % nodename
|
||||
|
||||
g_file.createFile(recordFile, True, DefaultValue.KEY_DIRECTORY_MODE)
|
||||
|
||||
# Write IP information to file
|
||||
g_file.writeFile(recordFile, [iphostInfo])
|
||||
|
||||
def doCheck(self):
|
||||
parRes = ""
|
||||
# generate hostfile file, server node name
|
||||
self.genhostfile(self.nodes)
|
||||
# shell name
|
||||
shellName = "getClusterInfo.sh"
|
||||
# the path of script
|
||||
shellName = os.path.join(SHELLPATH, shellName)
|
||||
# judge permission
|
||||
self.checkFilePermission(shellName)
|
||||
|
||||
g_file.replaceFileLineContent('omm', self.user, shellName)
|
||||
g_file.replaceFileLineContent(
|
||||
'\/opt\/huawei\/Bigdata\/mppdb\/.mppdbgs_profile',
|
||||
self.mpprcFile.replace('/', '\/'), shellName)
|
||||
# the shell command
|
||||
exectueCmd = "cd %s && sh %s -p %s" % (
|
||||
SHELLPATH, shellName, self.port)
|
||||
self.result.raw = exectueCmd
|
||||
# Call the shell script
|
||||
SharedFuncs.runShellCmd(exectueCmd, self.user, self.mpprcFile)
|
||||
self.result.rst = ResultStatus.OK
|
||||
packageName = os.path.join(self.outPath, "checkcollector_%s"
% self.context.checkID)
# create zip package
g_file.compressZipFiles(packageName, os.path.join(SHELLPATH, 'out'))
# Check the result information
parRes += "The inspection(checkcollector) has been completed!\n"
parRes += "Please decompress the package first." \
" The log is saved in '%s.zip'" % (packageName)
|
||||
self.result.val = parRes
|
||||
267
script/gspylib/inspection/items/cluster/CheckDBParams.py
Normal file
@ -0,0 +1,267 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
import os
|
||||
import math
|
||||
import subprocess
|
||||
from gspylib.inspection.common import SharedFuncs
|
||||
from gspylib.inspection.common.CheckItem import BaseItem
|
||||
from gspylib.inspection.common.CheckResult import ResultStatus
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
from gspylib.common.DbClusterStatus import DbClusterStatus
|
||||
from gspylib.common.Common import ClusterCommand
|
||||
|
||||
|
||||
class CheckDBParams(BaseItem):
|
||||
def __init__(self):
|
||||
super(CheckDBParams, self).__init__(self.__class__.__name__)
|
||||
|
||||
def doCheck(self):
|
||||
# Gets the current node information
|
||||
nodeInfo = self.cluster.getDbNodeByName(self.host)
|
||||
# Get the number of instances
|
||||
InatorsList = nodeInfo.datanodes
|
||||
# Get local primary DB id
|
||||
primaryDNidList = self.getLocalPrimaryDNid(nodeInfo)
|
||||
self.result.raw = ""
|
||||
# Determine if there are DB instances
|
||||
if (len(primaryDNidList) < 1):
|
||||
self.result.raw = "There is no primary database node " \
|
||||
"instance in the current node."
|
||||
self.result.rst = ResultStatus.OK
|
||||
return
|
||||
for inst in InatorsList:
|
||||
self.CheckGaussdbParameters(inst, nodeInfo, primaryDNidList)
|
||||
if (self.result.rst != ResultStatus.NG):
|
||||
self.result.rst = ResultStatus.OK
|
||||
|
||||
def getLocalPrimaryDNid(self, nodeInfo):
|
||||
"""
|
||||
function: Get local primary DNid
|
||||
input: NA
|
||||
output: NA
|
||||
"""
|
||||
tmpFile = os.path.join(self.tmpPath, "gauss_dn_status.dat")
|
||||
primaryDNidList = []
|
||||
try:
|
||||
# Use cm_ctl to query the current node instance
|
||||
cmd = ClusterCommand.getQueryStatusCmd(self.user, nodeInfo.name,
|
||||
tmpFile)
|
||||
SharedFuncs.runShellCmd(cmd, self.user, self.mpprcFile)
|
||||
# Match query results and cluster configuration
|
||||
clusterStatus = DbClusterStatus()
|
||||
clusterStatus.initFromFile(tmpFile)
|
||||
if (os.path.exists(tmpFile)):
|
||||
os.remove(tmpFile)
|
||||
# Find the master DB instance
|
||||
for dbNode in clusterStatus.dbNodes:
|
||||
for instance in dbNode.datanodes:
|
||||
if instance.status == 'Primary':
|
||||
primaryDNidList.append(instance.instanceId)
|
||||
return primaryDNidList
|
||||
except Exception as e:
|
||||
if (os.path.exists(tmpFile)):
|
||||
os.remove(tmpFile)
|
||||
raise Exception(str(e))
|
||||
|
||||
def CheckSingleGaussdbParameter(self, port, desc,
|
||||
INDENTATION_VALUE_INT=60):
|
||||
"""
|
||||
function: check gaussdb instance parameters
|
||||
input: int, string, int
|
||||
output: bool
|
||||
"""
|
||||
sqlResultFile = ""
|
||||
try:
|
||||
flag = True
|
||||
# Generate different temporary files when parallel
|
||||
# Identify by instance number
|
||||
# Remove parentheses from the instance number
|
||||
InstNum = desc.replace('(', '')
|
||||
InstNum = InstNum.replace(')', '')
|
||||
# get max connection number
|
||||
sqlcmd = "show max_connections;"
|
||||
output = SharedFuncs.runSqlCmd(sqlcmd, self.user, "", port,
|
||||
self.tmpPath, "postgres",
|
||||
self.mpprcFile)
|
||||
maxConnections = int(output)
|
||||
if (desc.find("CN(") < 0):
|
||||
self.result.raw += "The max number of %s connections " \
|
||||
"is %s.\n" % (desc, maxConnections)
|
||||
# get shared_buffers size
|
||||
GB = 1 * 1024 * 1024 * 1024
|
||||
MB = 1 * 1024 * 1024
|
||||
kB = 1 * 1024
|
||||
shared_buffers = 0
|
||||
# Execute the query command
|
||||
sqlcmd = "show shared_buffers;"
|
||||
output = SharedFuncs.runSqlCmd(sqlcmd, self.user, "", port,
|
||||
self.tmpPath, "postgres",
|
||||
self.mpprcFile)
|
||||
shared_buffer_size = str(output)
|
||||
# The result of the conversion query is a regular display
|
||||
if shared_buffer_size[0:-2].isdigit() and (
|
||||
(shared_buffer_size[-2:] > "GB") - (
|
||||
shared_buffer_size[-2:] < "GB")) == 0:
|
||||
shared_buffers = int(shared_buffer_size[0:-2]) * GB
|
||||
if shared_buffer_size[0:-2].isdigit() and (
|
||||
(shared_buffer_size[-2:] > "MB") - (
|
||||
shared_buffer_size[-2:] < "MB")) == 0:
|
||||
shared_buffers = int(shared_buffer_size[0:-2]) * MB
|
||||
if shared_buffer_size[0:-2].isdigit() and (
|
||||
(shared_buffer_size[-2:] > "kB") - (
|
||||
shared_buffer_size[-2:] < "kB")) == 0:
|
||||
shared_buffers = int(shared_buffer_size[0:-2]) * kB
|
||||
if shared_buffer_size[0:-1].isdigit() and (
(shared_buffer_size[-1:] > "B") - (
shared_buffer_size[-1:] < "B")) == 0:
shared_buffers = int(shared_buffer_size[0:-1])
|
||||
|
||||
# check shared_buffers
|
||||
strCmd = "cat /proc/sys/kernel/shmmax"
|
||||
status, shmmax = subprocess.getstatusoutput(strCmd)
|
||||
if (status != 0):
|
||||
self.result.raw += "Failed to obtain shmmax parameters." \
|
||||
" Command: %s.\n" % strCmd
|
||||
flag = False
|
||||
# check shmall parameters
|
||||
strCmd = "cat /proc/sys/kernel/shmall"
|
||||
status, shmall = subprocess.getstatusoutput(strCmd)
|
||||
if (status != 0):
|
||||
self.result.raw += "Failed to obtain shmall parameters." \
|
||||
" Command: %s.\n" % strCmd
|
||||
flag = False
|
||||
# get PAGESIZE
|
||||
strCmd = "getconf PAGESIZE"
|
||||
status, PAGESIZE = subprocess.getstatusoutput(strCmd)
|
||||
if (status != 0):
|
||||
self.result.raw += "Failed to obtain PAGESIZE." \
|
||||
" Command: %s.\n" % strCmd
|
||||
flag = False
|
||||
if (shared_buffers < 128 * kB):
|
||||
self.result.raw += "Shared_buffers must be greater " \
|
||||
"than or equal to 128KB.\n"
|
||||
flag = False
|
||||
elif (shared_buffers > int(shmmax)):
|
||||
self.result.raw += "Shared_buffers must be less" \
|
||||
" than shmmax(%d).\n" % int(shmmax)
|
||||
flag = False
|
||||
elif (shared_buffers > int(shmall) * int(PAGESIZE)):
|
||||
self.result.raw += "Shared_buffers must be less " \
|
||||
"than shmall*PAGESIZE(%d).\n" \
|
||||
% int(shmall) * int(PAGESIZE)
|
||||
flag = False
|
||||
else:
|
||||
self.result.raw += "%s Shared buffers size is %s.\n" \
|
||||
% (desc, shared_buffer_size)
|
||||
# check sem
|
||||
if (desc.find("CN(") >= 0):
|
||||
strCmd = "cat /proc/sys/kernel/sem"
|
||||
status, output = subprocess.getstatusoutput(strCmd)
|
||||
if (status != 0):
|
||||
self.result.raw += "Failed to obtain sem parameters." \
|
||||
" Error: %s.\n" % output + \
|
||||
" Command: %s.\n" % strCmd
|
||||
flag = False
|
||||
paramList = output.split("\t")
|
||||
if (int(paramList[0]) < 17):
|
||||
self.result.raw += "The system limit for the maximum" \
|
||||
" number of semaphores per set" \
|
||||
" (SEMMSL) must be greater than or" \
|
||||
" equal to 17. The current SEMMSL " \
|
||||
"value is: " + str(paramList[0]) \
|
||||
+ ".\n"
|
||||
flag = False
|
||||
|
||||
if (int(paramList[3]) < math.ceil(
|
||||
(maxConnections + 150) // 16)):
|
||||
self.result.raw += "The system limit for the maximum" \
|
||||
" number of semaphore sets (SEMMNI)" \
|
||||
" must be greater than or equal to" \
|
||||
" the value(math.ceil((" \
|
||||
"maxConnections + 150) / 16)) " + \
|
||||
str(math.ceil((maxConnections +
|
||||
150) // 16)) + \
|
||||
", The current SEMMNI value is: " + \
|
||||
str(paramList[3]) + ".\n"
|
||||
flag = False
|
||||
elif (int(paramList[1]) < math.ceil(
|
||||
(maxConnections + 150) // 16) * 17):
|
||||
self.result.raw += "The system limit for the maximum" \
|
||||
" number of semaphores (SEMMNS) must" \
|
||||
" be greater than or equal to the" \
|
||||
" value(math.ceil((maxConnections" \
|
||||
" + 150) / 16) * 17) " \
|
||||
+ str(math.ceil((maxConnections +
|
||||
150) // 16) * 17) + \
|
||||
", The current SEMMNS value is: " + \
|
||||
str(paramList[1]) + ".\n"
|
||||
flag = False
|
||||
else:
|
||||
self.result.raw += "The max number of %s connections" \
|
||||
" is %s.\n" % (desc, maxConnections)
|
||||
if (os.path.exists(sqlResultFile)):
|
||||
os.remove(sqlResultFile)
|
||||
return flag
|
||||
except Exception as e:
|
||||
if (os.path.exists(sqlResultFile)):
|
||||
os.remove(sqlResultFile)
|
||||
raise Exception(ErrorCode.GAUSS_513["GAUSS_51306"] %
|
||||
(("The max number of %s connections.\n" %
|
||||
desc).ljust(INDENTATION_VALUE_INT), str(e)))
|
||||
|
||||
def CheckGaussdbParameters(self, inst, nodeInfo, primaryDNidList):
|
||||
"""
|
||||
function: Check gaussdb instance parameters
|
||||
input: instance
|
||||
output: NA
|
||||
"""
|
||||
INDENTATION_VALUE_INT = 50
|
||||
resultList = []
|
||||
try:
|
||||
# Check all master DB instances
|
||||
if (primaryDNidList != []):
|
||||
if (inst in nodeInfo.datanodes):
|
||||
if inst.instanceId in primaryDNidList:
|
||||
resultList.append(
|
||||
self.CheckSingleGaussdbParameter(
|
||||
inst.port, "DN(%s)" % str(inst.instanceId),
|
||||
INDENTATION_VALUE_INT))
|
||||
if (False in resultList):
|
||||
self.result.rst = ResultStatus.NG
|
||||
return
|
||||
except Exception as e:
|
||||
raise Exception(str(e))
|
||||
|
||||
def doSet(self):
|
||||
resultStr = ""
|
||||
cmd = "gs_guc set -N all -I all -c" \
|
||||
" 'shared_buffers=1GB' -c 'max_connections=400'"
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
resultStr += "Falied to set cn shared_buffers.\nError : %s" \
|
||||
% output + " Command: %s.\n" % cmd
|
||||
cmd = "gs_guc set -N all -I all -c 'shared_buffers=1GB'" \
|
||||
" -c 'max_connections=3000'"
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
resultStr += "Falied to set database node shared_buffers.\n" \
|
||||
"Error : %s" % output + " Command: %s.\n" % cmd
|
||||
if (len(resultStr) > 0):
|
||||
self.result.val = resultStr
|
||||
else:
|
||||
self.result.val = "Set shared_buffers successfully."
|
||||
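Note: the "(a > b) - (a < b)" comparisons above are a Python 3 substitute for cmp() used to match the unit suffix returned by "show shared_buffers;". A hedged sketch of the same conversion with an explicit suffix table (assuming values such as "1GB", "128MB", "32768kB" or "8B"; this is not part of the committed code):

# Sketch of the same shared_buffers conversion with an explicit suffix table.
# Returns the size in bytes, or raises on an unrecognized value.
def parse_shared_buffers(value):
    units = [("GB", 1024 ** 3), ("MB", 1024 ** 2), ("kB", 1024), ("B", 1)]
    for suffix, factor in units:
        if value.endswith(suffix) and value[:-len(suffix)].isdigit():
            return int(value[:-len(suffix)]) * factor
    raise ValueError("unrecognized shared_buffers value: %s" % value)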
146
script/gspylib/inspection/items/cluster/CheckDebugSwitch.py
Normal file
@ -0,0 +1,146 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
from gspylib.inspection.common import SharedFuncs
|
||||
from gspylib.inspection.common.CheckItem import BaseItem
|
||||
from gspylib.inspection.common.CheckResult import ResultStatus
|
||||
from gspylib.os.gsfile import g_file
|
||||
|
||||
# Conf file name constant
|
||||
POSTGRESQL_CONF = "postgresql.conf"
|
||||
INSTANCE_ROLE_DATANODE = 4
|
||||
g_result = []
|
||||
|
||||
|
||||
class CheckDebugSwitch(BaseItem):
|
||||
def __init__(self):
|
||||
super(CheckDebugSwitch, self).__init__(self.__class__.__name__)
|
||||
|
||||
def obtainDataDirLength(self, nodeInfo):
|
||||
"""
|
||||
function: Obtain data dir length
|
||||
input: NA
|
||||
output: int, list
|
||||
"""
|
||||
# Get the longest path
|
||||
DirLength = 0
|
||||
# Get the DB instance and the longest DB path
|
||||
for inst in nodeInfo.datanodes:
|
||||
if (len(inst.datadir) > DirLength):
|
||||
DirLength = len(inst.datadir)
|
||||
|
||||
return DirLength
|
||||
|
||||
def checkSingleParaFile(self, inst, desc, INDENTATION_VALUE_INT):
|
||||
"""
|
||||
function: Check the log_min_messages parameter for each instance
|
||||
input: String, String, int
|
||||
output: int
|
||||
"""
|
||||
# The instance directory must exist
|
||||
if (not os.path.exists(inst.datadir) or len(
|
||||
os.listdir(inst.datadir)) == 0):
|
||||
g_result.append(
|
||||
"%s: Abnormal reason: The directory doesn't exist"
|
||||
" or is empty." % (
|
||||
"%s(%s) log_min_messages parameter" % (
|
||||
desc, inst.datadir)).ljust(INDENTATION_VALUE_INT))
|
||||
return -1
|
||||
paraPath = ""
|
||||
# Gets the database node configuration file
|
||||
if inst.instanceRole == INSTANCE_ROLE_DATANODE:
|
||||
paraPath = os.path.join(inst.datadir, POSTGRESQL_CONF)
|
||||
else:
|
||||
g_result.append(
|
||||
"%s: Abnormal reason: Invalid instance type: %s." % (
|
||||
("%s(%s) log_min_messages parameter " % (
|
||||
desc, inst.datadir)).ljust(INDENTATION_VALUE_INT),
|
||||
inst.instanceRole))
|
||||
return -1
|
||||
# The instance configuration file must exist
|
||||
if (not os.path.exists(paraPath)):
|
||||
g_result.append("%s: Abnormal reason: %s does not exist." % (
|
||||
("%s(%s) log_min_messages parameter " % (
|
||||
desc, inst.datadir)).ljust(INDENTATION_VALUE_INT),
|
||||
paraPath))
|
||||
return -1
|
||||
# Gets the log_min_messages parameter in the configuration file
|
||||
output = g_file.readFile(paraPath, "log_min_messages")
|
||||
value = None
|
||||
for line in output:
|
||||
line = line.split('#')[0].strip()
|
||||
if (line.find('log_min_messages') >= 0 and line.find('=') > 0):
|
||||
value = line.split('=')[1].strip()
|
||||
break
|
||||
if not value:
|
||||
value = "warning"
|
||||
# Determines whether the log_min_messages parameter is valid
|
||||
if (value.lower() != "warning"):
|
||||
g_result.append(
|
||||
"%s: Warning reason: The parameter 'log_min_messages(%s)'"
|
||||
" value is incorrect. It should be 'warning'."
|
||||
% (("%s(%s) log_min_messages parameter(%s)"
|
||||
% (desc, paraPath, value)).ljust(INDENTATION_VALUE_INT),
|
||||
value))
|
||||
return -1
|
||||
g_result.append("%s: Normal" % (
|
||||
"%s(%s) log_min_messages parameter(%s)" % (
|
||||
desc, paraPath, value)).ljust(
|
||||
INDENTATION_VALUE_INT))
|
||||
return 0
|
||||
|
||||
def doCheck(self):
|
||||
global g_result
|
||||
g_result = []
|
||||
nodeInfo = self.cluster.getDbNodeByName(self.host)
|
||||
intervalLen = self.obtainDataDirLength(nodeInfo)
|
||||
resultList = []
|
||||
self.result.val = ""
|
||||
INDENTATION_VALUE_INT = intervalLen + 64
|
||||
# Check all DB instance debug switch
|
||||
for inst in nodeInfo.datanodes:
|
||||
resultList.append(
|
||||
self.checkSingleParaFile(inst, "DN", INDENTATION_VALUE_INT))
|
||||
if (-1 in resultList):
|
||||
self.result.rst = ResultStatus.WARNING
|
||||
else:
|
||||
self.result.rst = ResultStatus.OK
|
||||
for detail in g_result:
|
||||
self.result.val = self.result.val + '%s\n' % detail
|
||||
|
||||
def doSet(self):
|
||||
nodeInfo = self.cluster.getDbNodeByName(self.host)
|
||||
intervalLen = self.obtainDataDirLength(nodeInfo)
|
||||
flag = 0
|
||||
resultStr = ""
|
||||
INDENTATION_VALUE_INT = intervalLen + 64
|
||||
for inst in nodeInfo.datanodes:
|
||||
flag = self.checkSingleParaFile(inst, "DN", INDENTATION_VALUE_INT)
|
||||
if (flag == -1):
|
||||
cmd = "gs_guc set -N all -I all -c" \
|
||||
" 'log_min_messages = warning'"
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0):
|
||||
resultStr += "Falied to set database node " \
|
||||
"log_min_massages.\n Error : %s" % output + \
|
||||
" Command: %s.\n" % cmd
|
||||
if (len(resultStr) > 0):
|
||||
self.result.val = resultStr
|
||||
else:
|
||||
self.result.val = "Set log_min_messages successfully."
|
||||
95
script/gspylib/inspection/items/cluster/CheckDilateSysTab.py
Normal file
@ -0,0 +1,95 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
from gspylib.inspection.common import SharedFuncs
|
||||
from gspylib.inspection.common.CheckItem import BaseItem
|
||||
from gspylib.inspection.common.CheckResult import ResultStatus
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
|
||||
dbList = []
|
||||
|
||||
|
||||
class CheckDilateSysTab(BaseItem):
|
||||
def __init__(self):
|
||||
super(CheckDilateSysTab, self).__init__(self.__class__.__name__)
|
||||
self.Threshold_NG = None
|
||||
self.Threshold_Warning = None
|
||||
|
||||
def preCheck(self):
|
||||
super(CheckDilateSysTab, self).preCheck()
|
||||
if (not (self.threshold.__contains__(
|
||||
'Threshold_NG') and self.threshold.__contains__(
|
||||
'Threshold_Warning'))):
|
||||
raise Exception(ErrorCode.GAUSS_530["GAUSS_53013"]
|
||||
% "The threshold Threshold_NG and"
|
||||
" Threshold_Warning ")
|
||||
if (not self.threshold['Threshold_NG'].isdigit() or not
|
||||
self.threshold['Threshold_Warning'].isdigit()):
|
||||
raise Exception(ErrorCode.GAUSS_530["GAUSS_53014"]
|
||||
% "The threshold Threshold_NG and"
|
||||
" Threshold_Warning ")
|
||||
self.Threshold_NG = int(self.threshold['Threshold_NG'])
|
||||
self.Threshold_Warning = int(self.threshold['Threshold_Warning'])
|
||||
|
||||
def doCheck(self):
|
||||
global dbList
|
||||
self.result.rst = ResultStatus.OK
|
||||
sqldb = "select datname from pg_database;"
|
||||
output = SharedFuncs.runSqlCmd(sqldb, self.user, "", self.port,
|
||||
self.tmpPath, "postgres",
|
||||
self.mpprcFile)
|
||||
dbList = output.split("\n")
|
||||
dbList.remove("template0")
|
||||
sql = "select (pg_table_size(1259)/count(*)/247.172)::numeric(10,3)" \
|
||||
" from pg_class;"
|
||||
result = []
|
||||
for db in dbList:
|
||||
# Calculate the size with sql cmd
|
||||
output = SharedFuncs.runSqlCmd(sql, self.user, "", self.port,
|
||||
self.tmpPath, db, self.mpprcFile)
|
||||
if (float(output) > self.Threshold_NG):
|
||||
self.result.rst = ResultStatus.NG
|
||||
result.append(db)
|
||||
elif (float(output) > self.Threshold_Warning):
|
||||
result.append(db)
|
||||
if (self.result.rst == ResultStatus.OK):
|
||||
self.result.rst = ResultStatus.WARNING
|
||||
|
||||
if (self.result.rst == ResultStatus.OK):
|
||||
self.result.val = "no system table dilate"
|
||||
else:
|
||||
self.result.val = "there is system table dilate in" \
|
||||
" databases:\n%s" % "\n".join(result)
|
||||
|
||||
def doSet(self):
|
||||
reslutStr = ""
|
||||
sqlCmd = "cluster pg_attribute using" \
|
||||
" pg_attribute_relid_attnum_index;" \
|
||||
"cluster pg_class using pg_class_oid_index;" \
|
||||
"cluster pg_type using pg_type_oid_index;" \
|
||||
"cluster pg_proc using pg_proc_oid_index;" \
|
||||
"cluster pg_depend using pg_depend_depender_index;" \
|
||||
"cluster pg_index using pg_index_indexrelid_index;" \
|
||||
"cluster pg_namespace using pg_namespace_oid_index;" \
|
||||
"cluster pgxc_class using pgxc_class_pcrelid_index;" \
|
||||
"vacuum full pg_statistic;"
|
||||
for databaseName in dbList:
|
||||
for sql in sqlCmd.split(';'):
|
||||
output = SharedFuncs.runSqlCmd(sql, self.user, "", self.port,
|
||||
self.tmpPath, databaseName,
|
||||
self.mpprcFile)
|
||||
reslutStr += output
|
||||
self.result.val = reslutStr
|
||||
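Note on the dilation query above: 1259 is the fixed OID of the pg_class catalog, so pg_table_size(1259)/count(*) is the average on-disk bytes per pg_class row; 247.172 appears to be an empirical expected row size (an assumption, it is not documented in this commit), which makes the ratio a dilation factor compared against Threshold_Warning and Threshold_NG. A stand-alone sketch of the same measurement for one database, reusing the SharedFuncs.runSqlCmd signature used above:

# Hedged sketch: compute the pg_class dilation factor for one database, using
# the same query as doCheck() above.
from gspylib.inspection.common import SharedFuncs


def pg_class_dilation_factor(user, port, tmp_path, db, mpprc_file):
    sql = ("select (pg_table_size(1259)/count(*)/247.172)::numeric(10,3)"
           " from pg_class;")
    output = SharedFuncs.runSqlCmd(sql, user, "", port, tmp_path, db, mpprc_file)
    return float(output)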
187
script/gspylib/inspection/items/cluster/CheckDirPermissions.py
Normal file
@ -0,0 +1,187 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
import os
|
||||
import pwd
|
||||
import grp
|
||||
import subprocess
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.inspection.common.CheckItem import BaseItem
|
||||
from gspylib.inspection.common.CheckResult import ResultStatus
|
||||
from gspylib.os.gsfile import g_file
|
||||
|
||||
DIRECTORY_MODE = 750
|
||||
g_result = []
|
||||
g_chList = []
|
||||
|
||||
|
||||
class CheckDirPermissions(BaseItem):
|
||||
def __init__(self):
|
||||
super(CheckDirPermissions, self).__init__(self.__class__.__name__)
|
||||
|
||||
def obtainDataDirLength(self, nodeInfo):
|
||||
"""
|
||||
function: Obtain data dir length
|
||||
input: NA
|
||||
output: int, list
|
||||
"""
|
||||
# Get the longest path
|
||||
DirLength = 0
|
||||
dataDirList = []
|
||||
# Get the DB instance and the longest DB path
|
||||
for inst in nodeInfo.datanodes:
|
||||
dataDirList.append(inst.datadir)
|
||||
if (len(inst.datadir) > DirLength):
|
||||
DirLength = len(inst.datadir)
|
||||
# Get the CMserver instance and longest path in the CMserver, DN
|
||||
for inst in nodeInfo.cmservers:
|
||||
dataDirList.append(inst.datadir)
|
||||
if (len(inst.datadir) > DirLength):
|
||||
DirLength = len(inst.datadir)
|
||||
# Get the CMagent instance and longest path in the CM, DN
|
||||
for inst in nodeInfo.cmagents:
|
||||
dataDirList.append(inst.datadir)
|
||||
if (len(inst.datadir) > DirLength):
|
||||
DirLength = len(inst.datadir)
|
||||
# Get the CN instance and longest path in the CM, DN, CN
|
||||
for inst in nodeInfo.coordinators:
|
||||
dataDirList.append(inst.datadir)
|
||||
if (len(inst.datadir) > DirLength):
|
||||
DirLength = len(inst.datadir)
|
||||
# Get the GTM instance and longest path in the CM, DN, CN, GTM
|
||||
for inst in nodeInfo.gtms:
|
||||
dataDirList.append(inst.datadir)
|
||||
if (len(inst.datadir) > DirLength):
|
||||
DirLength = len(inst.datadir)
|
||||
# Get the ETCD instance and longest path in the all instance
|
||||
if (hasattr(nodeInfo, 'etcds')):
|
||||
for inst in nodeInfo.etcds:
|
||||
dataDirList.append(inst.datadir)
|
||||
if (len(inst.datadir) > DirLength):
|
||||
DirLength = len(inst.datadir)
|
||||
|
||||
return (DirLength, dataDirList)
|
||||
|
||||
def checkDirWriteable(self, dirPath, user, flag=""):
|
||||
"""
|
||||
function : Check if target directory is writeable for user.
|
||||
input : String,String
|
||||
output : boolean
|
||||
"""
|
||||
return os.access(dirPath, os.W_OK)
|
||||
|
||||
def checkSingleDirectoryPermission(self, singledir, desc,
|
||||
INDENTATION_VALUE_INT):
|
||||
"""
|
||||
function: Check Directory Permissions
|
||||
input: String, String, int
|
||||
output: int
|
||||
"""
|
||||
# The directory must be a folder
|
||||
if (not os.path.isdir(singledir)):
|
||||
g_result.append(
|
||||
"%s: Abnormal reason: Directory does not exist." % (
|
||||
"%s directory(%s)" % (desc, singledir)).ljust(
|
||||
INDENTATION_VALUE_INT))
|
||||
return -1
|
||||
# Gets the folder permissions
|
||||
currentPremission = int(oct(os.stat(singledir).st_mode)[-3:])
|
||||
# Check the write access and compare the permission size
|
||||
if (self.checkDirWriteable(singledir, self.user)
|
||||
and currentPremission <= DIRECTORY_MODE):
|
||||
|
||||
g_result.append(
|
||||
"%s: Normal" % ("%s directory(%s) permissions %s" % (
|
||||
desc, singledir, str(currentPremission))).ljust(
|
||||
INDENTATION_VALUE_INT))
|
||||
return 0
|
||||
elif (currentPremission > DIRECTORY_MODE):
|
||||
g_result.append(
|
||||
"%s: Abnormal reason: Directory permission"
|
||||
" can not exceed 750."
|
||||
% (("%s directory(%s) permissions %s"
|
||||
% (desc, singledir,
|
||||
str(currentPremission))).ljust(INDENTATION_VALUE_INT)))
|
||||
return -1
|
||||
else:
|
||||
g_result.append(
|
||||
"%s: Abnormal reason: Directory is not writable for users."
|
||||
% ("%s directory(%s) permissions %s"
|
||||
% (desc, singledir,
|
||||
str(currentPremission))).ljust(INDENTATION_VALUE_INT))
|
||||
return -1
|
||||
|
||||
def doCheck(self):
|
||||
global g_chList
|
||||
global g_result
|
||||
resultList = []
|
||||
g_result = []
|
||||
nodeInfo = self.cluster.getDbNodeByName(self.host)
|
||||
tmpDir = DefaultValue.getEnv("PGHOST")
|
||||
logDir = DefaultValue.getEnv("GAUSSLOG")
|
||||
toolDir = DefaultValue.getEnv("GPHOME")
|
||||
(intervalLen, instList) = self.obtainDataDirLength(nodeInfo)
|
||||
if intervalLen < len(self.cluster.appPath):
|
||||
intervalLen = len(self.cluster.appPath)
|
||||
if intervalLen < len(logDir):
|
||||
intervalLen = len(logDir)
|
||||
INDENTATION_VALUE_INT = intervalLen + 44
|
||||
# Check the permissions for appPath
|
||||
resultList.append(
|
||||
self.checkSingleDirectoryPermission(self.cluster.appPath,
|
||||
"AppPath",
|
||||
INDENTATION_VALUE_INT))
|
||||
g_chList.append(self.cluster.appPath)
|
||||
# Check the permissions for tmpPath
|
||||
resultList.append(self.checkSingleDirectoryPermission(
|
||||
tmpDir, "Tmp", INDENTATION_VALUE_INT))
|
||||
# Check the permissions for logPath
|
||||
g_chList.append(tmpDir)
|
||||
resultList.append(self.checkSingleDirectoryPermission(
|
||||
logDir, "Log", INDENTATION_VALUE_INT))
|
||||
# Check the permissions for logPath
|
||||
g_chList.append(logDir)
|
||||
resultList.append(
|
||||
self.checkSingleDirectoryPermission(toolDir, "ToolPath",
|
||||
INDENTATION_VALUE_INT))
|
||||
# Check the permissions for all CMserver
|
||||
g_chList.append(toolDir)
|
||||
# Check the permissions for all DB instance
|
||||
for inst in nodeInfo.datanodes:
|
||||
resultList.append(
|
||||
self.checkSingleDirectoryPermission(inst.datadir, "DN",
|
||||
INDENTATION_VALUE_INT))
|
||||
# Check the xlog permissions for all DB instance
|
||||
xlogDir = "%s/pg_xlog" % inst.datadir
|
||||
resultList.append(
|
||||
self.checkSingleDirectoryPermission(xlogDir, "DN Xlog",
|
||||
INDENTATION_VALUE_INT))
|
||||
g_chList.append(inst.datadir)
|
||||
g_chList.append(xlogDir)
|
||||
if (-1 in resultList):
|
||||
self.result.rst = ResultStatus.NG
|
||||
else:
|
||||
self.result.rst = ResultStatus.OK
|
||||
self.result.val = ""
|
||||
for detail in g_result:
|
||||
self.result.val = self.result.val + '%s\n' % detail
|
||||
|
||||
def doSet(self):
|
||||
resultStr = ""
|
||||
for dirName in g_chList:
|
||||
g_file.changeOwner(self.user, dirName, True)
|
||||
g_file.changeMode(DIRECTORY_MODE, dirName)
|
||||
self.result.val = "Set DirPermissions completely."
|
||||
151
script/gspylib/inspection/items/cluster/CheckEnvProfile.py
Normal file
@ -0,0 +1,151 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
import os
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.inspection.common import SharedFuncs
|
||||
from gspylib.inspection.common.CheckItem import BaseItem
|
||||
from gspylib.inspection.common.CheckResult import ResultStatus
|
||||
from gspylib.os.gsfile import g_file
|
||||
from gspylib.common.VersionInfo import VersionInfo
|
||||
|
||||
g_envProfileDist = {}
|
||||
|
||||
|
||||
class CheckEnvProfile(BaseItem):
|
||||
def __init__(self):
|
||||
super(CheckEnvProfile, self).__init__(self.__class__.__name__)
|
||||
|
||||
def getProcessEnv(self, ProcessNum, Process):
|
||||
abnormal_flag = False
|
||||
processEnvDist = {}
|
||||
# Get environment variables
|
||||
if (os.path.isfile("/proc/%s/environ" % ProcessNum)):
|
||||
envInfoList = g_file.readFile("/proc/%s/environ" % ProcessNum)[
|
||||
0].split('\0')
|
||||
for env in envInfoList:
|
||||
envName = env.split('=')[0].strip()
|
||||
processEnvDist[envName] = env.split('=')[-1].strip()
|
||||
for env in g_envProfileDist.keys():
|
||||
# environment variables if exist
|
||||
if (not env in processEnvDist.keys() or
|
||||
not processEnvDist[env]):
|
||||
abnormal_flag = True
|
||||
self.result.val += "There is no env[%s] in " \
|
||||
"process %s[%s].\n " \
|
||||
% (env, Process, ProcessNum)
|
||||
continue
|
||||
# environment variables is GAUSSHOME
|
||||
if (env == "GAUSSHOME"):
|
||||
if (g_envProfileDist[env] != processEnvDist[env]):
|
||||
abnormal_flag = True
|
||||
self.result.val += "The env[GAUSSHOME] is " \
|
||||
"inconsistent in process %s[%s] " \
|
||||
"and system.\nProcess: %s\n" \
|
||||
% (Process, ProcessNum,
|
||||
processEnvDist[env])
|
||||
##environment variables is PATH
|
||||
elif (env == "PATH"):
|
||||
binPath = "%s/bin" % g_envProfileDist["GAUSSHOME"]
|
||||
ProcessEnvList = processEnvDist[env].split(':')
|
||||
if (binPath not in ProcessEnvList):
|
||||
abnormal_flag = True
|
||||
self.result.val += "There is no [%s] in " \
|
||||
"process %s[%s]'s environment " \
|
||||
"variable [%s].\n " \
|
||||
% (binPath, Process,
|
||||
ProcessNum, env)
|
||||
else:
|
||||
libPath = "%s/lib" % g_envProfileDist["GAUSSHOME"]
|
||||
ProcessEnvList = processEnvDist[env].split(':')
|
||||
if (libPath not in ProcessEnvList):
|
||||
abnormal_flag = True
|
||||
self.result.val += "There is no [%s] in process" \
|
||||
" %s[%s]'s environment variable" \
|
||||
" [%s].\n " % (libPath, Process,
|
||||
ProcessNum, env)
|
||||
|
||||
return abnormal_flag
|
||||
|
||||
def doCheck(self):
|
||||
g_envProfileDist["GAUSSHOME"] = DefaultValue.getEnv("GAUSSHOME")
|
||||
g_envProfileDist["PATH"] = DefaultValue.getEnv("PATH")
|
||||
g_envProfileDist["LD_LIBRARY_PATH"] = DefaultValue.getEnv(
|
||||
"LD_LIBRARY_PATH")
|
||||
|
||||
self.result.val = ""
|
||||
ProcessList = []
|
||||
ProcessDisk = {}
|
||||
abnormal_flag = False
|
||||
if (g_envProfileDist["GAUSSHOME"] == ""):
|
||||
abnormal_flag = True
|
||||
self.result.val += "The environmental variable " \
|
||||
"GAUSSHOME is empty.\n"
|
||||
else:
|
||||
self.result.val += "GAUSSHOME %s\n" % g_envProfileDist[
|
||||
"GAUSSHOME"]
|
||||
|
||||
libPath = "%s/lib" % g_envProfileDist["GAUSSHOME"]
|
||||
if (libPath not in g_envProfileDist["LD_LIBRARY_PATH"].split(':')):
|
||||
abnormal_flag = True
|
||||
self.result.val += \
|
||||
VersionInfo.PRODUCT_NAME + \
|
||||
" lib path does not exist in LD_LIBRARY_PATH.\n"
|
||||
else:
|
||||
self.result.val += "LD_LIBRARY_PATH %s\n" % libPath
|
||||
binPath = "%s/bin" % g_envProfileDist["GAUSSHOME"]
|
||||
# Whether the environment variable bin is in path
|
||||
if (binPath not in g_envProfileDist["PATH"].split(':')):
|
||||
abnormal_flag = True
|
||||
self.result.val += VersionInfo.PRODUCT_NAME + \
|
||||
" bin path does not exist in PATH.\n"
|
||||
else:
|
||||
self.result.val += "PATH %s\n" % binPath
|
||||
|
||||
if abnormal_flag:
|
||||
self.result.rst = ResultStatus.NG
|
||||
return
|
||||
|
||||
# Gets the current node information
|
||||
nodeInfo = self.cluster.getDbNodeByName(self.host)
|
||||
# check the number of instances
|
||||
if len(nodeInfo.datanodes) > 0:
|
||||
ProcessList.append("gaussdb")
|
||||
|
||||
# Query process
|
||||
for Process in ProcessList:
|
||||
cmd = "ps ux | grep '%s/bin/%s' | grep -v 'grep' |" \
|
||||
" awk '{print $2}'" % (self.cluster.appPath, Process)
|
||||
output = SharedFuncs.runShellCmd(cmd, self.user, self.mpprcFile)
|
||||
if (output != ""):
|
||||
if (len(output.split('\n')) > 1):
|
||||
for ProcessNum in output.split('\n'):
|
||||
ProcessDisk[ProcessNum] = [Process]
|
||||
else:
|
||||
ProcessDisk[output] = [Process]
|
||||
else:
|
||||
self.result.val += "The process %s is not exist.\n" % Process
|
||||
abnormal_flag = True
|
||||
for ProcessNum in ProcessDisk.keys():
|
||||
# Get the process environment variables
|
||||
result = self.getProcessEnv(ProcessNum, ProcessDisk[ProcessNum])
|
||||
if not abnormal_flag:
|
||||
abnormal_flag = result
|
||||
|
||||
if abnormal_flag:
|
||||
self.result.rst = ResultStatus.NG
|
||||
else:
|
||||
self.result.rst = ResultStatus.OK
|
||||
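Note: the per-process comparison above reads /proc/<pid>/environ, which stores NUL-separated NAME=VALUE entries. A compact stand-alone reader (sketch, no framework dependencies):

# Sketch: read one process's environment as a dict from /proc/<pid>/environ.
def read_process_env(pid):
    env = {}
    with open("/proc/%s/environ" % pid) as f:
        for entry in f.read().split("\0"):
            if "=" in entry:
                name, _, value = entry.partition("=")
                env[name] = value
    return env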
47
script/gspylib/inspection/items/cluster/CheckGaussVer.py
Normal file
@ -0,0 +1,47 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
from gspylib.inspection.common import SharedFuncs
|
||||
from gspylib.inspection.common.CheckItem import BaseItem
|
||||
from gspylib.inspection.common.CheckResult import ResultStatus
|
||||
|
||||
|
||||
class CheckGaussVer(BaseItem):
|
||||
def __init__(self):
|
||||
super(CheckGaussVer, self).__init__(self.__class__.__name__)
|
||||
|
||||
def doCheck(self):
|
||||
gaussdbVersion = ""
|
||||
gsqlVersion = ""
|
||||
# Get the version
|
||||
cmd = "gaussdb -V | awk '{print $4\"_\"$6}'"
|
||||
self.result.raw = cmd + "\n"
|
||||
gaussdbVersion = SharedFuncs.runShellCmd(cmd, "", self.mpprcFile)
|
||||
if (gaussdbVersion[-1] == ")"):
|
||||
gaussdbVersion = gaussdbVersion[:-1]
|
||||
# Get the version
|
||||
cmd = "gsql -V | awk '{print $4\"_\"$6}'"
|
||||
self.result.raw += cmd
|
||||
gsqlVersion = SharedFuncs.runShellCmd(cmd, "", self.mpprcFile)
|
||||
if (gsqlVersion[-1] == ")"):
|
||||
gsqlVersion = gsqlVersion[:-1]
|
||||
# Compare the two version numbers are the same
|
||||
if gaussdbVersion and gaussdbVersion == gsqlVersion:
|
||||
self.result.rst = ResultStatus.OK
|
||||
else:
|
||||
self.result.rst = ResultStatus.NG
|
||||
self.result.val = "gaussdb Version: %s \ngsql Version: %s" % (
|
||||
gaussdbVersion, gsqlVersion)
|
||||
38
script/gspylib/inspection/items/cluster/CheckIntegrity.py
Normal file
@ -0,0 +1,38 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
import os
|
||||
from gspylib.inspection.common.CheckItem import BaseItem
|
||||
from gspylib.inspection.common.CheckResult import ResultStatus
|
||||
from gspylib.os.gsfile import g_file
|
||||
|
||||
|
||||
class CheckIntegrity(BaseItem):
|
||||
def __init__(self):
|
||||
super(CheckIntegrity, self).__init__(self.__class__.__name__)
|
||||
|
||||
def doCheck(self):
|
||||
gaussHome = self.cluster.appPath
|
||||
gaussdbFile = os.path.join(gaussHome, "bin/gaussdb")
|
||||
gaussdbIntegrity = g_file.getFileSHA256(gaussdbFile)
|
||||
self.result.raw = gaussdbIntegrity
|
||||
if (gaussdbIntegrity != "" and len(gaussdbIntegrity) == 64):
|
||||
self.result.rst = ResultStatus.OK
|
||||
self.result.val = "gaussdb sha256sum: %s" % gaussdbIntegrity
|
||||
else:
|
||||
self.result.rst = ResultStatus.NG
|
||||
self.result.val = "Failed to obtain gaussdb sha256 value." \
|
||||
" Error:\n%s" % gaussdbIntegrity
|
||||
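Note: g_file.getFileSHA256 is the OM library helper; the length check above only validates that a 64-character hex SHA-256 digest came back. An equivalent stand-alone computation (sketch using the standard hashlib module):

# Stand-alone equivalent of the integrity digest above (sketch with hashlib).
import hashlib


def file_sha256(path):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            digest.update(chunk)
    return digest.hexdigest()  # 64 hex characters, as checked in doCheck()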
96
script/gspylib/inspection/items/cluster/CheckLargeFile.py
Normal file
@ -0,0 +1,96 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
from multiprocessing.dummy import Pool as ThreadPool
|
||||
from gspylib.common.Common import DefaultValue
|
||||
from gspylib.inspection.common import SharedFuncs
|
||||
from gspylib.inspection.common.CheckItem import BaseItem
|
||||
from gspylib.inspection.common.CheckResult import ResultStatus
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
|
||||
|
||||
class CheckLargeFile(BaseItem):
|
||||
def __init__(self):
|
||||
super(CheckLargeFile, self).__init__(self.__class__.__name__)
|
||||
self.Threshold_SIZE = None
|
||||
|
||||
def preCheck(self):
|
||||
super(CheckLargeFile, self).preCheck()
|
||||
if (not (self.threshold.__contains__('size'))):
|
||||
raise Exception(ErrorCode.GAUSS_530["GAUSS_53013"]
|
||||
% "The threshold size")
|
||||
self.Threshold_SIZE = (self.threshold['size'])
|
||||
|
||||
def obtainDataDir(self, nodeInfo):
|
||||
dataDirList = []
|
||||
for inst in nodeInfo.datanodes:
|
||||
dataDirList.append(inst.datadir)
|
||||
return dataDirList
|
||||
|
||||
def checkLargeFile(self, path):
|
||||
fileList = []
|
||||
failList = []
|
||||
cmd = "find %s -type f -size +%s" % (path, self.Threshold_SIZE)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if (status != 0 and output.find("Permission denied") > 0):
|
||||
for fileName in output.splitlines():
|
||||
if (fileName.find("Permission denied") > 0):
|
||||
failList.append(fileName)
|
||||
else:
|
||||
for fileName in output.splitlines():
|
||||
fileList.append(os.path.join(path, fileName))
|
||||
return fileList, failList
|
||||
|
||||
def doCheck(self):
|
||||
outputList = []
|
||||
failList = []
|
||||
pathList = []
|
||||
if (self.cluster):
|
||||
paths = self.obtainDataDir(
|
||||
self.cluster.getDbNodeByName(self.host))
|
||||
else:
|
||||
raise Exception(ErrorCode.GAUSS_530["GAUSS_53013"] % "cluster")
|
||||
for path in paths:
|
||||
if (path):
|
||||
pathList.append(path)
|
||||
pool = ThreadPool(DefaultValue.getCpuSet())
|
||||
results = pool.map(self.checkLargeFile, pathList)
|
||||
pool.close()
|
||||
pool.join()
|
||||
|
||||
for outlist, flist in results:
|
||||
if (outlist):
|
||||
outputList.extend(outlist)
|
||||
if (flist):
|
||||
failList.extend(flist)
|
||||
|
||||
if (len(outputList) == 0 and len(failList) == 0):
|
||||
self.result.rst = ResultStatus.OK
|
||||
self.result.val = "No file more than %s" % self.Threshold_SIZE
|
||||
else:
|
||||
if (len(outputList) > 0):
|
||||
self.result.val = "Files more than %s:\n%s" % (
|
||||
self.Threshold_SIZE, "\n".join(outputList))
|
||||
if (len(failList) > 0):
|
||||
self.result.val = "Files more than %s:\n%s\n%s" % (
|
||||
self.Threshold_SIZE, "\n".join(outputList),
|
||||
"\n".join(failList))
|
||||
else:
|
||||
self.result.val = "%s" % ("\n".join(failList))
|
||||
self.result.rst = ResultStatus.NG
|
||||
97
script/gspylib/inspection/items/cluster/CheckMpprcFile.py
Normal file
@ -0,0 +1,97 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
import os
|
||||
from gspylib.inspection.common import SharedFuncs
|
||||
from gspylib.inspection.common.CheckItem import BaseItem
|
||||
from gspylib.inspection.common.CheckResult import ResultStatus
|
||||
|
||||
|
||||
class CheckMpprcFile(BaseItem):
|
||||
def __init__(self):
|
||||
super(CheckMpprcFile, self).__init__(self.__class__.__name__)
|
||||
|
||||
def doCheck(self):
|
||||
self.result.rst = ResultStatus.NG
|
||||
self.result.val = "There are illegal characters in mpprc file"
|
||||
appPath = self.cluster.appPath
|
||||
mpprcFile = self.mpprcFile
|
||||
bashfile = "/home/%s/.bashrc" % self.user
|
||||
if (mpprcFile == "" or not mpprcFile or mpprcFile == "/etc/profile"
|
||||
or mpprcFile == "~/.bashrc" or mpprcFile == bashfile
|
||||
or not os.path.exists(mpprcFile)):
|
||||
self.result.rst = ResultStatus.NG
|
||||
self.result.val = "There is no mpprc file"
|
||||
return
|
||||
try:
|
||||
with open(mpprcFile, 'r') as fp:
|
||||
env_list = fp.readlines()
|
||||
while '' in env_list:
|
||||
env_list.remove('')
|
||||
# get ec content
|
||||
ec_content = "if [ -f '%s/utilslib/env_ec' ] &&" \
|
||||
" [ `id -u` -ne 0 ];" \
|
||||
" then source '%s/utilslib/env_ec'; fi " \
|
||||
% (appPath, appPath)
|
||||
ec_content_old = "if [ -f '%s/utilslib/env_ec' ] ;" \
|
||||
" then source '%s/utilslib/env_ec'; fi " \
|
||||
% (appPath, appPath)
|
||||
# remove ec content from list
|
||||
if ec_content in env_list:
|
||||
env_list.remove(ec_content)
|
||||
if ec_content_old in env_list:
|
||||
env_list.remove(ec_content_old)
|
||||
# white elements
|
||||
list_white = ["ELK_CONFIG_DIR", "ELK_SYSTEM_TABLESPACE",
|
||||
"MPPDB_ENV_SEPARATE_PATH", "GPHOME", "PATH",
|
||||
"LD_LIBRARY_PATH", "PYTHONPATH",
|
||||
"GAUSS_WARNING_TYPE", "GAUSSHOME", "PATH",
|
||||
"LD_LIBRARY_PATH",
|
||||
"S3_CLIENT_CRT_FILE", "GAUSS_VERSION", "PGHOST",
|
||||
"GS_CLUSTER_NAME", "GAUSSLOG",
|
||||
"GAUSS_ENV", "KRB5_CONFIG", "PGKRBSRVNAME",
|
||||
"KRBHOSTNAME", "ETCD_UNSUPPORTED_ARCH"]
|
||||
# black elements
|
||||
list_black = ["|", ";", "&", "<", ">", "`", "\\", "'", "\"",
|
||||
"{", "}", "(", ")", "[", "]", "~", "*", "?",
|
||||
"!", "\n"]
|
||||
for env in env_list:
|
||||
env = env.strip()
|
||||
if env == "":
|
||||
continue
|
||||
if len(env.split()) != 2:
|
||||
return
|
||||
if env.split()[0] == "umask" and env.split()[1] == "077":
|
||||
continue
|
||||
for black in list_black:
|
||||
flag = env.find(black)
|
||||
if flag >= 0:
|
||||
return
|
||||
if ((not env.startswith("export")) or (
|
||||
env.split()[0] != "export")):
|
||||
return
|
||||
else:
|
||||
val = env[6:].strip()
|
||||
if not val.find("="):
|
||||
return
|
||||
elif (val.split("=")[0].strip() not in list_white):
|
||||
return
|
||||
self.result.rst = ResultStatus.OK
|
||||
self.result.val = "Mpprc file is ok"
|
||||
except Exception as e:
|
||||
self.result.rst = ResultStatus.NG
|
||||
self.result.val = "Can not read mpprc file"
|
||||
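For reference, a separated environment file that passes the checks above contains only a umask line and whitelisted export lines with none of the blacklisted characters, for example (paths are illustrative only):

umask 077
export MPPDB_ENV_SEPARATE_PATH=/home/omm/mpprc_file
export GPHOME=/opt/huawei/wisequery
export GAUSSHOME=/opt/huawei/install/app
export PATH=$GAUSSHOME/bin:$PATH
export LD_LIBRARY_PATH=$GAUSSHOME/lib:$LD_LIBRARY_PATH
export GAUSSLOG=/var/log/omm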
78
script/gspylib/inspection/items/cluster/CheckPortRange.py
Normal file
@ -0,0 +1,78 @@
|
||||
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus
from gspylib.common.ErrorCode import ErrorCode
from gspylib.os.gsfile import g_file


class CheckPortRange(BaseItem):
    def __init__(self):
        super(CheckPortRange, self).__init__(self.__class__.__name__)
        self.ip_local_port_range = None

    def preCheck(self):
        # check whether the current node contains CN instances; if not, raise an exception
        super(CheckPortRange, self).preCheck()
        # check the threshold was set correctly
        if (not self.threshold.__contains__('ip_local_port_range')):
            raise Exception(ErrorCode.GAUSS_530["GAUSS_53013"]
                            % "threshold ip_local_port_range")
        self.ip_local_port_range = self.threshold['ip_local_port_range']

    def getPort(self):
        cooInst = None
        portList = {}
        dbNode = self.cluster.getDbNodeByName(self.host)
        for dnInst in dbNode.datanodes:
            portList[dnInst.port] = dnInst.instanceRole
            portList[dnInst.haPort] = dnInst.instanceRole

        return portList

    def doCheck(self):
        parRes = ""
        flag = None
        instance = {0: "CMSERVER", 1: "GTM", 2: "ETCD", 3: "COORDINATOR",
                    4: "DATANODE", 5: "CMAGENT"}
        portList = self.getPort()
        # Check the port range
        output = g_file.readFile('/proc/sys/net/ipv4/ip_local_port_range')[
            0].strip()
        smallValue = output.split('\t')[0].strip()
        bigValue = output.split('\t')[1].strip()
        expect = self.ip_local_port_range.split()
        if (int(smallValue) < int(expect[0].strip()) or int(bigValue) > int(
                expect[1].strip())):
            parRes += "The value of net.ipv4.ip_local_port_range is" \
                      " incorrect, expected value is %s.\n" \
                      % self.ip_local_port_range
        parRes += "The value of net.ipv4.ip_local_port_range is %d %d." \
                  % (int(smallValue), int(bigValue))

        for port in portList.keys():
            if (int(port) <= int(bigValue) and int(port) >= int(smallValue)):
                flag = 1
                parRes += "\n %s" \
                          % ("The instance %s port \"%d\" is incorrect."
                             % (instance[portList[port]], int(port)))
        if (flag == 1):
            self.result.rst = ResultStatus.NG
        else:
            self.result.rst = ResultStatus.OK
        self.result.val = parRes
        self.result.raw = output
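
The port check above boils down to reading net.ipv4.ip_local_port_range and flagging instance ports that fall inside the ephemeral range. A minimal sketch of that logic, assuming a Linux host; the sample ports are hypothetical.

# Minimal sketch of the kernel port-range check; sample ports are made up.
def read_local_port_range(path='/proc/sys/net/ipv4/ip_local_port_range'):
    # The kernel file contains two whitespace-separated integers: low and high.
    with open(path) as f:
        low, high = f.read().split()
    return int(low), int(high)


def ports_inside_ephemeral_range(ports, low, high):
    # Instance listen ports should stay outside the ephemeral range,
    # otherwise the kernel may hand them out to outgoing connections.
    return [p for p in ports if low <= p <= high]


if __name__ == "__main__":
    low, high = read_local_port_range()
    sample_ports = [5432, 40000, 40001]          # hypothetical instance ports
    print("ip_local_port_range = %d %d" % (low, high))
    for port in ports_inside_ephemeral_range(sample_ports, low, high):
        print("port %d falls inside the ephemeral range" % port)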
127
script/gspylib/inspection/items/cluster/CheckProStartTime.py
Normal file
@ -0,0 +1,127 @@
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import os
from datetime import datetime
from gspylib.inspection.common import SharedFuncs
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.common.Common import DefaultValue
from gspylib.inspection.common.CheckResult import ResultStatus

monthdic = {"Jan": 1, "Feb": 2, "Mar": 3, "Apr": 4, "May": 5, "Jun": 6,
            "Jul": 7, "Aug": 8, "Sep": 9, "Oct": 10, "Nov": 11, "Dec": 12}


class CheckProStartTime(BaseItem):
    def __init__(self):
        super(CheckProStartTime, self).__init__(self.__class__.__name__)

    def doCheck(self):
        self.result.rst = ResultStatus.OK
        timelist = []
        gaussPro = "gaussdb"
        cmd = "ps -C %s -o lstart,args | grep -v grep | grep -v 'om_monitor'" \
              " 2>/dev/null" % gaussPro
        output = SharedFuncs.runShellCmd(cmd, self.user, self.mpprcFile)
        for line in output.splitlines()[1:]:
            resultList = line.split()
            year = resultList[4]
            month = monthdic[resultList[1]]
            day = resultList[2]
            time = resultList[3]
            timestring = "%s-%s-%s %s" % (year, month, day, time)
            dattime = datetime.strptime(timestring, '%Y-%m-%d %H:%M:%S')
            timelist.append(dattime)
        if (timelist):
            mintime = timelist[0]
            maxtime = timelist[0]
        else:
            mintime = None
            maxtime = None
        for tmpdatetime in timelist:
            if (tmpdatetime < mintime):
                mintime = tmpdatetime
            elif (tmpdatetime > maxtime):
                maxtime = tmpdatetime
        if (maxtime and mintime):
            if (int((maxtime - mintime).days) > 0 or int(
                    (maxtime - mintime).seconds) > 300):
                self.result.rst = ResultStatus.WARNING
                self.result.val = output
            else:
                self.result.rst = ResultStatus.OK
                self.result.val = output

    def postAnalysis(self, itemResult):
        errors = []
        timedic = {}
        valdic = {}
        allhost = []
        nghost = []
        Mintime = None
        for v in itemResult.getLocalItems():
            output = v.val
            timelist = []
            for line in output.splitlines()[1:]:
                resultList = line.split()
                year = resultList[4]
                month = monthdic[resultList[1]]
                day = resultList[2]
                time = resultList[3]
                timestring = "%s-%s-%s %s" % (year, month, day, time)
                dattime = datetime.strptime(timestring, '%Y-%m-%d %H:%M:%S')
                timelist.append(dattime)
            if (timelist):
                mintime = timelist[0]
                maxtime = timelist[0]
            else:
                mintime = None
                maxtime = None
            for tmpdatetime in timelist:
                if (tmpdatetime < mintime):
                    mintime = tmpdatetime
                elif (tmpdatetime > maxtime):
                    maxtime = tmpdatetime
            timelist = []
            if (maxtime and mintime):
                timelist.append(mintime)
                timelist.append(maxtime)
                if (Mintime and Mintime < mintime):
                    pass
                else:
                    Mintime = mintime
            if (timelist):
                timedic[v.host] = timelist
                valdic[v.host] = output
                allhost.append(v.host)
        for host in allhost:
            hostmax = timedic[host][1]
            if (int((hostmax - Mintime).days) > 0 or int(
                    (hostmax - Mintime).seconds) > 300):
                if (host not in nghost):
                    nghost.append(host)

        if (nghost):
            itemResult.rst = ResultStatus.WARNING
            resultStr = ""
            for host in nghost:
                resultStr += "%s:\n%s\n" % (host, valdic[host])
            itemResult.analysis = resultStr
        else:
            itemResult.rst = ResultStatus.OK
            itemResult.analysis = "Basically, all the gaussdb processes" \
                                  " started at the same time"
        return itemResult
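
The timestamp handling above relies on the fixed layout of `ps -o lstart` (weekday, month, day, time, year). A small self-contained sketch of that conversion and of the 300-second spread rule; the sample lines are fabricated.

# Minimal sketch: parse `ps -o lstart` timestamps and compare the spread
# against the 300-second threshold used above. Sample lines are made up.
from datetime import datetime

MONTHS = {"Jan": 1, "Feb": 2, "Mar": 3, "Apr": 4, "May": 5, "Jun": 6,
          "Jul": 7, "Aug": 8, "Sep": 9, "Oct": 10, "Nov": 11, "Dec": 12}


def parse_lstart(fields):
    # fields: ["Mon", "Mar", "2", "10:15:00", "2020", ...command...]
    _, mon, day, hms, year = fields[:5]
    return datetime.strptime("%s-%s-%s %s" % (year, MONTHS[mon], day, hms),
                             "%Y-%m-%d %H:%M:%S")


def started_together(lines, max_gap_seconds=300):
    times = [parse_lstart(line.split()) for line in lines if line.strip()]
    if not times:
        return True
    gap = max(times) - min(times)
    return gap.days == 0 and gap.seconds <= max_gap_seconds


if __name__ == "__main__":
    sample = ["Mon Mar  2 10:15:00 2020 gaussdb -D /data/dn1",
              "Mon Mar  2 10:16:30 2020 gaussdb -D /data/dn2"]
    print(started_together(sample))     # True: only 90 seconds apart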
@ -0,0 +1,45 @@
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import subprocess
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus


class CheckProcessStatus(BaseItem):
    def __init__(self):
        super(CheckProcessStatus, self).__init__(self.__class__.__name__)

    def doCheck(self):
        parRes = ""
        flag = 0
        self.result.raw = ""
        processList = ['gaussdb']
        for process in processList:
            # Query process status
            cmd = "ps -u %s -N | grep '\<%s\>'" % (self.user, process)
            self.result.raw += "%s\n" % cmd
            (status, output) = subprocess.getstatusoutput(cmd)
            # Resolve and output the execution results
            if (status == 0 and output.find("%s" % process) >= 0):
                parRes += "\n %s" % (output)
                flag = 1
        if (flag == 1):
            self.result.rst = ResultStatus.NG
            self.result.val = parRes
        else:
            self.result.rst = ResultStatus.OK
            self.result.val = "All process statuses are normal."
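
A hedged sketch of the residual-process probe above, outside the inspection framework: `ps -u USER -N` lists processes not owned by the cluster user, and any gaussdb entry found there is suspect. The user name is a placeholder.

# Minimal sketch, assuming a POSIX `ps`; the user name is a placeholder.
import subprocess


def find_foreign_gaussdb(user, process="gaussdb"):
    # `ps -u USER -N` lists processes NOT owned by USER; any gaussdb line
    # found there is running under an unexpected account.
    cmd = "ps -u %s -N -o user,pid,comm" % user
    status, output = subprocess.getstatusoutput(cmd)
    if status != 0:
        return []
    return [line for line in output.splitlines()[1:]
            if line.split()[-1] == process]


if __name__ == "__main__":
    for line in find_foreign_gaussdb("omm"):        # "omm" is hypothetical
        print("unexpected process:", line)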
36
script/gspylib/inspection/items/cluster/CheckReadonlyMode.py
Normal file
@ -0,0 +1,36 @@
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
from gspylib.inspection.common import SharedFuncs
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus


class CheckReadonlyMode(BaseItem):
    def __init__(self):
        super(CheckReadonlyMode, self).__init__(self.__class__.__name__)

    def doCheck(self):
        sqlcmd = "show default_transaction_read_only;"
        self.result.raw = sqlcmd
        output = SharedFuncs.runSqlCmd(sqlcmd, self.user, "", self.port,
                                       self.tmpPath, "postgres",
                                       self.mpprcFile)
        if (output == "off"):
            self.result.rst = ResultStatus.OK
        else:
            self.result.rst = ResultStatus.NG
        self.result.val = output
206
script/gspylib/inspection/items/cluster/CheckSpecialFile.py
Normal file
@ -0,0 +1,206 @@
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import os
import subprocess
from multiprocessing.dummy import Pool as ThreadPool
from gspylib.common.Common import DefaultValue
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus
from gspylib.os.gsfile import g_file


class CheckSpecialFile(BaseItem):
    def __init__(self):
        super(CheckSpecialFile, self).__init__(self.__class__.__name__)

    def getDiskPath(self):
        nodeDirs = []
        # get PGHOST dir
        tmpDir = DefaultValue.getEnv("PGHOST")
        nodeDirs.append(tmpDir)

        # get GPHOME dir
        gphome_path = DefaultValue.getEnv("GPHOME")
        nodeDirs.append(gphome_path)

        # get log dir
        log_path = DefaultValue.getEnv("GAUSSLOG")
        nodeDirs.append(log_path)

        # get GAUSSHOME dir
        gausshome_path = DefaultValue.getEnv("GAUSSHOME")
        nodeDirs.append(os.path.realpath(gausshome_path))

        hostName = DefaultValue.GetHostIpOrName()
        dbNode = self.cluster.getDbNodeByName(hostName)
        # including dn
        for dbInst in dbNode.datanodes:
            nodeDirs.append(dbInst.datadir)

        return nodeDirs

    def checkPathVaild(self, envValue):
        """
        function: check whether the path is valid
        input : envValue
        output: NA
        """
        if (envValue.strip() == ""):
            return 0
        # check path validity
        for rac in DefaultValue.PATH_CHECK_LIST:
            flag = envValue.find(rac)
            if flag >= 0:
                return 1
        return 0

    def ignorePath(self, path):
        # Some root-owned paths and file permissions need to be ignored
        ignorePathList = []
        toolPath = DefaultValue.getEnv("GPHOME")
        sudoPath = os.path.join(toolPath, "sudo")
        inspectionPath = os.path.join(toolPath, "script/inspection")
        ignorePathList.append("%s/script/gs_preinstall" % toolPath)
        ignorePathList.append("%s/script/gs_postuninstall" % toolPath)
        ignorePathList.append("%s/script/gs_checkos" % toolPath)

        scriptPath = os.path.join(toolPath, "script")
        scriptDirList = scriptPath.split('/')
        inspectionDirList = inspectionPath.split('/')
        # ignore own special files
        if (path in ignorePathList or os.path.dirname(path) == sudoPath):
            return True
        else:
            (filename, suffix) = os.path.splitext(path)
            pathDirList = path.split('/')
            # ignore .pyc files in GPHOME/script
            if (path.find(scriptPath) == 0 and pathDirList[:len(
                    scriptDirList)] == scriptDirList and suffix == ".pyc"):
                return True
            # ignore GPHOME/script/inspection dir
            elif (path.find(inspectionPath) == 0 and pathDirList[:len(
                    inspectionDirList)] == inspectionDirList):
                return True
            else:
                return False

    def checkSpecialChar(self):
        outputList = []
        failList = []
        pathList = []
        paths = self.getDiskPath()
        for path in paths:
            if (not path or not os.path.isdir(path)):
                continue
            else:
                pathList.append(path)
        pool = ThreadPool(DefaultValue.getCpuSet())
        results = pool.map(self.checkSingleSpecialChar, pathList)
        pool.close()
        pool.join()
        for outlist, flist in results:
            if (outlist):
                outputList.extend(outlist)
            if (flist):
                failList.extend(flist)
        if (len(outputList) > 0):
            outputList = DefaultValue.Deduplication(outputList)
        if (failList):
            failList = DefaultValue.Deduplication(failList)
        return outputList, failList

    def checkSingleSpecialChar(self, path):
        # Check a single path
        outputList = []
        failList = []
        cmd = "find '%s' -name '*'" % path
        (status, output) = subprocess.getstatusoutput(cmd)
        FileList = output.split('\n')
        while '' in FileList:
            FileList.remove('')
        if (status != 0 and output.find("Permission denied") > 0):
            for realPath in FileList:
                if (realPath.find("Permission denied") > 0):
                    failList.append(realPath)
                elif (self.checkPathVaild(realPath) != 0):
                    outputList.append(realPath)
        else:
            for realPath in FileList:
                if (self.checkPathVaild(realPath) != 0):
                    outputList.append(realPath)
        return outputList, failList

    #########################################################
    # get the files under all relevant directories whose
    # owner is not the current executing user
    #########################################################
    def checkErrorOwner(self, ownername):
        outputList = []
        failList = []
        path = ""
        for path in self.getDiskPath():
            if (not path or not os.path.isdir(path)):
                continue
            cmd = "find '%s' -iname '*' ! -user %s -print" % (path, ownername)
            (status, output) = subprocess.getstatusoutput(cmd)
            if (status == 0 and output != ""):
                pathList = output.split("\n")
                for path in pathList:
                    if (self.ignorePath(path)):
                        continue
                    outputList.append(path)
            elif (output.find("Permission denied") > 0):
                pathList = output.split("\n")
                for path in pathList:
                    if (path.find("Permission denied") > 0):
                        failList.append(path)
                        continue
                    if (self.ignorePath(path)):
                        continue
                    outputList.append(path)
        if (len(outputList) > 0):
            outputList = DefaultValue.Deduplication(outputList)
        return outputList, failList

    def doCheck(self):
        parRes = ""
        flag = 0
        output = ""
        outputList, failList = self.checkSpecialChar()
        for output in outputList:
            if (output != ""):
                flag = 1
                parRes += "\nSpecial characters file: \"%s\"" % output

        outputList, errorList = self.checkErrorOwner(self.user)
        for output in outputList:
            if (output != ""):
                flag = 1
                parRes += "\nFile owner should be %s." \
                          " Incorrect owner file: \"%s\"" \
                          % (self.user, output)
        failList.extend(errorList)
        if (failList):
            flag = 1
            failList = DefaultValue.Deduplication(failList)
            parRes += "\n%s" % ("\n".join(failList))
        if (flag == 1):
            self.result.rst = ResultStatus.NG
            self.result.val = parRes
        else:
            self.result.rst = ResultStatus.OK
            self.result.val = "All files are normal."
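
The directory scan above fans out one worker per path through multiprocessing.dummy (a thread pool). A compact sketch of the same pattern using a pure-Python walker instead of `find`; the character set and roots are illustrative only.

# Minimal sketch of the thread-pool fan-out used above: one worker per
# directory, each returning the paths that contain suspicious characters.
# The character set and directory list are illustrative only.
import os
from multiprocessing.dummy import Pool as ThreadPool

SPECIAL_CHARS = set("|;&$<>`\\'\"{}()[]~*?!")


def scan_directory(root):
    hits = []
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            path = os.path.join(dirpath, name)
            if any(ch in SPECIAL_CHARS for ch in path):
                hits.append(path)
    return hits


def scan_directories(roots, workers=4):
    roots = [r for r in roots if r and os.path.isdir(r)]
    pool = ThreadPool(workers)
    try:
        results = pool.map(scan_directory, roots)
    finally:
        pool.close()
        pool.join()
    # flatten and de-duplicate, like Deduplication() above
    return sorted({p for hits in results for p in hits})


if __name__ == "__main__":
    print(scan_directories(["/tmp"]))    # example root, adjust as needed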
45
script/gspylib/inspection/items/cluster/CheckUpVer.py
Normal file
@ -0,0 +1,45 @@
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import os
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus
from gspylib.common.ErrorCode import ErrorCode
from gspylib.os.gsfile import g_file


class CheckUpVer(BaseItem):
    def __init__(self):
        super(CheckUpVer, self).__init__(self.__class__.__name__)
        self.upgradepath = None

    def preCheck(self):
        # check the threshold was set correctly
        if (not self.threshold.__contains__("upgradepath")):
            raise Exception(ErrorCode.GAUSS_502["GAUSS_50236"]
                            % "The upgrade path")
        self.upgradepath = self.threshold['upgradepath']
        if not os.path.isfile(os.path.join(self.upgradepath, "version.cfg")):
            raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"]
                            % ("new version file[%s]" %
                               os.path.join(self.upgradepath, "version.cfg")))

    def doCheck(self):
        packageFile = os.path.realpath(
            os.path.join(self.upgradepath, "version.cfg"))
        output = g_file.readFile(packageFile)
        self.result.rst = ResultStatus.OK
        self.result.val = "".join(output)
@ -0,0 +1,70 @@
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import subprocess
from gspylib.inspection.common import SharedFuncs
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus


class CheckArchiveParameter(BaseItem):
    def __init__(self):
        super(CheckArchiveParameter, self).__init__(self.__class__.__name__)

    def doCheck(self):
        sqlcmd = "show archive_mode;"
        self.result.raw = sqlcmd

        output = SharedFuncs.runSqlCmd(sqlcmd, self.user, "", self.port,
                                       self.tmpPath, "postgres",
                                       self.mpprcFile)
        if output.strip() == "on":
            sqlcmd = "show archive_command;"
            self.result.raw = sqlcmd
            output = SharedFuncs.runSqlCmd(sqlcmd, self.user, "", self.port,
                                           self.tmpPath, "postgres",
                                           self.mpprcFile)
            cooInst = self.cluster.getDbNodeByName(self.host).coordinators[0]
            dataInst = self.cluster.getDbNodeByName(self.host).datanodes[0]
            if ((self.cluster.isSingleInstCluster() and not (
                    output.find("%s" % dataInst.datadir) >= 0)) and not (
                    output.find("%s" % cooInst.datadir) >= 0)):
                self.result.rst = ResultStatus.NG
            else:
                self.result.rst = ResultStatus.OK
        else:
            self.result.rst = ResultStatus.OK
        self.result.val = output

    def doSet(self):
        resultStr = ""
        cooInst = self.cluster.getDbNodeByName(self.host).coordinators[0]
        dataInst = self.cluster.getDbNodeByName(self.host).datanodes[0]
        # only the local data directory is substituted into the command;
        # %p and %f are left for the server to expand at archive time
        if self.cluster.isSingleInstCluster():
            cmd = "gs_guc reload -N all -I " \
                  "all -c \"archive_command = 'cp -P --remove-destination" \
                  " %%p %s/pg_xlog/archive/%%f'\" " % dataInst.datadir
        else:
            cmd = "gs_guc reload -N all -I " \
                  "all -c \"archive_command = 'cp -P --remove-destination" \
                  " %%p %s/pg_xlog/archive/%%f'\"" % cooInst.datadir
        (status, output) = subprocess.getstatusoutput(cmd)
        if (status != 0):
            resultStr = "Failed to set ArchiveMode.\n Error : %s." % output
            resultStr += "The cmd is %s " % cmd
        else:
            resultStr = "Set ArchiveMode successfully."
        self.result.val = resultStr
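
A small sketch of the pass/fail rule applied above: archiving counts as correctly configured only when archive_mode is on and archive_command points into the instance's own data directory; the sample values are invented.

# Minimal sketch of the pass/fail rule used above; sample values are invented.
def archive_config_ok(archive_mode, archive_command, datadir):
    # archive_mode off is accepted as-is; when it is on, the archive target
    # must live under the instance data directory.
    if archive_mode.strip() != "on":
        return True
    return datadir in archive_command


if __name__ == "__main__":
    datadir = "/data/dn1"                                        # hypothetical
    cmd = "cp -P --remove-destination %p " + datadir + "/pg_xlog/archive/%f"
    print(archive_config_ok("on", cmd, datadir))                 # True
    print(archive_config_ok("on", "cp %p /backup/%f", datadir))  # False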