Improve the usability of the expansion tool
parent 8a4fd42f13
commit dd4acdfde0
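This commit adds a `gs_om -t generate_xml` action and teaches gs_expansion to call it, so an expansion can run without a hand-written cluster XML: gs_om derives the configuration from the running cluster (plus any new hosts passed via --add-hostname/--add-hostip) and records the generated file's path in a temporary file that gs_expansion then reads back. A minimal sketch of driving the new action directly, assuming a cluster user named omm and two placeholder hosts (none of these values come from the commit):

    import subprocess

    user = "omm"  # placeholder cluster user, not taken from this commit
    env_file = "/home/%s/.bashrc" % user  # default env file used by gs_expansion
    # new hosts are passed as comma-separated hostname/IP lists (placeholder values)
    cmd = ("source %s; gs_om -t generate_xml "
           "--add-hostname=node3,node4 --add-hostip=192.168.0.3,192.168.0.4") % env_file
    status, output = subprocess.getstatusoutput(cmd)
    if status != 0:
        raise Exception("generate_xml failed: %s" % output)
    # gs_om records the path of the generated XML in a temp file under the user's home
    with open("/home/%s/tmp_generate_xml" % user) as fd:
        xml_file = fd.read().strip()
    print("Generated cluster XML: %s" % xml_file)

When gs_expansion runs as root it wraps the same command in su - <user> -c '<cmd>', as the generate_xml() method added below shows.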
@@ -23,6 +23,7 @@ import os
import sys
import subprocess
import socket
import datetime
package_path = os.path.dirname(os.path.realpath(__file__))
ld_path = package_path + "/gspylib/clib"
if 'LD_LIBRARY_PATH' not in os.environ:
@@ -34,6 +35,8 @@ if not os.environ.get('LD_LIBRARY_PATH').startswith(ld_path):
os.execve(os.path.realpath(__file__), sys.argv, os.environ)

sys.path.append(sys.path[0])
from gspylib.common.copy_python_lib import copy_lib
copy_lib()
from gspylib.common.DbClusterInfo import dbClusterInfo, \
checkPathVaild, dbNodeInfo, instanceInfo
from gspylib.common.GaussLog import GaussLog
@@ -41,6 +44,7 @@ from gspylib.common.Common import DefaultValue
from gspylib.common.ErrorCode import ErrorCode
from gspylib.common.ParallelBaseOM import ParallelBaseOM
from gspylib.common.ParameterParsecheck import Parameter
from gspylib.threads.SshTool import SshTool
from impl.expansion.ExpansionImpl import ExpansionImpl
from impl.expansion.expansion_impl_with_cm import ExpansionImplWithCm
from impl.expansion.expansion_impl_with_cm_local import ExpansionImplWithCmLocal
@@ -49,6 +53,7 @@ from domain_utils.cluster_file.cluster_log import ClusterLog
from base_utils.os.env_util import EnvUtil
from base_utils.os.user_util import UserUtil
from base_utils.os.cmd_util import CmdUtil
from base_utils.os.file_util import FileUtil

ENV_LIST = ["MPPDB_ENV_SEPARATE_PATH", "GPHOME", "PATH",
"LD_LIBRARY_PATH", "PYTHONPATH", "GAUSS_WARNING_TYPE",
@@ -190,8 +195,6 @@ General options:
GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35701"] % "-U")
if len(self.group) == 0:
GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35701"] % "-G")
if len(self.xmlFile) == 0:
GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35701"] % "-X")
if len(self.newHostList) == 0:
GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35701"] % "-h")
# check if upgrade action is exist
@@ -232,7 +235,6 @@ General options:
self.clusterInfo = clusterInfo
hostNameIpDict = clusterInfo.initFromXml(self.xmlFile)
clusterDict = clusterInfo.get_cluster_directory_dict()
self.nodeNameList = clusterInfo.getClusterNodeNames()

# get corepath and toolpath from xml file
corePath = clusterInfo.readClustercorePath(self.xmlFile)
@@ -308,6 +310,41 @@ General options:
if output != self.user:
subprocess.getstatusoutput("chown {}:{} {}".format(self.user, self.group, self.logger.logFile))
self.logger.ignoreErr = True

def generate_xml(self):
if self.xmlFile:
return
self.logger.log("Start generate xml")
# get current path
currentPath = os.path.dirname(os.path.realpath(__file__))
gs_om = os.path.join(currentPath, "gs_om")
# get new hostname and hostip
hostname_str, hostip_str = self.get_new_hostname_and_hostip()
# execute gs_om -t generate_xml
if not self.envFile:
self.envFile = "/home/%s/.bashrc" % self.user
cmd = "source %s; %s -t generate_xml --add-hostname=%s --add-hostip=%s" % (self.envFile, gs_om, hostname_str, hostip_str)
if os.getuid() == 0:
cmd = "su - %s -c '%s'" % (self.user, cmd)
status, output = subprocess.getstatusoutput(cmd)
if status != 0:
GaussLog.exitWithError(ErrorCode.GAUSS_502["GAUSS_50231"] % self.xmlFile)
xml_tmp_file = "/home/%s/tmp_generate_xml" % self.user
if not os.path.exists(xml_tmp_file):
GaussLog.exitWithError(ErrorCode.GAUSS_502["GAUSS_50201"] % xml_tmp_file)
self.xmlFile = FileUtil.readFile(xml_tmp_file)[0].strip()
# delete xml tmp file
FileUtil.removeFile(xml_tmp_file)
self.logger.log("Successfully generate xml, the xml file is %s" % self.xmlFile)

def get_new_hostname_and_hostip(self):
hostip_str = ",".join(self.newHostList)
hostname_list = []
for ip in self.newHostList:
hostname = socket.gethostbyaddr(ip)[0]
hostname_list.append(hostname)
hostname_str = ",".join(hostname_list)
return hostname_str, hostip_str

def getExpansionInfo(self):
self._getClusterInfoDict()
@@ -345,6 +382,15 @@ General options:
if currentHost != primaryHost:
GaussLog.exitWithError(ErrorCode.GAUSS_501["GAUSS_50110"] %
(currentHost + ", which is not primary"))

def init_cluster_info_all_node(self):
"""
init cluster info for all node
"""
clusterInfo = dbClusterInfo()
clusterInfo.initFromXml(self.xmlFile)
self.clusterInfo = clusterInfo
self.nodeNameList = clusterInfo.getClusterNodeNames()

def checkTrust(self, hostList = None):
"""
@@ -356,33 +402,43 @@ General options:
hostList += backIpList
gpHome = EnvUtil.getEnv("GPHOME")
psshPath = "python3 %s/script/gspylib/pssh/bin/pssh" % gpHome
rootSSHExceptionHosts = []
individualSSHExceptionHosts = []
ssh_exception_hosts = []
for host in hostList:
if os.getuid() == 0:
# check root's trust
check_root_trust_cmd = "%s -s -H %s 'pwd'" % (psshPath, host)
(status, output) = subprocess.getstatusoutput(check_root_trust_cmd)
# check individual user's trust
check_user_trust_cmd = "su - %s -c '%s -s -H %s pwd'" % (self.user, psshPath, host)
(status, output) = subprocess.getstatusoutput(check_user_trust_cmd)
if status != 0:
rootSSHExceptionHosts.append(host)
# check individual user's trust
check_user_trust_cmd = "su - %s -c '%s -s -H %s pwd'" % (self.user, psshPath, host)
ssh_exception_hosts.append(host)
# check current user's trust
check_user_trust_cmd = "%s -s -H %s 'pwd'" % (psshPath, host)
(status, output) = subprocess.getstatusoutput(check_user_trust_cmd)
if status != 0:
individualSSHExceptionHosts.append(host)
ssh_exception_hosts.append(host)

# output ssh exception info if ssh connect failed
if rootSSHExceptionHosts or individualSSHExceptionHosts:
sshExceptionInfo = ""
if rootSSHExceptionHosts:
sshExceptionInfo += "\n"
sshExceptionInfo += ", ".join(rootSSHExceptionHosts)
sshExceptionInfo += " by root"
if individualSSHExceptionHosts:
sshExceptionInfo += "\n"
sshExceptionInfo += ", ".join(individualSSHExceptionHosts)
sshExceptionInfo += " by individual user"
GaussLog.exitWithError(ErrorCode.GAUSS_511["GAUSS_51100"] %
sshExceptionInfo)
if ssh_exception_hosts:
self.logger.log("The cluster need create ssh trust")
self.create_trust()
else:
self.logger.log("The cluster no need create ssh trust")

def create_trust(self):
cluster_info = dbClusterInfo()
cluster_info.initFromXml(self.xmlFile)
all_ips = []
sships = cluster_info.getClusterSshIps()
for ips in sships:
all_ips.extend(ips)
if os.getuid() == 0:
self.create_trust_for_user("root", all_ips)
self.create_trust_for_user(self.user, all_ips)

def create_trust_for_user(self, user, all_ips):
self.logger.log("Please enter password for %s" % user)
self.sshTool = SshTool(self.nodeNameList, self.logFile, DefaultValue.TIMEOUT_PSSH_PREINSTALL)
self.sshTool.createTrust(user, all_ips)
self.logger.debug("Successfully created SSH trust for the %s" % user)

def checkEnvfile(self):
"""
@@ -516,10 +572,7 @@ class ExpansionClusterInfo(dbClusterInfo):
Find instance in b_list
"""
for inst in b_list:
if inst.instanceId == a_inst.instanceId:
return inst
elif inst.instanceRole in [DefaultValue.INSTANCE_ROLE_CMSERVER,
DefaultValue.INSTANCE_ROLE_CMAGENT]:
if inst.hostname == a_inst.hostname:
return inst
raise Exception("Instance {0} not config in XML.".format(inst.instanceId))

@@ -626,9 +679,11 @@ if __name__ == "__main__":
expansion.parseCommandLine()
expansion.checkParameters()
expansion.initLogs()
expansion.getExpansionInfo()
expansion.checkEnvfile()
expansion.generate_xml()
expansion.init_cluster_info_all_node()
expansion.checkTrust()
expansion.getExpansionInfo()
expansion.checkXmlIncludeNewHost()
expansion.checkExecutingHost()
expansion.checkTrust()
expansion.expand_run(expansion)
script/gs_om (29 changed lines)
@@ -52,6 +52,7 @@ ACTION_QUERY = "query"
ACTION_KERBEROS = "kerberos"
ACTION_REFRESHCONF = "refreshconf"
ACTION_KILLMONITOR = "killmonitor"
ACTION_GENERATE_XML = "generate_xml"

# postgis
ACTION_DEL_POSTGIs = "rmlib"
@@ -111,6 +112,10 @@ class CmdOptions():
# view
self.is_dynamic = False

# generate_xml
self.add_hostips = ""
self.add_hostnames = ""


###########################################
class OperationManager(ParallelBaseOM):
@@ -150,6 +155,7 @@ Usage:
gs_om -t query [-o OUTPUT] [--time-out=SECS]
gs_om -t refreshconf
gs_om -t killmonitor
gs_om -t generate_xml [--add-hostname=hostname1,hostname2] [--add-hostip=hostip1,hostip2]

General options:
-t Type of the OM command.
@@ -461,6 +467,12 @@ Install options:
if (ParaDict.__contains__("logFile")):
self.g_opts.logFile = ParaDict.get("logFile")

def parse_generate_xml(self, ParaDict):
if (ParaDict.__contains__("add_hostname")):
self.g_opts.add_hostnames = ParaDict.get("add_hostname")
if (ParaDict.__contains__("add_hostip")):
self.g_opts.add_hostips = ParaDict.get("add_hostip")

def parseCommandLine(self):
"""
function:Parse command line and save to global variable
@@ -510,6 +522,8 @@ Install options:
self.parseLog(ParaDict)
# Parse Az info
self.parseAZ(ParaDict)
# Parse generate xml parameter
self.parse_generate_xml(ParaDict)

###########################################################################
# Check parameters for all operations
@@ -552,6 +566,8 @@ Install options:
pass
elif (self.g_opts.action == ACTION_KILLMONITOR):
pass
elif (self.g_opts.action == ACTION_GENERATE_XML):
self.check_generate_xml_parameter()
else:
GaussLog.exitWithError(ErrorCode.GAUSS_500["GAUSS_50004"] % "t")

@@ -764,6 +780,14 @@ Install options:
return wrongChar
else:
return True

def check_generate_xml_parameter(self):
add_hostnames = self.g_opts.add_hostnames.split(',')
add_hostips = self.g_opts.add_hostips.split(',')

if (len(add_hostnames) != len(add_hostips)):
GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35713"] %
(len(add_hostnames), len(add_hostips)))


def main():
@@ -798,7 +822,8 @@ def main():
ACTION_VIEW,
ACTION_QUERY,
ACTION_REFRESHCONF,
ACTION_KILLMONITOR
ACTION_KILLMONITOR,
ACTION_GENERATE_XML
]):
raise Exception(ErrorCode.GAUSS_531['GAUSS_53104']
% ("gs_om -t " + manager.g_opts.action))
@@ -832,6 +857,8 @@ def main():
impl.doRefreshConf()
elif (manager.g_opts.action == ACTION_KILLMONITOR):
impl.kill_om_monitor()
elif (manager.g_opts.action == ACTION_GENERATE_XML):
impl.do_generate_xml()

manager.logger.closeLog()
except Exception as e:
@@ -105,6 +105,7 @@ class Preinstall(ParallelBaseOM):
self.dss_vgname = ""
self.enable_perf_config = False
self.skip_cgroup_set = False
self.cluster_core_path = ""

def usage(self):
"""
@@ -493,6 +494,7 @@ General options:
if len(temp_nodes.split(',')) < 2:
self.isSingle = True
os.environ[ClusterConstants.TOOL_PATH_ENV] = self.clusterToolPath
self.cluster_core_path = self.clusterInfo.readClustercorePath(self.xmlFile)

self.logger.log("Parsing the configuration file.", "addStep")
try:
@@ -1199,7 +1199,9 @@ class ErrorCode():
"GAUSS_35709": "[GAUSS-35709] The %s of %s is not %s.",
"GAUSS_35710": "[GAUSS-35710] Generate static file [%s] not found.",
"GAUSS_35711": "[GAUSS-35711] %s in xml is not consistent with that in cluster.",
"GAUSS_35712": "[GAUSS-35712] User [%s] is not in the group [%s]."
"GAUSS_35712": "[GAUSS-35712] User [%s] is not in the group [%s].",
"GAUSS_35713": "[GAUSS-35713] The number of parameter hostname is %s, the number of parameter hostip is %s,"
"The values of these two parameters are different."
}

##########################################################################
@@ -123,6 +123,7 @@ gs_om_kerberos = ["-t:", "-?", "--help", "-V", "--version", "-m:", "-U:",
"-X:", "-l:", "--krb-server", "--krb-client"]
gs_om_refreshconf = ["-t:", "-?", "--help", "-V", "--version", "-l:"]
gs_om_killmonitor = ["-t:", "-?", "--help", "-V", "--version", "-l:"]
gs_om_generate_xml = ["-t:", "-?", "--help", "-V", "--version", "-l:", "--add-hostname=", "--add-hostip="]
# gs_upgradectl child branch
# AP and TP are same
gs_upgradectl_chose_strategy = ["-t:", "-?", "--help", "-V", "--version",
@@ -167,7 +168,8 @@ ParameterDict = {"preinstall": gs_preinstall,
"refreshconf": gs_om_refreshconf,
"expansion": gs_expansion,
"dropnode": gs_dropnode,
"killmonitor": gs_om_killmonitor
"killmonitor": gs_om_killmonitor,
"generate_xml": gs_om_generate_xml
}

# List of scripts with the -t parameter
@@ -175,7 +177,7 @@ special_list = ["gs_om", "backup", "upgradectl"]

# The -t parameter list
action_om = ["start", "stop", "status", "restart", "generateconf", "kerberos",
"cert", "view", "query", "refreshconf", "killmonitor"]
"cert", "view", "query", "refreshconf", "killmonitor", "generate_xml"]
action_upgradectl = ["chose-strategy", "auto-upgrade", "auto-rollback",
"commit-upgrade", "upgrade-cm"]

@@ -341,6 +343,8 @@ class Parameter():
"--dbname": "dbname",
"--dbuser": "dbuser",
"--nodeId": "nodeId",
"--add-hostname": "add_hostname",
"--add-hostip": "add_hostip",
"--security-mode": "security_mode",
"--cluster-number": "cluster_number"
}
script/gspylib/common/generate_xml.py (new file, 374 lines)
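The new module below is what gs_om's generate_xml action drives (see the OmImplOLAP.do_generate_xml hunk further down): it copies the last existing node's layout for each added host, writes the formatted XML to /home/<user>/xml_output_<timestamp>.xml, and records that path in /home/<user>/tmp_generate_xml. A minimal sketch of the expected call pattern, assuming a dbClusterInfo loaded from an existing cluster XML; the path and host values are placeholders, not taken from the commit:

    from gspylib.common.DbClusterInfo import dbClusterInfo
    from gspylib.common.generate_xml import GenerateXml

    cluster_info = dbClusterInfo()
    cluster_info.initFromXml("/path/to/existing_cluster.xml")  # placeholder path

    gen_xml = GenerateXml()
    # without new hosts: the XML describes the current cluster as-is
    gen_xml.do_generate_xml(cluster_info)
    # with new hosts: pass a {hostname: ip} dict for the nodes being added
    gen_xml.do_generate_xml(cluster_info, {"node3": "192.168.0.3"})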
@ -0,0 +1,374 @@
|
||||
# -*- coding:utf-8 -*-
|
||||
#############################################################################
|
||||
# Copyright (c) 2024 Huawei Technologies Co.,Ltd.
|
||||
#
|
||||
# openGauss is licensed under Mulan PSL v2.
|
||||
# You can use this software according to the terms
|
||||
# and conditions of the Mulan PSL v2.
|
||||
# You may obtain a copy of Mulan PSL v2 at:
|
||||
#
|
||||
# http://license.coscl.org.cn/MulanPSL2
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OF ANY KIND,
|
||||
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
|
||||
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
|
||||
# See the Mulan PSL v2 for more details.
|
||||
# ----------------------------------------------------------------------------
|
||||
# Description : Generate_xml.py is generate xml base on cluster
|
||||
#############################################################################
|
||||
import os
|
||||
import pwd
|
||||
import datetime
|
||||
import subprocess
|
||||
import copy
|
||||
import xml.etree.ElementTree as ET
|
||||
import xml.dom.minidom as minidom
|
||||
|
||||
from base_utils.os.env_util import EnvUtil
|
||||
from gspylib.common.ErrorCode import ErrorCode
|
||||
|
||||
|
||||
class ClusterKey:
|
||||
# cluster label
|
||||
CLUSTER_NAME = "clusterName"
|
||||
NODENAMES = "nodeNames"
|
||||
GAUSS_DB_APP_PATH = "gaussdbAppPath"
|
||||
GAUSS_DB_LOG_PATH = "gaussdbLogPath"
|
||||
GAUSS_MPP_DB_PATH = "tmpMppdbPath"
|
||||
GAUSS_DB_TOOL_PATH = "gaussdbToolPath"
|
||||
CORE_PATH = "corePath"
|
||||
BAKCIP1S = "backIp1s"
|
||||
|
||||
# ddes
|
||||
ENABLE_DSS = "enable_dss"
|
||||
DSS_HOME = "dss_home"
|
||||
SS_DSS_VG_NAME = "ss_dss_vg_name"
|
||||
DSS_VG_INFO = "dss_vg_info"
|
||||
VOTING_DISK_PATH = "votingDiskPath"
|
||||
SHARE_DISK_DIR = "shareDiskDir"
|
||||
DSS_SSL_ENABLE = "dss_ssl_enable"
|
||||
|
||||
# dcf
|
||||
ENABLE_DCF = "enable_dcf"
|
||||
DCF_CONFIG = "dcf_config"
|
||||
|
||||
# device labes
|
||||
# om
|
||||
NAME = "name"
|
||||
AZNAME = "azName"
|
||||
AZPRIORITY = "azPriority"
|
||||
BACKIP1 = "backIp1"
|
||||
SSHIP1 = "sshIp1"
|
||||
DATA_NUM = "dataNum"
|
||||
DATA_PORT_BASE = "dataPortBase"
|
||||
DATA_NODE1 = "dataNode1"
|
||||
DATA_NODE1_SYNCNUM = "dataNode1_syncNum"
|
||||
|
||||
# cm
|
||||
CMS_NUM = "cmsNum"
|
||||
CM_SERVER_PORT_BASE = "cmServerPortBase"
|
||||
CM_SERVER_LISTEN_IP1 = "cmServerListenIp1"
|
||||
CM_SERVER_HA_IP1 = "cmServerHaIp1"
|
||||
CM_SERVER_LEVEL = "cmServerlevel"
|
||||
CM_SERVER_RELATION = "cmServerRelation"
|
||||
CM_SERVER_PORT_STANDBY = "cmServerPortStandby"
|
||||
CM_DIR = "cmDir"
|
||||
|
||||
# cascadeRole
|
||||
CASCADEROLE = "cascadeRole"
|
||||
|
||||
CLUSTER_LABEL = [CLUSTER_NAME, NODENAMES, GAUSS_DB_APP_PATH, GAUSS_DB_LOG_PATH, GAUSS_MPP_DB_PATH,
|
||||
GAUSS_DB_TOOL_PATH, CORE_PATH, BAKCIP1S]
|
||||
|
||||
DSS_LABEL = [ENABLE_DSS, DSS_HOME, SS_DSS_VG_NAME, DSS_VG_INFO, VOTING_DISK_PATH,
|
||||
SHARE_DISK_DIR, DSS_SSL_ENABLE]
|
||||
|
||||
DCF_LABEL = [ENABLE_DCF, DCF_CONFIG]
|
||||
|
||||
DEVICE_LABEL_SN = [NAME, AZNAME, AZPRIORITY, BACKIP1, SSHIP1, DATA_NUM, DATA_PORT_BASE,
|
||||
DATA_NODE1, DATA_NODE1_SYNCNUM, CASCADEROLE]
|
||||
|
||||
CM_LABEL_SN = [CMS_NUM, CM_SERVER_PORT_BASE, CM_SERVER_LISTEN_IP1, CM_SERVER_HA_IP1,
|
||||
CM_SERVER_LEVEL, CM_SERVER_RELATION, CM_SERVER_PORT_STANDBY, CM_DIR]
|
||||
|
||||
|
||||
class GenerateXml:
|
||||
|
||||
def __init__(self):
|
||||
self.tree = None
|
||||
self.root = None
|
||||
self.cluster_label = ""
|
||||
self.device_list_label = ""
|
||||
self.device_label = ""
|
||||
self.hostip_list = []
|
||||
self.hostname_list = []
|
||||
self.hostip_str = ""
|
||||
self.hostname_str = ""
|
||||
self.cm_flag = False
|
||||
|
||||
def create_init_label(self):
|
||||
root = ET.Element("ROOT")
|
||||
cluster = ET.Element("CLUSTER")
|
||||
device_list = ET.Element("DEVICELIST")
|
||||
root.append(cluster)
|
||||
root.append(device_list)
|
||||
self.cluster_label = cluster
|
||||
self.device_list_label = device_list
|
||||
self.tree = ET.ElementTree(root)
|
||||
self.root = self.tree.getroot()
|
||||
|
||||
def has_new_host(self, new_host_info):
|
||||
if new_host_info:
|
||||
self.new_flag = True
|
||||
else:
|
||||
self.new_flag = False
|
||||
|
||||
def update_cluster_node_info(self, cluster_info, new_host_info):
|
||||
"""
|
||||
function: update cluster new node info
|
||||
input : cluster_info new_host_info
|
||||
output : NA
|
||||
"""
|
||||
if not self.new_flag:
|
||||
return
|
||||
for hostname, hostip in new_host_info.items():
|
||||
new_node = copy.deepcopy(cluster_info.dbNodes[-1])
|
||||
new_node.name = hostname
|
||||
new_node.backIps[0] = hostip
|
||||
new_node.sshIps[0] = hostip
|
||||
cluster_info.dbNodes.append(new_node)
|
||||
|
||||
# if has cm, need copy cmservers cmagents
|
||||
if self.cm_flag:
|
||||
cm_server = new_node.cmservers[0]
|
||||
cm_server.hostname = hostname
|
||||
cm_server.listenIps[0] = hostip
|
||||
cm_server.haIps[0] = hostip
|
||||
|
||||
def do_generate_xml(self, cluster_info, new_host_info=None):
|
||||
"""
|
||||
function: Generate XML based on cluster
|
||||
input : cluster_info new_host_info
|
||||
output : NA
|
||||
"""
|
||||
# If the parameter has add_ Hostname
|
||||
self.has_new_host(new_host_info)
|
||||
# Add node information to the existing cluster information
|
||||
self.update_cluster_node_info(cluster_info, new_host_info)
|
||||
# if has cm
|
||||
self.has_cm(cluster_info)
|
||||
# get cluster info
|
||||
cluster_info_dict = self.get_cluster_info(cluster_info)
|
||||
# generate xml
|
||||
self.create_init_label()
|
||||
self.set_cluster_info(cluster_info_dict)
|
||||
self.set_device_info(cluster_info_dict)
|
||||
# output
|
||||
self.output_xml()
|
||||
|
||||
def get_cluster_info(self, cluster_info):
|
||||
cluster_info_dict = {}
|
||||
# cluster
|
||||
self.gen_cluster(cluster_info, cluster_info_dict)
|
||||
# ddes
|
||||
self.gen_ddes(cluster_info, cluster_info_dict)
|
||||
# dcf
|
||||
self.gen_dcf(cluster_info, cluster_info_dict)
|
||||
# om
|
||||
self.gen_om(cluster_info, cluster_info_dict)
|
||||
# cm
|
||||
self.gen_cm(cluster_info, cluster_info_dict)
|
||||
# cascadeRole
|
||||
self.gen_cascade_role(cluster_info, cluster_info_dict)
|
||||
|
||||
cluster_info_dict[ClusterKey.NODENAMES] = self.hostname_str
|
||||
cluster_info_dict[ClusterKey.BAKCIP1S] = self.hostip_str
|
||||
|
||||
return cluster_info_dict
|
||||
|
||||
def gen_cluster(self, cluster_info, cluster_info_dict):
|
||||
gp_home = EnvUtil.getEnv('GPHOME')
|
||||
tmp_path = EnvUtil.getEnv("PGHOST")
|
||||
core_path = EnvUtil.getEnv("COREPATH")
|
||||
cluster_info_dict[ClusterKey.CLUSTER_NAME] = cluster_info.name
|
||||
cluster_info_dict[ClusterKey.GAUSS_DB_APP_PATH] = cluster_info.appPath
|
||||
cluster_info_dict[ClusterKey.GAUSS_DB_LOG_PATH] = cluster_info.logPath
|
||||
cluster_info_dict[ClusterKey.GAUSS_MPP_DB_PATH] = tmp_path
|
||||
cluster_info_dict[ClusterKey.GAUSS_DB_TOOL_PATH] = gp_home
|
||||
cluster_info_dict[ClusterKey.CORE_PATH] = core_path
|
||||
|
||||
def gen_ddes(self, cluster_info, cluster_info_dict):
|
||||
username = pwd.getpwuid(os.getuid()).pw_name
|
||||
dss_home = EnvUtil.get_dss_home(username)
|
||||
dss_ssl = EnvUtil.get_dss_ssl_status(username)
|
||||
vg_name = EnvUtil.getEnv('VGNAME')
|
||||
if not dss_home:
|
||||
return
|
||||
|
||||
cm_conf_file = os.path.normpath(os.path.join(dss_home, 'cfg', 'dss_cm_conf.ini'))
|
||||
vg_conf_file = os.path.normpath(os.path.join(dss_home, 'cfg', 'dss_vg_conf.ini'))
|
||||
|
||||
voting_disk_path = ""
|
||||
share_disk_dir = ""
|
||||
if not os.path.exists(cm_conf_file):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] % cm_conf_file)
|
||||
with open(cm_conf_file, 'r') as fd:
|
||||
lines = fd.readlines()
|
||||
voting_disk_path = lines[0].strip()
|
||||
share_disk_dir = lines[1].strip()
|
||||
|
||||
dss_vg_info = ""
|
||||
if not os.path.exists(vg_conf_file):
|
||||
raise Exception(ErrorCode.GAUSS_502["GAUSS_50201"] % vg_conf_file)
|
||||
with open(vg_conf_file, 'r') as fd:
|
||||
lines = fd.readlines()
|
||||
lines = [line.strip() for line in lines]
|
||||
dss_vg_info = ",".join(lines)
|
||||
|
||||
cluster_info_dict[ClusterKey.ENABLE_DSS] = "on"
|
||||
cluster_info_dict[ClusterKey.DSS_HOME] = dss_home
|
||||
cluster_info_dict[ClusterKey.SS_DSS_VG_NAME] = vg_name
|
||||
cluster_info_dict[ClusterKey.DSS_VG_INFO] = dss_vg_info
|
||||
cluster_info_dict[ClusterKey.VOTING_DISK_PATH] = voting_disk_path
|
||||
cluster_info_dict[ClusterKey.SHARE_DISK_DIR] = share_disk_dir
|
||||
cluster_info_dict[ClusterKey.DSS_SSL_ENABLE] = dss_ssl
|
||||
|
||||
def gen_dcf(self, cluster_info, cluster_info_dict):
|
||||
if cluster_info.enable_dcf == "on":
|
||||
cluster_info_dict[ClusterKey.ENABLE_DCF] = cluster_info.enable_dcf
|
||||
cluster_info_dict[ClusterKey.DCF_CONFIG] = cluster_info.dcf_config
|
||||
|
||||
def get_datanodes1_value(self, cluster_info):
|
||||
datanode = cluster_info.dbNodes[0].datanodes[0].datadir
|
||||
datanode1 = ""
|
||||
datanode1 += datanode + ","
|
||||
for node in cluster_info.dbNodes[1:-1]:
|
||||
datanode1 += node.name + "," + datanode + ","
|
||||
datanode1 += cluster_info.dbNodes[-1].name + "," + datanode
|
||||
return datanode1
|
||||
|
||||
def gen_om(self, cluster_info, cluster_info_dict):
|
||||
hostname_list = []
|
||||
hostip_list = []
|
||||
datanode1 = self.get_datanodes1_value(cluster_info)
|
||||
for node in cluster_info.dbNodes:
|
||||
hostname = node.name
|
||||
host_ip = node.backIps[0]
|
||||
hostname_list.append(hostname)
|
||||
hostip_list.append(host_ip)
|
||||
|
||||
instance_type = node.datanodes[0].instanceType
|
||||
cluster_info_dict[hostname] = {
|
||||
ClusterKey.NAME: hostname,
|
||||
ClusterKey.AZNAME: node.azName,
|
||||
ClusterKey.AZPRIORITY: str(node.azPriority),
|
||||
ClusterKey.BACKIP1: host_ip,
|
||||
ClusterKey.SSHIP1: node.sshIps[0],
|
||||
"instance_type": str(instance_type)
|
||||
}
|
||||
|
||||
if instance_type == 0:
|
||||
cluster_info_dict[hostname].update({
|
||||
ClusterKey.DATA_NUM: "1",
|
||||
ClusterKey.DATA_PORT_BASE: str(node.datanodes[0].port),
|
||||
ClusterKey.DATA_NODE1: datanode1,
|
||||
ClusterKey.DATA_NODE1_SYNCNUM: "0"
|
||||
})
|
||||
|
||||
self.hostname_list = hostname_list
|
||||
self.hostip_list = hostip_list
|
||||
self.hostname_str = ",".join(self.hostname_list)
|
||||
self.hostip_str = ",".join(self.hostip_list)
|
||||
|
||||
def has_cm(self, cluster_info):
|
||||
if cluster_info.cmscount > 0:
|
||||
self.cm_flag = True
|
||||
|
||||
def gen_cm(self, cluster_info, cluster_info_dict):
|
||||
if self.cm_flag:
|
||||
for node in cluster_info.dbNodes:
|
||||
hostname = node.name
|
||||
port = node.cmservers[0].port
|
||||
cm_dir = node.cmDataDir
|
||||
instance_type = node.cmservers[0].instanceType
|
||||
if instance_type == 0:
|
||||
cluster_info_dict[hostname]['cm'] = {
|
||||
"cm_instance_type": instance_type,
|
||||
ClusterKey.CMS_NUM: "1",
|
||||
ClusterKey.CM_SERVER_PORT_BASE: str(port),
|
||||
ClusterKey.CM_SERVER_LISTEN_IP1: self.hostip_str,
|
||||
ClusterKey.CM_SERVER_HA_IP1: self.hostip_str,
|
||||
ClusterKey.CM_SERVER_LEVEL: "1",
|
||||
ClusterKey.CM_SERVER_RELATION: self.hostname_str,
|
||||
ClusterKey.CM_DIR: cm_dir
|
||||
}
|
||||
else:
|
||||
cluster_info_dict[hostname]['cm'] = {
|
||||
"cm_instance_type": instance_type,
|
||||
ClusterKey.CM_SERVER_PORT_STANDBY: str(port),
|
||||
ClusterKey.CM_DIR: cm_dir
|
||||
}
|
||||
|
||||
def gen_cascade_role(self, cluster_info, cluster_info_dict):
|
||||
for node in cluster_info.dbNodes:
|
||||
hostname = node.name
|
||||
cascade_role = node.cascadeRole
|
||||
if node.cascadeRole == "on":
|
||||
cluster_info_dict[hostname].update({
|
||||
ClusterKey.CASCADEROLE: cascade_role
|
||||
})
|
||||
|
||||
def set_dict_key(self, dict_obj, key, value):
|
||||
if key in dict_obj.keys():
|
||||
dict_obj.update({key: value})
|
||||
else:
|
||||
dict_obj[key] = value
|
||||
|
||||
def set_cluster_info(self, cluster_info_dict):
|
||||
self.set_cluster_common_info(cluster_info_dict, ClusterKey.CLUSTER_LABEL)
|
||||
self.set_cluster_common_info(cluster_info_dict, ClusterKey.DSS_LABEL)
|
||||
self.set_cluster_common_info(cluster_info_dict, ClusterKey.DCF_LABEL)
|
||||
|
||||
def set_cluster_common_info(self, cluster_info_dict, keys):
|
||||
for label in keys:
|
||||
if label in cluster_info_dict.keys() and cluster_info_dict.get(label):
|
||||
key = label
|
||||
value = cluster_info_dict.get(label)
|
||||
param = ET.Element("PARAM", name=key, value=value)
|
||||
self.cluster_label.append(param)
|
||||
|
||||
def set_device_info(self, cluster_info_dict):
|
||||
for hostname in self.hostname_list:
|
||||
parent = ET.SubElement(self.device_list_label, "DEVICE", sn=hostname)
|
||||
for label in ClusterKey.DEVICE_LABEL_SN:
|
||||
if label in cluster_info_dict[hostname].keys() and cluster_info_dict[hostname].get(label):
|
||||
key = label
|
||||
value = cluster_info_dict[hostname].get(label)
|
||||
ET.SubElement(parent, "PARAM", name=key, value=value)
|
||||
|
||||
if not self.cm_flag:
|
||||
continue
|
||||
for label in ClusterKey.CM_LABEL_SN:
|
||||
if label in cluster_info_dict[hostname]['cm'].keys() and cluster_info_dict[hostname]['cm'].get(label):
|
||||
key = label
|
||||
value = cluster_info_dict[hostname]['cm'].get(label)
|
||||
ET.SubElement(parent, "PARAM", name=key, value=value)
|
||||
|
||||
def output_xml(self):
|
||||
user = pwd.getpwuid(os.getuid()).pw_name
|
||||
current_date = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
|
||||
target_xml = "/home/%s/xml_output_%s.xml" % (user, str(current_date))
|
||||
# convert ElementTree tag tree to string
|
||||
xml_str = ET.tostring(self.root, encoding="UTF-8", method="xml")
|
||||
dom = minidom.parseString(xml_str)
|
||||
formatted_xml = dom.toprettyxml()
|
||||
cmd = "echo '%s' > %s" % (formatted_xml, target_xml)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if status != 0:
|
||||
raise Exception("Failed to write xml file: %s" % output)
|
||||
xml_tmp_file = "/home/%s/tmp_generate_xml" % user
|
||||
cmd = "echo '%s' > '%s'" % (target_xml, xml_tmp_file)
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if status != 0:
|
||||
raise Exception("Failed to write xml tmp file: %s" % output)
|
@ -47,6 +47,7 @@ from gspylib.os.gsfile import g_file
|
||||
from domain_utils.cluster_file.cluster_dir import ClusterDir
|
||||
from base_utils.os.env_util import EnvUtil
|
||||
from base_utils.os.cmd_util import CmdUtil
|
||||
from domain_utils.cluster_file.version_info import VersionInfo
|
||||
|
||||
#boot/build mode
|
||||
MODE_PRIMARY = "primary"
|
||||
@ -210,7 +211,7 @@ class ExpansionImpl():
|
||||
def generateAndSendXmlFile(self):
|
||||
"""
|
||||
"""
|
||||
self.logger.debug("Start to generateAndSend XML file.\n")
|
||||
self.logger.debug("Start to generateAndSend XML file.")
|
||||
|
||||
tempXmlFile = "%s/clusterconfig.xml" % self.tempFileDir
|
||||
cmd = "mkdir -p %s; touch %s; cat /dev/null > %s" % \
|
||||
@ -238,7 +239,7 @@ class ExpansionImpl():
|
||||
tempXmlFile, [host], self.envFile)
|
||||
self.cleanSshToolFile(sshTool)
|
||||
|
||||
self.logger.debug("End to generateAndSend XML file.\n")
|
||||
self.logger.debug("End to generateAndSend XML file.")
|
||||
|
||||
def __generateXml(self, backIp):
|
||||
"""
|
||||
@ -255,6 +256,9 @@ class ExpansionImpl():
|
||||
appPath = self.context.clusterInfoDict["appPath"]
|
||||
logPath = self.context.clusterInfoDict["logPath"]
|
||||
corePath = self.context.clusterInfoDict["corePath"]
|
||||
core_path_config = ""
|
||||
if corePath:
|
||||
core_path_config = '<PARAM name="corePath" value="%s" />' % corePath
|
||||
toolPath = self.context.clusterInfoDict["toolPath"]
|
||||
mppdbconfig = ""
|
||||
tmpMppdbPath = EnvUtil.getEnv("PGHOST")
|
||||
@ -274,7 +278,7 @@ class ExpansionImpl():
|
||||
<PARAM name="gaussdbLogPath" value="{logPath}" />
|
||||
<PARAM name="gaussdbToolPath" value="{toolPath}" />
|
||||
{mappdbConfig}
|
||||
<PARAM name="corePath" value="{corePath}"/>
|
||||
{core_path_config}
|
||||
<PARAM name="clusterType" value="single-inst"/>
|
||||
</CLUSTER>
|
||||
<DEVICELIST>
|
||||
@ -292,7 +296,7 @@ class ExpansionImpl():
|
||||
</DEVICELIST>
|
||||
</ROOT>
|
||||
""".format(clusterName = clusterName, nodeName = nodeName, backIp = backIp,
|
||||
appPath = appPath, logPath = logPath, toolPath = toolPath, corePath = corePath,
|
||||
appPath = appPath, logPath = logPath, toolPath = toolPath, core_path_config = core_path_config,
|
||||
sshIp = sshIp, port = port, dataNode = dataNode, azName = azName,
|
||||
azPriority = azPriority, mappdbConfig = mppdbconfig)
|
||||
return xmlConfig
|
||||
@ -1022,7 +1026,7 @@ gs_guc set -D {dn} -c "available_zone='{azName}'"
|
||||
if status != 0:
|
||||
GaussLog.exitWithError("Copy file faild. %s" % output)
|
||||
|
||||
self.logger.log("End to generate and send cluster static file.\n")
|
||||
self.logger.log("End to generate and send cluster static file.")
|
||||
if DefaultValue.get_cm_server_num_from_static(self.context.clusterInfo) > 0:
|
||||
self.logger.debug("Check new host state after restart.")
|
||||
return
|
||||
@ -1214,7 +1218,7 @@ remoteservice={remoteservice}'"\
|
||||
if wrongGsomVersionHosts:
|
||||
self.logger.log(ErrorCode.GAUSS_357["GAUSS_35708"] %
|
||||
("gs_om", ", ".join(wrongGsomVersionHosts)))
|
||||
self.logger.log("End to check gaussdb and gs_om version.\n")
|
||||
self.logger.log("End to check gaussdb and gs_om version.")
|
||||
if self._isAllFailed():
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35706"] %
|
||||
"check gaussdb and gs_om version")
|
||||
@ -1227,7 +1231,7 @@ remoteservice={remoteservice}'"\
|
||||
self.sendSoftToHosts()
|
||||
self.generateAndSendXmlFile()
|
||||
self.preInstallOnHosts()
|
||||
self.logger.log("End to preinstall database on new nodes.\n")
|
||||
self.logger.log("End to preinstall database on new nodes.")
|
||||
if self._isAllFailed():
|
||||
GaussLog.exitWithError(ErrorCode.GAUSS_357["GAUSS_35706"] % "preinstall")
|
||||
|
||||
@ -1628,7 +1632,7 @@ remoteservice={remoteservice}'"\
|
||||
if not self.context.standbyLocalMode:
|
||||
self.logger.log("Start to install database on new nodes.")
|
||||
self.installDatabaseOnHosts()
|
||||
self.logger.log("Database on standby nodes installed finished.\n")
|
||||
self.logger.log("Database on standby nodes installed finished.")
|
||||
self.checkGaussdbAndGsomVersionOfStandby()
|
||||
self.logger.log("Start to establish the relationship.")
|
||||
self.buildStandbyRelation()
|
||||
|
@ -22,6 +22,8 @@ import sys
|
||||
import re
|
||||
import time
|
||||
import getpass
|
||||
import os
|
||||
import pwd
|
||||
|
||||
sys.path.append(sys.path[0] + "/../../../../")
|
||||
from gspylib.common.DbClusterInfo import queryCmd
|
||||
@ -36,6 +38,7 @@ from base_utils.os.net_util import NetUtil
|
||||
from base_utils.os.env_util import EnvUtil
|
||||
from gspylib.component.DSS.dss_checker import DssConfig
|
||||
from gspylib.common.Common import DefaultValue, ClusterCommand
|
||||
from gspylib.common.generate_xml import GenerateXml
|
||||
from base_utils.os.cmd_util import CmdUtil
|
||||
|
||||
|
||||
@ -402,3 +405,34 @@ class OmImplOLAP(OmImpl):
|
||||
sshtool)
|
||||
|
||||
self.logger.log("Successfully generated dynamic configuration file.")
|
||||
|
||||
def do_generate_xml(self):
|
||||
"""
|
||||
function: Generate XML based on cluster
|
||||
input : NA
|
||||
output : NA
|
||||
"""
|
||||
self.logger.log("Start generate xml.")
|
||||
gen_xml = GenerateXml()
|
||||
try:
|
||||
if not self.context.g_opts.add_hostnames:
|
||||
gen_xml.do_generate_xml(self.context.clusterInfo)
|
||||
else:
|
||||
new_hostname_list = self.context.g_opts.add_hostnames.split(",")
|
||||
new_hostip_list = self.context.g_opts.add_hostips.split(",")
|
||||
new_host_dict = dict(zip(new_hostname_list, new_hostip_list))
|
||||
gen_xml.do_generate_xml(self.context.clusterInfo, new_host_dict)
|
||||
except Exception as e:
|
||||
self.logger.exitWithError("Failed to generated xml. Error: %s" % str(e))
|
||||
user = pwd.getpwuid(os.getuid()).pw_name
|
||||
# XML temporary file storage XML file path
|
||||
xml_tmp_file = "/home/%s/tmp_generate_xml" % user
|
||||
if not os.path.exists(xml_tmp_file):
|
||||
self.logger.exitWithError(ErrorCode.GAUSS_502["GAUSS_50201"] % xml_tmp_file)
|
||||
# get xml file path and print
|
||||
cmd = "cat %s" % xml_tmp_file
|
||||
(status, output) = subprocess.getstatusoutput(cmd)
|
||||
if status != 0:
|
||||
self.logger.exitWithError(ErrorCode.GAUSS_502["GAUSS_50205"] % cmd)
|
||||
xml_file = output.strip()
|
||||
self.logger.log("Successfully generated xml. the xml is %s" % xml_file)
|
||||
|
@ -292,9 +292,11 @@ class PreinstallImplOLAP(PreinstallImpl):
|
||||
function: set file size and path with core file
|
||||
:return: NA
|
||||
"""
|
||||
if os.getuid() != 0:
|
||||
return
|
||||
self.context.clusterInfo.corePath = \
|
||||
self.context.clusterInfo.readClustercorePath(self.context.xmlFile)
|
||||
if not self.context.clusterInfo.corePath or not self.context.current_user_root:
|
||||
if not self.context.clusterInfo.corePath:
|
||||
return
|
||||
self.context.logger.log("Setting Core file", "addStep")
|
||||
try:
|
||||
|
@ -921,13 +921,14 @@ class PreinstallImpl:
|
||||
|
||||
def set_tool_env(self):
|
||||
# set tool env on all hosts
|
||||
cmd = "%s -t %s -u %s -l %s -X '%s' -Q %s" % (
|
||||
cmd = "%s -t %s -u %s -l %s -X '%s' -Q %s -C %s" % (
|
||||
OMCommand.getLocalScript("Local_PreInstall"),
|
||||
ACTION_SET_TOOL_ENV,
|
||||
self.context.user,
|
||||
self.context.localLog,
|
||||
self.context.xmlFile,
|
||||
self.context.clusterToolPath)
|
||||
self.context.clusterToolPath,
|
||||
self.context.cluster_core_path)
|
||||
if self.context.mpprcFile != "":
|
||||
cmd += " -s '%s' -g %s" % (
|
||||
self.context.mpprcFile, self.context.group)
|
||||
@ -938,6 +939,8 @@ class PreinstallImpl:
|
||||
VersionInfo.PRODUCT_NAME, cmd))
|
||||
raise Exception(output)
|
||||
|
||||
if self.context.localMode or self.context.isSingle:
|
||||
return
|
||||
self.context.sshTool.executeCommand(cmd,
|
||||
DefaultValue.SUCCESS,
|
||||
[],
|
||||
|
@ -135,6 +135,7 @@ class PreInstall(LocalBaseOM):
|
||||
self.white_list = {}
|
||||
self.logger = None
|
||||
self.current_user_root = False
|
||||
self.cluster_core_path = ""
|
||||
|
||||
def get_current_user(self):
|
||||
"""
|
||||
@ -193,6 +194,7 @@ Common options:
|
||||
-s The path of MPP environment file.
|
||||
-l The path of log file.
|
||||
-R The path of cluster install path.
|
||||
-C The path of cluster core path.
|
||||
--help Show this help, then exit.
|
||||
"""
|
||||
print(self.usage.__doc__)
|
||||
@ -204,7 +206,7 @@ Common options:
|
||||
output: NA
|
||||
"""
|
||||
try:
|
||||
opts, args = getopt.getopt(sys.argv[1:], "t:u:g:X:P:Q:e:s:l:f:R:",
|
||||
opts, args = getopt.getopt(sys.argv[1:], "t:u:g:X:P:Q:e:s:l:f:R:C:",
|
||||
["check_empty", "help"])
|
||||
except Exception as e:
|
||||
self.usage()
|
||||
@ -218,7 +220,7 @@ Common options:
|
||||
"-X": self.clusterConfig,
|
||||
"-P": self.preparePath, "-Q": self.clusterToolPath,
|
||||
"-s": self.mpprcFile, "-f": self.tmpFile,
|
||||
"-R": self.clusterAppPath}
|
||||
"-R": self.clusterAppPath, "-C": self.cluster_core_path}
|
||||
parameter_keys = parameter_map.keys()
|
||||
|
||||
for (key, value) in opts:
|
||||
@ -245,6 +247,7 @@ Common options:
|
||||
self.mpprcFile = parameter_map["-s"]
|
||||
self.tmpFile = parameter_map["-f"]
|
||||
self.clusterAppPath = parameter_map["-R"]
|
||||
self.cluster_core_path = parameter_map["-C"]
|
||||
|
||||
def checkParameter(self):
|
||||
"""
|
||||
@ -1611,6 +1614,8 @@ Common options:
|
||||
|
||||
# clean GPHOME
|
||||
FileUtil.deleteLine(userProfile, "^\\s*export\\s*GPHOME=.*$")
|
||||
# clean COREPATH
|
||||
FileUtil.deleteLine(userProfile, "^\\s*export\\s*COREPATH=.*$")
|
||||
# clean UNPACKPATH
|
||||
FileUtil.deleteLine(userProfile, "^\\s*export\\s*UNPACKPATH=.*$")
|
||||
self.logger.debug(
|
||||
@ -1692,6 +1697,9 @@ Common options:
|
||||
datadir = node_info.datanodes[0].datadir
|
||||
FileUtil.writeFile(userProfile,
|
||||
["export PGDATA=%s" % datadir])
|
||||
|
||||
# set COREPATH
|
||||
FileUtil.writeFile(userProfile, ["export COREPATH=%s" % self.cluster_core_path])
|
||||
# set PGDATABASE
|
||||
FileUtil.writeFile(userProfile, ["export PGDATABASE=%s" % "postgres"])
|
||||
|
||||