modify upgrade script
@@ -977,7 +977,7 @@ alias_file_name ="ob_system_variable_alias.h"
sys_var_class_type_head_file_name = "ob_sys_var_class_type.h"
sys_var_fac_head_file_name = "ob_system_variable_factory.h"
sys_var_fac_cpp_file_name = "ob_system_variable_factory.cpp"
sys_vars_dict_script_file_name = "../../../tools/upgrade/sys_vars_dict.py"
#sys_vars_dict_script_file_name = "../../../tools/upgrade/sys_vars_dict.py"

(json_Dict, list_sorted_by_name, list_sorted_by_id) = parse_json(json_file_name)

@@ -989,5 +989,5 @@ make_sys_var_class_type_h(pdir, sys_var_class_type_head_file_name, list_sorted_b
make_sys_var_h(pdir, sys_var_fac_head_file_name, list_sorted_by_id)
make_sys_var_cpp(pdir, sys_var_fac_cpp_file_name, list_sorted_by_name, list_sorted_by_id)

gen_sys_vars_dict_script_for_upgrade(sys_vars_dict_script_file_name, list_sorted_by_id)
gen_upgrade_script()
#gen_sys_vars_dict_script_for_upgrade(sys_vars_dict_script_file_name, list_sorted_by_id)
#gen_upgrade_script()
@@ -340,214 +340,3 @@ def fetch_tenant_ids(query_cur):
    logging.exception('fail to fetch distinct tenant ids')
    raise e

def check_current_cluster_is_primary(query_cur):
  try:
    sql = """SELECT * FROM v$ob_cluster
             WHERE cluster_role = "PRIMARY"
             AND cluster_status = "VALID"
             AND (switchover_status = "NOT ALLOWED" OR switchover_status = "TO STANDBY") """
    (desc, results) = query_cur.exec_query(sql)
    is_primary = len(results) > 0
    return is_primary
  except Exception, e:
    logging.exception("""fail to check current is primary""")
    raise e
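
A quick way to sanity-check this helper without a live cluster is to stub the cursor. The sketch below assumes only that QueryCursor-like objects expose exec_query(sql) returning a (description, rows) pair; StubQueryCursor is hypothetical and exists purely for illustration.

# Minimal sketch: exercising check_current_cluster_is_primary with a stubbed cursor.
class StubQueryCursor(object):
  def __init__(self, rows):
    self.rows = rows
  def exec_query(self, sql):
    # mimic QueryCursor.exec_query: return (description, rows)
    return ([], self.rows)

# a non-empty result set means the cluster reports itself as PRIMARY and VALID
assert check_current_cluster_is_primary(StubQueryCursor([('PRIMARY',)])) == True
assert check_current_cluster_is_primary(StubQueryCursor([])) == False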
def fetch_standby_cluster_infos(conn, query_cur, user, pwd):
  try:
    is_primary = check_current_cluster_is_primary(query_cur)
    if not is_primary:
      logging.exception("""should be primary cluster""")
      raise Exception("""should be primary cluster""")

    standby_cluster_infos = []
    sql = """SELECT cluster_id, rootservice_list from v$ob_standby_status"""
    (desc, results) = query_cur.exec_query(sql)

    for r in results:
      standby_cluster_info = {}
      if 2 != len(r):
        logging.exception("length not match")
        raise Exception("length not match")
      standby_cluster_info['cluster_id'] = r[0]
      standby_cluster_info['user'] = user
      standby_cluster_info['pwd'] = pwd
      # construct ip/port
      address = r[1].split(";")[0] # choose first address in rs_list
      standby_cluster_info['ip'] = str(address.split(":")[0])
      standby_cluster_info['port'] = address.split(":")[2]
      # append
      standby_cluster_infos.append(standby_cluster_info)
      logging.info("""cluster_info : cluster_id = {0}, ip = {1}, port = {2}"""
                   .format(standby_cluster_info['cluster_id'],
                           standby_cluster_info['ip'],
                           standby_cluster_info['port']))
    conn.commit()
    # check standby cluster
    for standby_cluster_info in standby_cluster_infos:
      # connect
      logging.info("""create connection : cluster_id = {0}, ip = {1}, port = {2}"""
                   .format(standby_cluster_info['cluster_id'],
                           standby_cluster_info['ip'],
                           standby_cluster_info['port']))

      tmp_conn = mysql.connector.connect(user = standby_cluster_info['user'],
                                         password = standby_cluster_info['pwd'],
                                         host = standby_cluster_info['ip'],
                                         port = standby_cluster_info['port'],
                                         database = 'oceanbase',
                                         raise_on_warnings = True)

      tmp_cur = tmp_conn.cursor(buffered=True)
      tmp_conn.autocommit = True
      tmp_query_cur = QueryCursor(tmp_cur)
      is_primary = check_current_cluster_is_primary(tmp_query_cur)
      if is_primary:
        logging.exception("""primary cluster changed : cluster_id = {0}, ip = {1}, port = {2}"""
                          .format(standby_cluster_info['cluster_id'],
                                  standby_cluster_info['ip'],
                                  standby_cluster_info['port']))
        raise Exception("primary cluster changed")
      # close
      tmp_cur.close()
      tmp_conn.close()

    return standby_cluster_infos
  except Exception, e:
    logging.exception('fail to fetch standby cluster info')
    raise e
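
The ip/port extraction above assumes each rootservice_list entry looks like ip:rpc_port:sql_port, with entries separated by ';', and it takes the third field as the SQL port the connector should use. A small sketch of that parsing under the same assumption (the helper name and the sample ports are illustrative only):

def parse_first_rs_address(rootservice_list):
  # take the first server in the rs list, e.g. "10.0.0.1:2882:2881;10.0.0.2:2882:2881"
  address = rootservice_list.split(";")[0]
  fields = address.split(":")
  if len(fields) != 3:
    raise Exception("unexpected rootservice_list entry: {0}".format(address))
  # fields[1] is assumed to be the RPC port, fields[2] the SQL port
  return {'ip': fields[0], 'port': fields[2]}

# parse_first_rs_address("10.0.0.1:2882:2881;10.0.0.2:2882:2881")
# -> {'ip': '10.0.0.1', 'port': '2881'}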
def check_ddl_and_dml_sync(conn, query_cur, standby_cluster_infos, tenant_ids):
  try:
    conn.commit()
    # check if we need to check ddl and dml sync
    is_primary = check_current_cluster_is_primary(query_cur)
    if not is_primary:
      logging.exception("""should be primary cluster""")
      raise Exception("""should be primary cluster""")

    # fetch sys stats
    sys_infos = []
    sql = """SELECT tenant_id,
                    refreshed_schema_version,
                    min_sys_table_scn,
                    min_user_table_scn
             FROM oceanbase.v$ob_cluster_stats
             ORDER BY tenant_id desc"""
    (desc, results) = query_cur.exec_query(sql)
    if len(tenant_ids) != len(results):
      logging.exception("result not match")
      raise Exception("result not match")
    else:
      for i in range(len(results)):
        if len(results[i]) != 4:
          logging.exception("length not match")
          raise Exception("length not match")
        elif results[i][0] != tenant_ids[i]:
          logging.exception("tenant_id not match")
          raise Exception("tenant_id not match")
        else:
          sys_info = {}
          sys_info['tenant_id'] = results[i][0]
          sys_info['refreshed_schema_version'] = results[i][1]
          sys_info['min_sys_table_scn'] = results[i][2]
          sys_info['min_user_table_scn'] = results[i][3]
          logging.info("sys info : {0}".format(sys_info))
          sys_infos.append(sys_info)
    conn.commit()

    # check ddl and dml sync by cluster
    for standby_cluster_info in standby_cluster_infos:
      check_ddl_and_dml_sync_by_cluster(standby_cluster_info, sys_infos)

  except Exception, e:
    logging.exception("fail to check ddl and dml sync")
    raise e
def check_ddl_and_dml_sync_by_cluster(standby_cluster_info, sys_infos):
  try:
    # connect
    logging.info("start to check ddl and dml sync by cluster: cluster_id = {0}"
                 .format(standby_cluster_info['cluster_id']))
    logging.info("create connection : cluster_id = {0}, ip = {1}, port = {2}"
                 .format(standby_cluster_info['cluster_id'],
                         standby_cluster_info['ip'],
                         standby_cluster_info['port']))
    tmp_conn = mysql.connector.connect(user = standby_cluster_info['user'],
                                       password = standby_cluster_info['pwd'],
                                       host = standby_cluster_info['ip'],
                                       port = standby_cluster_info['port'],
                                       database = 'oceanbase',
                                       raise_on_warnings = True)
    tmp_cur = tmp_conn.cursor(buffered=True)
    tmp_conn.autocommit = True
    tmp_query_cur = QueryCursor(tmp_cur)
    is_primary = check_current_cluster_is_primary(tmp_query_cur)
    if is_primary:
      logging.exception("""primary cluster changed : cluster_id = {0}, ip = {1}, port = {2}"""
                        .format(standby_cluster_info['cluster_id'],
                                standby_cluster_info['ip'],
                                standby_cluster_info['port']))
      raise Exception("primary cluster changed")

    for sys_info in sys_infos:
      check_ddl_and_dml_sync_by_tenant(tmp_query_cur, sys_info)

    # close
    tmp_cur.close()
    tmp_conn.close()
    logging.info("""check_ddl_and_dml_sync_by_cluster success : cluster_id = {0}, ip = {1}, port = {2}"""
                 .format(standby_cluster_info['cluster_id'],
                         standby_cluster_info['ip'],
                         standby_cluster_info['port']))

  except Exception, e:
    logging.exception("""fail to check ddl and dml sync : cluster_id = {0}, ip = {1}, port = {2}"""
                      .format(standby_cluster_info['cluster_id'],
                              standby_cluster_info['ip'],
                              standby_cluster_info['port']))
    raise e
def check_ddl_and_dml_sync_by_tenant(query_cur, sys_info):
  try:
    times = 1800 # 30min
    logging.info("start to check ddl and dml sync by tenant : {0}".format(sys_info))
    start_time = time.time()
    sql = ""
    if 1 == sys_info['tenant_id'] :
      # DML for the sys tenant is not physically replicated to the standby cluster;
      # the upgrade script writes it there itself, so for the sys tenant only DDL sync is verified.
      sql = """SELECT count(*)
               FROM oceanbase.v$ob_cluster_stats
               WHERE tenant_id = {0}
               AND refreshed_schema_version >= {1}
            """.format(sys_info['tenant_id'],
                       sys_info['refreshed_schema_version'])
    else:
      sql = """SELECT count(*)
               FROM oceanbase.v$ob_cluster_stats
               WHERE tenant_id = {0}
               AND refreshed_schema_version >= {1}
               AND min_sys_table_scn >= {2}
               AND min_user_table_scn >= {3}
            """.format(sys_info['tenant_id'],
                       sys_info['refreshed_schema_version'],
                       sys_info['min_sys_table_scn'],
                       sys_info['min_user_table_scn'])
    while times > 0 :
      (desc, results) = query_cur.exec_query(sql)
      if len(results) == 1 and results[0][0] == 1:
        break
      time.sleep(1)
      times -= 1
    if times == 0:
      logging.exception("check ddl and dml sync timeout! : {0}, cost = {1}"
                        .format(sys_info, time.time() - start_time))
      raise Exception("check ddl and dml sync timeout")
    else:
      logging.info("check ddl and dml sync success! : {0}, cost = {1}"
                   .format(sys_info, time.time() - start_time))

  except Exception, e:
    logging.exception("fail to check ddl and dml sync : {0}".format(sys_info))
    raise e
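
check_ddl_and_dml_sync_by_tenant is a plain poll-until-true loop: one query per second, at most 1800 attempts. The same shape can be factored into a generic helper, sketched below; check_synced is a hypothetical predicate that would wrap the SELECT above.

import time

def wait_until(predicate, retry_times=1800, interval_sec=1):
  # call predicate() once per interval until it returns True or the budget runs out
  while retry_times > 0:
    if predicate():
      return True
    time.sleep(interval_sec)
    retry_times -= 1
  return False

# usage sketch:
# if not wait_until(lambda: check_synced(query_cur, sys_info)):
#   raise Exception("check ddl and dml sync timeout")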
@@ -11,10 +11,6 @@ import config
import opts
import run_modules
import actions
import normal_ddl_actions_post
import normal_dml_actions_post
import each_tenant_dml_actions_post
import each_tenant_ddl_actions_post
import special_upgrade_action_post

# Queries use the /*+read_consistency(WEAK) */ hint, so creating or dropping tenants
# must not be allowed while the upgrade is running.
@@ -108,66 +104,13 @@ def do_upgrade(my_host, my_port, my_user, my_passwd, my_module_set, upgrade_para
      raise MyError('no tenant id')
    logging.info('there has %s distinct tenant ids: [%s]', len(tenant_id_list), ','.join(str(tenant_id) for tenant_id in tenant_id_list))
    conn.commit()

    # fetch the standby_cluster_info list
    standby_cluster_infos = actions.fetch_standby_cluster_infos(conn, query_cur, my_user, my_passwd)
    # check ddl and dml sync
    actions.check_ddl_and_dml_sync(conn, query_cur, standby_cluster_infos, tenant_id_list)

    actions.refresh_commit_sql_list()
    dump_sql_to_file(upgrade_params.sql_dump_filename, tenant_id_list)
    logging.info('================succeed to dump sql to file: {0}==============='.format(upgrade_params.sql_dump_filename))

    if run_modules.MODULE_DDL in my_module_set:
      logging.info('================begin to run ddl===============')
      conn.autocommit = True
      normal_ddl_actions_post.do_normal_ddl_actions(cur)
      logging.info('================succeed to run ddl===============')
      conn.autocommit = False
      # check ddl and dml sync
      actions.check_ddl_and_dml_sync(conn, query_cur, standby_cluster_infos, tenant_id_list)

    if run_modules.MODULE_EACH_TENANT_DDL in my_module_set:
      has_run_ddl = True
      logging.info('================begin to run each tenant ddl===============')
      conn.autocommit = True
      each_tenant_ddl_actions_post.do_each_tenant_ddl_actions(cur, tenant_id_list)
      logging.info('================succeed to run each tenant ddl===============')
      conn.autocommit = False
      # check ddl and dml sync
      actions.check_ddl_and_dml_sync(conn, query_cur, standby_cluster_infos, tenant_id_list)

    if run_modules.MODULE_NORMAL_DML in my_module_set:
      logging.info('================begin to run normal dml===============')
      normal_dml_actions_post.do_normal_dml_actions_by_standby_cluster(standby_cluster_infos)
      normal_dml_actions_post.do_normal_dml_actions(cur)
      logging.info('================succeed to run normal dml===============')
      conn.commit()
      actions.refresh_commit_sql_list()
      logging.info('================succeed to commit dml===============')
      # check ddl and dml sync
      actions.check_ddl_and_dml_sync(conn, query_cur, standby_cluster_infos, tenant_id_list)

    if run_modules.MODULE_EACH_TENANT_DML in my_module_set:
      logging.info('================begin to run each tenant dml===============')
      conn.autocommit = True
      each_tenant_dml_actions_post.do_each_tenant_dml_actions_by_standby_cluster(standby_cluster_infos)
      each_tenant_dml_actions_post.do_each_tenant_dml_actions(cur, tenant_id_list)
      conn.autocommit = False
      logging.info('================succeed to run each tenant dml===============')
      # check ddl and dml sync
      actions.check_ddl_and_dml_sync(conn, query_cur, standby_cluster_infos, tenant_id_list)

    if run_modules.MODULE_SPECIAL_ACTION in my_module_set:
      logging.info('================begin to run special action===============')
      conn.autocommit = True
      special_upgrade_action_post.do_special_upgrade_in_standy_cluster(standby_cluster_infos, my_user, my_passwd)
      special_upgrade_action_post.do_special_upgrade(conn, cur, tenant_id_list, my_user, my_passwd)
      conn.autocommit = False
      actions.refresh_commit_sql_list()
      logging.info('================succeed to commit special action===============')
      # check ddl and dml sync
      actions.check_ddl_and_dml_sync(conn, query_cur, standby_cluster_infos, tenant_id_list)
  except Exception, e:
    logging.exception('run error')
    raise e
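
Every phase in do_upgrade is gated on membership of a module name in my_module_set, so a caller can re-run a subset of the upgrade. A sketch of building such a set, assuming the MODULE_* constants are simple values defined in run_modules as imported above:

import run_modules

# run only the DML phases; the DDL and special-action blocks are skipped
# because their membership tests against my_module_set fail
my_module_set = set()
my_module_set.add(run_modules.MODULE_NORMAL_DML)
my_module_set.add(run_modules.MODULE_EACH_TENANT_DML)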
@@ -11,10 +11,6 @@ import config
import opts
import run_modules
import actions
import normal_ddl_actions_pre
import normal_dml_actions_pre
import each_tenant_dml_actions_pre
import upgrade_sys_vars
import special_upgrade_action_pre

# Queries use the /*+read_consistency(WEAK) */ hint, so creating or dropping tenants
# must not be allowed while the upgrade is running.
@@ -45,7 +41,6 @@ def dump_sql_to_file(cur, query_cur, dump_filename, tenant_id_list, update_sys_v
  normal_ddls_str = normal_ddl_actions_pre.get_normal_ddl_actions_sqls_str(query_cur)
  normal_dmls_str = normal_dml_actions_pre.get_normal_dml_actions_sqls_str()
  each_tenant_dmls_str = each_tenant_dml_actions_pre.get_each_tenant_dml_actions_sqls_str(tenant_id_list)
  sys_vars_upgrade_dmls_str = upgrade_sys_vars.get_sys_vars_upgrade_dmls_str(cur, query_cur, tenant_id_list, update_sys_var_list, add_sys_var_list)
  dump_file = open(dump_filename, 'w')
  dump_file.write('# The steps below come from the upgrade_pre.py script\n')
  dump_file.write('# They are for reference only, when upgrade_pre.py fails and the steps have to be applied by hand\n')
@@ -95,77 +90,12 @@ def do_upgrade(my_host, my_port, my_user, my_passwd, my_module_set, upgrade_para
    check_before_upgrade(query_cur, upgrade_params)
    # get min_observer_version
    version = actions.fetch_observer_version(query_cur)
    need_check_standby_cluster = cmp(version, '2.2.40') >= 0
    # fetch the tenant id list
    tenant_id_list = actions.fetch_tenant_ids(query_cur)
    if len(tenant_id_list) <= 0:
      logging.error('distinct tenant id count is <= 0, tenant_id_count: %d', len(tenant_id_list))
      raise MyError('no tenant id')
    logging.info('there has %s distinct tenant ids: [%s]', len(tenant_id_list), ','.join(str(tenant_id) for tenant_id in tenant_id_list))
    # work out which system variables need to be added or updated
    conn.commit()

    # fetch the standby_cluster_info list
    standby_cluster_infos = []
    if need_check_standby_cluster:
      standby_cluster_infos = actions.fetch_standby_cluster_infos(conn, query_cur, my_user, my_passwd)
      # check ddl and dml sync
      actions.check_ddl_and_dml_sync(conn, query_cur, standby_cluster_infos, tenant_id_list)

    conn.autocommit = True
    (update_sys_var_list, update_sys_var_ori_list, add_sys_var_list) = upgrade_sys_vars.calc_diff_sys_var(cur, tenant_id_list[0])
    dump_sql_to_file(cur, query_cur, upgrade_params.sql_dump_filename, tenant_id_list, update_sys_var_list, add_sys_var_list)
    conn.autocommit = False
    conn.commit()
    logging.info('update system variables list: [%s]', ', '.join(str(sv) for sv in update_sys_var_list))
    logging.info('update system variables original list: [%s]', ', '.join(str(sv) for sv in update_sys_var_ori_list))
    logging.info('add system variables list: [%s]', ', '.join(str(sv) for sv in add_sys_var_list))
    logging.info('================succeed to dump sql to file: {0}==============='.format(upgrade_params.sql_dump_filename))

    if run_modules.MODULE_DDL in my_module_set:
      logging.info('================begin to run ddl===============')
      conn.autocommit = True
      normal_ddl_actions_pre.do_normal_ddl_actions(cur)
      logging.info('================succeed to run ddl===============')
      conn.autocommit = False
      # check ddl and dml sync
      if need_check_standby_cluster:
        actions.check_ddl_and_dml_sync(conn, query_cur, standby_cluster_infos, tenant_id_list)

    if run_modules.MODULE_NORMAL_DML in my_module_set:
      logging.info('================begin to run normal dml===============')
      normal_dml_actions_pre.do_normal_dml_actions(cur)
      normal_dml_actions_pre.do_normal_dml_actions_by_standby_cluster(standby_cluster_infos)
      logging.info('================succeed to run normal dml===============')
      conn.commit()
      actions.refresh_commit_sql_list()
      logging.info('================succeed to commit dml===============')
      # check ddl and dml sync
      if need_check_standby_cluster:
        actions.check_ddl_and_dml_sync(conn, query_cur, standby_cluster_infos, tenant_id_list)

    if run_modules.MODULE_EACH_TENANT_DML in my_module_set:
      logging.info('================begin to run each tenant dml===============')
      conn.autocommit = True
      each_tenant_dml_actions_pre.do_each_tenant_dml_actions(cur, tenant_id_list)
      each_tenant_dml_actions_pre.do_each_tenant_dml_actions_by_standby_cluster(standby_cluster_infos)
      conn.autocommit = False
      logging.info('================succeed to run each tenant dml===============')
      # check ddl and dml sync
      if need_check_standby_cluster:
        actions.check_ddl_and_dml_sync(conn, query_cur, standby_cluster_infos, tenant_id_list)

    # update system variables
    if run_modules.MODULE_SYSTEM_VARIABLE_DML in my_module_set:
      logging.info('================begin to run system variable dml===============')
      conn.autocommit = True
      upgrade_sys_vars.exec_sys_vars_upgrade_dml(cur, tenant_id_list)
      conn.autocommit = False
      logging.info('================succeed to run system variable dml===============')
      # check dml sync
      if need_check_standby_cluster:
        upgrade_sys_vars.exec_sys_vars_upgrade_dml_in_standby_cluster(standby_cluster_infos)
        actions.check_ddl_and_dml_sync(conn, query_cur, standby_cluster_infos, tenant_id_list)

    if run_modules.MODULE_SPECIAL_ACTION in my_module_set:
      logging.info('================begin to run special action===============')
@@ -174,10 +104,6 @@ def do_upgrade(my_host, my_port, my_user, my_passwd, my_module_set, upgrade_para
      conn.autocommit = False
      actions.refresh_commit_sql_list()
      logging.info('================succeed to commit special action===============')
      # check ddl and dml sync
      if need_check_standby_cluster:
        special_upgrade_action_pre.do_special_upgrade_in_standy_cluster(standby_cluster_infos, my_user, my_passwd)
        actions.check_ddl_and_dml_sync(conn, query_cur, standby_cluster_infos, tenant_id_list)
  except Exception, e:
    logging.exception('run error')
    raise e
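
need_check_standby_cluster and the later version gates rely on Python 2's cmp() applied to dotted version strings, which compares them lexicographically; that happens to work for the versions used here but would misorder something like '2.2.100' against '2.2.40', and cmp() no longer exists in Python 3. A hedged alternative, assuming purely numeric version components:

def compare_version(a, b):
  # numeric, component-wise comparison; returns -1/0/1 like cmp()
  pa = [int(x) for x in a.split('.')]
  pb = [int(x) for x in b.split('.')]
  n = max(len(pa), len(pb))
  pa += [0] * (n - len(pa))   # pad so "3.1" and "3.1.0" compare equal
  pb += [0] * (n - len(pb))
  return (pa > pb) - (pa < pb)

# compare_version("2.2.100", "2.2.40") -> 1, whereas cmp("2.2.100", "2.2.40") -> -1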
@ -1,171 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from my_error import MyError
|
||||
from actions import BaseEachTenantDDLAction
|
||||
from actions import reflect_action_cls_list
|
||||
from actions import fetch_observer_version
|
||||
from actions import QueryCursor
|
||||
import logging
|
||||
import time
|
||||
import my_utils
|
||||
import actions
|
||||
|
||||
'''
|
||||
添加一条each tenant ddl的方法:
|
||||
|
||||
在本文件中,添加一个类名以"EachTenantDDLActionPost"开头并且继承自BaseEachTenantDDLAction的类,
|
||||
然后在这个类中实现以下成员函数,并且每个函数执行出错都要抛错:
|
||||
(1)@staticmethod get_seq_num():
|
||||
返回一个代表着执行顺序的序列号,该序列号在本文件中不允许重复,若有重复则会报错。
|
||||
(2)dump_before_do_action(self):
|
||||
执行action sql之前把一些相关数据dump到日志中。
|
||||
(3)check_before_do_action(self):
|
||||
执行action sql之前的检查。
|
||||
(4)dump_before_do_each_tenant_action(self, tenant_id):
|
||||
执行用参数tenant_id拼成的这条action sql之前把一些相关数据dump到日志中。
|
||||
(5)check_before_do_each_tenant_action(self, tenant_id):
|
||||
执行用参数tenant_id拼成的这条action sql之前的检查。
|
||||
(6)@staticmethod get_each_tenant_action_ddl(tenant_id):
|
||||
返回用参数tenant_id拼成的一条action sql,并且该sql必须为ddl。
|
||||
(7)@staticmethod get_each_tenant_rollback_sql(tenant_id):
|
||||
返回一条sql,用于回滚get_each_tenant_action_ddl(tenant_id)返回的sql。
|
||||
(8)dump_after_do_each_tenant_action(self, tenant_id):
|
||||
执行用参数tenant_id拼成的这条action sql之后把一些相关数据dump到日志中。
|
||||
(9)check_after_do_each_tenant_action(self, tenant_id):
|
||||
执行用参数tenant_id拼成的这条action sql之后的检查。
|
||||
(10)dump_after_do_action(self):
|
||||
执行action sql之后把一些相关数据dump到日志中。
|
||||
(11)check_after_do_action(self):
|
||||
执行action sql之后的检查。
|
||||
(12)skip_pre_check(self):
|
||||
check if check_before_do_action() can be skipped
|
||||
(13)skip_each_tenant_action(self):
|
||||
check if check_before_do_each_tenant_action() and do_each_tenant_action() can be skipped
|
||||
|
||||
举例: 以下为schema拆分后加租户级系统表的示例
|
||||
class EachTenantDDLActionPostCreateAllTenantBackupBackupLogArchiveStatus(BaseEachTenantDDLAction):
|
||||
table_name = '__all_tenant_backup_backup_log_archive_status'
|
||||
@staticmethod
|
||||
def get_seq_num():
|
||||
return 24
|
||||
def dump_before_do_action(self):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select tenant_id, table_id, table_name from {0} where table_name = '{1}'""".format(self.get_all_table_name(), self.table_name))
|
||||
def skip_pre_check(self):
|
||||
return True
|
||||
def skip_each_tenant_action(self, tenant_id):
|
||||
(desc, results) = self._query_cursor.exec_query("""select tenant_id, table_id, table_name from {0} where table_name = '{1}' and tenant_id = {2}""".format(self.get_all_table_name(), self.table_name, tenant_id))
|
||||
return (1 == len(results))
|
||||
def check_before_do_action(self):
|
||||
(desc, results) = self._query_cursor.exec_query("""select tenant_id, table_id, table_name from {0} where table_name = '{1}'""".format(self.get_all_table_name(), self.table_name))
|
||||
if len(results) > 0:
|
||||
raise MyError("""{0} already created""".format(self.table_name))
|
||||
def dump_before_do_each_tenant_action(self, tenant_id):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select tenant_id, table_id, table_name from {0} where table_name = '{1}' and tenant_id = {2}""".format(self.get_all_table_name(), self.table_name, tenant_id))
|
||||
def check_before_do_each_tenant_action(self, tenant_id):
|
||||
(desc, results) = self._query_cursor.exec_query("""select tenant_id, table_id, table_name from {0} where table_name = '{1}' and tenant_id = {2}""".format(self.get_all_table_name(), self.table_name, tenant_id))
|
||||
if len(results) > 0:
|
||||
raise MyError("""tenant_id:{0} has already create table {1}""".format(tenant_id, self.table_name))
|
||||
@staticmethod
|
||||
def get_each_tenant_action_ddl(tenant_id):
|
||||
pure_table_id = 303
|
||||
table_id = (tenant_id << 40) | pure_table_id
|
||||
return """CREATE TABLE `__all_tenant_backup_backup_log_archive_status` (
|
||||
`gmt_create` timestamp(6) NULL DEFAULT CURRENT_TIMESTAMP(6),
|
||||
`gmt_modified` timestamp(6) NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
|
||||
`tenant_id` bigint(20) NOT NULL,
|
||||
`incarnation` bigint(20) NOT NULL,
|
||||
`log_archive_round` bigint(20) NOT NULL,
|
||||
`copy_id` bigint(20) NOT NULL,
|
||||
`min_first_time` timestamp(6) NOT NULL,
|
||||
`max_next_time` timestamp(6) NOT NULL,
|
||||
`input_bytes` bigint(20) NOT NULL DEFAULT '0',
|
||||
`output_bytes` bigint(20) NOT NULL DEFAULT '0',
|
||||
`deleted_input_bytes` bigint(20) NOT NULL DEFAULT '0',
|
||||
`deleted_output_bytes` bigint(20) NOT NULL DEFAULT '0',
|
||||
`pg_count` bigint(20) NOT NULL DEFAULT '0',
|
||||
`status` varchar(64) NOT NULL DEFAULT '',
|
||||
PRIMARY KEY (`tenant_id`, `incarnation`, `log_archive_round`, `copy_id`)
|
||||
) TABLE_ID={0} DEFAULT CHARSET = utf8mb4 ROW_FORMAT = DYNAMIC COMPRESSION = 'none' REPLICA_NUM = 1 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 10 TABLEGROUP = 'oceanbase'
|
||||
""".format(table_id)
|
||||
@staticmethod
|
||||
def get_each_tenant_rollback_sql(tenant_id):
|
||||
return """select 1"""
|
||||
def dump_after_do_each_tenant_action(self, tenant_id):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select tenant_id, table_id, table_name from {0} where table_name = '{1}' and tenant_id = {2}""".format(self.get_all_table_name(), self.table_name, tenant_id))
|
||||
def check_after_do_each_tenant_action(self, tenant_id):
|
||||
(desc, results) = self._query_cursor.exec_query("""select tenant_id, table_id, table_name from {0} where table_name = '{1}' and tenant_id = {2}""".format(self.get_all_table_name(), self.table_name, tenant_id))
|
||||
if len(results) != 1:
|
||||
raise MyError("""tenant_id:{0} create table {1} failed""".format(tenant_id, self.table_name))
|
||||
def dump_after_do_action(self):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select tenant_id, table_id, table_name from {0} where table_name = '{1}'""".format(self.get_all_table_name(), self.table_name))
|
||||
def check_after_do_action(self):
|
||||
(desc, results) = self._query_cursor.exec_query("""select tenant_id, table_id, table_name from {0} where table_name = '{1}'""".format(self.get_all_table_name(), self.table_name))
|
||||
if len(results) != len(self.get_tenant_id_list()):
|
||||
raise MyError("""there should be {0} rows in {1} whose table_name is {2}, but there has {3} rows like that""".format(len(self.get_tenant_id_list()), self.get_all_table_name(), self.table_name, len(results)))
|
||||
'''
|
||||
|
||||
|
||||
#升级语句对应的action要写在下面的actions begin和actions end这两行之间,
|
||||
#因为基准版本更新的时候会调用reset_upgrade_scripts.py来清空actions begin和actions end
|
||||
#这两行之间的这些action,如果不写在这两行之间的话会导致清空不掉相应的action。
|
||||
|
||||
####========******####======== actions begin ========####******========####
|
||||
####========******####========= actions end =========####******========####
|
||||
|
||||
def do_each_tenant_ddl_actions(cur, tenant_id_list):
|
||||
import each_tenant_ddl_actions_post
|
||||
# 组户级系统表没法通过虚拟表暴露,需要根据版本决定查哪张实体表
|
||||
query_cur = QueryCursor(cur)
|
||||
version = fetch_observer_version(query_cur)
|
||||
all_table_name = "__all_table"
|
||||
if (cmp(version, "2.2.60") >= 0) :
|
||||
all_table_name = "__all_table_v2"
|
||||
|
||||
cls_list = reflect_action_cls_list(each_tenant_ddl_actions_post, 'EachTenantDDLActionPost')
|
||||
|
||||
# set parameter
|
||||
if len(cls_list) > 0:
|
||||
actions.set_parameter(cur, 'enable_sys_table_ddl' , 'True')
|
||||
ori_enable_ddl = actions.get_ori_enable_ddl(cur)
|
||||
if ori_enable_ddl == 0:
|
||||
actions.set_parameter(cur, 'enable_ddl', 'True')
|
||||
|
||||
for cls in cls_list:
|
||||
logging.info('do each tenant ddl acion, seq_num: %d', cls.get_seq_num())
|
||||
action = cls(cur, tenant_id_list)
|
||||
action.set_all_table_name(all_table_name)
|
||||
action.dump_before_do_action()
|
||||
if False == action.skip_pre_check():
|
||||
action.check_before_do_action()
|
||||
else:
|
||||
logging.info("skip pre check. seq_num: %d", cls.get_seq_num())
|
||||
# 系统租户组户级系统表创建成功会覆盖普通租户系统表,所以系统租户要最后建表
|
||||
for tenant_id in action.get_tenant_id_list():
|
||||
action.dump_before_do_each_tenant_action(tenant_id)
|
||||
if False == action.skip_each_tenant_action(tenant_id):
|
||||
action.check_before_do_each_tenant_action(tenant_id)
|
||||
action.do_each_tenant_action(tenant_id)
|
||||
else:
|
||||
logging.info("skip each tenant ddl action, seq_num: %d, tenant_id: %d", cls.get_seq_num(), tenant_id)
|
||||
action.dump_after_do_each_tenant_action(tenant_id)
|
||||
action.check_after_do_each_tenant_action(tenant_id)
|
||||
action.dump_after_do_action()
|
||||
action.check_after_do_action()
|
||||
|
||||
# reset parameter
|
||||
if len(cls_list) > 0:
|
||||
if ori_enable_ddl == 0:
|
||||
actions.set_parameter(cur, 'enable_ddl' , 'False')
|
||||
actions.set_parameter(cur, 'enable_sys_table_ddl' , 'False')
|
||||
|
||||
def get_each_tenant_ddl_actions_sqls_str(tenant_id_list):
|
||||
import each_tenant_ddl_actions_post
|
||||
ret_str = ''
|
||||
cls_list = reflect_action_cls_list(each_tenant_ddl_actions_post, 'EachTenantDDLActionPost')
|
||||
for i in range(0, len(cls_list)):
|
||||
for j in range(0, len(tenant_id_list)):
|
||||
if i > 0 or j > 0:
|
||||
ret_str += '\n'
|
||||
ret_str += cls_list[i].get_each_tenant_action_ddl(tenant_id_list[j]) + ';'
|
||||
return ret_str
|
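
Both runners above rely on reflect_action_cls_list from actions.py to discover the action classes. Its real implementation is not part of this diff; purely as an illustration, such reflection could collect classes by name prefix and order them by get_seq_num(), rejecting duplicate sequence numbers as the docstring requires:

import inspect

def reflect_action_cls_list_sketch(module, cls_name_prefix):
  # hypothetical stand-in for actions.reflect_action_cls_list
  cls_list = [obj for name, obj in inspect.getmembers(module, inspect.isclass)
              if name.startswith(cls_name_prefix)]
  cls_list.sort(key=lambda cls: cls.get_seq_num())
  seq_nums = [cls.get_seq_num() for cls in cls_list]
  if len(seq_nums) != len(set(seq_nums)):
    raise Exception("duplicate seq_num among {0} classes".format(cls_name_prefix))
  return cls_list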
@ -1,176 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from my_error import MyError
|
||||
import mysql.connector
|
||||
from mysql.connector import errorcode
|
||||
from actions import BaseEachTenantDMLAction
|
||||
from actions import reflect_action_cls_list
|
||||
from actions import QueryCursor
|
||||
from actions import check_current_cluster_is_primary
|
||||
import logging
|
||||
import my_utils
|
||||
|
||||
'''
|
||||
添加一条each tenant dml的方法:
|
||||
|
||||
在本文件中,添加一个类名以"EachTenantDMLActionPost"开头并且继承自BaseEachTenantDMLAction的类,
|
||||
然后在这个类中实现以下成员函数,并且每个函数执行出错都要抛错:
|
||||
(1)@staticmethod get_seq_num():
|
||||
返回一个代表着执行顺序的序列号,该序列号在本文件中不允许重复,若有重复则会报错。
|
||||
(2)dump_before_do_action(self):
|
||||
执行action sql之前把一些相关数据dump到日志中。
|
||||
(3)check_before_do_action(self):
|
||||
执行action sql之前的检查。
|
||||
(4)dump_before_do_each_tenant_action(self, tenant_id):
|
||||
执行用参数tenant_id拼成的这条action sql之前把一些相关数据dump到日志中。
|
||||
(5)check_before_do_each_tenant_action(self, tenant_id):
|
||||
执行用参数tenant_id拼成的这条action sql之前的检查。
|
||||
(6)@staticmethod get_each_tenant_action_dml(tenant_id):
|
||||
返回用参数tenant_id拼成的一条action sql,并且该sql必须为dml。
|
||||
(7)@staticmethod get_each_tenant_rollback_sql(tenant_id):
|
||||
返回一条sql,用于回滚get_each_tenant_action_dml(tenant_id)返回的sql。
|
||||
(8)dump_after_do_each_tenant_action(self, tenant_id):
|
||||
执行用参数tenant_id拼成的这条action sql之后把一些相关数据dump到日志中。
|
||||
(9)check_after_do_each_tenant_action(self, tenant_id):
|
||||
执行用参数tenant_id拼成的这条action sql之后的检查。
|
||||
(10)dump_after_do_action(self):
|
||||
执行action sql之后把一些相关数据dump到日志中。
|
||||
(11)check_after_do_action(self):
|
||||
执行action sql之后的检查。
|
||||
(12)skip_pre_check(self):
|
||||
check if check_before_do_action() can be skipped
|
||||
(13)skip_each_tenant_action(self):
|
||||
check if check_before_do_each_tenant_action() and do_each_tenant_action() can be skipped
|
||||
|
||||
举例:
|
||||
class EachTenantDMLActionPost1(BaseEachTenantDMLAction):
|
||||
@staticmethod
|
||||
def get_seq_num():
|
||||
return 0
|
||||
def dump_before_do_action(self):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select * from test.for_test1""")
|
||||
def skip_pre_check(self):
|
||||
return True
|
||||
def skip_each_tenant_action(self, tenant_id):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test1 where c2 = 9494""")
|
||||
return (len(results) > 0)
|
||||
def check_before_do_action(self):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test1 where c2 = 9494""")
|
||||
if len(results) > 0:
|
||||
raise MyError('some rows in table test.for_test1 whose c2 column is 9494 already exists')
|
||||
def dump_before_do_each_tenant_action(self, tenant_id):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select * from test.for_test1 where c2 = 9494""")
|
||||
def check_before_do_each_tenant_action(self, tenant_id):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test1 where pk = {0}""".format(tenant_id))
|
||||
if len(results) > 0:
|
||||
raise MyError('some rows in table test.for_test1 whose pk is {0} already exists'.format(tenant_id))
|
||||
@staticmethod
|
||||
def get_each_tenant_action_dml(tenant_id):
|
||||
return """insert into test.for_test1 value ({0}, 'for test 1', 9494)""".format(tenant_id)
|
||||
@staticmethod
|
||||
def get_each_tenant_rollback_sql(tenant_id):
|
||||
return """delete from test.for_test1 where pk = {0}""".format(tenant_id)
|
||||
def dump_after_do_each_tenant_action(self, tenant_id):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select * from test.for_test1 where c2 = 9494""")
|
||||
def check_after_do_each_tenant_action(self, tenant_id):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test1 where pk = {0}""".format(tenant_id))
|
||||
if len(results) != 1:
|
||||
raise MyError('there should be only one row whose primary key is {0} in table test.for_test1, but there has {1} rows like that'.format(tenant_id, len(results)))
|
||||
elif results[0][0] != tenant_id or results[0][1] != 'for test 1' or results[0][2] != 9494:
|
||||
raise MyError('the row that has been inserted is not expected, it is: [{0}]'.format(','.join(str(r) for r in results[0])))
|
||||
def dump_after_do_action(self):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select * from test.for_test1""")
|
||||
def check_after_do_action(self):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test1 where c2 = 9494""")
|
||||
if len(results) != len(self.get_tenant_id_list()):
|
||||
raise MyError('there should be {0} rows whose c2 column is 9494 in table test.for_test1, but there has {1} rows like that'.format(len(self.get_tenant_id_list()), len(results)))
|
||||
'''
|
||||
|
||||
#升级语句对应的action要写在下面的actions begin和actions end这两行之间,
|
||||
#因为基准版本更新的时候会调用reset_upgrade_scripts.py来清空actions begin和actions end
|
||||
#这两行之间的这些action,如果不写在这两行之间的话会导致清空不掉相应的action。
|
||||
|
||||
####========******####======== actions begin ========####******========####
|
||||
####========******####========= actions end =========####******========####
|
||||
def get_actual_tenant_id(tenant_id):
|
||||
return tenant_id if (1 == tenant_id) else 0;
|
||||
|
||||
def do_each_tenant_dml_actions_by_standby_cluster(standby_cluster_infos):
|
||||
try:
|
||||
tenant_id_list = [1]
|
||||
for standby_cluster_info in standby_cluster_infos:
|
||||
logging.info("do_each_tenant_dml_actions_by_standby_cluster: cluster_id = {0}, ip = {1}, port = {2}"
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
logging.info("create connection : cluster_id = {0}, ip = {1}, port = {2}"
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
conn = mysql.connector.connect(user = standby_cluster_info['user'],
|
||||
password = standby_cluster_info['pwd'],
|
||||
host = standby_cluster_info['ip'],
|
||||
port = standby_cluster_info['port'],
|
||||
database = 'oceanbase',
|
||||
raise_on_warnings = True)
|
||||
|
||||
cur = conn.cursor(buffered=True)
|
||||
conn.autocommit = True
|
||||
query_cur = QueryCursor(cur)
|
||||
is_primary = check_current_cluster_is_primary(query_cur)
|
||||
if is_primary:
|
||||
logging.exception("""primary cluster changed : cluster_id = {0}, ip = {1}, port = {2}"""
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
raise e
|
||||
|
||||
## process
|
||||
do_each_tenant_dml_actions(cur, tenant_id_list, True)
|
||||
|
||||
cur.close()
|
||||
conn.close()
|
||||
except Exception, e:
|
||||
logging.exception("""do_each_tenant_dml_actions_by_standby_cluster failed""")
|
||||
raise e
|
||||
|
||||
def do_each_tenant_dml_actions(cur, tenant_id_list, standby=False):
|
||||
import each_tenant_dml_actions_post
|
||||
cls_list = reflect_action_cls_list(each_tenant_dml_actions_post, 'EachTenantDMLActionPost')
|
||||
for cls in cls_list:
|
||||
logging.info('do each tenant dml acion, seq_num: %d', cls.get_seq_num())
|
||||
action = cls(cur, tenant_id_list)
|
||||
sys_tenant_id = 1
|
||||
action.change_tenant(sys_tenant_id)
|
||||
action.dump_before_do_action()
|
||||
if False == action.skip_pre_check():
|
||||
action.check_before_do_action()
|
||||
else:
|
||||
logging.info("skip pre check. seq_num: %d", cls.get_seq_num())
|
||||
for tenant_id in action.get_tenant_id_list():
|
||||
action.change_tenant(tenant_id)
|
||||
action.dump_before_do_each_tenant_action(tenant_id)
|
||||
if False == action.skip_each_tenant_action(tenant_id):
|
||||
action.check_before_do_each_tenant_action(tenant_id)
|
||||
action.do_each_tenant_action(tenant_id)
|
||||
else:
|
||||
logging.info("skip each tenant dml action, seq_num: %d, tenant_id: %d", cls.get_seq_num(), tenant_id)
|
||||
action.dump_after_do_each_tenant_action(tenant_id)
|
||||
action.check_after_do_each_tenant_action(tenant_id)
|
||||
action.change_tenant(sys_tenant_id)
|
||||
action.dump_after_do_action()
|
||||
if False == standby:
|
||||
action.check_after_do_action()
|
||||
|
||||
def get_each_tenant_dml_actions_sqls_str(tenant_id_list):
|
||||
import each_tenant_dml_actions_post
|
||||
ret_str = ''
|
||||
cls_list = reflect_action_cls_list(each_tenant_dml_actions_post, 'EachTenantDMLActionPost')
|
||||
for i in range(0, len(cls_list)):
|
||||
for j in range(0, len(tenant_id_list)):
|
||||
if i > 0 or j > 0:
|
||||
ret_str += '\n'
|
||||
ret_str += cls_list[i].get_each_tenant_action_dml(tenant_id_list[j]) + ';'
|
||||
return ret_str
|
||||
|
@ -1,190 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from my_error import MyError
|
||||
import mysql.connector
|
||||
from mysql.connector import errorcode
|
||||
from actions import BaseEachTenantDMLAction
|
||||
from actions import reflect_action_cls_list
|
||||
from actions import fetch_observer_version
|
||||
from actions import QueryCursor
|
||||
from actions import check_current_cluster_is_primary
|
||||
import logging
|
||||
import my_utils
|
||||
import actions
|
||||
import re
|
||||
|
||||
'''
|
||||
添加一条each tenant dml的方法:
|
||||
|
||||
在本文件中,添加一个类名以"EachTenantDMLActionPre"开头并且继承自BaseEachTenantDMLAction的类,
|
||||
然后在这个类中实现以下成员函数,并且每个函数执行出错都要抛错:
|
||||
(1)@staticmethod get_seq_num():
|
||||
返回一个代表着执行顺序的序列号,该序列号在本文件中不允许重复,若有重复则会报错。
|
||||
(2)dump_before_do_action(self):
|
||||
执行action sql之前把一些相关数据dump到日志中。
|
||||
(3)check_before_do_action(self):
|
||||
执行action sql之前的检查。
|
||||
(4)dump_before_do_each_tenant_action(self, tenant_id):
|
||||
执行用参数tenant_id拼成的这条action sql之前把一些相关数据dump到日志中。
|
||||
(5)check_before_do_each_tenant_action(self, tenant_id):
|
||||
执行用参数tenant_id拼成的这条action sql之前的检查。
|
||||
(6)@staticmethod get_each_tenant_action_dml(tenant_id):
|
||||
返回用参数tenant_id拼成的一条action sql,并且该sql必须为dml。
|
||||
(7)@staticmethod get_each_tenant_rollback_sql(tenant_id):
|
||||
返回一条sql,用于回滚get_each_tenant_action_dml(tenant_id)返回的sql。
|
||||
(8)dump_after_do_each_tenant_action(self, tenant_id):
|
||||
执行用参数tenant_id拼成的这条action sql之后把一些相关数据dump到日志中。
|
||||
(9)check_after_do_each_tenant_action(self, tenant_id):
|
||||
执行用参数tenant_id拼成的这条action sql之后的检查。
|
||||
(10)dump_after_do_action(self):
|
||||
执行action sql之后把一些相关数据dump到日志中。
|
||||
(11)check_after_do_action(self):
|
||||
执行action sql之后的检查。
|
||||
(12)skip_pre_check(self):
|
||||
check if check_before_do_action() can be skipped
|
||||
(13)skip_each_tenant_action(self):
|
||||
check if check_before_do_each_tenant_action() and do_each_tenant_action() can be skipped
|
||||
|
||||
举例:
|
||||
class EachTenantDMLActionPre1(BaseEachTenantDMLAction):
|
||||
@staticmethod
|
||||
def get_seq_num():
|
||||
return 0
|
||||
def dump_before_do_action(self):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select * from test.for_test1""")
|
||||
def skip_pre_check(self):
|
||||
return True
|
||||
def skip_each_tenant_action(self, tenant_id):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test1 where c2 = 9494""")
|
||||
return (len(results) > 0)
|
||||
def check_before_do_action(self):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test1 where c2 = 9494""")
|
||||
if len(results) > 0:
|
||||
raise MyError('some rows in table test.for_test1 whose c2 column is 9494 already exists')
|
||||
def dump_before_do_each_tenant_action(self, tenant_id):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select * from test.for_test1 where c2 = 9494""")
|
||||
def check_before_do_each_tenant_action(self, tenant_id):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test1 where pk = {0}""".format(tenant_id))
|
||||
if len(results) > 0:
|
||||
raise MyError('some rows in table test.for_test1 whose pk is {0} already exists'.format(tenant_id))
|
||||
@staticmethod
|
||||
def get_each_tenant_action_dml(tenant_id):
|
||||
return """insert into test.for_test1 value ({0}, 'for test 1', 9494)""".format(tenant_id)
|
||||
@staticmethod
|
||||
def get_each_tenant_rollback_sql(tenant_id):
|
||||
return """delete from test.for_test1 where pk = {0}""".format(tenant_id)
|
||||
def dump_after_do_each_tenant_action(self, tenant_id):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select * from test.for_test1 where c2 = 9494""")
|
||||
def check_after_do_each_tenant_action(self, tenant_id):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test1 where pk = {0}""".format(tenant_id))
|
||||
if len(results) != 1:
|
||||
raise MyError('there should be only one row whose primary key is {0} in table test.for_test1, but there has {1} rows like that'.format(tenant_id, len(results)))
|
||||
elif results[0][0] != tenant_id or results[0][1] != 'for test 1' or results[0][2] != 9494:
|
||||
raise MyError('the row that has been inserted is not expected, it is: [{0}]'.format(','.join(str(r) for r in results[0])))
|
||||
def dump_after_do_action(self):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select * from test.for_test1""")
|
||||
def check_after_do_action(self):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test1 where c2 = 9494""")
|
||||
if len(results) != len(self.get_tenant_id_list()):
|
||||
raise MyError('there should be {0} rows whose c2 column is 9494 in table test.for_test1, but there has {1} rows like that'.format(len(self.get_tenant_id_list()), len(results)))
|
||||
'''
|
||||
|
||||
#升级语句对应的action要写在下面的actions begin和actions end这两行之间,
|
||||
#因为基准版本更新的时候会调用reset_upgrade_scripts.py来清空actions begin和actions end
|
||||
#这两行之间的这些action,如果不写在这两行之间的话会导致清空不掉相应的action。
|
||||
|
||||
####========******####======== actions begin ========####******========####
|
||||
####========******####========= actions end =========####******========####
|
||||
def get_actual_tenant_id(tenant_id):
|
||||
return tenant_id if (1 == tenant_id) else 0;
|
||||
|
||||
def do_each_tenant_dml_actions(cur, tenant_id_list, standby=False):
|
||||
import each_tenant_dml_actions_pre
|
||||
cls_list = reflect_action_cls_list(each_tenant_dml_actions_pre, 'EachTenantDMLActionPre')
|
||||
|
||||
# check if pre upgrade script can run reentrantly
|
||||
query_cur = QueryCursor(cur)
|
||||
version = fetch_observer_version(query_cur)
|
||||
can_skip = False
|
||||
if (cmp(version, "2.2.77") >= 0 and cmp(version, "3.0.0") < 0):
|
||||
can_skip = True
|
||||
elif (cmp(version, "3.1.1") >= 0):
|
||||
can_skip = True
|
||||
else:
|
||||
can_skip = False
|
||||
|
||||
for cls in cls_list:
|
||||
logging.info('do each tenant dml acion, seq_num: %d', cls.get_seq_num())
|
||||
action = cls(cur, tenant_id_list)
|
||||
sys_tenant_id = 1
|
||||
action.change_tenant(sys_tenant_id)
|
||||
action.dump_before_do_action()
|
||||
if False == can_skip or False == action.skip_pre_check():
|
||||
action.check_before_do_action()
|
||||
else:
|
||||
logging.info("skip pre check. seq_num: %d", cls.get_seq_num())
|
||||
for tenant_id in action.get_tenant_id_list():
|
||||
action.change_tenant(tenant_id)
|
||||
action.dump_before_do_each_tenant_action(tenant_id)
|
||||
if False == can_skip or False == action.skip_each_tenant_action(tenant_id):
|
||||
action.check_before_do_each_tenant_action(tenant_id)
|
||||
action.do_each_tenant_action(tenant_id)
|
||||
else:
|
||||
logging.info("skip each tenant dml action, seq_num: %d, tenant_id: %d", cls.get_seq_num(), tenant_id)
|
||||
action.dump_after_do_each_tenant_action(tenant_id)
|
||||
action.check_after_do_each_tenant_action(tenant_id)
|
||||
action.change_tenant(sys_tenant_id)
|
||||
action.dump_after_do_action()
|
||||
if False == standby:
|
||||
action.check_after_do_action()
|
||||
|
||||
def do_each_tenant_dml_actions_by_standby_cluster(standby_cluster_infos):
|
||||
try:
|
||||
tenant_id_list = [1]
|
||||
for standby_cluster_info in standby_cluster_infos:
|
||||
logging.info("do_each_tenant_dml_actions_by_standby_cluster: cluster_id = {0}, ip = {1}, port = {2}"
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
logging.info("create connection : cluster_id = {0}, ip = {1}, port = {2}"
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
conn = mysql.connector.connect(user = standby_cluster_info['user'],
|
||||
password = standby_cluster_info['pwd'],
|
||||
host = standby_cluster_info['ip'],
|
||||
port = standby_cluster_info['port'],
|
||||
database = 'oceanbase',
|
||||
raise_on_warnings = True)
|
||||
|
||||
cur = conn.cursor(buffered=True)
|
||||
conn.autocommit = True
|
||||
query_cur = QueryCursor(cur)
|
||||
is_primary = check_current_cluster_is_primary(query_cur)
|
||||
if is_primary:
|
||||
logging.exception("""primary cluster changed : cluster_id = {0}, ip = {1}, port = {2}"""
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
raise e
|
||||
|
||||
## process
|
||||
do_each_tenant_dml_actions(cur, tenant_id_list, True)
|
||||
|
||||
cur.close()
|
||||
conn.close()
|
||||
except Exception, e:
|
||||
logging.exception("""do_each_tenant_dml_actions_by_standby_cluster failed""")
|
||||
raise e
|
||||
|
||||
def get_each_tenant_dml_actions_sqls_str(tenant_id_list):
|
||||
import each_tenant_dml_actions_pre
|
||||
ret_str = ''
|
||||
cls_list = reflect_action_cls_list(each_tenant_dml_actions_pre, 'EachTenantDMLActionPre')
|
||||
for i in range(0, len(cls_list)):
|
||||
for j in range(0, len(tenant_id_list)):
|
||||
if i > 0 or j > 0:
|
||||
ret_str += '\n'
|
||||
ret_str += cls_list[i].get_each_tenant_action_dml(tenant_id_list[j]) + ';'
|
||||
return ret_str
|
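
The pre-upgrade runners above derive can_skip, i.e. whether the script may be re-run without repeating its pre-checks, from the observer version: re-entry is allowed for [2.2.77, 3.0.0) and for 3.1.1 and later. Factored into a helper it might look like the sketch below, which reuses the hypothetical compare_version sketched earlier instead of Python 2's cmp():

def pre_upgrade_checks_skippable(version):
  # version windows mirror the if/elif chain used by the pre scripts above
  if compare_version(version, "2.2.77") >= 0 and compare_version(version, "3.0.0") < 0:
    return True
  if compare_version(version, "3.1.1") >= 0:
    return True
  return False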
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,134 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from my_error import MyError
|
||||
import mysql.connector
|
||||
from mysql.connector import errorcode
|
||||
from actions import BaseDMLAction
|
||||
from actions import reflect_action_cls_list
|
||||
from actions import QueryCursor
|
||||
from actions import check_current_cluster_is_primary
|
||||
import logging
|
||||
import my_utils
|
||||
|
||||
'''
|
||||
添加一条normal dml的方法:
|
||||
|
||||
在本文件中,添加一个类名以"NormalDMLActionPost"开头并且继承自BaseDMLAction的类,
|
||||
然后在这个类中实现以下成员函数,并且每个函数执行出错都要抛错:
|
||||
(1)@staticmethod get_seq_num():
|
||||
返回一个代表着执行顺序的序列号,该序列号在本文件中不允许重复,若有重复则会报错。
|
||||
(2)dump_before_do_action(self):
|
||||
执行action sql之前把一些相关数据dump到日志中。
|
||||
(3)check_before_do_action(self):
|
||||
执行action sql之前的检查。
|
||||
(4)@staticmethod get_action_dml():
|
||||
返回action sql,并且该sql必须为dml。
|
||||
(5)@staticmethod get_rollback_sql():
|
||||
返回回滚该action的sql。
|
||||
(6)dump_after_do_action(self):
|
||||
执行action sql之后把一些相关数据dump到日志中。
|
||||
(7)check_after_do_action(self):
|
||||
执行action sql之后的检查。
|
||||
(8)skip_action(self):
|
||||
check if check_before_do_action() and do_action() can be skipped
|
||||
|
||||
举例:
|
||||
class NormalDMLActionPost1(BaseDMLAction):
|
||||
@staticmethod
|
||||
def get_seq_num():
|
||||
return 0
|
||||
def dump_before_do_action(self):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select * from test.for_test""")
|
||||
def skip_action(self):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test where pk = 9""")
|
||||
return (len(results) > 0)
|
||||
def check_before_do_action(self):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test where pk = 9""")
|
||||
if len(results) > 0:
|
||||
raise MyError('some row in table test.for_test whose primary key is 9 already exists')
|
||||
@staticmethod
|
||||
def get_action_dml():
|
||||
return """insert into test.for_test values (9, 'haha', 99)"""
|
||||
@staticmethod
|
||||
def get_rollback_sql():
|
||||
return """delete from test.for_test where pk = 9"""
|
||||
def dump_after_do_action(self):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select * from test.for_test""")
|
||||
def check_after_do_action(self):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test where pk = 9""")
|
||||
if len(results) != 1:
|
||||
raise MyError('there should be only one row whose primary key is 9 in table test.for_test, but there has {0} rows like that'.format(len(results)))
|
||||
elif results[0][0] != 9 or results[0][1] != 'haha' or results[0][2] != 99:
|
||||
raise MyError('the row that has been inserted is not expected, it is: [{0}]'.format(','.join(str(r) for r in results[0])))
|
||||
'''
|
||||
|
||||
#升级语句对应的action要写在下面的actions begin和actions end这两行之间,
|
||||
#因为基准版本更新的时候会调用reset_upgrade_scripts.py来清空actions begin和actions end
|
||||
#这两行之间的这些action,如果不写在这两行之间的话会导致清空不掉相应的action。
|
||||
|
||||
####========******####======== actions begin ========####******========####
|
||||
####========******####========= actions end =========####******========####
|
||||
|
||||
def do_normal_dml_actions_by_standby_cluster(standby_cluster_infos):
|
||||
try:
|
||||
for standby_cluster_info in standby_cluster_infos:
|
||||
logging.info("do_normal_dml_actions_by_standby_cluster: cluster_id = {0}, ip = {1}, port = {2}"
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
logging.info("create connection : cluster_id = {0}, ip = {1}, port = {2}"
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
conn = mysql.connector.connect(user = standby_cluster_info['user'],
|
||||
password = standby_cluster_info['pwd'],
|
||||
host = standby_cluster_info['ip'],
|
||||
port = standby_cluster_info['port'],
|
||||
database = 'oceanbase',
|
||||
raise_on_warnings = True)
|
||||
|
||||
cur = conn.cursor(buffered=True)
|
||||
conn.autocommit = True
|
||||
query_cur = QueryCursor(cur)
|
||||
is_primary = check_current_cluster_is_primary(query_cur)
|
||||
if is_primary:
|
||||
logging.exception("""primary cluster changed : cluster_id = {0}, ip = {1}, port = {2}"""
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
raise e
|
||||
|
||||
## process
|
||||
do_normal_dml_actions(cur)
|
||||
|
||||
cur.close()
|
||||
conn.close()
|
||||
except Exception, e:
|
||||
logging.exception("""do_normal_dml_actions_by_standby_cluster failed""")
|
||||
raise e
|
||||
|
||||
def do_normal_dml_actions(cur):
|
||||
import normal_dml_actions_post
|
||||
cls_list = reflect_action_cls_list(normal_dml_actions_post, 'NormalDMLActionPost')
|
||||
for cls in cls_list:
|
||||
logging.info('do normal dml acion, seq_num: %d', cls.get_seq_num())
|
||||
action = cls(cur)
|
||||
action.dump_before_do_action()
|
||||
if False == action.skip_action():
|
||||
action.check_before_do_action()
|
||||
action.do_action()
|
||||
else:
|
||||
logging.info("skip dml action, seq_num: %d", cls.get_seq_num())
|
||||
action.dump_after_do_action()
|
||||
action.check_after_do_action()
|
||||
|
||||
def get_normal_dml_actions_sqls_str():
|
||||
import normal_dml_actions_post
|
||||
ret_str = ''
|
||||
cls_list = reflect_action_cls_list(normal_dml_actions_post, 'NormalDMLActionPost')
|
||||
for i in range(0, len(cls_list)):
|
||||
if i > 0:
|
||||
ret_str += '\n'
|
||||
ret_str += cls_list[i].get_action_dml() + ';'
|
||||
return ret_str
|
@ -1,148 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
from my_error import MyError
|
||||
import mysql.connector
|
||||
from mysql.connector import errorcode
|
||||
from actions import BaseDMLAction
|
||||
from actions import reflect_action_cls_list
|
||||
from actions import fetch_observer_version
|
||||
from actions import QueryCursor
|
||||
from actions import check_current_cluster_is_primary
|
||||
import logging
|
||||
import my_utils
|
||||
|
||||
'''
|
||||
添加一条normal dml的方法:
|
||||
|
||||
在本文件中,添加一个类名以"NormalDMLActionPre"开头并且继承自BaseDMLAction的类,
|
||||
然后在这个类中实现以下成员函数,并且每个函数执行出错都要抛错:
|
||||
(1)@staticmethod get_seq_num():
|
||||
返回一个代表着执行顺序的序列号,该序列号在本文件中不允许重复,若有重复则会报错。
|
||||
(2)dump_before_do_action(self):
|
||||
执行action sql之前把一些相关数据dump到日志中。
|
||||
(3)check_before_do_action(self):
|
||||
执行action sql之前的检查。
|
||||
(4)@staticmethod get_action_dml():
|
||||
返回action sql,并且该sql必须为dml。
|
||||
(5)@staticmethod get_rollback_sql():
|
||||
返回回滚该action的sql。
|
||||
(6)dump_after_do_action(self):
|
||||
执行action sql之后把一些相关数据dump到日志中。
|
||||
(7)check_after_do_action(self):
|
||||
执行action sql之后的检查。
|
||||
(8)skip_action(self):
|
||||
check if check_before_do_action() and do_action() can be skipped
|
||||
|
||||
举例:
|
||||
class NormalDMLActionPre1(BaseDMLAction):
|
||||
@staticmethod
|
||||
def get_seq_num():
|
||||
return 0
|
||||
def dump_before_do_action(self):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select * from test.for_test""")
|
||||
def skip_action(self):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test where pk = 9""")
|
||||
return (len(results) > 0)
|
||||
def check_before_do_action(self):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test where pk = 9""")
|
||||
if len(results) > 0:
|
||||
raise MyError('some row in table test.for_test whose primary key is 9 already exists')
|
||||
@staticmethod
|
||||
def get_action_dml():
|
||||
return """insert into test.for_test values (9, 'haha', 99)"""
|
||||
@staticmethod
|
||||
def get_rollback_sql():
|
||||
return """delete from test.for_test where pk = 9"""
|
||||
def dump_after_do_action(self):
|
||||
my_utils.query_and_dump_results(self._query_cursor, """select * from test.for_test""")
|
||||
def check_after_do_action(self):
|
||||
(desc, results) = self._query_cursor.exec_query("""select * from test.for_test where pk = 9""")
|
||||
if len(results) != 1:
|
||||
raise MyError('there should be only one row whose primary key is 9 in table test.for_test, but there has {0} rows like that'.format(len(results)))
|
||||
elif results[0][0] != 9 or results[0][1] != 'haha' or results[0][2] != 99:
|
||||
raise MyError('the row that has been inserted is not expected, it is: [{0}]'.format(','.join(str(r) for r in results[0])))
|
||||
'''
|
||||
|
||||
#升级语句对应的action要写在下面的actions begin和actions end这两行之间,
|
||||
#因为基准版本更新的时候会调用reset_upgrade_scripts.py来清空actions begin和actions end
|
||||
#这两行之间的这些action,如果不写在这两行之间的话会导致清空不掉相应的action。
|
||||
|
||||
####========******####======== actions begin ========####******========####
|
||||
####========******####========= actions end =========####******========####
|
||||
|
||||
|
||||
def do_normal_dml_actions(cur):
  import normal_dml_actions_pre
  cls_list = reflect_action_cls_list(normal_dml_actions_pre, 'NormalDMLActionPre')

  # check whether the pre-upgrade script can be run reentrantly
  query_cur = QueryCursor(cur)
  version = fetch_observer_version(query_cur)
  can_skip = False
  if (cmp(version, "2.2.77") >= 0 and cmp(version, "3.0.0") < 0):
    can_skip = True
  elif (cmp(version, "3.1.1") >= 0):
    can_skip = True
  else:
    can_skip = False

  for cls in cls_list:
    logging.info('do normal dml action, seq_num: %d', cls.get_seq_num())
    action = cls(cur)
    action.dump_before_do_action()
    if False == can_skip or False == action.skip_action():
      action.check_before_do_action()
      action.do_action()
    else:
      logging.info("skip dml action, seq_num: %d", cls.get_seq_num())
    action.dump_after_do_action()
    action.check_after_do_action()

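# Illustrative note (not part of this script): the reentrancy check above uses the Python 2 builtin cmp().
# Because the compared versions ("2.2.77", "3.0.0", "3.1.1") are dotted numeric strings, the same ordering
# can be expressed with integer tuples, which also works on Python 3. parse_version and
# can_skip_pre_dml_actions below are hypothetical helpers, not functions of this repo.
def parse_version(version_str):
  # "2.2.77" -> (2, 2, 77); tuple comparison then gives the same ordering that cmp() produced
  return tuple(int(part) for part in version_str.split('.'))

def can_skip_pre_dml_actions(version):
  v = parse_version(version)
  return (parse_version("2.2.77") <= v < parse_version("3.0.0")) or v >= parse_version("3.1.1")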
def do_normal_dml_actions_by_standby_cluster(standby_cluster_infos):
  try:
    for standby_cluster_info in standby_cluster_infos:
      logging.info("do_normal_dml_actions_by_standby_cluster: cluster_id = {0}, ip = {1}, port = {2}"
                   .format(standby_cluster_info['cluster_id'],
                           standby_cluster_info['ip'],
                           standby_cluster_info['port']))
      logging.info("create connection : cluster_id = {0}, ip = {1}, port = {2}"
                   .format(standby_cluster_info['cluster_id'],
                           standby_cluster_info['ip'],
                           standby_cluster_info['port']))
      conn = mysql.connector.connect(user     = standby_cluster_info['user'],
                                     password = standby_cluster_info['pwd'],
                                     host     = standby_cluster_info['ip'],
                                     port     = standby_cluster_info['port'],
                                     database = 'oceanbase',
                                     raise_on_warnings = True)

      cur = conn.cursor(buffered=True)
      conn.autocommit = True
      query_cur = QueryCursor(cur)
      is_primary = check_current_cluster_is_primary(query_cur)
      if is_primary:
        logging.exception("""primary cluster changed : cluster_id = {0}, ip = {1}, port = {2}"""
                          .format(standby_cluster_info['cluster_id'],
                                  standby_cluster_info['ip'],
                                  standby_cluster_info['port']))
        raise e

      ## process
      do_normal_dml_actions(cur)

      cur.close()
      conn.close()
  except Exception, e:
    logging.exception("""do_normal_dml_actions_by_standby_cluster failed""")
    raise e

def get_normal_dml_actions_sqls_str():
  import normal_dml_actions_pre
  ret_str = ''
  cls_list = reflect_action_cls_list(normal_dml_actions_pre, 'NormalDMLActionPre')
  for i in range(0, len(cls_list)):
    if i > 0:
      ret_str += '\n'
    ret_str += cls_list[i].get_action_dml() + ';'
  return ret_str
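# Illustrative sketch (not part of this file): reflect_action_cls_list() is imported from elsewhere; based
# on how it is used above and on the docstring at the top of the file, it presumably collects every class in
# the module whose name starts with the given prefix, rejects duplicate seq_nums, and returns the classes
# ordered by get_seq_num(). A minimal version under those assumptions could look like this:
import inspect

def reflect_action_cls_list(module, cls_name_prefix):
  cls_list = [cls for (name, cls) in inspect.getmembers(module, inspect.isclass)
              if name.startswith(cls_name_prefix)]
  seq_nums = [cls.get_seq_num() for cls in cls_list]
  if len(seq_nums) != len(set(seq_nums)):
    raise Exception('duplicate seq_num in ' + module.__name__)
  return sorted(cls_list, key=lambda cls: cls.get_seq_num())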
File diff suppressed because it is too large
@ -6,61 +6,10 @@ import time
from actions import Cursor
from actions import DMLCursor
from actions import QueryCursor
from actions import check_current_cluster_is_primary
import mysql.connector
from mysql.connector import errorcode
import actions

def do_special_upgrade_in_standy_cluster(standby_cluster_infos, user, passwd):
|
||||
try:
|
||||
for standby_cluster_info in standby_cluster_infos:
|
||||
logging.info("do_special_upgrade_in_standy_cluster: cluster_id = {0}, ip = {1}, port = {2}"
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
logging.info("create connection : cluster_id = {0}, ip = {1}, port = {2}"
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
conn = mysql.connector.connect(user = standby_cluster_info['user'],
|
||||
password = standby_cluster_info['pwd'],
|
||||
host = standby_cluster_info['ip'],
|
||||
port = standby_cluster_info['port'],
|
||||
database = 'oceanbase',
|
||||
raise_on_warnings = True)
|
||||
|
||||
cur = conn.cursor(buffered=True)
|
||||
conn.autocommit = True
|
||||
query_cur = QueryCursor(cur)
|
||||
is_primary = check_current_cluster_is_primary(query_cur)
|
||||
if is_primary:
|
||||
logging.exception("""primary cluster changed : cluster_id = {0}, ip = {1}, port = {2}"""
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
raise e
|
||||
|
||||
## process
|
||||
do_special_upgrade_for_standby_cluster(conn, cur, user, passwd)
|
||||
|
||||
cur.close()
|
||||
conn.close()
|
||||
except Exception, e:
|
||||
logging.exception("""do_special_upgrade_for_standby_cluster failed""")
|
||||
raise e

# Upgrade actions that need to be executed on the standby cluster; on a standby cluster only the sys tenant is writable
def do_special_upgrade_for_standby_cluster(conn, cur, user, passwd):
  # Actions for upgrade statements must be written between the "actions begin" and "actions end" lines below,
  # because reset_upgrade_scripts.py is called when the baseline version is updated and it clears everything
  # between these two lines; code written outside of them cannot be cleared.
  tenant_id_list = [1]
  upgrade_system_package(conn, cur)
####========******####======== actions begin ========####******========####
  run_upgrade_job(conn, cur, "4.0.0.0")
  return
####========******####========= actions end =========####******========####

def do_special_upgrade(conn, cur, tenant_id_list, user, pwd):
  # special upgrade action
  # Actions for upgrade statements must be written between the "actions begin" and "actions end" lines below,
@ -68,7 +17,6 @@ def do_special_upgrade(conn, cur, tenant_id_list, user, pwd):
  # the code between these two lines; code written outside of them cannot be cleared.
  upgrade_system_package(conn, cur)
####========******####======== actions begin ========####******========####
  run_upgrade_job(conn, cur, "4.0.0.0")
  return
####========******####========= actions end =========####******========####

@ -11,7 +11,6 @@ import string
from random import Random
from actions import DMLCursor
from actions import QueryCursor
from actions import check_current_cluster_is_primary
import binascii
import my_utils
import actions
@ -47,57 +46,6 @@ import sys
# raise e
# logging.info('exec modify trigger finish')

def do_special_upgrade_in_standy_cluster(standby_cluster_infos, user, passwd):
|
||||
try:
|
||||
for standby_cluster_info in standby_cluster_infos:
|
||||
logging.info("do_special_upgrade_in_standy_cluster: cluster_id = {0}, ip = {1}, port = {2}"
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
logging.info("create connection : cluster_id = {0}, ip = {1}, port = {2}"
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
conn = mysql.connector.connect(user = standby_cluster_info['user'],
|
||||
password = standby_cluster_info['pwd'],
|
||||
host = standby_cluster_info['ip'],
|
||||
port = standby_cluster_info['port'],
|
||||
database = 'oceanbase',
|
||||
raise_on_warnings = True)
|
||||
|
||||
cur = conn.cursor(buffered=True)
|
||||
conn.autocommit = True
|
||||
query_cur = QueryCursor(cur)
|
||||
is_primary = check_current_cluster_is_primary(query_cur)
|
||||
if is_primary:
|
||||
logging.exception("""primary cluster changed : cluster_id = {0}, ip = {1}, port = {2}"""
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
raise e
|
||||
|
||||
## process
|
||||
do_special_upgrade_for_standby_cluster(conn, cur, user, passwd)
|
||||
|
||||
cur.close()
|
||||
conn.close()
|
||||
except Exception, e:
|
||||
logging.exception("""do_special_upgrade_for_standby_cluster failed""")
|
||||
raise e
|
||||

# Upgrade actions that need to be executed on the standby cluster; on a standby cluster only the sys tenant is writable
def do_special_upgrade_for_standby_cluster(conn, cur, user, passwd):
  # Actions for upgrade statements must be written between the "actions begin" and "actions end" lines below,
  # because reset_upgrade_scripts.py is called when the baseline version is updated and it clears everything
  # between these two lines; code written outside of them cannot be cleared.

  # The primary-cluster upgrade flow has no rolling-upgrade step, and until the DDL tests for the
  # mixed-version phase cover the relevant cases, DDL is disabled as soon as the mixed-version phase starts.
  actions.set_parameter(cur, 'enable_ddl', 'False')
  tenant_id_list = [1]
####========******####======== actions begin ========####******========####
  return
####========******####========= actions end =========####******========####

# Upgrade actions that need to be executed on the primary cluster
def do_special_upgrade(conn, cur, tenant_id_list, user, passwd):
  # special upgrade action
@ -109,358 +57,6 @@ def do_special_upgrade(conn, cur, tenant_id_list, user, passwd):
|
||||
####========******####======== actions begin ========####******========####
|
||||
return
|
||||
####========******####========= actions end =========####******========####
|
||||
def do_add_recovery_status_to_all_zone(conn, cur):
|
||||
try:
|
||||
logging.info('add recovery status row to __all_zone for each zone')
|
||||
zones = [];
|
||||
recovery_status = [];
|
||||
|
||||
# pre-check, may skip
|
||||
check_updated_sql = "select * from oceanbase.__all_zone where zone !='' AND name='recovery_status'"
|
||||
cur.execute(check_updated_sql)
|
||||
recovery_status = cur.fetchall()
|
||||
if 0 < len(recovery_status):
|
||||
logging.info('[recovery_status] row already exists, no need to add')
|
||||
|
||||
# get zones
|
||||
if 0 >= len(recovery_status):
|
||||
all_zone_sql = "select distinct(zone) zone from oceanbase.__all_zone where zone !=''"
|
||||
cur.execute(all_zone_sql)
|
||||
zone_results = cur.fetchall()
|
||||
for r in zone_results:
|
||||
zones.append("('" + r[0] + "', 'recovery_status', 0, 'NORMAL')")
|
||||
|
||||
# add rows
|
||||
if 0 < len(zones):
|
||||
upgrade_sql = "insert into oceanbase.__all_zone(zone, name, value, info) values " + ','.join(zones)
|
||||
logging.info(upgrade_sql)
|
||||
cur.execute(upgrade_sql)
|
||||
conn.commit()
|
||||
|
||||
# check result
|
||||
if 0 < len(zones):
|
||||
cur.execute(check_updated_sql)
|
||||
check_results = cur.fetchall()
|
||||
if len(check_results) != len(zones):
|
||||
raise MyError('fail insert [recovery_status] row into __all_zone')
|
||||
|
||||
except Exception, e:
|
||||
logging.exception('do_add_recovery_status_to_all_zone error')
|
||||
raise e
|
||||
|
||||
def modify_trigger(conn, cur, tenant_ids):
|
||||
try:
|
||||
conn.autocommit = True
|
||||
# disable ddl
|
||||
ori_enable_ddl = actions.get_ori_enable_ddl(cur)
|
||||
if ori_enable_ddl == 1:
|
||||
actions.set_parameter(cur, 'enable_ddl', 'False')
|
||||
log('tenant_ids: {0}'.format(tenant_ids))
|
||||
for tenant_id in tenant_ids:
|
||||
sql = """alter system change tenant tenant_id = {0}""".format(tenant_id)
|
||||
log(sql)
|
||||
cur.execute(sql)
|
||||
#####implement#####
|
||||
trigger_sql = """
|
||||
update __all_tenant_trigger
|
||||
set
|
||||
package_spec_source = replace(
|
||||
package_spec_source,
|
||||
'FUNCTION UPDATING(column VARCHAR2 := NULL) RETURN BOOL\;',
|
||||
'FUNCTION UPDATING(column_name VARCHAR2 := NULL) RETURN BOOL\;'
|
||||
),
|
||||
package_body_source = replace(replace(
|
||||
package_body_source,
|
||||
'
|
||||
PROCEDURE init_trigger(update_columns IN STRINGARRAY) IS
|
||||
BEGIN
|
||||
NULL\;
|
||||
END\;
|
||||
',
|
||||
'
|
||||
PROCEDURE init_trigger(update_columns IN STRINGARRAY) IS
|
||||
BEGIN
|
||||
update_columns_ := STRINGARRAY()\;
|
||||
update_columns_.EXTEND(update_columns.COUNT)\;
|
||||
FOR i IN 1 .. update_columns.COUNT LOOP
|
||||
update_columns_(i) := update_columns(i)\;
|
||||
END LOOP\;
|
||||
END\;
|
||||
'),
|
||||
'
|
||||
FUNCTION UPDATING(column VARCHAR2 := NULL) RETURN BOOL IS
|
||||
BEGIN
|
||||
RETURN (dml_event_ = 4)\;
|
||||
END\;
|
||||
',
|
||||
'
|
||||
FUNCTION UPDATING(column_name VARCHAR2 := NULL) RETURN BOOL IS
|
||||
is_updating BOOL\;
|
||||
BEGIN
|
||||
is_updating := (dml_event_ = 4)\;
|
||||
IF (is_updating AND column_name IS NOT NULL) THEN
|
||||
is_updating := FALSE\;
|
||||
FOR i IN 1 .. update_columns_.COUNT LOOP
|
||||
IF (UPPER(update_columns_(i)) = UPPER(column_name)) THEN is_updating := TRUE\; EXIT\; END IF\;
|
||||
END LOOP\;
|
||||
END IF\;
|
||||
RETURN is_updating\;
|
||||
END\;
|
||||
'); """
|
||||
|
||||
log(trigger_sql)
|
||||
cur.execute(trigger_sql)
|
||||
log("update rows = " + str(cur.rowcount))
|
||||
|
||||
trigger_history_sql = """
|
||||
update __all_tenant_trigger_history
|
||||
set
|
||||
package_spec_source = replace(
|
||||
package_spec_source,
|
||||
'FUNCTION UPDATING(column VARCHAR2 := NULL) RETURN BOOL\;',
|
||||
'FUNCTION UPDATING(column_name VARCHAR2 := NULL) RETURN BOOL\;'
|
||||
),
|
||||
package_body_source = replace(replace(
|
||||
package_body_source,
|
||||
'
|
||||
PROCEDURE init_trigger(update_columns IN STRINGARRAY) IS
|
||||
BEGIN
|
||||
NULL\;
|
||||
END\;
|
||||
',
|
||||
'
|
||||
PROCEDURE init_trigger(update_columns IN STRINGARRAY) IS
|
||||
BEGIN
|
||||
update_columns_ := STRINGARRAY()\;
|
||||
update_columns_.EXTEND(update_columns.COUNT)\;
|
||||
FOR i IN 1 .. update_columns.COUNT LOOP
|
||||
update_columns_(i) := update_columns(i)\;
|
||||
END LOOP\;
|
||||
END\;
|
||||
'),
|
||||
'
|
||||
FUNCTION UPDATING(column VARCHAR2 := NULL) RETURN BOOL IS
|
||||
BEGIN
|
||||
RETURN (dml_event_ = 4)\;
|
||||
END\;
|
||||
',
|
||||
'
|
||||
FUNCTION UPDATING(column_name VARCHAR2 := NULL) RETURN BOOL IS
|
||||
is_updating BOOL\;
|
||||
BEGIN
|
||||
is_updating := (dml_event_ = 4)\;
|
||||
IF (is_updating AND column_name IS NOT NULL) THEN
|
||||
is_updating := FALSE\;
|
||||
FOR i IN 1 .. update_columns_.COUNT LOOP
|
||||
IF (UPPER(update_columns_(i)) = UPPER(column_name)) THEN is_updating := TRUE\; EXIT\; END IF\;
|
||||
END LOOP\;
|
||||
END IF\;
|
||||
RETURN is_updating\;
|
||||
END\;
|
||||
')
|
||||
where is_deleted = 0; """
|
||||
|
||||
log(trigger_history_sql)
|
||||
cur.execute(trigger_history_sql)
|
||||
log("update rows = " + str(cur.rowcount))
|
||||
|
||||
#####implement end#####
|
||||
|
||||
# restore the default tenant
|
||||
sys_tenant_id = 1
|
||||
sql = """alter system change tenant tenant_id = {0}""".format(sys_tenant_id)
|
||||
log(sql)
|
||||
cur.execute(sql)
|
||||
|
||||
# enable ddl
|
||||
if ori_enable_ddl == 1:
|
||||
actions.set_parameter(cur, 'enable_ddl', 'True')
|
||||
except Exception, e:
|
||||
logging.warn("exec modify trigger failed")
|
||||
raise e
|
||||
logging.info('exec modify trigger finish')
|
||||
|
||||
def fill_priv_file_column_for_all_user(conn, cur):
|
||||
try:
|
||||
conn.autocommit = True
|
||||
# disable ddl
|
||||
ori_enable_ddl = actions.get_ori_enable_ddl(cur)
|
||||
if ori_enable_ddl == 1:
|
||||
actions.set_parameter(cur, 'enable_ddl', 'False')
|
||||
tenant_ids = get_tenant_ids(cur)
|
||||
log('tenant_ids: {0}'.format(tenant_ids))
|
||||
for tenant_id in tenant_ids:
|
||||
sql = """alter system change tenant tenant_id = {0}""".format(tenant_id)
|
||||
log(sql)
|
||||
cur.execute(sql)
|
||||
tenant_id_in_sql = 0
|
||||
if 1 == tenant_id:
|
||||
tenant_id_in_sql = 1
|
||||
|
||||
begin_user_id = 0
|
||||
begin_schema_version = 0
|
||||
fetch_num = 1000
|
||||
|
||||
while (True):
|
||||
query_limit = """
|
||||
where tenant_id = {0} and (user_id, schema_version) > ({1}, {2})
|
||||
order by tenant_id, user_id, schema_version
|
||||
limit {3}""".format(tenant_id_in_sql, begin_user_id, begin_schema_version, fetch_num)
|
||||
|
||||
sql = """select /*+ QUERY_TIMEOUT(1500000000) */ user_id, schema_version
|
||||
from oceanbase.__all_user_history""" + query_limit
|
||||
log(sql)
|
||||
result_rows = query(cur, sql)
|
||||
log("select rows = " + str(cur.rowcount))
|
||||
|
||||
if len(result_rows) <= 0:
|
||||
break
|
||||
else:
|
||||
last_schema_version = result_rows[-1][1]
|
||||
last_user_id = result_rows[-1][0]
|
||||
condition = """
|
||||
where priv_alter = 1 and priv_create = 1 and priv_create_user = 1 and priv_delete = 1
|
||||
and priv_drop = 1 and priv_insert = 1 and priv_update = 1 and priv_select = 1
|
||||
and priv_index = 1 and priv_create_view = 1 and priv_show_view = 1 and priv_show_db = 1
|
||||
and priv_super = 1 and priv_create_synonym = 1
|
||||
and tenant_id = {0} and (user_id, schema_version) > ({1}, {2})
|
||||
and (user_id, schema_version) <= ({3}, {4})
|
||||
""".format(tenant_id_in_sql, begin_user_id, begin_schema_version, last_user_id, last_schema_version)
|
||||
|
||||
sql = """update /*+ QUERY_TIMEOUT(150000000) */ oceanbase.__all_user_history
|
||||
set priv_file = 1""" + condition
|
||||
log(sql)
|
||||
cur.execute(sql)
|
||||
log("update rows = " + str(cur.rowcount))
|
||||
|
||||
condition = """
|
||||
where priv_super = 1 and tenant_id = {0} and (user_id, schema_version) > ({1}, {2})
|
||||
and (user_id, schema_version) <= ({3}, {4})
|
||||
""".format(tenant_id_in_sql, begin_user_id, begin_schema_version, last_user_id, last_schema_version)
|
||||
|
||||
sql = """update /*+ QUERY_TIMEOUT(150000000) */ oceanbase.__all_user_history
|
||||
set priv_alter_tenant = 1,
|
||||
priv_alter_system = 1,
|
||||
priv_create_resource_unit = 1,
|
||||
priv_create_resource_pool = 1 """ + condition
|
||||
log(sql)
|
||||
cur.execute(sql)
|
||||
|
||||
begin_schema_version = last_schema_version
|
||||
begin_user_id = last_user_id
|
||||
|
||||
begin_user_id = 0
|
||||
while (True):
|
||||
query_limit = """
|
||||
where tenant_id = {0} and user_id > {1}
|
||||
order by tenant_id, user_id
|
||||
limit {2}""".format(tenant_id_in_sql, begin_user_id, fetch_num)
|
||||
sql = """select /*+ QUERY_TIMEOUT(1500000000) */ user_id
|
||||
from oceanbase.__all_user""" + query_limit
|
||||
log(sql)
|
||||
result_rows = query(cur, sql)
|
||||
log("select rows = " + str(cur.rowcount))
|
||||
|
||||
if len(result_rows) <= 0:
|
||||
break
|
||||
else:
|
||||
end_user_id = result_rows[-1][0]
|
||||
condition = """
|
||||
where priv_alter = 1 and priv_create = 1 and priv_create_user = 1 and priv_delete = 1
|
||||
and priv_drop = 1 and priv_insert = 1 and priv_update = 1 and priv_select = 1
|
||||
and priv_index = 1 and priv_create_view = 1 and priv_show_view = 1 and priv_show_db = 1
|
||||
and priv_super = 1 and priv_create_synonym = 1
|
||||
and tenant_id = {0} and user_id > {1} and user_id <= {2}
|
||||
""".format(tenant_id_in_sql, begin_user_id, end_user_id)
|
||||
sql = """update /*+ QUERY_TIMEOUT(150000000) */ oceanbase.__all_user
|
||||
set priv_file = 1 """ + condition
|
||||
log(sql)
|
||||
cur.execute(sql)
|
||||
log("update rows = " + str(cur.rowcount))
|
||||
|
||||
condition = """
|
||||
where priv_super = 1
|
||||
and tenant_id = {0} and user_id > {1} and user_id <= {2}
|
||||
""".format(tenant_id_in_sql, begin_user_id, end_user_id)
|
||||
sql = """update /*+ QUERY_TIMEOUT(150000000) */ oceanbase.__all_user
|
||||
set priv_alter_tenant = 1,
|
||||
priv_alter_system = 1,
|
||||
priv_create_resource_unit = 1,
|
||||
priv_create_resource_pool = 1 """ + condition
|
||||
log(sql)
|
||||
cur.execute(sql)
|
||||
|
||||
begin_user_id = end_user_id
|
||||
|
||||
# restore the default tenant
|
||||
sys_tenant_id = 1
|
||||
sql = """alter system change tenant tenant_id = {0}""".format(sys_tenant_id)
|
||||
log(sql)
|
||||
cur.execute(sql)
|
||||
|
||||
# enable ddl
|
||||
if ori_enable_ddl == 1:
|
||||
actions.set_parameter(cur, 'enable_ddl', 'True')
|
||||
except Exception, e:
|
||||
logging.warn("exec fill priv_file to all_user failed")
|
||||
raise e
|
||||
logging.info('exec fill priv_file to all_user finish')
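# Illustrative note (not part of the script): the two while-loops above page through __all_user_history and
# __all_user with keyset pagination rather than OFFSET. Each round selects at most fetch_num rows ordered by
# the key, remembers the largest key it saw, applies the updates to the key range just read, and the next
# round asks only for rows with a strictly greater key; the loop stops once a round returns no rows. For
# example (values made up), with fetch_num = 1000 the first round might end at (user_id, schema_version) =
# (1103, 42), so the second round reads rows with (user_id, schema_version) > (1103, 42).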
|
||||
|
||||
def insert_split_schema_version_v2(conn, cur, user, pwd):
|
||||
try:
|
||||
query_cur = actions.QueryCursor(cur)
|
||||
is_primary = actions.check_current_cluster_is_primary(query_cur)
|
||||
if not is_primary:
|
||||
logging.warn("should run in primary cluster")
|
||||
raise e
|
||||
|
||||
# primary cluster
|
||||
dml_cur = actions.DMLCursor(cur)
|
||||
sql = """replace into __all_core_table(table_name, row_id, column_name, column_value)
|
||||
values ('__all_global_stat', 1, 'split_schema_version_v2', '-1');"""
|
||||
rowcount = dml_cur.exec_update(sql)
|
||||
if rowcount <= 0:
|
||||
logging.warn("invalid rowcount : {0}".format(rowcount))
|
||||
raise e
|
||||
|
||||
# standby cluster
|
||||
standby_cluster_list = actions.fetch_standby_cluster_infos(conn, query_cur, user, pwd)
|
||||
for standby_cluster in standby_cluster_list:
|
||||
# connect
|
||||
logging.info("create connection : cluster_id = {0}, ip = {1}, port = {2}"
|
||||
.format(standby_cluster['cluster_id'],
|
||||
standby_cluster['ip'],
|
||||
standby_cluster['port']))
|
||||
tmp_conn = mysql.connector.connect(user = standby_cluster['user'],
|
||||
password = standby_cluster['pwd'],
|
||||
host = standby_cluster['ip'],
|
||||
port = standby_cluster['port'],
|
||||
database = 'oceanbase')
|
||||
tmp_cur = tmp_conn.cursor(buffered=True)
|
||||
tmp_conn.autocommit = True
|
||||
tmp_query_cur = actions.QueryCursor(tmp_cur)
|
||||
# check that this is a standby cluster
|
||||
is_primary = actions.check_current_cluster_is_primary(tmp_query_cur)
|
||||
if is_primary:
|
||||
logging.exception("""primary cluster changed : cluster_id = {0}, ip = {1}, port = {2}"""
|
||||
.format(standby_cluster['cluster_id'],
|
||||
standby_cluster['ip'],
|
||||
standby_cluster['port']))
|
||||
raise e
|
||||
# replace
|
||||
tmp_dml_cur = actions.DMLCursor(tmp_cur)
|
||||
sql = """replace into __all_core_table(table_name, row_id, column_name, column_value)
|
||||
values ('__all_global_stat', 1, 'split_schema_version_v2', '-1');"""
|
||||
rowcount = tmp_dml_cur.exec_update(sql)
|
||||
if rowcount <= 0:
|
||||
logging.warn("invalid rowcount : {0}".format(rowcount))
|
||||
raise e
|
||||
# close
|
||||
tmp_cur.close()
|
||||
tmp_conn.close()
|
||||
except Exception, e:
|
||||
logging.warn("init split_schema_version_v2 failed")
|
||||
raise e
|
||||
|
||||
def query(cur, sql):
|
||||
log(sql)
|
||||
@ -477,69 +73,3 @@ def get_oracle_tenant_ids(cur):
|
||||
def get_tenant_ids(cur):
|
||||
return [_[0] for _ in query(cur, 'select tenant_id from oceanbase.__all_tenant')]
|
||||
|
||||
# Fix the data in the position column of __all_foreign_key_column and __all_foreign_key_column_history for old foreign keys carried over from the upgrade
|
||||
def modify_foreign_key_column_position_info(conn, cur):
|
||||
try:
|
||||
conn.autocommit = True
|
||||
# disable ddl
|
||||
ori_enable_ddl = actions.get_ori_enable_ddl(cur)
|
||||
if ori_enable_ddl == 1:
|
||||
actions.set_parameter(cur, 'enable_ddl', 'False')
|
||||
tenant_ids = get_tenant_ids(cur)
|
||||
log('tenant_ids: {0}'.format(tenant_ids))
|
||||
for tenant_id in tenant_ids:
|
||||
sql = """alter system change tenant tenant_id = {0}""".format(tenant_id)
|
||||
log(sql)
|
||||
cur.execute(sql)
|
||||
tenant_id_in_sql = 0
|
||||
if 1 == tenant_id:
|
||||
tenant_id_in_sql = 1
|
||||
# query all foreign keys of the tenant that have not been dropped
|
||||
sql = """select /*+ QUERY_TIMEOUT(1500000000) */ foreign_key_id from oceanbase.__all_foreign_key where tenant_id = {0}""".format(tenant_id_in_sql)
|
||||
log(sql)
|
||||
foreign_key_id_rows = query(cur, sql)
|
||||
fk_num = len(foreign_key_id_rows)
|
||||
cnt = 0
|
||||
# iterate over every foreign key and check whether the position recorded in oceanbase.__all_foreign_key_column is 0; if it is 0, it needs to be updated to the correct value
|
||||
while cnt < fk_num:
|
||||
foreign_key_id = foreign_key_id_rows[cnt][0]
|
||||
sql = """select /*+ QUERY_TIMEOUT(1500000000) */ child_column_id, parent_column_id from oceanbase.__all_foreign_key_column where foreign_key_id = {0} and position = 0 and tenant_id = {1} order by gmt_create asc""".format(foreign_key_id, tenant_id_in_sql)
|
||||
log(sql)
|
||||
need_update_rows = query(cur, sql)
|
||||
fk_col_num = len(need_update_rows)
|
||||
if fk_col_num > 0:
|
||||
position = 1
|
||||
# iterate over every column of this foreign key whose position is 0
|
||||
while position <= fk_col_num:
|
||||
child_column_id = need_update_rows[position - 1][0]
|
||||
parent_column_id = need_update_rows[position - 1][1]
|
||||
# update the position value in oceanbase.__all_foreign_key_column_history
|
||||
sql = """update /*+ QUERY_TIMEOUT(150000000) */ oceanbase.__all_foreign_key_column_history set position = {0} where foreign_key_id = {1} and child_column_id = {2} and parent_column_id = {3} and tenant_id = {4}""".format(position, foreign_key_id, child_column_id, parent_column_id, tenant_id_in_sql)
|
||||
log(sql)
|
||||
cur.execute(sql)
|
||||
if cur.rowcount == 0:
|
||||
logging.warn("affected rows is 0 when update oceanbase.__all_foreign_key_column_history")
|
||||
raise e
|
||||
# update the position value in oceanbase.__all_foreign_key_column
|
||||
sql = """update /*+ QUERY_TIMEOUT(150000000) */ oceanbase.__all_foreign_key_column set position = {0} where foreign_key_id = {1} and child_column_id = {2} and parent_column_id = {3} and tenant_id = {4}""".format(position, foreign_key_id, child_column_id, parent_column_id, tenant_id_in_sql)
|
||||
log(sql)
|
||||
cur.execute(sql)
|
||||
if cur.rowcount != 1:
|
||||
logging.warn("affected rows is not 1 when update oceanbase.__all_foreign_key_column")
|
||||
raise e
|
||||
position = position + 1
|
||||
cnt = cnt + 1
|
||||
# restore the default tenant
|
||||
sys_tenant_id = 1
|
||||
sql = """alter system change tenant tenant_id = {0}""".format(sys_tenant_id)
|
||||
log(sql)
|
||||
cur.execute(sql)
|
||||
|
||||
# enable ddl
|
||||
if ori_enable_ddl == 1:
|
||||
actions.set_parameter(cur, 'enable_ddl', 'True')
|
||||
except Exception, e:
|
||||
logging.warn("modify foreign key column position failed")
|
||||
raise e
|
||||
logging.info('modify foreign key column position finish')
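# Worked example (illustration only, the concrete ids below are made up): for a composite foreign key whose
# three columns were all left with position = 0 by an older version, the loop above orders the matching
# oceanbase.__all_foreign_key_column rows by gmt_create and rewrites position to 1, 2, 3 in that order,
# applying the same values to oceanbase.__all_foreign_key_column_history first:
#   before: (foreign_key_id=1001, child_column_id=16, parent_column_id=16, position=0)  (oldest gmt_create)
#           (foreign_key_id=1001, child_column_id=17, parent_column_id=17, position=0)
#           (foreign_key_id=1001, child_column_id=18, parent_column_id=18, position=0)  (newest gmt_create)
#   after:  the same rows carry position = 1, 2 and 3 respectively in both tables.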
|
||||
|
||||
|
@ -1,220 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# sys_vars_dict.py is generated by gen_ob_sys_variables.py from ob_system_variable_init.json and upgrade_sys_var_base_script.py; do not modify it manually
|
||||
sys_var_dict = {}
|
||||
sys_var_dict["auto_increment_increment"] = {"id": 0, "name": "auto_increment_increment", "value": "1", "data_type": 10, "info": " ", "flags": 131, "min_val": "1", "max_val": "65535"}
|
||||
sys_var_dict["auto_increment_offset"] = {"id": 1, "name": "auto_increment_offset", "value": "1", "data_type": 10, "info": " ", "flags": 3, "min_val": "1", "max_val": "65535"}
|
||||
sys_var_dict["autocommit"] = {"id": 2, "name": "autocommit", "value": "1", "data_type": 5, "info": " ", "flags": 131}
|
||||
sys_var_dict["character_set_client"] = {"id": 3, "name": "character_set_client", "value": "45", "data_type": 5, "info": "The character set in which statements are sent by the client", "flags": 163}
|
||||
sys_var_dict["character_set_connection"] = {"id": 4, "name": "character_set_connection", "value": "45", "data_type": 5, "info": "The character set which should be translated to after receiving the statement", "flags": 163}
|
||||
sys_var_dict["character_set_database"] = {"id": 5, "name": "character_set_database", "value": "45", "data_type": 5, "info": "The character set of the default database", "flags": 4131}
|
||||
sys_var_dict["character_set_results"] = {"id": 6, "name": "character_set_results", "value": "45", "data_type": 5, "info": "The character set which server should translate to before shipping result sets or error message back to the client", "flags": 99}
|
||||
sys_var_dict["character_set_server"] = {"id": 7, "name": "character_set_server", "value": "45", "data_type": 5, "info": "The server character set", "flags": 4131}
|
||||
sys_var_dict["character_set_system"] = {"id": 8, "name": "character_set_system", "value": "45", "data_type": 5, "info": "The character set used by the server for storing identifiers.", "flags": 7}
|
||||
sys_var_dict["collation_connection"] = {"id": 9, "name": "collation_connection", "value": "45", "data_type": 5, "info": "The collation which the server should translate to after receiving the statement", "flags": 227}
|
||||
sys_var_dict["collation_database"] = {"id": 10, "name": "collation_database", "value": "45", "data_type": 5, "info": "The collation of the default database", "flags": 4259}
|
||||
sys_var_dict["collation_server"] = {"id": 11, "name": "collation_server", "value": "45", "data_type": 5, "info": "The server collation", "flags": 4259}
|
||||
sys_var_dict["interactive_timeout"] = {"id": 12, "name": "interactive_timeout", "value": "28800", "data_type": 5, "info": "The number of seconds the server waits for activity on an interactive connection before closing it.", "flags": 3, "min_val": "1", "max_val": "31536000"}
|
||||
sys_var_dict["last_insert_id"] = {"id": 13, "name": "last_insert_id", "value": "0", "data_type": 10, "info": " ", "flags": 2, "min_val": "0", "max_val": "18446744073709551615"}
|
||||
sys_var_dict["max_allowed_packet"] = {"id": 14, "name": "max_allowed_packet", "value": "4194304", "data_type": 5, "info": "Max packet length to send to or receive from the server", "flags": 139, "min_val": "1024", "max_val": "1073741824"}
|
||||
sys_var_dict["sql_mode"] = {"id": 15, "name": "sql_mode", "value": "12582912", "data_type": 10, "info": " ", "flags": 4291}
|
||||
sys_var_dict["time_zone"] = {"id": 16, "name": "time_zone", "value": "+8:00", "data_type": 22, "info": " ", "flags": 131}
|
||||
sys_var_dict["tx_isolation"] = {"id": 17, "name": "tx_isolation", "value": "READ-COMMITTED", "data_type": 22, "info": "Transaction Isolcation Levels: READ-UNCOMMITTED READ-COMMITTED REPEATABLE-READ SERIALIZABLE", "flags": 131}
|
||||
sys_var_dict["version_comment"] = {"id": 18, "name": "version_comment", "value": "OceanBase 1.0.0", "data_type": 22, "info": " ", "flags": 5}
|
||||
sys_var_dict["wait_timeout"] = {"id": 19, "name": "wait_timeout", "value": "28800", "data_type": 5, "info": "The number of seconds the server waits for activity on a noninteractive connection before closing it.", "flags": 3, "min_val": "1", "max_val": "31536000"}
|
||||
sys_var_dict["binlog_row_image"] = {"id": 20, "name": "binlog_row_image", "value": "2", "data_type": 5, "info": "control row cells to logged", "flags": 195}
|
||||
sys_var_dict["character_set_filesystem"] = {"id": 21, "name": "character_set_filesystem", "value": "63", "data_type": 5, "info": " ", "flags": 35}
|
||||
sys_var_dict["connect_timeout"] = {"id": 22, "name": "connect_timeout", "value": "10", "data_type": 5, "info": " ", "flags": 1, "min_val": "2", "max_val": "31536000"}
|
||||
sys_var_dict["datadir"] = {"id": 23, "name": "datadir", "value": "/usr/local/mysql/data/", "data_type": 22, "info": " ", "flags": 5}
|
||||
sys_var_dict["debug_sync"] = {"id": 24, "name": "debug_sync", "value": "", "data_type": 22, "info": "Debug sync facility", "flags": 18}
|
||||
sys_var_dict["div_precision_increment"] = {"id": 25, "name": "div_precision_increment", "value": "4", "data_type": 5, "info": " ", "flags": 195, "min_val": "0", "max_val": "30"}
|
||||
sys_var_dict["explicit_defaults_for_timestamp"] = {"id": 26, "name": "explicit_defaults_for_timestamp", "value": "1", "data_type": 5, "info": "whether use traditional mode for timestamp", "flags": 195}
|
||||
sys_var_dict["group_concat_max_len"] = {"id": 27, "name": "group_concat_max_len", "value": "1024", "data_type": 10, "info": " ", "flags": 131, "min_val": "4", "max_val": "18446744073709551615"}
|
||||
sys_var_dict["identity"] = {"id": 28, "name": "identity", "value": "0", "data_type": 10, "info": "This variable is a synonym for the last_insert_id variable. It exists for compatibility with other database systems.", "flags": 2, "min_val": "0", "max_val": "18446744073709551615"}
|
||||
sys_var_dict["lower_case_table_names"] = {"id": 29, "name": "lower_case_table_names", "value": "1", "data_type": 5, "info": "how table database names are stored and compared, 0 means stored using the lettercase in the CREATE_TABLE or CREATE_DATABASE statement. Name comparisons are case sensitive; 1 means that table and database names are stored in lowercase abd name comparisons are not case sensitive.", "flags": 133, "min_val": "0", "max_val": "2"}
|
||||
sys_var_dict["net_read_timeout"] = {"id": 30, "name": "net_read_timeout", "value": "30", "data_type": 5, "info": " ", "flags": 3, "min_val": "1", "max_val": "31536000"}
|
||||
sys_var_dict["net_write_timeout"] = {"id": 31, "name": "net_write_timeout", "value": "60", "data_type": 5, "info": " ", "flags": 3, "min_val": "1", "max_val": "31536000"}
|
||||
sys_var_dict["read_only"] = {"id": 32, "name": "read_only", "value": "0", "data_type": 5, "info": " ", "flags": 65}
|
||||
sys_var_dict["sql_auto_is_null"] = {"id": 33, "name": "sql_auto_is_null", "value": "0", "data_type": 5, "info": " ", "flags": 195}
|
||||
sys_var_dict["sql_select_limit"] = {"id": 34, "name": "sql_select_limit", "value": "9223372036854775807", "data_type": 5, "info": " ", "flags": 131, "min_val": "0", "max_val": "9223372036854775807"}
|
||||
sys_var_dict["timestamp"] = {"id": 35, "name": "timestamp", "value": "0", "data_type": 15, "info": " ", "flags": 2, "min_val": "0"}
|
||||
sys_var_dict["tx_read_only"] = {"id": 36, "name": "tx_read_only", "value": "0", "data_type": 5, "info": " ", "flags": 131}
|
||||
sys_var_dict["version"] = {"id": 37, "name": "version", "value": "", "data_type": 22, "info": " ", "flags": 1}
|
||||
sys_var_dict["sql_warnings"] = {"id": 38, "name": "sql_warnings", "value": "0", "data_type": 5, "info": " ", "flags": 3}
|
||||
sys_var_dict["max_user_connections"] = {"id": 39, "name": "max_user_connections", "value": "0", "data_type": 10, "info": " ", "flags": 11, "min_val": "0", "max_val": "4294967295"}
|
||||
sys_var_dict["init_connect"] = {"id": 40, "name": "init_connect", "value": "", "data_type": 22, "info": " ", "flags": 1}
|
||||
sys_var_dict["license"] = {"id": 41, "name": "license", "value": "", "data_type": 22, "info": " ", "flags": 5}
|
||||
sys_var_dict["net_buffer_length"] = {"id": 42, "name": "net_buffer_length", "value": "16384", "data_type": 5, "info": "Buffer length for TCP/IP and socket communication", "flags": 11, "min_val": "1024", "max_val": "1048576"}
|
||||
sys_var_dict["system_time_zone"] = {"id": 43, "name": "system_time_zone", "value": "CST", "data_type": 22, "info": "The server system time zone", "flags": 133}
|
||||
sys_var_dict["query_cache_size"] = {"id": 44, "name": "query_cache_size", "value": "0", "data_type": 10, "info": "The memory allocated to store results from old queries(not used yet)", "flags": 4097, "min_val": "0", "max_val": "18446744073709551615"}
|
||||
sys_var_dict["query_cache_type"] = {"id": 45, "name": "query_cache_type", "value": "0", "data_type": 5, "info": "OFF = Do not cache or retrieve results. ON = Cache all results except SELECT SQL_NO_CACHE ... queries. DEMAND = Cache only SELECT SQL_CACHE ... queries(not used yet)", "flags": 4101}
|
||||
sys_var_dict["sql_quote_show_create"] = {"id": 46, "name": "sql_quote_show_create", "value": "1", "data_type": 5, "info": " ", "flags": 3}
|
||||
sys_var_dict["max_sp_recursion_depth"] = {"id": 47, "name": "max_sp_recursion_depth", "value": "0", "data_type": 5, "info": "The number of times that any given stored procedure may be called recursively.", "flags": 131, "min_val": "0", "max_val": "255"}
|
||||
sys_var_dict["sql_safe_updates"] = {"id": 48, "name": "sql_safe_updates", "value": "0", "data_type": 5, "info": "enable mysql sql safe updates", "flags": 4227}
|
||||
sys_var_dict["concurrent_insert"] = {"id": 49, "name": "concurrent_insert", "value": "AUTO", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["default_authentication_plugin"] = {"id": 50, "name": "default_authentication_plugin", "value": "mysql_native_password", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["disabled_storage_engines"] = {"id": 51, "name": "disabled_storage_engines", "value": "", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["error_count"] = {"id": 52, "name": "error_count", "value": "0", "data_type": 10, "info": "", "flags": 4098}
|
||||
sys_var_dict["general_log"] = {"id": 53, "name": "general_log", "value": "0", "data_type": 5, "info": "", "flags": 4099}
|
||||
sys_var_dict["have_openssl"] = {"id": 54, "name": "have_openssl", "value": "YES", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["have_profiling"] = {"id": 55, "name": "have_profiling", "value": "YES", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["have_ssl"] = {"id": 56, "name": "have_ssl", "value": "YES", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["hostname"] = {"id": 57, "name": "hostname", "value": "", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["lc_messages"] = {"id": 58, "name": "lc_messages", "value": "en_US", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["local_infile"] = {"id": 59, "name": "local_infile", "value": "1", "data_type": 5, "info": "", "flags": 4099}
|
||||
sys_var_dict["lock_wait_timeout"] = {"id": 60, "name": "lock_wait_timeout", "value": "31536000", "data_type": 5, "info": "", "flags": 4099, "min_val": "1", "max_val": "31536000"}
|
||||
sys_var_dict["long_query_time"] = {"id": 61, "name": "long_query_time", "value": "10", "data_type": 15, "info": "", "flags": 4099, "min_val": "0"}
|
||||
sys_var_dict["max_connections"] = {"id": 62, "name": "max_connections", "value": "2147483647", "data_type": 10, "info": "", "flags": 4097, "min_val": "1", "max_val": "2147483647"}
|
||||
sys_var_dict["max_execution_time"] = {"id": 63, "name": "max_execution_time", "value": "0", "data_type": 5, "info": "", "flags": 4099}
|
||||
sys_var_dict["protocol_version"] = {"id": 64, "name": "protocol_version", "value": "10", "data_type": 5, "info": "", "flags": 4099}
|
||||
sys_var_dict["server_id"] = {"id": 65, "name": "server_id", "value": "1", "data_type": 5, "info": "This variable specifies the server ID(not used yet, only sys var compatible)", "flags": 4097, "min_val": "0", "max_val": "4294967295"}
|
||||
sys_var_dict["ssl_ca"] = {"id": 66, "name": "ssl_ca", "value": "", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["ssl_capath"] = {"id": 67, "name": "ssl_capath", "value": "", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["ssl_cert"] = {"id": 68, "name": "ssl_cert", "value": "", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["ssl_cipher"] = {"id": 69, "name": "ssl_cipher", "value": "", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["ssl_crl"] = {"id": 70, "name": "ssl_crl", "value": "", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["ssl_crlpath"] = {"id": 71, "name": "ssl_crlpath", "value": "", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["ssl_key"] = {"id": 72, "name": "ssl_key", "value": "", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["time_format"] = {"id": 73, "name": "time_format", "value": "%H:%i:%s", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["tls_version"] = {"id": 74, "name": "tls_version", "value": "", "data_type": 22, "info": "TLSv1,TLSv1.1,TLSv1.2", "flags": 4099}
|
||||
sys_var_dict["tmp_table_size"] = {"id": 75, "name": "tmp_table_size", "value": "16777216", "data_type": 10, "info": "", "flags": 3, "min_val": "1024", "max_val": "18446744073709551615"}
|
||||
sys_var_dict["tmpdir"] = {"id": 76, "name": "tmpdir", "value": "", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["unique_checks"] = {"id": 77, "name": "unique_checks", "value": "1", "data_type": 5, "info": "", "flags": 4099}
|
||||
sys_var_dict["version_compile_machine"] = {"id": 78, "name": "version_compile_machine", "value": "", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["version_compile_os"] = {"id": 79, "name": "version_compile_os", "value": "", "data_type": 22, "info": "", "flags": 4099}
|
||||
sys_var_dict["warning_count"] = {"id": 80, "name": "warning_count", "value": "0", "data_type": 10, "info": "", "flags": 4098}
|
||||
sys_var_dict["session_track_schema"] = {"id": 81, "name": "session_track_schema", "value": "1", "data_type": 5, "info": "specifies whether return schema change info in ok packet", "flags": 4099}
|
||||
sys_var_dict["session_track_system_variables"] = {"id": 82, "name": "session_track_system_variables", "value": "time_zone, autocommit, character_set_client, character_set_results, character_set_connection", "data_type": 22, "info": "specifies whether return system variables change info in ok packet", "flags": 4099}
|
||||
sys_var_dict["session_track_state_change"] = {"id": 83, "name": "session_track_state_change", "value": "0", "data_type": 5, "info": "specifies whether return session state change info in ok packet", "flags": 4099}
|
||||
sys_var_dict["have_query_cache"] = {"id": 84, "name": "have_query_cache", "value": "NO", "data_type": 22, "info": "Whether to have query cache or not(not used yet, only compatible)", "flags": 4101}
|
||||
sys_var_dict["query_cache_limit"] = {"id": 85, "name": "query_cache_limit", "value": "0", "data_type": 10, "info": "The maximum query result set that can be cached by the query cache(not used yet, only sys var compatible)", "flags": 4097, "min_val": "0", "max_val": "18446744073709551615"}
|
||||
sys_var_dict["query_cache_min_res_unit"] = {"id": 86, "name": "query_cache_min_res_unit", "value": "0", "data_type": 10, "info": "The smallest unit of memory allocated by the query cache(not used yet, only sys var compatible)", "flags": 4097, "min_val": "0", "max_val": "18446744073709551615"}
|
||||
sys_var_dict["query_cache_wlock_invalidate"] = {"id": 87, "name": "query_cache_wlock_invalidate", "value": "0", "data_type": 5, "info": "query cache wirte lock for MyISAM engine (not used yet, only sys var compatible)", "flags": 4099}
|
||||
sys_var_dict["binlog_format"] = {"id": 88, "name": "binlog_format", "value": "2", "data_type": 5, "info": "set the binary logging format(not used yet, only sys var compatible)", "flags": 4103}
|
||||
sys_var_dict["binlog_checksum"] = {"id": 89, "name": "binlog_checksum", "value": "CRC32", "data_type": 22, "info": "this variable causes the source to write a checksum for each event in the binary log(not used yet, only sys var compatible)", "flags": 4101}
|
||||
sys_var_dict["binlog_rows_query_log_events"] = {"id": 90, "name": "binlog_rows_query_log_events", "value": "0", "data_type": 5, "info": "This system variable affects row-based logging only(not used yet, only sys var compatible)", "flags": 4103}
|
||||
sys_var_dict["log_bin"] = {"id": 91, "name": "log_bin", "value": "1", "data_type": 5, "info": "This variable reports only on the status of binary logging(not used yet, only sys var compatible)", "flags": 4101}
|
||||
sys_var_dict["server_uuid"] = {"id": 92, "name": "server_uuid", "value": "ObExprUuid::gen_server_uuid", "data_type": 22, "info": "server uuid", "flags": 4101}
|
||||
sys_var_dict["default_storage_engine"] = {"id": 93, "name": "default_storage_engine", "value": "OceanBase", "data_type": 22, "info": "The default storage engine of OceanBase", "flags": 4099}
|
||||
sys_var_dict["ob_interm_result_mem_limit"] = {"id": 10001, "name": "ob_interm_result_mem_limit", "value": "2147483648", "data_type": 5, "info": "Indicate how many bytes the interm result manager can alloc most for this tenant", "flags": 131}
|
||||
sys_var_dict["ob_proxy_partition_hit"] = {"id": 10002, "name": "ob_proxy_partition_hit", "value": "1", "data_type": 5, "info": "Indicate whether sql stmt hit right partition, readonly to user, modify by ob", "flags": 22}
|
||||
sys_var_dict["ob_log_level"] = {"id": 10003, "name": "ob_log_level", "value": "disabled", "data_type": 22, "info": "log level in session", "flags": 3}
|
||||
sys_var_dict["ob_query_timeout"] = {"id": 10005, "name": "ob_query_timeout", "value": "10000000", "data_type": 5, "info": "Query timeout in microsecond(us)", "flags": 131}
|
||||
sys_var_dict["ob_read_consistency"] = {"id": 10006, "name": "ob_read_consistency", "value": "3", "data_type": 5, "info": "read consistency level: 3=STRONG, 2=WEAK, 1=FROZEN", "flags": 195}
|
||||
sys_var_dict["ob_enable_transformation"] = {"id": 10007, "name": "ob_enable_transformation", "value": "1", "data_type": 5, "info": "whether use transform in session", "flags": 195}
|
||||
sys_var_dict["ob_trx_timeout"] = {"id": 10008, "name": "ob_trx_timeout", "value": "86400000000", "data_type": 5, "info": "The max duration of one transaction", "flags": 131}
|
||||
sys_var_dict["ob_enable_plan_cache"] = {"id": 10009, "name": "ob_enable_plan_cache", "value": "1", "data_type": 5, "info": "whether use plan cache in session", "flags": 131}
|
||||
sys_var_dict["ob_enable_index_direct_select"] = {"id": 10010, "name": "ob_enable_index_direct_select", "value": "0", "data_type": 5, "info": "whether can select from index table", "flags": 195}
|
||||
sys_var_dict["ob_proxy_set_trx_executed"] = {"id": 10011, "name": "ob_proxy_set_trx_executed", "value": "0", "data_type": 5, "info": "this value is true if we have executed set transaction stmt, until a transaction commit(explicit or implicit) successfully", "flags": 22}
|
||||
sys_var_dict["ob_enable_aggregation_pushdown"] = {"id": 10012, "name": "ob_enable_aggregation_pushdown", "value": "1", "data_type": 5, "info": "enable aggregation function to be push-downed through exchange nodes", "flags": 195}
|
||||
sys_var_dict["ob_last_schema_version"] = {"id": 10013, "name": "ob_last_schema_version", "value": "0", "data_type": 5, "info": " ", "flags": 2}
|
||||
sys_var_dict["ob_global_debug_sync"] = {"id": 10014, "name": "ob_global_debug_sync", "value": "", "data_type": 22, "info": "Global debug sync facility", "flags": 18}
|
||||
sys_var_dict["ob_proxy_global_variables_version"] = {"id": 10015, "name": "ob_proxy_global_variables_version", "value": "0", "data_type": 5, "info": "this value is global variables last modified time when server session create, used for proxy to judge whether global vars has changed between two server session", "flags": 22}
|
||||
sys_var_dict["ob_enable_show_trace"] = {"id": 10016, "name": "ob_enable_show_trace", "value": "0", "data_type": 5, "info": "control whether use show trace", "flags": 131}
|
||||
sys_var_dict["ob_bnl_join_cache_size"] = {"id": 10019, "name": "ob_bnl_join_cache_size", "value": "10485760", "data_type": 5, "info": "", "flags": 195, "min_val": "1", "max_val": "9223372036854775807"}
|
||||
sys_var_dict["ob_proxy_user_privilege"] = {"id": 10020, "name": "ob_proxy_user_privilege", "value": "0", "data_type": 5, "info": "Indicate current client session user privilege, readonly after modified by first observer", "flags": 22, "min_val": "0", "max_val": "9223372036854775807"}
|
||||
sys_var_dict["ob_org_cluster_id"] = {"id": 10021, "name": "ob_org_cluster_id", "value": "0", "data_type": 5, "info": "When the DRC system copies data into the target cluster, it needs to be set to the CLUSTER_ID that should be written into commit log of OceanBase, in order to avoid loop replication of data. Normally, it does not need to be set, and OceanBase will use the default value, which is the CLUSTER_ID of current cluster of OceanBase. 0 indicates it is not set, please do not set it to 0", "flags": 130, "min_val": "0", "max_val": "4294967295"}
|
||||
sys_var_dict["ob_plan_cache_percentage"] = {"id": 10022, "name": "ob_plan_cache_percentage", "value": "5", "data_type": 5, "info": "percentage of tenant memory resources that can be used by plan cache", "flags": 129, "min_val": "0", "max_val": "100"}
|
||||
sys_var_dict["ob_plan_cache_evict_high_percentage"] = {"id": 10023, "name": "ob_plan_cache_evict_high_percentage", "value": "90", "data_type": 5, "info": "memory usage percentage of plan_cache_limit at which plan cache eviction will be trigger", "flags": 129, "min_val": "0", "max_val": "100"}
|
||||
sys_var_dict["ob_plan_cache_evict_low_percentage"] = {"id": 10024, "name": "ob_plan_cache_evict_low_percentage", "value": "50", "data_type": 5, "info": "memory usage percentage of plan_cache_limit at which plan cache eviction will be stopped", "flags": 129, "min_val": "0", "max_val": "100"}
|
||||
sys_var_dict["recyclebin"] = {"id": 10025, "name": "recyclebin", "value": "0", "data_type": 5, "info": "When the recycle bin is enabled, dropped tables and their dependent objects are placed in the recycle bin. When the recycle bin is disabled, dropped tables and their dependent objects are not placed in the recycle bin; they are just dropped.", "flags": 3}
|
||||
sys_var_dict["ob_capability_flag"] = {"id": 10026, "name": "ob_capability_flag", "value": "0", "data_type": 10, "info": "Indicate features that observer supports, readonly after modified by first observer", "flags": 22, "min_val": "0", "max_val": "18446744073709551615"}
|
||||
sys_var_dict["is_result_accurate"] = {"id": 10028, "name": "is_result_accurate", "value": "1", "data_type": 5, "info": "when query is with topk hint, is_result_accurate indicates whether the result is acuurate or not ", "flags": 130}
|
||||
sys_var_dict["error_on_overlap_time"] = {"id": 10029, "name": "error_on_overlap_time", "value": "0", "data_type": 5, "info": "The variable determines how OceanBase should handle an ambiguous boundary datetime value a case in which it is not clear whether the datetime is in standard or daylight saving time", "flags": 131}
|
||||
sys_var_dict["ob_compatibility_mode"] = {"id": 10030, "name": "ob_compatibility_mode", "value": "0", "data_type": 5, "info": "What DBMS is OceanBase compatible with? MYSQL means it behaves like MySQL while ORACLE means it behaves like Oracle.", "flags": 2183}
|
||||
sys_var_dict["ob_sql_work_area_percentage"] = {"id": 10032, "name": "ob_sql_work_area_percentage", "value": "5", "data_type": 5, "info": "The percentage limitation of tenant memory for SQL execution.", "flags": 1, "min_val": "0", "max_val": "100"}
|
||||
sys_var_dict["ob_safe_weak_read_snapshot"] = {"id": 10033, "name": "ob_safe_weak_read_snapshot", "value": "1", "data_type": 5, "info": "The safe weak read snapshot version in one server", "flags": 146, "min_val": "0", "max_val": "9223372036854775807"}
|
||||
sys_var_dict["ob_route_policy"] = {"id": 10034, "name": "ob_route_policy", "value": "1", "data_type": 5, "info": "the routing policy of obproxy/java client and observer internal retry, 1=READONLY_ZONE_FIRST, 2=ONLY_READONLY_ZONE, 3=UNMERGE_ZONE_FIRST, 4=UNMERGE_FOLLOWER_FIRST", "flags": 195}
|
||||
sys_var_dict["ob_enable_transmission_checksum"] = {"id": 10035, "name": "ob_enable_transmission_checksum", "value": "1", "data_type": 5, "info": "whether do the checksum of the packet between the client and the server", "flags": 387}
|
||||
sys_var_dict["foreign_key_checks"] = {"id": 10036, "name": "foreign_key_checks", "value": "1", "data_type": 5, "info": "set to 1 (the default by MySQL), foreign key constraints are checked. If set to 0, foreign key constraints are ignored", "flags": 131}
|
||||
sys_var_dict["ob_statement_trace_id"] = {"id": 10037, "name": "ob_statement_trace_id", "value": "Y0-0", "data_type": 22, "info": "the trace id of current executing statement", "flags": 22}
|
||||
sys_var_dict["ob_enable_truncate_flashback"] = {"id": 10038, "name": "ob_enable_truncate_flashback", "value": "0", "data_type": 5, "info": "Enable the flashback of table truncation.", "flags": 3}
|
||||
sys_var_dict["ob_tcp_invited_nodes"] = {"id": 10039, "name": "ob_tcp_invited_nodes", "value": "127.0.0.1,::1", "data_type": 22, "info": "ip white list for tenant, support % and _ and multi ip(separated by commas), support ip match and wild match", "flags": 1}
|
||||
sys_var_dict["sql_throttle_current_priority"] = {"id": 10040, "name": "sql_throttle_current_priority", "value": "100", "data_type": 5, "info": "current priority used for SQL throttling", "flags": 3}
|
||||
sys_var_dict["sql_throttle_priority"] = {"id": 10041, "name": "sql_throttle_priority", "value": "-1", "data_type": 5, "info": "sql throttle priority, query may not be allowed to execute if its priority isnt greater than this value.", "flags": 1}
|
||||
sys_var_dict["sql_throttle_rt"] = {"id": 10042, "name": "sql_throttle_rt", "value": "-1", "data_type": 15, "info": "query may not be allowed to execute if its rt isnt less than this value.", "flags": 1}
|
||||
sys_var_dict["sql_throttle_cpu"] = {"id": 10043, "name": "sql_throttle_cpu", "value": "-1", "data_type": 15, "info": "query may not be allowed to execute if its CPU usage isnt less than this value.", "flags": 1}
|
||||
sys_var_dict["sql_throttle_io"] = {"id": 10044, "name": "sql_throttle_io", "value": "-1", "data_type": 5, "info": "query may not be allowed to execute if its number of IOs isnt less than this value.", "flags": 1}
|
||||
sys_var_dict["sql_throttle_network"] = {"id": 10045, "name": "sql_throttle_network", "value": "-1", "data_type": 15, "info": "query may not be allowed to execute if its network usage isnt less than this value.", "flags": 1}
|
||||
sys_var_dict["sql_throttle_logical_reads"] = {"id": 10046, "name": "sql_throttle_logical_reads", "value": "-1", "data_type": 5, "info": "query may not be allowed to execute if its number of logical reads isnt less than this value.", "flags": 1}
|
||||
sys_var_dict["auto_increment_cache_size"] = {"id": 10047, "name": "auto_increment_cache_size", "value": "1000000", "data_type": 5, "info": "auto_increment service cache size", "flags": 129, "min_val": "1", "max_val": "100000000"}
|
||||
sys_var_dict["ob_enable_jit"] = {"id": 10048, "name": "ob_enable_jit", "value": "0", "data_type": 5, "info": "JIT execution engine mode, default is AUTO", "flags": 195}
|
||||
sys_var_dict["ob_temp_tablespace_size_percentage"] = {"id": 10049, "name": "ob_temp_tablespace_size_percentage", "value": "0", "data_type": 5, "info": "the percentage limitation of some temp tablespace size in tenant disk.", "flags": 3}
|
||||
sys_var_dict["plugin_dir"] = {"id": 10052, "name": "plugin_dir", "value": "./plugin_dir/", "data_type": 22, "info": "the dir to place plugin dll", "flags": 5}
|
||||
sys_var_dict["ob_sql_audit_percentage"] = {"id": 10055, "name": "ob_sql_audit_percentage", "value": "3", "data_type": 5, "info": "The limited percentage of tenant memory for sql audit", "flags": 129, "min_val": "0", "max_val": "80"}
|
||||
sys_var_dict["ob_enable_sql_audit"] = {"id": 10056, "name": "ob_enable_sql_audit", "value": "1", "data_type": 5, "info": "wether use sql audit in session", "flags": 129}
|
||||
sys_var_dict["optimizer_use_sql_plan_baselines"] = {"id": 10057, "name": "optimizer_use_sql_plan_baselines", "value": "0", "data_type": 5, "info": "Enable use sql plan baseline", "flags": 131}
|
||||
sys_var_dict["optimizer_capture_sql_plan_baselines"] = {"id": 10058, "name": "optimizer_capture_sql_plan_baselines", "value": "0", "data_type": 5, "info": "optimizer_capture_sql_plan_baselines enables or disables automitic capture plan baseline.", "flags": 131}
|
||||
sys_var_dict["parallel_servers_target"] = {"id": 10060, "name": "parallel_servers_target", "value": "0", "data_type": 5, "info": "number of threads allowed to run parallel statements before statement queuing will be used.", "flags": 1, "min_val": "0", "max_val": "9223372036854775807"}
|
||||
sys_var_dict["ob_early_lock_release"] = {"id": 10061, "name": "ob_early_lock_release", "value": "0", "data_type": 5, "info": "If set true, transaction open the elr optimization.", "flags": 129}
|
||||
sys_var_dict["ob_trx_idle_timeout"] = {"id": 10062, "name": "ob_trx_idle_timeout", "value": "86400000000", "data_type": 5, "info": "The stmt interval timeout of transaction(us)", "flags": 131}
|
||||
sys_var_dict["block_encryption_mode"] = {"id": 10063, "name": "block_encryption_mode", "value": "0", "data_type": 5, "info": "specifies the encryption algorithm used in the functions aes_encrypt and aes_decrypt", "flags": 131}
|
||||
sys_var_dict["nls_date_format"] = {"id": 10064, "name": "nls_date_format", "value": "DD-MON-RR", "data_type": 22, "info": "specifies the default date format to use with the TO_CHAR and TO_DATE functions, (YYYY-MM-DD HH24:MI:SS) is Common value", "flags": 643}
|
||||
sys_var_dict["nls_timestamp_format"] = {"id": 10065, "name": "nls_timestamp_format", "value": "DD-MON-RR HH.MI.SSXFF AM", "data_type": 22, "info": "specifies the default date format to use with the TO_CHAR and TO_TIMESTAMP functions, (YYYY-MM-DD HH24:MI:SS.FF) is Common value", "flags": 643}
|
||||
sys_var_dict["nls_timestamp_tz_format"] = {"id": 10066, "name": "nls_timestamp_tz_format", "value": "DD-MON-RR HH.MI.SSXFF AM TZR", "data_type": 22, "info": "specifies the default timestamp with time zone format to use with the TO_CHAR and TO_TIMESTAMP_TZ functions, (YYYY-MM-DD HH24:MI:SS.FF TZR TZD) is common value", "flags": 643}
|
||||
sys_var_dict["ob_reserved_meta_memory_percentage"] = {"id": 10067, "name": "ob_reserved_meta_memory_percentage", "value": "10", "data_type": 5, "info": "percentage of tenant memory resources that can be used by tenant meta data", "flags": 129, "min_val": "1", "max_val": "100"}
|
||||
sys_var_dict["ob_check_sys_variable"] = {"id": 10068, "name": "ob_check_sys_variable", "value": "1", "data_type": 5, "info": "If set true, sql will update sys variable while schema version changed.", "flags": 131}
|
||||
sys_var_dict["nls_language"] = {"id": 10069, "name": "nls_language", "value": "AMERICAN", "data_type": 22, "info": "specifies the default language of the database, used for messages, day and month names, the default sorting mechanism, the default values of NLS_DATE_LANGUAGE and NLS_SORT.", "flags": 642}
|
||||
sys_var_dict["nls_territory"] = {"id": 10070, "name": "nls_territory", "value": "AMERICA", "data_type": 22, "info": "specifies the name of the territory whose conventions are to be followed for day and week numbering, establishes the default date format, the default decimal character and group separator, and the default ISO and local currency symbols.", "flags": 643}
|
||||
sys_var_dict["nls_sort"] = {"id": 10071, "name": "nls_sort", "value": "BINARY", "data_type": 22, "info": "specifies the collating sequence for character value comparison in various SQL operators and clauses.", "flags": 707}
|
||||
sys_var_dict["nls_comp"] = {"id": 10072, "name": "nls_comp", "value": "BINARY", "data_type": 22, "info": "specifies the collation behavior of the database session. value can be BINARY | LINGUISTIC | ANSI", "flags": 707}
|
||||
sys_var_dict["nls_characterset"] = {"id": 10073, "name": "nls_characterset", "value": "AL32UTF8", "data_type": 22, "info": "specifies the default characterset of the database, This parameter defines the encoding of the data in the CHAR, VARCHAR2, LONG and CLOB columns of a table.", "flags": 1733}
|
||||
sys_var_dict["nls_nchar_characterset"] = {"id": 10074, "name": "nls_nchar_characterset", "value": "AL16UTF16", "data_type": 22, "info": "specifies the default characterset of the database, This parameter defines the encoding of the data in the NCHAR, NVARCHAR2 and NCLOB columns of a table.", "flags": 705}
|
||||
sys_var_dict["nls_date_language"] = {"id": 10075, "name": "nls_date_language", "value": "AMERICAN", "data_type": 22, "info": "specifies the language to use for the spelling of day and month names and date abbreviations (a.m., p.m., AD, BC) returned by the TO_DATE and TO_CHAR functions.", "flags": 643}
|
||||
sys_var_dict["nls_length_semantics"] = {"id": 10076, "name": "nls_length_semantics", "value": "BYTE", "data_type": 22, "info": "specifies the default length semantics to use for VARCHAR2 and CHAR table columns, user-defined object attributes, and PL/SQL variables in database objects created in the session. The SYS user uses BYTE instead of NLS_LENGTH_SEMANTICS.", "flags": 707}
|
||||
sys_var_dict["nls_nchar_conv_excp"] = {"id": 10077, "name": "nls_nchar_conv_excp", "value": "FALSE", "data_type": 22, "info": "determines whether an error is reported when there is data loss during an implicit or explicit character type conversion between NCHAR/NVARCHAR2 and CHAR/VARCHAR2.", "flags": 707}
|
||||
sys_var_dict["nls_calendar"] = {"id": 10078, "name": "nls_calendar", "value": "GREGORIAN", "data_type": 22, "info": "specifies which calendar system Oracle uses.", "flags": 643}
|
||||
sys_var_dict["nls_numeric_characters"] = {"id": 10079, "name": "nls_numeric_characters", "value": ".,", "data_type": 22, "info": "specifies the characters to use as the decimal character and group separator, overrides those characters defined implicitly by NLS_TERRITORY.", "flags": 643}
|
||||
sys_var_dict["_nlj_batching_enabled"] = {"id": 10080, "name": "_nlj_batching_enabled", "value": "1", "data_type": 5, "info": "enable batching of the RHS IO in NLJ", "flags": 211}
|
||||
sys_var_dict["tracefile_identifier"] = {"id": 10081, "name": "tracefile_identifier", "value": "", "data_type": 22, "info": "The name of tracefile.", "flags": 130}
|
||||
sys_var_dict["_groupby_nopushdown_cut_ratio"] = {"id": 10082, "name": "_groupby_nopushdown_cut_ratio", "value": "3", "data_type": 10, "info": "ratio used to decide whether push down should be done in distributed query optimization.", "flags": 147}
|
||||
sys_var_dict["_px_broadcast_fudge_factor"] = {"id": 10083, "name": "_px_broadcast_fudge_factor", "value": "100", "data_type": 5, "info": "set the tq broadcasting fudge factor percentage.", "flags": 82, "min_val": "0", "max_val": "100"}
|
||||
sys_var_dict["transaction_isolation"] = {"id": 10085, "name": "transaction_isolation", "value": "READ-COMMITTED", "data_type": 22, "info": "Transaction Isolation Levels: READ-UNCOMMITTED READ-COMMITTED REPEATABLE-READ SERIALIZABLE", "flags": 131}
|
||||
sys_var_dict["ob_trx_lock_timeout"] = {"id": 10086, "name": "ob_trx_lock_timeout", "value": "-1", "data_type": 5, "info": "the max duration of waiting on row lock of one transaction", "flags": 131}
|
||||
sys_var_dict["validate_password_check_user_name"] = {"id": 10087, "name": "validate_password_check_user_name", "value": "0", "data_type": 5, "info": "", "flags": 129}
|
||||
sys_var_dict["validate_password_length"] = {"id": 10088, "name": "validate_password_length", "value": "0", "data_type": 10, "info": "", "flags": 129, "min_val": "0", "max_val": "2147483647"}
|
||||
sys_var_dict["validate_password_mixed_case_count"] = {"id": 10089, "name": "validate_password_mixed_case_count", "value": "0", "data_type": 10, "info": "", "flags": 129, "min_val": "0", "max_val": "2147483647"}
|
||||
sys_var_dict["validate_password_number_count"] = {"id": 10090, "name": "validate_password_number_count", "value": "0", "data_type": 10, "info": "", "flags": 129, "min_val": "0", "max_val": "2147483647"}
|
||||
sys_var_dict["validate_password_policy"] = {"id": 10091, "name": "validate_password_policy", "value": "0", "data_type": 5, "info": "", "flags": 129}
|
||||
sys_var_dict["validate_password_special_char_count"] = {"id": 10092, "name": "validate_password_special_char_count", "value": "0", "data_type": 10, "info": "", "flags": 129, "min_val": "0", "max_val": "2147483647"}
|
||||
sys_var_dict["default_password_lifetime"] = {"id": 10093, "name": "default_password_lifetime", "value": "0", "data_type": 10, "info": "", "flags": 129, "min_val": "0", "max_val": "65535"}
|
||||
sys_var_dict["_ob_ols_policy_session_labels"] = {"id": 10094, "name": "_ob_ols_policy_session_labels", "value": "", "data_type": 22, "info": "store all session labels for all label security policy.", "flags": 146}
|
||||
sys_var_dict["ob_trace_info"] = {"id": 10095, "name": "ob_trace_info", "value": "", "data_type": 22, "info": "store trace info", "flags": 2}
|
||||
sys_var_dict["_px_partition_scan_threshold"] = {"id": 10097, "name": "_px_partition_scan_threshold", "value": "64", "data_type": 5, "info": "least number of partitions per slave to start partition-based scan", "flags": 82, "min_val": "0", "max_val": "100"}
|
||||
sys_var_dict["_ob_px_bcast_optimization"] = {"id": 10098, "name": "_ob_px_bcast_optimization", "value": "1", "data_type": 5, "info": "broadcast optimization.", "flags": 147}
|
||||
sys_var_dict["_ob_px_slave_mapping_threshold"] = {"id": 10099, "name": "_ob_px_slave_mapping_threshold", "value": "200", "data_type": 5, "info": "percentage threshold to use slave mapping plan", "flags": 83, "min_val": "0", "max_val": "1000"}
|
||||
sys_var_dict["_enable_parallel_dml"] = {"id": 10100, "name": "_enable_parallel_dml", "value": "0", "data_type": 5, "info": "A DML statement can be parallelized only if you have explicitly enabled parallel DML in the session or in the SQL statement.", "flags": 210}
|
||||
sys_var_dict["_px_min_granules_per_slave"] = {"id": 10101, "name": "_px_min_granules_per_slave", "value": "13", "data_type": 5, "info": "minimum number of rowid range granules to generate per slave.", "flags": 82, "min_val": "0", "max_val": "100"}
|
||||
sys_var_dict["secure_file_priv"] = {"id": 10102, "name": "secure_file_priv", "value": "NULL", "data_type": 22, "info": "limit the effect of data import and export operations", "flags": 97}
|
||||
sys_var_dict["plsql_warnings"] = {"id": 10103, "name": "plsql_warnings", "value": "ENABLE:ALL", "data_type": 22, "info": "enables or disables the reporting of warning messages by the PL/SQL compiler, and specifies which warning messages to show as errors.", "flags": 643}
|
||||
sys_var_dict["_enable_parallel_query"] = {"id": 10104, "name": "_enable_parallel_query", "value": "1", "data_type": 5, "info": "A QUERY statement can be parallelized only if you have explicitly enabled parallel QUERY in the session or in the SQL statement.", "flags": 210}
|
||||
sys_var_dict["_force_parallel_query_dop"] = {"id": 10105, "name": "_force_parallel_query_dop", "value": "1", "data_type": 10, "info": "A QUERY statement can be parallelized only if you have explicitly enabled parallel QUERY in the session or in the SQL statement.", "flags": 210}
|
||||
sys_var_dict["_force_parallel_dml_dop"] = {"id": 10106, "name": "_force_parallel_dml_dop", "value": "1", "data_type": 10, "info": "A QUERY statement can be parallelized only if you have explicitly enabled parallel QUERY in the session or in the SQL statement.", "flags": 210}
|
||||
sys_var_dict["ob_pl_block_timeout"] = {"id": 10107, "name": "ob_pl_block_timeout", "value": "3216672000000000", "data_type": 5, "info": "PL/SQL timeout in microsecond(us)", "flags": 131, "min_val": "0", "max_val": "9223372036854775807"}
|
||||
sys_var_dict["transaction_read_only"] = {"id": 10108, "name": "transaction_read_only", "value": "0", "data_type": 5, "info": "Transaction access mode", "flags": 131}
|
||||
sys_var_dict["resource_manager_plan"] = {"id": 10109, "name": "resource_manager_plan", "value": "", "data_type": 22, "info": "specifies tenant resource plan.", "flags": 1}
|
||||
sys_var_dict["performance_schema"] = {"id": 10110, "name": "performance_schema", "value": "0", "data_type": 5, "info": "indicate whether the Performance Schema is enabled", "flags": 1}
|
||||
sys_var_dict["nls_currency"] = {"id": 10111, "name": "nls_currency", "value": "$", "data_type": 22, "info": "specifies the string to use as the local currency symbol for the L number format element. The default value of this parameter is determined by NLS_TERRITORY.", "flags": 643}
|
||||
sys_var_dict["nls_iso_currency"] = {"id": 10112, "name": "nls_iso_currency", "value": "AMERICA", "data_type": 22, "info": "specifies the string to use as the international currency symbol for the C number format element. The default value of this parameter is determined by NLS_TERRITORY", "flags": 643}
|
||||
sys_var_dict["nls_dual_currency"] = {"id": 10113, "name": "nls_dual_currency", "value": "$", "data_type": 22, "info": "specifies the dual currency symbol for the territory. The default is the dual currency symbol defined in the territory of your current language environment.", "flags": 643}
|
||||
sys_var_dict["plsql_ccflags"] = {"id": 10115, "name": "plsql_ccflags", "value": "", "data_type": 22, "info": "Lets you control conditional compilation of each PL/SQL unit independently.", "flags": 643}
|
||||
sys_var_dict["_ob_proxy_session_temporary_table_used"] = {"id": 10116, "name": "_ob_proxy_session_temporary_table_used", "value": "0", "data_type": 5, "info": "this value is true if we have executed set transaction stmt, until a transaction commit(explicit or implicit) successfully", "flags": 22}
|
||||
sys_var_dict["_enable_parallel_ddl"] = {"id": 10117, "name": "_enable_parallel_ddl", "value": "1", "data_type": 5, "info": "A DDL statement can be parallelized only if you have explicitly enabled parallel DDL in the session or in the SQL statement.", "flags": 210}
|
||||
sys_var_dict["_force_parallel_ddl_dop"] = {"id": 10118, "name": "_force_parallel_ddl_dop", "value": "1", "data_type": 10, "info": "A DDL statement can be parallelized only if you have explicitly enabled parallel DDL in the session or in the SQL statement.", "flags": 210}
|
||||
sys_var_dict["cursor_sharing"] = {"id": 10119, "name": "cursor_sharing", "value": "0", "data_type": 5, "info": "controls whether queries are parameterized: EXACT - the query is not parameterized; FORCE - the query is parameterized.", "flags": 3}
|
||||
sys_var_dict["_optimizer_null_aware_antijoin"] = {"id": 10120, "name": "_optimizer_null_aware_antijoin", "value": "1", "data_type": 5, "info": "specifies whether null-aware anti-join plans are allowed to be generated", "flags": 3}
|
||||
sys_var_dict["_px_partial_rollup_pushdown"] = {"id": 10121, "name": "_px_partial_rollup_pushdown", "value": "1", "data_type": 5, "info": "enable partial rollup push down optimization.", "flags": 147}
|
||||
sys_var_dict["_px_dist_agg_partial_rollup_pushdown"] = {"id": 10122, "name": "_px_dist_agg_partial_rollup_pushdown", "value": "1", "data_type": 5, "info": "enable distinct aggregate function to partial rollup push down optimization.", "flags": 147}
|
||||
sys_var_dict["_create_audit_purge_job"] = {"id": 10123, "name": "_create_audit_purge_job", "value": "", "data_type": 22, "info": "control audit log trail job in mysql mode", "flags": 4113}
|
||||
sys_var_dict["_drop_audit_purge_job"] = {"id": 10124, "name": "_drop_audit_purge_job", "value": "", "data_type": 22, "info": "drop audit log trail job in mysql mode", "flags": 4113}
|
||||
sys_var_dict["_set_purge_job_interval"] = {"id": 10125, "name": "_set_purge_job_interval", "value": "", "data_type": 22, "info": "set purge job interval in mysql mode, range in 1-999 days", "flags": 4113}
|
||||
sys_var_dict["_set_purge_job_status"] = {"id": 10126, "name": "_set_purge_job_status", "value": "", "data_type": 22, "info": "set purge job status in mysql mode, range: true/false", "flags": 4113}
|
||||
sys_var_dict["_set_last_archive_timestamp"] = {"id": 10127, "name": "_set_last_archive_timestamp", "value": "", "data_type": 22, "info": "set last archive timestamp in mysql mode, must utc time in usec from 1970", "flags": 4113}
|
||||
sys_var_dict["_clear_last_archive_timestamp"] = {"id": 10128, "name": "_clear_last_archive_timestamp", "value": "", "data_type": 22, "info": "clear last archive timestamp in mysql mode", "flags": 4113}
|
||||
sys_var_dict["_aggregation_optimization_settings"] = {"id": 10129, "name": "_aggregation_optimization_settings", "value": "0", "data_type": 10, "info": "Manually control some behaviors of aggregation", "flags": 3}
|
||||
sys_var_dict["_px_shared_hash_join"] = {"id": 10130, "name": "_px_shared_hash_join", "value": "1", "data_type": 5, "info": "enable shared hash table hash join optimization.", "flags": 147}
|
||||
sys_var_dict["sql_notes"] = {"id": 10131, "name": "sql_notes", "value": "0", "data_type": 5, "info": " ", "flags": 3}
|
||||
sys_var_dict["innodb_strict_mode"] = {"id": 10132, "name": "innodb_strict_mode", "value": "1", "data_type": 5, "info": "in certain case, warnings would be transformed to errors", "flags": 3}
|
||||
sys_var_dict["_windowfunc_optimization_settings"] = {"id": 10133, "name": "_windowfunc_optimization_settings", "value": "0", "data_type": 10, "info": "settings for window function optimizations", "flags": 3, "min_val": "0", "max_val": "9223372036854775807"}
|
||||
sys_var_dict["ob_enable_rich_error_msg"] = {"id": 10134, "name": "ob_enable_rich_error_msg", "value": "0", "data_type": 5, "info": "control whether print svr_ip,execute_time,trace_id", "flags": 3}
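# --- Illustrative sketch (not part of the generated dictionary): a minimal sanity
# check over the entries above, assuming only the keys shown here ("id", "name",
# "value", "data_type", "info", "flags", optional "min_val"/"max_val"). The helper
# name is hypothetical.
def validate_sys_var_dict(d):
    seen_ids = set()
    for name, entry in d.items():
        # every entry must carry the mandatory keys
        for key in ("id", "name", "value", "data_type", "info", "flags"):
            if key not in entry:
                raise ValueError('sys var %s is missing key %s' % (name, key))
        # the dict key must match the embedded name, and ids must be unique
        if entry["name"] != name:
            raise ValueError('sys var key %s does not match name %s' % (name, entry["name"]))
        if entry["id"] in seen_ids:
            raise ValueError('duplicate sys var id %d' % entry["id"])
        seen_ids.add(entry["id"])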
|
@ -270,140 +270,32 @@ def config_logging_module(log_filenamme):
|
||||
# 1. Check whether paxos replicas are in sync and whether any paxos replicas are missing
|
||||
def check_paxos_replica(query_cur):
|
||||
# 2.1 Check whether paxos replicas are in sync
|
||||
(desc, results) = query_cur.exec_query("""select count(1) as unsync_cnt from __all_virtual_clog_stat where is_in_sync = 0 and is_offline = 0 and replica_type != 16""")
|
||||
(desc, results) = query_cur.exec_query("""select count(1) as unsync_cnt from GV$OB_LOG_STAT where in_sync = 'NO'""")
|
||||
if results[0][0] > 0 :
|
||||
raise MyError('{0} replicas unsync, please check'.format(results[0][0]))
|
||||
# 2.2 Check whether any paxos replicas are missing TODO
|
||||
logging.info('check paxos replica success')
|
||||
|
||||
# 2. Check whether any rebalance or locality changes are in progress
|
||||
def check_rebalance_task(query_cur):
|
||||
# 2. Check whether all observers can serve
|
||||
def check_observer_status(query_cur):
|
||||
# 3.1 Check whether any locality change is in progress
|
||||
(desc, results) = query_cur.exec_query("""select count(1) as cnt from __all_rootservice_job where job_status='INPROGRESS' and return_code is null""")
|
||||
(desc, results) = query_cur.exec_query("""select count(*) from oceanbase.__all_server where start_service_time is null or status='INACTIVE'""")
|
||||
if results[0][0] > 0 :
|
||||
raise MyError('{0} locality change tasks are in progress, please check'.format(results[0][0]))
|
||||
# 3.2 Check whether any rebalance is in progress
|
||||
(desc, results) = query_cur.exec_query("""select count(1) as rebalance_task_cnt from __all_virtual_rebalance_task_stat""")
|
||||
if results[0][0] > 0 :
|
||||
raise MyError('{0} rebalance tasks are in progress, please check'.format(results[0][0]))
|
||||
logging.info('check rebalance task success')
|
||||
raise MyError('{0} observers are not serving, please check'.format(results[0][0]))
|
||||
logging.info('check observer status success')
|
||||
|
||||
# 3. Check the cluster status
|
||||
def check_cluster_status(query_cur):
|
||||
# 3. Check whether the schema has been refreshed successfully
|
||||
def check_schema_status(query_cur):
|
||||
# 4.1 Check that no major merge is in progress
|
||||
(desc, results) = query_cur.exec_query("""select info from __all_zone where zone='' and name='merge_status'""")
|
||||
if cmp(results[0][0], 'IDLE') != 0 :
|
||||
raise MyError('global status expected = {0}, actual = {1}'.format('IDLE', results[0][0]))
|
||||
logging.info('check cluster status success')
|
||||
# 4.2 Check that the last merged version is >= 2
|
||||
(desc, results) = query_cur.exec_query("""select cast(value as unsigned) value from __all_zone where zone='' and name='last_merged_version'""")
|
||||
if results[0][0] < 2:
|
||||
raise MyError('global last_merged_version expected >= 2 actual = {0}'.format(results[0][0]))
|
||||
logging.info('check global last_merged_version success')
|
||||
(desc, results) = query_cur.exec_query("""select count(*) from __all_server a left join __all_virtual_server_schema_info b on a.svr_ip = b.svr_ip and a.svr_port = b.svr_port where b.svr_ip is null""")
|
||||
if results[0][0] > 0 :
|
||||
raise MyError('refresh schema failed, please check')
|
||||
(desc, results) = query_cur.exec_query("""select count(*) from __all_virtual_server_schema_info a join __all_virtual_server_schema_info b on a.tenant_id = b.tenant_id where a.refreshed_schema_version != b.refreshed_schema_version or a.refreshed_schema_version <= 1""")
|
||||
if results[0][0] > 0 :
|
||||
raise MyError('refresh schema failed, please check')
|
||||
logging.info('check schema status success')
|
||||
|
||||
# 4. Check whether any tenant's partition count exceeds its memory limit
|
||||
def check_tenant_part_num(query_cur):
|
||||
# Count the number of partitions of each tenant on each server
|
||||
(desc, res_part_num) = query_cur.exec_query("""select svr_ip, svr_port, table_id >> 40 as tenant_id, count(*) as part_num from __all_virtual_clog_stat group by 1,2,3 order by 1,2,3""")
|
||||
# Compute each tenant's max_memory on each server
|
||||
(desc, res_unit_memory) = query_cur.exec_query("""select u.svr_ip, u.svr_port, t.tenant_id, uc.max_memory, p.replica_type from __all_unit u, __All_resource_pool p, __all_tenant t, __all_unit_config uc where p.resource_pool_id = u.resource_pool_id and t.tenant_id = p.tenant_id and p.unit_config_id = uc.unit_config_id""")
|
||||
# Query each server's memstore_limit_percentage
|
||||
(desc, res_svr_memstore_percent) = query_cur.exec_query("""select svr_ip, svr_port, name, value from __all_virtual_sys_parameter_stat where name = 'memstore_limit_percentage'""")
|
||||
part_static_cost = 128 * 1024
|
||||
part_dynamic_cost = 400 * 1024
|
||||
# Reserve 512 partitions in case tables need to be created during the upgrade
|
||||
part_num_reserved = 512
|
||||
for line in res_part_num:
|
||||
svr_ip = line[0]
|
||||
svr_port = line[1]
|
||||
tenant_id = line[2]
|
||||
part_num = line[3]
|
||||
for uline in res_unit_memory:
|
||||
uip = uline[0]
|
||||
uport = uline[1]
|
||||
utid = uline[2]
|
||||
umem = uline[3]
|
||||
utype = uline[4]
|
||||
if svr_ip == uip and svr_port == uport and tenant_id == utid:
|
||||
for mpline in res_svr_memstore_percent:
|
||||
mpip = mpline[0]
|
||||
mpport = mpline[1]
|
||||
if mpip == uip and mpport == uport:
|
||||
mspercent = int(mpline[3])
|
||||
mem_limit = umem
|
||||
if 0 == utype:
|
||||
# Full-type units need to reserve memory for the memstore
|
||||
mem_limit = umem * (100 - mspercent) / 100
|
||||
part_num_limit = mem_limit / (part_static_cost + part_dynamic_cost / 10);
|
||||
if part_num_limit <= 1000:
|
||||
part_num_limit = mem_limit / (part_static_cost + part_dynamic_cost)
|
||||
if part_num >= (part_num_limit - part_num_reserved):
|
||||
raise MyError('{0} {1} {2} exceed tenant partition num limit, please check'.format(line, uline, mpline))
|
||||
break
|
||||
logging.info('check tenant partition num success')
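# --- Illustrative worked example (assumed numbers, not from the original script):
# for a full-type unit with max_memory = 10 GB and memstore_limit_percentage = 50,
# the formula above allows roughly 31,000 partitions before the 512-partition reserve.
def example_part_num_limit(umem=10 * 1024 ** 3, mspercent=50, utype=0):
    part_static_cost = 128 * 1024
    part_dynamic_cost = 400 * 1024
    # full-type units reserve memory for the memstore, as in the check above
    mem_limit = umem * (100 - mspercent) / 100 if 0 == utype else umem
    part_num_limit = mem_limit / (part_static_cost + part_dynamic_cost / 10)
    if part_num_limit <= 1000:
        part_num_limit = mem_limit / (part_static_cost + part_dynamic_cost)
    return part_num_limit  # ~31207 for the defaults above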
|
||||
|
||||
# 5. Check for observers that host tenant partitions but have no unit
|
||||
def check_tenant_resource(query_cur):
|
||||
(desc, res_unit) = query_cur.exec_query("""select tenant_id, svr_ip, svr_port from __all_virtual_partition_info where (tenant_id, svr_ip, svr_port) not in (select tenant_id, svr_ip, svr_port from __all_unit, __all_resource_pool where __all_unit.resource_pool_id = __all_resource_pool.resource_pool_id group by tenant_id, svr_ip, svr_port) group by tenant_id, svr_ip, svr_port""")
|
||||
for line in res_unit:
|
||||
raise MyError('{0}: tenant unit does not exist but partitions exist'.format(line))
|
||||
logging.info("check tenant resource success")
|
||||
|
||||
# 6. Check that progressive_merge_round has been raised to 1 everywhere
|
||||
def check_progressive_merge_round(query_cur):
|
||||
(desc, results) = query_cur.exec_query("""select count(*) as cnt from __all_virtual_table where progressive_merge_round = 0 and table_type not in (1,2,4) and data_table_id = 0""")
|
||||
if results[0][0] != 0:
|
||||
raise MyError("""progressive_merge_round of main table should all be 1""")
|
||||
(desc, results) = query_cur.exec_query("""select count(*) as cnt from __all_virtual_table where progressive_merge_round = 0 and table_type not in (1,2,4) and data_table_id > 0 and data_table_id in (select table_id from __all_virtual_table where table_type not in (1,2,4) and data_table_id = 0)""")
|
||||
if results[0][0] != 0:
|
||||
raise MyError("""progressive_merge_round of index should all be 1""")
|
||||
logging.info("""check progressive_merge_round status success""")
|
||||
|
||||
# Primary cluster status check
|
||||
def check_primary_cluster_sync_status(query_cur, timeout):
|
||||
(desc, res) = query_cur.exec_query("""select current_scn from oceanbase.v$ob_cluster where cluster_role='PRIMARY' and cluster_status='VALID'""")
|
||||
if len(res) != 1:
|
||||
raise MyError('query results count is not 1')
|
||||
query_sql = "select count(*) from oceanbase.v$ob_standby_status where cluster_role != 'PHYSICAL STANDBY' or cluster_status != 'VALID' or current_scn < {0}".format(res[0][0]);
|
||||
times = timeout
|
||||
print times
|
||||
while times > 0 :
|
||||
(desc, res1) = query_cur.exec_query(query_sql)
|
||||
if len(res1) == 1 and res1[0][0] == 0:
|
||||
break;
|
||||
time.sleep(1)
|
||||
times -=1
|
||||
if times == 0:
|
||||
raise MyError("some standby cluster is not synchronized, primary cluster status check failed")
|
||||
else:
|
||||
logging.info("check primary cluster sync status success")
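# --- Illustrative sketch (assumption, not part of the original script): the primary
# and standby checks above and below share the same poll-until-zero pattern; a
# hypothetical helper expressing it once could look like this (it relies on the
# `time` import and the query cursor already used by this script).
def wait_until_count_is_zero(query_cur, sql, timeout_sec):
    times = timeout_sec
    while times > 0:
        (desc, res) = query_cur.exec_query(sql)
        if len(res) == 1 and res[0][0] == 0:
            return True
        time.sleep(1)
        times -= 1
    return False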
|
||||
|
||||
# Standby cluster status check
|
||||
def check_standby_cluster_sync_status(query_cur, timeout):
|
||||
(desc, res) = query_cur.exec_query("""select time_to_usec(now(6)) from dual""")
|
||||
query_sql = "select count(*) from oceanbase.v$ob_cluster where (cluster_role != 'PHYSICAL STANDBY') or (cluster_status != 'VALID') or (current_scn < {0}) or (switchover_status != 'NOT ALLOWED')".format(res[0][0]);
|
||||
times = timeout
|
||||
while times > 0 :
|
||||
(desc, res2) = query_cur.exec_query(query_sql)
|
||||
if len(res2) == 1 and res2[0][0] == 0:
|
||||
break
|
||||
time.sleep(1)
|
||||
times -= 1
|
||||
if times == 0:
|
||||
raise MyError('current standby cluster is not synchronized, please check')
|
||||
else:
|
||||
logging.info("check standby cluster sync status success")
|
||||
|
||||
# Determine whether this is the primary or a standby cluster
|
||||
def check_cluster_sync_status(query_cur, timeout):
|
||||
(desc, res) = query_cur.exec_query("""select cluster_role from oceanbase.v$ob_cluster""")
|
||||
if res[0][0] == 'PRIMARY':
|
||||
check_primary_cluster_sync_status(query_cur, timeout)
|
||||
else:
|
||||
check_standby_cluster_sync_status(query_cur, timeout)
|
||||
|
||||
|
||||
# Checks before the upgrade starts
|
||||
# Start the health checks
|
||||
def do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout):
|
||||
try:
|
||||
conn = mysql.connector.connect(user = my_user,
|
||||
@ -417,11 +309,8 @@ def do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout):
|
||||
try:
|
||||
query_cur = QueryCursor(cur)
|
||||
check_paxos_replica(query_cur)
|
||||
check_rebalance_task(query_cur)
|
||||
check_cluster_status(query_cur)
|
||||
check_tenant_part_num(query_cur)
|
||||
check_tenant_resource(query_cur)
|
||||
check_cluster_sync_status(query_cur, timeout)
|
||||
check_observer_status(query_cur)
|
||||
check_schema_status(query_cur)
|
||||
except Exception, e:
|
||||
logging.exception('run error')
|
||||
raise e
|
||||
|
File diff suppressed because it is too large
@ -343,140 +343,6 @@ def fetch_tenant_ids(query_cur):
|
||||
logging.exception('fail to fetch distinct tenant ids')
|
||||
raise e
|
||||
|
||||
def check_ddl_and_dml_sync(conn, query_cur, standby_cluster_infos, tenant_ids):
|
||||
try:
|
||||
conn.commit()
|
||||
# check if need check ddl and dml sync
|
||||
is_primary = check_current_cluster_is_primary(query_cur)
|
||||
if not is_primary:
|
||||
logging.exception("""should be primary cluster""")
|
||||
raise e
|
||||
|
||||
# fetch sys stats
|
||||
sys_infos = []
|
||||
sql = """SELECT tenant_id,
|
||||
refreshed_schema_version,
|
||||
min_sys_table_scn,
|
||||
min_user_table_scn
|
||||
FROM oceanbase.v$ob_cluster_stats
|
||||
ORDER BY tenant_id desc"""
|
||||
(desc, results) = query_cur.exec_query(sql)
|
||||
if len(tenant_ids) != len(results):
|
||||
logging.exception("result not match")
|
||||
raise e
|
||||
else:
|
||||
for i in range(len(results)):
|
||||
if len(results[i]) != 4:
|
||||
logging.exception("length not match")
|
||||
raise e
|
||||
elif results[i][0] != tenant_ids[i]:
|
||||
logging.exception("tenant_id not match")
|
||||
raise e
|
||||
else:
|
||||
sys_info = {}
|
||||
sys_info['tenant_id'] = results[i][0]
|
||||
sys_info['refreshed_schema_version'] = results[i][1]
|
||||
sys_info['min_sys_table_scn'] = results[i][2]
|
||||
sys_info['min_user_table_scn'] = results[i][3]
|
||||
logging.info("sys info : {0}".format(sys_info))
|
||||
sys_infos.append(sys_info)
|
||||
conn.commit()
|
||||
|
||||
# check ddl and dml by cluster
|
||||
for standby_cluster_info in standby_cluster_infos:
|
||||
check_ddl_and_dml_sync_by_cluster(standby_cluster_info, sys_infos)
|
||||
|
||||
except Exception, e:
|
||||
logging.exception("fail to check ddl and dml sync")
|
||||
raise e
|
||||
|
||||
def check_ddl_and_dml_sync_by_cluster(standby_cluster_info, sys_infos):
|
||||
try:
|
||||
# connect
|
||||
logging.info("start to check ddl and dml sync by cluster: cluster_id = {0}"
|
||||
.format(standby_cluster_info['cluster_id']))
|
||||
logging.info("create connection : cluster_id = {0}, ip = {1}, port = {2}"
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
tmp_conn = mysql.connector.connect(user = standby_cluster_info['user'],
|
||||
password = standby_cluster_info['pwd'],
|
||||
host = standby_cluster_info['ip'],
|
||||
port = standby_cluster_info['port'],
|
||||
database = 'oceanbase',
|
||||
raise_on_warnings = True)
|
||||
tmp_cur = tmp_conn.cursor(buffered=True)
|
||||
tmp_conn.autocommit = True
|
||||
tmp_query_cur = Cursor(tmp_cur)
|
||||
is_primary = check_current_cluster_is_primary(tmp_query_cur)
|
||||
if is_primary:
|
||||
logging.exception("""primary cluster changed : cluster_id = {0}, ip = {1}, port = {2}"""
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
raise e
|
||||
|
||||
for sys_info in sys_infos:
|
||||
check_ddl_and_dml_sync_by_tenant(tmp_query_cur, sys_info)
|
||||
|
||||
# close
|
||||
tmp_cur.close()
|
||||
tmp_conn.close()
|
||||
logging.info("""check_ddl_and_dml_sync_by_cluster success : cluster_id = {0}, ip = {1}, port = {2}"""
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
|
||||
except Exception, e:
|
||||
logging.exception("""fail to check ddl and dml sync : cluster_id = {0}, ip = {1}, port = {2}"""
|
||||
.format(standby_cluster_info['cluster_id'],
|
||||
standby_cluster_info['ip'],
|
||||
standby_cluster_info['port']))
|
||||
raise e
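# --- Illustrative sketch (assumption, not from the original script): closing the
# standby connection in a finally block would also release it when one of the checks
# above raises. Names are hypothetical; it reuses mysql.connector and the Cursor
# wrapper already imported by this script.
def with_standby_connection(standby_cluster_info, fn):
    tmp_conn = mysql.connector.connect(user = standby_cluster_info['user'],
                                       password = standby_cluster_info['pwd'],
                                       host = standby_cluster_info['ip'],
                                       port = standby_cluster_info['port'],
                                       database = 'oceanbase',
                                       raise_on_warnings = True)
    tmp_cur = tmp_conn.cursor(buffered=True)
    try:
        tmp_conn.autocommit = True
        return fn(Cursor(tmp_cur))
    finally:
        tmp_cur.close()
        tmp_conn.close()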
|
||||
|
||||
def check_ddl_and_dml_sync_by_tenant(query_cur, sys_info):
|
||||
try:
|
||||
times = 1800 # 30min
|
||||
logging.info("start to check ddl and dml sync by tenant : {0}".format(sys_info))
|
||||
start_time = time.time()
|
||||
sql = ""
|
||||
if 1 == sys_info['tenant_id'] :
|
||||
# DML of the sys tenant is not physically replicated to the standby; the upgrade script writes it instead, so only DDL sync is verified for the sys tenant
|
||||
sql = """SELECT count(*)
|
||||
FROM oceanbase.v$ob_cluster_stats
|
||||
WHERE tenant_id = {0}
|
||||
AND refreshed_schema_version >= {1}
|
||||
""".format(sys_info['tenant_id'],
|
||||
sys_info['refreshed_schema_version'])
|
||||
else:
|
||||
sql = """SELECT count(*)
|
||||
FROM oceanbase.v$ob_cluster_stats
|
||||
WHERE tenant_id = {0}
|
||||
AND refreshed_schema_version >= {1}
|
||||
AND min_sys_table_scn >= {2}
|
||||
AND min_user_table_scn >= {3}
|
||||
""".format(sys_info['tenant_id'],
|
||||
sys_info['refreshed_schema_version'],
|
||||
sys_info['min_sys_table_scn'],
|
||||
sys_info['min_user_table_scn'])
|
||||
while times > 0 :
|
||||
(desc, results) = query_cur.exec_query(sql)
|
||||
if len(results) == 1 and results[0][0] == 1:
|
||||
break;
|
||||
time.sleep(1)
|
||||
times -= 1
|
||||
if times == 0:
|
||||
logging.exception("check ddl and dml sync timeout! : {0}, cost = {1}"
|
||||
.format(sys_info, time.time() - start_time))
|
||||
raise e
|
||||
else:
|
||||
logging.info("check ddl and dml sync success! : {0}, cost = {1}"
|
||||
.format(sys_info, time.time() - start_time))
|
||||
|
||||
except Exception, e:
|
||||
logging.exception("fail to check ddl and dml sync : {0}".format(sys_info))
|
||||
raise e
|
||||
|
||||
# Checks after the upgrade finishes
|
||||
def do_check(my_host, my_port, my_user, my_passwd, upgrade_params):
|
||||
try:
|
||||
|
File diff suppressed because it is too large
@ -1,317 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import mysql.connector
|
||||
from mysql.connector import errorcode
|
||||
import logging
|
||||
import getopt
|
||||
|
||||
class UpgradeParams:
|
||||
log_filename = 'upgrade_rolling_post.log'
|
||||
#### --------------start : my_error.py --------------
|
||||
class MyError(Exception):
|
||||
def __init__(self, value):
|
||||
self.value = value
|
||||
def __str__(self):
|
||||
return repr(self.value)
|
||||
|
||||
#### --------------start : actions.py--------------
|
||||
class QueryCursor:
|
||||
__cursor = None
|
||||
def __init__(self, cursor):
|
||||
self.__cursor = cursor
|
||||
def exec_sql(self, sql, print_when_succ = True):
|
||||
try:
|
||||
self.__cursor.execute(sql)
|
||||
rowcount = self.__cursor.rowcount
|
||||
if True == print_when_succ:
|
||||
logging.info('succeed to execute sql: %s, rowcount = %d', sql, rowcount)
|
||||
return rowcount
|
||||
except mysql.connector.Error, e:
|
||||
logging.exception('mysql connector error, fail to execute sql: %s', sql)
|
||||
raise e
|
||||
except Exception, e:
|
||||
logging.exception('normal error, fail to execute sql: %s', sql)
|
||||
raise e
|
||||
def exec_query(self, sql, print_when_succ = True):
|
||||
try:
|
||||
self.__cursor.execute(sql)
|
||||
results = self.__cursor.fetchall()
|
||||
rowcount = self.__cursor.rowcount
|
||||
if True == print_when_succ:
|
||||
logging.info('succeed to execute query: %s, rowcount = %d', sql, rowcount)
|
||||
return (self.__cursor.description, results)
|
||||
except mysql.connector.Error, e:
|
||||
logging.exception('mysql connector error, fail to execute sql: %s', sql)
|
||||
raise e
|
||||
except Exception, e:
|
||||
logging.exception('normal error, fail to execute sql: %s', sql)
|
||||
raise e
|
||||
#### ---------------end----------------------
|
||||
|
||||
#### --------------start : opt.py --------------
|
||||
help_str = \
|
||||
"""
|
||||
Help:
|
||||
""" +\
|
||||
sys.argv[0] + """ [OPTIONS]""" +\
|
||||
'\n\n' +\
|
||||
'-I, --help Display this help and exit.\n' +\
|
||||
'-V, --version Output version information and exit.\n' +\
|
||||
'-h, --host=name Connect to host.\n' +\
|
||||
'-P, --port=name Port number to use for connection.\n' +\
|
||||
'-u, --user=name User for login.\n' +\
|
||||
'-p, --password=name Password to use when connecting to server. If password is\n' +\
|
||||
' not given it\'s empty string "".\n' +\
|
||||
'-m, --module=name Modules to run. Modules should be a string combined by some of\n' +\
|
||||
' the following strings: ddl, normal_dml, each_tenant_dml,\n' +\
|
||||
' system_variable_dml, special_action, all. "all" represents\n' +\
|
||||
' that all modules should be run. They are split by ",".\n' +\
|
||||
' For example: -m all, or --module=ddl,normal_dml,special_action\n' +\
|
||||
'-l, --log-file=name Log file path. If log file path is not given it\'s ' + os.path.splitext(sys.argv[0])[0] + '.log\n' +\
|
||||
'\n\n' +\
|
||||
'Maybe you want to run cmd like that:\n' +\
|
||||
sys.argv[0] + ' -h 127.0.0.1 -P 3306 -u admin -p admin\n'
|
||||
|
||||
version_str = """version 1.0.0"""
|
||||
|
||||
class Option:
|
||||
__g_short_name_set = set([])
|
||||
__g_long_name_set = set([])
|
||||
__short_name = None
|
||||
__long_name = None
|
||||
__is_with_param = None
|
||||
__is_local_opt = None
|
||||
__has_value = None
|
||||
__value = None
|
||||
def __init__(self, short_name, long_name, is_with_param, is_local_opt, default_value = None):
|
||||
if short_name in Option.__g_short_name_set:
|
||||
raise MyError('duplicate option short name: {0}'.format(short_name))
|
||||
elif long_name in Option.__g_long_name_set:
|
||||
raise MyError('duplicate option long name: {0}'.format(long_name))
|
||||
Option.__g_short_name_set.add(short_name)
|
||||
Option.__g_long_name_set.add(long_name)
|
||||
self.__short_name = short_name
|
||||
self.__long_name = long_name
|
||||
self.__is_with_param = is_with_param
|
||||
self.__is_local_opt = is_local_opt
|
||||
self.__has_value = False
|
||||
if None != default_value:
|
||||
self.set_value(default_value)
|
||||
def is_with_param(self):
|
||||
return self.__is_with_param
|
||||
def get_short_name(self):
|
||||
return self.__short_name
|
||||
def get_long_name(self):
|
||||
return self.__long_name
|
||||
def has_value(self):
|
||||
return self.__has_value
|
||||
def get_value(self):
|
||||
return self.__value
|
||||
def set_value(self, value):
|
||||
self.__value = value
|
||||
self.__has_value = True
|
||||
def is_local_opt(self):
|
||||
return self.__is_local_opt
|
||||
def is_valid(self):
|
||||
return None != self.__short_name and None != self.__long_name and True == self.__has_value and None != self.__value
|
||||
|
||||
g_opts =\
|
||||
[\
|
||||
Option('I', 'help', False, True),\
|
||||
Option('V', 'version', False, True),\
|
||||
Option('h', 'host', True, False),\
|
||||
Option('P', 'port', True, False),\
|
||||
Option('u', 'user', True, False),\
|
||||
Option('p', 'password', True, False, ''),\
|
||||
# Which modules to run; all by default
|
||||
Option('m', 'module', True, False, 'all'),\
|
||||
# Log file path; each script's main function sets its own default value
|
||||
Option('l', 'log-file', True, False)
|
||||
]\
|
||||
|
||||
def change_opt_defult_value(opt_long_name, opt_default_val):
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if opt.get_long_name() == opt_long_name:
|
||||
opt.set_value(opt_default_val)
|
||||
return
|
||||
|
||||
def has_no_local_opts():
|
||||
global g_opts
|
||||
no_local_opts = True
|
||||
for opt in g_opts:
|
||||
if opt.is_local_opt() and opt.has_value():
|
||||
no_local_opts = False
|
||||
return no_local_opts
|
||||
|
||||
def check_db_client_opts():
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if not opt.is_local_opt() and not opt.has_value():
|
||||
raise MyError('option "-{0}" has not been specified, maybe you should run "{1} --help" for help'\
|
||||
.format(opt.get_short_name(), sys.argv[0]))
|
||||
|
||||
def parse_option(opt_name, opt_val):
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if opt_name in (('-' + opt.get_short_name()), ('--' + opt.get_long_name())):
|
||||
opt.set_value(opt_val)
|
||||
|
||||
def parse_options(argv):
|
||||
global g_opts
|
||||
short_opt_str = ''
|
||||
long_opt_list = []
|
||||
for opt in g_opts:
|
||||
if opt.is_with_param():
|
||||
short_opt_str += opt.get_short_name() + ':'
|
||||
else:
|
||||
short_opt_str += opt.get_short_name()
|
||||
for opt in g_opts:
|
||||
if opt.is_with_param():
|
||||
long_opt_list.append(opt.get_long_name() + '=')
|
||||
else:
|
||||
long_opt_list.append(opt.get_long_name())
|
||||
(opts, args) = getopt.getopt(argv, short_opt_str, long_opt_list)
|
||||
for (opt_name, opt_val) in opts:
|
||||
parse_option(opt_name, opt_val)
|
||||
if has_no_local_opts():
|
||||
check_db_client_opts()
|
||||
|
||||
def deal_with_local_opt(opt):
|
||||
if 'help' == opt.get_long_name():
|
||||
global help_str
|
||||
print help_str
|
||||
elif 'version' == opt.get_long_name():
|
||||
global version_str
|
||||
print version_str
|
||||
|
||||
def deal_with_local_opts():
|
||||
global g_opts
|
||||
if has_no_local_opts():
|
||||
raise MyError('no local options, can not deal with local options')
|
||||
else:
|
||||
for opt in g_opts:
|
||||
if opt.is_local_opt() and opt.has_value():
|
||||
deal_with_local_opt(opt)
|
||||
# Handle only one local option
|
||||
return
|
||||
|
||||
def get_opt_host():
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if 'host' == opt.get_long_name():
|
||||
return opt.get_value()
|
||||
|
||||
def get_opt_port():
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if 'port' == opt.get_long_name():
|
||||
return opt.get_value()
|
||||
|
||||
def get_opt_user():
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if 'user' == opt.get_long_name():
|
||||
return opt.get_value()
|
||||
|
||||
def get_opt_password():
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if 'password' == opt.get_long_name():
|
||||
return opt.get_value()
|
||||
|
||||
def get_opt_module():
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if 'module' == opt.get_long_name():
|
||||
return opt.get_value()
|
||||
|
||||
def get_opt_log_file():
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if 'log-file' == opt.get_long_name():
|
||||
return opt.get_value()
|
||||
#### ---------------end----------------------
|
||||
|
||||
def config_logging_module(log_filenamme):
|
||||
logging.basicConfig(level=logging.INFO,\
|
||||
format='[%(asctime)s] %(levelname)s %(filename)s:%(lineno)d %(message)s',\
|
||||
datefmt='%Y-%m-%d %H:%M:%S',\
|
||||
filename=log_filenamme,\
|
||||
filemode='w')
|
||||
# Define the log message format
|
||||
formatter = logging.Formatter('[%(asctime)s] %(levelname)s %(filename)s:%(lineno)d %(message)s', '%Y-%m-%d %H:%M:%S')
|
||||
#######################################
|
||||
# Define a handler that prints logs of level INFO and above to sys.stdout
|
||||
stdout_handler = logging.StreamHandler(sys.stdout)
|
||||
stdout_handler.setLevel(logging.INFO)
|
||||
# Set the log message format
|
||||
stdout_handler.setFormatter(formatter)
|
||||
# Add the stdout_handler defined above to the root logger
|
||||
logging.getLogger('').addHandler(stdout_handler)
|
||||
|
||||
def run(my_host, my_port, my_user, my_passwd, upgrade_params):
|
||||
try:
|
||||
conn = mysql.connector.connect(user = my_user,
|
||||
password = my_passwd,
|
||||
host = my_host,
|
||||
port = my_port,
|
||||
database = 'oceanbase',
|
||||
raise_on_warnings = True)
|
||||
conn.autocommit = True
|
||||
cur = conn.cursor(buffered=True)
|
||||
try:
|
||||
query_cur = QueryCursor(cur)
|
||||
(desc, results) = query_cur.exec_query("""select distinct value from __all_virtual_sys_parameter_stat where name='min_observer_version'""")
|
||||
if len(results) != 1:
|
||||
raise MyError('distinct observer version not exist')
|
||||
# rolling upgrade is supported since version 2.2.50
|
||||
elif cmp(results[0][0], "2.2.50") >= 0:
|
||||
query_cur.exec_sql("""ALTER SYSTEM END ROLLING UPGRADE""")
|
||||
logging.info("END ROLLING UPGRADE success")
|
||||
else:
|
||||
logging.info("cluster version ({0}) less than 2.2.50, skip".format(results[0][0]))
|
||||
except Exception, e:
|
||||
logging.exception('run error')
|
||||
raise e
|
||||
finally:
|
||||
cur.close()
|
||||
conn.close()
|
||||
except mysql.connector.Error, e:
|
||||
logging.exception('connection error')
|
||||
raise e
|
||||
except Exception, e:
|
||||
logging.exception('normal error')
|
||||
raise e
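# --- Illustrative sketch (assumption, not part of the original script): the check
# above compares version strings with cmp(), which orders them lexically; a numeric
# comparison avoids surprises once a component grows beyond one digit (for example
# "2.2.100" vs "2.2.50"). Hypothetical helper, assuming purely numeric dot-separated
# versions.
def version_ge(observed, required):
    def parts(v):
        return [int(x) for x in v.split('.')]
    return parts(observed) >= parts(required)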
|
||||
|
||||
if __name__ == '__main__':
|
||||
upgrade_params = UpgradeParams()
|
||||
change_opt_defult_value('log-file', upgrade_params.log_filename)
|
||||
parse_options(sys.argv[1:])
|
||||
if not has_no_local_opts():
|
||||
deal_with_local_opts()
|
||||
else:
|
||||
check_db_client_opts()
|
||||
log_filename = get_opt_log_file()
|
||||
upgrade_params.log_filename = log_filename
|
||||
# Logging is configured here so that the preceding operations do not overwrite the log file
|
||||
config_logging_module(upgrade_params.log_filename)
|
||||
try:
|
||||
host = get_opt_host()
|
||||
port = int(get_opt_port())
|
||||
user = get_opt_user()
|
||||
password = get_opt_password()
|
||||
logging.info('parameters from cmd: host=\"%s\", port=%s, user=\"%s\", password=\"%s\", log-file=\"%s\"',\
|
||||
host, port, user, password, log_filename)
|
||||
run(host, port, user, password, upgrade_params)
|
||||
except mysql.connector.Error, e:
|
||||
logging.exception('mysql connector error')
|
||||
raise e
|
||||
except Exception, e:
|
||||
logging.exception('normal error')
|
||||
raise e
|
||||
|
@ -1,317 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import mysql.connector
|
||||
from mysql.connector import errorcode
|
||||
import logging
|
||||
import getopt
|
||||
|
||||
class UpgradeParams:
|
||||
log_filename = 'upgrade_rolling_pre.log'
|
||||
#### --------------start : my_error.py --------------
|
||||
class MyError(Exception):
|
||||
def __init__(self, value):
|
||||
self.value = value
|
||||
def __str__(self):
|
||||
return repr(self.value)
|
||||
|
||||
#### --------------start : actions.py--------------
|
||||
class QueryCursor:
|
||||
__cursor = None
|
||||
def __init__(self, cursor):
|
||||
self.__cursor = cursor
|
||||
def exec_sql(self, sql, print_when_succ = True):
|
||||
try:
|
||||
self.__cursor.execute(sql)
|
||||
rowcount = self.__cursor.rowcount
|
||||
if True == print_when_succ:
|
||||
logging.info('succeed to execute sql: %s, rowcount = %d', sql, rowcount)
|
||||
return rowcount
|
||||
except mysql.connector.Error, e:
|
||||
logging.exception('mysql connector error, fail to execute sql: %s', sql)
|
||||
raise e
|
||||
except Exception, e:
|
||||
logging.exception('normal error, fail to execute sql: %s', sql)
|
||||
raise e
|
||||
def exec_query(self, sql, print_when_succ = True):
|
||||
try:
|
||||
self.__cursor.execute(sql)
|
||||
results = self.__cursor.fetchall()
|
||||
rowcount = self.__cursor.rowcount
|
||||
if True == print_when_succ:
|
||||
logging.info('succeed to execute query: %s, rowcount = %d', sql, rowcount)
|
||||
return (self.__cursor.description, results)
|
||||
except mysql.connector.Error, e:
|
||||
logging.exception('mysql connector error, fail to execute sql: %s', sql)
|
||||
raise e
|
||||
except Exception, e:
|
||||
logging.exception('normal error, fail to execute sql: %s', sql)
|
||||
raise e
|
||||
#### ---------------end----------------------
|
||||
|
||||
#### --------------start : opt.py --------------
|
||||
help_str = \
|
||||
"""
|
||||
Help:
|
||||
""" +\
|
||||
sys.argv[0] + """ [OPTIONS]""" +\
|
||||
'\n\n' +\
|
||||
'-I, --help Display this help and exit.\n' +\
|
||||
'-V, --version Output version information and exit.\n' +\
|
||||
'-h, --host=name Connect to host.\n' +\
|
||||
'-P, --port=name Port number to use for connection.\n' +\
|
||||
'-u, --user=name User for login.\n' +\
|
||||
'-p, --password=name Password to use when connecting to server. If password is\n' +\
|
||||
' not given it\'s empty string "".\n' +\
|
||||
'-m, --module=name Modules to run. Modules should be a string combined by some of\n' +\
|
||||
' the following strings: ddl, normal_dml, each_tenant_dml,\n' +\
|
||||
' system_variable_dml, special_action, all. "all" represents\n' +\
|
||||
' that all modules should be run. They are split by ",".\n' +\
|
||||
' For example: -m all, or --module=ddl,normal_dml,special_action\n' +\
|
||||
'-l, --log-file=name Log file path. If log file path is not given it\'s ' + os.path.splitext(sys.argv[0])[0] + '.log\n' +\
|
||||
'\n\n' +\
|
||||
'Maybe you want to run cmd like that:\n' +\
|
||||
sys.argv[0] + ' -h 127.0.0.1 -P 3306 -u admin -p admin\n'
|
||||
|
||||
version_str = """version 1.0.0"""
|
||||
|
||||
class Option:
|
||||
__g_short_name_set = set([])
|
||||
__g_long_name_set = set([])
|
||||
__short_name = None
|
||||
__long_name = None
|
||||
__is_with_param = None
|
||||
__is_local_opt = None
|
||||
__has_value = None
|
||||
__value = None
|
||||
def __init__(self, short_name, long_name, is_with_param, is_local_opt, default_value = None):
|
||||
if short_name in Option.__g_short_name_set:
|
||||
raise MyError('duplicate option short name: {0}'.format(short_name))
|
||||
elif long_name in Option.__g_long_name_set:
|
||||
raise MyError('duplicate option long name: {0}'.format(long_name))
|
||||
Option.__g_short_name_set.add(short_name)
|
||||
Option.__g_long_name_set.add(long_name)
|
||||
self.__short_name = short_name
|
||||
self.__long_name = long_name
|
||||
self.__is_with_param = is_with_param
|
||||
self.__is_local_opt = is_local_opt
|
||||
self.__has_value = False
|
||||
if None != default_value:
|
||||
self.set_value(default_value)
|
||||
def is_with_param(self):
|
||||
return self.__is_with_param
|
||||
def get_short_name(self):
|
||||
return self.__short_name
|
||||
def get_long_name(self):
|
||||
return self.__long_name
|
||||
def has_value(self):
|
||||
return self.__has_value
|
||||
def get_value(self):
|
||||
return self.__value
|
||||
def set_value(self, value):
|
||||
self.__value = value
|
||||
self.__has_value = True
|
||||
def is_local_opt(self):
|
||||
return self.__is_local_opt
|
||||
def is_valid(self):
|
||||
return None != self.__short_name and None != self.__long_name and True == self.__has_value and None != self.__value
|
||||
|
||||
g_opts =\
|
||||
[\
|
||||
Option('I', 'help', False, True),\
|
||||
Option('V', 'version', False, True),\
|
||||
Option('h', 'host', True, False),\
|
||||
Option('P', 'port', True, False),\
|
||||
Option('u', 'user', True, False),\
|
||||
Option('p', 'password', True, False, ''),\
|
||||
# Which modules to run; all by default
|
||||
Option('m', 'module', True, False, 'all'),\
|
||||
# Log file path; each script's main function sets its own default value
|
||||
Option('l', 'log-file', True, False)
|
||||
]\
|
||||
|
||||
def change_opt_defult_value(opt_long_name, opt_default_val):
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if opt.get_long_name() == opt_long_name:
|
||||
opt.set_value(opt_default_val)
|
||||
return
|
||||
|
||||
def has_no_local_opts():
|
||||
global g_opts
|
||||
no_local_opts = True
|
||||
for opt in g_opts:
|
||||
if opt.is_local_opt() and opt.has_value():
|
||||
no_local_opts = False
|
||||
return no_local_opts
|
||||
|
||||
def check_db_client_opts():
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if not opt.is_local_opt() and not opt.has_value():
|
||||
raise MyError('option "-{0}" has not been specified, maybe you should run "{1} --help" for help'\
|
||||
.format(opt.get_short_name(), sys.argv[0]))
|
||||
|
||||
def parse_option(opt_name, opt_val):
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if opt_name in (('-' + opt.get_short_name()), ('--' + opt.get_long_name())):
|
||||
opt.set_value(opt_val)
|
||||
|
||||
def parse_options(argv):
|
||||
global g_opts
|
||||
short_opt_str = ''
|
||||
long_opt_list = []
|
||||
for opt in g_opts:
|
||||
if opt.is_with_param():
|
||||
short_opt_str += opt.get_short_name() + ':'
|
||||
else:
|
||||
short_opt_str += opt.get_short_name()
|
||||
for opt in g_opts:
|
||||
if opt.is_with_param():
|
||||
long_opt_list.append(opt.get_long_name() + '=')
|
||||
else:
|
||||
long_opt_list.append(opt.get_long_name())
|
||||
(opts, args) = getopt.getopt(argv, short_opt_str, long_opt_list)
|
||||
for (opt_name, opt_val) in opts:
|
||||
parse_option(opt_name, opt_val)
|
||||
if has_no_local_opts():
|
||||
check_db_client_opts()
|
||||
|
||||
def deal_with_local_opt(opt):
|
||||
if 'help' == opt.get_long_name():
|
||||
global help_str
|
||||
print help_str
|
||||
elif 'version' == opt.get_long_name():
|
||||
global version_str
|
||||
print version_str
|
||||
|
||||
def deal_with_local_opts():
|
||||
global g_opts
|
||||
if has_no_local_opts():
|
||||
raise MyError('no local options, can not deal with local options')
|
||||
else:
|
||||
for opt in g_opts:
|
||||
if opt.is_local_opt() and opt.has_value():
|
||||
deal_with_local_opt(opt)
|
||||
# Handle only one local option
|
||||
return
|
||||
|
||||
def get_opt_host():
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if 'host' == opt.get_long_name():
|
||||
return opt.get_value()
|
||||
|
||||
def get_opt_port():
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if 'port' == opt.get_long_name():
|
||||
return opt.get_value()
|
||||
|
||||
def get_opt_user():
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if 'user' == opt.get_long_name():
|
||||
return opt.get_value()
|
||||
|
||||
def get_opt_password():
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if 'password' == opt.get_long_name():
|
||||
return opt.get_value()
|
||||
|
||||
def get_opt_module():
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if 'module' == opt.get_long_name():
|
||||
return opt.get_value()
|
||||
|
||||
def get_opt_log_file():
|
||||
global g_opts
|
||||
for opt in g_opts:
|
||||
if 'log-file' == opt.get_long_name():
|
||||
return opt.get_value()
|
||||
#### ---------------end----------------------
|
||||
|
||||
def config_logging_module(log_filenamme):
|
||||
logging.basicConfig(level=logging.INFO,\
|
||||
format='[%(asctime)s] %(levelname)s %(filename)s:%(lineno)d %(message)s',\
|
||||
datefmt='%Y-%m-%d %H:%M:%S',\
|
||||
filename=log_filenamme,\
|
||||
filemode='w')
|
||||
# Define the log message format
|
||||
formatter = logging.Formatter('[%(asctime)s] %(levelname)s %(filename)s:%(lineno)d %(message)s', '%Y-%m-%d %H:%M:%S')
|
||||
#######################################
|
||||
# Define a handler that prints logs of level INFO and above to sys.stdout
|
||||
stdout_handler = logging.StreamHandler(sys.stdout)
|
||||
stdout_handler.setLevel(logging.INFO)
|
||||
# Set the log message format
|
||||
stdout_handler.setFormatter(formatter)
|
||||
# Add the stdout_handler defined above to the root logger
|
||||
logging.getLogger('').addHandler(stdout_handler)
|
||||
|
||||
def run(my_host, my_port, my_user, my_passwd, upgrade_params):
|
||||
try:
|
||||
conn = mysql.connector.connect(user = my_user,
|
||||
password = my_passwd,
|
||||
host = my_host,
|
||||
port = my_port,
|
||||
database = 'oceanbase',
|
||||
raise_on_warnings = True)
|
||||
conn.autocommit = True
|
||||
cur = conn.cursor(buffered=True)
|
||||
try:
|
||||
query_cur = QueryCursor(cur)
|
||||
(desc, results) = query_cur.exec_query("""select distinct value from __all_virtual_sys_parameter_stat where name='min_observer_version'""")
|
||||
if len(results) != 1:
|
||||
raise MyError('distinct observer version not exist')
|
||||
# rolling upgrade is supported since version 2.2.50
|
||||
elif cmp(results[0][0], "2.2.50") >= 0:
|
||||
query_cur.exec_sql("""ALTER SYSTEM BEGIN ROLLING UPGRADE""")
|
||||
logging.info("BEGIN ROLLING UPGRADE success")
|
||||
else:
|
||||
logging.info("cluster version ({0}) less than 2.2.50, skip".format(results[0][0]))
|
||||
except Exception, e:
|
||||
logging.exception('run error')
|
||||
raise e
|
||||
finally:
|
||||
cur.close()
|
||||
conn.close()
|
||||
except mysql.connector.Error, e:
|
||||
logging.exception('connection error')
|
||||
raise e
|
||||
except Exception, e:
|
||||
logging.exception('normal error')
|
||||
raise e
|
||||
|
||||
if __name__ == '__main__':
|
||||
upgrade_params = UpgradeParams()
|
||||
change_opt_defult_value('log-file', upgrade_params.log_filename)
|
||||
parse_options(sys.argv[1:])
|
||||
if not has_no_local_opts():
|
||||
deal_with_local_opts()
|
||||
else:
|
||||
check_db_client_opts()
|
||||
log_filename = get_opt_log_file()
|
||||
upgrade_params.log_filename = log_filename
|
||||
# Logging is configured here so that the preceding operations do not overwrite the log file
|
||||
config_logging_module(upgrade_params.log_filename)
|
||||
try:
|
||||
host = get_opt_host()
|
||||
port = int(get_opt_port())
|
||||
user = get_opt_user()
|
||||
password = get_opt_password()
|
||||
logging.info('parameters from cmd: host=\"%s\", port=%s, user=\"%s\", password=\"%s\", log-file=\"%s\"',\
|
||||
host, port, user, password, log_filename)
|
||||
run(host, port, user, password, upgrade_params)
|
||||
except mysql.connector.Error, e:
|
||||
logging.exception('mysql connector error')
|
||||
raise e
|
||||
except Exception, e:
|
||||
logging.exception('normal error')
|
||||
raise e
|
||||
|
@ -1,383 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import new
|
||||
import time
|
||||
import re
|
||||
import json
|
||||
import traceback
|
||||
import sys
|
||||
import mysql.connector
|
||||
from mysql.connector import errorcode
|
||||
import logging
|
||||
from my_error import MyError
|
||||
import actions
|
||||
from actions import DMLCursor
|
||||
from actions import QueryCursor
|
||||
from sys_vars_dict import sys_var_dict
|
||||
import my_utils
|
||||
|
||||
|
||||
# Because queries use the /*+read_consistency(WEAK) */ hint, creating or dropping tenants must not be allowed during the upgrade
|
||||
|
||||
def calc_diff_sys_var(cur, tenant_id):
|
||||
try:
|
||||
change_tenant(cur, tenant_id)
|
||||
actual_tenant_id = get_actual_tenant_id(tenant_id)
|
||||
cur.execute("""select name, data_type, value, info, flags, min_val, max_val from __all_sys_variable_history where tenant_id=%s and (tenant_id, zone, name, schema_version) in (select tenant_id, zone, name, max(schema_version) from __all_sys_variable_history where tenant_id=%s group by tenant_id, zone, name);"""%(actual_tenant_id, actual_tenant_id))
|
||||
results = cur.fetchall()
|
||||
logging.info('there are %s system variables for tenant id %d', len(results), tenant_id)
|
||||
update_sys_var_list = []
|
||||
update_sys_var_ori_list = []
|
||||
add_sys_var_list = []
|
||||
for r in results:
|
||||
if sys_var_dict.has_key(r[0]):
|
||||
sys_var = sys_var_dict[r[0]]
|
||||
if long(sys_var["data_type"]) != long(r[1]) or sys_var["info"].strip() != r[3].strip() or long(sys_var["flags"]) != long(r[4]) or ("min_val" in sys_var.keys() and sys_var["min_val"] != r[5]) or ("max_val" in sys_var.keys() and sys_var["max_val"] != r[6]):
|
||||
update_sys_var_list.append(sys_var)
|
||||
update_sys_var_ori_list.append(r)
|
||||
for (name, sys_var) in sys_var_dict.items():
|
||||
sys_var_exist = 0
|
||||
for r in results:
|
||||
if r[0] == sys_var["name"]:
|
||||
sys_var_exist = 1
|
||||
break
|
||||
if 0 == sys_var_exist:
|
||||
add_sys_var_list.append(sys_var)
|
||||
# reset
|
||||
sys_tenant_id = 1
|
||||
change_tenant(cur, sys_tenant_id)
|
||||
return (update_sys_var_list, update_sys_var_ori_list, add_sys_var_list)
|
||||
except Exception, e:
|
||||
logging.exception('fail to calc diff sys var')
|
||||
raise e
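# --- Illustrative sketch (assumption, not part of the original script): a simplified
# view of how the three lists returned above can be consumed with the helpers defined
# below; `cur`, `dml_cur` and the history handling of newly added variables are left
# out or hypothetical here.
def example_sync_sys_vars(cur, dml_cur, tenant_id):
    (update_list, update_ori_list, add_list) = calc_diff_sys_var(cur, tenant_id)
    update_sys_vars_for_tenant(dml_cur, tenant_id, update_list)
    for sys_var in add_list:
        dml_cur.exec_update(gen_add_sys_var_sql_for_tenant(tenant_id, sys_var))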
|
||||
|
||||
def gen_update_sys_var_sql_for_tenant(tenant_id, sys_var):
|
||||
actual_tenant_id = get_actual_tenant_id(tenant_id)
|
||||
update_sql = 'update oceanbase.__all_sys_variable set data_type = ' + str(sys_var["data_type"])\
|
||||
+ ', info = \'' + sys_var["info"].strip() + '\', flags = ' + str(sys_var["flags"])
|
||||
update_sql = update_sql\
|
||||
+ ((', min_val = \'' + sys_var["min_val"] + '\'') if "min_val" in sys_var.keys() else '')\
|
||||
+ ((', max_val = \'' + sys_var["max_val"] + '\'') if "max_val" in sys_var.keys() else '')
|
||||
update_sql = update_sql + ' where tenant_id = ' + str(actual_tenant_id) + ' and name = \'' + sys_var["name"] + '\''
|
||||
return update_sql
|
||||
|
||||
def gen_update_sys_var_history_sql_for_tenant(dml_cur, tenant_id, sys_var):
  try:
    actual_tenant_id = get_actual_tenant_id(tenant_id)
    (desc, results) = dml_cur.exec_query("""select schema_version from oceanbase.__all_sys_variable_history
                                            where tenant_id = {0} and name = '{1}'
                                            order by schema_version desc limit 1"""
                                         .format(actual_tenant_id, sys_var["name"]))
    schema_version = results[0][0]
    (desc, results) = dml_cur.exec_query("""select value from __all_sys_variable where tenant_id={0} and name='{1}' limit 1"""
                                         .format(actual_tenant_id, sys_var["name"]))
    res_len = len(results)
    if res_len != 1:
      logging.error('fail to get value from __all_sys_variable, result count: ' + str(res_len))
      raise MyError('fail to get value from __all_sys_variable')
    value = results[0][0]
    min_val = sys_var["min_val"] if "min_val" in sys_var.keys() else ''
    max_val = sys_var["max_val"] if "max_val" in sys_var.keys() else ''
    replace_sql = """replace into oceanbase.__all_sys_variable_history(
                       tenant_id,
                       zone,
                       name,
                       schema_version,
                       is_deleted,
                       data_type,
                       value,
                       info,
                       flags,
                       min_val,
                       max_val)
                     values(%d, '', '%s', %d, 0, %d, '%s', '%s', %d, '%s', '%s')
                  """%(actual_tenant_id, sys_var["name"], schema_version, sys_var["data_type"], value, sys_var["info"], sys_var["flags"], min_val, max_val)
    return replace_sql
  except Exception, e:
    logging.exception('fail to gen replace sys var history sql')
    raise e

def gen_replace_sys_var_history_sql_for_tenant(dml_cur, tenant_id, sys_var):
  try:
    actual_tenant_id = get_actual_tenant_id(tenant_id)
    (desc, results) = dml_cur.exec_query("""select schema_version from oceanbase.__all_sys_variable_history
                                            where tenant_id={0} order by schema_version asc limit 1""".format(actual_tenant_id))
    schema_version = results[0][0]
    min_val = sys_var["min_val"] if "min_val" in sys_var.keys() else ''
    max_val = sys_var["max_val"] if "max_val" in sys_var.keys() else ''
    replace_sql = """replace into oceanbase.__all_sys_variable_history(
                       tenant_id,
                       zone,
                       name,
                       schema_version,
                       is_deleted,
                       data_type,
                       value,
                       info,
                       flags,
                       min_val,
                       max_val)
                     values(%d, '', '%s', %d, 0, %d, '%s', '%s', %d, '%s', '%s')
                  """%(actual_tenant_id, sys_var["name"], schema_version, sys_var["data_type"], sys_var["value"], sys_var["info"], sys_var["flags"], min_val, max_val)
    return replace_sql
  except Exception, e:
    logging.exception('fail to gen replace sys var history sql')
    raise e

def gen_sys_var_update_sqls_for_tenant(query_cur, tenant_id, update_sys_var_list):
  update_sqls = ''
  for i in range(0, len(update_sys_var_list)):
    sys_var = update_sys_var_list[i]
    if i > 0:
      update_sqls += '\n'
    update_sqls += gen_update_sys_var_sql_for_tenant(tenant_id, sys_var) + ';\n'
    update_sqls += gen_update_sys_var_history_sql_for_tenant(query_cur, tenant_id, sys_var) + ';'
  return update_sqls

def update_sys_vars_for_tenant(dml_cur, tenant_id, update_sys_var_list):
  try:
    for i in range(0, len(update_sys_var_list)):
      sys_var = update_sys_var_list[i]
      update_sql = gen_update_sys_var_sql_for_tenant(tenant_id, sys_var)
      rowcount = dml_cur.exec_update(update_sql)
      if 1 != rowcount:
        # The history table is authoritative; to keep this step re-entrant we do not
        # verify the update result on __all_sys_variable here.
        logging.info('sys var not changed, just skip, sql: %s, tenant_id: %d', update_sql, tenant_id)
      else:
        logging.info('succeed to update sys var for tenant, sql: %s, tenant_id: %d', update_sql, tenant_id)
      # replace the updated sys var into __all_sys_variable_history
      replace_sql = gen_update_sys_var_history_sql_for_tenant(dml_cur, tenant_id, sys_var)
      rowcount = dml_cur.exec_update(replace_sql)
      if 1 != rowcount and 2 != rowcount:
        logging.error('fail to replace sys var, replace_sql: %s' % replace_sql)
        raise MyError('fail to replace sys var')
      else:
        logging.info('succeed to replace sys var history for tenant, sql: %s, tenant_id: %d', replace_sql, tenant_id)
  except Exception, e:
    logging.exception('fail to update for tenant, tenant_id: %d', tenant_id)
    raise e

def gen_add_sys_var_sql_for_tenant(tenant_id, sys_var):
  actual_tenant_id = get_actual_tenant_id(tenant_id)
  add_sql = 'replace into oceanbase.__all_sys_variable(tenant_id, zone, name, data_type, value, info, flags, min_val, max_val) values('\
            + str(actual_tenant_id) + ', \'\', \'' + sys_var["name"] + '\', ' + str(sys_var["data_type"]) + ', \'' + sys_var["value"] + '\', \''\
            + sys_var["info"].strip() + '\', ' + str(sys_var["flags"]) + ', \''
  add_sql = add_sql + (sys_var["min_val"] if "min_val" in sys_var.keys() else '') + '\', \''\
            + (sys_var["max_val"] if "max_val" in sys_var.keys() else '') + '\')'
  return add_sql

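# Illustrative example for gen_add_sys_var_sql_for_tenant (above); the variable and
# values are hypothetical, not taken from sys_var_dict. For
#   sys_var = {"name": "foo_var", "data_type": 5, "value": "1", "info": "demo", "flags": 1}
# and tenant_id = 1, the generated statement would be:
#   replace into oceanbase.__all_sys_variable(tenant_id, zone, name, data_type, value, info, flags, min_val, max_val)
#   values(1, '', 'foo_var', 5, '1', 'demo', 1, '', '')
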
def gen_sys_var_add_sqls_for_tenant(query_cur, tenant_id, add_sys_var_list):
  add_sqls = ''
  for i in range(0, len(add_sys_var_list)):
    sys_var = add_sys_var_list[i]
    if i > 0:
      add_sqls += '\n'
    add_sqls += gen_add_sys_var_sql_for_tenant(tenant_id, sys_var) + ';\n'
    add_sqls += gen_replace_sys_var_history_sql_for_tenant(query_cur, tenant_id, sys_var) + ';'
  return add_sqls

def add_sys_vars_for_tenant(dml_cur, tenant_id, add_sys_var_list):
  try:
    for i in range(0, len(add_sys_var_list)):
      sys_var = add_sys_var_list[i]
      add_sql = gen_add_sys_var_sql_for_tenant(tenant_id, sys_var)
      rowcount = dml_cur.exec_update(add_sql)
      if 1 != rowcount:
        # The history table is authoritative; to keep this step re-entrant we do not
        # verify the update result on __all_sys_variable here.
        logging.info('sys var not changed, just skip, sql: %s, tenant_id: %d', add_sql, tenant_id)
      else:
        logging.info('succeed to insert sys var for tenant, sql: %s, tenant_id: %d', add_sql, tenant_id)
      replace_sql = gen_replace_sys_var_history_sql_for_tenant(dml_cur, tenant_id, sys_var)
      rowcount = dml_cur.exec_update(replace_sql)
      if 1 != rowcount:
        logging.error('fail to replace system variable history, sql: %s' % replace_sql)
        raise MyError('fail to replace system variable history')
      else:
        logging.info('succeed to replace sys var for tenant, sql: %s, tenant_id: %d', replace_sql, tenant_id)
  except Exception, e:
    logging.exception('fail to add for tenant, tenant_id: %d', tenant_id)
    raise e

def gen_sys_var_special_update_sqls_for_tenant(tenant_id):
  special_update_sqls = ''
  return special_update_sqls

def special_update_sys_vars_for_tenant(dml_cur, tenant_id, add_sys_var_list, sys_var_name, sys_var_value):
  try:
    sys_var = None
    for i in range(0, len(add_sys_var_list)):
      if (sys_var_name == add_sys_var_list[i]["name"]):
        sys_var = add_sys_var_list[i]
        break

    if None == sys_var:
      logging.info('%s is not new, no need special update again', sys_var_name)
      return

    sys_var["value"] = sys_var_value
    update_sql = gen_update_sys_var_value_sql_for_tenant(tenant_id, sys_var)
    rowcount = dml_cur.exec_update(update_sql)
    if 1 != rowcount:
      # The history table is authoritative; to keep this step re-entrant we do not
      # verify the update result on __all_sys_variable here.
      logging.info('sys var not changed, just skip, sql: %s, tenant_id: %d', update_sql, tenant_id)
    else:
      logging.info('succeed to update sys var for tenant, sql: %s, tenant_id: %d', update_sql, tenant_id)
    # replace the updated sys var into __all_sys_variable_history
    replace_sql = gen_update_sys_var_history_sql_for_tenant(dml_cur, tenant_id, sys_var)
    rowcount = dml_cur.exec_update(replace_sql)
    if 1 != rowcount and 2 != rowcount:
      logging.error('fail to replace sys var, replace_sql: %s' % replace_sql)
      raise MyError('fail to replace sys var')
    else:
      logging.info('succeed to replace sys var history for tenant, sql: %s, tenant_id: %d', replace_sql, tenant_id)
  except Exception, e:
    logging.exception('fail to special update for tenant, tenant_id: %d', tenant_id)
    raise e

def get_sys_vars_upgrade_dmls_str(cur, query_cur, tenant_id_list, update_sys_var_list, add_sys_var_list):
  ret_str = ''
  if len(tenant_id_list) <= 0:
    logging.error('distinct tenant id count is <= 0, tenant_id_count: %d', len(tenant_id_list))
    raise MyError('invalid arg')
  for i in range(0, len(tenant_id_list)):
    tenant_id = tenant_id_list[i]
    change_tenant(cur, tenant_id)
    if i > 0:
      ret_str += '\n'
    ret_str += gen_sys_var_update_sqls_for_tenant(query_cur, tenant_id, update_sys_var_list)
  if ret_str != '' and len(add_sys_var_list) > 0:
    ret_str += '\n'
  for i in range(0, len(tenant_id_list)):
    tenant_id = tenant_id_list[i]
    change_tenant(cur, tenant_id)
    if i > 0:
      ret_str += '\n'
    ret_str += gen_sys_var_add_sqls_for_tenant(query_cur, tenant_id, add_sys_var_list)
  if ret_str != '' and gen_sys_var_special_update_sqls_for_tenant(tenant_id_list[0]) != '':
    ret_str += '\n'
  for i in range(0, len(tenant_id_list)):
    tenant_id = tenant_id_list[i]
    change_tenant(cur, tenant_id)
    if i > 0:
      ret_str += '\n'
    ret_str += gen_sys_var_special_update_sqls_for_tenant(tenant_id)
  sys_tenant_id = 1
  change_tenant(cur, sys_tenant_id)
  return ret_str

def gen_update_sys_var_value_sql_for_tenant(tenant_id, sys_var):
  update_sql = ('update oceanbase.__all_sys_variable set value = \'' + str(sys_var["value"])
                + '\' where tenant_id = ' + str(tenant_id) + ' and name = \'' + sys_var["name"] + '\'')
  return update_sql

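# Note on gen_update_sys_var_value_sql_for_tenant (above): unlike the other SQL
# generators in this file, it keys the WHERE clause on the raw tenant_id rather than
# get_actual_tenant_id(tenant_id); whether that difference is intentional is not clear
# from this script alone, so the behaviour is left unchanged here.
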
# Any change to the implementation below must also be reflected in
# ObUpgradeUtils::upgrade_sys_variable().
def exec_sys_vars_upgrade_dml(cur, tenant_id_list):
  if len(tenant_id_list) <= 0:
    logging.error('distinct tenant id count is <= 0, tenant_id_count: %d', len(tenant_id_list))
    raise MyError('invalid arg')
  dml_cur = DMLCursor(cur)
  # dump all rows of oceanbase.__all_sys_variable before making any change
  my_utils.query_and_dump_results(dml_cur, """select * from oceanbase.__all_virtual_sys_variable""")
  # dump all rows of oceanbase.__all_sys_variable_history before making any change
  my_utils.query_and_dump_results(dml_cur, """select * from oceanbase.__all_virtual_sys_variable_history""")

  for i in range(0, len(tenant_id_list)):
    tenant_id = tenant_id_list[i]
    # calc diff
    (update_sys_var_list, update_sys_var_ori_list, add_sys_var_list) = calc_diff_sys_var(cur, tenant_id)
    logging.info('update system variables list: [%s]', ', '.join(str(sv) for sv in update_sys_var_list))
    logging.info('update system variables original list: [%s]', ', '.join(str(sv) for sv in update_sys_var_ori_list))
    logging.info('add system variables list: [%s]', ', '.join(str(sv) for sv in add_sys_var_list))
    # update
    change_tenant(cur, tenant_id)
    update_sys_vars_for_tenant(dml_cur, tenant_id, update_sys_var_list)
    add_sys_vars_for_tenant(dml_cur, tenant_id, add_sys_var_list)
    special_update_sys_vars_for_tenant(dml_cur, tenant_id, add_sys_var_list, 'nls_date_format', 'YYYY-MM-DD HH24:MI:SS')
    special_update_sys_vars_for_tenant(dml_cur, tenant_id, add_sys_var_list, 'nls_timestamp_format', 'YYYY-MM-DD HH24:MI:SS.FF')
    special_update_sys_vars_for_tenant(dml_cur, tenant_id, add_sys_var_list, 'nls_timestamp_tz_format', 'YYYY-MM-DD HH24:MI:SS.FF TZR TZD')
    # reset
    sys_tenant_id = 1
    change_tenant(cur, sys_tenant_id)

def exec_sys_vars_upgrade_dml_in_standby_cluster(standby_cluster_infos):
  try:
    for standby_cluster_info in standby_cluster_infos:
      exec_sys_vars_upgrade_dml_by_cluster(standby_cluster_info)
  except Exception, e:
    logging.exception("""exec_sys_vars_upgrade_dml_in_standby_cluster failed""")
    raise e

def exec_sys_vars_upgrade_dml_by_cluster(standby_cluster_info):
  try:
    logging.info("exec_sys_vars_upgrade_dml_by_cluster : cluster_id = {0}, ip = {1}, port = {2}"
                 .format(standby_cluster_info['cluster_id'],
                         standby_cluster_info['ip'],
                         standby_cluster_info['port']))
    logging.info("create connection : cluster_id = {0}, ip = {1}, port = {2}"
                 .format(standby_cluster_info['cluster_id'],
                         standby_cluster_info['ip'],
                         standby_cluster_info['port']))
    conn = mysql.connector.connect(user     = standby_cluster_info['user'],
                                   password = standby_cluster_info['pwd'],
                                   host     = standby_cluster_info['ip'],
                                   port     = standby_cluster_info['port'],
                                   database = 'oceanbase',
                                   raise_on_warnings = True)
    cur = conn.cursor(buffered=True)
    conn.autocommit = True
    dml_cur = DMLCursor(cur)
    query_cur = QueryCursor(cur)
    is_primary = actions.check_current_cluster_is_primary(query_cur)
    if is_primary:
      logging.error("""primary cluster changed : cluster_id = {0}, ip = {1}, port = {2}"""
                    .format(standby_cluster_info['cluster_id'],
                            standby_cluster_info['ip'],
                            standby_cluster_info['port']))
      raise MyError('primary cluster changed, current cluster should be a standby')

    # only update sys tenant in standby cluster
    tenant_id = 1
    # calc diff
    (update_sys_var_list, update_sys_var_ori_list, add_sys_var_list) = calc_diff_sys_var(cur, tenant_id)
    logging.info('update system variables list: [%s]', ', '.join(str(sv) for sv in update_sys_var_list))
    logging.info('update system variables original list: [%s]', ', '.join(str(sv) for sv in update_sys_var_ori_list))
    logging.info('add system variables list: [%s]', ', '.join(str(sv) for sv in add_sys_var_list))
    # update
    update_sys_vars_for_tenant(dml_cur, tenant_id, update_sys_var_list)
    add_sys_vars_for_tenant(dml_cur, tenant_id, add_sys_var_list)
    special_update_sys_vars_for_tenant(dml_cur, tenant_id, add_sys_var_list, 'nls_date_format', 'YYYY-MM-DD HH24:MI:SS')
    special_update_sys_vars_for_tenant(dml_cur, tenant_id, add_sys_var_list, 'nls_timestamp_format', 'YYYY-MM-DD HH24:MI:SS.FF')
    special_update_sys_vars_for_tenant(dml_cur, tenant_id, add_sys_var_list, 'nls_timestamp_tz_format', 'YYYY-MM-DD HH24:MI:SS.FF TZR TZD')

    cur.close()
    conn.close()

  except Exception, e:
    logging.exception("""exec_sys_vars_upgrade_dml_by_cluster failed :
                         cluster_id = {0}, ip = {1}, port = {2}"""
                      .format(standby_cluster_info['cluster_id'],
                              standby_cluster_info['ip'],
                              standby_cluster_info['port']))
    raise e

def get_actual_tenant_id(tenant_id):
  return tenant_id if (1 == tenant_id) else 0

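# Note (added for clarity): once the session has switched into a user tenant via
# change_tenant(), that tenant's rows in __all_sys_variable / __all_sys_variable_history
# appear to be keyed with tenant_id = 0, while the sys tenant keeps tenant_id = 1;
# this helper maps the external tenant_id to that stored key.
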
def change_tenant(cur, tenant_id):
  # change tenant
  sql = "alter system change tenant tenant_id = {0};".format(tenant_id)
  logging.info(sql)
  cur.execute(sql)
  # check
  sql = "select effective_tenant_id();"
  cur.execute(sql)
  result = cur.fetchall()
  if (1 != len(result) or 1 != len(result[0])):
    raise MyError("invalid result cnt")
  elif (tenant_id != result[0][0]):
    raise MyError("effective_tenant_id:{0} , tenant_id:{1}".format(result[0][0], tenant_id))

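# Illustrative usage of change_tenant (hypothetical tenant id): change_tenant(cur, 1001)
# switches the current session into tenant 1001 and raises MyError if
# effective_tenant_id() does not report the requested tenant afterwards.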