[CP] enable compaction in upgrade
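In short, this change removes the need_check_major_status flag from upgrade_health_checker.do_check (callers now pass only the connection arguments, upgrade_params, timeout and an optional zone), drops the pre-upgrade steps in do_special_upgrade that disabled enable_major_freeze and _enable_adaptive_compaction, waited for the major merge and suspended merging, removes the tablet-level merging check from check_cluster_status, and drops the matching enable_major_freeze call from the post-upgrade checker, so major compaction stays enabled while the upgrade runs. The sketch below only illustrates the signature change; the stub bodies and example arguments are hypothetical, not the repository code.

# Hypothetical illustration of the do_check signature before and after this
# commit; the bodies are stubs, not the real upgrade_health_checker code.

def do_check_before(my_host, my_port, my_user, my_passwd, upgrade_params,
                    timeout, need_check_major_status, zone=''):
    # Callers had to say whether major-compaction status should be verified.
    if need_check_major_status:
        pass  # check_major_merge(query_cur, timeout) ran here

def do_check_after(my_host, my_port, my_user, my_passwd, upgrade_params,
                   timeout, zone=''):
    # No flag any more: the health check never waits on major compaction.
    pass

# Old call sites:
#   do_check(host, port, user, passwd, upgrade_params, timeout, False)  # pre-upgrade
#   do_check(host, port, user, passwd, upgrade_params, timeout, True)   # post-upgrade
# New call site in both places:
#   do_check(host, port, user, passwd, upgrade_params, timeout)
do_check_after('127.0.0.1', 2881, 'admin', '', None, 600)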
@@ -74,7 +74,7 @@ def do_upgrade(my_host, my_port, my_user, my_passwd, timeout, my_module_set, upg

 if run_modules.MODULE_HEALTH_CHECK in my_module_set:
   logging.info('================begin to run health check action ===============')
-  upgrade_health_checker.do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout, False) # need_check_major_status = False
+  upgrade_health_checker.do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout)
   logging.info('================succeed to run health check action ===============')

 if run_modules.MODULE_END_ROLLING_UPGRADE in my_module_set:
@@ -96,7 +96,7 @@ def do_upgrade(my_host, my_port, my_user, my_passwd, timeout, my_module_set, upg

 if run_modules.MODULE_HEALTH_CHECK in my_module_set:
   logging.info('================begin to run health check action ===============')
-  upgrade_health_checker.do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout, True) # need_check_major_status = True
+  upgrade_health_checker.do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout)
   logging.info('================succeed to run health check action ===============')

 except Exception, e:
@@ -28,14 +28,6 @@ def do_special_upgrade(conn, cur, timeout, user, passwd):
 # when upgrade across version, disable enable_ddl/major_freeze
 if current_version != target_version:
   actions.set_parameter(cur, 'enable_ddl', 'False', timeout)
-  actions.set_parameter(cur, 'enable_major_freeze', 'False', timeout)
-  actions.set_tenant_parameter(cur, '_enable_adaptive_compaction', 'False', timeout)
-  # wait scheduler in storage to notice adaptive_compaction is switched to false
-  time.sleep(60 * 2)
-  query_cur = actions.QueryCursor(cur)
-  wait_major_timeout = 600
-  upgrade_health_checker.check_major_merge(query_cur, wait_major_timeout)
-  actions.do_suspend_merge(cur, timeout)
 # When upgrading from a version prior to 4.2 to version 4.2, the bloom_filter should be disabled.
 # The param _bloom_filter_enabled is no longer in use as of version 4.2, there is no need to enable it again.
 if actions.get_version(current_version) < actions.get_version('4.2.0.0')\
@@ -411,9 +411,6 @@ def check_cluster_status(query_cur):
 (desc, results) = query_cur.exec_query("""select count(1) from CDB_OB_MAJOR_COMPACTION where (GLOBAL_BROADCAST_SCN > LAST_SCN or STATUS != 'IDLE')""")
 if results[0][0] > 0 :
   fail_list.append('{0} tenant is merging, please check'.format(results[0][0]))
-(desc, results) = query_cur.exec_query("""select /*+ query_timeout(1000000000) */ count(1) from __all_virtual_tablet_compaction_info where max_received_scn > finished_scn and max_received_scn > 0""")
-if results[0][0] > 0 :
-  fail_list.append('{0} tablet is merging, please check'.format(results[0][0]))
 logging.info('check cluster status success')

 # 5. Check whether there are abnormal tenants (creating, delayed deletion, restoring)
@@ -393,7 +393,7 @@ def check_until_timeout(query_cur, sql, value, timeout):
     time.sleep(10)

 # Start the health check
-def do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout, need_check_major_status, zone = ''):
+def do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout, zone = ''):
   try:
     conn = mysql.connector.connect(user = my_user,
                                    password = my_passwd,
@@ -410,8 +410,6 @@ def do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout, need
     check_paxos_replica(query_cur, timeout)
     check_schema_status(query_cur, timeout)
     check_server_version_by_zone(query_cur, zone)
-    if True == need_check_major_status:
-      check_major_merge(query_cur, timeout)
   except Exception, e:
     logging.exception('run error')
     raise e
@@ -446,7 +444,7 @@ if __name__ == '__main__':
     zone = get_opt_zone()
     logging.info('parameters from cmd: host=\"%s\", port=%s, user=\"%s\", password=\"%s\", log-file=\"%s\", timeout=%s, zone=\"%s\"', \
         host, port, user, password.replace('"', '\\"'), log_filename, timeout, zone)
-    do_check(host, port, user, password, upgrade_params, timeout, False, zone) # need_check_major_status = False
+    do_check(host, port, user, password, upgrade_params, timeout, zone)
   except mysql.connector.Error, e:
     logging.exception('mysql connctor error')
     raise e
@@ -674,7 +674,7 @@
 #
 # if run_modules.MODULE_HEALTH_CHECK in my_module_set:
 # logging.info('================begin to run health check action ===============')
-# upgrade_health_checker.do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout, False) # need_check_major_status = False
+# upgrade_health_checker.do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout)
 # logging.info('================succeed to run health check action ===============')
 #
 # if run_modules.MODULE_END_ROLLING_UPGRADE in my_module_set:
@@ -869,7 +869,7 @@
 #
 # if run_modules.MODULE_HEALTH_CHECK in my_module_set:
 # logging.info('================begin to run health check action ===============')
-# upgrade_health_checker.do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout, True) # need_check_major_status = True
+# upgrade_health_checker.do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout)
 # logging.info('================succeed to run health check action ===============')
 #
 # except Exception, e:
@@ -1355,14 +1355,6 @@
 # # when upgrade across version, disable enable_ddl/major_freeze
 # if current_version != target_version:
 # actions.set_parameter(cur, 'enable_ddl', 'False', timeout)
-# actions.set_parameter(cur, 'enable_major_freeze', 'False', timeout)
-# actions.set_tenant_parameter(cur, '_enable_adaptive_compaction', 'False', timeout)
-# # wait scheduler in storage to notice adaptive_compaction is switched to false
-# time.sleep(60 * 2)
-# query_cur = actions.QueryCursor(cur)
-# wait_major_timeout = 600
-# upgrade_health_checker.check_major_merge(query_cur, wait_major_timeout)
-# actions.do_suspend_merge(cur, timeout)
 # # When upgrading from a version prior to 4.2 to version 4.2, the bloom_filter should be disabled.
 # # The param _bloom_filter_enabled is no longer in use as of version 4.2, there is no need to enable it again.
 # if actions.get_version(current_version) < actions.get_version('4.2.0.0')\
@@ -2071,9 +2063,6 @@
 # (desc, results) = query_cur.exec_query("""select count(1) from CDB_OB_MAJOR_COMPACTION where (GLOBAL_BROADCAST_SCN > LAST_SCN or STATUS != 'IDLE')""")
 # if results[0][0] > 0 :
 # fail_list.append('{0} tenant is merging, please check'.format(results[0][0]))
-# (desc, results) = query_cur.exec_query("""select /*+ query_timeout(1000000000) */ count(1) from __all_virtual_tablet_compaction_info where max_received_scn > finished_scn and max_received_scn > 0""")
-# if results[0][0] > 0 :
-# fail_list.append('{0} tablet is merging, please check'.format(results[0][0]))
 # logging.info('check cluster status success')
 #
 ## 5. Check whether there are abnormal tenants (creating, delayed deletion, restoring)
@@ -2869,7 +2858,7 @@
 # time.sleep(10)
 #
 ## Start the health check
-#def do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout, need_check_major_status, zone = ''):
+#def do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout, zone = ''):
 # try:
 # conn = mysql.connector.connect(user = my_user,
 # password = my_passwd,
@@ -2886,8 +2875,6 @@
 # check_paxos_replica(query_cur, timeout)
 # check_schema_status(query_cur, timeout)
 # check_server_version_by_zone(query_cur, zone)
-# if True == need_check_major_status:
-# check_major_merge(query_cur, timeout)
 # except Exception, e:
 # logging.exception('run error')
 # raise e
@@ -2922,7 +2909,7 @@
 # zone = get_opt_zone()
 # logging.info('parameters from cmd: host=\"%s\", port=%s, user=\"%s\", password=\"%s\", log-file=\"%s\", timeout=%s, zone=\"%s\"', \
 # host, port, user, password.replace('"', '\\"'), log_filename, timeout, zone)
-# do_check(host, port, user, password, upgrade_params, timeout, False, zone) # need_check_major_status = False
+# do_check(host, port, user, password, upgrade_params, timeout, zone)
 # except mysql.connector.Error, e:
 # logging.exception('mysql connctor error')
 # raise e
@@ -3064,7 +3051,6 @@
 # enable_ddl(cur, timeout)
 # enable_rebalance(cur, timeout)
 # enable_rereplication(cur, timeout)
-# enable_major_freeze(cur, timeout)
 # except Exception, e:
 # logging.exception('run error')
 # raise e
@@ -130,7 +130,6 @@ def do_check(conn, cur, query_cur, timeout):
     enable_ddl(cur, timeout)
     enable_rebalance(cur, timeout)
     enable_rereplication(cur, timeout)
-    enable_major_freeze(cur, timeout)
   except Exception, e:
     logging.exception('run error')
     raise e
@@ -674,7 +674,7 @@
 #
 # if run_modules.MODULE_HEALTH_CHECK in my_module_set:
 # logging.info('================begin to run health check action ===============')
-# upgrade_health_checker.do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout, False) # need_check_major_status = False
+# upgrade_health_checker.do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout)
 # logging.info('================succeed to run health check action ===============')
 #
 # if run_modules.MODULE_END_ROLLING_UPGRADE in my_module_set:
@@ -869,7 +869,7 @@
 #
 # if run_modules.MODULE_HEALTH_CHECK in my_module_set:
 # logging.info('================begin to run health check action ===============')
-# upgrade_health_checker.do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout, True) # need_check_major_status = True
+# upgrade_health_checker.do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout)
 # logging.info('================succeed to run health check action ===============')
 #
 # except Exception, e:
@@ -1355,14 +1355,6 @@
 # # when upgrade across version, disable enable_ddl/major_freeze
 # if current_version != target_version:
 # actions.set_parameter(cur, 'enable_ddl', 'False', timeout)
-# actions.set_parameter(cur, 'enable_major_freeze', 'False', timeout)
-# actions.set_tenant_parameter(cur, '_enable_adaptive_compaction', 'False', timeout)
-# # wait scheduler in storage to notice adaptive_compaction is switched to false
-# time.sleep(60 * 2)
-# query_cur = actions.QueryCursor(cur)
-# wait_major_timeout = 600
-# upgrade_health_checker.check_major_merge(query_cur, wait_major_timeout)
-# actions.do_suspend_merge(cur, timeout)
 # # When upgrading from a version prior to 4.2 to version 4.2, the bloom_filter should be disabled.
 # # The param _bloom_filter_enabled is no longer in use as of version 4.2, there is no need to enable it again.
 # if actions.get_version(current_version) < actions.get_version('4.2.0.0')\
@@ -2071,9 +2063,6 @@
 # (desc, results) = query_cur.exec_query("""select count(1) from CDB_OB_MAJOR_COMPACTION where (GLOBAL_BROADCAST_SCN > LAST_SCN or STATUS != 'IDLE')""")
 # if results[0][0] > 0 :
 # fail_list.append('{0} tenant is merging, please check'.format(results[0][0]))
-# (desc, results) = query_cur.exec_query("""select /*+ query_timeout(1000000000) */ count(1) from __all_virtual_tablet_compaction_info where max_received_scn > finished_scn and max_received_scn > 0""")
-# if results[0][0] > 0 :
-# fail_list.append('{0} tablet is merging, please check'.format(results[0][0]))
 # logging.info('check cluster status success')
 #
 ## 5. Check whether there are abnormal tenants (creating, delayed deletion, restoring)
@@ -2869,7 +2858,7 @@
 # time.sleep(10)
 #
 ## Start the health check
-#def do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout, need_check_major_status, zone = ''):
+#def do_check(my_host, my_port, my_user, my_passwd, upgrade_params, timeout, zone = ''):
 # try:
 # conn = mysql.connector.connect(user = my_user,
 # password = my_passwd,
@@ -2886,8 +2875,6 @@
 # check_paxos_replica(query_cur, timeout)
 # check_schema_status(query_cur, timeout)
 # check_server_version_by_zone(query_cur, zone)
-# if True == need_check_major_status:
-# check_major_merge(query_cur, timeout)
 # except Exception, e:
 # logging.exception('run error')
 # raise e
@@ -2922,7 +2909,7 @@
 # zone = get_opt_zone()
 # logging.info('parameters from cmd: host=\"%s\", port=%s, user=\"%s\", password=\"%s\", log-file=\"%s\", timeout=%s, zone=\"%s\"', \
 # host, port, user, password.replace('"', '\\"'), log_filename, timeout, zone)
-# do_check(host, port, user, password, upgrade_params, timeout, False, zone) # need_check_major_status = False
+# do_check(host, port, user, password, upgrade_params, timeout, zone)
 # except mysql.connector.Error, e:
 # logging.exception('mysql connctor error')
 # raise e
@@ -3064,7 +3051,6 @@
 # enable_ddl(cur, timeout)
 # enable_rebalance(cur, timeout)
 # enable_rereplication(cur, timeout)
-# enable_major_freeze(cur, timeout)
 # except Exception, e:
 # logging.exception('run error')
 # raise e