Merge branch 'pr_1652'

This commit is contained in:
ob-robot 2023-11-16 09:08:09 +00:00
commit d3ec0428d1
12 changed files with 538 additions and 35 deletions

View File

@ -592,9 +592,10 @@ int ObServerLogBlockMgr::do_load_(const char *log_disk_path)
CLOG_LOG(WARN, "try_continous_do_resize_ failed", K(ret), KPC(this),
K(log_disk_path), K(has_allocated_block_cnt));
} else if (FALSE_IT(time_guard.click("try_continous_to_resize_"))
|| false
|| (false
== check_log_pool_whehter_is_integrity_(has_allocated_block_cnt
* BLOCK_SIZE)) {
* BLOCK_SIZE) &&
OB_FAIL(recover_(has_allocated_block_cnt * BLOCK_SIZE)))) {
ret = OB_ERR_UNEXPECTED;
CLOG_LOG(ERROR, "check_log_pool_whehter_is_integrity_ failed, unexpected error",
K(ret), KPC(this), K(log_disk_path), K(has_allocated_block_cnt));
@ -605,6 +606,32 @@ int ObServerLogBlockMgr::do_load_(const char *log_disk_path)
return ret;
}
int ObServerLogBlockMgr::recover_(const int64_t has_allocated_block_byte_size)
{
int ret = OB_SUCCESS;
if (IS_NOT_INIT) {
ret = OB_NOT_INIT;
CLOG_LOG(WARN, "ObServerLogBlockMGR is not inited", K(ret), KPC(this));
} else {
// Compare the size recorded in the pool meta with the size actually observed on disk:
// the free blocks still in the pool plus the blocks that have already been allocated.
const int64_t meta_curr_total_size_byte = log_pool_meta_.curr_total_size_;
const int64_t free_size_byte = get_free_size_guarded_by_lock_();
if (meta_curr_total_size_byte == free_size_byte + has_allocated_block_byte_size) {
// The sizes already match, nothing to recover.
} else if (meta_curr_total_size_byte > free_size_byte + has_allocated_block_byte_size) {
// The meta records more space than was found on disk; resize so the pool meta
// matches the observed size.
LogPoolMeta new_log_pool_meta = log_pool_meta_;
new_log_pool_meta.curr_total_size_ = free_size_byte + has_allocated_block_byte_size;
new_log_pool_meta.status_ = EXPANDING_STATUS;
return do_resize_(log_pool_meta_,
calc_block_cnt_by_size_(meta_curr_total_size_byte) -
calc_block_cnt_by_size_(free_size_byte + has_allocated_block_byte_size),
new_log_pool_meta);
} else {
// Finding more blocks on disk than the meta records is unexpected.
ret = OB_ERR_UNEXPECTED;
}
}
return ret;
}
int ObServerLogBlockMgr::scan_log_disk_dir_(const char *log_disk_path,
int64_t &has_allocated_block_cnt)
{

View File

@ -224,6 +224,7 @@ private:
int prepare_dir_and_create_meta_(const char *log_pool_path,
const char *log_pool_tmp_path);
int do_load_(const char *log_disk_path);
int recover_(const int64_t has_allocated_block_byte_size);
int scan_log_disk_dir_(const char *log_disk_path, int64_t &has_allocated_block_cnt);
int scan_log_pool_dir_and_do_trim_();
int trim_log_pool_dir_and_init_block_id_range_(const BlockIdArray &block_id_array,

View File

@ -1,7 +1,10 @@
FROM openanolis/anolisos
# docker build --build-arg VERSION={VERSION} .
ARG VERSION
# docker build --build-arg LOCAL_RPM="oceanbase-ce-4.3.0.0-1.alios7.aarch64.rpm" --build-arg LOCAL_LIB_RPM="oceanbase-ce-libs-4.3.0.0-1.alios7.aarch64.rpm" -t observer .
ARG VERSION="4.3.0.0"
ARG LOCAL_RPM
ARG LOCAL_LIB_RPM
RUN yum install -y yum-utils && \
yum-config-manager --add-repo https://mirrors.aliyun.com/oceanbase/OceanBase.repo && \
@ -12,12 +15,18 @@ RUN yum install -y yum-utils && \
RUN mkdir -p /root/pkg && \
cd /root/pkg && \
yum install -y --downloadonly --downloaddir=. oceanbase-ce-${VERSION}.el7 oceanbase-ce-libs-${VERSION}.el7 obagent && \
rm -rf /usr/obd/mirror/remote/* && \
if [ "${LOCAL_RPM}" != "" ]; then \
yum install -y --downloadonly --downloaddir=. obagent; \
else \
yum install -y --downloadonly --downloaddir=. oceanbase-ce-${VERSION}.el7 oceanbase-ce-libs-${VERSION}.el7 obagent; \
fi && \
yum clean all
COPY ${LOCAL_RPM} /root/pkg
COPY ${LOCAL_LIB_RPM} /root/pkg
COPY boot /root/boot/
ENV PATH /root/boot:$PATH
ENV LD_LIBRARY_PATH /root/ob/lib:$LD_LIBRARY_PATH
WORKDIR /root
CMD _boot

View File

@ -110,3 +110,16 @@ docker run -d -p 2881:2881 -v $PWD/ob:/root/ob -v $PWD/obd:/root/.obd --name oce
The `oceanbase-ce` docker image saves its data to the /root/ob directory by default. You must bind both /root/ob and /root/.obd. If only /root/ob is bound, the container cannot be restarted, because oceanbase-ce uses [obd](https://github.com/oceanbase/obdeploy) to manage the database cluster, and a brand-new docker container contains no cluster information.
For details on the docker -v option, see [docker volume](https://docs.docker.com/storage/volumes/).
## Fast boot image building for a standalone node
The `fast_boot_docker_build.sh` script in the `tools/docker/standalone` directory builds the fast-boot image. Before running it, first edit the `tools/docker/standalone/boot/_env` environment configuration script:
- Required: set the `MODE` configuration item to `STANDALONE`
- Optional: adjust the remaining configuration items
After editing, run the build script; it supports both remote RPM packages and RPM packages produced by a local build:
- Local: `./fast_boot_docker_build.sh -L <oceanbase_rpm_path> <oceanbase_lib_rpm_path>`
- Remote: `./fast_boot_docker_build.sh -R <remote_rpm_version>`
Once the build finishes, start and test the instance in the same way as described above.
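For example, a local build might look like the sketch below. The RPM file names are illustrative placeholders (they follow the naming used in the Dockerfile comments); substitute the packages produced by your own build:

```bash
cd tools/docker/standalone
# boot/_env must already contain MODE=STANDALONE, otherwise the script exits early.
./fast_boot_docker_build.sh -L \
    ./oceanbase-ce-4.3.0.0-1.alios7.aarch64.rpm \
    ./oceanbase-ce-libs-4.3.0.0-1.alios7.aarch64.rpm
```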

View File

@ -107,3 +107,16 @@ Note that you should use your own path.
The `oceanbase-ce` docker image saves its data to the /root/ob directory by default. You should bind both /root/ob and /root/.obd. The container cannot be restarted if you only bind /root/ob, because oceanbase-ce uses [obd](https://github.com/oceanbase/obdeploy) to manage the database cluster and a brand-new docker container holds no cluster information.
You can view more information about `docker -v` at [docker volume](https://docs.docker.com/storage/volumes/).
## Fast boot image building for a standalone node
The `fast_boot_docker_build.sh` script in the `tools/docker/standalone` directory builds the fast boot image. Before running it, first edit the `tools/docker/standalone/boot/_env` environment configuration script:
- Required: set the `MODE` configuration item to `STANDALONE`
- Optional: adjust the remaining configuration items
After editing, run the build script; it supports both remote RPM packages and RPM packages produced by a local build:
- Local: `./fast_boot_docker_build.sh -L <oceanbase_rpm_path> <oceanbase_lib_rpm_path>`
- Remote: `./fast_boot_docker_build.sh -R <remote_rpm_version>`
Once the build finishes, you can start and test the instance in the same way as described above.
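As a sketch, a remote build followed by starting the resulting image could look like the commands below; the `observer` image tag comes from the build script, while the version string, container name, and bound host directories are placeholders to adapt:

```bash
# Build against the OceanBase yum repository (the version is a placeholder).
./fast_boot_docker_build.sh -R 4.3.0.0

# Start the freshly built image the same way as the regular oceanbase-ce image.
docker run -d -p 2881:2881 -v $PWD/ob:/root/ob -v $PWD/obd:/root/.obd \
    --name oceanbase-fastboot observer
docker logs -f oceanbase-fastboot   # follow the boot log until startup completes
```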

View File

@ -4,6 +4,37 @@ cd "${CWD}"
source _env
STAMP="$(date +%s)"
STEP=1
PHASE_START_TIME=0
# Print the elapsed time between two `date +%s.%N` timestamps with millisecond precision.
function timediff() {
start_time=$1
end_time=$2
start_s=${start_time%.*}
start_nanos=${start_time#*.}
end_s=${end_time%.*}
end_nanos=${end_time#*.}
# Borrow one second when the nanosecond part would otherwise go negative.
if [ "$end_nanos" -lt "$start_nanos" ];then
end_s=$(( 10#$end_s - 1 ))
end_nanos=$(( 10#$end_nanos + 10**9 ))
fi
time=$(( 10#$end_s - 10#$start_s )).`printf "%03d\n" $(( (10#$end_nanos - 10#$start_nanos)/10**6 ))`
echo $time
}
# Announce a numbered phase and remember when it started.
function print_start_phase() {
echo "/////////////////////// STEP ${STEP}: $@ ///////////////////////"
STEP=$(( STEP + 1 ))
PHASE_START_TIME=$(date +%s.%N)
}
# Report how long the current phase took.
function print_end_phase() {
cur_time=$(date +%s.%N)
echo "/////////////////////// phase end: $(timediff ${PHASE_START_TIME} ${cur_time}) s ///////////////////////"
}
function is_true() {
value=$1
@ -90,12 +121,14 @@ if [ -f "$HOME/.obd/cluster/${OB_CLUSTER_NAME}/config.yaml" ]; then
obd cluster start $OB_CLUSTER_NAME
else # nothing here, bootstrap
echo "generate boot.yaml ..."
print_start_phase "Config Generation"
TMPFILE="boot.${STAMP}.yaml"
get_mode
if [ "x${MODE}" == "xMINI" ]; then
if [ "x${MODE}" == "xSTANDALONE" ]; then
echo "oceanbase-ce docker in standalone mode"
cp -f boot-mini-tmp.yaml $TMPFILE
elif [ "x${MODE}" == "xMINI" ]; then
echo "oceanbase-ce docker in mini mode"
cp -f boot-mini-tmp.yaml $TMPFILE
elif [ "x${MODE}" == "xSLIM" ]; then
@ -118,43 +151,66 @@ else # nothing here, bootstrap
sed -i "s|@OB_DATAFILE_SIZE@|${OB_DATAFILE_SIZE}|g" $TMPFILE
sed -i "s|@OB_LOG_DISK_SIZE@|${OB_LOG_DISK_SIZE}|g" $TMPFILE
sed -i "s|@OB_ROOT_PASSWORD@|${OB_ROOT_PASSWORD}|g" $TMPFILE
[ "${OB_DATA_DIR}" ] && echo " data_dir: ${OB_DATA_DIR}" >> $TMPFILE
[ "${OB_REDO_DIR}" ] && echo " redo_dir: ${OB_REDO_DIR}" >> $TMPFILE
echo "create boot dirs and deploy ob cluster ..."
mkdir -p $OB_HOME_PATH
print_end_phase
print_start_phase "Ob-deploy mirror clone"
mkdir -p $OB_HOME_PATH
obd mirror clone /root/pkg/*.rpm \
&& obd mirror list local
print_end_phase
print_start_phase "Ob-deploy deploy"
remove_disk_check_logic_in_obd
obd devmode enable && obd cluster autodeploy "${OB_CLUSTER_NAME}" -c $TMPFILE;
if [ $? -ne 0 ]; then
deploy_failed
fi
if [ "x${MODE}" == "xSTANDALONE" ]; then
obd devmode enable && obd cluster deploy "${OB_CLUSTER_NAME}" -c $TMPFILE;
if [ $? -ne 0 ]; then
deploy_failed
fi
print_end_phase
print_start_phase "Ob-deploy restore store dir"
rm -rf ${OB_HOME_PATH}/store && tar -Sxzvf /root/boot/store.tar.gz -C ${OB_HOME_PATH}
print_end_phase
create_tenant_cmd="obd cluster tenant create ${OB_CLUSTER_NAME} -n ${OB_TENANT_NAME}"
if ! [ -z "${OB_TENANT_MINI_CPU}" ]; then
create_tenant_cmd="${create_tenant_cmd} --min-cpu=${OB_TENANT_MINI_CPU}"
fi;
if ! [ -z "${OB_TENANT_MEMORY_SIZE}" ]; then
create_tenant_cmd="${create_tenant_cmd} --memory-size=${OB_TENANT_MEMORY_SIZE}"
fi;
if ! [ -z "${OB_TENANT_LOG_DISK_SIZE}" ]; then
create_tenant_cmd="${create_tenant_cmd} --log-disk-size=${OB_TENANT_LOG_DISK_SIZE}"
fi;
eval ${create_tenant_cmd}
if [ $? -ne 0 ]; then
deploy_failed
fi
print_start_phase "Ob-deploy import etc"
cp -r /root/boot/etc/* ${OB_HOME_PATH}/etc
print_end_phase
if [ "x${MODE}" != "xSLIM" ]; then
obclient -h127.1 -uroot@${OB_TENANT_NAME} -A -P${OB_MYSQL_PORT} < init_tenant_user.sql
print_start_phase "Ob-deploy start"
obd cluster start ${OB_CLUSTER_NAME}
print_end_phase
else
run_custom_scripts /root/boot/init.d
fi
print_start_phase "Ob-deploy autodeploy"
obd devmode enable && obd cluster autodeploy "${OB_CLUSTER_NAME}" -c $TMPFILE;
print_end_phase
print_start_phase "Ob-deploy Create Tenant"
create_tenant_cmd="obd cluster tenant create ${OB_CLUSTER_NAME} -n ${OB_TENANT_NAME}"
if ! [ -z "${OB_TENANT_MINI_CPU}" ]; then
create_tenant_cmd="${create_tenant_cmd} --min-cpu=${OB_TENANT_MINI_CPU}"
fi;
if ! [ -z "${OB_TENANT_MEMORY_SIZE}" ]; then
create_tenant_cmd="${create_tenant_cmd} --memory-size=${OB_TENANT_MEMORY_SIZE}"
fi;
if ! [ -z "${OB_TENANT_LOG_DISK_SIZE}" ]; then
create_tenant_cmd="${create_tenant_cmd} --log-disk-size=${OB_TENANT_LOG_DISK_SIZE}"
fi;
eval ${create_tenant_cmd}
if [ $? -ne 0 ]; then
deploy_failed
fi
if [ "x${MODE}" != "xSLIM" ]; then
obclient -h127.1 -uroot@${OB_TENANT_NAME} -A -P${OB_MYSQL_PORT} < init_tenant_user.sql
else
run_custom_scripts /root/boot/init.d
fi
print_end_phase
fi
if [ $? -ne 0 ]; then
deploy_failed
fi

View File

@ -1,4 +1,4 @@
MINI_MODE=${MINI_MODE:-MINI}
MODE=${MODE:-MINI}
EXIT_WHILE_ERROR=${EXIT_WHILE_ERROR:-true}
OB_HOME_PATH="/root/ob"
OB_MYSQL_PORT="2881"

View File

@ -0,0 +1,126 @@
#!/bin/bash
MODE_FLAG=$1 # -L | --local | -R | --remote
REMOTE_VERSION_OR_LOCAL_RPM_PATH=$2
LOCAL_LIB_RPM_PATH=$3
REMOTE_VERSION_OR_LOCAL_RPM_NAME=""
LOCAL_LIB_RPM_NAME=""
TMP_INIT_STORE_PY_SCRIPT="init_store_for_fast_start.tmp.py"
ACTUAL_INIT_STORE_PY_SCRIPT="./fast_boot_docker_build_prepare/init_store_for_fast_start.py"
CWD=$(cd `dirname $0`;pwd)
cd "${CWD}"
function local_rpm_build() {
if [ ! -e ${REMOTE_VERSION_OR_LOCAL_RPM_PATH} ]; then
echo "local rpm is not exist"
exit -1
fi
if [ ! -e ${LOCAL_LIB_RPM_PATH} ]; then
echo "local lib rpm is not exist"
exit -1
fi
cp ${REMOTE_VERSION_OR_LOCAL_RPM_PATH} ./fast_boot_docker_build_prepare
cp ${REMOTE_VERSION_OR_LOCAL_RPM_PATH} .
cp ${LOCAL_LIB_RPM_PATH} ./fast_boot_docker_build_prepare
cp ${LOCAL_LIB_RPM_PATH} .
REMOTE_VERSION_OR_LOCAL_RPM_NAME=$(basename ${REMOTE_VERSION_OR_LOCAL_RPM_PATH})
LOCAL_LIB_RPM_NAME=$(basename ${LOCAL_LIB_RPM_PATH})
cd fast_boot_docker_build_prepare && \
docker build --build-arg LOCAL_RPM="${REMOTE_VERSION_OR_LOCAL_RPM_NAME}" --build-arg LOCAL_LIB_RPM="${LOCAL_LIB_RPM_NAME}" -t raw_observer .
if [ $? == 0 ]; then
echo "================== build prepare docker ok ==============="
else
echo "================== build prepare docker failed ==============="
exit -1
fi
cd "${CWD}" && mkdir -p ${CWD}/boot/etc
docker run -it -v ${CWD}/boot:/root/dest raw_observer
if [ $? == 0 ]; then
echo "================== prepare docker run ok ==============="
else
echo "================== prepare docker run failed ==============="
rm -rf ${CWD}/boot/etc
rm -rf ${CWD}/boot/store.tar.gz
exit -1
fi
cd "${CWD}"
docker build --build-arg LOCAL_RPM="${REMOTE_VERSION_OR_LOCAL_RPM_NAME}" --build-arg LOCAL_LIB_RPM="${LOCAL_LIB_RPM_NAME}" -t observer .
if [ $? == 0 ]; then
echo "================== fast boot docker build ok ==============="
else
echo "================== fast boot docker build failed ==============="
exit -1
fi
}
function remote_rpm_build() {
# In remote mode the second argument is the version string, so pass it through directly.
cd fast_boot_docker_build_prepare && \
docker build --build-arg VERSION="${REMOTE_VERSION_OR_LOCAL_RPM_PATH}" -t raw_observer .
if [ $? == 0 ]; then
echo "================== build prepare docker ok ==============="
else
echo "================== build prepare docker failed ==============="
exit -1
fi
cd "${CWD}" && mkdir -p ${CWD}/boot/etc
docker run -it -v ${CWD}/boot:/root/dest raw_observer
if [ $? == 0 ]; then
echo "================== prepare docker run ok ==============="
else
echo "================== prepare docker run failed ==============="
rm -rf ${CWD}/boot/etc
rm -rf ${CWD}/boot/store.tar.gz
exit -1
fi
cd "${CWD}"
docker build --build-arg VERSION="${REMOTE_VERSION_OR_LOCAL_RPM_NAME}" -t observer .
if [ $? == 0 ]; then
echo "================== fast boot docker build ok ==============="
else
echo "================== fast boot docker build failed ==============="
exit -1
fi
}
source ./boot/_env
if [ "x${MODE}" != "xSTANDALONE" ]; then
echo "please set MODE to STANDALONE for building fast boot docker"
exit -1
fi
OS=`uname`
cp ${TMP_INIT_STORE_PY_SCRIPT} ${ACTUAL_INIT_STORE_PY_SCRIPT}
if [ "$OS" == 'Darwin' ]; then
sed -i '' -e "s/@OB_MYSQL_PORT@/${OB_MYSQL_PORT}/g" ${ACTUAL_INIT_STORE_PY_SCRIPT}
sed -i '' -e "s/@OB_RPC_PORT@/${OB_RPC_PORT}/g" ${ACTUAL_INIT_STORE_PY_SCRIPT}
sed -i '' -e "s/@OB_TENANT_NAME@/${OB_TENANT_NAME}/g" ${ACTUAL_INIT_STORE_PY_SCRIPT}
else
sed -i'' -e "s/@OB_MYSQL_PORT@/${OB_MYSQL_PORT}/g" ${ACTUAL_INIT_STORE_PY_SCRIPT}
sed -i'' -e "s/@OB_RPC_PORT@/${OB_RPC_PORT}/g" ${ACTUAL_INIT_STORE_PY_SCRIPT}
sed -i'' -e "s/@OB_TENANT_NAME@/${OB_TENANT_NAME}/g" ${ACTUAL_INIT_STORE_PY_SCRIPT}
fi
case $MODE_FLAG in
-L | --local)
local_rpm_build
if [ $? != 0 ]; then
echo "use local rpm build docker failed"
exit -1
fi
;;
-R | --remote)
remote_rpm_build
if [ $? != 0 ]; then
echo "use remote rpm build docker failed"
exit -1
fi
;;
esac

View File

@ -0,0 +1,31 @@
FROM openanolis/anolisos
# docker build --build-arg LOCAL_RPM="oceanbase-ce-4.3.0.0-1.alios7.aarch64.rpm" --build-arg LOCAL_LIB_RPM="oceanbase-ce-libs-4.3.0.0-1.alios7.aarch64.rpm" -t raw_observer .
# Maybe another default version?
ARG VERSION="4.3.0.0"
ARG LOCAL_RPM
ARG LOCAL_LIB_RPM
RUN yum install -y yum-utils && \
yum-config-manager --add-repo https://mirrors.aliyun.com/oceanbase/OceanBase.repo && \
sed -i 's/$releasever/7/' /etc/yum.repos.d/OceanBase.repo && \
yum install -y libaio mysql && \
yum clean all
RUN mkdir -p /root/pkg && mkdir -p /root/store && mkdir -p /root/dest && \
if [ "${LOCAL_RPM}" == "" ]; then \
yum install -y --downloadonly --downloaddir=. oceanbase-ce-${VERSION}.el7 oceanbase-ce-libs-${VERSION}.el7; \
fi && \
yum clean all
COPY ${LOCAL_RPM} /root/pkg
COPY ${LOCAL_LIB_RPM} /root/pkg
COPY boot /root/boot/
ENV PATH /root/boot:$PATH
ENV LD_LIBRARY_PATH /home/admin/oceanbase/lib:/root/ob/lib:$LD_LIBRARY_PATH
ENV BOOT_LOCAL_RPM ${LOCAL_RPM}
ENV BOOT_LOCAL_LIB_RPM ${LOCAL_LIB_RPM}
WORKDIR /root
CMD ["sh", "-c", "_boot ${BOOT_LOCAL_RPM} ${BOOT_LOCAL_LIB_RPM}"]

View File

@ -0,0 +1,34 @@
#!/bin/bash
LOCAL_RPM=$1
LOCAL_LIB_RPM=$2
CWD=$(cd `dirname $0`;pwd)
cd "${CWD}"
echo "================== prepare python env ... =================="
yum install -y wget python3 && wget https://bootstrap.pypa.io/pip/3.6/get-pip.py && \
python3 ./get-pip.py && pip install pymysql
if [ $? == 0 ]; then
echo "================== prepare python env ok =================="
else
echo "================== prepare python env failed =================="
exit -1
fi
echo "================== install oceanbase rpm ... =================="
cd /root/pkg && yum install -y ${LOCAL_RPM} && yum install -y ${LOCAL_LIB_RPM}
if [ $? == 0 ]; then
echo "================== install oceanbase rpm ok =================="
else
echo "================== install oceanbase rpm failed =================="
exit -1
fi
cd "${CWD}"
python3 ./init_store_for_fast_start.py /home/admin/oceanbase/bin/observer . /root/dest /root/dest/etc -d /root/store
if [ $? == 0 ]; then
echo "================== prepare fast boot finish ===================="
else
echo "================== prepare fast boot failed ===================="
exit -1
fi

View File

@ -0,0 +1,32 @@
STANDALONE_OB_BIN_PATH=$1
STANDALONE_OB_HOME_PATH=$2
STANDALONE_OB_DATA_PATH=$3
COMMAND=$4
NEED_CREATE_SOFT_LINK=$5
function clear_env() {
if [ "${NEED_CREATE_SOFT_LINK}" != "" ]; then
rm -rf ${STANDALONE_OB_HOME_PATH}/observer
fi
rm -rf ${STANDALONE_OB_HOME_PATH}/audit ${STANDALONE_OB_HOME_PATH}/etc* \
${STANDALONE_OB_HOME_PATH}/log ${STANDALONE_OB_HOME_PATH}/run \
${STANDALONE_OB_HOME_PATH}/wallet
rm -rf ${STANDALONE_OB_DATA_PATH}
}
function build_env() {
if [ "${NEED_CREATE_SOFT_LINK}" != "" ]; then
ln -s ${STANDALONE_OB_BIN_PATH} ${STANDALONE_OB_HOME_PATH}/observer
fi
mkdir ${STANDALONE_OB_DATA_PATH}
cd ${STANDALONE_OB_DATA_PATH} && mkdir clog slog sstable
}
case $COMMAND in
-C | --clear)
clear_env
;;
-B | --build)
build_env
;;
esac

View File

@ -0,0 +1,161 @@
import pymysql as mysql
import argparse
import time
import datetime
import subprocess
import os
import logging
def kill_server():
kill_observer_cmd = "ps -ef | grep observer | grep -v grep | grep -v init_store_for_fast_start.py | awk '{print $2}' | xargs kill -9"
kill_res = subprocess.call(kill_observer_cmd, shell=True)
if kill_res != 0:
logging.warn("kill observer failed")
exit(-1)
logging.info("kill observer ok")
def check_file_or_path_exist(bin_abs_path, home_abs_path, store_tar_file_path, etc_dest_dir):
if not os.path.isfile(bin_abs_path):
logging.warn("invalid bin path")
return False
if not os.path.isdir(home_abs_path):
logging.warn("invalid home path")
return False
if not os.path.isdir(store_tar_file_path):
logging.warn("invalid store tar file path")
return False
if not os.path.isdir(etc_dest_dir):
logging.warn("invalid etc dest dir")
return False
return True
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
cur_path = os.curdir
cur_path = os.path.abspath(cur_path)
logging.info("=================== cur_path: %s ==============" % cur_path)
parser = argparse.ArgumentParser()
parser.add_argument("observer_bin_path", type=str, help="the path of observer binary file")
parser.add_argument("observer_home_path", type=str, help="the path of sys log / config file / sql.sock / audit info")
parser.add_argument("store_tar_file_dir", type=str, help="store dir zip target dir")
parser.add_argument("etc_dest_dir", type=str, help="the dest dir to save etc config files")
parser.add_argument("--only_build_env", action='store_true', help="build env & start observer without bootstrap and basic check")
parser.add_argument("-p", dest="mysql_port", type=str, default="@OB_MYSQL_PORT@")
parser.add_argument("-P", dest="rpc_port", type=str, default="@OB_RPC_PORT@")
parser.add_argument("-z", dest="zone", type=str, default="zone1")
parser.add_argument("-c", dest="cluster_id", type=str, default="1")
parser.add_argument("-d", dest="data_path", type=str, default="/data/store")
parser.add_argument("-i", dest="devname", type=str, default="lo")
parser.add_argument("-r", dest="rootservice", type=str, default="127.0.0.1:@OB_RPC_PORT@")
parser.add_argument("-I", dest="ip", type=str, default="127.0.0.1")
parser.add_argument("-l", dest="log_level", type=str, default="INFO")
parser.add_argument("-o", dest="opt_str", type=str, default="__min_full_resource_pool_memory=2147483648,memory_limit=6G,system_memory=1G,datafile_size=256M,log_disk_size=5G,cpu_count=16")
parser.add_argument("-N", dest="daemon", type=str, default="1")
parser.add_argument("--tenant_name", type=str, default="@OB_TENANT_NAME@")
parser.add_argument("--max_cpu", type=float, default=14.0)
parser.add_argument("--min_cpu", type=float, default=14.0)
parser.add_argument("--memory_size", type=int, default=3221225472)
parser.add_argument("--log_disk_size", type=int, default=3221225472)
args = parser.parse_args()
bin_abs_path = os.path.abspath(args.observer_bin_path)
home_abs_path = os.path.abspath(args.observer_home_path)
data_abs_path = os.path.abspath(args.data_path)
store_tar_file_path = os.path.abspath(args.store_tar_file_dir)
etc_dest_dir = os.path.abspath(args.etc_dest_dir)
if not check_file_or_path_exist(bin_abs_path, home_abs_path, store_tar_file_path, etc_dest_dir):
logging.warn("check file / path exist failed")
exit(-1)
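# Pass the extra "true" flag so env.sh also creates/removes the observer soft link, but only when the binary does not already live at <observer_home_path>/observer.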
rebuild_env_cmd = "sh ./env.sh %s %s %s -C true && sh ./env.sh %s %s %s -B true" % (bin_abs_path, home_abs_path, data_abs_path, \
bin_abs_path, home_abs_path, data_abs_path) if bin_abs_path != home_abs_path + "/observer" else \
"sh ./env.sh %s %s %s -C && sh ./env.sh %s %s %s -B" % (bin_abs_path, home_abs_path, data_abs_path, \
bin_abs_path, home_abs_path, data_abs_path)
# prepare environment for observer
env_prepare = subprocess.call(rebuild_env_cmd, shell=True)
if env_prepare != 0:
logging.warn("prepare env failed")
exit(-1)
# prepare observer start parameters
daemon_option = "-N" if args.daemon=="1" else ""
observer_args = "-p %s -P %s -z %s -c %s -d %s -i %s -r %s -I %s -l %s -o %s %s" % (args.mysql_port, args.rpc_port, args.zone, \
args.cluster_id, data_abs_path, args.devname, \
args.rootservice, args.ip, args.log_level, args.opt_str, \
daemon_option)
os.chdir(home_abs_path)
observer_cmd = "./observer %s" % (observer_args)
subprocess.Popen(observer_cmd, shell=True)
# bootstrap observer
time.sleep(4)
try:
db = mysql.connect(host=args.ip, user="root", port=int(args.mysql_port), passwd="")
cursor = db.cursor(cursor=mysql.cursors.DictCursor)
logging.info('connection success!')
if not args.only_build_env:
logging.info("waiting for bootstrap...")
bootstrap_begin = datetime.datetime.now()
cursor.execute("ALTER SYSTEM BOOTSTRAP ZONE '%s' SERVER '%s'" % (args.zone, args.rootservice))
bootstrap_end = datetime.datetime.now()
logging.info('bootstrap success: %s ms' % ((bootstrap_end - bootstrap_begin).total_seconds() * 1000))
# check server status
cursor.execute("select * from oceanbase.__all_server")
server_status = cursor.fetchall()
if len(server_status) != 1 or server_status[0]['status'] != 'ACTIVE':
logging.warn("get server status failed")
exit(-1)
logging.info('check server status ok')
# create test tenant
cursor.execute("create resource unit %s_unit max_cpu %s, memory_size %s, min_cpu %s, log_disk_size %s" % ( \
args.tenant_name, args.max_cpu, args.memory_size, args.min_cpu, args.log_disk_size))
cursor.execute("create resource pool %s_pool unit='%s_unit', unit_num=1, zone_list=('%s')" % ( \
args.tenant_name, args.tenant_name, args.zone))
logging.info("waiting for create tenant...")
create_tenant_begin = datetime.datetime.now()
cursor.execute("create tenant %s replica_num=1,zone_list=('%s'),primary_zone='RANDOM',resource_pool_list=('%s_pool') set ob_tcp_invited_nodes='%%', ob_compatibility_mode = 'mysql'" % ( \
args.tenant_name, args.zone, args.tenant_name))
create_tenant_end = datetime.datetime.now()
logging.info('create tenant success: %s ms' % ((create_tenant_end - create_tenant_begin).total_seconds() * 1000))
# grant privilege
cursor.execute("CREATE USER '%s'@'%%'" % (args.tenant_name))
cursor.execute("GRANT ALL ON *.* TO '%s'@'%%'" % (args.tenant_name))
logging.info("grant privilege success")
db.close()
except mysql.err.Error as e:
logging.warn("deploy observer failed: %s" % e)
kill_server()
exit(-1)
# stop observer
kill_server()
# build store tar file
build_store_tar_cmd = "cd %s/clog/log_pool && ls | grep '[0-9]' | xargs rm && cd %s/.. && \
tar -Sczvf %s/store.tar.gz ./store" % (data_abs_path, data_abs_path, store_tar_file_path)
build_res = subprocess.call(build_store_tar_cmd, shell=True)
if build_res != 0:
logging.warn("build store tar file failed")
exit(-1)
logging.info("build store tar file ok")
# copy config files to etc_dest_dir
cp_config_cmd = "cp -r %s/etc/* %s" % (home_abs_path, etc_dest_dir)
cp_config_res = subprocess.call(cp_config_cmd, shell=True)
if cp_config_res != 0:
logging.warn("cp config failed")
exit(-1)
logging.info("cp config ok")
# clean env
os.chdir(cur_path)
clean_env_cmd = "sh ./env.sh %s %s %s -C true" % (bin_abs_path, home_abs_path, data_abs_path) if bin_abs_path != home_abs_path + "/observer" else \
"sh ./env.sh %s %s %s -C" % (bin_abs_path, home_abs_path, data_abs_path)
clean_res = subprocess.call(clean_env_cmd, shell=True)
if clean_res != 0:
logging.warn("clean env failed")
exit(-1)
logging.info("clean all env ok")