patch 4.0
104
script/dooba/README.md
Executable file
@@ -0,0 +1,104 @@
# Description

The latest stable version of dooba is 1.0.

`dooba` is an easy-to-use tool for monitoring an OceanBase cluster, written for OceanBase admins. It is built on the Python curses library and gives a powerful, straightforward view of cluster status.

dooba's UI is somewhat inspired by other curses programs such as ncmpcpp, htop, wicd-curses and weechat-curses.

More features will land in dooba before long. Here is a short list of what it offers today.
# Features

1. Select a dataid and cluster from a list.
2. Automatically resize widgets to fit the terminal size.
3. Monitor SQL, ChunkServer and UpdateServer statistics for each cluster.
4. SSH into any server, or connect to it with the mysql client, in one step.
5. Multi-cluster support, with a simple switch shortcut: 'c'.
6. Machine info monitoring (online environments only).
7. Many other shortcut keys for widgets.
# The Next Step

1. Preview server errors/warnings, so DBAs no longer need to ssh into servers to find out which one is reporting errors.
2. Server manipulation: change log level, stop a server, and so on.
3. Integrate with `deploy`, a powerful dev tool written by Yuanqi.
# How to use it?

```sh
dooba -h <LMS_HOST> -p <LMS_PORT>
dooba --dataid=DATA_ID
dooba -?
dooba
```

# Change Log

Please see the header of the dooba script file.
# Screenshot

## version 1.0

### dooba offline index

![image](screenshot/v1_0-offline.png)

### dooba machine

![image](screenshot/v1_0-machine.png)

### dooba help

![image](screenshot/v1_0-help.png)

## version 0.4

### dooba shell

![image](screenshot/v0_4-shell.png)

### dooba sql

![image](screenshot/v0_4-sql.png)

### dooba UpdateServer

![image](screenshot/v0_4-ups.png)

### dooba ChunkServer

![image](screenshot/v0_4-cs.png)

## version 0.2, 0.3

### Gallery of OceanBase

![image](screenshot/v0_2-gallery.png)

### SQL of OceanBase

![image](screenshot/v0_2-sql.png)

### UpdateServer of OceanBase

![image](screenshot/v0_2-ups.png)

### ChunkServer of OceanBase

![image](screenshot/v0_2-cs.png)

## version 0.1

![image](screenshot/v0_1.png)
22
script/dooba/docs/start.dot
Executable file
@@ -0,0 +1,22 @@
digraph start_flow {
    start -> "IP/PORT given?"
    "IP/PORT given?" -> "check IP/PORT" [label = "yes"]
    "check IP/PORT" -> DONE [label = "success"]
    "check IP/PORT" -> FAIL [label = "failure"]
    "IP/PORT given?" -> "dataID given?" [label = "no"]
    "dataID given?" -> "fetch lms list" [label = "yes"]
    "fetch lms list" -> "check IP/PORT" [label = "success"]
    "fetch lms list" -> FAIL [label = "failure"]
    "dataID given?" -> "fetch dataID list" [label = "no"]
    "fetch dataID list" -> "user selects dataID" [label = "success"]
    "fetch dataID list" -> FAIL [label = "failure"]
    "user selects dataID" -> "fetch lms list" [label = "yes"]
    "user selects dataID" -> FAIL [label = "no"]

    start [shape=circle, style=filled, fillcolor=cyan]
    {
        FAIL [shape=circle, style=filled, fillcolor=red];
        DONE [shape=circle, style=filled, fillcolor=green];
        rank=same;
    }
}
BIN
script/dooba/docs/start.png
Executable file
After Width: | Height: | Size: 67 KiB |
55
script/dooba/docs/working_flow.dot
Executable file
@@ -0,0 +1,55 @@
digraph dooba {
    rankdir=LR;
    graph [fontsize=10 fontname="Verdana" compound=true];
    node [shape=record fontsize=10 fontname="Verdana"];
    edge [fontsize=10 fontname="Verdana"];

    subgraph cluster_gallery {
        label="dooba v0.1";
        style=filled;
        color=cyan;
        node [style=filled, color=white];
        gallery -> {sql_rt, sql_cnt, cs_rt, ups_rt} [label = widget];
    }
    subgraph cluster_ups {
        style=filled;
        color=lightgrey;
        node [style=filled, color=white];
        ups -> {ups_cnt, ups_queue_rt, ups_commit_rt, ups_flush_rt} [label = widget];
    }
    subgraph cluster_cs {
        style=filled;
        color=lightgrey;
        node [style=filled, color=white];
        cs -> {cs_cache, cs_io_cnt, cs_get_rt, cs_scan_rt} [label = widget];
    }
    subgraph cluster_ms {
        style=filled;
        color=lightgrey;
        node [style=filled, color=white];
        ms -> {ms_sql_cnt, ms_sql_rt, other} [label = widget];
    }
    subgraph cluster_rs {
        style=filled;
        color=lightgrey;
        node [style=filled, color=white];
        rs -> {} [label=widget];
    }
    subgraph cluster_custom {
        style=filled;
        color=lightgrey;
        node [style=filled, color=white];
        custom -> {} [label=widget];
    }

    dooba -> {header, main, status_bar};
    // lhead clips each edge head at the cluster border (compound=true above).
    main -> gallery [lhead=cluster_gallery];
    main -> ups [lhead=cluster_ups];
    main -> cs [lhead=cluster_cs];
    main -> ms [lhead=cluster_ms];
    main -> rs [lhead=cluster_rs];
    main -> custom [lhead=cluster_custom];

    header [shape=record, label="<f0> [header] | appname | ... | ..."];
    status_bar [shape=record, label="<f0> [status_bar] | run_time | up_time | cluster | ..."];
    { rank=same; header; status_bar; }
}
BIN
script/dooba/docs/working_flow.png
Executable file
After Width: | Height: | Size: 68 KiB |
3181
script/dooba/dooba
Executable file
BIN
script/dooba/screenshot/v0_1.png
Executable file
After Width: | Height: | Size: 21 KiB |
BIN
script/dooba/screenshot/v0_2-cs.png
Executable file
After Width: | Height: | Size: 16 KiB |
BIN
script/dooba/screenshot/v0_2-gallery.png
Executable file
After Width: | Height: | Size: 12 KiB |
BIN
script/dooba/screenshot/v0_2-sql.png
Executable file
After Width: | Height: | Size: 15 KiB |
BIN
script/dooba/screenshot/v0_2-ups.png
Executable file
After Width: | Height: | Size: 17 KiB |
BIN
script/dooba/screenshot/v0_4-cs.png
Executable file
After Width: | Height: | Size: 150 KiB |
BIN
script/dooba/screenshot/v0_4-shell.png
Executable file
After Width: | Height: | Size: 70 KiB |
BIN
script/dooba/screenshot/v0_4-sql.png
Executable file
After Width: | Height: | Size: 138 KiB |
BIN
script/dooba/screenshot/v0_4-ups.png
Executable file
After Width: | Height: | Size: 68 KiB |
BIN
script/dooba/screenshot/v1_0-cs.png
Executable file
After Width: | Height: | Size: 141 KiB |
BIN
script/dooba/screenshot/v1_0-gallery.png
Executable file
After Width: | Height: | Size: 53 KiB |
BIN
script/dooba/screenshot/v1_0-help.png
Executable file
After Width: | Height: | Size: 87 KiB |
BIN
script/dooba/screenshot/v1_0-machine.png
Executable file
After Width: | Height: | Size: 106 KiB |
BIN
script/dooba/screenshot/v1_0-offline.png
Executable file
After Width: | Height: | Size: 38 KiB |
BIN
script/dooba/screenshot/v1_0-online.png
Executable file
After Width: | Height: | Size: 32 KiB |
BIN
script/dooba/screenshot/v1_0-select_cluster.png
Executable file
After Width: | Height: | Size: 34 KiB |
BIN
script/dooba/screenshot/v1_0-sql.png
Executable file
After Width: | Height: | Size: 131 KiB |
BIN
script/dooba/screenshot/v1_0-ups.png
Executable file
After Width: | Height: | Size: 110 KiB |
192
script/import/ob_import.py
Executable file
@@ -0,0 +1,192 @@
#!/usr/bin/env python

import os
import sys
import MySQLdb
import multiprocessing
import traceback

param = {}
param['data_file'] = sys.argv[1]
param['delima'] = '\1'   # field delimiter in the data file
param['nop'] = '\2'      # sentinel field: bind as @nop
param['null'] = '\3'     # sentinel field: bind as @null

param['table_name'] = 't1'
param['column_count'] = 4
param['host'] = "10.232.38.8"
param['port'] = 35999
param['user'] = "admin"
param['passwd'] = "admin"
param['batch_count'] = 1000
param['concurrency'] = 20

# (column index in the data file, column name in the table)
id_column_map = [
    (0, 'c1'),
    (1, 'c2'),
    (2, 'c3'),
    (3, 'c4'),
]

param['id_column_map'] = id_column_map

for k, v in param.items():
    print k, v

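# Input format sketch (values illustrative): each line holds column_count
# fields joined by the '\1' delimiter. A field equal to '\2' is bound as @nop
# and one equal to '\3' as @null; these two variables are never set by this
# script, so presumably the server gives them special nop/NULL treatment.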
class MyExcept(BaseException):
    pass

def get_column_type(cursor, table_name):
    # Map column name -> data type for the given table (currently unused).
    column_type_map = {}
    if cursor.execute("select table_id from __first_tablet_entry where table_name = '%s'" % table_name) != 1:
        raise MyExcept()
    table_id = cursor.fetchone()[0]
    if cursor.execute("select column_name, data_type from __all_all_column where table_id = %d" % table_id) <= 0:
        raise MyExcept()
    for item in cursor.fetchall():
        column_type_map[item[0]] = item[1]
    return column_type_map

def add_value(count, line_num, tokens, set_values, execute_values):
    # Translate one input line into user variables for the prepared statement.
    for id_column in id_column_map:
        id = id_column[0]
        if tokens[id] == param['nop']:
            execute_values.append('@nop')
        elif tokens[id] == param['null']:
            execute_values.append('@null')
        else:
            var_name = '@a%d' % count
            execute_values.append(var_name)
            set_values.append("%s='%s'" % (var_name, tokens[id]))
            count += 1
    return count

def gen_replace_sql(**param):
    # Build one "prepare" statement that replaces batch_count rows at a time.
    values = "(%s)" % ','.join(['?' for i in xrange(0, len(id_column_map))])
    values = ",".join([values for i in xrange(0, param["batch_count"])])
    column_def = ','.join([i[1] for i in id_column_map])
    param["values"] = values
    param["column_def"] = column_def
    return "prepare p1 from replace into %(table_name)s(%(column_def)s) values%(values)s" % param
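# For batch_count=2 and the c1..c4 columns above, gen_replace_sql produces:
#   prepare p1 from replace into t1(c1,c2,c3,c4) values(?,?,?,?),(?,?,?,?)
# Each batch is then driven by two statements per call (see worker below):
#   set @a0='v0', @a1='v1', ...          (bind user variables)
#   execute p1 using @a0, @a1, ...       (one variable per column per row)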

def worker():
    # Consumer process: pull batches of lines from the queue and replace them
    # into the table through the prepared statement.
    conn = MySQLdb.connect(host=param["host"], port=param["port"], user=param["user"], passwd=param["passwd"])
    conn.autocommit(True)
    cursor = conn.cursor()
    replace_sql = gen_replace_sql(**param)
    cursor.execute(replace_sql)

    print "work", os.getpid()

    while True:
        lines = q.get()
        if lines is None:   # sentinel: no more batches
            break
        if len(lines) != param['batch_count']:
            raise MyExcept()
        set_values = []
        execute_values = []
        count = 0
        for i in xrange(0, len(lines)):
            line = lines[i]
            tokens = line.split(param['delima'])
            if len(tokens) != param['column_count']:
                print tokens
                raise MyExcept()
            count = add_value(count, i, tokens, set_values, execute_values)
        set_sql = "set " + ",".join(set_values)
        execute_sql = "execute p1 using " + ",".join(execute_values)

        try:
            cursor.execute(set_sql)
            cursor.execute(execute_sql)
        except:
            print set_sql
            print execute_sql
            print traceback.format_exc()

    cursor.close()
    conn.close()

def import_lines(lines):
    # Import the final, short batch (fewer than batch_count lines) in-process.
    conn = MySQLdb.connect(host=param["host"], port=param["port"], user=param["user"], passwd=param["passwd"])
    conn.autocommit(True)
    cursor = conn.cursor()
    param['batch_count'] = len(lines)
    replace_sql = gen_replace_sql(**param)
    cursor.execute(replace_sql)

    set_values = []
    execute_values = []
    count = 0
    for i in xrange(0, len(lines)):
        line = lines[i]
        tokens = line.split(param['delima'])
        if len(tokens) != param['column_count']:
            print tokens
            raise MyExcept()
        count = add_value(count, i, tokens, set_values, execute_values)
    set_sql = "set " + ",".join(set_values)
    execute_sql = "execute p1 using " + ",".join(execute_values)

    try:
        cursor.execute(set_sql)
        cursor.execute(execute_sql)
    except:
        print set_sql
        print execute_sql
        print traceback.format_exc()

    cursor.close()
    conn.close()

f = open(param['data_file'])
line_count = 0
# Bounded queue: the reader blocks once workers fall behind, capping memory use.
q = multiprocessing.Queue(param['concurrency'] * 2)

workers = []
for i in xrange(param['concurrency']):
    t = multiprocessing.Process(target=worker)
    workers.append(t)
    t.daemon = True
    t.start()

print "main", os.getpid()

while True:
    eof = False
    lines = []
    for i in xrange(0, param['batch_count']):
        line = f.readline()
        if 0 == len(line):
            eof = True
            break
        lines.append(line.strip())
    if eof:
        if len(lines) != 0:
            import_lines(lines)   # leftover partial batch
        break
    line_count += len(lines)
    if line_count % 10000 == 0:
        print line_count
    if len(lines) == param['batch_count']:
        q.put(lines, timeout=3)

# One None sentinel per worker tells it to exit.
for i in xrange(0, param['concurrency']):
    q.put(None)

f.close()

for t in workers:
    t.join()
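# Usage sketch (data file name hypothetical): python ob_import.py data.txt
# where data.txt holds '\1'-delimited rows matching id_column_map above;
# host/port/table settings are edited directly in the param dict.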
258
script/plan/outline.py
Executable file
@@ -0,0 +1,258 @@
#!/usr/bin/python
# coding=utf8
import argparse
import mysql.connector
import sys

class Config:
    pass

g_config = Config()

class Stat:
    def reset(self):
        self.db_count = 0
        self.outline_count = 0

    def __init__(self):
        self.reset()

g_stat = Stat()

class ImportStat:
    def reset(self):
        self.succ_db_count = 0
        self.fail_db_count = 0
        self.succ_outline_count = 0
        self.fail_outline_count = 0

    def __init__(self):
        self.reset()

g_import_stat = ImportStat()

# Arguments:
# -h -P -u -p -t -i -d
# If -t is not given, all tenants are processed.
#
# -d means dump
# -i means import
#
# Dump flow:
# Query the __all_server table for the number of servers and set the concurrency.
# Set the query timeout.
# Query the GV$OB_PLAN_CACHE_PLAN_STAT table for outline_data:
#
#   select sql_id, sql_text, outline_data from GV$OB_PLAN_CACHE_PLAN_STAT
#
# Check whether any sql_id or outline_data conflicts, and find the database
# each statement belongs to.
# Dump the result as "create outline" statements grouped by database.

def get_connect():
    return mysql.connector.connect(host=g_config.host, port=g_config.port, user=g_config.username, password=g_config.password)

def get_real_db_id(tenant_id, db_id):
    # db_id == 1 is the tenant-local id of the default database; fold the
    # tenant id into the high bits to get the globally unique database_id.
    if db_id == 1:
        return tenant_id << 40 | db_id
    else:
        return db_id
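# Example (illustrative): get_real_db_id(1, 1) == (1 << 40) | 1 == 1099511627777,
# while any db_id other than 1 is treated as already global and returned as-is.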
def get_args(args):
    # -h is taken over for "host", so argparse's built-in help is disabled.
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-h', '--host', dest='host', type=str)
    parser.add_argument('-P', '--port', dest='port', type=int)
    parser.add_argument('-u', '--username', dest='username', type=str)
    parser.add_argument('-p', '--password', dest='password', type=str)
    parser.add_argument('-t', '--tenant', dest='tenant', type=int)
    parser.add_argument('-d', '--dump', dest='dump', action='store_true')
    parser.add_argument('-i', '--import', dest='import1', action='store_true')
    ret = parser.parse_args(args)

    if ret.host is None:
        print >> sys.stderr, 'please give hostname: -h'
        return -1
    else:
        g_config.host = ret.host

    if ret.port is None:
        print >> sys.stderr, 'please give port: -P'
        return -1
    else:
        g_config.port = ret.port

    if ret.username is None:
        print >> sys.stderr, 'please give username: -u'
        return -1
    else:
        g_config.username = ret.username

    if ret.tenant is None:
        print >> sys.stderr, 'please give tenant_id: -t'
        return -1
    else:
        g_config.tenant = ret.tenant

    g_config.password = ret.password

    if not ret.dump and not ret.import1:
        print >> sys.stderr, 'please give dump or import: -d/-i'
        return -1
    elif ret.dump and ret.import1:
        print >> sys.stderr, 'only dump or import: -d/-i'
        return -1
    else:
        g_config.dump = ret.dump

    return 0

def output(name, sql_id, outline_data):
    g_stat.outline_count += 1
    print "create outline auto_gen_%s on '%s' using hint %s;" % (name, sql_id, outline_data)

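# A dumped statement looks like (sql_id and hint values illustrative):
#   create outline auto_gen_1001_testdb0 on 'D6B94EFB...' using hint /*+ index(t1 idx_c2) */;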
def check(lines):
    # All entries for one sql_id must agree on sql_text and outline_data.
    for i in range(1, len(lines)):
        if lines[i][1] != lines[0][1]:
            return False
        elif lines[i][2] != lines[0][2]:
            return False
    return True

def get_db_name(db_id):
    conn = get_connect()
    cur = conn.cursor()
    sql = "select database_name from oceanbase.__all_database where database_id = %d and tenant_id = %d" % (db_id, g_config.tenant)
    cur.execute(sql)
    rs = cur.fetchone()
    return rs[0]

def dump_db_outline(db_id, items):
    # 18446744073709551615 == 2**64 - 1, the invalid db_id: the plan was cached
    # without a current database, so no outline can be generated for it.
    if db_id == 18446744073709551615:
        for item in items:
            print >> sys.stderr, "sql_id = %s | sql_text = %s : no use database" % (item[0], item[1])
        return
    db_name = get_db_name(db_id)
    g_stat.db_count += 1
    print
    print "use %s;" % db_name
    outline_map = {}
    for line in items:
        sql_id = line[0]
        sql_text = line[1]
        outline_data = line[2]

        if sql_id in outline_map:
            outline_map[sql_id].append((sql_id, sql_text, outline_data))
        else:
            outline_map[sql_id] = [(sql_id, sql_text, outline_data)]

    count = 0
    for k, v in outline_map.items():
        name = '%d_%s%d' % (g_config.tenant, db_name, count)
        if len(v) == 1:
            output(name, v[0][0], v[0][2])
        else:
            if check(v):
                output(name, v[0][0], v[0][2])
            else:
                print >> sys.stderr, "sql_id = %s has conflict" % (v[0][0])
        count += 1

def dump_outline():
    conn = get_connect()
    cur = conn.cursor()
    cur.execute('select count(1) from oceanbase.__all_server')
    rs = cur.fetchone()
    server_count = rs[0]
    cur.execute('select db_id, sql_id, statement, outline_data from oceanbase.GV$OB_PLAN_CACHE_PLAN_STAT where tenant_id = %d order by db_id' % g_config.tenant)
    rs = cur.fetchall()
    # Rows arrive sorted by db_id; flush the accumulated items whenever it changes.
    last_db_id = 0
    items = []
    for i in range(0, len(rs)):
        db_id = get_real_db_id(1, rs[i][0])
        if db_id == last_db_id:
            items.append(rs[i][1:])
        else:
            if len(items) != 0:
                dump_db_outline(last_db_id, items)
                items = []
            last_db_id = db_id
            items.append(rs[i][1:])
    if len(items) != 0:
        dump_db_outline(last_db_id, items)
        items = []
    print >> sys.stderr, "%d database and %d outline dumped" % (g_stat.db_count, g_stat.outline_count)

# Import outlines. Check whether each outline already exists; if it does, do
# not import it again.
#
# Query the __all_outline table to verify that everything imported correctly.
#
# Report the import result:
# 1. how many outlines were imported, and how many already existed
# 2. any conflicts encountered along the way

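# The importer reads the dump from stdin, e.g. (hypothetical dump):
#   use testdb;
#   create outline auto_gen_1001_testdb0 on 'D6B94EFB...' using hint /*+ index(t1 idx_c2) */;
# Lines starting with '#' and blank lines are skipped; a failed "use" causes
# the following outline statements to be skipped until the next "use" succeeds.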
def import_outline():
    conn = get_connect()
    cur = conn.cursor()
    cur.execute("select effective_tenant_id()")
    rs = cur.fetchone()
    if rs[0] != g_config.tenant:
        print >> sys.stderr, 'tenant id not equal %d <> %d' % (rs[0], g_config.tenant)
        sys.exit(-1)

    # state == 0: the current database is usable; state == 1: the last "use"
    # failed, so skip statements until the next one succeeds.
    state = 0
    for line in sys.stdin:
        line = line.strip()
        if len(line) != 0 and line[0] != '#':
            if line[:3] == 'use':
                print >> sys.stderr, 'change database: %s' % line
                try:
                    cur.execute(line)
                    state = 0
                    g_import_stat.succ_db_count += 1
                except:
                    g_import_stat.fail_db_count += 1
                    print >> sys.stderr, 'fail to execute: %s' % line
                    state = 1
            else:
                if state == 0:
                    try:
                        cur.execute(line)
                        g_import_stat.succ_outline_count += 1
                    except:
                        print >> sys.stderr, 'fail to execute: %s' % line
                        g_import_stat.fail_outline_count += 1
                else:
                    g_import_stat.fail_outline_count += 1
                    print >> sys.stderr, 'skip to execute: %s' % line
    print >> sys.stderr, "db succ %d | db fail %d | outline succ %d | outline fail %d" % (g_import_stat.succ_db_count, g_import_stat.fail_db_count, g_import_stat.succ_outline_count, g_import_stat.fail_outline_count)

if __name__ == '__main__':
    if -1 == get_args(sys.argv[1:]):
        sys.exit(-1)

    if g_config.dump:
        # Dumping must run as the sys tenant (tenant_id 1).
        conn = get_connect()
        cur = conn.cursor()
        cur.execute("select effective_tenant_id()")
        rs = cur.fetchone()
        if rs[0] != 1:
            print >> sys.stderr, 'please use sys tenant to dump'
            sys.exit(-1)

        cur.execute("select * from oceanbase.__all_tenant where tenant_id = %d" % g_config.tenant)
        rs = cur.fetchall()
        if 1 != len(rs):
            print >> sys.stderr, 'no such tenant_id %d ' % g_config.tenant
            sys.exit(-1)

        dump_outline()
    else:
        import_outline()
32
script/sqlaudit/sqlaudit.py
Normal file
@@ -0,0 +1,32 @@
'''
example

python sqlaudit.py -h 100.69.198.71 -P 31903 -uroot -Uroot
'''

import mysql.connector
from optparse import OptionParser

# -h is reused for "host", so the default help option is disabled.
parser = OptionParser(add_help_option=False)
parser.add_option("-h", "--host", dest="host", help="host")
parser.add_option("-P", "--port", dest="port", help="port")
parser.add_option("-u", "--user", dest="user", help="user to connect as")
parser.add_option("-p", "--password", dest="password", help="password")
parser.add_option("-U", "--username", dest="username", help="user whose SQL is audited")
(options, args) = parser.parse_args()

user_name = options.username
if options.password is None:
    passwd = ''
else:
    passwd = options.password
conn = mysql.connector.connect(host=options.host, port=int(options.port), user=options.user, password=passwd)
cur = conn.cursor(dictionary=True)
last_request_time = 0
while True:
    # Poll the audit table, using request_time as a watermark so that each
    # request is printed at most once.
    cur.execute("select request_time, query_sql from oceanbase.V$OB_SQL_AUDIT where request_time > %d and user_name = '%s' limit 1000" % (last_request_time, user_name))
    rs = cur.fetchall()
    for line in rs:
        sql = line['query_sql'].strip()
        if sql.lower().startswith('select'):
            print sql
        last_request_time = line['request_time']