move test folder

wangzelin.wzl
2022-08-12 19:29:16 +08:00
parent 29e0cb7475
commit d5269307a9
419 changed files with 275972 additions and 77007 deletions


@@ -0,0 +1,228 @@
use test;
drop table if exists pg_trans_test;
create tablegroup tg1 binding true;
create table pg_trans_test1_1(id1 int, id2 int) tablegroup tg1;
create table pg_trans_test1_2(id1 int, id2 int) tablegroup tg1;
create index i1 on pg_trans_test1_1(id2) local;
desc pg_trans_test1_1;
Field Type Null Key Default Extra
id1 int(11) YES NULL
id2 int(11) YES MUL NULL
desc pg_trans_test1_2;
Field Type Null Key Default Extra
id1 int(11) YES NULL
id2 int(11) YES NULL
insert into pg_trans_test1_1 values(1, 1);
insert into pg_trans_test1_2 values(2, 2);
begin;
insert into pg_trans_test1_1 values(3, 3);
insert into pg_trans_test1_2 values(4, 4);
commit;
select * from pg_trans_test1_1;
id1 id2
1 1
3 3
select * from pg_trans_test1_2;
id1 id2
2 2
4 4
create tablegroup tg2 binding true partition by hash partitions 2;
create table pg_trans_test2_1(id1 int, id2 int) tablegroup tg2 partition by hash(id1 % 2) partitions 2;
create index i1 on pg_trans_test2_1(id2) local;
create table pg_trans_test2_2(id1 int, id2 int) tablegroup tg2 partition by hash(id1 % 2) partitions 2;
desc pg_trans_test2_1;
Field Type Null Key Default Extra
id1 int(11) YES NULL
id2 int(11) YES MUL NULL
desc pg_trans_test2_2;
Field Type Null Key Default Extra
id1 int(11) YES NULL
id2 int(11) YES NULL
insert into pg_trans_test2_1 values(5, 5);
insert into pg_trans_test2_2 values(6, 6);
select * from pg_trans_test2_1;
id1 id2
5 5
select * from pg_trans_test2_2;
id1 id2
6 6
begin;
insert into pg_trans_test2_1 values(7, 7);
insert into pg_trans_test2_2 values(8, 8);
commit;
select * from pg_trans_test2_1;
id1 id2
5 5
7 7
select * from pg_trans_test2_2;
id1 id2
6 6
8 8
begin;
insert into pg_trans_test2_1 values(7, 7);
insert into pg_trans_test2_1 values(8, 8);
insert into pg_trans_test2_2 values(9, 9);
insert into pg_trans_test2_2 values(10, 10);
commit;
select * from pg_trans_test2_1;
id1 id2
8 8
5 5
7 7
7 7
select * from pg_trans_test2_2;
id1 id2
6 6
8 8
10 10
9 9
create tablegroup tg3 binding true partition by range columns 1 (partition p0 values less than (10), partition p1 values less than(20));
create table pg_trans_test3_1(id1 int, id2 int) tablegroup tg3 partition by range columns(id1) (partition p0 values less than (10), partition p1 values less than(20));
create table pg_trans_test3_2(id1 int, id2 int) tablegroup tg3 partition by range columns(id1) (partition p0 values less than (10), partition p1 values less than(20));
desc pg_trans_test3_1;
Field Type Null Key Default Extra
id1 int(11) YES NULL
id2 int(11) YES NULL
desc pg_trans_test3_2;
Field Type Null Key Default Extra
id1 int(11) YES NULL
id2 int(11) YES NULL
insert into pg_trans_test3_1 values(5, 5);
insert into pg_trans_test3_2 values(15, 15);
select * from pg_trans_test3_1;
id1 id2
5 5
select * from pg_trans_test3_2;
id1 id2
15 15
begin;
insert into pg_trans_test3_1 values(6, 6);
insert into pg_trans_test3_2 values(8, 8);
commit;
begin;
insert into pg_trans_test3_1 values(11, 11);
insert into pg_trans_test3_2 values(12, 12);
commit;
select * from pg_trans_test3_1;
id1 id2
5 5
6 6
11 11
select * from pg_trans_test3_2;
id1 id2
8 8
12 12
15 15
begin;
insert into pg_trans_test3_1 values(1, 1);
insert into pg_trans_test3_1 values(13, 13);
insert into pg_trans_test3_2 values(2, 2);
insert into pg_trans_test3_2 values(14, 14);
commit;
select * from pg_trans_test3_1;
id1 id2
1 1
5 5
6 6
11 11
13 13
select * from pg_trans_test3_2;
id1 id2
2 2
8 8
12 12
14 14
15 15
create tablegroup tg4 binding true partition by list columns 1 (
partition p0 values in (1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
partition p1 values in (11, 12, 13, 14, 15, 16, 17, 18, 19, 20)
);
create table pg_trans_test4_1(id1 int, id2 int) tablegroup tg4 partition by list columns(id1) (
partition p0 values in (1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
partition p1 values in (11, 12, 13, 14, 15, 16, 17, 18, 19, 20)
);
create table pg_trans_test4_2(id1 int, id2 int) tablegroup tg4 partition by list columns(id1) (
partition p0 values in (1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
partition p1 values in (11, 12, 13, 14, 15, 16, 17, 18, 19, 20)
);
desc pg_trans_test4_1;
Field Type Null Key Default Extra
id1 int(11) YES NULL
id2 int(11) YES NULL
desc pg_trans_test4_2;
Field Type Null Key Default Extra
id1 int(11) YES NULL
id2 int(11) YES NULL
insert into pg_trans_test4_1 values(5, 5);
insert into pg_trans_test4_2 values(15, 15);
select * from pg_trans_test4_1;
id1 id2
5 5
select * from pg_trans_test4_2;
id1 id2
15 15
begin;
insert into pg_trans_test4_1 values(6, 6);
insert into pg_trans_test4_2 values(8, 8);
commit;
begin;
insert into pg_trans_test4_1 values(11, 11);
insert into pg_trans_test4_2 values(12, 12);
commit;
select * from pg_trans_test4_1;
id1 id2
5 5
6 6
11 11
select * from pg_trans_test4_2;
id1 id2
8 8
12 12
15 15
begin;
insert into pg_trans_test4_1 values(1, 1);
insert into pg_trans_test4_1 values(13, 13);
insert into pg_trans_test4_2 values(2, 2);
insert into pg_trans_test4_2 values(14, 14);
commit;
select * from pg_trans_test4_1;
id1 id2
1 1
5 5
6 6
11 11
13 13
select * from pg_trans_test4_2;
id1 id2
2 2
8 8
12 12
14 14
15 15
create tablegroup tg5 binding true;
create table pg_trans_test5_1 (pk int) tablegroup tg5;
create table pg_trans_test5_2 (pk int) tablegroup tg5;
insert into pg_trans_test5_1 values (1), (2), (3), (4);
insert into pg_trans_test5_2 values (1), (2), (3), (4);
select * from pg_trans_test5_1 as l join pg_trans_test5_2 as r where l.pk = r.pk;
pk pk
1 1
2 2
3 3
4 4
drop table if exists pg_trans_test1_1;
drop table if exists pg_trans_test1_2;
drop table if exists pg_trans_test2_1;
drop table if exists pg_trans_test2_2;
drop table if exists pg_trans_test3_1;
drop table if exists pg_trans_test3_2;
drop table if exists pg_trans_test4_1;
drop table if exists pg_trans_test4_2;
drop table if exists pg_trans_test5_1;
drop table if exists pg_trans_test5_2;
purge recyclebin;
drop tablegroup tg1;
drop tablegroup tg2;
drop tablegroup tg3;
drop tablegroup tg4;
drop tablegroup tg5;


@@ -0,0 +1,169 @@
drop database if exists xm_test;
drop database if exists xm_test_db1;
create database xm_test;
use xm_test;
set tx_isolation = 'SERIALIZABLE';
set autocommit = 1;
set tx_isolation = 'SERIALIZABLE';
set autocommit = 1;
create table xm_test_t1 (c1 int primary key, c2 int);
create table xm_test_t2 (c1 int primary key, c2 int);
insert into xm_test_t1 values(1,1);
insert into xm_test_t1 values(2,1);
insert into xm_test_t2 values(2,1);
begin;
select * from xm_test_t1;
c1 c2
1 1
2 1
insert into xm_test_t1 values (3, 1);
insert into xm_test_t1 values (4, 1);
select * from xm_test_t1;
c1 c2
1 1
2 1
4 1
select * from xm_test_t1;
c1 c2
1 1
2 1
3 1
commit;
begin;
select /*+read_consistency(weak) */ * from xm_test_t1;
ERROR 0A000: weak consistency under SERIALIZABLE isolation level not supported
commit;
begin;
select * from xm_test_t1;
c1 c2
1 1
2 1
3 1
4 1
insert into xm_test_t1 values (5, 1);
show databases like 'xm_%';
Database (xm_%)
xm_test
create database xm_test_db1;
show databases like 'xm_%';
Database (xm_%)
xm_test
xm_test_db1
insert into xm_test_t1 values (6, 1);
show databases like 'xm_%';
Database (xm_%)
xm_test
select * from xm_test_t1;
c1 c2
1 1
2 1
3 1
4 1
5 1
commit;
create table t_global_index (pk int primary key) partition by hash(pk) partitions 10;
insert into t_global_index values (1), (2), (3);
create index index1 on t_global_index (pk) global;
show index from t_global_index;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment Visible
t_global_index 0 PRIMARY 1 pk A NULL NULL NULL BTREE available YES
t_global_index 1 index1 1 pk A NULL NULL NULL BTREE available YES
begin;
insert into xm_test_t1 values (7, 1);
select * from xm_test_t1;
c1 c2
1 1
2 1
3 1
4 1
5 1
6 1
7 1
select table_name from oceanbase.__all_table_v2 as t, oceanbase.__all_database as d where d.database_name='xm_test' and d.database_id = t.database_id and (t.table_name='xm_test_t1' or t.table_name='xm_test_t4');
table_name
xm_test_t1
create table xm_test_t4 (pk int primary key);
insert into xm_test_t1 values (8, 1);
select * from xm_test_t1;
c1 c2
1 1
2 1
3 1
4 1
5 1
6 1
8 1
select table_name from oceanbase.__all_table_v2 as t, oceanbase.__all_database as d where d.database_name='xm_test' and d.database_id = t.database_id and (t.table_name='xm_test_t1' or t.table_name='xm_test_t4');
table_name
xm_test_t1
select /*+read_consistency(weak) */ table_name from oceanbase.__all_table_v2 as t, oceanbase.__all_database as d where d.database_name='xm_test' and d.database_id = t.database_id and (t.table_name='xm_test_t1' or t.table_name='xm_test_t4');
table_name
xm_test_t1
select * from xm_test_t1;
c1 c2
1 1
2 1
3 1
4 1
5 1
6 1
7 1
commit;
begin;
insert into xm_test_t1 values (9, 1);
select * from xm_test_t1;
c1 c2
1 1
2 1
3 1
4 1
5 1
6 1
7 1
8 1
9 1
select t.table_name from oceanbase.gv$table as t where t.database_name='xm_test' and t.table_type=3;
table_name
xm_test_t1
xm_test_t2
t_global_index
xm_test_t4
create table xm_test_t5 (pk int primary key);
insert into xm_test_t1 values (10, 1);
select * from xm_test_t1;
c1 c2
1 1
2 1
3 1
4 1
5 1
6 1
7 1
8 1
10 1
select t.table_name from oceanbase.gv$table as t where t.database_name='xm_test' and t.table_type=3;
table_name
xm_test_t1
xm_test_t2
t_global_index
xm_test_t4
xm_test_t5
select /*+read_consistency(weak) */ t.table_name from oceanbase.gv$table as t where t.database_name='xm_test' and t.table_type=3;
table_name
xm_test_t1
xm_test_t2
t_global_index
xm_test_t4
xm_test_t5
select * from xm_test_t1;
c1 c2
1 1
2 1
3 1
4 1
5 1
6 1
7 1
8 1
9 1
commit;


@@ -0,0 +1,172 @@
drop table if exists t1;
drop table if exists t2;
create table t1 (c1 int primary key, c2 int);
create table t2 (c1 int primary key, c2 int);
desc t1;
Field Type Null Key Default Extra
c1 int(11) NO PRI NULL
c2 int(11) YES NULL
desc t2;
Field Type Null Key Default Extra
c1 int(11) NO PRI NULL
c2 int(11) YES NULL
insert into t1 values(1,1);
insert into t1 values(2,1);
insert into t2 values(2,1);
select * from t1;
c1 c2
1 1
2 1
select * from t2;
c1 c2
2 1
set autocommit=1;
select /*+read_consistency(weak)+*/ * from t1;
c1 c2
1 1
2 1
select /*+read_consistency(weak)+*/ * from t1 where c1 = 1;
c1 c2
1 1
select /*+read_consistency(weak)+*/ * from t2;
c1 c2
2 1
select /*+read_consistency(weak)+*/ * from t2 where c1 = 2;
c1 c2
2 1
select /*+read_consistency(weak)+*/ * from t1 as l join t1 as r where l.c1 = r.c1;
c1 c2 c1 c2
1 1 1 1
2 1 2 1
select /*+read_consistency(weak)+*/ * from t1 join t2 where t1.c1 = t2.c1;
c1 c2 c1 c2
2 1 2 1
set autocommit=0;
select /*+read_consistency(weak)+*/ * from t1 as l join t1 as r where l.c1 = r.c1;
c1 c2 c1 c2
1 1 1 1
2 1 2 1
select /*+read_consistency(weak)+*/ * from t1 join t2 where t1.c1 = t2.c1;
c1 c2 c1 c2
2 1 2 1
select /*+read_consistency(weak)+*/* from t1;
c1 c2
1 1
2 1
select /*+read_consistency(weak)+*/* from t2;
c1 c2
2 1
commit;
begin;
insert into t1 values(3, 1);
insert into t2 values(3, 1);
select /*+read_consistency(weak)+*/* from t1;
c1 c2
1 1
2 1
3 1
select /*+read_consistency(weak)+*/* from t2;
c1 c2
2 1
3 1
commit;
begin;
select /*+read_consistency(weak)+*/* from t1;
c1 c2
1 1
2 1
3 1
select /*+read_consistency(weak)+*/* from t2;
c1 c2
2 1
3 1
insert into t1 values(4, 1);
ERROR 0A000: different consistency type in one transaction not supported
select * from t1;
c1 c2
1 1
2 1
3 1
insert into t2 values(4, 1);
ERROR 0A000: different consistency type in one transaction not supported
select * from t2;
c1 c2
2 1
3 1
commit;
begin;
select /*+read_consistency(weak)+*/* from t1 for update;
c1 c2
1 1
2 1
3 1
select /*+read_consistency(weak)+*/* from t2;
c1 c2
2 1
3 1
insert into t1 values(5, 1);
select * from t1;
c1 c2
1 1
2 1
3 1
5 1
insert into t2 values(5, 1);
select * from t2;
c1 c2
2 1
3 1
5 1
commit;
begin;
select /*+read_consistency(weak)+*/* from t2;
c1 c2
2 1
3 1
5 1
select /*+read_consistency(weak)+*/* from t1 for update;
ERROR 0A000: different consistency type in one transaction not supported
insert into t1 values(6, 1);
ERROR 0A000: different consistency type in one transaction not supported
select * from t1;
c1 c2
1 1
2 1
3 1
5 1
insert into t2 values(6, 1);
ERROR 0A000: different consistency type in one transaction not supported
select * from t2;
c1 c2
2 1
3 1
5 1
commit;
begin;
select /*+read_consistency(strong)*/* from t1;
c1 c2
1 1
2 1
3 1
5 1
select /*+read_consistency(weak)*/* from t1;
c1 c2
1 1
2 1
3 1
5 1
commit;
begin;
select /*+read_consistency(weak)*/* from t1;
c1 c2
1 1
2 1
3 1
5 1
select * from t1;
c1 c2
1 1
2 1
3 1
5 1
commit;


@@ -0,0 +1,28 @@
show global variables like 'ob_timestamp_service';
Variable_name Value
ob_timestamp_service LTS
set global ob_timestamp_service='GTS';
ERROR 42000: Variable 'ob_timestamp_service' can't be set to the value of 'GTS'
show global variables like 'ob_timestamp_service';
Variable_name Value
ob_timestamp_service LTS
show global variables like 'ob_timestamp_service';
Variable_name Value
ob_timestamp_service LTS
show global variables like 'ob_timestamp_service';
Variable_name Value
ob_timestamp_service GTS
set global ob_timestamp_service='LTS';
show global variables like 'ob_timestamp_service';
Variable_name Value
ob_timestamp_service LTS
show global variables like 'ob_timestamp_service';
Variable_name Value
ob_timestamp_service LTS
set global ob_timestamp_service='GTS';
show global variables like 'ob_timestamp_service';
Variable_name Value
ob_timestamp_service GTS
show global variables like 'ob_timestamp_service';
Variable_name Value
ob_timestamp_service GTS


@@ -0,0 +1,197 @@
--disable_query_log
set @@session.explicit_defaults_for_timestamp=off;
--enable_query_log
#owner : shanyan.g
#owner group : transaction
#description : partition group related test cases
connect (obsys,$OBMYSQL_MS0,admin,$OBMYSQL_PWD,test,$OBMYSQL_PORT);
connect (conn1,$OBMYSQL_MS0,$OBMYSQL_USR,$OBMYSQL_PWD,test,$OBMYSQL_PORT);
connection obsys;
## adjust configuration
--disable_abort_on_error
## case: check table schemas
connection conn1;
use test;
--disable_warnings
drop table if exists pg_trans_test;
--enable_warnings
############# non-partitioned tables ##########
create tablegroup tg1 binding true;
create table pg_trans_test1_1(id1 int, id2 int) tablegroup tg1;
create table pg_trans_test1_2(id1 int, id2 int) tablegroup tg1;
create index i1 on pg_trans_test1_1(id2) local;
desc pg_trans_test1_1;
desc pg_trans_test1_2;
insert into pg_trans_test1_1 values(1, 1);
insert into pg_trans_test1_2 values(2, 2);
begin;
insert into pg_trans_test1_1 values(3, 3);
insert into pg_trans_test1_2 values(4, 4);
commit;
select * from pg_trans_test1_1;
select * from pg_trans_test1_2;
######### HASH partitioning ########
create tablegroup tg2 binding true partition by hash partitions 2;
create table pg_trans_test2_1(id1 int, id2 int) tablegroup tg2 partition by hash(id1 % 2) partitions 2;
create index i1 on pg_trans_test2_1(id2) local;
create table pg_trans_test2_2(id1 int, id2 int) tablegroup tg2 partition by hash(id1 % 2) partitions 2;
desc pg_trans_test2_1;
desc pg_trans_test2_2;
insert into pg_trans_test2_1 values(5, 5);
insert into pg_trans_test2_2 values(6, 6);
select * from pg_trans_test2_1;
select * from pg_trans_test2_2;
# single-PG transaction
begin;
insert into pg_trans_test2_1 values(7, 7);
insert into pg_trans_test2_2 values(8, 8);
commit;
select * from pg_trans_test2_1;
select * from pg_trans_test2_2;
# multi-PG transaction
begin;
insert into pg_trans_test2_1 values(7, 7);
insert into pg_trans_test2_1 values(8, 8);
insert into pg_trans_test2_2 values(9, 9);
insert into pg_trans_test2_2 values(10, 10);
commit;
select * from pg_trans_test2_1;
select * from pg_trans_test2_2;
########### RANGE partitioning ############
create tablegroup tg3 binding true partition by range columns 1 (partition p0 values less than (10), partition p1 values less than(20));
create table pg_trans_test3_1(id1 int, id2 int) tablegroup tg3 partition by range columns(id1) (partition p0 values less than (10), partition p1 values less than(20));
create table pg_trans_test3_2(id1 int, id2 int) tablegroup tg3 partition by range columns(id1) (partition p0 values less than (10), partition p1 values less than(20));
desc pg_trans_test3_1;
desc pg_trans_test3_2;
insert into pg_trans_test3_1 values(5, 5);
insert into pg_trans_test3_2 values(15, 15);
select * from pg_trans_test3_1;
select * from pg_trans_test3_2;
# single-PG transaction
begin;
insert into pg_trans_test3_1 values(6, 6);
insert into pg_trans_test3_2 values(8, 8);
commit;
begin;
insert into pg_trans_test3_1 values(11, 11);
insert into pg_trans_test3_2 values(12, 12);
commit;
select * from pg_trans_test3_1;
select * from pg_trans_test3_2;
# multi-PG transaction
begin;
insert into pg_trans_test3_1 values(1, 1);
insert into pg_trans_test3_1 values(13, 13);
insert into pg_trans_test3_2 values(2, 2);
insert into pg_trans_test3_2 values(14, 14);
commit;
select * from pg_trans_test3_1;
select * from pg_trans_test3_2;
########### LIST partitioning ############
create tablegroup tg4 binding true partition by list columns 1 (
partition p0 values in (1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
partition p1 values in (11, 12, 13, 14, 15, 16, 17, 18, 19, 20)
);
create table pg_trans_test4_1(id1 int, id2 int) tablegroup tg4 partition by list columns(id1) (
partition p0 values in (1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
partition p1 values in (11, 12, 13, 14, 15, 16, 17, 18, 19, 20)
);
create table pg_trans_test4_2(id1 int, id2 int) tablegroup tg4 partition by list columns(id1) (
partition p0 values in (1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
partition p1 values in (11, 12, 13, 14, 15, 16, 17, 18, 19, 20)
);
desc pg_trans_test4_1;
desc pg_trans_test4_2;
insert into pg_trans_test4_1 values(5, 5);
insert into pg_trans_test4_2 values(15, 15);
select * from pg_trans_test4_1;
select * from pg_trans_test4_2;
# single-PG transaction
begin;
insert into pg_trans_test4_1 values(6, 6);
insert into pg_trans_test4_2 values(8, 8);
commit;
begin;
insert into pg_trans_test4_1 values(11, 11);
insert into pg_trans_test4_2 values(12, 12);
commit;
select * from pg_trans_test4_1;
select * from pg_trans_test4_2;
# multi-PG transaction
begin;
insert into pg_trans_test4_1 values(1, 1);
insert into pg_trans_test4_1 values(13, 13);
insert into pg_trans_test4_2 values(2, 2);
insert into pg_trans_test4_2 values(14, 14);
commit;
select * from pg_trans_test4_1;
select * from pg_trans_test4_2;
###############################################################
# verify the single-PG, multiple-partition scenario
create tablegroup tg5 binding true;
create table pg_trans_test5_1 (pk int) tablegroup tg5;
create table pg_trans_test5_2 (pk int) tablegroup tg5;
insert into pg_trans_test5_1 values (1), (2), (3), (4);
insert into pg_trans_test5_2 values (1), (2), (3), (4);
select * from pg_trans_test5_1 as l join pg_trans_test5_2 as r where l.pk = r.pk;
###############################################################
## drop the tables
drop table if exists pg_trans_test1_1;
drop table if exists pg_trans_test1_2;
drop table if exists pg_trans_test2_1;
drop table if exists pg_trans_test2_2;
drop table if exists pg_trans_test3_1;
drop table if exists pg_trans_test3_2;
drop table if exists pg_trans_test4_1;
drop table if exists pg_trans_test4_2;
drop table if exists pg_trans_test5_1;
drop table if exists pg_trans_test5_2;
# drop the tablegroups
purge recyclebin;
drop tablegroup tg1;
drop tablegroup tg2;
drop tablegroup tg3;
drop tablegroup tg4;
drop tablegroup tg5;
connection obsys;
## restore configuration
--enable_abort_on_error


@@ -0,0 +1,134 @@
--disable_query_log
set @@session.explicit_defaults_for_timestamp=off;
--enable_query_log
#owner: wanhong.wwh
#owner group: transaction
#description: test the behavior and limitations of the SERIALIZABLE isolation level across various statement types
#
# Principles:
# 1. Regardless of table type, the SERIALIZABLE isolation level always uses a transaction-level snapshot
# 2. Currently only reads of the latest data are supported; weak-consistency reads are not
# 3. Reads at a specified snapshot are not supported and must not appear under the SERIALIZABLE isolation level
# create the database
--disable_warnings
drop database if exists xm_test;
drop database if exists xm_test_db1;
--enable_warnings
create database xm_test;
use xm_test;
# sleep for a moment to make sure the connections succeed
sleep 5;
connect (conn1,$OBMYSQL_MS0,$OBMYSQL_USR,$OBMYSQL_PWD,xm_test,$OBMYSQL_PORT);
connect (conn2,$OBMYSQL_MS0,$OBMYSQL_USR,$OBMYSQL_PWD,xm_test,$OBMYSQL_PORT);
connection conn1;
# set the SERIALIZABLE isolation level
set tx_isolation = 'SERIALIZABLE';
set autocommit = 1;
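# A quick sanity sketch (illustrative only; it assumes the harness supports
# --disable_result_log like stock mysqltest, and is wrapped so the recorded result
# stays unchanged): read the setting back to confirm principle 1 is in effect on conn1.
--disable_query_log
--disable_result_log
select @@tx_isolation;
--enable_result_log
--enable_query_log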
connection conn2;
# set the SERIALIZABLE isolation level
set tx_isolation = 'SERIALIZABLE';
set autocommit = 1;
connection conn1;
# create tables
create table xm_test_t1 (c1 int primary key, c2 int);
create table xm_test_t2 (c1 int primary key, c2 int);
# prepare data
insert into xm_test_t1 values(1,1);
insert into xm_test_t1 values(2,1);
insert into xm_test_t2 values(2,1);
########### case 1: strong-consistency reads on user tables
begin;
select * from xm_test_t1;
insert into xm_test_t1 values (3, 1);
# a concurrent session runs a transaction and inserts data
connection conn2;
insert into xm_test_t1 values (4, 1);
select * from xm_test_t1;
# switch back to the first session; the concurrent transaction's changes must not be visible
connection conn1;
select * from xm_test_t1;
commit;
########### case 2: weak-consistency read on a user table, expected to fail: not supported
begin;
--error 1235
select /*+read_consistency(weak) */ * from xm_test_t1;
commit;
########### case 3: SHOW statements inside a transaction
# Internally, SHOW statements are implemented as queries on internal tables and run on the user SESSION.
# Most SHOW statements query virtual tables that read the in-memory schema directly; in that case the SHOW
# statement is independent of the isolation level and always sees the latest data.
# SHOW DATABASES LIKE, however, actually queries __all_database on the user SESSION, so it is affected by the
# user's transaction isolation level.
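# Illustrative sketch of the mapping described above (assumes the user can read
# oceanbase.__all_database directly, as the internal-table cases later in this file do;
# wrapped so the recorded result stays unchanged): the direct query behind
# SHOW DATABASES LIKE 'xm_%'.
--disable_query_log
--disable_result_log
select database_name from oceanbase.__all_database where database_name like 'xm_%';
--enable_result_log
--enable_query_log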
begin;
select * from xm_test_t1;
insert into xm_test_t1 values (5, 1);
# SHOW DATABASES LIKE queries the __all_database table, so it uses the transaction-level snapshot here
show databases like 'xm_%';
# create a database on the other connection
connection conn2;
create database xm_test_db1;
show databases like 'xm_%';
insert into xm_test_t1 values (6, 1);
# switch back to the first SESSION
# run SHOW DATABASES LIKE again; the newly created database should not be visible
connection conn1;
show databases like 'xm_%';
select * from xm_test_t1;
commit;
########### case 4: index creation statements
# Index creation is supported regardless of whether the transaction isolation level is SERIALIZABLE.
# Internally, index creation runs in an internal-SESSION transaction under the RC isolation level.
connection conn1;
create table t_global_index (pk int primary key) partition by hash(pk) partitions 10;
insert into t_global_index values (1), (2), (3);
create index index1 on t_global_index (pk) global;
--source mysql_test/include/check_all_idx_ok.inc
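# (check_all_idx_ok.inc is expected to wait until the index build finishes, so index1
# should already be reported as available in the SHOW INDEX output below.)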
show index from t_global_index;
connection conn1;
########### case 5: user-issued queries on internal tables
# they are required to always read consistent schema data
connection conn1;
begin;
insert into xm_test_t1 values (7, 1);
select * from xm_test_t1;
# SQL that queries the internal tables
select table_name from oceanbase.__all_table_v2 as t, oceanbase.__all_database as d where d.database_name='xm_test' and d.database_id = t.database_id and (t.table_name='xm_test_t1' or t.table_name='xm_test_t4');
# a concurrent session creates a table and modifies xm_test_t1
connection conn2;
create table xm_test_t4 (pk int primary key);
insert into xm_test_t1 values (8, 1);
select * from xm_test_t1;
# switch back and query the internal tables again; the result matches the earlier read and the new table is not visible
connection conn1;
select table_name from oceanbase.__all_table_v2 as t, oceanbase.__all_database as d where d.database_name='xm_test' and d.database_id = t.database_id and (t.table_name='xm_test_t1' or t.table_name='xm_test_t4');
# the same requirement applies to weak-consistency reads on internal tables
select /*+read_consistency(weak) */ table_name from oceanbase.__all_table_v2 as t, oceanbase.__all_database as d where d.database_name='xm_test' and d.database_id = t.database_id and (t.table_name='xm_test_t1' or t.table_name='xm_test_t4');
select * from xm_test_t1;
commit;
########### case 6: internally issued internal-table queries use a separate internal SESSION, unrelated to the current transaction, under the READ-COMMITTED isolation level
# gv$table is implemented as a query on __all_virtual_table, which in turn uses an internal SESSION to query __all_table_v2.
# every read is expected to see the latest data
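# Side-by-side sketch of the two access paths named above (run outside any transaction,
# purely for illustration; wrapped so the recorded result stays unchanged): the
# user-session read of __all_table_v2 versus the internal-session-backed gv$table.
--disable_query_log
--disable_result_log
select table_name from oceanbase.__all_table_v2 limit 1;
select table_name from oceanbase.gv$table limit 1;
--enable_result_log
--enable_query_log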
connection conn1;
begin;
insert into xm_test_t1 values (9, 1);
select * from xm_test_t1;
select t.table_name from oceanbase.gv$table as t where t.database_name='xm_test' and t.table_type=3;
# a concurrent session creates a table and modifies xm_test_t1
connection conn2;
create table xm_test_t5 (pk int primary key);
insert into xm_test_t1 values (10, 1);
select * from xm_test_t1;
# switch back and query the internal tables again; the newly created table must be visible
connection conn1;
select t.table_name from oceanbase.gv$table as t where t.database_name='xm_test' and t.table_type=3;
# the same requirement applies to weak-consistency reads on internal tables
select /*+read_consistency(weak) */ t.table_name from oceanbase.gv$table as t where t.database_name='xm_test' and t.table_type=3;
select * from xm_test_t1;
commit;


@@ -0,0 +1,122 @@
--disable_query_log
set @@session.explicit_defaults_for_timestamp=off;
--enable_query_log
#owner: shanyan.g
#owner group: transaction
#description: test the validity of statements that read from standby replicas
connect (conn1,$OBMYSQL_MS0,$OBMYSQL_USR,$OBMYSQL_PWD,test,$OBMYSQL_PORT);
connection conn1;
--disable_warnings
drop table if exists t1;
drop table if exists t2;
--enable_warnings
# create tables
create table t1 (c1 int primary key, c2 int);
create table t2 (c1 int primary key, c2 int);
desc t1;
desc t2;
# prepare data
insert into t1 values(1,1);
insert into t1 values(2,1);
insert into t2 values(2,1);
# check that the data was inserted successfully
select * from t1;
select * from t2;
# make sure the data version on the standby replica has been refreshed
sleep 2;
# case 1: single-partition standby reads
set autocommit=1;
select /*+read_consistency(weak)+*/ * from t1;
select /*+read_consistency(weak)+*/ * from t1 where c1 = 1;
select /*+read_consistency(weak)+*/ * from t2;
select /*+read_consistency(weak)+*/ * from t2 where c1 = 2;
# case 2: single-partition self-join transaction with autocommit=1
select /*+read_consistency(weak)+*/ * from t1 as l join t1 as r where l.c1 = r.c1;
# case 3: multi-partition transaction with autocommit=1
select /*+read_consistency(weak)+*/ * from t1 join t2 where t1.c1 = t2.c1;
# case 4: multi-partition transaction with autocommit=0
set autocommit=0;
select /*+read_consistency(weak)+*/ * from t1 as l join t1 as r where l.c1 = r.c1;
select /*+read_consistency(weak)+*/ * from t1 join t2 where t1.c1 = t2.c1;
select /*+read_consistency(weak)+*/* from t1;
select /*+read_consistency(weak)+*/* from t2;
commit;
## the cases below test invalid standby-read situations
# case 5: weak-consistency queries mixed into strong-consistency operations
begin;
insert into t1 values(3, 1);
insert into t2 values(3, 1);
# strictly speaking, the next two statements should fail, but based on the two preceding statements the SQL layer routes them to the leader, so no error is raised
select /*+read_consistency(weak)+*/* from t1;
select /*+read_consistency(weak)+*/* from t2;
commit;
# the inserts succeeded; sleep for a moment so the weak-read results are consistent
sleep 2;
# case 6: strong-consistency operations mixed into weak-consistency operations
begin;
select /*+read_consistency(weak)+*/* from t1;
select /*+read_consistency(weak)+*/* from t2;
# the next two statements must fail with a not-supported error
--error 1235
insert into t1 values(4, 1);
# expected to execute as a weak-consistency operation
select * from t1;
--error 1235
insert into t2 values(4, 1);
select * from t2;
commit;
# case 7: select ... for update, scenario 1
begin;
# the SQL layer currently ignores this hint and treats the statement as a strong-consistency read
select /*+read_consistency(weak)+*/* from t1 for update;
# since the first statement marks the transaction as strong-consistency, the following statement is sent to the leader
select /*+read_consistency(weak)+*/* from t2;
insert into t1 values(5, 1);
select * from t1;
insert into t2 values(5, 1);
select * from t2;
commit;
# the inserts succeeded; sleep for a moment so the weak-read results are consistent
sleep 2;
# case 8: select ... for update, scenario 2
begin;
# the first statement is a weak-consistency query, so the transaction should be weak-consistency
select /*+read_consistency(weak)+*/* from t2;
# the transaction is weak-consistency, so the following statements must fail
--error 1235
select /*+read_consistency(weak)+*/* from t1 for update;
--error 1235
insert into t1 values(6, 1);
select * from t1;
--error 1235
insert into t2 values(6, 1);
select * from t2;
commit;
# case 9: other scenarios
begin;
# the transaction is strong-consistency, so all of the following statements execute as strong-consistency operations
select /*+read_consistency(strong)*/* from t1;
# expected to be a strong read
select /*+read_consistency(weak)*/* from t1;
commit;
begin;
# the transaction is weak-consistency, so all of the following statements take the weak-consistency path
select /*+read_consistency(weak)*/* from t1;
select * from t1;
commit;


@@ -0,0 +1,71 @@
--disable_query_log
set @@session.explicit_defaults_for_timestamp=off;
--enable_query_log
#owner: gjw228474
#owner group: transaction
#description: test GTS defense under the sys tenant
#
# Principles:
# 1. The sys tenant's timestamp source is LTS
# 2. The sys tenant's timestamp source must not be set to anything other than LTS
# 3. An ordinary tenant's timestamp source defaults to GTS and can be set to LTS
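# Sketch of an alternative, stricter way to assert principle 2 (assumes MySQL error code
# 1231, ER_WRONG_VALUE_FOR_VAR, which matches the "can't be set to the value of 'GTS'"
# message in the recorded result). This test instead relies on --disable_abort_on_error
# below and lets the recorded output capture the failure:
# --error 1231
# set global ob_timestamp_service='GTS';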
--disable_abort_on_error
--disable_warnings
connect (obsys,$OBMYSQL_MS0,admin,$OBMYSQL_PWD,test,$OBMYSQL_PORT);
connection obsys;
## timestamp source tests under the sys tenant
show global variables like 'ob_timestamp_service';
set global ob_timestamp_service='GTS';
show global variables like 'ob_timestamp_service';
disconnect obsys;
connect (obsys,$OBMYSQL_MS0,admin,$OBMYSQL_PWD,test,$OBMYSQL_PORT);
connection obsys;
show global variables like 'ob_timestamp_service';
## timestamp source tests under an ordinary tenant
--disable_query_log
create resource unit if not exists ts_source_unit max_cpu=1, min_memory='1G',max_memory='1G',max_disk_size='1G',max_iops=1000,max_session_num=1000;
create resource pool if not exists ts_source_pool1 unit='ts_source_unit', unit_num=1;
create tenant if not exists ts_source_tenant1 RESOURCE_POOL_LIST=('ts_source_pool1') set ob_tcp_invited_nodes='%';
--source mysql_test/include/check_tenant_sync.inc
--enable_query_log
connect (obcommon,$OBMYSQL_MS0,root@ts_source_tenant1,,*NO-ONE*,$OBMYSQL_PORT);
connection obcommon;
show global variables like 'ob_timestamp_service';
set global ob_timestamp_service='LTS';
show global variables like 'ob_timestamp_service';
disconnect obcommon;
connect (obcommon,$OBMYSQL_MS0,root@ts_source_tenant1,,*NO-ONE*,$OBMYSQL_PORT);
connection obcommon;
show global variables like 'ob_timestamp_service';
set global ob_timestamp_service='GTS';
show global variables like 'ob_timestamp_service';
disconnect obcommon;
connect (obcommon,$OBMYSQL_MS0,root@ts_source_tenant1,,*NO-ONE*,$OBMYSQL_PORT);
connection obcommon;
show global variables like 'ob_timestamp_service';
## clean up
--disable_query_log
connection obsys;
drop tenant ts_source_tenant1 force;
drop resource pool ts_source_pool1;
drop resource unit ts_source_unit;
--enable_query_log
--enable_abort_on_error
--enable_warnings