[feature-wip](recover) new recover ddl and support show catalog recycle bin (#13067)
@@ -46,8 +46,10 @@ public final class FeMetaVersion {
public static final int VERSION_112 = 112;
// add password options
public static final int VERSION_113 = 113;
// add new recover info for recover ddl
public static final int VERSION_114 = 114;
// note: when incrementing the meta version, assign the latest version to VERSION_CURRENT
public static final int VERSION_CURRENT = VERSION_113;
public static final int VERSION_CURRENT = VERSION_114;

// the meta version of all logs should be >= the minimum version, so that we can remove many if clauses, for example
// if (FE_METAVERSION < VERSION_94) ...

@@ -263,6 +263,7 @@ terminal String
KW_BEGIN,
KW_BETWEEN,
KW_BIGINT,
KW_BIN,
KW_BINLOG,
KW_BITMAP,
KW_BITMAP_UNION,
@@ -479,6 +480,7 @@ terminal String
KW_POLICY,
KW_PRECEDING,
KW_PERCENT,
KW_RECYCLE,
KW_PROC,
KW_PROCEDURE,
KW_PROCESSLIST,
@@ -674,7 +676,7 @@ nonterminal Expr set_expr_or_default;
nonterminal ArrayList<Expr> expr_list, values, row_value, opt_values;
nonterminal ArrayList<Expr> func_arg_list;
nonterminal ArrayList<Expr> expr_pipe_list;
nonterminal String select_alias, opt_table_alias, lock_alias;
nonterminal String select_alias, opt_table_alias, lock_alias, opt_alias;
nonterminal ArrayList<String> ident_list;
nonterminal PartitionNames opt_partition_names, partition_names;
nonterminal ArrayList<Long> opt_tablet_list, tablet_list;
@@ -768,6 +770,7 @@ nonterminal Integer opt_distribution_number;
nonterminal Long opt_field_length;
nonterminal KeysDesc opt_keys;
nonterminal KeysDesc opt_mv_keys;
nonterminal Long opt_id;

nonterminal PartitionKeyDesc partition_key_desc;
nonterminal PartitionKeyDesc list_partition_key_desc;
@@ -2747,17 +2750,17 @@ drop_stmt ::=

// Recover statement
recover_stmt ::=
KW_RECOVER KW_DATABASE ident:dbName
KW_RECOVER KW_DATABASE ident:dbName opt_id:dbId opt_alias:alias
{:
RESULT = new RecoverDbStmt(dbName);
RESULT = new RecoverDbStmt(dbName, dbId, alias);
:}
| KW_RECOVER KW_TABLE table_name:dbTblName
| KW_RECOVER KW_TABLE table_name:dbTblName opt_id:tableId opt_alias:alias
{:
RESULT = new RecoverTableStmt(dbTblName);
RESULT = new RecoverTableStmt(dbTblName, tableId, alias);
:}
| KW_RECOVER KW_PARTITION ident:partitionName KW_FROM table_name:dbTblName
| KW_RECOVER KW_PARTITION ident:partitionName opt_id:partitionId opt_alias:alias KW_FROM table_name:dbTblName
{:
RESULT = new RecoverPartitionStmt(dbTblName, partitionName);
RESULT = new RecoverPartitionStmt(dbTblName, partitionName, partitionId, alias);
:}
;

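The new grammar extends each RECOVER form with an optional object id and an optional AS alias. A few illustrative statements under the new syntax (the names and ids below are hypothetical, not taken from the patch):

    RECOVER DATABASE example_db;                        -- recover the most recently dropped database with this name
    RECOVER DATABASE example_db 10013;                  -- recover the dropped database whose id is 10013
    RECOVER DATABASE example_db 10013 AS example_db2;   -- recover it under a new name
    RECOVER TABLE example_db.example_tbl 15013 AS example_tbl2;
    RECOVER PARTITION p1 25013 AS p2 FROM example_db.example_tbl;
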
@@ -3623,6 +3626,10 @@ show_param ::=
{:
RESULT = new ShowAnalyzeStmt(tbl, parser.where, orderByClause, limitClause);
:}
| KW_CATALOG KW_RECYCLE KW_BIN opt_wild_where
{:
RESULT = new ShowCatalogRecycleBinStmt(parser.where);
:}
;

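The new SHOW form takes an optional WHERE filter through opt_wild_where. Illustrative usage (names hypothetical):

    SHOW CATALOG RECYCLE BIN;
    SHOW CATALOG RECYCLE BIN WHERE Name = 'example_db';
    SHOW CATALOG RECYCLE BIN WHERE Name LIKE 'example%';
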
opt_tmp ::=
@@ -3741,6 +3748,17 @@ opt_wild_where ::=
:}
;

opt_id ::=
/* empty */
{:
RESULT = (long)-1;
:}
| INTEGER_LITERAL:id
{:
RESULT = id;
:}
;

opt_alter_type ::=
KW_ROLLUP
{:
@@ -4981,6 +4999,17 @@ opt_common_hints ::=
:}
;

opt_alias ::=
/* empty */
{:
RESULT = null;
:}
| KW_AS ident:alias
{:
RESULT = alias;
:}
;

opt_table_alias ::=
/* empty */
{:
@@ -6681,6 +6710,8 @@ keyword ::=
{: RESULT = id; :}
| KW_CATALOGS:id
{: RESULT = id; :}
| KW_RECYCLE:id
{: RESULT = id; :}
;

// Identifier that contains a keyword

@@ -808,7 +808,7 @@ public class MaterializedViewHandler extends AlterHandler {
long mvIndexId = dropMaterializedView(mvName, olapTable);
// Step3: log drop mv operation
EditLog editLog = Env.getCurrentEnv().getEditLog();
editLog.logDropRollup(new DropInfo(db.getId(), olapTable.getId(), mvIndexId, false));
editLog.logDropRollup(new DropInfo(db.getId(), olapTable.getId(), mvIndexId, false, 0));
LOG.info("finished drop materialized view [{}] in table [{}]", mvName, olapTable.getName());
} catch (MetaNotFoundException e) {
if (dropMaterializedViewStmt.isIfExists()) {

@@ -33,15 +33,29 @@ import com.google.common.base.Strings;

public class RecoverDbStmt extends DdlStmt {
private String dbName;
private long dbId = -1;
private String newDbName = "";

public RecoverDbStmt(String dbName) {
public RecoverDbStmt(String dbName, long dbId, String newDbName) {
this.dbName = dbName;
this.dbId = dbId;
if (newDbName != null) {
this.newDbName = newDbName;
}
}

public String getDbName() {
return dbName;
}

public long getDbId() {
return dbId;
}

public String getNewDbName() {
return newDbName;
}

@Override
public void analyze(Analyzer analyzer) throws AnalysisException, UserException {
super.analyze(analyzer);
@@ -50,6 +64,10 @@ public class RecoverDbStmt extends DdlStmt {
}
dbName = ClusterNamespace.getFullName(getClusterName(), dbName);

if (!Strings.isNullOrEmpty(newDbName)) {
newDbName = ClusterNamespace.getFullName(getClusterName(), newDbName);
}

if (!Env.getCurrentEnv().getAuth().checkDbPriv(ConnectContext.get(), dbName,
PrivPredicate.of(PrivBitSet.of(
PaloPrivilege.ALTER_PRIV, PaloPrivilege.CREATE_PRIV, PaloPrivilege.ADMIN_PRIV), Operator.OR))) {
@@ -60,6 +78,18 @@ public class RecoverDbStmt extends DdlStmt {

@Override
public String toSql() {
return "RECOVER DATABASE " + dbName;
StringBuilder sb = new StringBuilder();
sb.append("RECOVER");
sb.append(" DATABASE ");
sb.append(this.dbName);
if (this.dbId != -1) {
sb.append(" ");
sb.append(this.dbId);
}
if (!Strings.isNullOrEmpty(newDbName)) {
sb.append(" AS ");
sb.append(this.newDbName);
}
return sb.toString();
}
}

@@ -34,10 +34,16 @@ import com.google.common.base.Strings;
public class RecoverPartitionStmt extends DdlStmt {
private TableName dbTblName;
private String partitionName;
private long partitionId = -1;
private String newPartitionName = "";

public RecoverPartitionStmt(TableName dbTblName, String partitionName) {
public RecoverPartitionStmt(TableName dbTblName, String partitionName, long partitionId, String newPartitionName) {
this.dbTblName = dbTblName;
this.partitionName = partitionName;
this.partitionId = partitionId;
if (newPartitionName != null) {
this.newPartitionName = newPartitionName;
}
}

public String getDbName() {
@@ -52,6 +58,14 @@ public class RecoverPartitionStmt extends DdlStmt {
return partitionName;
}

public long getPartitionId() {
return partitionId;
}

public String getNewPartitionName() {
return newPartitionName;
}

@Override
public void analyze(Analyzer analyzer) throws AnalysisException, UserException {
dbTblName.analyze(analyzer);
@@ -70,7 +84,16 @@ public class RecoverPartitionStmt extends DdlStmt {
@Override
public String toSql() {
StringBuilder sb = new StringBuilder();
sb.append("RECOVER PARTITION ").append(partitionName).append(" FROM ");
sb.append("RECOVER PARTITION ").append(partitionName);
if (this.partitionId != -1) {
sb.append(" ");
sb.append(this.partitionId);
}
if (!Strings.isNullOrEmpty(newPartitionName)) {
sb.append(" AS ");
sb.append(this.newPartitionName);
}
sb.append(" FROM ");
if (!Strings.isNullOrEmpty(getDbName())) {
sb.append(getDbName()).append(".");
}

@@ -33,9 +33,15 @@ import com.google.common.base.Strings;

public class RecoverTableStmt extends DdlStmt {
private TableName dbTblName;
private long tableId = -1;
private String newTableName = "";

public RecoverTableStmt(TableName dbTblName) {
public RecoverTableStmt(TableName dbTblName, long tableId, String newTableName) {
this.dbTblName = dbTblName;
this.tableId = tableId;
if (newTableName != null) {
this.newTableName = newTableName;
}
}

public String getDbName() {
@@ -46,6 +52,14 @@ public class RecoverTableStmt extends DdlStmt {
return dbTblName.getTbl();
}

public long getTableId() {
return tableId;
}

public String getNewTableName() {
return newTableName;
}

@Override
public void analyze(Analyzer analyzer) throws AnalysisException, UserException {
dbTblName.analyze(analyzer);
@@ -71,6 +85,14 @@ public class RecoverTableStmt extends DdlStmt {
sb.append(getDbName()).append(".");
}
sb.append(getTableName());
if (this.tableId != -1) {
sb.append(" ");
sb.append(this.tableId);
}
if (!Strings.isNullOrEmpty(newTableName)) {
sb.append(" AS ");
sb.append(this.newTableName);
}
return sb.toString();
}
}

@@ -0,0 +1,154 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package org.apache.doris.analysis;

import org.apache.doris.catalog.Column;
import org.apache.doris.catalog.ScalarType;
import org.apache.doris.common.AnalysisException;
import org.apache.doris.common.CaseSensibility;
import org.apache.doris.common.PatternMatcher;
import org.apache.doris.common.UserException;
import org.apache.doris.qe.ShowResultSetMetaData;

import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;

import java.util.function.Predicate;

public class ShowCatalogRecycleBinStmt extends ShowStmt {
public static final ImmutableList<String> TITLE_NAMES = new ImmutableList.Builder<String>()
.add("Type").add("Name").add("DbId").add("TableId").add("PartitionId").add("DropTime")
.build();

private Expr where;
private String nameValue;
private boolean isAccurateMatch;

public ShowCatalogRecycleBinStmt(Expr where) {
this.where = where;
}

public String getNameValue() {
return nameValue;
}

@Override
public void analyze(Analyzer analyzer) throws UserException {
super.analyze(analyzer);

if (where == null) {
return;
}
boolean valid = analyzeWhereClause();
if (!valid) {
throw new AnalysisException("Where clause should look like: Name = \"name\""
+ " or Name LIKE \"matcher\"");
}
}

private boolean analyzeWhereClause() {
if (!(where instanceof LikePredicate) && !(where instanceof BinaryPredicate)) {
return false;
}

if (where instanceof BinaryPredicate) {
BinaryPredicate binaryPredicate = (BinaryPredicate) where;
if (BinaryPredicate.Operator.EQ != binaryPredicate.getOp()) {
return false;
}
isAccurateMatch = true;
}

if (where instanceof LikePredicate) {
LikePredicate likePredicate = (LikePredicate) where;
if (LikePredicate.Operator.LIKE != likePredicate.getOp()) {
return false;
}
}

// left child
if (!(where.getChild(0) instanceof SlotRef)) {
return false;
}
String leftKey = ((SlotRef) where.getChild(0)).getColumnName();
if (!"name".equalsIgnoreCase(leftKey)) {
return false;
}

// right child
if (!(where.getChild(1) instanceof StringLiteral)) {
return false;
}
nameValue = ((StringLiteral) where.getChild(1)).getStringValue();
if (Strings.isNullOrEmpty(nameValue)) {
return false;
}

return true;
}

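analyzeWhereClause() only accepts an equality or LIKE predicate whose left side is the Name column and whose right side is a non-empty string literal; anything else fails analysis with the AnalysisException above. Illustrative cases (values hypothetical):

    SHOW CATALOG RECYCLE BIN WHERE Name = "p1";      -- accepted: accurate match
    SHOW CATALOG RECYCLE BIN WHERE Name LIKE "p%";   -- accepted: pattern match
    SHOW CATALOG RECYCLE BIN WHERE DbId = 10013;     -- rejected: column must be Name
    SHOW CATALOG RECYCLE BIN WHERE Name != "p1";     -- rejected: operator must be = or LIKE
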
@Override
public ShowResultSetMetaData getMetaData() {
ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder();
for (String title : TITLE_NAMES) {
builder.addColumn(new Column(title, ScalarType.createVarchar(30)));
}
return builder.build();
}

@Override
public String toSql() {
StringBuilder builder = new StringBuilder();
builder.append("SHOW CATALOG RECYCLE BIN");
if (where != null) {
builder.append(" WHERE ").append(where.toSql());
}
return builder.toString();
}

@Override
public String toString() {
return toSql();
}

@Override
public RedirectStatus getRedirectStatus() {
return RedirectStatus.FORWARD_NO_SYNC;
}

public boolean isAccurateMatch() {
return isAccurateMatch;
}

public Expr getWhere() {
return where;
}

public Predicate<String> getNamePredicate() throws AnalysisException {
if (null == where) {
return name -> true;
}
if (isAccurateMatch) {
return CaseSensibility.PARTITION.getCaseSensibility()
? name -> name.equals(nameValue) : name -> name.equalsIgnoreCase(nameValue);
} else {
PatternMatcher patternMatcher = PatternMatcher.createMysqlPattern(
nameValue, CaseSensibility.PARTITION.getCaseSensibility());
return patternMatcher::match;
}
}
}
@@ -21,17 +21,17 @@ import org.apache.doris.catalog.MaterializedIndex.IndexExtState;
import org.apache.doris.catalog.TableIf.TableType;
import org.apache.doris.common.Config;
import org.apache.doris.common.DdlException;
import org.apache.doris.common.ErrorCode;
import org.apache.doris.common.ErrorReport;
import org.apache.doris.common.FeMetaVersion;
import org.apache.doris.common.io.Text;
import org.apache.doris.common.io.Writable;
import org.apache.doris.common.util.MasterDaemon;
import org.apache.doris.common.util.RangeUtils;
import org.apache.doris.common.util.TimeUtils;
import org.apache.doris.persist.RecoverInfo;
import org.apache.doris.thrift.TStorageMedium;

import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Range;
@@ -42,10 +42,13 @@ import org.apache.logging.log4j.Logger;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class CatalogRecycleBin extends MasterDaemon implements Writable {
private static final Logger LOG = LogManager.getLogger(CatalogRecycleBin.class);
@@ -67,7 +70,9 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
idToRecycleTime = Maps.newHashMap();
}

public synchronized boolean recycleDatabase(Database db, Set<String> tableNames) {
public synchronized boolean recycleDatabase(Database db, Set<String> tableNames, Set<Long> tableIds,
boolean isReplay, long replayRecycleTime) {
long recycleTime = 0;
if (idToDatabase.containsKey(db.getId())) {
LOG.error("db[{}] already in recycle bin.", db.getId());
return false;
@@ -76,29 +81,34 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
// db should be empty; all tables were recycled before this point
Preconditions.checkState(db.getTables().isEmpty());

// erase db with same name
eraseDatabaseWithSameName(db.getFullName());

// recycle db
RecycleDatabaseInfo databaseInfo = new RecycleDatabaseInfo(db, tableNames);
RecycleDatabaseInfo databaseInfo = new RecycleDatabaseInfo(db, tableNames, tableIds);
idToDatabase.put(db.getId(), databaseInfo);
idToRecycleTime.put(db.getId(), System.currentTimeMillis());
if (!isReplay || replayRecycleTime == 0) {
recycleTime = System.currentTimeMillis();
} else {
recycleTime = replayRecycleTime;
}
idToRecycleTime.put(db.getId(), recycleTime);
LOG.info("recycle db[{}-{}]", db.getId(), db.getFullName());
return true;
}

public synchronized boolean recycleTable(long dbId, Table table, boolean isReplay) {
public synchronized boolean recycleTable(long dbId, Table table, boolean isReplay, long replayRecycleTime) {
long recycleTime = 0;
if (idToTable.containsKey(table.getId())) {
LOG.error("table[{}] already in recycle bin.", table.getId());
return false;
}

// erase table with same name
eraseTableWithSameName(dbId, table.getName(), isReplay);

// recycle table
RecycleTableInfo tableInfo = new RecycleTableInfo(dbId, table);
idToRecycleTime.put(table.getId(), System.currentTimeMillis());
if (!isReplay || replayRecycleTime == 0) {
recycleTime = System.currentTimeMillis();
} else {
recycleTime = replayRecycleTime;
}
idToRecycleTime.put(table.getId(), recycleTime);
idToTable.put(table.getId(), tableInfo);
LOG.info("recycle table[{}-{}]", table.getId(), table.getName());
return true;
@@ -106,17 +116,13 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {

public synchronized boolean recyclePartition(long dbId, long tableId, Partition partition,
Range<PartitionKey> range, PartitionItem listPartitionItem,
DataProperty dataProperty,
ReplicaAllocation replicaAlloc,
DataProperty dataProperty, ReplicaAllocation replicaAlloc,
boolean isInMemory) {
if (idToPartition.containsKey(partition.getId())) {
LOG.error("partition[{}] already in recycle bin.", partition.getId());
return false;
}

// erase partition with same name
erasePartitionWithSameName(dbId, tableId, partition.getName());

// recycle partition
RecyclePartitionInfo partitionInfo = new RecyclePartitionInfo(dbId, tableId, partition,
range, listPartitionItem, dataProperty, replicaAlloc, isInMemory);
@@ -126,6 +132,14 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
return true;
}

public synchronized Long getRecycleTimeById(long id) {
return idToRecycleTime.get(id);
}

public synchronized void setRecycleTimeByIdForReplay(long id, Long recycleTime) {
idToRecycleTime.put(id, recycleTime);
}

private synchronized boolean isExpire(long id, long currentTimeMs) {
long latency = currentTimeMs - idToRecycleTime.get(id);
return latency > minEraseLatency && latency > Config.catalog_trash_expire_second * 1000L;
@@ -147,24 +161,6 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
}
}

private synchronized void eraseDatabaseWithSameName(String dbName) {
Iterator<Map.Entry<Long, RecycleDatabaseInfo>> iterator = idToDatabase.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<Long, RecycleDatabaseInfo> entry = iterator.next();
RecycleDatabaseInfo dbInfo = entry.getValue();
Database db = dbInfo.getDb();
if (db.getFullName().equals(dbName)) {
iterator.remove();
idToRecycleTime.remove(entry.getKey());

// remove database transaction manager
Env.getCurrentEnv().getGlobalTransactionMgr().removeDatabaseTransactionMgr(db.getId());

LOG.info("erase database[{}] name: {}", db.getId(), dbName);
}
}
}

public synchronized void replayEraseDatabase(long dbId) {
idToDatabase.remove(dbId);
idToRecycleTime.remove(dbId);
@@ -196,28 +192,6 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
} // end for tables
}

private synchronized void eraseTableWithSameName(long dbId, String tableName, boolean isReplay) {
Iterator<Map.Entry<Long, RecycleTableInfo>> iterator = idToTable.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<Long, RecycleTableInfo> entry = iterator.next();
RecycleTableInfo tableInfo = entry.getValue();
if (tableInfo.getDbId() != dbId) {
continue;
}

Table table = tableInfo.getTable();
if (table.getName().equals(tableName)) {
if (table.getType() == TableType.OLAP) {
Env.getCurrentEnv().onEraseOlapTable((OlapTable) table, isReplay);
}

iterator.remove();
idToRecycleTime.remove(table.getId());
LOG.info("erase table[{}] name: {}", table.getId(), tableName);
}
}
}

public synchronized void replayEraseTable(long tableId) {
LOG.info("before replay erase table[{}]", tableId);
RecycleTableInfo tableInfo = idToTable.remove(tableId);
@@ -250,26 +224,6 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
} // end for partitions
}

private synchronized void erasePartitionWithSameName(long dbId, long tableId, String partitionName) {
Iterator<Map.Entry<Long, RecyclePartitionInfo>> iterator = idToPartition.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<Long, RecyclePartitionInfo> entry = iterator.next();
RecyclePartitionInfo partitionInfo = entry.getValue();
if (partitionInfo.getDbId() != dbId || partitionInfo.getTableId() != tableId) {
continue;
}

Partition partition = partitionInfo.getPartition();
if (partition.getName().equals(partitionName)) {
Env.getCurrentEnv().onErasePartition(partition);
iterator.remove();
idToRecycleTime.remove(entry.getKey());

LOG.info("erase partition[{}] name: {}", partition.getId(), partitionName);
}
}
}

public synchronized void replayErasePartition(long partitionId) {
RecyclePartitionInfo partitionInfo = idToPartition.remove(partitionId);
idToRecycleTime.remove(partitionId);
@@ -282,19 +236,27 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
LOG.info("replay erase partition[{}]", partitionId);
}

public synchronized Database recoverDatabase(String dbName) throws DdlException {
public synchronized Database recoverDatabase(String dbName, long dbId) throws DdlException {
RecycleDatabaseInfo dbInfo = null;
long recycleTime = -1;
Iterator<Map.Entry<Long, RecycleDatabaseInfo>> iterator = idToDatabase.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<Long, RecycleDatabaseInfo> entry = iterator.next();
if (dbName.equals(entry.getValue().getDb().getFullName())) {
dbInfo = entry.getValue();
break;
if (dbId == -1) {
if (recycleTime <= idToRecycleTime.get(entry.getKey())) {
recycleTime = idToRecycleTime.get(entry.getKey());
dbInfo = entry.getValue();
}
} else if (entry.getKey() == dbId) {
dbInfo = entry.getValue();
break;
}
}
}

if (dbInfo == null) {
ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
throw new DdlException("Unknown database '" + dbName + "' or database id '" + dbId + "'");
}

// 1. recover all tables in this db
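When no id is given (dbId == -1), the loop above keeps the matching entry with the greatest recycle time, so the most recently dropped database under that name wins; an explicit id selects an exact entry. A sketch of the resulting behavior (ids hypothetical):

    DROP DATABASE d1;                     -- recycled, say under db id 10001
    CREATE DATABASE d1;
    DROP DATABASE d1;                     -- recycled again, say under db id 10002
    RECOVER DATABASE d1;                  -- restores 10002, the most recently dropped copy
    RECOVER DATABASE d1 10001 AS d1_old;  -- restores the older copy under a new name
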
@@ -327,12 +289,14 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
private void recoverAllTables(RecycleDatabaseInfo dbInfo) throws DdlException {
Database db = dbInfo.getDb();
Set<String> tableNames = Sets.newHashSet(dbInfo.getTableNames());
Set<Long> tableIds = Sets.newHashSet(dbInfo.getTableIds());
long dbId = db.getId();
Iterator<Map.Entry<Long, RecycleTableInfo>> iterator = idToTable.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<Long, RecycleTableInfo> entry = iterator.next();
RecycleTableInfo tableInfo = entry.getValue();
if (tableInfo.getDbId() != dbId || !tableNames.contains(tableInfo.getTable().getName())) {
if (tableInfo.getDbId() != dbId || !tableNames.contains(tableInfo.getTable().getName())
|| !tableIds.contains(tableInfo.getTable().getId())) {
continue;
}

@@ -349,8 +313,11 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
}
}

public synchronized boolean recoverTable(Database db, String tableName) {
public synchronized boolean recoverTable(Database db, String tableName, long tableId,
String newTableName) throws DdlException {
// make sure to get db lock
Table table = null;
long recycleTime = -1;
long dbId = db.getId();
Iterator<Map.Entry<Long, RecycleTableInfo>> iterator = idToTable.entrySet().iterator();
while (iterator.hasNext()) {
@@ -360,29 +327,32 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
continue;
}

Table table = tableInfo.getTable();
if (!table.getName().equals(tableName)) {
if (!tableInfo.getTable().getName().equals(tableName)) {
continue;
}
table.writeLock();
try {
db.createTable(table);
iterator.remove();
idToRecycleTime.remove(table.getId());
// log
RecoverInfo recoverInfo = new RecoverInfo(dbId, table.getId(), -1L);
Env.getCurrentEnv().getEditLog().logRecoverTable(recoverInfo);
} finally {
table.writeUnlock();

if (tableId == -1) {
if (recycleTime <= idToRecycleTime.get(entry.getKey())) {
recycleTime = idToRecycleTime.get(entry.getKey());
table = tableInfo.getTable();
}
} else if (entry.getKey() == tableId) {
table = tableInfo.getTable();
break;
}
LOG.info("recover db[{}] with table[{}]: {}", dbId, table.getId(), table.getName());
return true;
}

return false;
if (table == null) {
throw new DdlException("Unknown table '" + tableName + "' or table id '" + tableId + "' in "
+ db.getFullName());
}

innerRecoverTable(db, table, tableName, newTableName, null, false);
LOG.info("recover db[{}] with table[{}]: {}", dbId, table.getId(), table.getName());
return true;
}

public synchronized void replayRecoverTable(Database db, long tableId) {
public synchronized void replayRecoverTable(Database db, long tableId, String newTableName) throws DdlException {
// make sure to get db write lock
Iterator<Map.Entry<Long, RecycleTableInfo>> iterator = idToTable.entrySet().iterator();
while (iterator.hasNext()) {
@@ -393,20 +363,60 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
}
Preconditions.checkState(tableInfo.getDbId() == db.getId());
Table table = tableInfo.getTable();
table.writeLock();
try {
db.createTable(tableInfo.getTable());
iterator.remove();
idToRecycleTime.remove(tableInfo.getTable().getId());
LOG.info("replay recover table[{}]", tableId);
} finally {
table.writeUnlock();
String tableName = table.getName();
if (innerRecoverTable(db, table, tableName, newTableName, iterator, true)) {
break;
}
break;
}
}

public synchronized void recoverPartition(long dbId, OlapTable table, String partitionName) throws DdlException {
private synchronized boolean innerRecoverTable(Database db, Table table, String tableName, String newTableName,
Iterator<Map.Entry<Long, RecycleTableInfo>> iterator,
boolean isReplay) throws DdlException {
table.writeLock();
try {
if (!Strings.isNullOrEmpty(newTableName)) {
if (Env.isStoredTableNamesLowerCase()) {
newTableName = newTableName.toLowerCase();
}
if (!tableName.equals(newTableName)) {
// check if name is already used
if (db.getTable(newTableName).isPresent()) {
throw new DdlException("Table name[" + newTableName + "] is already used");
}

if (table.getType() == TableType.OLAP) {
// olap table should also check if any rollup has same name as "newTableName"
((OlapTable) table).checkAndSetName(newTableName, false);
} else {
table.setName(newTableName);
}
}
}

db.createTable(table);
if (isReplay) {
iterator.remove();
} else {
idToTable.remove(table.getId());
}
idToRecycleTime.remove(table.getId());
if (isReplay) {
LOG.info("replay recover table[{}]", table.getId());
} else {
// log
RecoverInfo recoverInfo = new RecoverInfo(db.getId(), table.getId(), -1L, "", newTableName, "");
Env.getCurrentEnv().getEditLog().logRecoverTable(recoverInfo);
}
} finally {
table.writeUnlock();
}
return true;
}

public synchronized void recoverPartition(long dbId, OlapTable table, String partitionName,
long partitionIdToRecover, String newPartitionName) throws DdlException {
long recycleTime = -1;
// make sure to get db write lock
RecyclePartitionInfo recoverPartitionInfo = null;

@@ -423,12 +433,20 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
continue;
}

recoverPartitionInfo = partitionInfo;
break;
if (partitionIdToRecover == -1) {
if (recycleTime <= idToRecycleTime.get(entry.getKey())) {
recycleTime = idToRecycleTime.get(entry.getKey());
recoverPartitionInfo = partitionInfo;
}
} else if (entry.getKey() == partitionIdToRecover) {
recoverPartitionInfo = partitionInfo;
break;
}
}

if (recoverPartitionInfo == null) {
throw new DdlException("No partition named " + partitionName + " in table " + table.getName());
throw new DdlException("No partition named '" + partitionName + "' or partition id '" + partitionIdToRecover
+ "' in table " + table.getName());
}

PartitionInfo partitionInfo = table.getPartitionInfo();
@@ -447,7 +465,15 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
// recover partition
Partition recoverPartition = recoverPartitionInfo.getPartition();
Preconditions.checkState(recoverPartition.getName().equalsIgnoreCase(partitionName));
if (!Strings.isNullOrEmpty(newPartitionName)) {
if (table.checkPartitionNameExist(newPartitionName)) {
throw new DdlException("Partition name[" + newPartitionName + "] is already used");
}
}
table.addPartition(recoverPartition);
if (!Strings.isNullOrEmpty(newPartitionName)) {
table.renamePartition(partitionName, newPartitionName);
}

// recover partition info
long partitionId = recoverPartition.getId();
@@ -461,13 +487,14 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
idToRecycleTime.remove(partitionId);

// log
RecoverInfo recoverInfo = new RecoverInfo(dbId, table.getId(), partitionId);
RecoverInfo recoverInfo = new RecoverInfo(dbId, table.getId(), partitionId, "", "", newPartitionName);
Env.getCurrentEnv().getEditLog().logRecoverPartition(recoverInfo);
LOG.info("recover partition[{}]", partitionId);
}

// The caller should keep table write lock
public synchronized void replayRecoverPartition(OlapTable table, long partitionId) {
public synchronized void replayRecoverPartition(OlapTable table, long partitionId,
String newPartitionName) throws DdlException {
Iterator<Map.Entry<Long, RecyclePartitionInfo>> iterator = idToPartition.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<Long, RecyclePartitionInfo> entry = iterator.next();
@@ -477,8 +504,15 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
}

Preconditions.checkState(recyclePartitionInfo.getTableId() == table.getId());

if (!Strings.isNullOrEmpty(newPartitionName)) {
if (table.checkPartitionNameExist(newPartitionName)) {
throw new DdlException("Partition name[" + newPartitionName + "] is already used");
}
}
table.addPartition(recyclePartitionInfo.getPartition());
if (!Strings.isNullOrEmpty(newPartitionName)) {
table.renamePartition(recyclePartitionInfo.getPartition().getName(), newPartitionName);
}
PartitionInfo partitionInfo = table.getPartitionInfo();
PartitionItem recoverItem = null;
if (partitionInfo.getType() == PartitionType.RANGE) {
@@ -549,7 +583,7 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
if (!idToDatabase.containsKey(dbId)) {
LOG.error("db[{}] is neither in catalog nor in recycle bin"
+ " when rebuilding inverted index from recycle bin, partition[{}]",
dbId, partitionId);
dbId, partitionId);
continue;
}
} else {
@@ -560,7 +594,7 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
if (!idToTable.containsKey(tableId)) {
LOG.error("table[{}] is neither in catalog nor in recycle bin"
+ " when rebuilding inverted index from recycle bin, partition[{}]",
tableId, partitionId);
tableId, partitionId);
continue;
}
RecycleTableInfo tableInfo = idToTable.get(tableId);
@@ -596,6 +630,88 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
eraseDatabase(currentTimeMs);
}

public List<List<String>> getInfo() {
List<List<String>> infos = Lists.newArrayList();
List<List<String>> dbInfos = Lists.newArrayList();
for (Map.Entry<Long, RecycleDatabaseInfo> entry : idToDatabase.entrySet()) {
List<String> info = Lists.newArrayList();
info.add("Database");
RecycleDatabaseInfo dbInfo = entry.getValue();
Database db = dbInfo.getDb();
info.add(db.getFullName());
info.add(String.valueOf(entry.getKey()));
info.add("");
info.add("");
//info.add(String.valueOf(idToRecycleTime.get(entry.getKey())));
info.add(TimeUtils.longToTimeString(idToRecycleTime.get(entry.getKey())));

dbInfos.add(info);
}
// sort by Name, DropTime
dbInfos.sort((x, y) -> {
int nameRet = x.get(1).compareTo(y.get(1));
if (nameRet == 0) {
return x.get(5).compareTo(y.get(5));
} else {
return nameRet;
}
});

List<List<String>> tableInfos = Lists.newArrayList();
for (Map.Entry<Long, RecycleTableInfo> entry : idToTable.entrySet()) {
List<String> info = Lists.newArrayList();
info.add("Table");
RecycleTableInfo tableInfo = entry.getValue();
Table table = tableInfo.getTable();
info.add(table.getName());
info.add(String.valueOf(tableInfo.getDbId()));
info.add(String.valueOf(entry.getKey()));
info.add("");
//info.add(String.valueOf(idToRecycleTime.get(entry.getKey())));
info.add(TimeUtils.longToTimeString(idToRecycleTime.get(entry.getKey())));

tableInfos.add(info);
}
// sort by Name, DropTime
tableInfos.sort((x, y) -> {
int nameRet = x.get(1).compareTo(y.get(1));
if (nameRet == 0) {
return x.get(5).compareTo(y.get(5));
} else {
return nameRet;
}
});

List<List<String>> partitionInfos = Lists.newArrayList();
for (Map.Entry<Long, RecyclePartitionInfo> entry : idToPartition.entrySet()) {
List<String> info = Lists.newArrayList();
info.add("Partition");
RecyclePartitionInfo partitionInfo = entry.getValue();
Partition partition = partitionInfo.getPartition();
info.add(partition.getName());
info.add(String.valueOf(partitionInfo.getDbId()));
info.add(String.valueOf(partitionInfo.getTableId()));
info.add(String.valueOf(entry.getKey()));
//info.add(String.valueOf(idToRecycleTime.get(entry.getKey())));
info.add(TimeUtils.longToTimeString(idToRecycleTime.get(entry.getKey())));

partitionInfos.add(info);
}
// sort by Name, DropTime
partitionInfos.sort((x, y) -> {
int nameRet = x.get(1).compareTo(y.get(1));
if (nameRet == 0) {
return x.get(5).compareTo(y.get(5));
} else {
return nameRet;
}
});

infos = Stream.of(dbInfos, tableInfos, partitionInfos).flatMap(Collection::stream).collect(Collectors.toList());

return infos;
}

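getInfo() emits one row per recycled database, table, and partition, each group sorted by Name and then DropTime; database rows leave TableId and PartitionId empty, and table rows leave PartitionId empty. A hypothetical result shape, matching the TITLE_NAMES columns (all values invented for illustration):

    SHOW CATALOG RECYCLE BIN;
    +-----------+------+-------+---------+-------------+---------------------+
    | Type      | Name | DbId  | TableId | PartitionId | DropTime            |
    +-----------+------+-------+---------+-------------+---------------------+
    | Database  | d1   | 10001 |         |             | 2022-10-08 12:00:00 |
    | Table     | t1   | 10002 | 15001   |             | 2022-10-08 12:01:00 |
    | Partition | p1   | 10002 | 15002   | 25001       | 2022-10-08 12:02:00 |
    +-----------+------+-------+---------+-------------+---------------------+
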
@Override
public void write(DataOutput out) throws IOException {
int count = idToDatabase.size();
@@ -663,14 +779,17 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
public class RecycleDatabaseInfo implements Writable {
private Database db;
private Set<String> tableNames;
private Set<Long> tableIds;

public RecycleDatabaseInfo() {
tableNames = Sets.newHashSet();
tableIds = Sets.newHashSet();
}

public RecycleDatabaseInfo(Database db, Set<String> tableNames) {
public RecycleDatabaseInfo(Database db, Set<String> tableNames, Set<Long> tableIds) {
this.db = db;
this.tableNames = tableNames;
this.tableIds = tableIds;
}

public Database getDb() {
@@ -681,6 +800,10 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
return tableNames;
}

public Set<Long> getTableIds() {
return tableIds;
}

@Override
public void write(DataOutput out) throws IOException {
db.write(out);
@@ -690,6 +813,12 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
for (String tableName : tableNames) {
Text.writeString(out, tableName);
}

count = tableIds.size();
out.writeInt(count);
for (long tableId : tableIds) {
out.writeLong(tableId);
}
}

public void readFields(DataInput in) throws IOException {
@@ -700,6 +829,13 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
String tableName = Text.readString(in);
tableNames.add(tableName);
}
if (Env.getCurrentEnvJournalVersion() >= FeMetaVersion.VERSION_114) {
count = in.readInt();
for (int i = 0; i < count; i++) {
long tableId = in.readLong();
tableIds.add(tableId);
}
}
}
}

@@ -2617,7 +2617,7 @@ public class Env {
}

public void replayCreateDb(Database db) {
getInternalCatalog().replayCreateDb(db);
getInternalCatalog().replayCreateDb(db, "");
}

public void dropDb(DropDbStmt stmt) throws DdlException {
@@ -2628,8 +2628,8 @@ public class Env {
getInternalCatalog().replayDropLinkDb(info);
}

public void replayDropDb(String dbName, boolean isForceDrop) throws DdlException {
getInternalCatalog().replayDropDb(dbName, isForceDrop);
public void replayDropDb(String dbName, boolean isForceDrop, Long recycleTime) throws DdlException {
getInternalCatalog().replayDropDb(dbName, isForceDrop, recycleTime);
}

public void recoverDatabase(RecoverDbStmt recoverStmt) throws DdlException {
@@ -2649,13 +2649,7 @@ public class Env {
}

public void replayRecoverDatabase(RecoverInfo info) {
long dbId = info.getDbId();
Database db = Env.getCurrentRecycleBin().replayRecoverDatabase(dbId);

// add db to catalog
replayCreateDb(db);

LOG.info("replay recover db[{}]", dbId);
getInternalCatalog().replayRecoverDatabase(info);
}

public void alterDatabaseQuota(AlterDatabaseQuotaStmt stmt) throws DdlException {
@@ -2725,7 +2719,7 @@ public class Env {
getInternalCatalog().replayErasePartition(partitionId);
}

public void replayRecoverPartition(RecoverInfo info) throws MetaNotFoundException {
public void replayRecoverPartition(RecoverInfo info) throws MetaNotFoundException, DdlException {
getInternalCatalog().replayRecoverPartition(info);
}

@@ -3210,19 +3204,21 @@ public class Env {
getInternalCatalog().dropTable(stmt);
}

public boolean unprotectDropTable(Database db, Table table, boolean isForceDrop, boolean isReplay) {
return getInternalCatalog().unprotectDropTable(db, table, isForceDrop, isReplay);
public boolean unprotectDropTable(Database db, Table table, boolean isForceDrop, boolean isReplay,
Long recycleTime) {
return getInternalCatalog().unprotectDropTable(db, table, isForceDrop, isReplay, recycleTime);
}

public void replayDropTable(Database db, long tableId, boolean isForceDrop) throws MetaNotFoundException {
getInternalCatalog().replayDropTable(db, tableId, isForceDrop);
public void replayDropTable(Database db, long tableId, boolean isForceDrop,
Long recycleTime) throws MetaNotFoundException {
getInternalCatalog().replayDropTable(db, tableId, isForceDrop, recycleTime);
}

public void replayEraseTable(long tableId) {
getInternalCatalog().replayEraseTable(tableId);
}

public void replayRecoverTable(RecoverInfo info) throws MetaNotFoundException {
public void replayRecoverTable(RecoverInfo info) throws MetaNotFoundException, DdlException {
getInternalCatalog().replayRecoverTable(info);
}

@@ -174,6 +174,7 @@ import org.apache.doris.thrift.TTaskType;

import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
@@ -436,9 +437,12 @@ public class InternalCatalog implements CatalogIf<Database> {
*
* @param db
*/
public void replayCreateDb(Database db) {
public void replayCreateDb(Database db, String newDbName) {
tryLock(true);
try {
if (!Strings.isNullOrEmpty(newDbName)) {
db.setNameWithLock(newDbName);
}
unprotectCreateDb(db);
} finally {
unlock();
@@ -465,6 +469,7 @@ public class InternalCatalog implements CatalogIf<Database> {
// 2. drop tables in db
Database db = this.fullNameToDb.get(dbName);
db.writeLock();
long recycleTime = 0;
try {
if (!stmt.isForceDrop()) {
if (Env.getCurrentEnv().getGlobalTransactionMgr().existCommittedTxns(db.getId(), null, null)) {
@@ -511,6 +516,10 @@ public class InternalCatalog implements CatalogIf<Database> {
// save table names for recycling
Set<String> tableNames = db.getTableNamesWithLock();
List<Table> tableList = db.getTablesOnIdOrder();
Set<Long> tableIds = Sets.newHashSet();
for (Table table : tableList) {
tableIds.add(table.getId());
}
MetaLockUtils.writeLockTables(tableList);
try {
if (!stmt.isForceDrop()) {
@@ -527,13 +536,14 @@ public class InternalCatalog implements CatalogIf<Database> {
}
}
}
unprotectDropDb(db, stmt.isForceDrop(), false);
unprotectDropDb(db, stmt.isForceDrop(), false, 0);
} finally {
MetaLockUtils.writeUnlockTables(tableList);
}

if (!stmt.isForceDrop()) {
Env.getCurrentRecycleBin().recycleDatabase(db, tableNames);
Env.getCurrentRecycleBin().recycleDatabase(db, tableNames, tableIds, false, 0);
recycleTime = Env.getCurrentRecycleBin().getRecycleTimeById(db.getId());
} else {
Env.getCurrentEnv().eraseDatabase(db.getId(), false);
}
@@ -546,7 +556,7 @@ public class InternalCatalog implements CatalogIf<Database> {
fullNameToDb.remove(db.getFullName());
final Cluster cluster = nameToCluster.get(db.getClusterName());
cluster.removeDb(dbName, db.getId());
DropDbInfo info = new DropDbInfo(dbName, stmt.isForceDrop());
DropDbInfo info = new DropDbInfo(dbName, stmt.isForceDrop(), recycleTime);
Env.getCurrentEnv().getEditLog().logDropDb(info);
} finally {
unlock();
@@ -555,13 +565,13 @@ public class InternalCatalog implements CatalogIf<Database> {
LOG.info("finish drop database[{}], is force : {}", dbName, stmt.isForceDrop());
}

public void unprotectDropDb(Database db, boolean isForeDrop, boolean isReplay) {
public void unprotectDropDb(Database db, boolean isForeDrop, boolean isReplay, long recycleTime) {
// drop Iceberg database table creation records
if (db.getDbProperties().getIcebergProperty().isExist()) {
icebergTableCreationRecordMgr.deregisterDb(db);
}
for (Table table : db.getTables()) {
unprotectDropTable(db, table, isForeDrop, isReplay);
unprotectDropTable(db, table, isForeDrop, isReplay, recycleTime);
}
db.markDropped();
}
@@ -581,7 +591,7 @@ public class InternalCatalog implements CatalogIf<Database> {
}
}

public void replayDropDb(String dbName, boolean isForceDrop) throws DdlException {
public void replayDropDb(String dbName, boolean isForceDrop, Long recycleTime) throws DdlException {
tryLock(true);
try {
Database db = fullNameToDb.get(dbName);
@@ -589,14 +599,18 @@ public class InternalCatalog implements CatalogIf<Database> {
try {
Set<String> tableNames = db.getTableNamesWithLock();
List<Table> tableList = db.getTablesOnIdOrder();
Set<Long> tableIds = Sets.newHashSet();
for (Table table : tableList) {
tableIds.add(table.getId());
}
MetaLockUtils.writeLockTables(tableList);
try {
unprotectDropDb(db, isForceDrop, true);
unprotectDropDb(db, isForceDrop, true, recycleTime);
} finally {
MetaLockUtils.writeUnlockTables(tableList);
}
if (!isForceDrop) {
Env.getCurrentRecycleBin().recycleDatabase(db, tableNames);
Env.getCurrentRecycleBin().recycleDatabase(db, tableNames, tableIds, true, recycleTime);
} else {
Env.getCurrentEnv().eraseDatabase(db.getId(), false);
}
@@ -615,11 +629,18 @@ public class InternalCatalog implements CatalogIf<Database> {

public void recoverDatabase(RecoverDbStmt recoverStmt) throws DdlException {
// check whether a db with the same name already exists
if (getDb(recoverStmt.getDbName()).isPresent()) {
throw new DdlException("Database[" + recoverStmt.getDbName() + "] already exist.");
String newDbName = recoverStmt.getNewDbName();
if (!Strings.isNullOrEmpty(newDbName)) {
if (getDb(newDbName).isPresent()) {
throw new DdlException("Database[" + newDbName + "] already exist.");
}
} else {
if (getDb(recoverStmt.getDbName()).isPresent()) {
throw new DdlException("Database[" + recoverStmt.getDbName() + "] already exist.");
}
}

Database db = Env.getCurrentRecycleBin().recoverDatabase(recoverStmt.getDbName());
Database db = Env.getCurrentRecycleBin().recoverDatabase(recoverStmt.getDbName(), recoverStmt.getDbId());

// add db to catalog
if (!tryLock(false)) {
@@ -629,19 +650,34 @@ public class InternalCatalog implements CatalogIf<Database> {
List<Table> tableList = db.getTablesOnIdOrder();
MetaLockUtils.writeLockTables(tableList);
try {
if (fullNameToDb.containsKey(db.getFullName())) {
throw new DdlException("Database[" + db.getFullName() + "] already exist.");
// it's ok that we do not put db back to CatalogRecycleBin
// because this db can no longer be recovered
if (!Strings.isNullOrEmpty(newDbName)) {
if (fullNameToDb.containsKey(newDbName)) {
throw new DdlException("Database[" + newDbName + "] already exist.");
// it's ok that we do not put db back to CatalogRecycleBin
// because this db can no longer be recovered
}
} else {
if (fullNameToDb.containsKey(db.getFullName())) {
throw new DdlException("Database[" + db.getFullName() + "] already exist.");
// it's ok that we do not put db back to CatalogRecycleBin
// because this db can no longer be recovered
}
}
if (!Strings.isNullOrEmpty(newDbName)) {
try {
db.writeUnlock();
db.setNameWithLock(newDbName);
} finally {
db.writeLock();
}
}

fullNameToDb.put(db.getFullName(), db);
idToDb.put(db.getId(), db);
final Cluster cluster = nameToCluster.get(db.getClusterName());
cluster.addDb(db.getFullName(), db.getId());

// log
RecoverInfo recoverInfo = new RecoverInfo(db.getId(), -1L, -1L);
RecoverInfo recoverInfo = new RecoverInfo(db.getId(), -1L, -1L, newDbName, "", "");
Env.getCurrentEnv().getEditLog().logRecoverDb(recoverInfo);
db.unmarkDropped();
} finally {
@@ -656,14 +692,21 @@ public class InternalCatalog implements CatalogIf<Database> {
public void recoverTable(RecoverTableStmt recoverStmt) throws DdlException {
String dbName = recoverStmt.getDbName();
String tableName = recoverStmt.getTableName();
String newTableName = recoverStmt.getNewTableName();

Database db = (Database) getDbOrDdlException(dbName);
db.writeLockOrDdlException();
try {
if (db.getTable(tableName).isPresent()) {
ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName);
if (Strings.isNullOrEmpty(newTableName)) {
if (db.getTable(tableName).isPresent()) {
ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName);
}
} else {
if (db.getTable(newTableName).isPresent()) {
ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, newTableName);
}
}
if (!Env.getCurrentRecycleBin().recoverTable(db, tableName)) {
if (!Env.getCurrentRecycleBin().recoverTable(db, tableName, recoverStmt.getTableId(), newTableName)) {
ErrorReport.reportDdlException(ErrorCode.ERR_UNKNOWN_TABLE, tableName, dbName);
}
} finally {
@@ -680,11 +723,21 @@ public class InternalCatalog implements CatalogIf<Database> {
olapTable.writeLockOrDdlException();
try {
String partitionName = recoverStmt.getPartitionName();
if (olapTable.getPartition(partitionName) != null) {
throw new DdlException("partition[" + partitionName + "] already exist in table[" + tableName + "]");
String newPartitionName = recoverStmt.getNewPartitionName();
if (Strings.isNullOrEmpty(newPartitionName)) {
if (olapTable.getPartition(partitionName) != null) {
throw new DdlException("partition[" + partitionName + "] "
+ "already exist in table[" + tableName + "]");
}
} else {
if (olapTable.getPartition(newPartitionName) != null) {
throw new DdlException("partition[" + newPartitionName + "] "
+ "already exist in table[" + tableName + "]");
}
}

Env.getCurrentRecycleBin().recoverPartition(db.getId(), olapTable, partitionName);
Env.getCurrentRecycleBin().recoverPartition(db.getId(), olapTable, partitionName,
recoverStmt.getPartitionId(), newPartitionName);
} finally {
olapTable.writeUnlock();
}
@@ -696,11 +749,12 @@ public class InternalCatalog implements CatalogIf<Database> {

public void replayRecoverDatabase(RecoverInfo info) {
long dbId = info.getDbId();
String newDbName = info.getNewDbName();
Database db = Env.getCurrentRecycleBin().replayRecoverDatabase(dbId);

// add db to catalog
replayCreateDb(db);

replayCreateDb(db, newDbName);
db.unmarkDropped();
LOG.info("replay recover db[{}]", dbId);
}

@@ -843,8 +897,8 @@ public class InternalCatalog implements CatalogIf<Database> {
+ " please use \"DROP table FORCE\".");
}
}
DropInfo info = new DropInfo(db.getId(), table.getId(), -1L, stmt.isForceDrop());
table.writeLock();
long recycleTime = 0;
try {
if (table instanceof OlapTable && !stmt.isForceDrop()) {
OlapTable olapTable = (OlapTable) table;
@@ -855,10 +909,14 @@ public class InternalCatalog implements CatalogIf<Database> {
+ " please use \"DROP table FORCE\".");
}
}
unprotectDropTable(db, table, stmt.isForceDrop(), false);
unprotectDropTable(db, table, stmt.isForceDrop(), false, 0);
if (!stmt.isForceDrop()) {
recycleTime = Env.getCurrentRecycleBin().getRecycleTimeById(table.getId());
}
} finally {
table.writeUnlock();
}
DropInfo info = new DropInfo(db.getId(), table.getId(), -1L, stmt.isForceDrop(), recycleTime);
Env.getCurrentEnv().getEditLog().logDropTable(info);
} finally {
db.writeUnlock();
@@ -866,7 +924,8 @@ public class InternalCatalog implements CatalogIf<Database> {
LOG.info("finished dropping table: {} from db: {}, is force: {}", tableName, dbName, stmt.isForceDrop());
}

public boolean unprotectDropTable(Database db, Table table, boolean isForceDrop, boolean isReplay) {
public boolean unprotectDropTable(Database db, Table table, boolean isForceDrop, boolean isReplay,
long recycleTime) {
if (table.getType() == TableType.ELASTICSEARCH) {
esRepository.deRegisterTable(table.getId());
} else if (table.getType() == TableType.OLAP) {
@@ -880,7 +939,7 @@ public class InternalCatalog implements CatalogIf<Database> {

db.dropTable(table.getName());
if (!isForceDrop) {
Env.getCurrentRecycleBin().recycleTable(db.getId(), table, isReplay);
Env.getCurrentRecycleBin().recycleTable(db.getId(), table, isReplay, recycleTime);
} else {
if (table.getType() == TableType.OLAP) {
Env.getCurrentEnv().onEraseOlapTable((OlapTable) table, isReplay);
@@ -898,12 +957,13 @@ public class InternalCatalog implements CatalogIf<Database> {
return true;
}

public void replayDropTable(Database db, long tableId, boolean isForceDrop) throws MetaNotFoundException {
public void replayDropTable(Database db, long tableId, boolean isForceDrop,
Long recycleTime) throws MetaNotFoundException {
Table table = db.getTableOrMetaException(tableId);
db.writeLock();
table.writeLock();
try {
unprotectDropTable(db, table, isForceDrop, true);
unprotectDropTable(db, table, isForceDrop, true, recycleTime);
} finally {
table.writeUnlock();
db.writeUnlock();
@@ -914,11 +974,11 @@ public class InternalCatalog implements CatalogIf<Database> {
Env.getCurrentRecycleBin().replayEraseTable(tableId);
}

public void replayRecoverTable(RecoverInfo info) throws MetaNotFoundException {
public void replayRecoverTable(RecoverInfo info) throws MetaNotFoundException, DdlException {
Database db = (Database) getDbOrMetaException(info.getDbId());
db.writeLock();
db.writeLockOrDdlException();
try {
Env.getCurrentRecycleBin().replayRecoverTable(db, info.getTableId());
Env.getCurrentRecycleBin().replayRecoverTable(db, info.getTableId(), info.getNewTableName());
} finally {
db.writeUnlock();
}
@ -1509,11 +1569,13 @@ public class InternalCatalog implements CatalogIf<Database> {
|
||||
}
|
||||
|
||||
// drop
|
||||
long recycleTime = 0;
|
||||
if (isTempPartition) {
|
||||
olapTable.dropTempPartition(partitionName, true);
|
||||
} else {
|
||||
Partition partition = null;
|
||||
if (!clause.isForceDrop()) {
|
||||
Partition partition = olapTable.getPartition(partitionName);
|
||||
partition = olapTable.getPartition(partitionName);
|
||||
if (partition != null) {
|
||||
if (Env.getCurrentEnv().getGlobalTransactionMgr()
|
||||
.existCommittedTxns(db.getId(), olapTable.getId(), partition.getId())) {
|
||||
@ -1526,11 +1588,14 @@ public class InternalCatalog implements CatalogIf<Database> {
|
||||
}
|
||||
}
|
||||
olapTable.dropPartition(db.getId(), partitionName, clause.isForceDrop());
|
||||
if (!clause.isForceDrop() && partition != null) {
|
||||
recycleTime = Env.getCurrentRecycleBin().getRecycleTimeById(partition.getId());
|
||||
}
|
||||
}
|
||||
|
||||
// log
|
||||
DropPartitionInfo info = new DropPartitionInfo(db.getId(), olapTable.getId(), partitionName, isTempPartition,
|
||||
clause.isForceDrop());
|
||||
clause.isForceDrop(), recycleTime);
|
||||
Env.getCurrentEnv().getEditLog().logDropPartition(info);
|
||||
|
||||
LOG.info("succeed in dropping partition[{}], is temp : {}, is force : {}", partitionName, isTempPartition,
|
||||
@ -1545,7 +1610,11 @@ public class InternalCatalog implements CatalogIf<Database> {
|
||||
if (info.isTempPartition()) {
|
||||
olapTable.dropTempPartition(info.getPartitionName(), true);
|
||||
} else {
|
||||
olapTable.dropPartition(info.getDbId(), info.getPartitionName(), info.isForceDrop());
|
||||
Partition partition = olapTable.dropPartition(info.getDbId(), info.getPartitionName(),
|
||||
info.isForceDrop());
|
||||
if (!info.isForceDrop() && partition != null && info.getRecycleTime() != 0) {
|
||||
Env.getCurrentRecycleBin().setRecycleTimeByIdForReplay(partition.getId(), info.getRecycleTime());
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
olapTable.writeUnlock();
|
||||
@ -1556,12 +1625,13 @@ public class InternalCatalog implements CatalogIf<Database> {
|
||||
Env.getCurrentRecycleBin().replayErasePartition(partitionId);
|
||||
}
|
||||
|
||||
public void replayRecoverPartition(RecoverInfo info) throws MetaNotFoundException {
|
||||
public void replayRecoverPartition(RecoverInfo info) throws MetaNotFoundException, DdlException {
|
||||
Database db = (Database) getDbOrMetaException(info.getDbId());
|
||||
OlapTable olapTable = (OlapTable) db.getTableOrMetaException(info.getTableId(), TableType.OLAP);
|
||||
olapTable.writeLock();
|
||||
olapTable.writeLockOrDdlException();
|
||||
try {
|
||||
Env.getCurrentRecycleBin().replayRecoverPartition(olapTable, info.getPartitionId());
|
||||
Env.getCurrentRecycleBin().replayRecoverPartition(olapTable, info.getPartitionId(),
|
||||
info.getNewPartitionName());
|
||||
} finally {
|
||||
olapTable.writeUnlock();
|
||||
}
|
||||
|
||||
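Note: every recover path above funnels a (name, id, newName) triple into CatalogRecycleBin. A minimal, self-contained sketch of that matching rule follows; the class and method names here are illustrative, not the real CatalogRecycleBin API, and this sketch returns the first match rather than modeling Doris's exact pick order.

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical sketch: recycled objects keyed by id, matched by name or explicit id.
class RecycleBinSketch {
    static class Entry {
        final long id;
        final String name;
        Entry(long id, String name) { this.id = id; this.name = name; }
    }

    private final Map<Long, Entry> entries = new LinkedHashMap<>();

    void add(long id, String name) {
        entries.put(id, new Entry(id, name));
    }

    // id == -1 means "not specified", mirroring opt_id's default in the grammar.
    Entry find(String name, long id) {
        for (Entry e : entries.values()) {
            if (id != -1 ? e.id == id : e.name.equals(name)) {
                return e;
            }
        }
        return null;
    }

    public static void main(String[] args) {
        RecycleBinSketch bin = new RecycleBinSketch();
        bin.add(1001, "tbl1");
        bin.add(1002, "tbl1"); // the same name dropped twice
        System.out.println(bin.find("tbl1", -1).id);   // 1001 -- name-only match
        System.out.println(bin.find("tbl1", 1002).id); // 1002 -- id pins the exact drop
    }
}

An explicit id is what lets SHOW CATALOG RECYCLE BIN users disambiguate between multiple dropped copies of the same name.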
@ -32,14 +32,17 @@ public class DropDbInfo implements Writable {
private String dbName;
@SerializedName(value = "forceDrop")
private boolean forceDrop = false;
@SerializedName(value = "recycleTime")
private long recycleTime = 0;

public DropDbInfo() {
this("", false);
this("", false, 0);
}

public DropDbInfo(String dbName, boolean forceDrop) {
public DropDbInfo(String dbName, boolean forceDrop, long recycleTime) {
this.dbName = dbName;
this.forceDrop = forceDrop;
this.recycleTime = recycleTime;
}

public String getDbName() {
@ -50,6 +53,10 @@ public class DropDbInfo implements Writable {
return forceDrop;
}

public Long getRecycleTime() {
return recycleTime;
}

@Deprecated
private void readFields(DataInput in) throws IOException {
dbName = Text.readString(in);
@ -78,7 +85,8 @@ public class DropDbInfo implements Writable {
DropDbInfo info = (DropDbInfo) obj;

return (dbName.equals(info.getDbName()))
&& (forceDrop == info.isForceDrop());
&& (forceDrop == info.isForceDrop())
&& (recycleTime == info.getRecycleTime());
}

}

@ -17,6 +17,8 @@

package org.apache.doris.persist;

import org.apache.doris.catalog.Env;
import org.apache.doris.common.FeMetaVersion;
import org.apache.doris.common.io.Writable;

import java.io.DataInput;
@ -29,15 +31,17 @@ public class DropInfo implements Writable {

private long indexId;
private boolean forceDrop = false;
private long recycleTime = 0;

public DropInfo() {
}

public DropInfo(long dbId, long tableId, long indexId, boolean forceDrop) {
public DropInfo(long dbId, long tableId, long indexId, boolean forceDrop, long recycleTime) {
this.dbId = dbId;
this.tableId = tableId;
this.indexId = indexId;
this.forceDrop = forceDrop;
this.recycleTime = recycleTime;
}

public long getDbId() {
@ -56,6 +60,10 @@ public class DropInfo implements Writable {
return forceDrop;
}

public Long getRecycleTime() {
return recycleTime;
}

@Override
public void write(DataOutput out) throws IOException {
out.writeLong(dbId);
@ -67,6 +75,7 @@ public class DropInfo implements Writable {
out.writeBoolean(true);
out.writeLong(indexId);
}
out.writeLong(recycleTime);
}

public void readFields(DataInput in) throws IOException {
@ -79,6 +88,9 @@ public class DropInfo implements Writable {
} else {
indexId = -1L;
}
if (Env.getCurrentEnvJournalVersion() >= FeMetaVersion.VERSION_114) {
recycleTime = in.readLong();
}
}

public static DropInfo read(DataInput in) throws IOException {
@ -99,6 +111,6 @@ public class DropInfo implements Writable {
DropInfo info = (DropInfo) obj;

return (dbId == info.dbId) && (tableId == info.tableId) && (indexId == info.indexId)
&& (forceDrop == info.forceDrop);
&& (forceDrop == info.forceDrop) && (recycleTime == info.recycleTime);
}
}

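The readFields change above is the usual meta-version compatibility pattern: new writers always emit the extra long, and readers consume it only when the journal was produced at VERSION_114 or later, so pre-upgrade images still load cleanly. A standalone sketch of the pattern (the version constant is copied from the diff; the rest is illustrative, not the real FeMetaVersion plumbing):

import java.io.*;

class VersionGatedIo {
    static final int VERSION_114 = 114;

    static void write(DataOutput out, long recycleTime) throws IOException {
        out.writeLong(recycleTime); // new writers always emit the field
    }

    static long read(DataInput in, int journalVersion) throws IOException {
        if (journalVersion >= VERSION_114) {
            return in.readLong(); // field exists only in new-format journals
        }
        return 0; // old journals lack it; fall back to the default
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        write(new DataOutputStream(bos), 123L);
        DataInput in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
        System.out.println(read(in, 114)); // 123
        // Treating the same stream as a pre-114 journal would skip the read entirely.
    }
}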
@ -38,17 +38,20 @@ public class DropPartitionInfo implements Writable {
private boolean isTempPartition = false;
@SerializedName(value = "forceDrop")
private boolean forceDrop = false;
@SerializedName(value = "recycleTime")
private long recycleTime = 0;

private DropPartitionInfo() {
}

public DropPartitionInfo(Long dbId, Long tableId, String partitionName,
boolean isTempPartition, boolean forceDrop) {
boolean isTempPartition, boolean forceDrop, long recycleTime) {
this.dbId = dbId;
this.tableId = tableId;
this.partitionName = partitionName;
this.isTempPartition = isTempPartition;
this.forceDrop = forceDrop;
this.recycleTime = recycleTime;
}

public Long getDbId() {
@ -71,6 +74,10 @@ public class DropPartitionInfo implements Writable {
return forceDrop;
}

public Long getRecycleTime() {
return recycleTime;
}

@Deprecated
private void readFields(DataInput in) throws IOException {
dbId = in.readLong();
@ -104,6 +111,7 @@ public class DropPartitionInfo implements Writable {
&& (tableId.equals(info.tableId))
&& (partitionName.equals(info.partitionName))
&& (isTempPartition == info.isTempPartition)
&& (forceDrop == info.forceDrop);
&& (forceDrop == info.forceDrop)
&& (recycleTime == info.recycleTime);
}
}

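DropDbInfo and DropPartitionInfo, unlike DropInfo, are journaled as JSON, so their new field is wired up with a @SerializedName annotation rather than a version check: an old entry simply omits "recycleTime" and the field deserializes to the primitive default. A small sketch of that behavior with Gson (the nested class is a stand-in, not the real DropPartitionInfo):

import com.google.gson.Gson;
import com.google.gson.annotations.SerializedName;

class DropPartitionJson {
    static class Info {
        @SerializedName(value = "forceDrop")
        boolean forceDrop = false;
        @SerializedName(value = "recycleTime")
        long recycleTime = 0;
    }

    public static void main(String[] args) {
        Gson gson = new Gson();
        // An old journal entry, written before the field existed:
        Info old = gson.fromJson("{\"forceDrop\":true}", Info.class);
        System.out.println(old.recycleTime); // 0 -- the absent field keeps its default
        // New entries round-trip the field:
        System.out.println(gson.toJson(new Info())); // {"forceDrop":false,"recycleTime":0}
    }
}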
@ -172,7 +172,7 @@ public class EditLog {
}
case OperationType.OP_DROP_DB: {
DropDbInfo dropDbInfo = (DropDbInfo) journal.getData();
env.replayDropDb(dropDbInfo.getDbName(), dropDbInfo.isForceDrop());
env.replayDropDb(dropDbInfo.getDbName(), dropDbInfo.isForceDrop(), dropDbInfo.getRecycleTime());
break;
}
case OperationType.OP_ALTER_DB: {
@ -218,7 +218,7 @@ public class EditLog {
Database db = Env.getCurrentInternalCatalog().getDbOrMetaException(info.getDbId());
LOG.info("Begin to unprotect drop table. db = " + db.getFullName() + " table = "
+ info.getTableId());
env.replayDropTable(db, info.getTableId(), info.isForceDrop());
env.replayDropTable(db, info.getTableId(), info.isForceDrop(), info.getRecycleTime());
break;
}
case OperationType.OP_ADD_PARTITION: {
@ -310,7 +310,8 @@ public class EditLog {
BatchDropInfo batchDropInfo = (BatchDropInfo) journal.getData();
for (long indexId : batchDropInfo.getIndexIdSet()) {
env.getMaterializedViewHandler().replayDropRollup(
new DropInfo(batchDropInfo.getDbId(), batchDropInfo.getTableId(), indexId, false), env);
new DropInfo(batchDropInfo.getDbId(), batchDropInfo.getTableId(), indexId, false, 0),
env);
}
break;
}

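The EditLog changes are pure plumbing: each replay handler forwards the recycleTime recorded in the drop log, so a replaying FE expires the recycled object at the same moment the original FE would have. A toy dispatcher showing the pass-through (the operation codes here are placeholders, not Doris's OperationType values):

class ReplaySketch {
    static final short OP_DROP_DB = 1;
    static final short OP_DROP_TABLE = 2;

    // Whatever was persisted in the drop log travels unchanged into the replay call.
    static String replay(short opCode, boolean forceDrop, long recycleTime) {
        switch (opCode) {
            case OP_DROP_DB:
                return "replayDropDb(force=" + forceDrop + ", recycleTime=" + recycleTime + ")";
            case OP_DROP_TABLE:
                return "replayDropTable(force=" + forceDrop + ", recycleTime=" + recycleTime + ")";
            default:
                return "unknown op";
        }
    }

    public static void main(String[] args) {
        System.out.println(replay(OP_DROP_DB, false, 1665000000000L));
    }
}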
@ -17,6 +17,9 @@

package org.apache.doris.persist;

import org.apache.doris.catalog.Env;
import org.apache.doris.common.FeMetaVersion;
import org.apache.doris.common.io.Text;
import org.apache.doris.common.io.Writable;

import java.io.DataInput;
@ -25,17 +28,24 @@ import java.io.IOException;

public class RecoverInfo implements Writable {
private long dbId;
private String newDbName;
private long tableId;
private String newTableName;
private long partitionId;
private String newPartitionName;

public RecoverInfo() {
// for persist
}

public RecoverInfo(long dbId, long tableId, long partitionId) {
public RecoverInfo(long dbId, long tableId, long partitionId, String newDbName, String newTableName,
String newPartitionName) {
this.dbId = dbId;
this.tableId = tableId;
this.partitionId = partitionId;
this.newDbName = newDbName;
this.newTableName = newTableName;
this.newPartitionName = newPartitionName;
}

public long getDbId() {
@ -50,17 +60,38 @@ public class RecoverInfo implements Writable {
return partitionId;
}

public String getNewDbName() {
return newDbName;
}

public String getNewTableName() {
return newTableName;
}

public String getNewPartitionName() {
return newPartitionName;
}

@Override
public void write(DataOutput out) throws IOException {
out.writeLong(dbId);
out.writeLong(tableId);
out.writeLong(partitionId);

Text.writeString(out, newDbName);
Text.writeString(out, newTableName);
Text.writeString(out, newPartitionName);
}

public void readFields(DataInput in) throws IOException {
dbId = in.readLong();
tableId = in.readLong();
partitionId = in.readLong();
if (Env.getCurrentEnvJournalVersion() >= FeMetaVersion.VERSION_114) {
newDbName = Text.readString(in);
newTableName = Text.readString(in);
newPartitionName = Text.readString(in);
}
}

}

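RecoverInfo keeps the classic symmetric layout: the write order and read order must match field for field, and only the trailing, newly added strings are version-gated on the read side. A round-trip sketch, using writeUTF/readUTF as a stand-in for Doris's Text.writeString/readString:

import java.io.*;

class RecoverInfoIo {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bos);
        out.writeLong(10L);     // dbId
        out.writeLong(20L);     // tableId
        out.writeLong(30L);     // partitionId
        out.writeUTF("new_db"); // stand-in for Text.writeString(out, newDbName)
        out.writeUTF("new_tbl");
        out.writeUTF("new_part");

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
        System.out.println(in.readLong()); // 10
        System.out.println(in.readLong()); // 20
        System.out.println(in.readLong()); // 30
        System.out.println(in.readUTF());  // new_db -- only read when journal version >= 114
    }
}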
@ -32,6 +32,7 @@ import org.apache.doris.analysis.ShowAuthorStmt;
import org.apache.doris.analysis.ShowBackendsStmt;
import org.apache.doris.analysis.ShowBackupStmt;
import org.apache.doris.analysis.ShowBrokerStmt;
import org.apache.doris.analysis.ShowCatalogRecycleBinStmt;
import org.apache.doris.analysis.ShowCatalogStmt;
import org.apache.doris.analysis.ShowClusterStmt;
import org.apache.doris.analysis.ShowCollationStmt;
@ -207,6 +208,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import java.util.stream.Collectors;

// Execute one show statement.
@ -371,6 +373,8 @@ public class ShowExecutor {
handleShowAnalyze();
} else if (stmt instanceof AdminCopyTabletStmt) {
handleCopyTablet();
} else if (stmt instanceof ShowCatalogRecycleBinStmt) {
handleShowCatalogRecycleBin();
} else {
handleEmtpy();
}
@ -2378,4 +2382,15 @@ public class ShowExecutor {
AgentTaskQueue.removeBatchTask(batchTask, TTaskType.MAKE_SNAPSHOT);
}
}

private void handleShowCatalogRecycleBin() throws AnalysisException {
ShowCatalogRecycleBinStmt showStmt = (ShowCatalogRecycleBinStmt) stmt;

Predicate<String> predicate = showStmt.getNamePredicate();
List<List<String>> infos = Env.getCurrentRecycleBin().getInfo().stream()
.filter(x -> predicate.test(x.get(1)))
.collect(Collectors.toList());

resultSet = new ShowResultSet(showStmt.getMetaData(), infos);
}
}

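handleShowCatalogRecycleBin() treats each recycle-bin entry as a row of strings and filters on column 1, the object name, using the predicate built from the optional WHERE clause. A self-contained sketch of the same stream pipeline (the row layout is assumed for illustration, not taken from CatalogRecycleBin.getInfo()):

import java.util.Arrays;
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;

class RecycleBinFilter {
    public static void main(String[] args) {
        List<List<String>> rows = Arrays.asList(
                Arrays.asList("Database", "db1", "10001"),
                Arrays.asList("Table", "tbl1", "10002"));
        // e.g. derived from: SHOW CATALOG RECYCLE BIN WHERE NAME LIKE 'tbl%'
        Predicate<String> predicate = name -> name.startsWith("tbl");
        List<List<String>> infos = rows.stream()
                .filter(x -> predicate.test(x.get(1))) // column 1 holds the name
                .collect(Collectors.toList());
        System.out.println(infos); // [[Table, tbl1, 10002]]
    }
}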
@ -115,6 +115,7 @@ import org.apache.doris.qe.SqlModeHelper;
keywordMap.put("begin", new Integer(SqlParserSymbols.KW_BEGIN));
keywordMap.put("between", new Integer(SqlParserSymbols.KW_BETWEEN));
keywordMap.put("bigint", new Integer(SqlParserSymbols.KW_BIGINT));
keywordMap.put("bin", new Integer(SqlParserSymbols.KW_BIN));
keywordMap.put("binlog", new Integer(SqlParserSymbols.KW_BINLOG));
keywordMap.put("bitmap", new Integer(SqlParserSymbols.KW_BITMAP));
keywordMap.put("bitmap_union", new Integer(SqlParserSymbols.KW_BITMAP_UNION));
@ -354,6 +355,7 @@ import org.apache.doris.qe.SqlModeHelper;
keywordMap.put("real", new Integer(SqlParserSymbols.KW_DOUBLE));
keywordMap.put("rebalance", new Integer(SqlParserSymbols.KW_REBALANCE));
keywordMap.put("recover", new Integer(SqlParserSymbols.KW_RECOVER));
keywordMap.put("recycle", new Integer(SqlParserSymbols.KW_RECYCLE));
keywordMap.put("refresh", new Integer(SqlParserSymbols.KW_REFRESH));
keywordMap.put("regexp", new Integer(SqlParserSymbols.KW_REGEXP));
keywordMap.put("release", new Integer(SqlParserSymbols.KW_RELEASE));

@ -133,7 +133,7 @@ public class DropDbTest {
String recoverDbSql = "recover database test2";
RecoverDbStmt recoverDbStmt = (RecoverDbStmt) UtFrameUtils.parseAndAnalyzeStmt(recoverDbSql, connectContext);
ExceptionChecker.expectThrowsWithMsg(DdlException.class,
"Unknown database 'default_cluster:test2'",
"Unknown database 'default_cluster:test2' or database id '-1'",
() -> Env.getCurrentEnv().recoverDatabase(recoverDbStmt));

dropDbSql = "drop schema test3 force";

@ -113,7 +113,7 @@ public class DropPartitionTest {
String recoverPartitionSql = "recover partition p20210202 from test.tbl1";
RecoverPartitionStmt recoverPartitionStmt = (RecoverPartitionStmt) UtFrameUtils.parseAndAnalyzeStmt(recoverPartitionSql, connectContext);
ExceptionChecker.expectThrowsWithMsg(DdlException.class,
"No partition named p20210202 in table tbl1",
"No partition named 'p20210202' or partition id '-1' in table tbl1",
() -> Env.getCurrentEnv().recoverPartition(recoverPartitionStmt));
}

@ -109,7 +109,7 @@ public class DropTableTest {
String recoverDbSql = "recover table test.tbl2";
RecoverTableStmt recoverTableStmt = (RecoverTableStmt) UtFrameUtils.parseAndAnalyzeStmt(recoverDbSql, connectContext);
ExceptionChecker.expectThrowsWithMsg(DdlException.class,
"Unknown table 'tbl2'",
"Unknown table 'tbl2' or table id '-1' in default_cluster:test",
() -> Env.getCurrentEnv().recoverTable(recoverTableStmt));
}
}

@ -87,22 +87,41 @@ public class RecoverTest {
Env.getCurrentEnv().getAlterInstance().processAlterTable(alterTableStmt);
}

private static void recoverDb(String db) throws Exception {
RecoverDbStmt recoverDbStmt = (RecoverDbStmt) UtFrameUtils.parseAndAnalyzeStmt(
"recover database " + db, connectContext);
Env.getCurrentEnv().recoverDatabase(recoverDbStmt);
private static void recoverDb(String db, long dbId) throws Exception {
if (dbId != -1) {
RecoverDbStmt recoverDbStmt = (RecoverDbStmt) UtFrameUtils.parseAndAnalyzeStmt(
"recover database " + db + " " + String.valueOf(dbId), connectContext);
Env.getCurrentEnv().recoverDatabase(recoverDbStmt);
} else {
RecoverDbStmt recoverDbStmt = (RecoverDbStmt) UtFrameUtils.parseAndAnalyzeStmt(
"recover database " + db, connectContext);
Env.getCurrentEnv().recoverDatabase(recoverDbStmt);
}
}

private static void recoverTable(String db, String tbl) throws Exception {
RecoverTableStmt recoverTableStmt = (RecoverTableStmt) UtFrameUtils.parseAndAnalyzeStmt(
"recover table " + db + "." + tbl, connectContext);
Env.getCurrentEnv().recoverTable(recoverTableStmt);
private static void recoverTable(String db, String tbl, long tableId) throws Exception {
if (tableId != -1) {
RecoverTableStmt recoverTableStmt = (RecoverTableStmt) UtFrameUtils.parseAndAnalyzeStmt(
"recover table " + db + "." + tbl + " " + String.valueOf(tableId), connectContext);
Env.getCurrentEnv().recoverTable(recoverTableStmt);
} else {
RecoverTableStmt recoverTableStmt = (RecoverTableStmt) UtFrameUtils.parseAndAnalyzeStmt(
"recover table " + db + "." + tbl, connectContext);
Env.getCurrentEnv().recoverTable(recoverTableStmt);
}
}

private static void recoverPartition(String db, String tbl, String part) throws Exception {
RecoverPartitionStmt recoverPartitionStmt = (RecoverPartitionStmt) UtFrameUtils.parseAndAnalyzeStmt(
"recover partition " + part + " from " + db + "." + tbl, connectContext);
Env.getCurrentEnv().recoverPartition(recoverPartitionStmt);
private static void recoverPartition(String db, String tbl, String part, long partId) throws Exception {
if (partId != -1) {
RecoverPartitionStmt recoverPartitionStmt = (RecoverPartitionStmt) UtFrameUtils.parseAndAnalyzeStmt(
"recover partition " + part + " " + String.valueOf(partId) + " from " + db + "." + tbl,
connectContext);
Env.getCurrentEnv().recoverPartition(recoverPartitionStmt);
} else {
RecoverPartitionStmt recoverPartitionStmt = (RecoverPartitionStmt) UtFrameUtils.parseAndAnalyzeStmt(
"recover partition " + part + " from " + db + "." + tbl, connectContext);
Env.getCurrentEnv().recoverPartition(recoverPartitionStmt);
}
}

private static boolean checkDbExist(String dbName) {
@ -122,6 +141,51 @@ public class RecoverTest {
.flatMap(db -> db.getTable(tblName)).map(table -> table.getPartition(partName)).isPresent();
}

private static long getDbId(String dbName) throws DdlException {
Database db = (Database) Env.getCurrentInternalCatalog()
.getDbOrDdlException((ClusterNamespace.getFullName(SystemInfoService.DEFAULT_CLUSTER, dbName)));
if (db != null) {
return db.getId();
} else {
return -1;
}
}

private static long getTableId(String dbName, String tblName) throws DdlException {
Database db = (Database) Env.getCurrentInternalCatalog()
.getDbOrDdlException((ClusterNamespace.getFullName(SystemInfoService.DEFAULT_CLUSTER, dbName)));
if (db != null) {
OlapTable olapTable = db.getOlapTableOrDdlException(tblName);
if (olapTable != null) {
return olapTable.getId();
} else {
return -1;
}
} else {
return -1;
}
}

private static long getPartId(String dbName, String tblName, String partName) throws DdlException {
Database db = (Database) Env.getCurrentInternalCatalog()
.getDbOrDdlException((ClusterNamespace.getFullName(SystemInfoService.DEFAULT_CLUSTER, dbName)));
if (db != null) {
OlapTable olapTable = db.getOlapTableOrDdlException(tblName);
if (olapTable != null) {
Partition partition = olapTable.getPartition(partName);
if (partition != null) {
return partition.getId();
} else {
return -1;
}
} else {
return -1;
}
} else {
return -1;
}
}

@Test
public void testRecover() throws Exception {
createDb("test");
@ -157,7 +221,16 @@ public class RecoverTest {
Assert.assertFalse(checkDbExist("test"));
Assert.assertFalse(checkTableExist("test", "table1"));

recoverDb("test");
recoverDb("test", -1);
Assert.assertTrue(checkDbExist("test"));
Assert.assertTrue(checkTableExist("test", "table1"));

long dbId = getDbId("test");
dropDb("test");
Assert.assertFalse(checkDbExist("test"));
Assert.assertFalse(checkTableExist("test", "table1"));

recoverDb("test", dbId);
Assert.assertTrue(checkDbExist("test"));
Assert.assertTrue(checkTableExist("test", "table1"));

@ -165,7 +238,7 @@ public class RecoverTest {
Assert.assertTrue(checkDbExist("test"));
Assert.assertFalse(checkTableExist("test", "table1"));

recoverTable("test", "table1");
recoverTable("test", "table1", -1);
Assert.assertTrue(checkDbExist("test"));
Assert.assertTrue(checkTableExist("test", "table1"));

@ -201,7 +274,7 @@ public class RecoverTest {
Assert.assertTrue(checkTableExist("test", "table1"));

try {
recoverTable("test", "table1");
recoverTable("test", "table1", -1);
Assert.fail("should not recover succeed");
} catch (DdlException e) {
e.printStackTrace();
@ -211,7 +284,7 @@ public class RecoverTest {
dropPartition("test", "table1", "p1");
Assert.assertFalse(checkPartitionExist("test", "table1", "p1"));

recoverPartition("test", "table1", "p1");
recoverPartition("test", "table1", "p1", -1);
Assert.assertTrue(checkPartitionExist("test", "table1", "p1"));
}

@ -250,7 +323,16 @@ public class RecoverTest {
Assert.assertFalse(checkDbExist("test2"));
Assert.assertFalse(checkTableExist("test2", "table2"));

recoverDb("test2");
recoverDb("test2", -1);
Assert.assertTrue(checkDbExist("test2"));
Assert.assertTrue(checkTableExist("test2", "table2"));

long dbId = getDbId("test2");
dropDb("test2");
Assert.assertFalse(checkDbExist("test2"));
Assert.assertFalse(checkTableExist("test2", "table2"));

recoverDb("test2", dbId);
Assert.assertTrue(checkDbExist("test2"));
Assert.assertTrue(checkTableExist("test2", "table2"));

@ -258,7 +340,7 @@ public class RecoverTest {
Assert.assertTrue(checkDbExist("test2"));
Assert.assertFalse(checkTableExist("test2", "table2"));

recoverTable("test2", "table2");
recoverTable("test2", "table2", -1);
Assert.assertTrue(checkDbExist("test2"));
Assert.assertTrue(checkTableExist("test2", "table2"));

@ -294,7 +376,7 @@ public class RecoverTest {
Assert.assertTrue(checkTableExist("test2", "table2"));

try {
recoverTable("test2", "table2");
recoverTable("test2", "table2", -1);
Assert.fail("should not recover succeed");
} catch (DdlException e) {
e.printStackTrace();
@ -304,7 +386,7 @@ public class RecoverTest {
dropPartition("test2", "table2", "p1");
Assert.assertFalse(checkPartitionExist("test2", "table2", "p1"));

recoverPartition("test2", "table2", "p1");
recoverPartition("test2", "table2", "p1", -1);
Assert.assertTrue(checkPartitionExist("test2", "table2", "p1"));
}
}

@ -44,7 +44,7 @@ public class DropDbInfoTest {
DropDbInfo info1 = new DropDbInfo();
info1.write(dos);

DropDbInfo info2 = new DropDbInfo("test_db", true);
DropDbInfo info2 = new DropDbInfo("test_db", true, 0);
info2.write(dos);

dos.flush();
@ -64,9 +64,9 @@ public class DropDbInfoTest {

Assert.assertTrue(rInfo2.equals(rInfo2));
Assert.assertFalse(rInfo2.equals(this));
Assert.assertFalse(info2.equals(new DropDbInfo("test_db1", true)));
Assert.assertFalse(info2.equals(new DropDbInfo("test_db", false)));
Assert.assertTrue(info2.equals(new DropDbInfo("test_db", true)));
Assert.assertFalse(info2.equals(new DropDbInfo("test_db1", true, 0)));
Assert.assertFalse(info2.equals(new DropDbInfo("test_db", false, 0)));
Assert.assertTrue(info2.equals(new DropDbInfo("test_db", true, 0)));

// 3. delete files
dis.close();

@ -44,7 +44,7 @@ public class DropInfoTest {
DropInfo info1 = new DropInfo();
info1.write(dos);

DropInfo info2 = new DropInfo(1, 2, -1, true);
DropInfo info2 = new DropInfo(1, 2, -1, true, 0);
info2.write(dos);

dos.flush();
@ -65,10 +65,10 @@ public class DropInfoTest {

Assert.assertTrue(rInfo2.equals(rInfo2));
Assert.assertFalse(rInfo2.equals(this));
Assert.assertFalse(info2.equals(new DropInfo(0, 2, -1L, true)));
Assert.assertFalse(info2.equals(new DropInfo(1, 0, -1L, true)));
Assert.assertFalse(info2.equals(new DropInfo(1, 2, -1L, false)));
Assert.assertTrue(info2.equals(new DropInfo(1, 2, -1L, true)));
Assert.assertFalse(info2.equals(new DropInfo(0, 2, -1L, true, 0)));
Assert.assertFalse(info2.equals(new DropInfo(1, 0, -1L, true, 0)));
Assert.assertFalse(info2.equals(new DropInfo(1, 2, -1L, false, 0)));
Assert.assertTrue(info2.equals(new DropInfo(1, 2, -1L, true, 0)));

// 3. delete files
dis.close();

@ -41,7 +41,7 @@ public class DropPartitionInfoTest {
file.createNewFile();
DataOutputStream dos = new DataOutputStream(new FileOutputStream(file));

DropPartitionInfo info1 = new DropPartitionInfo(1L, 2L, "test_partition", false, true);
DropPartitionInfo info1 = new DropPartitionInfo(1L, 2L, "test_partition", false, true, 0);
info1.write(dos);

dos.flush();
@ -60,12 +60,12 @@ public class DropPartitionInfoTest {

Assert.assertTrue(rInfo1.equals(info1));
Assert.assertFalse(rInfo1.equals(this));
Assert.assertFalse(info1.equals(new DropPartitionInfo(-1L, 2L, "test_partition", false, true)));
Assert.assertFalse(info1.equals(new DropPartitionInfo(1L, -2L, "test_partition", false, true)));
Assert.assertFalse(info1.equals(new DropPartitionInfo(1L, 2L, "test_partition1", false, true)));
Assert.assertFalse(info1.equals(new DropPartitionInfo(1L, 2L, "test_partition", true, true)));
Assert.assertFalse(info1.equals(new DropPartitionInfo(1L, 2L, "test_partition", false, false)));
Assert.assertTrue(info1.equals(new DropPartitionInfo(1L, 2L, "test_partition", false, true)));
Assert.assertFalse(info1.equals(new DropPartitionInfo(-1L, 2L, "test_partition", false, true, 0)));
Assert.assertFalse(info1.equals(new DropPartitionInfo(1L, -2L, "test_partition", false, true, 0)));
Assert.assertFalse(info1.equals(new DropPartitionInfo(1L, 2L, "test_partition1", false, true, 0)));
Assert.assertFalse(info1.equals(new DropPartitionInfo(1L, 2L, "test_partition", true, true, 0)));
Assert.assertFalse(info1.equals(new DropPartitionInfo(1L, 2L, "test_partition", false, false, 0)));
Assert.assertTrue(info1.equals(new DropPartitionInfo(1L, 2L, "test_partition", false, true, 0)));

// 3. delete files
dis.close();

83
regression-test/data/ddl_p0/test_recover.out
Normal file
@ -0,0 +1,83 @@
-- This file is automatically generated. You should know what you did if you want to edit this
-- !select --
test_recover_tb CREATE TABLE `test_recover_tb` (\n `k1` int(11) NULL,\n `k2` datetime NULL\n) ENGINE=OLAP\nUNIQUE KEY(`k1`)\nCOMMENT 'OLAP'\nPARTITION BY RANGE(`k1`)\n(PARTITION p111 VALUES [("-1000"), ("111")),\nPARTITION p222 VALUES [("111"), ("222")),\nPARTITION p333 VALUES [("222"), ("333")),\nPARTITION p1000 VALUES [("333"), ("1000")))\nDISTRIBUTED BY HASH(`k1`) BUCKETS 3\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"in_memory" = "false",\n"storage_format" = "V2",\n"disable_auto_compaction" = "false"\n);

-- !select --
test_recover_tb CREATE TABLE `test_recover_tb` (\n `k1` int(11) NULL,\n `k2` datetime NULL\n) ENGINE=OLAP\nUNIQUE KEY(`k1`)\nCOMMENT 'OLAP'\nPARTITION BY RANGE(`k1`)\n(PARTITION p111 VALUES [("-1000"), ("111")),\nPARTITION p222 VALUES [("111"), ("222")),\nPARTITION p333 VALUES [("222"), ("333")))\nDISTRIBUTED BY HASH(`k1`) BUCKETS 3\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"in_memory" = "false",\n"storage_format" = "V2",\n"disable_auto_compaction" = "false"\n);

-- !select --
test_recover_tb CREATE TABLE `test_recover_tb` (\n `k1` int(11) NULL,\n `k2` datetime NULL\n) ENGINE=OLAP\nUNIQUE KEY(`k1`)\nCOMMENT 'OLAP'\nPARTITION BY RANGE(`k1`)\n(PARTITION p111 VALUES [("-1000"), ("111")),\nPARTITION p222 VALUES [("111"), ("222")),\nPARTITION p333 VALUES [("222"), ("333")),\nPARTITION p1000 VALUES [("333"), ("1000")))\nDISTRIBUTED BY HASH(`k1`) BUCKETS 3\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"in_memory" = "false",\n"storage_format" = "V2",\n"disable_auto_compaction" = "false"\n);

-- !select --
test_recover_tb CREATE TABLE `test_recover_tb` (\n `k1` int(11) NULL,\n `k2` datetime NULL\n) ENGINE=OLAP\nUNIQUE KEY(`k1`)\nCOMMENT 'OLAP'\nPARTITION BY RANGE(`k1`)\n(PARTITION p111 VALUES [("-1000"), ("111")),\nPARTITION p222 VALUES [("111"), ("222")),\nPARTITION p333 VALUES [("222"), ("333")))\nDISTRIBUTED BY HASH(`k1`) BUCKETS 3\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"in_memory" = "false",\n"storage_format" = "V2",\n"disable_auto_compaction" = "false"\n);

-- !select --
test_recover_tb CREATE TABLE `test_recover_tb` (\n `k1` int(11) NULL,\n `k2` datetime NULL\n) ENGINE=OLAP\nUNIQUE KEY(`k1`)\nCOMMENT 'OLAP'\nPARTITION BY RANGE(`k1`)\n(PARTITION p111 VALUES [("-1000"), ("111")),\nPARTITION p222 VALUES [("111"), ("222")),\nPARTITION p333 VALUES [("222"), ("333")),\nPARTITION p2000 VALUES [("333"), ("1000")))\nDISTRIBUTED BY HASH(`k1`) BUCKETS 3\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"in_memory" = "false",\n"storage_format" = "V2",\n"disable_auto_compaction" = "false"\n);

-- !select --
test_recover_tb CREATE TABLE `test_recover_tb` (\n `k1` int(11) NULL,\n `k2` datetime NULL\n) ENGINE=OLAP\nUNIQUE KEY(`k1`)\nCOMMENT 'OLAP'\nPARTITION BY RANGE(`k1`)\n(PARTITION p111 VALUES [("-1000"), ("111")),\nPARTITION p222 VALUES [("111"), ("222")),\nPARTITION p333 VALUES [("222"), ("333")))\nDISTRIBUTED BY HASH(`k1`) BUCKETS 3\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"in_memory" = "false",\n"storage_format" = "V2",\n"disable_auto_compaction" = "false"\n);

-- !select --
test_recover_tb CREATE TABLE `test_recover_tb` (\n `k1` int(11) NULL,\n `k2` datetime NULL\n) ENGINE=OLAP\nUNIQUE KEY(`k1`)\nCOMMENT 'OLAP'\nPARTITION BY RANGE(`k1`)\n(PARTITION p111 VALUES [("-1000"), ("111")),\nPARTITION p222 VALUES [("111"), ("222")),\nPARTITION p333 VALUES [("222"), ("333")),\nPARTITION p2000 VALUES [("1000"), ("2000")))\nDISTRIBUTED BY HASH(`k1`) BUCKETS 3\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"in_memory" = "false",\n"storage_format" = "V2",\n"disable_auto_compaction" = "false"\n);

-- !select --
test_recover_tb CREATE TABLE `test_recover_tb` (\n `k1` int(11) NULL,\n `k2` datetime NULL\n) ENGINE=OLAP\nUNIQUE KEY(`k1`)\nCOMMENT 'OLAP'\nPARTITION BY RANGE(`k1`)\n(PARTITION p111 VALUES [("-1000"), ("111")),\nPARTITION p222 VALUES [("111"), ("222")),\nPARTITION p333 VALUES [("222"), ("333")))\nDISTRIBUTED BY HASH(`k1`) BUCKETS 3\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"in_memory" = "false",\n"storage_format" = "V2",\n"disable_auto_compaction" = "false"\n);

-- !select --
test_recover_tb CREATE TABLE `test_recover_tb` (\n `k1` int(11) NULL,\n `k2` datetime NULL\n) ENGINE=OLAP\nUNIQUE KEY(`k1`)\nCOMMENT 'OLAP'\nPARTITION BY RANGE(`k1`)\n(PARTITION p111 VALUES [("-1000"), ("111")),\nPARTITION p222 VALUES [("111"), ("222")),\nPARTITION p333 VALUES [("222"), ("333")),\nPARTITION p2000 VALUES [("1000"), ("2000")))\nDISTRIBUTED BY HASH(`k1`) BUCKETS 3\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"in_memory" = "false",\n"storage_format" = "V2",\n"disable_auto_compaction" = "false"\n);

-- !select --
test_recover_tb CREATE TABLE `test_recover_tb` (\n `k1` int(11) NULL,\n `k2` datetime NULL\n) ENGINE=OLAP\nUNIQUE KEY(`k1`)\nCOMMENT 'OLAP'\nPARTITION BY RANGE(`k1`)\n(PARTITION p111 VALUES [("-1000"), ("111")),\nPARTITION p222 VALUES [("111"), ("222")),\nPARTITION p333 VALUES [("222"), ("333")),\nPARTITION p1000 VALUES [("333"), ("1000")),\nPARTITION p2000 VALUES [("1000"), ("2000")))\nDISTRIBUTED BY HASH(`k1`) BUCKETS 3\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"in_memory" = "false",\n"storage_format" = "V2",\n"disable_auto_compaction" = "false"\n);

-- !select --

-- !select --
test_recover_tb

-- !select --

-- !select --
test_recover_tb

-- !select --

-- !select --
test_recover_tb_new

-- !select --
test_recover_tb
test_recover_tb_new

-- !select --
test_recover_db CREATE DATABASE `test_recover_db`

-- !select --
test_recover_db CREATE DATABASE `test_recover_db`

-- !select --
test_recover_db CREATE DATABASE `test_recover_db`

-- !select --
test_recover_db_new CREATE DATABASE `test_recover_db_new`

-- !select --
test_recover_db CREATE DATABASE `test_recover_db`

-- !select --
test_recover_db_new CREATE DATABASE `test_recover_db_new`

-- !select --
test_recover_tb CREATE TABLE `test_recover_tb` (\n `k1` int(11) NULL,\n `k2` datetime NULL\n) ENGINE=OLAP\nUNIQUE KEY(`k1`)\nCOMMENT 'OLAP'\nPARTITION BY RANGE(`k1`)\n(PARTITION p111 VALUES [("-1000"), ("111")),\nPARTITION p222 VALUES [("111"), ("222")),\nPARTITION p333 VALUES [("222"), ("333")),\nPARTITION p1000 VALUES [("333"), ("1000")),\nPARTITION p2000 VALUES [("1000"), ("2000")))\nDISTRIBUTED BY HASH(`k1`) BUCKETS 3\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"in_memory" = "false",\n"storage_format" = "V2",\n"disable_auto_compaction" = "false"\n);

-- !select --
test_recover_tb_new CREATE TABLE `test_recover_tb_new` (\n `k1` int(11) NULL,\n `k2` datetime NULL\n) ENGINE=OLAP\nUNIQUE KEY(`k1`)\nCOMMENT 'OLAP'\nPARTITION BY RANGE(`k1`)\n(PARTITION p111 VALUES [("-1000"), ("111")),\nPARTITION p222 VALUES [("111"), ("222")),\nPARTITION p333 VALUES [("222"), ("333")),\nPARTITION p1000 VALUES [("333"), ("1000")))\nDISTRIBUTED BY HASH(`k1`) BUCKETS 3\nPROPERTIES (\n"replication_allocation" = "tag.location.default: 1",\n"in_memory" = "false",\n"storage_format" = "V2",\n"disable_auto_compaction" = "false"\n);

-- !select --
test_recover_tb
test_recover_tb_1
test_recover_tb_2
test_recover_tb_new

-- !select --
test_recover_tb_2

268
regression-test/suites/ddl_p0/test_recover.groovy
Normal file
@ -0,0 +1,268 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

suite("test_recover") {
try {
sql """
CREATE DATABASE IF NOT EXISTS `test_recover_db`
"""

sql """
CREATE TABLE IF NOT EXISTS `test_recover_db`.`test_recover_tb` (
`k1` int(11) NULL,
`k2` datetime NULL
) ENGINE=OLAP
UNIQUE KEY(`k1`)
PARTITION BY RANGE(`k1`)
(PARTITION p111 VALUES [('-1000'), ('111')),
PARTITION p222 VALUES [('111'), ('222')),
PARTITION p333 VALUES [('222'), ('333')),
PARTITION p1000 VALUES [('333'), ('1000')))
DISTRIBUTED BY HASH(`k1`) BUCKETS 3
PROPERTIES (
"replication_allocation" = "tag.location.default: 1",
"in_memory" = "false",
"storage_format" = "V2"
)
"""

// test drop/recover partition

qt_select """ SHOW CREATE TABLE `test_recover_db`.`test_recover_tb` """

sql """
ALTER TABLE `test_recover_db`.`test_recover_tb` DROP PARTITION p1000;
"""

qt_select """ SHOW CREATE TABLE `test_recover_db`.`test_recover_tb` """

sql """
RECOVER PARTITION p1000 FROM `test_recover_db`.`test_recover_tb`
"""

qt_select """ SHOW CREATE TABLE `test_recover_db`.`test_recover_tb` """

sql """
ALTER TABLE `test_recover_db`.`test_recover_tb` DROP PARTITION p1000
"""

qt_select """ SHOW CREATE TABLE `test_recover_db`.`test_recover_tb` """

sql """
RECOVER PARTITION p1000 AS p2000 FROM `test_recover_db`.`test_recover_tb`
"""

qt_select """ SHOW CREATE TABLE `test_recover_db`.`test_recover_tb` """

sql """
ALTER TABLE `test_recover_db`.`test_recover_tb` DROP PARTITION p2000
"""

qt_select """ SHOW CREATE TABLE `test_recover_db`.`test_recover_tb` """

sql """
ALTER TABLE `test_recover_db`.`test_recover_tb` ADD PARTITION p2000 VALUES [('1000'), ('2000'))
"""

qt_select """ SHOW CREATE TABLE `test_recover_db`.`test_recover_tb` """

sql """
ALTER TABLE `test_recover_db`.`test_recover_tb` DROP PARTITION p2000
"""

qt_select """ SHOW CREATE TABLE `test_recover_db`.`test_recover_tb` """

sql """
RECOVER PARTITION p2000 FROM `test_recover_db`.`test_recover_tb`
"""

qt_select """ SHOW CREATE TABLE `test_recover_db`.`test_recover_tb` """

sql """
RECOVER PARTITION p2000 AS p1000 FROM `test_recover_db`.`test_recover_tb`
"""

qt_select """ SHOW CREATE TABLE `test_recover_db`.`test_recover_tb` """

// test drop/recover table

sql """
DROP TABLE `test_recover_db`.`test_recover_tb`
"""

qt_select """SHOW TABLES FROM `test_recover_db`"""

sql """
RECOVER TABLE `test_recover_db`.`test_recover_tb`
"""

qt_select """SHOW TABLES FROM `test_recover_db`"""

sql """
DROP TABLE `test_recover_db`.`test_recover_tb`
"""

qt_select """SHOW TABLES FROM `test_recover_db`"""

sql """
CREATE TABLE `test_recover_db`.`test_recover_tb` (
`k1` int(11) NULL,
`k2` datetime NULL
) ENGINE=OLAP
UNIQUE KEY(`k1`)
PARTITION BY RANGE(`k1`)
(PARTITION p111 VALUES [('-1000'), ('111')),
PARTITION p222 VALUES [('111'), ('222')),
PARTITION p333 VALUES [('222'), ('333')),
PARTITION p1000 VALUES [('333'), ('1000')))
DISTRIBUTED BY HASH(`k1`) BUCKETS 3
PROPERTIES (
"replication_allocation" = "tag.location.default: 1",
"in_memory" = "false",
"storage_format" = "V2"
)
"""

qt_select """SHOW TABLES FROM `test_recover_db`"""

sql """
DROP TABLE `test_recover_db`.`test_recover_tb`
"""

qt_select """SHOW TABLES FROM `test_recover_db`"""

sql """
RECOVER TABLE `test_recover_db`.`test_recover_tb` AS `test_recover_tb_new`
"""

qt_select """SHOW TABLES FROM `test_recover_db`"""

sql """
RECOVER TABLE `test_recover_db`.`test_recover_tb`
"""

qt_select """SHOW TABLES FROM `test_recover_db`"""

// test drop/recover db

qt_select """ SHOW CREATE DATABASE `test_recover_db` """

sql """
DROP DATABASE `test_recover_db`
"""

sql """
RECOVER DATABASE `test_recover_db`
"""

qt_select """ SHOW CREATE DATABASE `test_recover_db` """

sql """
CREATE TABLE `test_recover_db`.`test_recover_tb_1` (
`k1` int(11) NULL,
`k2` datetime NULL
) ENGINE=OLAP
UNIQUE KEY(`k1`)
PARTITION BY RANGE(`k1`)
(PARTITION p111 VALUES [('-1000'), ('111')),
PARTITION p222 VALUES [('111'), ('222')),
PARTITION p333 VALUES [('222'), ('333')),
PARTITION p1000 VALUES [('333'), ('1000')))
DISTRIBUTED BY HASH(`k1`) BUCKETS 3
PROPERTIES (
"replication_allocation" = "tag.location.default: 1",
"in_memory" = "false",
"storage_format" = "V2"
)
"""

sql """
DROP DATABASE `test_recover_db`
"""

sql """
CREATE DATABASE `test_recover_db`
"""

qt_select """ SHOW CREATE DATABASE `test_recover_db` """

sql """
DROP DATABASE `test_recover_db`
"""

sql """
RECOVER DATABASE `test_recover_db` AS `test_recover_db_new`
"""

qt_select """ SHOW CREATE DATABASE `test_recover_db_new` """

sql """
CREATE TABLE `test_recover_db_new`.`test_recover_tb_2` (
`k1` int(11) NULL,
`k2` datetime NULL
) ENGINE=OLAP
UNIQUE KEY(`k1`)
PARTITION BY RANGE(`k1`)
(PARTITION p111 VALUES [('-1000'), ('111')),
PARTITION p222 VALUES [('111'), ('222')),
PARTITION p333 VALUES [('222'), ('333')),
PARTITION p1000 VALUES [('333'), ('1000')))
DISTRIBUTED BY HASH(`k1`) BUCKETS 3
PROPERTIES (
"replication_allocation" = "tag.location.default: 1",
"in_memory" = "false",
"storage_format" = "V2"
)
"""

sql """
RECOVER DATABASE `test_recover_db`
"""

sql """
CREATE TABLE `test_recover_db`.`test_recover_tb_2` (
`k1` int(11) NULL,
`k2` datetime NULL
) ENGINE=OLAP
UNIQUE KEY(`k1`)
PARTITION BY RANGE(`k1`)
(PARTITION p111 VALUES [('-1000'), ('111')),
PARTITION p222 VALUES [('111'), ('222')),
PARTITION p333 VALUES [('222'), ('333')),
PARTITION p1000 VALUES [('333'), ('1000')))
DISTRIBUTED BY HASH(`k1`) BUCKETS 3
PROPERTIES (
"replication_allocation" = "tag.location.default: 1",
"in_memory" = "false",
"storage_format" = "V2"
)
"""

qt_select """ SHOW CREATE DATABASE `test_recover_db` """
qt_select """ SHOW CREATE DATABASE `test_recover_db_new` """
qt_select """SHOW CREATE TABLE `test_recover_db`.`test_recover_tb`"""
qt_select """SHOW CREATE TABLE `test_recover_db`.`test_recover_tb_new`"""
qt_select """SHOW TABLES FROM `test_recover_db`"""
qt_select """SHOW TABLES FROM `test_recover_db_new`"""

} finally {
sql """ DROP DATABASE IF EXISTS `test_recover_db` FORCE """
sql """ DROP DATABASE IF EXISTS `test_recover_db_new` FORCE """
}

}