cherry pick from #39028
@@ -35,13 +35,15 @@ import java.util.Set;

public class RestoreStmt extends AbstractBackupStmt {
private static final String PROP_ALLOW_LOAD = "allow_load";
private static final String PROP_REPLICATION_NUM = "replication_num";
private static final String PROP_BACKUP_TIMESTAMP = "backup_timestamp";
private static final String PROP_META_VERSION = "meta_version";
private static final String PROP_RESERVE_REPLICA = "reserve_replica";
private static final String PROP_RESERVE_DYNAMIC_PARTITION_ENABLE = "reserve_dynamic_partition_enable";
private static final String PROP_IS_BEING_SYNCED = PropertyAnalyzer.PROPERTIES_IS_BEING_SYNCED;

public static final String PROP_RESERVE_REPLICA = "reserve_replica";
public static final String PROP_RESERVE_DYNAMIC_PARTITION_ENABLE = "reserve_dynamic_partition_enable";
public static final String PROP_CLEAN_TABLES = "clean_tables";
public static final String PROP_CLEAN_PARTITIONS = "clean_partitions";

private boolean allowLoad = false;
private ReplicaAllocation replicaAlloc = ReplicaAllocation.DEFAULT_ALLOCATION;
private String backupTimestamp = null;
@@ -50,16 +52,18 @@ public class RestoreStmt extends AbstractBackupStmt {
private boolean reserveDynamicPartitionEnable = false;
private boolean isLocal = false;
private boolean isBeingSynced = false;
private boolean isCleanTables = false;
private boolean isCleanPartitions = false;
private byte[] meta = null;
private byte[] jobInfo = null;

public RestoreStmt(LabelName labelName, String repoName, AbstractBackupTableRefClause restoreTableRefClause,
Map<String, String> properties) {
Map<String, String> properties) {
super(labelName, repoName, restoreTableRefClause, properties);
}

public RestoreStmt(LabelName labelName, String repoName, AbstractBackupTableRefClause restoreTableRefClause,
Map<String, String> properties, byte[] meta, byte[] jobInfo) {
Map<String, String> properties, byte[] meta, byte[] jobInfo) {
super(labelName, repoName, restoreTableRefClause, properties);
this.meta = meta;
this.jobInfo = jobInfo;
@@ -109,6 +113,14 @@ public class RestoreStmt extends AbstractBackupStmt {
return isBeingSynced;
}

public boolean isCleanTables() {
return isCleanTables;
}

public boolean isCleanPartitions() {
return isCleanPartitions;
}

@Override
public void analyze(Analyzer analyzer) throws UserException {
if (repoName.equals(Repository.KEEP_ON_LOCAL_REPO_NAME)) {
@@ -142,17 +154,7 @@ public class RestoreStmt extends AbstractBackupStmt {

Map<String, String> copiedProperties = Maps.newHashMap(properties);
// allow load
if (copiedProperties.containsKey(PROP_ALLOW_LOAD)) {
if (copiedProperties.get(PROP_ALLOW_LOAD).equalsIgnoreCase("true")) {
allowLoad = true;
} else if (copiedProperties.get(PROP_ALLOW_LOAD).equalsIgnoreCase("false")) {
allowLoad = false;
} else {
ErrorReport.reportAnalysisException(ErrorCode.ERR_COMMON_ERROR,
"Invalid allow load value: " + copiedProperties.get(PROP_ALLOW_LOAD));
}
copiedProperties.remove(PROP_ALLOW_LOAD);
}
allowLoad = eatBooleanProperty(copiedProperties, PROP_ALLOW_LOAD, allowLoad);

// replication num
this.replicaAlloc = PropertyAnalyzer.analyzeReplicaAllocation(copiedProperties, "");
@@ -160,34 +162,16 @@ public class RestoreStmt extends AbstractBackupStmt {
this.replicaAlloc = ReplicaAllocation.DEFAULT_ALLOCATION;
}
// reserve replica
if (copiedProperties.containsKey(PROP_RESERVE_REPLICA)) {
if (copiedProperties.get(PROP_RESERVE_REPLICA).equalsIgnoreCase("true")) {
reserveReplica = true;
} else if (copiedProperties.get(PROP_RESERVE_REPLICA).equalsIgnoreCase("false")) {
reserveReplica = false;
} else {
ErrorReport.reportAnalysisException(ErrorCode.ERR_COMMON_ERROR,
"Invalid reserve_replica value: " + copiedProperties.get(PROP_RESERVE_REPLICA));
}
// force set reserveReplica to false, do not keep the origin allocation
if (reserveReplica && !Config.force_olap_table_replication_allocation.isEmpty()) {
reserveReplica = false;
}
copiedProperties.remove(PROP_RESERVE_REPLICA);
reserveReplica = eatBooleanProperty(copiedProperties, PROP_RESERVE_REPLICA, reserveReplica);
// force set reserveReplica to false, do not keep the origin allocation
if (reserveReplica && !Config.force_olap_table_replication_allocation.isEmpty()) {
reserveReplica = false;
}

// reserve dynamic partition enable
if (copiedProperties.containsKey(PROP_RESERVE_DYNAMIC_PARTITION_ENABLE)) {
if (copiedProperties.get(PROP_RESERVE_DYNAMIC_PARTITION_ENABLE).equalsIgnoreCase("true")) {
reserveDynamicPartitionEnable = true;
} else if (copiedProperties.get(PROP_RESERVE_DYNAMIC_PARTITION_ENABLE).equalsIgnoreCase("false")) {
reserveDynamicPartitionEnable = false;
} else {
ErrorReport.reportAnalysisException(ErrorCode.ERR_COMMON_ERROR,
"Invalid reserve dynamic partition enable value: "
+ copiedProperties.get(PROP_RESERVE_DYNAMIC_PARTITION_ENABLE));
}
copiedProperties.remove(PROP_RESERVE_DYNAMIC_PARTITION_ENABLE);
}
reserveDynamicPartitionEnable = eatBooleanProperty(
copiedProperties, PROP_RESERVE_DYNAMIC_PARTITION_ENABLE, reserveDynamicPartitionEnable);

// backup timestamp
if (copiedProperties.containsKey(PROP_BACKUP_TIMESTAMP)) {
backupTimestamp = copiedProperties.get(PROP_BACKUP_TIMESTAMP);
@@ -211,17 +195,13 @@ public class RestoreStmt extends AbstractBackupStmt {
}

// is being synced
if (copiedProperties.containsKey(PROP_IS_BEING_SYNCED)) {
if (copiedProperties.get(PROP_IS_BEING_SYNCED).equalsIgnoreCase("true")) {
isBeingSynced = true;
} else if (copiedProperties.get(PROP_IS_BEING_SYNCED).equalsIgnoreCase("false")) {
isBeingSynced = false;
} else {
ErrorReport.reportAnalysisException(ErrorCode.ERR_COMMON_ERROR,
"Invalid is being synced value: " + copiedProperties.get(PROP_IS_BEING_SYNCED));
}
copiedProperties.remove(PROP_IS_BEING_SYNCED);
}
isBeingSynced = eatBooleanProperty(copiedProperties, PROP_IS_BEING_SYNCED, isBeingSynced);

// is clean tables
isCleanTables = eatBooleanProperty(copiedProperties, PROP_CLEAN_TABLES, isCleanTables);

// is clean partitions
isCleanPartitions = eatBooleanProperty(copiedProperties, PROP_CLEAN_PARTITIONS, isCleanPartitions);

if (!copiedProperties.isEmpty()) {
ErrorReport.reportAnalysisException(ErrorCode.ERR_COMMON_ERROR,
@@ -247,4 +227,22 @@ public class RestoreStmt extends AbstractBackupStmt {
sb.append("\n)");
return sb.toString();
}

private boolean eatBooleanProperty(Map<String, String> copiedProperties, String name, boolean defaultValue)
throws AnalysisException {
boolean retval = defaultValue;
if (copiedProperties.containsKey(name)) {
String value = copiedProperties.get(name);
if (value.equalsIgnoreCase("true")) {
retval = true;
} else if (value.equalsIgnoreCase("false")) {
retval = false;
} else {
ErrorReport.reportAnalysisException(ErrorCode.ERR_COMMON_ERROR,
"Invalid boolean property " + name + " value: " + value);
}
copiedProperties.remove(name);
}
return retval;
}
}
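The RestoreStmt changes above promote reserve_replica and reserve_dynamic_partition_enable to public constants, add the new clean_tables and clean_partitions property keys, and replace the repeated true/false parsing with the eatBooleanProperty helper. A minimal, hypothetical sketch (not part of this change; labelName and repoName are placeholders) of how a caller could attach the new properties through the constructor shown above. An invalid value such as "yes" is rejected by eatBooleanProperty during analyze(), and any key left unconsumed still hits the existing "unknown properties" error.

import org.apache.doris.analysis.LabelName;
import org.apache.doris.analysis.RestoreStmt;

import com.google.common.collect.Maps;

import java.util.Map;

public class CleanRestoreExample {
    // Illustration only: build a RestoreStmt whose properties ask the restore to drop
    // tables and partitions that are not covered by the snapshot.
    public static RestoreStmt buildCleanRestore(LabelName labelName, String repoName) {
        Map<String, String> properties = Maps.newHashMap();
        properties.put(RestoreStmt.PROP_CLEAN_TABLES, "true");      // parsed by eatBooleanProperty
        properties.put(RestoreStmt.PROP_CLEAN_PARTITIONS, "true");  // parsed by eatBooleanProperty
        // Null table ref clause: restore every table recorded in the snapshot.
        return new RestoreStmt(labelName, repoName, null, properties);
    }
}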
@@ -514,12 +514,12 @@ public class BackupHandler extends MasterDaemon implements Writable {
db.getId(), db.getFullName(), jobInfo, stmt.allowLoad(), stmt.getReplicaAlloc(),
stmt.getTimeoutMs(), metaVersion, stmt.reserveReplica(),
stmt.reserveDynamicPartitionEnable(), stmt.isBeingSynced(),
env, Repository.KEEP_ON_LOCAL_REPO_ID, backupMeta);
stmt.isCleanTables(), stmt.isCleanPartitions(), env, Repository.KEEP_ON_LOCAL_REPO_ID, backupMeta);
} else {
restoreJob = new RestoreJob(stmt.getLabel(), stmt.getBackupTimestamp(),
db.getId(), db.getFullName(), jobInfo, stmt.allowLoad(), stmt.getReplicaAlloc(),
stmt.getTimeoutMs(), stmt.getMetaVersion(), stmt.reserveReplica(), stmt.reserveDynamicPartitionEnable(),
stmt.isBeingSynced(), env, repository.getId());
stmt.isBeingSynced(), stmt.isCleanTables(), stmt.isCleanPartitions(), env, repository.getId());
}

env.getEditLog().logRestoreJob(restoreJob);
@@ -18,6 +18,7 @@
package org.apache.doris.backup;

import org.apache.doris.analysis.BackupStmt.BackupContent;
import org.apache.doris.analysis.RestoreStmt;
import org.apache.doris.backup.BackupJobInfo.BackupIndexInfo;
import org.apache.doris.backup.BackupJobInfo.BackupOlapTableInfo;
import org.apache.doris.backup.BackupJobInfo.BackupPartitionInfo;
@@ -63,6 +64,7 @@ import org.apache.doris.common.util.DbUtil;
import org.apache.doris.common.util.DynamicPartitionUtil;
import org.apache.doris.common.util.PropertyAnalyzer;
import org.apache.doris.common.util.TimeUtils;
import org.apache.doris.datasource.InternalCatalog;
import org.apache.doris.datasource.property.S3ClientBEProperties;
import org.apache.doris.resource.Tag;
import org.apache.doris.task.AgentBatchTask;
@@ -106,9 +108,12 @@ import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

public class RestoreJob extends AbstractJob {
private static final String PROP_RESERVE_REPLICA = "reserve_replica";
private static final String PROP_RESERVE_DYNAMIC_PARTITION_ENABLE = "reserve_dynamic_partition_enable";
private static final String PROP_RESERVE_REPLICA = RestoreStmt.PROP_RESERVE_REPLICA;
private static final String PROP_RESERVE_DYNAMIC_PARTITION_ENABLE =
RestoreStmt.PROP_RESERVE_DYNAMIC_PARTITION_ENABLE;
private static final String PROP_IS_BEING_SYNCED = PropertyAnalyzer.PROPERTIES_IS_BEING_SYNCED;
private static final String PROP_CLEAN_TABLES = RestoreStmt.PROP_CLEAN_TABLES;
private static final String PROP_CLEAN_PARTITIONS = RestoreStmt.PROP_CLEAN_PARTITIONS;

private static final Logger LOG = LogManager.getLogger(RestoreJob.class);
@@ -173,6 +178,11 @@ public class RestoreJob extends AbstractJob {

private boolean isBeingSynced = false;

// Whether to delete existing tables that are not involved in the restore.
private boolean isCleanTables = false;
// Whether to delete existing partitions that are not involved in the restore.
private boolean isCleanPartitions = false;

// restore properties
private Map<String, String> properties = Maps.newHashMap();

@@ -182,7 +192,8 @@ public class RestoreJob extends AbstractJob {

public RestoreJob(String label, String backupTs, long dbId, String dbName, BackupJobInfo jobInfo, boolean allowLoad,
ReplicaAllocation replicaAlloc, long timeoutMs, int metaVersion, boolean reserveReplica,
boolean reserveDynamicPartitionEnable, boolean isBeingSynced, Env env, long repoId) {
boolean reserveDynamicPartitionEnable, boolean isBeingSynced, boolean isCleanTables,
boolean isCleanPartitions, Env env, long repoId) {
super(JobType.RESTORE, label, dbId, dbName, timeoutMs, env, repoId);
this.backupTimestamp = backupTs;
this.jobInfo = jobInfo;
@@ -197,16 +208,21 @@ public class RestoreJob extends AbstractJob {
}
this.reserveDynamicPartitionEnable = reserveDynamicPartitionEnable;
this.isBeingSynced = isBeingSynced;
this.isCleanTables = isCleanTables;
this.isCleanPartitions = isCleanPartitions;
properties.put(PROP_RESERVE_REPLICA, String.valueOf(reserveReplica));
properties.put(PROP_RESERVE_DYNAMIC_PARTITION_ENABLE, String.valueOf(reserveDynamicPartitionEnable));
properties.put(PROP_IS_BEING_SYNCED, String.valueOf(isBeingSynced));
properties.put(PROP_CLEAN_TABLES, String.valueOf(isCleanTables));
properties.put(PROP_CLEAN_PARTITIONS, String.valueOf(isCleanPartitions));
}

public RestoreJob(String label, String backupTs, long dbId, String dbName, BackupJobInfo jobInfo, boolean allowLoad,
ReplicaAllocation replicaAlloc, long timeoutMs, int metaVersion, boolean reserveReplica,
boolean reserveDynamicPartitionEnable, boolean isBeingSynced, Env env, long repoId, BackupMeta backupMeta) {
boolean reserveDynamicPartitionEnable, boolean isBeingSynced, boolean isCleanTables,
boolean isCleanPartitions, Env env, long repoId, BackupMeta backupMeta) {
this(label, backupTs, dbId, dbName, jobInfo, allowLoad, replicaAlloc, timeoutMs, metaVersion, reserveReplica,
reserveDynamicPartitionEnable, isBeingSynced, env, repoId);
reserveDynamicPartitionEnable, isBeingSynced, isCleanTables, isCleanPartitions, env, repoId);
this.backupMeta = backupMeta;
}
@@ -873,7 +889,7 @@ public class RestoreJob extends AbstractJob {

if (ok) {
if (LOG.isDebugEnabled()) {
LOG.debug("finished to create all restored replcias. {}", this);
LOG.debug("finished to create all restored replicas. {}", this);
}
// add restored partitions.
// table should be in State RESTORE, so no other partitions can be
@@ -1455,7 +1471,7 @@ public class RestoreJob extends AbstractJob {
return;
}

Tablet tablet = idx.getTablet(info.getTabletId());
Tablet tablet = idx.getTablet(info.getTabletId());
if (tablet == null) {
status = new Status(ErrCode.NOT_FOUND,
"tablet " + info.getTabletId() + " does not exist in restored table "
@@ -1603,7 +1619,7 @@ public class RestoreJob extends AbstractJob {
return;
}

Tablet tablet = idx.getTablet(info.getTabletId());
Tablet tablet = idx.getTablet(info.getTabletId());
if (tablet == null) {
status = new Status(ErrCode.NOT_FOUND,
"tablet " + info.getTabletId() + " does not exist in restored table "
@@ -1808,6 +1824,14 @@ public class RestoreJob extends AbstractJob {
}
}

// Drop existing but non-restored tables/partitions.
if (isCleanTables || isCleanPartitions) {
Status st = dropAllNonRestoredTableAndPartitions(db);
if (!st.ok()) {
return st;
}
}

if (!isReplay) {
restoredPartitions.clear();
restoredTbls.clear();
@@ -1830,6 +1854,59 @@ public class RestoreJob extends AbstractJob {
return Status.OK;
}
private Status dropAllNonRestoredTableAndPartitions(Database db) {
try {
for (Table table : db.getTables()) {
long tableId = table.getId();
String tableName = table.getName();
TableType tableType = table.getType();
BackupOlapTableInfo backupTableInfo = jobInfo.backupOlapTableObjects.get(tableName);
if (tableType != TableType.OLAP && tableType != TableType.ODBC && tableType != TableType.VIEW) {
continue;
}
if (tableType == TableType.OLAP && backupTableInfo != null) {
// drop the non restored partitions.
dropNonRestoredPartitions(db, (OlapTable) table, backupTableInfo);
} else if (isCleanTables) {
// otherwise drop the entire table.
LOG.info("drop non restored table {}({}). {}", tableName, tableId, this);
boolean isForceDrop = false; // move this table into recyclebin.
env.getInternalCatalog().dropTableWithoutCheck(db, table, isForceDrop);
}
}
return Status.OK;
} catch (Exception e) {
LOG.warn("drop all non restored table and partitions failed. {}", this, e);
return new Status(ErrCode.COMMON_ERROR, e.getMessage());
}
}

private void dropNonRestoredPartitions(
Database db, OlapTable table, BackupOlapTableInfo backupTableInfo) throws DdlException {
if (!isCleanPartitions || !table.writeLockIfExist()) {
return;
}

try {
long tableId = table.getId();
String tableName = table.getQualifiedName();
InternalCatalog catalog = env.getInternalCatalog();
for (String partitionName : table.getPartitionNames()) {
if (backupTableInfo.containsPart(partitionName)) {
continue;
}

LOG.info("drop non restored partition {} of table {}({}). {}",
partitionName, tableName, tableId, this);
boolean isTempPartition = false;
boolean isForceDrop = false; // move this partition into recyclebin.
catalog.dropPartitionWithoutCheck(db, table, partitionName, isTempPartition, isForceDrop);
}
} finally {
table.writeUnlock();
}
}

private void releaseSnapshots() {
if (snapshotInfos.isEmpty()) {
return;
@@ -2235,6 +2312,8 @@ public class RestoreJob extends AbstractJob {
reserveReplica = Boolean.parseBoolean(properties.get(PROP_RESERVE_REPLICA));
reserveDynamicPartitionEnable = Boolean.parseBoolean(properties.get(PROP_RESERVE_DYNAMIC_PARTITION_ENABLE));
isBeingSynced = Boolean.parseBoolean(properties.get(PROP_IS_BEING_SYNCED));
isCleanTables = Boolean.parseBoolean(properties.get(PROP_CLEAN_TABLES));
isCleanPartitions = Boolean.parseBoolean(properties.get(PROP_CLEAN_PARTITIONS));
}

@Override
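The hunk above is the read-back counterpart of the constructor shown earlier: the new flags are stored as strings in the job's properties map and re-derived from it when the job is loaded again (for example after an FE restart). A minimal, self-contained sketch of that round trip with hypothetical values; a missing key parses as false, which matches the defaults of the isCleanTables and isCleanPartitions fields.

import java.util.HashMap;
import java.util.Map;

public class RestoreFlagRoundTrip {
    public static void main(String[] args) {
        Map<String, String> properties = new HashMap<>();
        // Constructor side: persist the flags as strings.
        properties.put("clean_tables", String.valueOf(true));
        properties.put("clean_partitions", String.valueOf(false));
        // Read-back side: parse them again.
        boolean isCleanTables = Boolean.parseBoolean(properties.get("clean_tables"));         // true
        boolean isCleanPartitions = Boolean.parseBoolean(properties.get("clean_partitions")); // false
        // A job written by an older version has no such key; parseBoolean(null) yields false.
        boolean missing = Boolean.parseBoolean(properties.get("no_such_key"));
        System.out.println(isCleanTables + " " + isCleanPartitions + " " + missing);
    }
}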
@@ -148,8 +148,8 @@ public class Partition extends MetaObject implements Writable {
public void updateVersionForRestore(long visibleVersion) {
this.setVisibleVersion(visibleVersion);
this.nextVersion = this.visibleVersion + 1;
LOG.info("update partition {} version for restore: visible: {}, next: {}",
name, visibleVersion, nextVersion);
LOG.info("update partition {}({}) version for restore: visible: {}, next: {}",
name, id, visibleVersion, nextVersion);
}

public void updateVisibleVersion(long visibleVersion) {
@@ -892,44 +892,18 @@ public class InternalCatalog implements CatalogIf<Database> {
watch.split();
costTimes.put("2:existCommittedTxns", watch.getSplitTime());
}
table.writeLock();
watch.split();
costTimes.put("3:tableWriteLock", watch.getSplitTime());
long recycleTime = 0;
try {
if (table instanceof OlapTable && !stmt.isForceDrop()) {
OlapTable olapTable = (OlapTable) table;
if ((olapTable.getState() != OlapTableState.NORMAL)) {
throw new DdlException("The table [" + tableName + "]'s state is " + olapTable.getState()
+ ", cannot be dropped." + " please cancel the operation on olap table firstly."
+ " If you want to forcibly drop(cannot be recovered),"
+ " please use \"DROP table FORCE\".");
}

if (table instanceof OlapTable && !stmt.isForceDrop()) {
OlapTable olapTable = (OlapTable) table;
if ((olapTable.getState() != OlapTableState.NORMAL)) {
throw new DdlException("The table [" + tableName + "]'s state is " + olapTable.getState()
+ ", cannot be dropped." + " please cancel the operation on olap table firstly."
+ " If you want to forcibly drop(cannot be recovered),"
+ " please use \"DROP table FORCE\".");
}
if (table.getType() == TableType.MATERIALIZED_VIEW) {
Env.getCurrentEnv().getMtmvService().dropMTMV((MTMV) table);
}
unprotectDropTable(db, table, stmt.isForceDrop(), false, 0);
watch.split();
costTimes.put("4:unprotectDropTable", watch.getSplitTime());
if (!stmt.isForceDrop()) {
recycleTime = Env.getCurrentRecycleBin().getRecycleTimeById(table.getId());
watch.split();
costTimes.put("5:getRecycleTimeById", watch.getSplitTime());
}
} finally {
table.writeUnlock();
}

Env.getCurrentEnv().getQueryStats().clear(Env.getCurrentEnv().getCurrentCatalog().getId(),
db.getId(), table.getId());

Env.getCurrentEnv().getAnalysisManager().removeTableStats(table.getId());

DropInfo info = new DropInfo(db.getId(), table.getId(), tableName, -1L, stmt.isForceDrop(), recycleTime);
Env.getCurrentEnv().getEditLog().logDropTable(info);

Env.getCurrentEnv().getMtmvService().dropTable(table);
dropTableInternal(db, table, stmt.isForceDrop(), watch, costTimes);
} catch (UserException e) {
throw new DdlException(e.getMessage(), e.getMysqlErrorCode());
} finally {
@@ -941,6 +915,62 @@ public class InternalCatalog implements CatalogIf<Database> {
tableName, dbName, stmt.isForceDrop(), costTimes);
}

// drop table without any check.
public void dropTableWithoutCheck(Database db, Table table, boolean forceDrop) throws DdlException {
if (!db.writeLockIfExist()) {
return;
}
try {
LOG.info("drop table {} without check, force: {}", table.getQualifiedName(), forceDrop);
dropTableInternal(db, table, forceDrop, null, null);
} catch (Exception e) {
LOG.warn("drop table without check", e);
throw e;
} finally {
db.writeUnlock();
}
}

// Drop a table. The caller must hold the db lock.
private void dropTableInternal(Database db, Table table, boolean forceDrop,
StopWatch watch, Map<String, Long> costTimes) throws DdlException {
table.writeLock();
String tableName = table.getName();
if (watch != null) {
watch.split();
costTimes.put("3:tableWriteLock", watch.getSplitTime());
}
long recycleTime = 0;
try {
if (table.getType() == TableType.MATERIALIZED_VIEW) {
Env.getCurrentEnv().getMtmvService().dropMTMV((MTMV) table);
}
unprotectDropTable(db, table, forceDrop, false, 0);
if (watch != null) {
watch.split();
costTimes.put("4:unprotectDropTable", watch.getSplitTime());
}
if (!forceDrop) {
recycleTime = Env.getCurrentRecycleBin().getRecycleTimeById(table.getId());
if (watch != null) {
watch.split();
costTimes.put("5:getRecycleTimeById", watch.getSplitTime());
}
}
} finally {
table.writeUnlock();
}

Env.getCurrentEnv().getQueryStats().clear(Env.getCurrentEnv().getCurrentCatalog().getId(),
db.getId(), table.getId());

Env.getCurrentEnv().getAnalysisManager().removeTableStats(table.getId());

DropInfo info = new DropInfo(db.getId(), table.getId(), tableName, -1L, forceDrop, recycleTime);
Env.getCurrentEnv().getEditLog().logDropTable(info);
Env.getCurrentEnv().getMtmvService().dropTable(table);
}

private static String genDropHint(String dbName, TableIf table) {
String type = "";
if (table instanceof View) {
@@ -1801,6 +1831,7 @@ public class InternalCatalog implements CatalogIf<Database> {

String partitionName = clause.getPartitionName();
boolean isTempPartition = clause.isTempPartition();
boolean isForceDrop = clause.isForceDrop();

olapTable.checkNormalStateForAlter();
if (!olapTable.checkPartitionNameExist(partitionName, isTempPartition)) {
@@ -1818,27 +1849,31 @@ public class InternalCatalog implements CatalogIf<Database> {
throw new DdlException("Alter table [" + olapTable.getName() + "] failed. Not a partitioned table");
}

// drop
if (!isTempPartition && !isForceDrop) {
Partition partition = olapTable.getPartition(partitionName);
if (partition != null && Env.getCurrentGlobalTransactionMgr()
.existCommittedTxns(db.getId(), olapTable.getId(), partition.getId())) {
throw new DdlException(
"There are still some transactions in the COMMITTED state waiting to be completed."
+ " The partition [" + partitionName
+ "] cannot be dropped. If you want to forcibly drop(cannot be recovered),"
+ " please use \"DROP partition FORCE\".");
}
}

dropPartitionWithoutCheck(db, olapTable, partitionName, isTempPartition, isForceDrop);
}

// drop partition without any check, the caller should hold the table write lock.
public void dropPartitionWithoutCheck(Database db, OlapTable olapTable, String partitionName,
boolean isTempPartition, boolean isForceDrop) throws DdlException {
Partition partition = null;
long recycleTime = 0;
long recycleTime = -1;
if (isTempPartition) {
olapTable.dropTempPartition(partitionName, true);
} else {
if (!clause.isForceDrop()) {
partition = olapTable.getPartition(partitionName);
if (partition != null) {
if (Env.getCurrentGlobalTransactionMgr()
.existCommittedTxns(db.getId(), olapTable.getId(), partition.getId())) {
throw new DdlException(
"There are still some transactions in the COMMITTED state waiting to be completed."
+ " The partition [" + partitionName
+ "] cannot be dropped. If you want to forcibly drop(cannot be recovered),"
+ " please use \"DROP partition FORCE\".");
}
}
}
olapTable.dropPartition(db.getId(), partitionName, clause.isForceDrop());
if (!clause.isForceDrop() && partition != null) {
partition = olapTable.dropPartition(db.getId(), partitionName, isForceDrop);
if (!isForceDrop && partition != null) {
recycleTime = Env.getCurrentRecycleBin().getRecycleTimeById(partition.getId());
}
}
@@ -1850,8 +1885,7 @@ public class InternalCatalog implements CatalogIf<Database> {
// it does not affect the logic of deleting the partition
try {
Env.getCurrentEnv().getEventProcessor().processEvent(
new DropPartitionEvent(db.getCatalog().getId(), db.getId(),
olapTable.getId()));
new DropPartitionEvent(db.getCatalog().getId(), db.getId(), olapTable.getId()));
} catch (Throwable t) {
// According to normal logic, no exceptions will be thrown,
// but in order to avoid bugs affecting the original logic, all exceptions are caught
@@ -1860,10 +1894,10 @@ public class InternalCatalog implements CatalogIf<Database> {
// log
long partitionId = partition == null ? -1L : partition.getId();
DropPartitionInfo info = new DropPartitionInfo(db.getId(), olapTable.getId(), partitionId, partitionName,
isTempPartition, clause.isForceDrop(), recycleTime, version, versionTime);
isTempPartition, isForceDrop, recycleTime, version, versionTime);
Env.getCurrentEnv().getEditLog().logDropPartition(info);
LOG.info("succeed in dropping partition[{}], table : [{}-{}], is temp : {}, is force : {}",
partitionName, olapTable.getId(), olapTable.getName(), isTempPartition, clause.isForceDrop());
partitionName, olapTable.getId(), olapTable.getName(), isTempPartition, isForceDrop);
}

public void replayDropPartition(DropPartitionInfo info) throws MetaNotFoundException {
@@ -3100,6 +3100,18 @@ public class FrontendServiceImpl implements FrontendService.Iface {
LabelName label = new LabelName(request.getDb(), request.getLabelName());
String repoName = request.getRepoName();
Map<String, String> properties = request.getProperties();

// Restore requires that all properties are known to the receiving FE, so an older FE
// cannot recognize properties introduced in a newer version. The flags are therefore
// carried as dedicated request parameters instead of being put into properties directly,
// which avoids compatibility issues during cross-version synchronization.
if (request.isCleanPartitions()) {
properties.put(RestoreStmt.PROP_CLEAN_PARTITIONS, "true");
}
if (request.isCleanTables()) {
properties.put(RestoreStmt.PROP_CLEAN_TABLES, "true");
}

AbstractBackupTableRefClause restoreTableRefClause = null;
if (request.isSetTableRefs()) {
List<TableRef> tableRefs = new ArrayList<>();