[improve](catalog recycle bin) show data size info when show catalog recycle bin (#30592)
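With this change, SHOW CATALOG RECYCLE BIN reports, for every dropped database, table, and partition, how much data it still occupies locally (DataSize) and in remote storage (RemoteDataSize). Roughly, the output gains two trailing columns; the rows and values below are an illustrative sketch, not output from a real run:

    mysql> SHOW CATALOG RECYCLE BIN;
    Type     | Name  | DbId  | TableId | PartitionId | DropTime            | DataSize  | RemoteDataSize
    ---------+-------+-------+---------+-------------+---------------------+-----------+---------------
    Database | db1   | 10001 |         |             | 2024-01-31 12:00:00 | 44.047 KB | 0.000
    Table    | show2 | 10012 | 10013   |             | 2024-01-31 12:00:01 | 22.023 KB | 0.000

Sizes are rendered by DebugUtil.printByteWithUnit, which is why the regression test below matches a numeric prefix plus a KB unit rather than exact byte counts.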
@@ -34,7 +34,7 @@ import java.util.function.Predicate;
 public class ShowCatalogRecycleBinStmt extends ShowStmt {
     public static final ImmutableList<String> TITLE_NAMES = new ImmutableList.Builder<String>()
             .add("Type").add("Name").add("DbId").add("TableId").add("PartitionId").add("DropTime")
-            .build();
+            .add("DataSize").add("RemoteDataSize").build();

     private Expr where;
     private String nameValue;
@@ -23,8 +23,10 @@ import org.apache.doris.common.Config;
 import org.apache.doris.common.DdlException;
 import org.apache.doris.common.FeConstants;
 import org.apache.doris.common.FeMetaVersion;
+import org.apache.doris.common.Pair;
 import org.apache.doris.common.io.Text;
 import org.apache.doris.common.io.Writable;
+import org.apache.doris.common.util.DebugUtil;
 import org.apache.doris.common.util.DynamicPartitionUtil;
 import org.apache.doris.common.util.MasterDaemon;
 import org.apache.doris.common.util.RangeUtils;
@@ -48,6 +50,7 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -980,30 +983,7 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
     }

     public List<List<String>> getInfo() {
-        List<List<String>> dbInfos = Lists.newArrayList();
-        for (Map.Entry<Long, RecycleDatabaseInfo> entry : idToDatabase.entrySet()) {
-            List<String> info = Lists.newArrayList();
-            info.add("Database");
-            RecycleDatabaseInfo dbInfo = entry.getValue();
-            Database db = dbInfo.getDb();
-            info.add(db.getFullName());
-            info.add(String.valueOf(entry.getKey()));
-            info.add("");
-            info.add("");
-            //info.add(String.valueOf(idToRecycleTime.get(entry.getKey())));
-            info.add(TimeUtils.longToTimeString(idToRecycleTime.get(entry.getKey())));
-
-            dbInfos.add(info);
-        }
-        // sort by Name, DropTime
-        dbInfos.sort((x, y) -> {
-            int nameRet = x.get(1).compareTo(y.get(1));
-            if (nameRet == 0) {
-                return x.get(5).compareTo(y.get(5));
-            } else {
-                return nameRet;
-            }
-        });
+        Map<Long, Pair<Long, Long>> dbToDataSize = new HashMap<>();

        List<List<String>> tableInfos = Lists.newArrayList();
        for (Map.Entry<Long, RecycleTableInfo> entry : idToTable.entrySet()) {
@@ -1017,6 +997,22 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
             info.add("");
             //info.add(String.valueOf(idToRecycleTime.get(entry.getKey())));
             info.add(TimeUtils.longToTimeString(idToRecycleTime.get(entry.getKey())));
+            // data size
+            long dataSize = table.getDataSize(false);
+            info.add(DebugUtil.printByteWithUnit(dataSize));
+            // remote data size
+            long remoteDataSize = table instanceof OlapTable ? ((OlapTable) table).getRemoteDataSize() : 0L;
+            info.add(DebugUtil.printByteWithUnit(remoteDataSize));
+            // calculate database data size
+            dbToDataSize.compute(tableInfo.getDbId(), (k, v) -> {
+                if (v == null) {
+                    return Pair.of(dataSize, remoteDataSize);
+                } else {
+                    v.first += dataSize;
+                    v.second += remoteDataSize;
+                    return v;
+                }
+            });

             tableInfos.add(info);
         }
@@ -1042,6 +1038,22 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
             info.add(String.valueOf(entry.getKey()));
             //info.add(String.valueOf(idToRecycleTime.get(entry.getKey())));
             info.add(TimeUtils.longToTimeString(idToRecycleTime.get(entry.getKey())));
+            // data size
+            long dataSize = partition.getDataSize(false);
+            info.add(DebugUtil.printByteWithUnit(dataSize));
+            // remote data size
+            long remoteDataSize = partition.getRemoteDataSize();
+            info.add(DebugUtil.printByteWithUnit(remoteDataSize));
+            // calculate database data size
+            dbToDataSize.compute(partitionInfo.getDbId(), (k, v) -> {
+                if (v == null) {
+                    return Pair.of(dataSize, remoteDataSize);
+                } else {
+                    v.first += dataSize;
+                    v.second += remoteDataSize;
+                    return v;
+                }
+            });

             partitionInfos.add(info);
         }
@@ -1055,6 +1067,36 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
             }
         });

+        List<List<String>> dbInfos = Lists.newArrayList();
+        for (Map.Entry<Long, RecycleDatabaseInfo> entry : idToDatabase.entrySet()) {
+            List<String> info = Lists.newArrayList();
+            info.add("Database");
+            RecycleDatabaseInfo dbInfo = entry.getValue();
+            Database db = dbInfo.getDb();
+            info.add(db.getFullName());
+            info.add(String.valueOf(entry.getKey()));
+            info.add("");
+            info.add("");
+            //info.add(String.valueOf(idToRecycleTime.get(entry.getKey())));
+            info.add(TimeUtils.longToTimeString(idToRecycleTime.get(entry.getKey())));
+            // data size
+            Pair<Long, Long> dataSizePair = dbToDataSize.getOrDefault(entry.getKey(), Pair.of(0L, 0L));
+            info.add(DebugUtil.printByteWithUnit(dataSizePair.first));
+            // remote data size
+            info.add(DebugUtil.printByteWithUnit(dataSizePair.second));
+
+            dbInfos.add(info);
+        }
+        // sort by Name, DropTime
+        dbInfos.sort((x, y) -> {
+            int nameRet = x.get(1).compareTo(y.get(1));
+            if (nameRet == 0) {
+                return x.get(5).compareTo(y.get(5));
+            } else {
+                return nameRet;
+            }
+        });
+
         return Stream.of(dbInfos, tableInfos, partitionInfos).flatMap(Collection::stream).collect(Collectors.toList());
     }
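The core of the getInfo() rewrite above is a single aggregation pass: while the table and partition rows are assembled, their sizes are folded into dbToDataSize with Map.compute, and the database rows (now built last) read their totals out of that map. Below is a minimal, self-contained sketch of that pattern, not Doris code; MutablePair is a hypothetical stand-in for org.apache.doris.common.Pair, whose first/second fields are publicly mutable.

import java.util.HashMap;
import java.util.Map;

public class RecycleSizeRollup {
    // Stand-in for org.apache.doris.common.Pair: two public mutable fields.
    static final class MutablePair {
        long first;   // local data size in bytes
        long second;  // remote data size in bytes
        MutablePair(long first, long second) {
            this.first = first;
            this.second = second;
        }
    }

    public static void main(String[] args) {
        Map<Long, MutablePair> dbToDataSize = new HashMap<>();
        // Each row: {dbId, dataSize, remoteDataSize} for one dropped table/partition.
        long[][] dropped = {
                {10001L, 4096L, 0L},
                {10001L, 8192L, 1024L},
                {10002L, 512L, 0L},
        };
        for (long[] row : dropped) {
            long dataSize = row[1];
            long remoteDataSize = row[2];
            dbToDataSize.compute(row[0], (k, v) -> {
                if (v == null) {
                    // First object seen for this database: start a new total.
                    return new MutablePair(dataSize, remoteDataSize);
                }
                // Later objects: accumulate into the existing pair in place.
                v.first += dataSize;
                v.second += remoteDataSize;
                return v;
            });
        }
        dbToDataSize.forEach((dbId, p) ->
                System.out.println("db " + dbId + ": local=" + p.first + "B remote=" + p.second + "B"));
    }
}

Returning v from the compute lambda (rather than allocating a fresh pair on every hit) keeps the accumulation in place, which is why the original code mutates Pair.first and Pair.second directly.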
BIN regression-test/data/catalog_recycle_bin_p0/all_types.csv.gz (new binary file; content not shown)
@@ -400,6 +400,20 @@ class Suite implements GroovyInterceptable {
         }
     }

+    long getTableId(String dbName, String tableName) {
+        def dbInfo = sql "show proc '/dbs'"
+        for (List<Object> row : dbInfo) {
+            if (row[1].equals(dbName)) {
+                def tbInfo = sql "show proc '/dbs/${row[0]}' "
+                for (List<Object> tb : tbInfo) {
+                    if (tb[1].equals(tableName)) {
+                        return tb[0].toLong()
+                    }
+                }
+            }
+        }
+    }
+
     long getDbId() {
         def dbInfo = sql "show proc '/dbs'"
         for (List<Object> row : dbInfo) {
@@ -410,6 +424,15 @@ class Suite implements GroovyInterceptable {
         }
     }

+    long getDbId(String dbName) {
+        def dbInfo = sql "show proc '/dbs'"
+        for (List<Object> row : dbInfo) {
+            if (row[1].equals(dbName)) {
+                return row[0].toLong()
+            }
+        }
+    }
+
     long getTableVersion(long dbId, String tableName) {
         def result = sql_return_maparray """show proc '/dbs/${dbId}'"""
         for (def res : result) {
regression-test/suites/catalog_recycle_bin_p0/show.groovy (new file, 146 lines)
@@ -0,0 +1,146 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("show") {
+    def dbs = [
+        'regression_test_catalog_recycle_bin_db0',
+        'regression_test_catalog_recycle_bin_db1',
+        'regression_test_catalog_recycle_bin_db2'
+    ]
+    def dbIds = []
+
+    for (def db : dbs) {
+        sql """ drop database if exists $db """
+        sql """ create database $db """
+        dbIds.add(getDbId(db) + '')
+    }
+
+    def tables = [
+        [dbs[1], 'show0'],
+        [dbs[1], 'show1'],
+        [dbs[2], 'show2'],
+        [dbs[2], 'show3'],
+        [dbs[2], 'show4']
+    ]
+    def tableIds = []
+
+    for (def entry in tables) {
+        def dbName = entry[0]
+        def tableName = entry[1]
+        sql """
+            CREATE TABLE IF NOT EXISTS $dbName.$tableName (
+                `k1` int(11) NULL,
+                `k2` tinyint(4) NULL,
+                `k3` smallint(6) NULL,
+                `k4` bigint(20) NULL,
+                `k5` largeint(40) NULL,
+                `k6` float NULL,
+                `k7` double NULL,
+                `k8` decimal(9, 0) NULL,
+                `k9` char(10) NULL,
+                `k10` varchar(1024) NULL,
+                `k11` text NULL,
+                `k12` date NULL,
+                `k13` datetime NULL
+            ) ENGINE=OLAP
+            PARTITION BY RANGE(k1)
+            (
+                PARTITION p2000 VALUES LESS THAN ('2000'),
+                PARTITION p5000 VALUES LESS THAN ('5000')
+            )
+            DISTRIBUTED BY HASH(`k1`) BUCKETS 3
+            PROPERTIES (
+                "replication_allocation" = "tag.location.default: 1"
+            );
+        """
+        tableIds.add(getTableId(dbName, tableName) + '')
+
+        streamLoad {
+            db "${dbName}"
+            table "${tableName}"
+
+            set 'column_separator', ','
+            set 'compress_type', 'gz'
+
+            file 'all_types.csv.gz'
+            time 10000 // limit inflight 10s
+
+            check { result, exception, startTime, endTime ->
+                if (exception != null) {
+                    throw exception
+                }
+                log.info("Stream load result: ${result}".toString())
+                def json = parseJson(result)
+                assertEquals("success", json.Status.toLowerCase())
+                assertEquals(2500, json.NumberTotalRows)
+                assertEquals(0, json.NumberFilteredRows)
+            }
+        }
+    }
+
+    logger.info("dbIds: " + dbIds)
+    logger.info("tableIds: " + tableIds)
+
+    sql """ drop database ${dbs[0]} """
+    sql """ drop database ${dbs[1]} """
+    sql """ drop table ${tables[2][0]}.${tables[2][1]} """
+    sql """ alter table ${tables[3][0]}.${tables[3][1]} drop partition p2000 """
+
+    def checkShowResults = () -> {
+        def results = sql_return_maparray """ show catalog recycle bin """
+        // logger.info("show catalog recycle bin result: " + results)
+        def recycleBinSize = 0
+        for (final def result in results) {
+            logger.info("result: " + result)
+            if (result.DbId == dbIds[0] && result.TableId == '') { // db0 is dropped
+                assertEquals(result['DataSize'], '0.000 ')
+                assertEquals(result['RemoteDataSize'], '0.000 ')
+                recycleBinSize++
+            } else if (result.DbId == dbIds[1] && result.TableId == '') { // db1 is dropped
+                assertTrue(result['DataSize'].startsWith('44') && result['DataSize'].contains('KB'))
+                assertEquals(result['RemoteDataSize'], '0.000 ')
+                recycleBinSize++
+            } else if (result.TableId == tableIds[2]) { // table2 is dropped
+                assertEquals(result['PartitionId'], '')
+                assertTrue(result['DataSize'].startsWith('22') && result['DataSize'].contains('KB'))
+                assertEquals(result['RemoteDataSize'], '0.000 ')
+                recycleBinSize++
+            } else if (result.TableId == tableIds[3]) { // the partition of table3 is dropped
+                assertFalse(result['PartitionId'].isEmpty())
+                assertTrue(result['DataSize'].startsWith('12') && result['DataSize'].contains('KB'))
+                assertEquals(result['RemoteDataSize'], '0.000 ')
+                recycleBinSize++
+            }
+        }
+        assertEquals(4, recycleBinSize)
+    }
+
+    for (def i = 0; i < 20; ++i) {
+        try {
+            logger.info("round " + i)
+            checkShowResults()
+            break
+        } catch (Throwable e) {
+            if (i == 19) { // last round: give up and surface the failure
+                throw e
+            } else {
+                // wait for the data size report
+                sleep(10000)
+            }
+        }
+    }
+}
@@ -15,9 +15,6 @@
 // specific language governing permissions and limitations
 // under the License.

-// Test some keywords that may conflict.
-// For example, "bin" is used for function "bin",
-// and also used "show catalog recycle bin"
 suite("test_bugfix_block_reuse") {
     sql "drop table if exists test_bugfix_block_reuse;"
     sql """
@@ -15,9 +15,6 @@
 // specific language governing permissions and limitations
 // under the License.

-// Test some keywords that may conflict.
-// For example, "bin" is used for function "bin",
-// and also used "show catalog recycle bin"
 suite("test_many_inlineview") {
     sql """
         drop table if exists ods_drp_ch_sys_codelist_et;