[Branch 2.1] backport systable PR (#34384,#40153,#40456,#40455,#40568) (#40687)
backport:
- https://github.com/apache/doris/pull/40568
- https://github.com/apache/doris/pull/40455
- https://github.com/apache/doris/pull/40456
- https://github.com/apache/doris/pull/40153
- https://github.com/apache/doris/pull/34384

Test result:

2024-09-11 11:00:45.618 INFO [suite-thread-1] (SuiteContext.groovy:309) - Recover original connection
2024-09-11 11:00:45.619 INFO [suite-thread-1] (Suite.groovy:359) - Execute sql: REVOKE SELECT_PRIV ON test_partitions_schema_db.duplicate_table FROM partitions_user
2024-09-11 11:00:45.625 INFO [suite-thread-1] (SuiteContext.groovy:299) - Create new connection for user 'partitions_user'
2024-09-11 11:00:45.632 INFO [suite-thread-1] (Suite.groovy:1162) - Execute tag: select_check_5, sql: select TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,SUBPARTITION_NAME,PARTITION_ORDINAL_POSITION,SUBPARTITION_ORDINAL_POSITION,PARTITION_METHOD,SUBPARTITION_METHOD,PARTITION_EXPRESSION,SUBPARTITION_EXPRESSION,PARTITION_DESCRIPTION,TABLE_ROWS,AVG_ROW_LENGTH,DATA_LENGTH,MAX_DATA_LENGTH,INDEX_LENGTH,DATA_FREE,CHECKSUM,PARTITION_COMMENT,NODEGROUP,TABLESPACE_NAME from information_schema.partitions where table_schema="test_partitions_schema_db" order by TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,PARTITION_NAME,SUBPARTITION_NAME,PARTITION_ORDINAL_POSITION,SUBPARTITION_ORDINAL_POSITION,PARTITION_METHOD,SUBPARTITION_METHOD,PARTITION_EXPRESSION,SUBPARTITION_EXPRESSION,PARTITION_DESCRIPTION,TABLE_ROWS,AVG_ROW_LENGTH,DATA_LENGTH,MAX_DATA_LENGTH,INDEX_LENGTH,DATA_FREE,CHECKSUM,PARTITION_COMMENT,NODEGROUP,TABLESPACE_NAME
2024-09-11 11:00:45.644 INFO [suite-thread-1] (SuiteContext.groovy:309) - Recover original connection
2024-09-11 11:00:45.645 INFO [suite-thread-1] (ScriptContext.groovy:120) - Run test_partitions_schema in /root/doris/workspace/doris/regression-test/suites/query_p0/system/test_partitions_schema.groovy succeed
2024-09-11 11:00:45.652 INFO [main] (RegressionTest.groovy:259) - Start to run single scripts
2024-09-11 11:01:10.321 INFO [main] (RegressionTest.groovy:380) - Success suites: /root/doris/workspace/doris/regression-test/suites/query_p0/system/test_partitions_schema.groovy: group=default,p0, name=test_partitions_schema
2024-09-11 11:01:10.322 INFO [main] (RegressionTest.groovy:459) - All suites success.
2024-09-11 11:01:10.322 INFO [main] (RegressionTest.groovy:410) - Test 1 suites, failed 0 suites, fatal 0 scripts, skipped 0 scripts
2024-09-11 11:01:10.322 INFO [main] (RegressionTest.groovy:119) - Test finished

2024-09-11 11:03:00.712 INFO [suite-thread-1] (Suite.groovy:1162) - Execute tag: select_check_5, sql: select * from information_schema.table_options ORDER BY TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,TABLE_MODEL,TABLE_MODEL_KEY,DISTRIBUTE_KEY,DISTRIBUTE_TYPE,BUCKETS_NUM,PARTITION_NUM;
2024-09-11 11:03:00.729 INFO [suite-thread-1] (SuiteContext.groovy:309) - Recover original connection
2024-09-11 11:03:00.731 INFO [suite-thread-1] (ScriptContext.groovy:120) - Run test_table_options in /root/doris/workspace/doris/regression-test/suites/query_p0/system/test_table_options.groovy succeed
2024-09-11 11:03:04.817 INFO [main] (RegressionTest.groovy:259) - Start to run single scripts
2024-09-11 11:03:28.741 INFO [main] (RegressionTest.groovy:380) - Success suites: /root/doris/workspace/doris/regression-test/suites/query_p0/system/test_table_options.groovy: group=default,p0, name=test_table_options
2024-09-11 11:03:28.742 INFO [main] (RegressionTest.groovy:459) - All suites success.
2024-09-11 11:03:28.742 INFO [main] (RegressionTest.groovy:410) - Test 1 suites, failed 0 suites, fatal 0 scripts, skipped 0 scripts
2024-09-11 11:03:28.742 INFO [main] (RegressionTest.groovy:119) - Test finished

*************************** 7. row ***************************
             PartitionId: 18035
           PartitionName: p100
          VisibleVersion: 2
      VisibleVersionTime: 2024-09-11 10:59:28
                   State: NORMAL
            PartitionKey: col_1
                   Range: [types: [INT]; keys: [83647]; ..types: [INT]; keys: [2147483647]; )
         DistributionKey: pk
                 Buckets: 10
          ReplicationNum: 1
           StorageMedium: HDD
            CooldownTime: 9999-12-31 15:59:59
     RemoteStoragePolicy:
LastConsistencyCheckTime: NULL
                DataSize: 2.872 KB
              IsInMemory: false
       ReplicaAllocation: tag.location.default: 1
               IsMutable: true
      SyncWithBaseTables: true
            UnsyncTables: NULL
        CommittedVersion: 2
                RowCount: 4
7 rows in set (0.01 sec)

---------

Co-authored-by: Mingyu Chen <morningman.cmy@gmail.com>
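Usage sketch (not part of the change itself): the regression tests above read the new system tables over the MySQL protocol; the snippet below does the same from Java for information_schema.table_options. The JDBC URL, port, user and password are placeholders, and a MySQL JDBC driver is assumed to be on the classpath.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SysTableQueryExample {
    public static void main(String[] args) throws Exception {
        // Placeholder FE endpoint and credentials; adjust for your cluster.
        String url = "jdbc:mysql://127.0.0.1:9030/information_schema";
        try (Connection conn = DriverManager.getConnection(url, "root", "");
                Statement stmt = conn.createStatement();
                ResultSet rs = stmt.executeQuery(
                        "SELECT TABLE_NAME, TABLE_MODEL, DISTRIBUTE_KEY, DISTRIBUTE_TYPE, "
                                + "BUCKETS_NUM, PARTITION_NUM FROM table_options")) {
            while (rs.next()) {
                System.out.printf("%s model=%s distKey=%s distType=%s buckets=%d partitions=%d%n",
                        rs.getString("TABLE_NAME"), rs.getString("TABLE_MODEL"),
                        rs.getString("DISTRIBUTE_KEY"), rs.getString("DISTRIBUTE_TYPE"),
                        rs.getInt("BUCKETS_NUM"), rs.getInt("PARTITION_NUM"));
            }
        }
    }
}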
@@ -75,7 +75,6 @@ public enum SchemaTableType {
    SCH_WORKLOAD_GROUPS("WORKLOAD_GROUPS", "WORKLOAD_GROUPS", TSchemaTableType.SCH_WORKLOAD_GROUPS),
    SCHE_USER("user", "user", TSchemaTableType.SCH_USER),
    SCH_PROCS_PRIV("procs_priv", "procs_priv", TSchemaTableType.SCH_PROCS_PRIV),
    SCH_WORKLOAD_POLICY("WORKLOAD_POLICY", "WORKLOAD_POLICY",
            TSchemaTableType.SCH_WORKLOAD_POLICY),
    SCH_FILE_CACHE_STATISTICS("FILE_CACHE_STATISTICS", "FILE_CACHE_STATISTICS",
@@ -87,7 +86,9 @@ public enum SchemaTableType {
    SCH_TABLE_PROPERTIES("TABLE_PROPERTIES", "TABLE_PROPERTIES",
            TSchemaTableType.SCH_TABLE_PROPERTIES),
    SCH_CATALOG_META_CACHE_STATISTICS("CATALOG_META_CACHE_STATISTICS", "CATALOG_META_CACHE_STATISTICS",
            TSchemaTableType.SCH_CATALOG_META_CACHE_STATISTICS);
            TSchemaTableType.SCH_CATALOG_META_CACHE_STATISTICS),
    SCH_TABLE_OPTIONS("TABLE_OPTIONS", "TABLE_OPTIONS",
            TSchemaTableType.SCH_TABLE_OPTIONS);

    private static final String dbName = "INFORMATION_SCHEMA";
    private static SelectList fullSelectLists;
@@ -58,6 +58,14 @@ public class ListPartitionItem extends PartitionItem {
        return partitionKeys;
    }

    public String getItemsString() {
        return toString();
    }

    public String getItemsSql() {
        return toSql();
    }

    @Override
    public boolean isDefaultPartition() {
        return isDefaultPartition;
@@ -2025,6 +2025,20 @@ public class OlapTable extends Table implements MTMVRelatedTableIf {
        return keysNum;
    }

    public String getKeyColAsString() {
        StringBuilder str = new StringBuilder();
        str.append("");
        for (Column column : getBaseSchema()) {
            if (column.isKey()) {
                if (str.length() != 0) {
                    str.append(",");
                }
                str.append(column.getName());
            }
        }
        return str.toString();
    }

    public boolean convertHashDistributionToRandomDistribution() {
        boolean hasChanged = false;
        if (defaultDistributionInfo.getType() == DistributionInfoType.HASH) {
@@ -478,4 +478,23 @@ public class Partition extends MetaObject implements Writable {
    public boolean isRollupIndex(long id) {
        return idToVisibleRollupIndex.containsKey(id);
    }

    public long getRowCount() {
        return getBaseIndex().getRowCount();
    }

    public long getAvgRowLength() {
        long rowCount = getBaseIndex().getRowCount();
        long dataSize = getBaseIndex().getDataSize(false);
        if (rowCount > 0) {
            return dataSize / rowCount;
        } else {
            return 0;
        }
    }

    public long getDataLength() {
        return getBaseIndex().getDataSize(false);
    }
}
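The getAvgRowLength() added above divides the base index's data size by its row count, returning 0 for an empty partition. A minimal illustrative calculation using the p100 partition from the test output in the commit message; the byte count is only an approximation of the reported 2.872 KB:

public class AvgRowLengthExample {
    public static void main(String[] args) {
        // Values taken (approximately) from partition p100 in the test output above.
        long dataLength = 2941; // DataSize reported as 2.872 KB
        long tableRows = 4;     // RowCount
        long avgRowLength = tableRows > 0 ? dataLength / tableRows : 0;
        System.out.println(avgRowLength); // 735 bytes per row
    }
}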
@@ -122,6 +122,19 @@ public class PartitionInfo implements Writable {
        return partitionColumns;
    }

    public String getDisplayPartitionColumns() {
        StringBuilder sb = new StringBuilder();
        int index = 0;
        for (Column c : partitionColumns) {
            if (index != 0) {
                sb.append(", ");
            }
            sb.append(c.getDisplayName());
            index++;
        }
        return sb.toString();
    }

    public Map<Long, PartitionItem> getIdToItem(boolean isTemp) {
        if (isTemp) {
            return idToTempItem;
@@ -60,4 +60,8 @@ public abstract class PartitionItem implements Comparable<PartitionItem>, Writable {
    public abstract boolean isGreaterThanSpecifiedTime(int pos, Optional<String> dateFormatOptional,
            long nowTruncSubSec)
            throws AnalysisException;

    // get the unique string of the partition item in sql format
    public abstract String getItemsSql();
}
@@ -46,6 +46,14 @@ public class RangePartitionItem extends PartitionItem {
        return partitionKeyRange;
    }

    public String getItemsString() {
        return toString();
    }

    public String getItemsSql() {
        return toPartitionKeyDesc().toSql();
    }

    @Override
    public boolean isDefaultPartition() {
        return false;
@@ -558,6 +558,18 @@ public class SchemaTable extends Table {
                            .column("METRIC_VALUE", ScalarType.createStringType())
                            .build())
            )
            .put("table_options",
                    new SchemaTable(SystemIdGenerator.getNextId(), "table_options", TableType.SCHEMA,
                            builder().column("TABLE_CATALOG", ScalarType.createVarchar(NAME_CHAR_LEN))
                                    .column("TABLE_SCHEMA", ScalarType.createVarchar(NAME_CHAR_LEN))
                                    .column("TABLE_NAME", ScalarType.createVarchar(NAME_CHAR_LEN))
                                    .column("TABLE_MODEL", ScalarType.createStringType())
                                    .column("TABLE_MODEL_KEY", ScalarType.createStringType())
                                    .column("DISTRIBUTE_KEY", ScalarType.createStringType())
                                    .column("DISTRIBUTE_TYPE", ScalarType.createStringType())
                                    .column("BUCKETS_NUM", ScalarType.createType(PrimitiveType.INT))
                                    .column("PARTITION_NUM", ScalarType.createType(PrimitiveType.INT))
                                    .build()))
            .build();

    private boolean fetchAllFe = false;
@@ -657,4 +657,15 @@ public class TableProperty implements Writable {
            properties.remove(DynamicPartitionProperty.REPLICATION_NUM);
        }
    }

    public String getPropertiesString() {
        StringBuilder str = new StringBuilder("");
        for (Map.Entry<String, String> entry : properties.entrySet()) {
            if (str.length() != 0) {
                str.append(", ");
            }
            str.append(entry.getKey() + " = " + entry.getValue());
        }
        return str.toString();
    }
}
@@ -76,6 +76,7 @@ public class PartitionsProcDir implements ProcDirInterface {
            .add("Buckets").add("ReplicationNum").add("StorageMedium").add("CooldownTime").add("RemoteStoragePolicy")
            .add("LastConsistencyCheckTime").add("DataSize").add("IsInMemory").add("ReplicaAllocation")
            .add("IsMutable").add("SyncWithBaseTables").add("UnsyncTables").add("CommittedVersion")
            .add("RowCount")
            .build();

    private Database db;
@@ -383,6 +384,9 @@ public class PartitionsProcDir implements ProcDirInterface {
                partitionInfo.add(partition.getCommittedVersion());
                trow.addToColumnValue(new TCell().setLongVal(partition.getCommittedVersion()));

                partitionInfo.add(partition.getRowCount());
                trow.addToColumnValue(new TCell().setLongVal(partition.getRowCount()));

                partitionInfos.add(Pair.of(partitionInfo, trow));
            }
        } finally {
@@ -21,9 +21,15 @@ import org.apache.doris.analysis.UserIdentity;
import org.apache.doris.catalog.Column;
import org.apache.doris.catalog.Database;
import org.apache.doris.catalog.DatabaseIf;
import org.apache.doris.catalog.DistributionInfo;
import org.apache.doris.catalog.DistributionInfo.DistributionInfoType;
import org.apache.doris.catalog.Env;
import org.apache.doris.catalog.HashDistributionInfo;
import org.apache.doris.catalog.MTMV;
import org.apache.doris.catalog.OlapTable;
import org.apache.doris.catalog.Partition;
import org.apache.doris.catalog.PartitionItem;
import org.apache.doris.catalog.PartitionType;
import org.apache.doris.catalog.SchemaTable;
import org.apache.doris.catalog.Table;
import org.apache.doris.catalog.TableIf;
@@ -96,6 +102,7 @@ import java.text.SimpleDateFormat;
import java.time.Instant;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.List;
import java.util.Map;
@@ -112,12 +119,16 @@ public class MetadataGenerator {

    private static final ImmutableMap<String, Integer> WORKLOAD_SCHED_POLICY_COLUMN_TO_INDEX;

    private static final ImmutableMap<String, Integer> TABLE_OPTIONS_COLUMN_TO_INDEX;

    private static final ImmutableMap<String, Integer> WORKLOAD_GROUP_PRIVILEGES_COLUMN_TO_INDEX;

    private static final ImmutableMap<String, Integer> TABLE_PROPERTIES_COLUMN_TO_INDEX;

    private static final ImmutableMap<String, Integer> META_CACHE_STATS_COLUMN_TO_INDEX;

    private static final ImmutableMap<String, Integer> PARTITIONS_COLUMN_TO_INDEX;

    static {
        ImmutableMap.Builder<String, Integer> activeQueriesbuilder = new ImmutableMap.Builder();
        List<Column> activeQueriesColList = SchemaTable.TABLE_MAP.get("active_queries").getFullSchema();
@@ -145,6 +156,13 @@ public class MetadataGenerator {
        }
        WORKLOAD_SCHED_POLICY_COLUMN_TO_INDEX = policyBuilder.build();

        ImmutableMap.Builder<String, Integer> optionBuilder = new ImmutableMap.Builder();
        List<Column> optionColList = SchemaTable.TABLE_MAP.get("table_options").getFullSchema();
        for (int i = 0; i < optionColList.size(); i++) {
            optionBuilder.put(optionColList.get(i).getName().toLowerCase(), i);
        }
        TABLE_OPTIONS_COLUMN_TO_INDEX = optionBuilder.build();

        ImmutableMap.Builder<String, Integer> wgPrivsBuilder = new ImmutableMap.Builder();
        List<Column> wgPrivsColList = SchemaTable.TABLE_MAP.get("workload_group_privileges").getFullSchema();
        for (int i = 0; i < wgPrivsColList.size(); i++) {
@@ -165,6 +183,13 @@ public class MetadataGenerator {
            metaCacheBuilder.put(metaCacheColList.get(i).getName().toLowerCase(), i);
        }
        META_CACHE_STATS_COLUMN_TO_INDEX = metaCacheBuilder.build();

        ImmutableMap.Builder<String, Integer> partitionsBuilder = new ImmutableMap.Builder();
        List<Column> partitionsColList = SchemaTable.TABLE_MAP.get("partitions").getFullSchema();
        for (int i = 0; i < partitionsColList.size(); i++) {
            partitionsBuilder.put(partitionsColList.get(i).getName().toLowerCase(), i);
        }
        PARTITIONS_COLUMN_TO_INDEX = partitionsBuilder.build();
    }

    public static TFetchSchemaTableDataResult getMetadataTable(TFetchSchemaTableDataRequest request) throws TException {
@@ -244,6 +269,10 @@ public class MetadataGenerator {
                result = workloadSchedPolicyMetadataResult(schemaTableParams);
                columnIndex = WORKLOAD_SCHED_POLICY_COLUMN_TO_INDEX;
                break;
            case TABLE_OPTIONS:
                result = tableOptionsMetadataResult(schemaTableParams);
                columnIndex = TABLE_OPTIONS_COLUMN_TO_INDEX;
                break;
            case WORKLOAD_GROUP_PRIVILEGES:
                result = workloadGroupPrivsMetadataResult(schemaTableParams);
                columnIndex = WORKLOAD_GROUP_PRIVILEGES_COLUMN_TO_INDEX;
@@ -256,6 +285,10 @@ public class MetadataGenerator {
                result = metaCacheStatsMetadataResult(schemaTableParams);
                columnIndex = META_CACHE_STATS_COLUMN_TO_INDEX;
                break;
            case PARTITIONS:
                result = partitionsMetadataResult(schemaTableParams);
                columnIndex = PARTITIONS_COLUMN_TO_INDEX;
                break;
            default:
                return errorResult("invalid schema table name.");
        }
@@ -1046,6 +1079,123 @@ public class MetadataGenerator {
        return result;
    }

    private static void tableOptionsForInternalCatalog(UserIdentity currentUserIdentity,
            CatalogIf catalog, DatabaseIf database, List<TableIf> tables, List<TRow> dataBatch) {
        for (TableIf table : tables) {
            if (!(table instanceof OlapTable)) {
                continue;
            }
            if (!Env.getCurrentEnv().getAccessManager().checkTblPriv(currentUserIdentity, catalog.getName(),
                    database.getFullName(), table.getName(), PrivPredicate.SHOW)) {
                continue;
            }
            OlapTable olapTable = (OlapTable) table;
            TRow trow = new TRow();
            trow.addToColumnValue(new TCell().setStringVal(catalog.getName())); // TABLE_CATALOG
            trow.addToColumnValue(new TCell().setStringVal(database.getFullName())); // TABLE_SCHEMA
            trow.addToColumnValue(new TCell().setStringVal(table.getName())); // TABLE_NAME
            trow.addToColumnValue(
                    new TCell().setStringVal(olapTable.getKeysType().toMetadata())); // TABLE_MODEL
            trow.addToColumnValue(
                    new TCell().setStringVal(olapTable.getKeyColAsString())); // key columTypes

            DistributionInfo distributionInfo = olapTable.getDefaultDistributionInfo();
            if (distributionInfo.getType() == DistributionInfoType.HASH) {
                HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo;
                List<Column> distributionColumns = hashDistributionInfo.getDistributionColumns();
                StringBuilder distributeKey = new StringBuilder();
                for (Column c : distributionColumns) {
                    if (distributeKey.length() != 0) {
                        distributeKey.append(",");
                    }
                    distributeKey.append(c.getName());
                }
                if (distributeKey.length() == 0) {
                    trow.addToColumnValue(new TCell().setStringVal(""));
                } else {
                    trow.addToColumnValue(
                            new TCell().setStringVal(distributeKey.toString()));
                }
                trow.addToColumnValue(new TCell().setStringVal("HASH")); // DISTRIBUTE_TYPE
            } else {
                trow.addToColumnValue(new TCell().setStringVal("RANDOM")); // DISTRIBUTE_KEY
                trow.addToColumnValue(new TCell().setStringVal("RANDOM")); // DISTRIBUTE_TYPE
            }
            trow.addToColumnValue(new TCell().setIntVal(distributionInfo.getBucketNum())); // BUCKETS_NUM
            trow.addToColumnValue(new TCell().setIntVal(olapTable.getPartitionNum())); // PARTITION_NUM
            dataBatch.add(trow);
        }
    }

    private static void tableOptionsForExternalCatalog(UserIdentity currentUserIdentity,
            CatalogIf catalog, DatabaseIf database, List<TableIf> tables, List<TRow> dataBatch) {
        for (TableIf table : tables) {
            if (!Env.getCurrentEnv().getAccessManager().checkTblPriv(currentUserIdentity, catalog.getName(),
                    database.getFullName(), table.getName(), PrivPredicate.SHOW)) {
                continue;
            }
            TRow trow = new TRow();
            trow.addToColumnValue(new TCell().setStringVal(catalog.getName())); // TABLE_CATALOG
            trow.addToColumnValue(new TCell().setStringVal(database.getFullName())); // TABLE_SCHEMA
            trow.addToColumnValue(new TCell().setStringVal(table.getName())); // TABLE_NAME
            trow.addToColumnValue(
                    new TCell().setStringVal("")); // TABLE_MODEL
            trow.addToColumnValue(
                    new TCell().setStringVal("")); // key columTypes
            trow.addToColumnValue(new TCell().setStringVal("")); // DISTRIBUTE_KEY
            trow.addToColumnValue(new TCell().setStringVal("")); // DISTRIBUTE_TYPE
            trow.addToColumnValue(new TCell().setIntVal(0)); // BUCKETS_NUM
            trow.addToColumnValue(new TCell().setIntVal(0)); // PARTITION_NUM
            dataBatch.add(trow);
        }
    }

    private static TFetchSchemaTableDataResult tableOptionsMetadataResult(TSchemaTableRequestParams params) {
        if (!params.isSetCurrentUserIdent()) {
            return errorResult("current user ident is not set.");
        }
        if (!params.isSetDbId()) {
            return errorResult("current db id is not set.");
        }

        if (!params.isSetCatalog()) {
            return errorResult("current catalog is not set.");
        }

        TUserIdentity tcurrentUserIdentity = params.getCurrentUserIdent();
        UserIdentity currentUserIdentity = UserIdentity.fromThrift(tcurrentUserIdentity);
        TFetchSchemaTableDataResult result = new TFetchSchemaTableDataResult();
        List<TRow> dataBatch = Lists.newArrayList();
        Long dbId = params.getDbId();
        String clg = params.getCatalog();
        CatalogIf catalog = Env.getCurrentEnv().getCatalogMgr().getCatalog(clg);
        if (catalog == null) {
            // catalog is NULL let return empty to BE
            result.setDataBatch(dataBatch);
            result.setStatus(new TStatus(TStatusCode.OK));
            return result;
        }
        DatabaseIf database = catalog.getDbNullable(dbId);
        if (database == null) {
            // BE gets the database id list from FE and then invokes this interface
            // per database. there is a chance that in between database can be dropped.
            // so need to handle database not exist case and return ok so that BE continue the
            // loop with next database.
            result.setDataBatch(dataBatch);
            result.setStatus(new TStatus(TStatusCode.OK));
            return result;
        }
        List<TableIf> tables = database.getTables();
        if (catalog instanceof InternalCatalog) {
            tableOptionsForInternalCatalog(currentUserIdentity, catalog, database, tables, dataBatch);
        } else if (catalog instanceof ExternalCatalog) {
            tableOptionsForExternalCatalog(currentUserIdentity, catalog, database, tables, dataBatch);
        }
        result.setDataBatch(dataBatch);
        result.setStatus(new TStatus(TStatusCode.OK));
        return result;
    }

    private static void tablePropertiesForInternalCatalog(UserIdentity currentUserIdentity,
            CatalogIf catalog, DatabaseIf database, List<TableIf> tables, List<TRow> dataBatch) {
        for (TableIf table : tables) {
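Aside: the DISTRIBUTE_KEY value built in tableOptionsForInternalCatalog above is just the hash distribution column names joined by commas, and empty when there are none. A self-contained sketch of the same string construction; the column names here are made up, not taken from the change:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class DistributeKeyJoinExample {
    // Equivalent to the StringBuilder loop in tableOptionsForInternalCatalog above:
    // names joined with a comma, no trailing separator, empty list -> empty string.
    static String joinDistributionColumns(List<String> columnNames) {
        return String.join(",", columnNames);
    }

    public static void main(String[] args) {
        System.out.println(joinDistributionColumns(Arrays.asList("pk", "col_1"))); // pk,col_1
        System.out.println(joinDistributionColumns(Collections.emptyList()).isEmpty()); // true
    }
}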
@@ -1119,8 +1269,14 @@ public class MetadataGenerator {
        TFetchSchemaTableDataResult result = new TFetchSchemaTableDataResult();
        Long dbId = params.getDbId();
        String clg = params.getCatalog();
        CatalogIf catalog = Env.getCurrentEnv().getCatalogMgr().getCatalog(clg);
        List<TRow> dataBatch = Lists.newArrayList();
        CatalogIf catalog = Env.getCurrentEnv().getCatalogMgr().getCatalog(clg);
        if (catalog == null) {
            // catalog is NULL let return empty to BE
            result.setDataBatch(dataBatch);
            result.setStatus(new TStatus(TStatusCode.OK));
            return result;
        }
        DatabaseIf database = catalog.getDbNullable(dbId);
        if (database == null) {
            // BE gets the database id list from FE and then invokes this interface
@@ -1164,7 +1320,124 @@ public class MetadataGenerator {
                fillBatch(dataBatch, icebergCache.getCacheStats(), catalogIf.getName());
            }
        }
        result.setDataBatch(dataBatch);
        result.setStatus(new TStatus(TStatusCode.OK));
        return result;
    }

    private static void partitionsForInternalCatalog(UserIdentity currentUserIdentity,
            CatalogIf catalog, DatabaseIf database, List<TableIf> tables, List<TRow> dataBatch) {
        for (TableIf table : tables) {
            if (!(table instanceof OlapTable)) {
                continue;
            }
            if (!Env.getCurrentEnv().getAccessManager().checkTblPriv(currentUserIdentity, catalog.getName(),
                    database.getFullName(), table.getName(), PrivPredicate.SHOW)) {
                continue;
            }

            OlapTable olapTable = (OlapTable) table;
            Collection<Partition> allPartitions = olapTable.getAllPartitions();

            for (Partition partition : allPartitions) {
                TRow trow = new TRow();
                trow.addToColumnValue(new TCell().setStringVal(catalog.getName())); // TABLE_CATALOG
                trow.addToColumnValue(new TCell().setStringVal(database.getFullName())); // TABLE_SCHEMA
                trow.addToColumnValue(new TCell().setStringVal(table.getName())); // TABLE_NAME
                trow.addToColumnValue(new TCell().setStringVal(partition.getName())); // PARTITION_NAME
                trow.addToColumnValue(new TCell().setStringVal("NULL")); // SUBPARTITION_NAME (always null)

                trow.addToColumnValue(new TCell().setIntVal(0)); // PARTITION_ORDINAL_POSITION (not available)
                trow.addToColumnValue(new TCell().setIntVal(0)); // SUBPARTITION_ORDINAL_POSITION (not available)
                trow.addToColumnValue(new TCell().setStringVal(
                        olapTable.getPartitionInfo().getType().toString())); // PARTITION_METHOD
                trow.addToColumnValue(new TCell().setStringVal("NULL")); // SUBPARTITION_METHOD (always null)
                PartitionItem item = olapTable.getPartitionInfo().getItem(partition.getId());
                if ((olapTable.getPartitionInfo().getType() == PartitionType.UNPARTITIONED) || (item == null)) {
                    trow.addToColumnValue(new TCell().setStringVal("NULL")); // if unpartitioned, its null
                    trow.addToColumnValue(new TCell().setStringVal("NULL")); // SUBPARTITION_EXPRESSION (always null)
                    trow.addToColumnValue(new TCell().setStringVal("NULL")); // PARITION DESC, its null
                } else {
                    trow.addToColumnValue(new TCell().setStringVal(
                            olapTable.getPartitionInfo()
                                    .getDisplayPartitionColumns().toString())); // PARTITION_EXPRESSION
                    trow.addToColumnValue(new TCell().setStringVal("NULL")); // SUBPARTITION_EXPRESSION (always null)
                    trow.addToColumnValue(new TCell().setStringVal(
                            item.getItemsSql())); // PARITION DESC
                }
                trow.addToColumnValue(new TCell().setLongVal(partition.getRowCount())); // TABLE_ROWS (PARTITION row)
                trow.addToColumnValue(new TCell().setLongVal(partition.getAvgRowLength())); // AVG_ROW_LENGTH
                trow.addToColumnValue(new TCell().setLongVal(partition.getDataLength())); // DATA_LENGTH
                trow.addToColumnValue(new TCell().setIntVal(0)); // MAX_DATA_LENGTH (not available)
                trow.addToColumnValue(new TCell().setIntVal(0)); // INDEX_LENGTH (not available)
                trow.addToColumnValue(new TCell().setIntVal(0)); // DATA_FREE (not available)
                trow.addToColumnValue(new TCell().setStringVal("NULL")); // CREATE_TIME (not available)
                trow.addToColumnValue(new TCell().setStringVal(
                        TimeUtils.longToTimeString(partition.getVisibleVersionTime()))); // UPDATE_TIME
                trow.addToColumnValue(new TCell().setStringVal("NULL")); // CHECK_TIME (not available)
                trow.addToColumnValue(new TCell().setIntVal(0)); // CHECKSUM (not available)
                trow.addToColumnValue(new TCell().setStringVal("")); // PARTITION_COMMENT (not available)
                trow.addToColumnValue(new TCell().setStringVal("")); // NODEGROUP (not available)
                trow.addToColumnValue(new TCell().setStringVal("")); // TABLESPACE_NAME (not available)
                dataBatch.add(trow);
            }
        } // for table
    }

    private static void partitionsForExternalCatalog(UserIdentity currentUserIdentity,
            CatalogIf catalog, DatabaseIf database, List<TableIf> tables, List<TRow> dataBatch) {
        for (TableIf table : tables) {
            if (!Env.getCurrentEnv().getAccessManager().checkTblPriv(currentUserIdentity, catalog.getName(),
                    database.getFullName(), table.getName(), PrivPredicate.SHOW)) {
                continue;
            }
            // TODO
        } // for table
    }

    private static TFetchSchemaTableDataResult partitionsMetadataResult(TSchemaTableRequestParams params) {
        if (!params.isSetCurrentUserIdent()) {
            return errorResult("current user ident is not set.");
        }

        if (!params.isSetDbId()) {
            return errorResult("current db id is not set.");
        }

        if (!params.isSetCatalog()) {
            return errorResult("current catalog is not set.");
        }

        TUserIdentity tcurrentUserIdentity = params.getCurrentUserIdent();
        UserIdentity currentUserIdentity = UserIdentity.fromThrift(tcurrentUserIdentity);
        TFetchSchemaTableDataResult result = new TFetchSchemaTableDataResult();
        Long dbId = params.getDbId();
        String clg = params.getCatalog();
        List<TRow> dataBatch = Lists.newArrayList();
        CatalogIf catalog = Env.getCurrentEnv().getCatalogMgr().getCatalog(clg);
        if (catalog == null) {
            // catalog is NULL let return empty to BE
            result.setDataBatch(dataBatch);
            result.setStatus(new TStatus(TStatusCode.OK));
            return result;
        }
        DatabaseIf database = catalog.getDbNullable(dbId);
        if (database == null) {
            // BE gets the database id list from FE and then invokes this interface
            // per database. there is a chance that in between database can be dropped.
            // so need to handle database not exist case and return ok so that BE continue the
            // loop with next database.
            result.setDataBatch(dataBatch);
            result.setStatus(new TStatus(TStatusCode.OK));
            return result;
        }
        List<TableIf> tables = database.getTables();
        if (catalog instanceof InternalCatalog) {
            // only olap tables
            partitionsForInternalCatalog(currentUserIdentity, catalog, database, tables, dataBatch);
        } else if (catalog instanceof ExternalCatalog) {
            partitionsForExternalCatalog(currentUserIdentity, catalog, database, tables, dataBatch);
        }
        result.setDataBatch(dataBatch);
        result.setStatus(new TStatus(TStatusCode.OK));
        return result;