diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendProcNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendProcNode.java
index 0ce21396cf..78c110d5d5 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendProcNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendProcNode.java
@@ -33,7 +33,7 @@ import java.util.Map;
 public class BackendProcNode implements ProcNodeInterface {
     public static final ImmutableList<String> TITLE_NAMES = new ImmutableList.Builder<String>()
             .add("RootPath").add("DataUsedCapacity").add("OtherUsedCapacity").add("AvailCapacity")
-            .add("TotalCapacity").add("TotalUsedPct").add("State").add("PathHash")
+            .add("TotalCapacity").add("TotalUsedPct").add("State").add("PathHash").add("StorageMedium")
             .build();

     private Backend backend;
@@ -53,17 +53,19 @@ public class BackendProcNode implements ProcNodeInterface {
             List<String> info = Lists.newArrayList();
             info.add(entry.getKey());

+            DiskInfo disk = entry.getValue();
+
             // data used
-            long dataUsedB = entry.getValue().getDataUsedCapacityB();
+            long dataUsedB = disk.getDataUsedCapacityB();
             Pair<Double, String> dataUsedUnitPair = DebugUtil.getByteUint(dataUsedB);
             info.add(DebugUtil.DECIMAL_FORMAT_SCALE_3.format(dataUsedUnitPair.first) + " " + dataUsedUnitPair.second);

             // avail
-            long availB = entry.getValue().getAvailableCapacityB();
+            long availB = disk.getAvailableCapacityB();
             Pair<Double, String> availUnitPair = DebugUtil.getByteUint(availB);

             // total
-            long totalB = entry.getValue().getTotalCapacityB();
+            long totalB = disk.getTotalCapacityB();
             Pair<Double, String> totalUnitPair = DebugUtil.getByteUint(totalB);
             // other
             long otherB = totalB - availB - dataUsedB;
@@ -82,8 +84,9 @@
             }
             info.add(String.format("%.2f", used) + " %");

-            info.add(entry.getValue().getState().name());
-            info.add(String.valueOf(entry.getValue().getPathHash()));
+            info.add(disk.getState().name());
+            info.add(String.valueOf(disk.getPathHash()));
+            info.add(disk.getStorageMedium().name());

             result.addRow(info);
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ReplicasProcNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ReplicasProcNode.java
index 56ebbd3d83..9b2a8f6d8f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ReplicasProcNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ReplicasProcNode.java
@@ -17,6 +17,7 @@

 package org.apache.doris.common.proc;

+import org.apache.doris.catalog.DiskInfo;
 import org.apache.doris.catalog.Env;
 import org.apache.doris.catalog.OlapTable;
 import org.apache.doris.catalog.Replica;
@@ -43,7 +44,8 @@ public class ReplicasProcNode implements ProcNodeInterface {
             .add("BackendId").add("Version").add("LstSuccessVersion").add("LstFailedVersion").add("LstFailedTime")
             .add("SchemaHash").add("LocalDataSize").add("RemoteDataSize").add("RowCount").add("State").add("IsBad")
             .add("IsUserDrop")
-            .add("VersionCount").add("PathHash").add("MetaUrl").add("CompactionStatus").add("CooldownReplicaId")
+            .add("VersionCount").add("PathHash").add("Path")
+            .add("MetaUrl").add("CompactionStatus").add("CooldownReplicaId")
             .add("CooldownMetaId").add("QueryHits").build();

     private long tabletId;
@@ -84,6 +86,16 @@
             String metaUrl = String.format("http://" + hostPort + "/api/meta/header/%d", tabletId);
             String compactionUrl = String.format("http://" + hostPort + "/api/compaction/show?tablet_id=%d", tabletId);

+            String path = "";
+            if (be != null) {
+                DiskInfo diskInfo = be.getDisks().values().stream()
+                        .filter(disk -> disk.getPathHash() == replica.getPathHash())
+                        .findFirst().orElse(null);
+                if (diskInfo != null) {
+                    path = diskInfo.getRootPath();
+                }
+            }
+
             String cooldownMetaId = "";
             if (replica.getCooldownMetaId() != null) {
                 cooldownMetaId = replica.getCooldownMetaId().toString();
@@ -107,6 +119,7 @@
                     String.valueOf(replica.isUserDrop()),
                     String.valueOf(replica.getVersionCount()),
                     String.valueOf(replica.getPathHash()),
+                    path,
                     metaUrl,
                     compactionUrl,
                     String.valueOf(tablet.getCooldownConf().first),
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletsProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletsProcDir.java
index d4c79db92d..c82a55bd38 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletsProcDir.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletsProcDir.java
@@ -17,6 +17,7 @@

 package org.apache.doris.common.proc;

+import org.apache.doris.catalog.DiskInfo;
 import org.apache.doris.catalog.Env;
 import org.apache.doris.catalog.MaterializedIndex;
 import org.apache.doris.catalog.Replica;
@@ -51,7 +52,8 @@
             .add("LstSuccessVersion").add("LstFailedVersion").add("LstFailedTime")
             .add("LocalDataSize").add("RemoteDataSize").add("RowCount").add("State")
             .add("LstConsistencyCheckTime").add("CheckVersion")
-            .add("VersionCount").add("QueryHits").add("PathHash").add("MetaUrl").add("CompactionStatus")
+            .add("VersionCount").add("QueryHits").add("PathHash").add("Path")
+            .add("MetaUrl").add("CompactionStatus")
             .add("CooldownReplicaId").add("CooldownMetaId").build();

     private Table table;
@@ -68,6 +70,12 @@
         ImmutableMap<Long, Backend> backendMap = Env.getCurrentSystemInfo().getIdToBackend();

         List<List<Comparable>> tabletInfos = new ArrayList<List<Comparable>>();
+        Map<Long, String> pathHashToRoot = new HashMap<>();
+        for (Backend be : backendMap.values()) {
+            for (DiskInfo diskInfo : be.getDisks().values()) {
+                pathHashToRoot.put(diskInfo.getPathHash(), diskInfo.getRootPath());
+            }
+        }
         table.readLock();
         try {
             Map<Long, Long> replicaIdToQueryHits = new HashMap<>();
@@ -80,6 +88,7 @@
                 }
                 replicaIdToQueryHits = QueryStatsUtil.getMergedReplicasStats(replicaIds);
             }
+
             // get infos
             for (Tablet tablet : index.getTablets()) {
                 long tabletId = tablet.getId();
@@ -107,6 +116,7 @@
                     tabletInfo.add(-1); // version count
                     tabletInfo.add(0L); // query hits
                     tabletInfo.add(-1); // path hash
+                    tabletInfo.add(FeConstants.null_string); // path
                     tabletInfo.add(FeConstants.null_string); // meta url
                     tabletInfo.add(FeConstants.null_string); // compaction status
                     tabletInfo.add(-1); // cooldown replica id
@@ -140,6 +150,7 @@
                     tabletInfo.add(replica.getVersionCount());
                     tabletInfo.add(replicaIdToQueryHits.getOrDefault(replica.getId(), 0L));
                     tabletInfo.add(replica.getPathHash());
+                    tabletInfo.add(pathHashToRoot.getOrDefault(replica.getPathHash(), ""));
                     Backend be = backendMap.get(replica.getBackendId());
                     String host = (be == null ? Backend.DUMMY_IP : be.getHost());
                     int port = (be == null ?
0 : be.getHttpPort()); diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/proc/BackendProcNodeTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/proc/BackendProcNodeTest.java index 5e6f26561d..cefa0e0d52 100644 --- a/fe/fe-core/src/test/java/org/apache/doris/common/proc/BackendProcNodeTest.java +++ b/fe/fe-core/src/test/java/org/apache/doris/common/proc/BackendProcNodeTest.java @@ -97,7 +97,7 @@ public class BackendProcNodeTest { Assert.assertTrue(result.getRows().size() >= 1); Assert.assertEquals(Lists.newArrayList("RootPath", "DataUsedCapacity", "OtherUsedCapacity", "AvailCapacity", - "TotalCapacity", "TotalUsedPct", "State", "PathHash"), result.getColumnNames()); + "TotalCapacity", "TotalUsedPct", "State", "PathHash", "StorageMedium"), result.getColumnNames()); } } diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy index 9e238570ac..96b97c1704 100644 --- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy +++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/Suite.groovy @@ -975,7 +975,12 @@ class Suite implements GroovyInterceptable { def result = [:] tablets.each { row -> - def tablet_id = row[0] + def tablet_id + if (row.containsKey("TabletId")) { + tablet_id = row.TabletId + } else { + tablet_id = row[0] + } if (!result.containsKey(tablet_id)) { result[tablet_id] = row } diff --git a/regression-test/suites/compaction/test_base_compaction_with_dup_key_max_file_size_limit.groovy b/regression-test/suites/compaction/test_base_compaction_with_dup_key_max_file_size_limit.groovy index d4c6601295..bf2e6f5f2c 100644 --- a/regression-test/suites/compaction/test_base_compaction_with_dup_key_max_file_size_limit.groovy +++ b/regression-test/suites/compaction/test_base_compaction_with_dup_key_max_file_size_limit.groovy @@ -150,9 +150,9 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") { ) """ - def tablet = (sql """ show tablets from ${tableName}; """)[0] - String tablet_id = tablet[0] - String trigger_backend_id = tablet[2] + def tablet = (sql_return_maparray """ show tablets from ${tableName}; """)[0] + String tablet_id = tablet.TabletId + String trigger_backend_id = tablet.BackendId // rowsets: // [0-1] 0 @@ -224,4 +224,4 @@ suite("test_base_compaction_with_dup_key_max_file_size_limit", "p2") { assertTrue(rowCount[0][0] != rows) } finally { } -} \ No newline at end of file +} diff --git a/regression-test/suites/compaction/test_compacation_with_delete.groovy b/regression-test/suites/compaction/test_compacation_with_delete.groovy index 6530ed57a8..e41f787c52 100644 --- a/regression-test/suites/compaction/test_compacation_with_delete.groovy +++ b/regression-test/suites/compaction/test_compacation_with_delete.groovy @@ -96,12 +96,12 @@ suite("test_compaction_with_delete") { DELETE from ${tableName} where cost = '5' """ //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus - String[][] tablets = sql """ show tablets from ${tableName}; """ + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] + for (def tablet in 
tablets) { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -116,12 +116,12 @@ suite("test_compaction_with_delete") { } // wait for all compactions done - for (String[] tablet in tablets) { + for (def tablet in tablets) { boolean running = true do { Thread.sleep(1000) - String tablet_id = tablet[0] - backend_id = tablet[2] + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -134,10 +134,9 @@ suite("test_compaction_with_delete") { def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) int rowCount = 0 - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def compactionStatusUrlIndex = 18 - (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex]) + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def tabletJson = parseJson(out.trim()) diff --git a/regression-test/suites/compaction/test_compaction_agg_keys.groovy b/regression-test/suites/compaction/test_compaction_agg_keys.groovy index 20a54bb2d4..3681b2dcac 100644 --- a/regression-test/suites/compaction/test_compaction_agg_keys.groovy +++ b/regression-test/suites/compaction/test_compaction_agg_keys.groovy @@ -101,12 +101,12 @@ suite("test_compaction_agg_keys") { qt_select_default """ SELECT * FROM ${tableName} t ORDER BY user_id; """ //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,QueryHits,VersionCount,PathHash,MetaUrl,CompactionStatus - String[][] tablets = sql """ show tablets from ${tableName}; """ + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -121,12 +121,12 @@ suite("test_compaction_agg_keys") { } // wait for all compactions done - for (String[] tablet in tablets) { + for (def tablet in tablets) { boolean running = true do { Thread.sleep(1000) - String tablet_id = tablet[0] - backend_id = tablet[2] + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -140,11 +140,10 @@ suite("test_compaction_agg_keys") { def replicaNum = get_table_replica_num(tableName) 
logger.info("get table replica num: " + replicaNum) int rowCount = 0 - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def compactionStatusUrlIndex = 18 + for (def tablet in tablets) { + String tablet_id = tablet.TabletId - (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex]) + (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) diff --git a/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy b/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy index 36a8383dda..1610587602 100644 --- a/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy +++ b/regression-test/suites/compaction/test_compaction_agg_keys_with_delete.groovy @@ -112,12 +112,12 @@ suite("test_compaction_agg_keys_with_delete") { qt_select_default """ SELECT * FROM ${tableName} t ORDER BY user_id; """ //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus - String[][] tablets = sql """ show tablets from ${tableName}; """ + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -132,12 +132,12 @@ suite("test_compaction_agg_keys_with_delete") { } // wait for all compactions done - for (String[] tablet in tablets) { + for (def tablet in tablets) { boolean running = true do { Thread.sleep(1000) - String tablet_id = tablet[0] - backend_id = tablet[2] + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -150,11 +150,10 @@ suite("test_compaction_agg_keys_with_delete") { def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) int rowCount = 0 - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def compactionStatusUrlIndex = 18 + for (def tablet in tablets) { + String tablet_id = tablet.TabletId - (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex]) + (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) diff --git a/regression-test/suites/compaction/test_compaction_dup_keys.groovy b/regression-test/suites/compaction/test_compaction_dup_keys.groovy index d9d19d7bb8..458185ba80 100644 --- a/regression-test/suites/compaction/test_compaction_dup_keys.groovy +++ b/regression-test/suites/compaction/test_compaction_dup_keys.groovy @@ -100,12 +100,12 @@ suite("test_compaction_dup_keys") { qt_select_default """ SELECT * FROM ${tableName} t ORDER BY 
user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time; """ //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus - String[][] tablets = sql """ show tablets from ${tableName}; """ + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) @@ -121,12 +121,12 @@ suite("test_compaction_dup_keys") { } // wait for all compactions done - for (String[] tablet in tablets) { + for (def tablet in tablets) { boolean running = true do { Thread.sleep(1000) - String tablet_id = tablet[0] - backend_id = tablet[2] + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -139,11 +139,10 @@ suite("test_compaction_dup_keys") { def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) int rowCount = 0 - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def compactionStatusUrlIndex = 18 + for (def tablet in tablets) { + String tablet_id = tablet.TabletId - (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex]) + (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) diff --git a/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy b/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy index 18e8885122..2e34086172 100644 --- a/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy +++ b/regression-test/suites/compaction/test_compaction_dup_keys_with_delete.groovy @@ -112,12 +112,12 @@ suite("test_compaction_dup_keys_with_delete") { qt_select_default """ SELECT * FROM ${tableName} t ORDER BY user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time; """ //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus - String[][] tablets = sql """ show tablets from ${tableName}; """ + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) 
logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -133,12 +133,12 @@ suite("test_compaction_dup_keys_with_delete") { } // wait for all compactions done - for (String[] tablet in tablets) { + for (def tablet in tablets) { boolean running = true do { Thread.sleep(1000) - String tablet_id = tablet[0] - backend_id = tablet[2] + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -152,11 +152,10 @@ suite("test_compaction_dup_keys_with_delete") { def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) int rowCount = 0 - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def compactionStatusUrlIndex = 18 + for (def tablet in tablets) { + String tablet_id = tablet.TabletId - (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex]) + (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) diff --git a/regression-test/suites/compaction/test_compaction_uniq_cluster_keys_with_delete.groovy b/regression-test/suites/compaction/test_compaction_uniq_cluster_keys_with_delete.groovy index 7634752400..e0995a0428 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_cluster_keys_with_delete.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_cluster_keys_with_delete.groovy @@ -121,12 +121,12 @@ suite("test_compaction_uniq_cluster_keys_with_delete") { qt_select_default2 """ SELECT * FROM ${tableName} t ORDER BY user_id; """ //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus - String[][] tablets = sql """ show tablets from ${tableName}; """ + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -141,12 +141,12 @@ suite("test_compaction_uniq_cluster_keys_with_delete") { } // wait for all compactions done - for (String[] tablet in tablets) { + for (def tablet in tablets) { boolean running = true do { Thread.sleep(1000) - String tablet_id = tablet[0] - backend_id = tablet[2] + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -159,10 +159,9 @@ suite("test_compaction_uniq_cluster_keys_with_delete") { def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) int rowCount = 0 - for (String[] tablet in tablets) { - 
String tablet_id = tablet[0] - def compactionStatusUrlIndex = 18 - (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex]) + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def tabletJson = parseJson(out.trim()) diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys.groovy index 34b357627d..dd8d1dfb7d 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys.groovy @@ -100,12 +100,12 @@ suite("test_compaction_uniq_keys") { qt_select_default """ SELECT * FROM ${tableName} t ORDER BY user_id; """ //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus - String[][] tablets = sql """ show tablets from ${tableName}; """ + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -120,12 +120,12 @@ suite("test_compaction_uniq_keys") { } // wait for all compactions done - for (String[] tablet in tablets) { + for (def tablet in tablets) { boolean running = true do { Thread.sleep(1000) - String tablet_id = tablet[0] - backend_id = tablet[2] + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -139,10 +139,9 @@ suite("test_compaction_uniq_keys") { logger.info("get table replica num: " + replicaNum) int rowCount = 0 - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def compactionStatusUrlIndex = 18 - (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex]) + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def tabletJson = parseJson(out.trim()) diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_cluster_key.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_cluster_key.groovy index 3a8fee9aba..a0b896b77f 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_cluster_key.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_cluster_key.groovy @@ -105,12 +105,12 @@ suite("test_compaction_uniq_keys_cluster_key") { qt_select_default """ SELECT * FROM ${tableName} t ORDER BY user_id; """ 
//TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus - String[][] tablets = sql """ show tablets from ${tableName}; """ + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -125,12 +125,12 @@ suite("test_compaction_uniq_keys_cluster_key") { } // wait for all compactions done - for (String[] tablet in tablets) { + for (def tablet in tablets) { boolean running = true do { Thread.sleep(1000) - String tablet_id = tablet[0] - backend_id = tablet[2] + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -143,10 +143,9 @@ suite("test_compaction_uniq_keys_cluster_key") { def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) int rowCount = 0 - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def compactionStatusUrlIndex = 18 - (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex]) + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def tabletJson = parseJson(out.trim()) diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy index 484ae9a7f4..e493d6b036 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_row_store.groovy @@ -154,14 +154,14 @@ suite("test_compaction_uniq_keys_row_store") { (4, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.028', '2017-10-01 11:11:11.018', 'Beijing', 10, 1, NULL, NULL, NULL, NULL, '2020-01-05', 1, 34, 20) """ //TabletId,ReplicaIdBackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus - tablets = sql """ show tablets from ${tableName}; """ + tablets = sql_return_maparray """ show tablets from ${tableName}; """ checkValue() // trigger compactions for all tablets in ${tableName} - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) 
assertEquals(code, 0) @@ -176,12 +176,12 @@ suite("test_compaction_uniq_keys_row_store") { } // wait for all compactions done - for (String[] tablet in tablets) { + for (def tablet in tablets) { boolean running = true do { Thread.sleep(1000) - String tablet_id = tablet[0] - backend_id = tablet[2] + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -195,10 +195,9 @@ suite("test_compaction_uniq_keys_row_store") { def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) int rowCount = 0 - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def compactionStatusUrlIndex = 18 - (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex]) + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def tabletJson = parseJson(out.trim()) diff --git a/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy b/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy index 3902264de2..18a46422d3 100644 --- a/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy +++ b/regression-test/suites/compaction/test_compaction_uniq_keys_with_delete.groovy @@ -116,12 +116,12 @@ suite("test_compaction_uniq_keys_with_delete") { qt_select_default2 """ SELECT * FROM ${tableName} t ORDER BY user_id; """ //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus - String[][] tablets = sql """ show tablets from ${tableName}; """ + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // trigger compactions for all tablets in ${tableName} - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -136,12 +136,12 @@ suite("test_compaction_uniq_keys_with_delete") { } // wait for all compactions done - for (String[] tablet in tablets) { + for (def tablet in tablets) { boolean running = true do { Thread.sleep(1000) - String tablet_id = tablet[0] - backend_id = tablet[2] + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -154,10 +154,9 @@ suite("test_compaction_uniq_keys_with_delete") { def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) int rowCount = 0 - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def compactionStatusUrlIndex = 18 - (code, out, err) = curl("GET", 
tablet[compactionStatusUrlIndex]) + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def tabletJson = parseJson(out.trim()) diff --git a/regression-test/suites/compaction/test_full_compaction.groovy b/regression-test/suites/compaction/test_full_compaction.groovy index 81f22bf37c..10d5fc1457 100644 --- a/regression-test/suites/compaction/test_full_compaction.groovy +++ b/regression-test/suites/compaction/test_full_compaction.groovy @@ -98,16 +98,15 @@ suite("test_full_compaction") { qt_skip_delete """select * from ${tableName} order by user_id, value""" //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus - String[][] tablets = sql """ show tablets from ${tableName}; """ + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ def replicaNum = get_table_replica_num(tableName) logger.info("get table replica num: " + replicaNum) // before full compaction, there are 7 rowsets. int rowsetCount = 0 - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def compactionStatusUrlIndex = 18 - (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex]) + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def tabletJson = parseJson(out.trim()) @@ -117,9 +116,9 @@ suite("test_full_compaction") { assert (rowsetCount == 7 * replicaNum) // trigger full compactions for all tablets in ${tableName} - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId times = 1 do{ @@ -140,12 +139,12 @@ suite("test_full_compaction") { } // wait for full compaction done - for (String[] tablet in tablets) { + for (def tablet in tablets) { boolean running = true do { Thread.sleep(1000) - String tablet_id = tablet[0] - backend_id = tablet[2] + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -158,10 +157,9 @@ suite("test_full_compaction") { // after full compaction, there is only 1 rowset. 
rowsetCount = 0 - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - def compactionStatusUrlIndex = 18 - (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex]) + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def tabletJson = parseJson(out.trim()) diff --git a/regression-test/suites/compaction/test_full_compaction_by_table_id.groovy b/regression-test/suites/compaction/test_full_compaction_by_table_id.groovy index cd721774c2..66c9e4bbbc 100644 --- a/regression-test/suites/compaction/test_full_compaction_by_table_id.groovy +++ b/regression-test/suites/compaction/test_full_compaction_by_table_id.groovy @@ -100,12 +100,12 @@ suite("test_full_compaction_by_table_id") { qt_skip_delete """select * from ${tableName} order by user_id, value""" //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus - String[][] tablets = sql """ show tablets from ${tableName}; """ + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // before full compaction, there are 7 rowsets in all tablets. - for (int i=0; i - for (String[] tablet : tablets) { - String tablet_id = tablet[0] - String backend_id = tablet[2] + def trigger_full_compaction_on_tablets = { tablets -> + for (def tablet : tablets) { + String tablet_id = tablet.TabletId + String backend_id = tablet.BackendId int times = 1 String compactionStatus; @@ -58,13 +58,13 @@ suite("test_index_compaction_failure_injection", "nonConcurrent") { } } - def wait_full_compaction_done = { String[][] tablets -> - for (String[] tablet in tablets) { + def wait_full_compaction_done = { tablets -> + for (def tablet in tablets) { boolean running = true do { Thread.sleep(1000) - String tablet_id = tablet[0] - String backend_id = tablet[2] + String tablet_id = tablet.TabletId + String backend_id = tablet.BackendId def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -75,11 +75,10 @@ suite("test_index_compaction_failure_injection", "nonConcurrent") { } } - def get_rowset_count = {String[][] tablets -> + def get_rowset_count = { tablets -> int rowsetCount = 0 - for (String[] tablet in tablets) { - def compactionStatusUrlIndex = 18 - def (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex]) + for (def tablet in tablets) { + def (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def tabletJson = parseJson(out.trim()) @@ -121,13 +120,13 @@ suite("test_index_compaction_failure_injection", "nonConcurrent") { qt_sql """ select * from ${tableName} where score < 100 order by id, name, hobbies, score """ } - def run_test = { String[][] tablets -> + def run_test = { tablets -> insert_data.call() run_sql.call() int replicaNum = 1 - String[][] dedup_tablets = deduplicate_tablets(tablets) + def dedup_tablets = deduplicate_tablets(tablets) if (dedup_tablets.size() > 0) { replicaNum = Math.round(tablets.size() / dedup_tablets.size()) if (replicaNum != 1 && replicaNum != 3) { @@ 
-274,7 +273,7 @@ suite("test_index_compaction_failure_injection", "nonConcurrent") { """ //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus - String[][] tablets = sql """ show tablets from ${tableName}; """ + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ run_test.call(tablets) @@ -304,7 +303,7 @@ suite("test_index_compaction_failure_injection", "nonConcurrent") { ); """ - tablets = sql """ show tablets from ${tableName}; """ + tablets = sql_return_maparray """ show tablets from ${tableName}; """ run_test.call(tablets) } finally { @@ -312,4 +311,4 @@ suite("test_index_compaction_failure_injection", "nonConcurrent") { set_be_config.call("inverted_index_compaction_enable", invertedIndexCompactionEnable.toString()) } } -} \ No newline at end of file +} diff --git a/regression-test/suites/inverted_index_p0/index_change/test_index_change_with_compaction.groovy b/regression-test/suites/inverted_index_p0/index_change/test_index_change_with_compaction.groovy index 924ab0a9f4..b32ae0df92 100644 --- a/regression-test/suites/inverted_index_p0/index_change/test_index_change_with_compaction.groovy +++ b/regression-test/suites/inverted_index_p0/index_change/test_index_change_with_compaction.groovy @@ -132,7 +132,7 @@ suite("test_index_change_with_compaction") { qt_select_default """ SELECT * FROM ${tableName} t ORDER BY user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time; """ //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus - String[][] tablets = sql """ show tablets from ${tableName}; """ + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ // create inverted index sql """ CREATE INDEX idx_user_id ON ${tableName}(`user_id`) USING INVERTED """ @@ -142,9 +142,9 @@ suite("test_index_change_with_compaction") { wait_for_latest_op_on_table_finish(tableName, timeout) // trigger compactions for all tablets in ${tableName} - for (String[] tablet in tablets) { - String tablet_id = tablet[0] - backend_id = tablet[2] + for (def tablet in tablets) { + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId StringBuilder sb = new StringBuilder(); sb.append("curl -X POST http://") sb.append(backendId_to_backendIP.get(backend_id)) @@ -177,12 +177,12 @@ suite("test_index_change_with_compaction") { sql "build index idx_city on ${tableName}" // wait for all compactions done - for (String[] tablet in tablets) { + for (def tablet in tablets) { boolean running = true do { Thread.sleep(1000) - String tablet_id = tablet[0] - backend_id = tablet[2] + String tablet_id = tablet.TabletId + backend_id = tablet.BackendId StringBuilder sb = new StringBuilder(); sb.append("curl -X GET http://") sb.append(backendId_to_backendIP.get(backend_id)) @@ -206,12 +206,11 @@ suite("test_index_change_with_compaction") { } int rowCount = 0 - for (String[] tablet in tablets) { - String tablet_id = tablet[0] + for (def tablet in tablets) { + String tablet_id = tablet.TabletId StringBuilder sb = new StringBuilder(); - def compactionStatusUrlIndex = 18 sb.append("curl -X GET ") - sb.append(tablet[compactionStatusUrlIndex]) + sb.append(tablet.CompactionStatus) String 
command = sb.toString() // wait for cleaning stale_rowsets process = command.execute() @@ -227,7 +226,7 @@ suite("test_index_change_with_compaction") { } } - String[][] dedup_tablets = deduplicate_tablets(tablets) + def dedup_tablets = deduplicate_tablets(tablets) // In the p0 testing environment, there are no expected operations such as scaling down BE (backend) services // if tablets or dedup_tablets is empty, exception is thrown, and case fail diff --git a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_dup_keys.groovy b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_dup_keys.groovy index 8bbedb9779..cc8516a32d 100644 --- a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_dup_keys.groovy +++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_dup_keys.groovy @@ -32,10 +32,10 @@ suite("test_index_compaction_dup_keys", "p0") { } } - def trigger_full_compaction_on_tablets = { String[][] tablets -> - for (String[] tablet : tablets) { - String tablet_id = tablet[0] - String backend_id = tablet[2] + def trigger_full_compaction_on_tablets = { tablets -> + for (def tablet : tablets) { + String tablet_id = tablet.TabletId + String backend_id = tablet.BackendId int times = 1 String compactionStatus; @@ -58,13 +58,13 @@ suite("test_index_compaction_dup_keys", "p0") { } } - def wait_full_compaction_done = { String[][] tablets -> - for (String[] tablet in tablets) { + def wait_full_compaction_done = { tablets -> + for (def tablet in tablets) { boolean running = true do { Thread.sleep(1000) - String tablet_id = tablet[0] - String backend_id = tablet[2] + String tablet_id = tablet.TabletId + String backend_id = tablet.BackendId def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -75,11 +75,10 @@ suite("test_index_compaction_dup_keys", "p0") { } } - def get_rowset_count = {String[][] tablets -> + def get_rowset_count = { tablets -> int rowsetCount = 0 - for (String[] tablet in tablets) { - def compactionStatusUrlIndex = 18 - def (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex]) + for (def tablet in tablets) { + def (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def tabletJson = parseJson(out.trim()) @@ -163,8 +162,8 @@ suite("test_index_compaction_dup_keys", "p0") { qt_sql """ select * from ${tableName} where score < 100 order by id, name, hobbies, score """ //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus - String[][] tablets = sql """ show tablets from ${tableName}; """ - String[][] dedup_tablets = deduplicate_tablets(tablets) + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ + def dedup_tablets = deduplicate_tablets(tablets) // In the p0 testing environment, there are no expected operations such as scaling down BE (backend) services // if tablets or dedup_tablets is empty, exception is thrown, and case fail diff --git a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_null.groovy 
b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_null.groovy index 0afe12909d..6765480a4f 100644 --- a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_null.groovy +++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_null.groovy @@ -32,10 +32,10 @@ suite("test_index_compaction_null", "p0") { } } - def trigger_full_compaction_on_tablets = { String[][] tablets -> - for (String[] tablet : tablets) { - String tablet_id = tablet[0] - String backend_id = tablet[2] + def trigger_full_compaction_on_tablets = { tablets -> + for (def tablet : tablets) { + String tablet_id = tablet.TabletId + String backend_id = tablet.BackendId int times = 1 String compactionStatus; @@ -58,13 +58,13 @@ suite("test_index_compaction_null", "p0") { } } - def wait_full_compaction_done = { String[][] tablets -> - for (String[] tablet in tablets) { + def wait_full_compaction_done = { tablets -> + for (def tablet in tablets) { boolean running = true do { Thread.sleep(1000) - String tablet_id = tablet[0] - String backend_id = tablet[2] + String tablet_id = tablet.TabletId + String backend_id = tablet.BackendId def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id) logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) @@ -75,11 +75,10 @@ suite("test_index_compaction_null", "p0") { } } - def get_rowset_count = {String[][] tablets -> + def get_rowset_count = { tablets -> int rowsetCount = 0 - for (String[] tablet in tablets) { - def compactionStatusUrlIndex = 18 - def (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex]) + for (def tablet in tablets) { + def (code, out, err) = curl("GET", tablet.CompactionStatus) logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err) assertEquals(code, 0) def tabletJson = parseJson(out.trim()) @@ -152,14 +151,14 @@ suite("test_index_compaction_null", "p0") { qt_select_match_2 "SELECT * FROM ${tableName} WHERE addr MATCH_ALL 'addr fengtai' ORDER BY id" } - def run_test = { String[][] tablets -> + def run_test = { tablets -> insert_data.call() insert_data.call() run_sql.call() int replicaNum = 1 - String[][] dedup_tablets = deduplicate_tablets(tablets) + def dedup_tablets = deduplicate_tablets(tablets) if (dedup_tablets.size() > 0) { replicaNum = Math.round(tablets.size() / dedup_tablets.size()) if (replicaNum != 1 && replicaNum != 3) { @@ -261,7 +260,7 @@ suite("test_index_compaction_null", "p0") { """ //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus - String[][] tablets = sql """ show tablets from ${tableName}; """ + def tablets = sql_return_maparray """ show tablets from ${tableName}; """ run_test.call(tablets) @@ -294,7 +293,7 @@ suite("test_index_compaction_null", "p0") { ) """ - tablets = sql """ show tablets from ${tableName}; """ + tablets = sql_return_maparray """ show tablets from ${tableName}; """ run_test.call(tablets) } finally { @@ -302,4 +301,4 @@ suite("test_index_compaction_null", "p0") { set_be_config.call("inverted_index_compaction_enable", invertedIndexCompactionEnable.toString()) } } -} \ No newline at end of file +} diff --git a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_unique_keys.groovy 
b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_unique_keys.groovy
index 64d2ee2164..76970feb46 100644
--- a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_unique_keys.groovy
+++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_unique_keys.groovy
@@ -32,10 +32,10 @@ suite("test_index_compaction_unique_keys", "p0") {
         }
     }

-    def trigger_full_compaction_on_tablets = { String[][] tablets ->
-        for (String[] tablet : tablets) {
-            String tablet_id = tablet[0]
-            String backend_id = tablet[2]
+    def trigger_full_compaction_on_tablets = { tablets ->
+        for (def tablet : tablets) {
+            String tablet_id = tablet.TabletId
+            String backend_id = tablet.BackendId
             int times = 1

             String compactionStatus;
@@ -58,13 +58,13 @@ suite("test_index_compaction_unique_keys", "p0") {
         }
     }

-    def wait_full_compaction_done = { String[][] tablets ->
-        for (String[] tablet in tablets) {
+    def wait_full_compaction_done = { tablets ->
+        for (def tablet in tablets) {
             boolean running = true
             do {
                 Thread.sleep(1000)
-                String tablet_id = tablet[0]
-                String backend_id = tablet[2]
+                String tablet_id = tablet.TabletId
+                String backend_id = tablet.BackendId
                 def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
                 assertEquals(code, 0)
@@ -75,11 +75,10 @@ suite("test_index_compaction_unique_keys", "p0") {
         }
     }

-    def get_rowset_count = {String[][] tablets ->
+    def get_rowset_count = { tablets ->
         int rowsetCount = 0
-        for (String[] tablet in tablets) {
-            def compactionStatusUrlIndex = 18
-            def (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex])
+        for (def tablet in tablets) {
+            def (code, out, err) = curl("GET", tablet.CompactionStatus)
             logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err)
             assertEquals(code, 0)
             def tabletJson = parseJson(out.trim())
@@ -167,8 +166,8 @@ suite("test_index_compaction_unique_keys", "p0") {
     qt_sql """ select * from ${tableName} where score < 100 order by id, name, hobbies, score """

     //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus
-    String[][] tablets = sql """ show tablets from ${tableName}; """
-    String[][] dedup_tablets = deduplicate_tablets(tablets)
+    def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    def dedup_tablets = deduplicate_tablets(tablets)

     // In the p0 testing environment, there are no expected operations such as scaling down BE (backend) services
     // if tablets or dedup_tablets is empty, exception is thrown, and case fail
diff --git a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy
index dd6e91e53b..693e4aac1b 100644
--- a/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy
+++ b/regression-test/suites/inverted_index_p0/index_compaction/test_index_compaction_with_multi_index_segments.groovy
@@ -32,10 +32,10 @@ suite("test_index_compaction_with_multi_index_segments", "p0") {
         }
     }

-    def trigger_full_compaction_on_tablets = { String[][] tablets ->
-        for (String[] tablet : tablets) {
-            String tablet_id = tablet[0]
-            String backend_id = tablet[2]
+    def trigger_full_compaction_on_tablets = { tablets ->
+        for (def tablet : tablets) {
+            String tablet_id = tablet.TabletId
+            String backend_id = tablet.BackendId
             int times = 1

             String compactionStatus;
@@ -58,13 +58,13 @@ suite("test_index_compaction_with_multi_index_segments", "p0") {
         }
     }

-    def wait_full_compaction_done = { String[][] tablets ->
-        for (String[] tablet in tablets) {
+    def wait_full_compaction_done = { tablets ->
+        for (def tablet in tablets) {
             boolean running = true
             do {
                 Thread.sleep(1000)
-                String tablet_id = tablet[0]
-                String backend_id = tablet[2]
+                String tablet_id = tablet.TabletId
+                String backend_id = tablet.BackendId
                 def (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
                 assertEquals(code, 0)
@@ -75,11 +75,10 @@ suite("test_index_compaction_with_multi_index_segments", "p0") {
         }
     }

-    def get_rowset_count = {String[][] tablets ->
+    def get_rowset_count = { tablets ->
         int rowsetCount = 0
-        for (String[] tablet in tablets) {
-            def compactionStatusUrlIndex = 18
-            def (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex])
+        for (def tablet in tablets) {
+            def (code, out, err) = curl("GET", tablet.CompactionStatus)
             logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err)
             assertEquals(code, 0)
             def tabletJson = parseJson(out.trim())
@@ -193,8 +192,8 @@ suite("test_index_compaction_with_multi_index_segments", "p0") {
     qt_sql """ select * from ${tableName} where comment_id < 8 order by file_time, comment_id, body """

     //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus
-    String[][] tablets = sql """ show tablets from ${tableName}; """
-    String[][] dedup_tablets = deduplicate_tablets(tablets)
+    def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    def dedup_tablets = deduplicate_tablets(tablets)

     // In the p0 testing environment, there are no expected operations such as scaling down BE (backend) services
     // if tablets or dedup_tablets is empty, exception is thrown, and case fail
@@ -235,7 +234,7 @@ suite("test_index_compaction_with_multi_index_segments", "p0") {
         ("2018-02-21 12:00:00", 9, "I\'m using the builds"),
         ("2018-02-21 12:00:00", 10, "I\'m using the builds");
     """
-    tablets = sql """ show tablets from ${tableName}; """
+    tablets = sql_return_maparray """ show tablets from ${tableName}; """

     // before full compaction, there are 2 rowsets.
     rowsetCount = get_rowset_count.call(tablets)
@@ -313,7 +312,7 @@ suite("test_index_compaction_with_multi_index_segments", "p0") {
     qt_sql """ select * from ${tableName} where comment_id < 8 order by file_time, comment_id, body """

     //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus
-    tablets = sql """ show tablets from ${tableName}; """
+    tablets = sql_return_maparray """ show tablets from ${tableName}; """

     // before full compaction, there are 3 rowsets.
     rowsetCount = get_rowset_count.call(tablets)
@@ -346,7 +345,7 @@ suite("test_index_compaction_with_multi_index_segments", "p0") {
         ("2018-02-21 20:00:00", 9, "I\'m using the builds"),
         ("2018-02-21 21:00:00", 10, "I\'m using the builds");
     """
-    tablets = sql """ show tablets from ${tableName}; """
+    tablets = sql_return_maparray """ show tablets from ${tableName}; """

     // before full compaction, there are 2 rowsets.
     rowsetCount = get_rowset_count.call(tablets)
diff --git a/regression-test/suites/load_p0/stream_load/test_map_load_and_compaction.groovy b/regression-test/suites/load_p0/stream_load/test_map_load_and_compaction.groovy
index 4a3b99c1d0..4a80004169 100644
--- a/regression-test/suites/load_p0/stream_load/test_map_load_and_compaction.groovy
+++ b/regression-test/suites/load_p0/stream_load/test_map_load_and_compaction.groovy
@@ -98,10 +98,10 @@ suite("test_map_load_and_compaction", "p0") {
         // check here 2 rowsets
         //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,PathHash,MetaUrl,CompactionStatus
-        String[][] tablets = sql """ show tablets from ${testTable}; """
-        String[] tablet = tablets[0]
+        def tablets = sql_return_maparray """ show tablets from ${testTable}; """
+        def tablet = tablets[0]

         // check rowsets number
-        String compactionStatus = tablet[18]
+        String compactionStatus = tablet.CompactionStatus
         checkCompactionStatus.call(compactionStatus, 6)

         // trigger compaction
@@ -109,8 +109,8 @@ suite("test_map_load_and_compaction", "p0") {
         def backendId_to_backendIP = [:]
         def backendId_to_backendHttpPort = [:]
         getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-        String tablet_id = tablet[0]
-        backend_id = tablet[2]
+        String tablet_id = tablet.TabletId
+        backend_id = tablet.BackendId
         def (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
         logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
         assertEquals(code, 0)
diff --git a/regression-test/suites/nereids_p0/select_tablets/select_with_tablets.groovy b/regression-test/suites/nereids_p0/select_tablets/select_with_tablets.groovy
index 86bc846a11..e3de644867 100644
--- a/regression-test/suites/nereids_p0/select_tablets/select_with_tablets.groovy
+++ b/regression-test/suites/nereids_p0/select_tablets/select_with_tablets.groovy
@@ -44,36 +44,35 @@ suite("select_with_tablets") {
     logger.info("insert result: " + insert_res.toString())
     order_qt_select1 """ SELECT * FROM ${table_name1} """

-    def res = sql """ show tablets from ${table_name1} where version = 2 """
+    def res = sql_return_maparray """ show tablets from ${table_name1} where version = 2 """
     res = deduplicate_tablets(res)
     assertTrue(res.size() == 1)
-    assertTrue(res[0].size() == 21)
-    assertEquals("2", res[0][4])
+    assertEquals("2", res[0].Version)

-    order_qt_select2 """ SELECT * FROM ${table_name1} TABLET(${res[0][0]}) """
-    order_qt_select3 """ SELECT * FROM ${table_name1} PARTITION less_than_20 TABLET(${res[0][0]}) """
-    // result should be empty because TABLET(${res[0][0]}) is not belonged to partition between_20_70.
-    order_qt_select4 """ SELECT * FROM ${table_name1} PARTITION between_20_70 TABLET(${res[0][0]}) """
+    order_qt_select2 """ SELECT * FROM ${table_name1} TABLET(${res[0].TabletId}) """
+    order_qt_select3 """ SELECT * FROM ${table_name1} PARTITION less_than_20 TABLET(${res[0].TabletId}) """
+    // result should be empty because TABLET(${res[0].TabletId}) is not belonged to partition between_20_70.
+    order_qt_select4 """ SELECT * FROM ${table_name1} PARTITION between_20_70 TABLET(${res[0].TabletId}) """
     order_qt_select5 """ SELECT * FROM ${table_name1} where id < 2 """
-    order_qt_select6 """ SELECT * FROM ${table_name1} TABLET(${res[0][0]}) where id = 2 """
-    order_qt_select7 """ SELECT * FROM ${table_name1} TABLET(${res[0][0]}) where id < 2 """
-    order_qt_select8 """ SELECT * FROM ${table_name1} PARTITION less_than_20 TABLET(${res[0][0]}) where id < 2 """
+    order_qt_select6 """ SELECT * FROM ${table_name1} TABLET(${res[0].TabletId}) where id = 2 """
+    order_qt_select7 """ SELECT * FROM ${table_name1} TABLET(${res[0].TabletId}) where id < 2 """
+    order_qt_select8 """ SELECT * FROM ${table_name1} PARTITION less_than_20 TABLET(${res[0].TabletId}) where id < 2 """
     // result of order_qt_select9 should be empty
-    order_qt_select9 """ SELECT * FROM ${table_name1} PARTITION between_20_70 TABLET(${res[0][0]}) where id < 2"""
+    order_qt_select9 """ SELECT * FROM ${table_name1} PARTITION between_20_70 TABLET(${res[0].TabletId}) where id < 2"""
     order_qt_select10 """ SELECT * FROM ${table_name1} PARTITION less_than_20 where id < 2"""
     // result of order_qt_select11 should be empty
     order_qt_select11 """ SELECT * FROM ${table_name1} PARTITION between_20_70 where id < 2"""

-    res = sql """ show tablets from ${table_name1} where version = 1 """
+    res = sql_return_maparray """ show tablets from ${table_name1} where version = 1 """
     res = deduplicate_tablets(res)
     assertTrue(res.size() == 2)
-    assertEquals("1", res[0][4])
-    assertEquals("1", res[1][4])
-    // result should be empty because TABLET(${res[0][0]}) does not have data.
-    order_qt_select12 """ SELECT * FROM ${table_name1} TABLET(${res[0][0]}) """
-    // result should be empty because TABLET(${res[1][0]}) does not have data.
-    order_qt_select13 """ SELECT * FROM ${table_name1} TABLET(${res[1][0]}) """
+    assertEquals("1", res[0].Version)
+    assertEquals("1", res[1].Version)
+    // result should be empty because TABLET(${res[0].TabletId}) does not have data.
+    order_qt_select12 """ SELECT * FROM ${table_name1} TABLET(${res[0].TabletId}) """
+    // result should be empty because TABLET(${res[1].TabletId}) does not have data.
+    order_qt_select13 """ SELECT * FROM ${table_name1} TABLET(${res[1].TabletId}) """

     // Test non-partitioned table
     def table_no_partition = "table_no_partition"
@@ -94,14 +93,14 @@ suite("select_with_tablets") {
     logger.info("insert result: " + insert_res.toString())

     order_qt_no_partition_1 """ SELECT * FROM ${table_no_partition} """
-    res = sql """ show tablets from ${table_no_partition} where version = 2 """
+    res = sql_return_maparray """ show tablets from ${table_no_partition} where version = 2 """
     res = deduplicate_tablets(res)
-    order_qt_no_partition_2 """ SELECT * FROM ${table_no_partition} TABLET(${res[0][0]}) """
-    order_qt_no_partition_3 """ SELECT * FROM ${table_no_partition} TABLET(${res[1][0]}) """
-    order_qt_no_partition_4 """ SELECT * FROM ${table_no_partition} TABLET(${res[2][0]}) """
+    order_qt_no_partition_2 """ SELECT * FROM ${table_no_partition} TABLET(${res[0].TabletId}) """
+    order_qt_no_partition_3 """ SELECT * FROM ${table_no_partition} TABLET(${res[1].TabletId}) """
+    order_qt_no_partition_4 """ SELECT * FROM ${table_no_partition} TABLET(${res[2].TabletId}) """
     order_qt_no_partition_5 """ SELECT * FROM ${table_no_partition} where id = 2 """
-    order_qt_no_partition_6 """ SELECT * FROM ${table_no_partition} TABLET(${res[0][0]}) where id = 2 """
-    order_qt_no_partition_7 """ SELECT * FROM ${table_no_partition} TABLET(${res[0][0]}) where id > 2 """
+    order_qt_no_partition_6 """ SELECT * FROM ${table_no_partition} TABLET(${res[0].TabletId}) where id = 2 """
+    order_qt_no_partition_7 """ SELECT * FROM ${table_no_partition} TABLET(${res[0].TabletId}) where id > 2 """
 }
diff --git a/regression-test/suites/variant_p0/compaction/test_compaction.groovy b/regression-test/suites/variant_p0/compaction/test_compaction.groovy
index 582c476386..e5359f305f 100644
--- a/regression-test/suites/variant_p0/compaction/test_compaction.groovy
+++ b/regression-test/suites/variant_p0/compaction/test_compaction.groovy
@@ -80,12 +80,12 @@ suite("test_compaction_variant") {
         //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus
-        String[][] tablets = sql """ show tablets from ${tableName}; """
+        def tablets = sql_return_maparray """ show tablets from ${tableName}; """

         // trigger compactions for all tablets in ${tableName}
-        for (String[] tablet in tablets) {
-            String tablet_id = tablet[0]
-            backend_id = tablet[2]
+        for (def tablet in tablets) {
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
             (code, out, err) = be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
             logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
             assertEquals(code, 0)
@@ -100,12 +100,12 @@ suite("test_compaction_variant") {
         }

         // wait for all compactions done
-        for (String[] tablet in tablets) {
+        for (def tablet in tablets) {
             boolean running = true
             do {
                 Thread.sleep(1000)
-                String tablet_id = tablet[0]
-                backend_id = tablet[2]
+                String tablet_id = tablet.TabletId
+                backend_id = tablet.BackendId
                 (code, out, err) = be_get_compaction_status(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
                 assertEquals(code, 0)
@@ -116,10 +116,9 @@ suite("test_compaction_variant") {
         }

         int rowCount = 0
-        for (String[] tablet in tablets) {
-            String tablet_id = tablet[0]
-            def compactionStatusUrlIndex = 18
-            (code, out, err) = curl("GET", tablet[compactionStatusUrlIndex])
+        for (def tablet in tablets) {
+            String tablet_id = tablet.TabletId
+            (code, out, err) = curl("GET", tablet.CompactionStatus)
             logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err)
             assertEquals(code, 0)
             def tabletJson = parseJson(out.trim())