[feature](cooldown) Decouple storage policy and resource (#15873)
@@ -475,7 +475,7 @@ public class Alter {
// currently, only in memory and storage policy property could reach here
Preconditions.checkState(properties.containsKey(PropertyAnalyzer.PROPERTIES_INMEMORY)
|| properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_POLICY));
((SchemaChangeHandler) schemaChangeHandler).updatePartitionsInMemoryMeta(
((SchemaChangeHandler) schemaChangeHandler).updatePartitionsProperties(
db, tableName, partitionNames, properties);
OlapTable olapTable = (OlapTable) table;
olapTable.writeLockOrDdlException();
@@ -489,7 +489,7 @@ public class Alter {
// currently, only in memory and storage policy property could reach here
Preconditions.checkState(properties.containsKey(PropertyAnalyzer.PROPERTIES_INMEMORY)
|| properties.containsKey(PropertyAnalyzer.PROPERTIES_STORAGE_POLICY));
((SchemaChangeHandler) schemaChangeHandler).updateTableInMemoryMeta(db, tableName, properties);
((SchemaChangeHandler) schemaChangeHandler).updateTableProperties(db, tableName, properties);
} else {
throw new DdlException("Invalid alter operation: " + alterClause.getOpType());
}

@@ -74,6 +74,8 @@ import org.apache.doris.common.util.Util;
import org.apache.doris.mysql.privilege.PrivPredicate;
import org.apache.doris.persist.RemoveAlterJobV2OperationLog;
import org.apache.doris.persist.TableAddOrDropColumnsInfo;
import org.apache.doris.policy.Policy;
import org.apache.doris.policy.PolicyTypeEnum;
import org.apache.doris.qe.ConnectContext;
import org.apache.doris.task.AgentBatchTask;
import org.apache.doris.task.AgentTaskExecutor;
@@ -102,6 +104,7 @@ import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
@@ -1852,10 +1855,27 @@ public class SchemaChangeHandler extends AlterHandler {
LOG.info("send clear alter task for table {}, number: {}", olapTable.getName(), batchTask.getTaskNum());
}

static long storagePolicyNameToId(String storagePolicy) throws DdlException {
if (storagePolicy == null) {
return -1; // don't update storage policy
}
// if storagePolicy is "", means to reset the storage policy of this table
if (storagePolicy.isEmpty()) {
return 0; // reset storage policy
} else {
Optional<Policy> policy = Env.getCurrentEnv().getPolicyMgr()
.findPolicy(storagePolicy, PolicyTypeEnum.STORAGE);
if (!policy.isPresent()) {
throw new DdlException("StoragePolicy[" + storagePolicy + "] not exist");
}
return policy.get().getId();
}
}

/**
* Update all partitions' in-memory property of table
* Update all partitions' properties of table
*/
public void updateTableInMemoryMeta(Database db, String tableName, Map<String, String> properties)
public void updateTableProperties(Database db, String tableName, Map<String, String> properties)
throws UserException {
List<Partition> partitions = Lists.newArrayList();
OlapTable olapTable = (OlapTable) db.getTableOrMetaException(tableName, Table.TableType.OLAP);
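
The helper above folds three cases into its return value: null means leave the storage policy untouched (-1), an empty string means reset it (0), and any other name must resolve to an existing STORAGE policy id. A minimal, self-contained sketch of that convention follows; the policy map and names are hypothetical, the real code resolves the name through Env.getCurrentEnv().getPolicyMgr().

// Illustrative sketch only, not part of this commit. A plain map stands in for PolicyMgr.
import java.util.Map;
import java.util.Optional;

class StoragePolicySentinelDemo {
    static final Map<String, Long> KNOWN_POLICIES = Map.of("hot_to_s3", 10001L); // hypothetical policy

    static long storagePolicyNameToId(String storagePolicy) {
        if (storagePolicy == null) {
            return -1; // property not given: don't update the storage policy
        }
        if (storagePolicy.isEmpty()) {
            return 0;  // empty string: reset the storage policy
        }
        return Optional.ofNullable(KNOWN_POLICIES.get(storagePolicy))
                .orElseThrow(() -> new IllegalArgumentException(
                        "StoragePolicy[" + storagePolicy + "] not exist"));
    }

    public static void main(String[] args) {
        System.out.println(storagePolicyNameToId(null));        // -1
        System.out.println(storagePolicyNameToId(""));          // 0
        System.out.println(storagePolicyNameToId("hot_to_s3")); // 10001
    }
}
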
@@ -1866,43 +1886,61 @@ public class SchemaChangeHandler extends AlterHandler {
olapTable.readUnlock();
}

boolean isInMemory = Boolean.parseBoolean(properties.get(PropertyAnalyzer.PROPERTIES_INMEMORY));
String storagePolicy = properties.getOrDefault(PropertyAnalyzer.PROPERTIES_STORAGE_POLICY, "");
if (isInMemory == olapTable.isInMemory() && storagePolicy.equalsIgnoreCase("")) {
LOG.info("isInMemory == olapTable.isInMemory() and storagePolicy empty"
+ isInMemory + " " + olapTable.isInMemory() + " " + storagePolicy);
String inMemory = properties.get(PropertyAnalyzer.PROPERTIES_INMEMORY);
int isInMemory = -1; // < 0 means don't update inMemory properties
if (inMemory != null) {
isInMemory = Boolean.parseBoolean(inMemory) ? 1 : 0;
if ((isInMemory > 0) == olapTable.isInMemory()) {
// already up-to-date
isInMemory = -1;
}
}
String storagePolicy = properties.get(PropertyAnalyzer.PROPERTIES_STORAGE_POLICY);
long storagePolicyId = storagePolicyNameToId(storagePolicy);

if (isInMemory < 0 && storagePolicyId < 0) {
LOG.info("Properties already up-to-date");
return;
}

for (Partition partition : partitions) {
updatePartitionInMemoryMeta(db, olapTable.getName(), partition.getName(), storagePolicy, isInMemory);
updatePartitionProperties(db, olapTable.getName(), partition.getName(), storagePolicyId, isInMemory);
}

olapTable.writeLockOrDdlException();
try {
Env.getCurrentEnv().modifyTableInMemoryMeta(db, olapTable, properties);
Env.getCurrentEnv().modifyTableProperties(db, olapTable, properties);
} finally {
olapTable.writeUnlock();
}
}

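
updateTableProperties (and updatePartitionsProperties below) now encode the in_memory flag as an int so a single argument can say don't touch (-1), set to false (0) or set to true (1), and the whole update is skipped when neither the flag nor the storage policy actually changes. A small hypothetical sketch of that encoding, assuming the same semantics as the code above:

// Illustrative sketch only, not part of this commit.
class InMemoryFlagDemo {
    // -1: leave unchanged, 0: set to false, 1: set to true
    static int encodeInMemory(String requested, boolean current) {
        if (requested == null) {
            return -1; // property not mentioned in the ALTER statement: nothing to do
        }
        int wanted = Boolean.parseBoolean(requested) ? 1 : 0;
        return ((wanted == 1) == current) ? -1 : wanted; // already up-to-date: skip
    }

    public static void main(String[] args) {
        System.out.println(encodeInMemory(null, true));    // -1
        System.out.println(encodeInMemory("true", true));  // -1 (no change needed)
        System.out.println(encodeInMemory("false", true)); // 0
    }
}
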
/**
* Update some specified partitions' in-memory property of table
* Update some specified partitions' properties of table
*/
public void updatePartitionsInMemoryMeta(Database db, String tableName, List<String> partitionNames,
public void updatePartitionsProperties(Database db, String tableName, List<String> partitionNames,
Map<String, String> properties) throws DdlException, MetaNotFoundException {
OlapTable olapTable = (OlapTable) db.getTableOrMetaException(tableName, Table.TableType.OLAP);
boolean isInMemory = Boolean.parseBoolean(properties.get(PropertyAnalyzer.PROPERTIES_INMEMORY));
String storagePolicy = properties.getOrDefault(PropertyAnalyzer.PROPERTIES_STORAGE_POLICY, "");
if (isInMemory == olapTable.isInMemory() && storagePolicy.equalsIgnoreCase("")) {
LOG.info("isInMemory == olapTable.isInMemory() and storagePolicy empty"
+ isInMemory + " " + olapTable.isInMemory() + " " + storagePolicy);
String inMemory = properties.get(PropertyAnalyzer.PROPERTIES_INMEMORY);
int isInMemory = -1; // < 0 means don't update inMemory properties
if (inMemory != null) {
isInMemory = Boolean.parseBoolean(inMemory) ? 1 : 0;
if ((isInMemory > 0) == olapTable.isInMemory()) {
// already up-to-date
isInMemory = -1;
}
}
String storagePolicy = properties.get(PropertyAnalyzer.PROPERTIES_STORAGE_POLICY);
long storagePolicyId = storagePolicyNameToId(storagePolicy);

if (isInMemory < 0 && storagePolicyId < 0) {
LOG.info("Properties already up-to-date");
return;
}

for (String partitionName : partitionNames) {
try {
updatePartitionInMemoryMeta(db, olapTable.getName(), partitionName, storagePolicy, isInMemory);
updatePartitionProperties(db, olapTable.getName(), partitionName, storagePolicyId, isInMemory);
} catch (Exception e) {
String errMsg = "Failed to update partition[" + partitionName + "]'s 'in_memory' property. "
+ "The reason is [" + e.getMessage() + "]";
@@ -1912,15 +1950,11 @@ public class SchemaChangeHandler extends AlterHandler {
}

/**
* Update one specified partition's in-memory property by partition name of table
* This operation may return partial successfully, with a exception to inform user to retry
* Update one specified partition's properties by partition name of table
* This operation may return partial successfully, with an exception to inform user to retry
*/
public void updatePartitionInMemoryMeta(Database db,
String tableName,
String partitionName,
String storagePolicy,
boolean isInMemory) throws UserException {

public void updatePartitionProperties(Database db, String tableName, String partitionName, long storagePolicyId,
int isInMemory) throws UserException {
// be id -> <tablet id,schemaHash>
Map<Long, Set<Pair<Long, Integer>>> beIdToTabletIdWithHash = Maps.newHashMap();
OlapTable olapTable = (OlapTable) db.getTableOrMetaException(tableName, Table.TableType.OLAP);
@@ -1952,7 +1986,7 @@ public class SchemaChangeHandler extends AlterHandler {
for (Map.Entry<Long, Set<Pair<Long, Integer>>> kv : beIdToTabletIdWithHash.entrySet()) {
countDownLatch.addMark(kv.getKey(), kv.getValue());
UpdateTabletMetaInfoTask task = new UpdateTabletMetaInfoTask(kv.getKey(), kv.getValue(),
isInMemory, storagePolicy, countDownLatch);
isInMemory, storagePolicyId, countDownLatch);
batchTask.addTask(task);
}
if (!FeConstants.runningUnitTest) {

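
updatePartitionProperties fans the new meta out per backend: tablets are grouped by backend id, one UpdateTabletMetaInfoTask is created per backend, and a marked countdown latch waits for the acknowledgements, which is why the operation can finish only partially and ask the user to retry. A self-contained sketch of that fan-out pattern using plain JDK types (all names here are hypothetical):

// Illustrative sketch only, not part of this commit.
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

class PerBackendFanOutDemo {
    public static void main(String[] args) throws InterruptedException {
        // backend id -> tablet ids (the real task also carries the schema hash)
        Map<Long, Set<Long>> beIdToTablets = Map.of(1001L, Set.of(10L, 11L), 1002L, Set.of(12L));
        CountDownLatch latch = new CountDownLatch(beIdToTablets.size());
        ExecutorService pool = Executors.newFixedThreadPool(2);
        beIdToTablets.forEach((beId, tablets) -> pool.submit(() -> {
            System.out.println("update tablet meta " + tablets + " on backend " + beId);
            latch.countDown(); // in Doris the backend ack counts the latch down
        }));
        boolean allAcked = latch.await(5, TimeUnit.SECONDS); // a timeout surfaces as partial success
        pool.shutdown();
        System.out.println("all backends acked: " + allAcked);
    }
}
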
@@ -4226,7 +4226,7 @@ public class Env {
}

// The caller need to hold the table write lock
public void modifyTableInMemoryMeta(Database db, OlapTable table, Map<String, String> properties) {
public void modifyTableProperties(Database db, OlapTable table, Map<String, String> properties) {
Preconditions.checkArgument(table.isWriteLockHeldByCurrentThread());
TableProperty tableProperty = table.getTableProperty();
if (tableProperty == null) {

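
modifyTableProperties keeps the existing locking contract: the caller takes the table write lock (olapTable.writeLockOrDdlException() in the callers shown earlier) and the method merely asserts it via Preconditions.checkArgument. A minimal hypothetical sketch of that contract with a plain ReentrantReadWriteLock:

// Illustrative sketch only, not part of this commit.
import java.util.concurrent.locks.ReentrantReadWriteLock;

class LockContractDemo {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    void modifyProperties() {
        if (!lock.isWriteLockedByCurrentThread()) {
            throw new IllegalStateException("caller must hold the table write lock");
        }
        // ... mutate the table property here ...
    }

    public static void main(String[] args) {
        LockContractDemo table = new LockContractDemo();
        table.lock.writeLock().lock();
        try {
            table.modifyProperties(); // ok: the lock is held by this thread
        } finally {
            table.lock.writeLock().unlock();
        }
    }
}
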
@ -28,13 +28,6 @@ import org.apache.doris.common.proc.BaseProcResult;
|
||||
import org.apache.doris.datasource.CatalogIf;
|
||||
import org.apache.doris.persist.gson.GsonPostProcessable;
|
||||
import org.apache.doris.persist.gson.GsonUtils;
|
||||
import org.apache.doris.policy.Policy;
|
||||
import org.apache.doris.policy.PolicyTypeEnum;
|
||||
import org.apache.doris.policy.StoragePolicy;
|
||||
import org.apache.doris.system.SystemInfoService;
|
||||
import org.apache.doris.task.AgentBatchTask;
|
||||
import org.apache.doris.task.AgentTaskExecutor;
|
||||
import org.apache.doris.task.NotifyUpdateStoragePolicyTask;
|
||||
|
||||
import com.google.common.base.Strings;
|
||||
import com.google.common.collect.Maps;
|
||||
@ -45,10 +38,9 @@ import org.apache.logging.log4j.Logger;
|
||||
import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.Optional;
|
||||
import java.util.concurrent.locks.ReentrantReadWriteLock;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public abstract class Resource implements Writable, GsonPostProcessable {
@@ -92,6 +84,28 @@ public abstract class Resource implements Writable, GsonPostProcessable {
protected ResourceType type;
@SerializedName(value = "references")
protected Map<String, ReferenceType> references = Maps.newHashMap();
@SerializedName(value = "id")
protected long id = -1;
@SerializedName(value = "version")
protected long version = -1;

private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);

public void writeLock() {
lock.writeLock().lock();
}

public void writeUnlock() {
lock.writeLock().unlock();
}

public void readLock() {
lock.readLock().lock();
}

public void readUnlock() {
lock.readLock().unlock();
}

public Resource() {
}
@@ -103,10 +117,20 @@ public abstract class Resource implements Writable, GsonPostProcessable {

public static Resource fromStmt(CreateResourceStmt stmt) throws DdlException {
Resource resource = getResourceInstance(stmt.getResourceType(), stmt.getResourceName());
resource.id = Env.getCurrentEnv().getNextId();
resource.version = 0;
resource.setProperties(stmt.getProperties());
return resource;
}

public long getId() {
return this.id;
}

public long getVersion() {
return this.version;
}

public synchronized boolean removeReference(String referenceName, ReferenceType type) {
String fullName = referenceName + REFERENCE_SPLIT + type.name();
if (references.remove(fullName) != null) {
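
This hunk is the heart of the decoupling: every Resource now carries a persistent id, a version that starts at 0 when the resource is created, and its own read/write lock, so backends can reference a resource by id and detect stale copies by comparing versions instead of being renotified with the full policy contents. A small hypothetical sketch of the id/version bump pattern, assuming the same semantics:

// Illustrative sketch only, not part of this commit.
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class VersionedResourceDemo {
    private final long id;
    private long version = 0;
    private final Map<String, String> properties = new HashMap<>();
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);

    VersionedResourceDemo(long id) {
        this.id = id;
    }

    void modifyProperties(Map<String, String> update) {
        lock.writeLock().lock();
        try {
            properties.putAll(update);
            ++version; // every visible change bumps the version exactly once
        } finally {
            lock.writeLock().unlock();
        }
    }

    public static void main(String[] args) {
        VersionedResourceDemo s3 = new VersionedResourceDemo(42);
        s3.modifyProperties(Map.of("s3_max_connections", "100"));
        System.out.println("resource " + s3.id + " is now at version " + s3.version);
    }
}
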
@ -252,48 +276,7 @@ public abstract class Resource implements Writable, GsonPostProcessable {
|
||||
|
||||
private void notifyUpdate() {
|
||||
references.entrySet().stream().collect(Collectors.groupingBy(Entry::getValue)).forEach((type, refs) -> {
|
||||
if (type == ReferenceType.POLICY) {
|
||||
SystemInfoService systemInfoService = Env.getCurrentSystemInfo();
|
||||
AgentBatchTask batchTask = new AgentBatchTask();
|
||||
|
||||
Map<String, String> copiedProperties = getCopiedProperties();
|
||||
|
||||
for (Long beId : systemInfoService.getBackendIds(true)) {
|
||||
for (Map.Entry<String, ReferenceType> ref : refs) {
|
||||
String policyName = ref.getKey().split(REFERENCE_SPLIT)[0];
|
||||
List<Policy> policiesByType = Env.getCurrentEnv().getPolicyMgr()
|
||||
.getCopiedPoliciesByType(PolicyTypeEnum.STORAGE);
|
||||
Optional<Policy> findPolicy = policiesByType.stream()
|
||||
.filter(p -> p.getType() == PolicyTypeEnum.STORAGE
|
||||
&& policyName.equals(p.getPolicyName()))
|
||||
.findAny();
|
||||
LOG.info("find policy in {} ", policiesByType);
|
||||
if (!findPolicy.isPresent()) {
|
||||
return;
|
||||
}
|
||||
// add policy's coolDown ttl、coolDown data、policy name to map
|
||||
Map<String, String> tmpMap = Maps.newHashMap(copiedProperties);
|
||||
StoragePolicy used = (StoragePolicy) findPolicy.get();
|
||||
tmpMap.put(StoragePolicy.COOLDOWN_DATETIME,
|
||||
String.valueOf(used.getCooldownTimestampMs()));
|
||||
|
||||
final String[] cooldownTtl = {"-1"};
|
||||
Optional.ofNullable(used.getCooldownTtl())
|
||||
.ifPresent(date -> cooldownTtl[0] = used.getCooldownTtl());
|
||||
tmpMap.put(StoragePolicy.COOLDOWN_TTL, cooldownTtl[0]);
|
||||
|
||||
tmpMap.put(StoragePolicy.MD5_CHECKSUM, used.getMd5Checksum());
|
||||
|
||||
NotifyUpdateStoragePolicyTask modifyS3ResourcePropertiesTask =
|
||||
new NotifyUpdateStoragePolicyTask(beId, used.getPolicyName(), tmpMap);
|
||||
LOG.info("notify be: {}, policy name: {}, "
|
||||
+ "properties: {} to modify S3 resource batch task.",
|
||||
beId, used.getPolicyName(), tmpMap);
|
||||
batchTask.addTask(modifyS3ResourcePropertiesTask);
|
||||
}
|
||||
}
|
||||
AgentTaskExecutor.submit(batchTask);
|
||||
} else if (type == ReferenceType.CATALOG) {
|
||||
if (type == ReferenceType.CATALOG) {
|
||||
for (Map.Entry<String, ReferenceType> ref : refs) {
|
||||
String catalogName = ref.getKey().split(REFERENCE_SPLIT)[0];
|
||||
CatalogIf catalog = Env.getCurrentEnv().getCatalogMgr().getCatalog(catalogName);
|
||||
|
||||
@ -47,7 +47,9 @@ import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* Resource manager is responsible for managing external resources used by Doris.
|
||||
@ -167,6 +169,11 @@ public class ResourceMgr implements Writable {
|
||||
return nameToResource.size();
|
||||
}
|
||||
|
||||
public List<Resource> getResource(ResourceType type) {
|
||||
return nameToResource.values().stream().filter(resource -> resource.getType() == type)
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
|
||||
public List<List<Comparable>> getResourcesInfo(String name, boolean accurateMatch, Set<String> typeSets) {
|
||||
List<List<String>> targetRows = procNode.fetchResult().getRows();
|
||||
List<List<Comparable>> returnRows = Lists.newArrayList();
|
||||
|
||||
@ -91,13 +91,14 @@ public class S3Resource extends Resource {
|
||||
@SerializedName(value = "properties")
|
||||
private Map<String, String> properties;
|
||||
|
||||
public S3Resource(String name) {
|
||||
this(name, Maps.newHashMap());
|
||||
}
|
||||
// for Gson fromJson
|
||||
// TODO(plat1ko): other Resource subclass also MUST define default ctor, otherwise when reloading object from json
|
||||
// some not serialized field (i.e. `lock`) will be `null`.
|
||||
public S3Resource() {}
|
||||
|
||||
public S3Resource(String name, Map<String, String> properties) {
|
||||
public S3Resource(String name) {
|
||||
super(name, ResourceType.S3);
|
||||
this.properties = properties;
|
||||
properties = Maps.newHashMap();
|
||||
}
|
||||
|
||||
public String getProperty(String propertyKey) {
|
||||
@ -186,9 +187,12 @@ public class S3Resource extends Resource {
|
||||
}
|
||||
}
|
||||
// modify properties
|
||||
writeLock();
|
||||
for (Map.Entry<String, String> kv : properties.entrySet()) {
|
||||
replaceIfEffectiveValue(this.properties, kv.getKey(), kv.getValue());
|
||||
}
|
||||
++version;
|
||||
writeUnlock();
|
||||
super.modifyProperties(properties);
|
||||
}
|
||||
|
||||
@ -200,6 +204,9 @@ public class S3Resource extends Resource {
|
||||
@Override
|
||||
protected void getProcNodeData(BaseProcResult result) {
|
||||
String lowerCaseType = type.name().toLowerCase();
|
||||
result.addRow(Lists.newArrayList(name, lowerCaseType, "id", String.valueOf(id)));
|
||||
readLock();
|
||||
result.addRow(Lists.newArrayList(name, lowerCaseType, "version", String.valueOf(version)));
|
||||
for (Map.Entry<String, String> entry : properties.entrySet()) {
|
||||
// it's dangerous to show password in show odbc resource,
|
||||
// so we use empty string to replace the real password
|
||||
@ -209,6 +216,7 @@ public class S3Resource extends Resource {
|
||||
result.addRow(Lists.newArrayList(name, lowerCaseType, entry.getKey(), entry.getValue()));
|
||||
}
|
||||
}
|
||||
readUnlock();
|
||||
}
|
||||
|
||||
public static Map<String, String> getS3HadoopProperties(Map<String, String> properties) {
|
||||
|
||||
@ -28,6 +28,8 @@ import org.apache.doris.catalog.Partition;
|
||||
import org.apache.doris.catalog.Replica;
|
||||
import org.apache.doris.catalog.Replica.ReplicaState;
|
||||
import org.apache.doris.catalog.ReplicaAllocation;
|
||||
import org.apache.doris.catalog.Resource;
|
||||
import org.apache.doris.catalog.Resource.ResourceType;
|
||||
import org.apache.doris.catalog.Table;
|
||||
import org.apache.doris.catalog.Tablet;
|
||||
import org.apache.doris.catalog.Tablet.TabletStatus;
|
||||
@ -45,6 +47,9 @@ import org.apache.doris.metric.MetricRepo;
|
||||
import org.apache.doris.persist.BackendReplicasInfo;
|
||||
import org.apache.doris.persist.BackendTabletsInfo;
|
||||
import org.apache.doris.persist.ReplicaPersistInfo;
|
||||
import org.apache.doris.policy.Policy;
|
||||
import org.apache.doris.policy.PolicyTypeEnum;
|
||||
import org.apache.doris.policy.StoragePolicy;
|
||||
import org.apache.doris.system.Backend;
|
||||
import org.apache.doris.system.Backend.BackendStatus;
|
||||
import org.apache.doris.system.SystemInfoService;
|
||||
@ -57,6 +62,7 @@ import org.apache.doris.task.CreateReplicaTask;
|
||||
import org.apache.doris.task.DropReplicaTask;
|
||||
import org.apache.doris.task.MasterTask;
|
||||
import org.apache.doris.task.PublishVersionTask;
|
||||
import org.apache.doris.task.PushStoragePolicyTask;
|
||||
import org.apache.doris.task.StorageMediaMigrationTask;
|
||||
import org.apache.doris.task.UpdateTabletMetaInfoTask;
|
||||
import org.apache.doris.thrift.TBackend;
|
||||
@ -67,6 +73,8 @@ import org.apache.doris.thrift.TReportRequest;
|
||||
import org.apache.doris.thrift.TStatus;
|
||||
import org.apache.doris.thrift.TStatusCode;
|
||||
import org.apache.doris.thrift.TStorageMedium;
|
||||
import org.apache.doris.thrift.TStoragePolicy;
|
||||
import org.apache.doris.thrift.TStorageResource;
|
||||
import org.apache.doris.thrift.TStorageType;
|
||||
import org.apache.doris.thrift.TTablet;
|
||||
import org.apache.doris.thrift.TTabletInfo;
|
||||
@ -84,11 +92,13 @@ import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.thrift.TException;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class ReportHandler extends Daemon {
|
||||
private static final Logger LOG = LogManager.getLogger(ReportHandler.class);
|
||||
@ -164,7 +174,8 @@ public class ReportHandler extends Daemon {
|
||||
backend.setTabletMaxCompactionScore(request.getTabletMaxCompactionScore());
|
||||
}
|
||||
|
||||
ReportTask reportTask = new ReportTask(beId, tasks, disks, tablets, reportVersion);
|
||||
ReportTask reportTask = new ReportTask(beId, tasks, disks, tablets, reportVersion,
|
||||
request.getStoragePolicy(), request.getResource());
|
||||
try {
|
||||
putToQueue(reportTask);
|
||||
} catch (Exception e) {
|
||||
@ -212,14 +223,20 @@ public class ReportHandler extends Daemon {
|
||||
private Map<Long, TTablet> tablets;
|
||||
private long reportVersion;
|
||||
|
||||
private List<TStoragePolicy> storagePolicies;
|
||||
private List<TStorageResource> storageResources;
|
||||
|
||||
public ReportTask(long beId, Map<TTaskType, Set<Long>> tasks,
|
||||
Map<String, TDisk> disks,
|
||||
Map<Long, TTablet> tablets, long reportVersion) {
|
||||
Map<Long, TTablet> tablets, long reportVersion,
|
||||
List<TStoragePolicy> storagePolicies, List<TStorageResource> storageResources) {
|
||||
this.beId = beId;
|
||||
this.tasks = tasks;
|
||||
this.disks = disks;
|
||||
this.tablets = tablets;
|
||||
this.reportVersion = reportVersion;
|
||||
this.storagePolicies = storagePolicies;
|
||||
this.storageResources = storageResources;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -230,6 +247,10 @@ public class ReportHandler extends Daemon {
|
||||
if (disks != null) {
|
||||
ReportHandler.diskReport(beId, disks);
|
||||
}
|
||||
if (Config.enable_storage_policy && storagePolicies != null && storageResources != null) {
|
||||
storagePolicyReport(beId, storagePolicies, storageResources);
|
||||
}
|
||||
|
||||
if (tablets != null) {
|
||||
long backendReportVersion = Env.getCurrentSystemInfo().getBackendReportVersion(beId);
|
||||
if (reportVersion < backendReportVersion) {
|
||||
@ -242,6 +263,122 @@ public class ReportHandler extends Daemon {
|
||||
}
|
||||
}
|
||||
|
||||
private static void handlePushStoragePolicy(long backendId, List<Policy> policyToPush,
List<Resource> resourceToPush, List<Long> policyToDrop) {
AgentBatchTask batchTask = new AgentBatchTask();
PushStoragePolicyTask pushStoragePolicyTask = new PushStoragePolicyTask(backendId, policyToPush,
resourceToPush, policyToDrop);
batchTask.addTask(pushStoragePolicyTask);
AgentTaskExecutor.submit(batchTask);
}

private static void storagePolicyReport(long backendId,
List<TStoragePolicy> storagePoliciesInBe,
List<TStorageResource> storageResourcesInBe) {
LOG.info("backend[{}] reports policies {}, report resources: {}",
backendId, storagePoliciesInBe, storageResourcesInBe);
// do the diff. find out (intersection) / (be - meta) / (meta - be)
List<Policy> policiesInFe = Env.getCurrentEnv().getPolicyMgr().getCopiedPoliciesByType(PolicyTypeEnum.STORAGE);
List<Resource> resourcesInFe = Env.getCurrentEnv().getResourceMgr().getResource(ResourceType.S3);

List<Resource> resourceToPush = new ArrayList<>();
List<Policy> policyToPush = new ArrayList<>();
List<Long> policyToDrop = new ArrayList<>();

diffPolicy(storagePoliciesInBe, policiesInFe, policyToPush, policyToDrop);
diffResource(storageResourcesInBe, resourcesInFe, resourceToPush);

if (policyToPush.isEmpty() && resourceToPush.isEmpty() && policyToDrop.isEmpty()) {
return;
}
LOG.info("after diff policy, policyToPush {}, policyToDrop {}, and resourceToPush {}",
policyToPush.stream()
.map(p -> "StoragePolicy(name=" + p.getPolicyName() + " id=" + p.getId() + " version="
+ p.getVersion()).collect(Collectors.toList()),
policyToDrop, resourceToPush.stream()
.map(r -> "Resource(name=" + r.getName() + " id=" + r.getId() + " version="
+ r.getVersion()).collect(Collectors.toList()));
// send push rpc
handlePushStoragePolicy(backendId, policyToPush, resourceToPush, policyToDrop);
}

private static void diffPolicy(List<TStoragePolicy> storagePoliciesInBe, List<Policy> policiesInFe,
List<Policy> policyToPush, List<Long> policyToDrop) {
// fe - be
for (Policy policy : policiesInFe) {
if (policy.getId() <= 0 || ((StoragePolicy) policy).getStorageResource() == null) {
continue; // ignore policy with invalid id or storage resource
}
boolean beHasIt = false;
for (TStoragePolicy tStoragePolicy : storagePoliciesInBe) {
if (policy.getId() == tStoragePolicy.getId()) {
beHasIt = true;
// find id eq
if (policy.getVersion() == tStoragePolicy.getVersion()) {
// find version eq
} else if (policy.getVersion() > tStoragePolicy.getVersion()) {
// need to add
policyToPush.add(policy);
} else {
// impossible
LOG.warn("fe policy version {} less than be {}, impossible",
policy.getVersion(), tStoragePolicy.getVersion());
}
break;
}
}
if (!beHasIt) {
policyToPush.add(policy);
}
}

// be - fe
for (TStoragePolicy tStoragePolicy : storagePoliciesInBe) {
boolean feHasIt = false;
for (Policy policy : policiesInFe) {
if (policy.getId() == tStoragePolicy.getId()) {
feHasIt = true;
// find id eq
break;
}
}
if (!feHasIt) {
policyToDrop.add(tStoragePolicy.getId());
}
}
}

private static void diffResource(List<TStorageResource> storageResourcesInBe, List<Resource> resourcesInFe,
List<Resource> resourceToPush) {
// fe - be
for (Resource resource : resourcesInFe) {
if (resource.getId() <= 0) {
continue; // ignore resource with invalid id
}
boolean beHasIt = false;
for (TStorageResource tStorageResource : storageResourcesInBe) {
if (resource.getId() == tStorageResource.getId()) {
beHasIt = true;
// find id eq
if (resource.getVersion() == tStorageResource.getVersion()) {
// find version eq
} else if (resource.getVersion() > tStorageResource.getVersion()) {
// need to add
resourceToPush.add(resource);
} else {
// impossible
LOG.warn("fe resource version {} less than be {}, impossible",
resource.getVersion(), tStorageResource.getVersion());
}
break;
}
}
if (!beHasIt) {
resourceToPush.add(resource);
}
}
}

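
diffPolicy and diffResource above apply the same rule: anything the FE knows that the BE is missing, or holds at an older version, gets pushed, and a policy the BE still reports but the FE no longer has gets dropped (resources are only ever pushed, never dropped here). The rule can be stated compactly over id-to-version maps; the sketch below is hypothetical and not the Doris code:

// Illustrative sketch only, not part of this commit.
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

class IdVersionDiffDemo {
    static void diff(Map<Long, Long> fe, Map<Long, Long> be, List<Long> toPush, List<Long> toDrop) {
        fe.forEach((id, version) -> {
            Long beVersion = be.get(id);
            if (beVersion == null || beVersion < version) {
                toPush.add(id); // missing on the BE, or the BE copy is stale
            }
        });
        be.keySet().forEach(id -> {
            if (!fe.containsKey(id)) {
                toDrop.add(id); // BE still holds something the FE deleted
            }
        });
    }

    public static void main(String[] args) {
        Map<Long, Long> fe = new TreeMap<>(Map.of(1L, 3L, 2L, 0L)); // id -> version on the FE
        Map<Long, Long> be = new TreeMap<>(Map.of(1L, 2L, 9L, 0L)); // id -> version reported by the BE
        List<Long> toPush = new ArrayList<>();
        List<Long> toDrop = new ArrayList<>();
        diff(fe, be, toPush, toDrop);
        System.out.println("push " + toPush + ", drop " + toDrop); // push [1, 2], drop [9]
    }
}
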
private static void tabletReport(long backendId, Map<Long, TTablet> backendTablets, long backendReportVersion) {
|
||||
long start = System.currentTimeMillis();
|
||||
LOG.info("backend[{}] reports {} tablet(s). report version: {}",
|
||||
|
||||
@ -23,6 +23,7 @@ import org.apache.doris.catalog.DatabaseIf;
|
||||
import org.apache.doris.catalog.Env;
|
||||
import org.apache.doris.catalog.TableIf;
|
||||
import org.apache.doris.common.AnalysisException;
|
||||
import org.apache.doris.common.DdlException;
|
||||
import org.apache.doris.common.io.Text;
|
||||
import org.apache.doris.common.io.Writable;
|
||||
import org.apache.doris.persist.gson.GsonPostProcessable;
|
||||
@ -39,6 +40,8 @@ import java.io.DataInput;
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.locks.ReentrantReadWriteLock;
|
||||
|
||||
/**
|
||||
* Base class for Policy.
|
||||
@ -48,8 +51,8 @@ public abstract class Policy implements Writable, GsonPostProcessable {
|
||||
|
||||
private static final Logger LOG = LogManager.getLogger(Policy.class);
|
||||
|
||||
@SerializedName(value = "policyId")
|
||||
protected long policyId = -1;
|
||||
@SerializedName(value = "id")
|
||||
protected long id = -1;
|
||||
|
||||
@SerializedName(value = "type")
|
||||
protected PolicyTypeEnum type = null;
|
||||
@ -57,6 +60,30 @@ public abstract class Policy implements Writable, GsonPostProcessable {
|
||||
@SerializedName(value = "policyName")
|
||||
protected String policyName = null;
|
||||
|
||||
@SerializedName(value = "version")
|
||||
protected long version = -1;
|
||||
|
||||
private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);
|
||||
|
||||
public void writeLock() {
|
||||
lock.writeLock().lock();
|
||||
}
|
||||
|
||||
public void writeUnlock() {
|
||||
lock.writeLock().unlock();
|
||||
}
|
||||
|
||||
public void readLock() {
|
||||
lock.readLock().lock();
|
||||
}
|
||||
|
||||
public void readUnlock() {
|
||||
lock.readLock().unlock();
|
||||
}
|
||||
|
||||
// just for subclass lombok @Data
|
||||
public Policy() {}
|
||||
|
||||
public Policy(PolicyTypeEnum type) {
|
||||
this.type = type;
|
||||
}
|
||||
@ -67,10 +94,11 @@ public abstract class Policy implements Writable, GsonPostProcessable {
|
||||
* @param type policy type
|
||||
* @param policyName policy name
|
||||
*/
|
||||
public Policy(long policyId, final PolicyTypeEnum type, final String policyName) {
|
||||
this.policyId = policyId;
|
||||
public Policy(long id, final PolicyTypeEnum type, final String policyName) {
|
||||
this.id = id;
|
||||
this.type = type;
|
||||
this.policyName = policyName;
|
||||
this.version = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -98,6 +126,9 @@ public abstract class Policy implements Writable, GsonPostProcessable {
|
||||
}
|
||||
}
|
||||
|
||||
public void modifyProperties(Map<String, String> properties) throws DdlException, AnalysisException {
|
||||
}
|
||||
|
||||
/**
|
||||
* Use for SHOW POLICY.
|
||||
**/
|
||||
|
||||
@ -142,7 +142,7 @@ public class RowPolicy extends Policy {
|
||||
|
||||
@Override
|
||||
public RowPolicy clone() {
|
||||
return new RowPolicy(this.policyId, this.policyName, this.dbId, this.user, this.originStmt, this.tableId,
|
||||
return new RowPolicy(this.id, this.policyName, this.dbId, this.user, this.originStmt, this.tableId,
|
||||
this.filterType, this.wherePredicate);
|
||||
}
|
||||
|
||||
|
||||
@ -25,29 +25,20 @@ import org.apache.doris.catalog.S3Resource;
|
||||
import org.apache.doris.catalog.ScalarType;
|
||||
import org.apache.doris.common.AnalysisException;
|
||||
import org.apache.doris.common.DdlException;
|
||||
import org.apache.doris.common.io.Text;
|
||||
import org.apache.doris.common.util.TimeUtils;
|
||||
import org.apache.doris.persist.gson.GsonUtils;
|
||||
import org.apache.doris.qe.ShowResultSetMetaData;
|
||||
import org.apache.doris.system.SystemInfoService;
|
||||
import org.apache.doris.task.AgentBatchTask;
|
||||
import org.apache.doris.task.AgentTaskExecutor;
|
||||
import org.apache.doris.task.NotifyUpdateStoragePolicyTask;
|
||||
|
||||
import com.google.common.base.Strings;
|
||||
import com.google.common.collect.Lists;
|
||||
import com.google.common.collect.Maps;
|
||||
import com.google.gson.annotations.SerializedName;
|
||||
import lombok.Data;
|
||||
import org.apache.commons.codec.digest.DigestUtils;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
||||
import java.io.DataInput;
|
||||
import java.io.IOException;
|
||||
import java.text.ParseException;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
@ -76,11 +67,12 @@ public class StoragePolicy extends Policy {
|
||||
public static final ShowResultSetMetaData STORAGE_META_DATA =
|
||||
ShowResultSetMetaData.builder()
|
||||
.addColumn(new Column("PolicyName", ScalarType.createVarchar(100)))
|
||||
.addColumn(new Column("Id", ScalarType.createVarchar(20)))
|
||||
.addColumn(new Column("Version", ScalarType.createVarchar(20)))
|
||||
.addColumn(new Column("Type", ScalarType.createVarchar(20)))
|
||||
.addColumn(new Column("StorageResource", ScalarType.createVarchar(20)))
|
||||
.addColumn(new Column("CooldownDatetime", ScalarType.createVarchar(20)))
|
||||
.addColumn(new Column("CooldownTtl", ScalarType.createVarchar(20)))
|
||||
.addColumn(new Column("properties", ScalarType.createVarchar(65535)))
|
||||
.build();
|
||||
|
||||
private static final Logger LOG = LogManager.getLogger(StoragePolicy.class);
|
||||
@ -100,10 +92,6 @@ public class StoragePolicy extends Policy {
|
||||
private static final long ONE_DAY_MS = 24 * ONE_HOUR_MS;
|
||||
private static final long ONE_WEEK_MS = 7 * ONE_DAY_MS;
|
||||
|
||||
public static final String MD5_CHECKSUM = "md5_checksum";
|
||||
@SerializedName(value = "md5Checksum")
|
||||
private String md5Checksum = null;
|
||||
|
||||
@SerializedName(value = "storageResource")
|
||||
private String storageResource = null;
|
||||
|
||||
@ -118,6 +106,7 @@ public class StoragePolicy extends Policy {
|
||||
|
||||
private Map<String, String> props;
|
||||
|
||||
// for Gson fromJson
|
||||
public StoragePolicy() {
|
||||
super(PolicyTypeEnum.STORAGE);
|
||||
}
|
||||
@ -201,7 +190,6 @@ public class StoragePolicy extends Policy {
|
||||
if (!addResourceReference() && !ifNotExists) {
|
||||
throw new AnalysisException("this policy has been added to s3 resource once, policy has been created.");
|
||||
}
|
||||
this.md5Checksum = calcPropertiesMd5();
|
||||
}
|
||||
|
||||
private static Resource checkIsS3ResourceAndExist(final String storageResource) throws AnalysisException {
|
||||
@ -229,25 +217,18 @@ public class StoragePolicy extends Policy {
|
||||
* Use for SHOW POLICY.
|
||||
**/
|
||||
public List<String> getShowInfo() throws AnalysisException {
|
||||
final String[] props = {""};
|
||||
if (Env.getCurrentEnv().getResourceMgr().containsResource(this.storageResource)) {
|
||||
props[0] = Env.getCurrentEnv().getResourceMgr().getResource(this.storageResource).toString();
|
||||
readLock();
|
||||
try {
|
||||
if (cooldownTimestampMs == -1) {
|
||||
return Lists.newArrayList(this.policyName, String.valueOf(this.id), String.valueOf(this.version),
|
||||
this.type.name(), this.storageResource, "-1", this.cooldownTtl);
|
||||
}
|
||||
return Lists.newArrayList(this.policyName, String.valueOf(this.id), String.valueOf(this.version),
|
||||
this.type.name(), this.storageResource, TimeUtils.longToTimeString(this.cooldownTimestampMs),
|
||||
this.cooldownTtl);
|
||||
} finally {
|
||||
readUnlock();
|
||||
}
|
||||
if (!props[0].equals("")) {
|
||||
// s3_secret_key => ******
|
||||
S3Resource s3Resource = GsonUtils.GSON.fromJson(props[0], S3Resource.class);
|
||||
Optional.ofNullable(s3Resource).ifPresent(s3 -> {
|
||||
Map<String, String> copyMap = s3.getCopiedProperties();
|
||||
copyMap.put(S3Resource.S3_SECRET_KEY, "******");
|
||||
props[0] = GsonUtils.GSON.toJson(copyMap);
|
||||
});
|
||||
}
|
||||
if (cooldownTimestampMs == -1) {
|
||||
return Lists.newArrayList(this.policyName, this.type.name(), this.storageResource, "-1", this.cooldownTtl,
|
||||
props[0]);
|
||||
}
|
||||
return Lists.newArrayList(this.policyName, this.type.name(), this.storageResource,
|
||||
TimeUtils.longToTimeString(this.cooldownTimestampMs), this.cooldownTtl, props[0]);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -255,7 +236,7 @@ public class StoragePolicy extends Policy {
|
||||
|
||||
@Override
|
||||
public StoragePolicy clone() {
|
||||
return new StoragePolicy(this.policyId, this.policyName, this.storageResource, this.cooldownTimestampMs,
|
||||
return new StoragePolicy(this.id, this.policyName, this.storageResource, this.cooldownTimestampMs,
|
||||
this.cooldownTtl, this.cooldownTtlMs);
|
||||
}
|
||||
|
||||
@ -324,25 +305,6 @@ public class StoragePolicy extends Policy {
|
||||
return cooldownTtlMs;
|
||||
}
|
||||
|
||||
// be use this md5Sum to determine whether storage policy has been changed.
|
||||
// if md5Sum not eq previous value, be change its storage policy.
|
||||
private String calcPropertiesMd5() {
|
||||
List<String> calcKey = Arrays.asList(COOLDOWN_DATETIME, COOLDOWN_TTL, S3Resource.S3_MAX_CONNECTIONS,
|
||||
S3Resource.S3_REQUEST_TIMEOUT_MS, S3Resource.S3_CONNECTION_TIMEOUT_MS,
|
||||
S3Resource.S3_ACCESS_KEY, S3Resource.S3_SECRET_KEY);
|
||||
Map<String, String> copiedStoragePolicyProperties = Env.getCurrentEnv().getResourceMgr()
|
||||
.getResource(this.storageResource).getCopiedProperties();
|
||||
|
||||
copiedStoragePolicyProperties.put(COOLDOWN_DATETIME, String.valueOf(this.cooldownTimestampMs));
|
||||
copiedStoragePolicyProperties.put(COOLDOWN_TTL, this.cooldownTtl);
|
||||
|
||||
LOG.info("calcPropertiesMd5 map {}", copiedStoragePolicyProperties);
|
||||
|
||||
return DigestUtils.md5Hex(calcKey.stream()
|
||||
.map(iter -> "(" + iter + ":" + copiedStoragePolicyProperties.get(iter) + ")")
|
||||
.reduce("", String::concat));
|
||||
}
|
||||
|
||||
public void checkProperties(Map<String, String> properties) throws AnalysisException {
|
||||
// check properties
|
||||
Map<String, String> copiedProperties = Maps.newHashMap(properties);
|
||||
@ -357,58 +319,45 @@ public class StoragePolicy extends Policy {
|
||||
}
|
||||
|
||||
public void modifyProperties(Map<String, String> properties) throws DdlException, AnalysisException {
|
||||
Optional.ofNullable(properties.get(COOLDOWN_TTL)).ifPresent(this::setCooldownTtl);
|
||||
Optional.ofNullable(properties.get(COOLDOWN_DATETIME)).ifPresent(date -> {
|
||||
this.toString();
|
||||
// some check
|
||||
long cooldownTtlMs = -1;
|
||||
String cooldownTtl = properties.get(COOLDOWN_TTL);
|
||||
if (cooldownTtl != null) {
|
||||
cooldownTtlMs = getMsByCooldownTtl(cooldownTtl);
|
||||
}
|
||||
long cooldownTimestampMs = -1;
|
||||
String cooldownDatetime = properties.get(COOLDOWN_DATETIME);
|
||||
if (cooldownDatetime != null) {
|
||||
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
|
||||
try {
|
||||
this.cooldownTimestampMs = df.parse(properties.get(COOLDOWN_DATETIME)).getTime();
|
||||
cooldownTimestampMs = df.parse(cooldownDatetime).getTime();
|
||||
} catch (ParseException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
});
|
||||
|
||||
if (policyName.equalsIgnoreCase(DEFAULT_STORAGE_POLICY_NAME) && storageResource == null) {
|
||||
// here first time set S3 resource to default storage policy.
|
||||
String alterStorageResource = Optional.ofNullable(properties.get(STORAGE_RESOURCE)).orElseThrow(
|
||||
() -> new DdlException("first time set default storage policy, but not give storageResource"));
|
||||
// check alterStorageResource resource exist.
|
||||
checkIsS3ResourceAndExist(alterStorageResource);
|
||||
storageResource = alterStorageResource;
|
||||
}
|
||||
|
||||
md5Checksum = calcPropertiesMd5();
|
||||
notifyUpdate();
|
||||
}
|
||||
|
||||
private void notifyUpdate() {
|
||||
SystemInfoService systemInfoService = Env.getCurrentSystemInfo();
|
||||
AgentBatchTask batchTask = new AgentBatchTask();
|
||||
|
||||
for (Long beId : systemInfoService.getBackendIds(true)) {
|
||||
Map<String, String> copiedProperties = Env.getCurrentEnv().getResourceMgr().getResource(storageResource)
|
||||
.getCopiedProperties();
|
||||
|
||||
Map<String, String> tmpMap = Maps.newHashMap(copiedProperties);
|
||||
|
||||
tmpMap.put(COOLDOWN_DATETIME, String.valueOf(this.cooldownTimestampMs));
|
||||
|
||||
Optional.ofNullable(this.getCooldownTtl()).ifPresent(date -> {
|
||||
tmpMap.put(COOLDOWN_TTL, this.getCooldownTtl());
|
||||
});
|
||||
tmpMap.put(MD5_CHECKSUM, this.getMd5Checksum());
|
||||
NotifyUpdateStoragePolicyTask notifyUpdateStoragePolicyTask = new NotifyUpdateStoragePolicyTask(beId,
|
||||
getPolicyName(), tmpMap);
|
||||
batchTask.addTask(notifyUpdateStoragePolicyTask);
|
||||
LOG.info("update policy info to be: {}, policy name: {}, "
|
||||
+ "properties: {} to modify S3 resource batch task.", beId, getPolicyName(), tmpMap);
|
||||
String storageResource = properties.get(STORAGE_RESOURCE);
|
||||
if (storageResource != null) {
|
||||
checkIsS3ResourceAndExist(storageResource);
|
||||
}
|
||||
|
||||
AgentTaskExecutor.submit(batchTask);
|
||||
}
|
||||
|
||||
public static StoragePolicy read(DataInput in) throws IOException {
|
||||
String json = Text.readString(in);
|
||||
return GsonUtils.GSON.fromJson(json, StoragePolicy.class);
|
||||
if (this.policyName.equalsIgnoreCase(DEFAULT_STORAGE_POLICY_NAME) && this.storageResource == null
|
||||
&& storageResource == null) {
|
||||
throw new DdlException("first time set default storage policy, but not give storageResource");
|
||||
}
|
||||
// modify properties
|
||||
writeLock();
|
||||
if (cooldownTtlMs > 0) {
|
||||
this.cooldownTtl = cooldownTtl;
|
||||
this.cooldownTtlMs = cooldownTtlMs;
|
||||
}
|
||||
if (cooldownTimestampMs > 0) {
|
||||
this.cooldownTimestampMs = cooldownTimestampMs;
|
||||
}
|
||||
if (storageResource != null) {
|
||||
this.storageResource = storageResource;
|
||||
}
|
||||
++version;
|
||||
writeUnlock();
|
||||
}
|
||||
|
||||
public boolean addResourceReference() {
|
||||
|
||||
@ -26,7 +26,6 @@ import org.apache.doris.catalog.DatabaseIf;
|
||||
import org.apache.doris.catalog.Env;
|
||||
import org.apache.doris.catalog.HMSResource;
|
||||
import org.apache.doris.catalog.OlapTable;
|
||||
import org.apache.doris.catalog.S3Resource;
|
||||
import org.apache.doris.catalog.Table;
|
||||
import org.apache.doris.catalog.TableIf;
|
||||
import org.apache.doris.catalog.TableIf.TableType;
|
||||
@ -54,9 +53,6 @@ import org.apache.doris.datasource.InternalCatalog;
|
||||
import org.apache.doris.master.MasterImpl;
|
||||
import org.apache.doris.mysql.privilege.PrivPredicate;
|
||||
import org.apache.doris.planner.StreamLoadPlanner;
|
||||
import org.apache.doris.policy.Policy;
|
||||
import org.apache.doris.policy.PolicyTypeEnum;
|
||||
import org.apache.doris.policy.StoragePolicy;
|
||||
import org.apache.doris.qe.ConnectContext;
|
||||
import org.apache.doris.qe.ConnectProcessor;
|
||||
import org.apache.doris.qe.QeProcessorImpl;
|
||||
@ -84,8 +80,6 @@ import org.apache.doris.thrift.TFrontendPingFrontendResult;
|
||||
import org.apache.doris.thrift.TFrontendPingFrontendStatusCode;
|
||||
import org.apache.doris.thrift.TGetDbsParams;
|
||||
import org.apache.doris.thrift.TGetDbsResult;
|
||||
import org.apache.doris.thrift.TGetStoragePolicy;
|
||||
import org.apache.doris.thrift.TGetStoragePolicyResult;
|
||||
import org.apache.doris.thrift.TGetTablesParams;
|
||||
import org.apache.doris.thrift.TGetTablesResult;
|
||||
import org.apache.doris.thrift.TIcebergMetadataType;
|
||||
@ -111,7 +105,6 @@ import org.apache.doris.thrift.TReportExecStatusParams;
|
||||
import org.apache.doris.thrift.TReportExecStatusResult;
|
||||
import org.apache.doris.thrift.TReportRequest;
|
||||
import org.apache.doris.thrift.TRow;
|
||||
import org.apache.doris.thrift.TS3StorageParam;
|
||||
import org.apache.doris.thrift.TShowVariableRequest;
|
||||
import org.apache.doris.thrift.TShowVariableResult;
|
||||
import org.apache.doris.thrift.TSnapshotLoaderReportRequest;
|
||||
@ -147,11 +140,9 @@ import org.jetbrains.annotations.NotNull;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.time.LocalDateTime;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
@ -1233,61 +1224,6 @@ public class FrontendServiceImpl implements FrontendService.Iface {
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TGetStoragePolicyResult refreshStoragePolicy() throws TException {
|
||||
TGetStoragePolicyResult result = new TGetStoragePolicyResult();
|
||||
TStatus status = new TStatus(TStatusCode.OK);
|
||||
result.setStatus(status);
|
||||
|
||||
List<Policy> policyList = Env.getCurrentEnv().getPolicyMgr().getCopiedPoliciesByType(PolicyTypeEnum.STORAGE);
|
||||
policyList.stream().filter(p -> p instanceof StoragePolicy).map(p -> (StoragePolicy) p).forEach(
|
||||
iter -> {
|
||||
// default policy not init.
|
||||
if (iter.getStorageResource() == null) {
|
||||
return;
|
||||
}
|
||||
TGetStoragePolicy rEntry = new TGetStoragePolicy();
|
||||
rEntry.setPolicyName(iter.getPolicyName());
|
||||
//java 8 not support ifPresentOrElse
|
||||
final long[] ttlCoolDown = {-1};
|
||||
Optional.ofNullable(iter.getCooldownTtl()).ifPresent(ttl -> ttlCoolDown[0] = Integer.parseInt(ttl));
|
||||
rEntry.setCooldownTtl(ttlCoolDown[0]);
|
||||
|
||||
//timestamp : ms -> s
|
||||
rEntry.setCooldownDatetime(
|
||||
iter.getCooldownTimestampMs() == -1 ? -1 : iter.getCooldownTimestampMs() / 1000);
|
||||
|
||||
Optional.ofNullable(iter.getMd5Checksum()).ifPresent(rEntry::setMd5Checksum);
|
||||
TS3StorageParam s3Info = new TS3StorageParam();
|
||||
Optional.ofNullable(iter.getStorageResource()).ifPresent(resource -> {
|
||||
Map<String, String> storagePolicyProperties = Env.getCurrentEnv().getResourceMgr()
|
||||
.getResource(resource).getCopiedProperties();
|
||||
s3Info.setS3Endpoint(storagePolicyProperties.get(S3Resource.S3_ENDPOINT));
|
||||
s3Info.setS3Region(storagePolicyProperties.get(S3Resource.S3_REGION));
|
||||
s3Info.setRootPath(storagePolicyProperties.get(S3Resource.S3_ROOT_PATH));
|
||||
s3Info.setS3Ak(storagePolicyProperties.get(S3Resource.S3_ACCESS_KEY));
|
||||
s3Info.setS3Sk(storagePolicyProperties.get(S3Resource.S3_SECRET_KEY));
|
||||
s3Info.setBucket(storagePolicyProperties.get(S3Resource.S3_BUCKET));
|
||||
s3Info.setS3MaxConn(
|
||||
Integer.parseInt(storagePolicyProperties.get(S3Resource.S3_MAX_CONNECTIONS)));
|
||||
s3Info.setS3RequestTimeoutMs(Integer.parseInt(
|
||||
storagePolicyProperties.get(S3Resource.S3_REQUEST_TIMEOUT_MS)));
|
||||
s3Info.setS3ConnTimeoutMs(Integer.parseInt(
|
||||
storagePolicyProperties.get(S3Resource.S3_CONNECTION_TIMEOUT_MS)));
|
||||
});
|
||||
|
||||
rEntry.setS3StorageParam(s3Info);
|
||||
result.addToResultEntrys(rEntry);
|
||||
}
|
||||
);
|
||||
if (!result.isSetResultEntrys()) {
|
||||
result.setResultEntrys(new ArrayList<>());
|
||||
}
|
||||
|
||||
LOG.debug("refresh storage policy request: {}", result);
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TInitExternalCtlMetaResult initExternalCtlMeta(TInitExternalCtlMetaRequest request) throws TException {
|
||||
if (request.isSetCatalogId() && request.isSetDbId()) {
|
||||
|
||||
@ -33,12 +33,12 @@ import org.apache.doris.thrift.TCompactionReq;
|
||||
import org.apache.doris.thrift.TCreateTabletReq;
|
||||
import org.apache.doris.thrift.TDownloadReq;
|
||||
import org.apache.doris.thrift.TDropTabletReq;
|
||||
import org.apache.doris.thrift.TGetStoragePolicy;
|
||||
import org.apache.doris.thrift.TMoveDirReq;
|
||||
import org.apache.doris.thrift.TNetworkAddress;
|
||||
import org.apache.doris.thrift.TPublishVersionRequest;
|
||||
import org.apache.doris.thrift.TPushCooldownConfReq;
|
||||
import org.apache.doris.thrift.TPushReq;
|
||||
import org.apache.doris.thrift.TPushStoragePolicyReq;
|
||||
import org.apache.doris.thrift.TReleaseSnapshotRequest;
|
||||
import org.apache.doris.thrift.TSnapshotRequest;
|
||||
import org.apache.doris.thrift.TStorageMediumMigrateReq;
|
||||
@ -347,13 +347,13 @@ public class AgentBatchTask implements Runnable {
|
||||
tAgentTaskRequest.setCompactionReq(request);
|
||||
return tAgentTaskRequest;
|
||||
}
|
||||
case NOTIFY_UPDATE_STORAGE_POLICY: {
|
||||
NotifyUpdateStoragePolicyTask notifyUpdateStoragePolicyTask = (NotifyUpdateStoragePolicyTask) task;
|
||||
TGetStoragePolicy request = notifyUpdateStoragePolicyTask.toThrift();
|
||||
case PUSH_STORAGE_POLICY: {
|
||||
PushStoragePolicyTask pushStoragePolicyTask = (PushStoragePolicyTask) task;
|
||||
TPushStoragePolicyReq request = pushStoragePolicyTask.toThrift();
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug(request.toString());
|
||||
}
|
||||
tAgentTaskRequest.setUpdatePolicy(request);
|
||||
tAgentTaskRequest.setPushStoragePolicyReq(request);
|
||||
return tAgentTaskRequest;
|
||||
}
|
||||
case PUSH_COOLDOWN_CONF: {
|
||||
|
||||
@ -20,10 +20,13 @@ package org.apache.doris.task;
|
||||
import org.apache.doris.alter.SchemaChangeHandler;
|
||||
import org.apache.doris.analysis.DataSortInfo;
|
||||
import org.apache.doris.catalog.Column;
|
||||
import org.apache.doris.catalog.Env;
|
||||
import org.apache.doris.catalog.Index;
|
||||
import org.apache.doris.catalog.KeysType;
|
||||
import org.apache.doris.common.MarkedCountDownLatch;
|
||||
import org.apache.doris.common.Status;
|
||||
import org.apache.doris.policy.Policy;
|
||||
import org.apache.doris.policy.PolicyTypeEnum;
|
||||
import org.apache.doris.thrift.TColumn;
|
||||
import org.apache.doris.thrift.TCompressionType;
|
||||
import org.apache.doris.thrift.TCreateTabletReq;
|
||||
@ -42,6 +45,7 @@ import org.apache.logging.log4j.Logger;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
|
||||
public class CreateReplicaTask extends AgentTask {
|
||||
@ -88,7 +92,7 @@ public class CreateReplicaTask extends AgentTask {
|
||||
private boolean isRecoverTask = false;
|
||||
|
||||
private DataSortInfo dataSortInfo;
|
||||
private String storagePolicy;
|
||||
private long storagePolicyId = 0; // <= 0 means no storage policy
|
||||
|
||||
private boolean enableUniqueKeyMergeOnWrite;
|
||||
|
||||
@ -134,7 +138,13 @@ public class CreateReplicaTask extends AgentTask {
|
||||
this.tabletType = tabletType;
|
||||
this.dataSortInfo = dataSortInfo;
|
||||
this.enableUniqueKeyMergeOnWrite = (keysType == KeysType.UNIQUE_KEYS && enableUniqueKeyMergeOnWrite);
|
||||
this.storagePolicy = storagePolicy;
|
||||
if (storagePolicy != null && !storagePolicy.isEmpty()) {
|
||||
Optional<Policy> policy = Env.getCurrentEnv().getPolicyMgr()
|
||||
.findPolicy(storagePolicy, PolicyTypeEnum.STORAGE);
|
||||
if (policy.isPresent()) {
|
||||
this.storagePolicyId = policy.get().getId();
|
||||
}
|
||||
}
|
||||
this.disableAutoCompaction = disableAutoCompaction;
|
||||
this.storeRowColumn = storeRowColumn;
|
||||
}
|
||||
@ -242,7 +252,9 @@ public class CreateReplicaTask extends AgentTask {
|
||||
createTabletReq.setVersion(version);
|
||||
|
||||
createTabletReq.setStorageMedium(storageMedium);
|
||||
createTabletReq.setStoragePolicy(storagePolicy);
|
||||
if (storagePolicyId > 0) {
|
||||
createTabletReq.setStoragePolicyId(storagePolicyId);
|
||||
}
|
||||
if (inRestoreMode) {
|
||||
createTabletReq.setInRestoreMode(true);
|
||||
}
|
||||
|
||||
@ -1,70 +0,0 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.doris.task;
|
||||
|
||||
import org.apache.doris.catalog.S3Resource;
|
||||
import org.apache.doris.policy.StoragePolicy;
|
||||
import org.apache.doris.thrift.TGetStoragePolicy;
|
||||
import org.apache.doris.thrift.TS3StorageParam;
|
||||
import org.apache.doris.thrift.TTaskType;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
public class NotifyUpdateStoragePolicyTask extends AgentTask {
|
||||
private static final Logger LOG = LogManager.getLogger(NotifyUpdateStoragePolicyTask.class);
|
||||
private String policyName;
|
||||
private Map<String, String> properties;
|
||||
|
||||
public NotifyUpdateStoragePolicyTask(long backendId, String name, Map<String, String> properties) {
|
||||
super(null, backendId, TTaskType.NOTIFY_UPDATE_STORAGE_POLICY, -1, -1, -1, -1, -1, -1, -1);
|
||||
this.policyName = name;
|
||||
this.properties = properties;
|
||||
}
|
||||
|
||||
public TGetStoragePolicy toThrift() {
|
||||
TGetStoragePolicy ret = new TGetStoragePolicy();
|
||||
|
||||
ret.policy_name = policyName;
|
||||
// cooldown_datetime in BE is in seconds
|
||||
ret.setCooldownDatetime(Long.parseLong(properties.get(StoragePolicy.COOLDOWN_DATETIME)) / 1000);
|
||||
ret.setCooldownTtl(Long.parseLong(properties.get(StoragePolicy.COOLDOWN_TTL)));
|
||||
ret.s3_storage_param = new TS3StorageParam();
|
||||
ret.s3_storage_param.s3_max_conn = Integer.parseInt(
|
||||
properties.getOrDefault(S3Resource.S3_MAX_CONNECTIONS,
|
||||
S3Resource.DEFAULT_S3_MAX_CONNECTIONS));
|
||||
ret.s3_storage_param.s3_request_timeout_ms = Integer.parseInt(
|
||||
properties.getOrDefault(S3Resource.S3_REQUEST_TIMEOUT_MS,
|
||||
S3Resource.DEFAULT_S3_REQUEST_TIMEOUT_MS));
|
||||
ret.s3_storage_param.s3_conn_timeout_ms = Integer.parseInt(
|
||||
properties.getOrDefault(S3Resource.S3_CONNECTION_TIMEOUT_MS,
|
||||
S3Resource.DEFAULT_S3_CONNECTION_TIMEOUT_MS));
|
||||
ret.s3_storage_param.s3_endpoint = properties.get(S3Resource.S3_ENDPOINT);
|
||||
ret.s3_storage_param.s3_region = properties.get(S3Resource.S3_REGION);
|
||||
ret.s3_storage_param.root_path = properties.get(S3Resource.S3_ROOT_PATH);
|
||||
ret.s3_storage_param.s3_ak = properties.get(S3Resource.S3_ACCESS_KEY);
|
||||
ret.s3_storage_param.s3_sk = properties.get(S3Resource.S3_SECRET_KEY);
|
||||
ret.s3_storage_param.bucket = properties.get(S3Resource.S3_BUCKET);
|
||||
ret.md5_checksum = properties.get(StoragePolicy.MD5_CHECKSUM);
|
||||
|
||||
LOG.info("TGetStoragePolicy toThrift : {}", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
@ -0,0 +1,115 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package org.apache.doris.task;

import org.apache.doris.catalog.Env;
import org.apache.doris.catalog.Resource;
import org.apache.doris.catalog.Resource.ResourceType;
import org.apache.doris.catalog.S3Resource;
import org.apache.doris.policy.Policy;
import org.apache.doris.policy.StoragePolicy;
import org.apache.doris.thrift.TPushStoragePolicyReq;
import org.apache.doris.thrift.TS3StorageParam;
import org.apache.doris.thrift.TStoragePolicy;
import org.apache.doris.thrift.TStorageResource;
import org.apache.doris.thrift.TTaskType;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.util.ArrayList;
import java.util.List;

public class PushStoragePolicyTask extends AgentTask {
    private static final Logger LOG = LogManager.getLogger(PushStoragePolicyTask.class);

    private List<Policy> storagePolicy;
    private List<Resource> resource;
    private List<Long> droppedStoragePolicy;

    public PushStoragePolicyTask(long backendId, List<Policy> storagePolicy,
                                 List<Resource> resource, List<Long> droppedStoragePolicy) {
        super(null, backendId, TTaskType.PUSH_STORAGE_POLICY, -1, -1, -1, -1, -1, -1, -1);
        this.storagePolicy = storagePolicy;
        this.resource = resource;
        this.droppedStoragePolicy = droppedStoragePolicy;
    }

    public TPushStoragePolicyReq toThrift() {
        TPushStoragePolicyReq ret = new TPushStoragePolicyReq();
        List<TStoragePolicy> tStoragePolicies = new ArrayList<>();
        storagePolicy.forEach(p -> {
            TStoragePolicy item = new TStoragePolicy();
            p.readLock();
            try {
                item.setId(p.getId());
                item.setName(p.getPolicyName());
                item.setVersion(p.getVersion());
                StoragePolicy storagePolicy = (StoragePolicy) p;
                String resourceName = storagePolicy.getStorageResource();
                Resource resource = Env.getCurrentEnv().getResourceMgr().getResource(resourceName);
                if (resource == null || resource.getType() != ResourceType.S3) {
                    LOG.warn("can't find s3 resource by name {}", resourceName);
                    return;
                }
                item.setResourceId(resource.getId());
                long coolDownDatetime = storagePolicy.getCooldownTimestampMs() / 1000;
                item.setCooldownDatetime(coolDownDatetime);
                long coolDownTtl = storagePolicy.getCooldownTtlMs() / 1000;
                item.setCooldownTtl(coolDownTtl);
            } finally {
                p.readUnlock();
            }
            tStoragePolicies.add(item);
        });
        ret.setStoragePolicy(tStoragePolicies);

        List<TStorageResource> tStorageResources = new ArrayList<>();
        resource.forEach(r -> {
            TStorageResource item = new TStorageResource();
            r.readLock();
            try {
                item.setId(r.getId());
                item.setName(r.getName());
                item.setVersion(r.getVersion());
                TS3StorageParam s3Info = new TS3StorageParam();
                S3Resource s3Resource = (S3Resource) r;
                s3Info.setEndpoint(s3Resource.getProperty(S3Resource.S3_ENDPOINT));
                s3Info.setRegion(s3Resource.getProperty(S3Resource.S3_REGION));
                s3Info.setAk(s3Resource.getProperty(S3Resource.S3_ACCESS_KEY));
                s3Info.setSk(s3Resource.getProperty(S3Resource.S3_SECRET_KEY));
                s3Info.setRootPath(s3Resource.getProperty(S3Resource.S3_ROOT_PATH));
                s3Info.setBucket(s3Resource.getProperty(S3Resource.S3_BUCKET));
                // fall back to the resource defaults when the optional tuning properties are absent
                String maxConnections = s3Resource.getProperty(S3Resource.S3_MAX_CONNECTIONS);
                s3Info.setMaxConn(Integer.parseInt(maxConnections == null
                        ? S3Resource.DEFAULT_S3_MAX_CONNECTIONS : maxConnections));
                String requestTimeoutMs = s3Resource.getProperty(S3Resource.S3_REQUEST_TIMEOUT_MS);
                s3Info.setRequestTimeoutMs(Integer.parseInt(requestTimeoutMs == null
                        ? S3Resource.DEFAULT_S3_REQUEST_TIMEOUT_MS : requestTimeoutMs));
                String connTimeoutMs = s3Resource.getProperty(S3Resource.S3_CONNECTION_TIMEOUT_MS);
                s3Info.setConnTimeoutMs(Integer.parseInt(connTimeoutMs == null
                        ? S3Resource.DEFAULT_S3_CONNECTION_TIMEOUT_MS : connTimeoutMs));
                item.setS3StorageParam(s3Info);
            } finally {
                r.readUnlock();
            }
            tStorageResources.add(item);
        });
        ret.setResource(tStorageResources);

        ret.setDroppedStoragePolicy(droppedStoragePolicy);
        return ret;
    }
}
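A minimal usage sketch, not part of this patch: such a task would typically be handed to a backend through the generic agent-task plumbing (AgentBatchTask / AgentTaskExecutor). The variables backendId, policiesToPush, resourcesToPush and droppedPolicyIds below are illustrative placeholders, not names from this commit.

    // Hypothetical call site; the caller is assumed to have collected the policies,
    // resources and dropped-policy ids that need to be pushed to this backend.
    AgentBatchTask batchTask = new AgentBatchTask();
    batchTask.addTask(new PushStoragePolicyTask(backendId, policiesToPush, resourcesToPush, droppedPolicyIds));
    AgentTaskExecutor.submit(batchTask);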
@ -44,9 +44,9 @@ public class UpdateTabletMetaInfoTask extends AgentTask {
    private MarkedCountDownLatch<Long, Set<Pair<Long, Integer>>> latch;

    private Set<Pair<Long, Integer>> tableIdWithSchemaHash;
    private boolean isInMemory;
    private int inMemory = -1; // < 0 means not to update inMemory property, > 0 means true, == 0 means false
    private TTabletMetaType metaType;
    private String storagePolicy;
    private long storagePolicyId = -1; // < 0 means not to update storage policy, == 0 means to reset storage policy

    // <tablet id, tablet schema hash, tablet in memory>
    private List<Triple<Long, Integer, Boolean>> tabletToInMemory;
@ -61,11 +61,11 @@ public class UpdateTabletMetaInfoTask extends AgentTask {

    public UpdateTabletMetaInfoTask(long backendId,
                                    Set<Pair<Long, Integer>> tableIdWithSchemaHash,
                                    boolean isInMemory, String storagePolicy,
                                    int inMemory, long storagePolicyId,
                                    MarkedCountDownLatch<Long, Set<Pair<Long, Integer>>> latch) {
        this(backendId, tableIdWithSchemaHash, TTabletMetaType.INMEMORY);
        this.storagePolicy = storagePolicy;
        this.isInMemory = isInMemory;
        this.storagePolicyId = storagePolicyId;
        this.inMemory = inMemory;
        this.latch = latch;
    }

@ -132,8 +132,12 @@ public class UpdateTabletMetaInfoTask extends AgentTask {
            TTabletMetaInfo metaInfo = new TTabletMetaInfo();
            metaInfo.setTabletId(pair.first);
            metaInfo.setSchemaHash(pair.second);
            metaInfo.setIsInMemory(isInMemory);
            metaInfo.setStoragePolicy(storagePolicy);
            if (inMemory >= 0) {
                metaInfo.setIsInMemory(inMemory > 0);
            }
            if (storagePolicyId >= 0) {
                metaInfo.setStoragePolicyId(storagePolicyId);
            }
            metaInfo.setMetaType(metaType);
            metaInfos.add(metaInfo);
        }

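For illustration only (not part of the patch): with the sentinel convention above, a caller can encode "leave unchanged", "reset" and "set" in the two scalar arguments of the new constructor. The variables backendId, tabletIdWithSchemaHash and latch are assumed to be prepared by the caller.

    // Hypothetical example of the sentinel values passed to the new constructor.
    int inMemory = 1;            // -1: leave the in-memory flag untouched, 0: set false, 1: set true
    long storagePolicyId = 0L;   // -1: leave the storage policy untouched, 0: reset it, > 0: switch to that policy id
    UpdateTabletMetaInfoTask task = new UpdateTabletMetaInfoTask(
            backendId, tabletIdWithSchemaHash, inMemory, storagePolicyId, latch);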
@ -0,0 +1,57 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package org.apache.doris.persist;

import org.apache.doris.catalog.Resource;
import org.apache.doris.catalog.S3Resource;

import org.junit.Assert;
import org.junit.Test;

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;

public class ResourcePersistTest {
    @Test
    public void test() throws IOException {
        Resource resource = new S3Resource("s3_resource");
        File file = new File("./ResourcePersistTest");
        try {
            // 1. Write objects to file
            file.createNewFile();
            DataOutputStream dos = new DataOutputStream(new FileOutputStream(file));
            resource.write(dos);
            dos.flush();
            dos.close();

            // 2. Read objects from file
            DataInputStream dis = new DataInputStream(new FileInputStream(file));
            S3Resource resource1 = (S3Resource) Resource.read(dis);
            dis.close();
            Assert.assertEquals(resource1.toString(), resource.toString());
            resource1.readLock();
            resource1.readUnlock();
        } finally {
            file.delete();
        }
    }
}
@ -37,22 +37,24 @@ public class StoragePolicyPersistTest {

        // 1. Write objects to file
        File file = new File("./StoregaPolicyPersistTest");
        file.createNewFile();
        DataOutputStream dos = new DataOutputStream(new FileOutputStream(file));
        storagePolicy.write(dos);
        dos.flush();
        dos.close();
        try {
            file.createNewFile();
            DataOutputStream dos = new DataOutputStream(new FileOutputStream(file));
            storagePolicy.write(dos);
            dos.flush();
            dos.close();

        // 2. Read objects from file
        DataInputStream dis = new DataInputStream(new FileInputStream(file));
        StoragePolicy anotherStoragePolicy = StoragePolicy.read(dis);
        Assert.assertEquals(cooldownTime, anotherStoragePolicy.getCooldownTimestampMs());
            // 2. Read objects from file
            DataInputStream dis = new DataInputStream(new FileInputStream(file));
            StoragePolicy storagePolicy1 = (StoragePolicy) StoragePolicy.read(dis);
            dis.close();
            Assert.assertEquals(cooldownTime, storagePolicy1.getCooldownTimestampMs());
            Assert.assertTrue(storagePolicy1.getLock() != null);

        StoragePolicy clonePolicy = anotherStoragePolicy.clone();
        Assert.assertEquals(cooldownTime, clonePolicy.getCooldownTimestampMs());

        // 3. delete files
        dis.close();
        file.delete();
            StoragePolicy clonePolicy = storagePolicy1.clone();
            Assert.assertEquals(cooldownTime, clonePolicy.getCooldownTimestampMs());
        } finally {
            file.delete();
        }
    }
}
@ -221,7 +221,7 @@ public class PolicyTest extends TestWithFeService {
        Policy newPolicy = Policy.read(input);
        Assertions.assertTrue(newPolicy instanceof RowPolicy);
        RowPolicy newRowPolicy = (RowPolicy) newPolicy;
        Assertions.assertEquals(rowPolicy.getPolicyId(), newRowPolicy.getPolicyId());
        Assertions.assertEquals(rowPolicy.getId(), newRowPolicy.getId());
        Assertions.assertEquals(type, newRowPolicy.getType());
        Assertions.assertEquals(policyName, newRowPolicy.getPolicyName());
        Assertions.assertEquals(dbId, newRowPolicy.getDbId());