[refactor](fe): Remove unused resource group (#18249)

This commit is contained in:
赵立伟
2023-04-18 21:04:30 +08:00
committed by GitHub
parent 0165ffbcae
commit d24a8a524e
23 changed files with 40 additions and 614 deletions

View File

@ -1,145 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.doris.catalog;
import org.apache.doris.common.DdlException;
import org.apache.doris.common.io.Writable;
import org.apache.doris.thrift.TResourceGroup;
import org.apache.doris.thrift.TResourceType;
import com.google.common.collect.Maps;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.EnumMap;
import java.util.Map;
// Resource group to contain
/**
 * Per-user resource quota container, keyed by {@link ResourceType}.
 *
 * <p>Quotas are stored in an {@link EnumMap}; serialization writes each entry as a pair of
 * ints (the thrift enum value of the type, then the quota) preceded by the entry count,
 * so {@link #write(DataOutput)} and {@link #readFields(DataInput)} must stay symmetric.
 */
public class ResourceGroup implements Writable {
    // Quota value per resource type. EnumMap gives compact storage and a stable
    // iteration order (enum declaration order), which keeps toString()/write() deterministic.
    private EnumMap<ResourceType, Integer> quotaByType;

    // Used only by readIn() and getCopiedResourceGroup(); starts with no quotas set.
    private ResourceGroup() {
        quotaByType = Maps.newEnumMap(ResourceType.class);
    }

    private ResourceGroup(int cpuShare) {
        this();
        quotaByType.put(ResourceType.CPU_SHARE, cpuShare);
    }

    /**
     * Returns an independent copy of this group; mutating the copy's quotas does not
     * affect this instance.
     */
    public ResourceGroup getCopiedResourceGroup() {
        ResourceGroup copy = new ResourceGroup();
        copy.quotaByType = Maps.newEnumMap(quotaByType);
        return copy;
    }

    /**
     * Sets the quota for the resource type named by {@code desc}.
     *
     * @param desc  case-insensitive resource type name, resolved via ResourceType.fromDesc
     * @param value new quota; CPU_SHARE and IO_SHARE are restricted to [100, 1000]
     * @throws DdlException if {@code desc} is unknown or {@code value} is out of range
     */
    public void updateByDesc(String desc, int value) throws DdlException {
        ResourceType type = ResourceType.fromDesc(desc);
        if (type == null) {
            throw new DdlException("Unknown resource type(" + desc + ")");
        }
        if ((type == ResourceType.CPU_SHARE || type == ResourceType.IO_SHARE)
                && (value < 100 || value > 1000)) {
            throw new DdlException("Value for resource type("
                    + desc + ") has to be in [100, 1000]");
        }
        quotaByType.put(type, value);
    }

    /**
     * Returns the quota for the resource type named by {@code desc}.
     *
     * @throws DdlException if {@code desc} is unknown, or no quota has been set for it.
     *         (Previously an unset type auto-unboxed a null Integer and threw
     *         NullPointerException instead of the declared exception.)
     */
    public int getByDesc(String desc) throws DdlException {
        ResourceType type = ResourceType.fromDesc(desc);
        if (type == null) {
            throw new DdlException("Unknown resource type(" + desc + ")");
        }
        Integer quota = quotaByType.get(type);
        if (quota == null) {
            throw new DdlException("No quota set for resource type(" + desc + ")");
        }
        return quota;
    }

    /** Returns the live internal quota map (not a copy); callers must not mutate it. */
    public Map<ResourceType, Integer> getQuotaMap() {
        return quotaByType;
    }

    public static Builder builder() {
        return new Builder();
    }

    /** Converts this group to its thrift representation for RPC transport. */
    public TResourceGroup toThrift() {
        TResourceGroup tgroup = new TResourceGroup();
        // Map.Entry, not EnumMap.Entry: EnumMap only exposes the inherited interface.
        for (Map.Entry<ResourceType, Integer> entry : quotaByType.entrySet()) {
            tgroup.putToResourceByType(entry.getKey().toThrift(), entry.getValue());
        }
        return tgroup;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        int idx = 0;
        for (Map.Entry<ResourceType, Integer> entry : quotaByType.entrySet()) {
            if (idx++ != 0) {
                sb.append(", ");
            }
            sb.append(entry.getKey().toString()).append(" = ").append(entry.getValue());
        }
        return sb.toString();
    }

    /** Writes entry count, then (thrift type value, quota) int pairs. */
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(quotaByType.size());
        for (Map.Entry<ResourceType, Integer> entry : quotaByType.entrySet()) {
            out.writeInt(entry.getKey().toThrift().getValue());
            out.writeInt(entry.getValue());
        }
    }

    /** Inverse of {@link #write(DataOutput)}; appends to the current quota map. */
    public void readFields(DataInput in) throws IOException {
        int numResource = in.readInt();
        for (int i = 0; i < numResource; ++i) {
            int code = in.readInt();
            int value = in.readInt();
            quotaByType.put(ResourceType.fromThrift(TResourceType.findByValue(code)), value);
        }
    }

    /** Deserializes a group previously written with {@link #write(DataOutput)}. */
    public static ResourceGroup readIn(DataInput in) throws IOException {
        ResourceGroup group = new ResourceGroup();
        group.readFields(in);
        return group;
    }

    /** Builder for ResourceGroup; only CPU share is configurable, defaulting per ResourceType. */
    public static class Builder {
        private int cpuShare;

        public Builder() {
            cpuShare = ResourceType.CPU_SHARE.getDefaultValue();
        }

        public ResourceGroup build() {
            return new ResourceGroup(cpuShare);
        }

        public Builder cpuShare(int share) {
            this.cpuShare = share;
            return this;
        }
    }
}

View File

@ -30,13 +30,11 @@ import org.apache.doris.catalog.TabletInvertedIndex;
import org.apache.doris.catalog.TabletMeta;
import org.apache.doris.common.Config;
import org.apache.doris.persist.ConsistencyCheckInfo;
import org.apache.doris.qe.ConnectContext;
import org.apache.doris.task.AgentBatchTask;
import org.apache.doris.task.AgentTask;
import org.apache.doris.task.AgentTaskExecutor;
import org.apache.doris.task.AgentTaskQueue;
import org.apache.doris.task.CheckConsistencyTask;
import org.apache.doris.thrift.TResourceInfo;
import org.apache.doris.thrift.TTaskType;
import com.google.common.base.Preconditions;
@ -118,12 +116,6 @@ public class CheckConsistencyJob {
return false;
}
// get user resource info
TResourceInfo resourceInfo = null;
if (ConnectContext.get() != null) {
resourceInfo = ConnectContext.get().toResourceCtx();
}
Tablet tablet = null;
AgentBatchTask batchTask = new AgentBatchTask();
@ -179,7 +171,7 @@ public class CheckConsistencyJob {
maxDataSize = replica.getDataSize();
}
CheckConsistencyTask task = new CheckConsistencyTask(resourceInfo, replica.getBackendId(),
CheckConsistencyTask task = new CheckConsistencyTask(null, replica.getBackendId(),
tabletMeta.getDbId(),
tabletMeta.getTableId(),
tabletMeta.getPartitionId(),

View File

@ -36,9 +36,7 @@ import org.apache.doris.load.FailMsg.CancelType;
import org.apache.doris.persist.ReplicaPersistInfo;
import org.apache.doris.task.PushTask;
import org.apache.doris.thrift.TPriority;
import org.apache.doris.thrift.TResourceInfo;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
@ -120,8 +118,6 @@ public class LoadJob implements Writable {
private List<Predicate> conditions = null;
private DeleteInfo deleteInfo;
private TResourceInfo resourceInfo;
private TPriority priority;
private long execMemLimit;
@ -167,7 +163,6 @@ public class LoadJob implements Writable {
this.unfinishedTablets = new ArrayList<>();
this.pushTasks = new HashSet<PushTask>();
this.replicaPersistInfos = Maps.newHashMap();
this.resourceInfo = null;
this.priority = TPriority.NORMAL;
this.execMemLimit = DEFAULT_EXEC_MEM_LIMIT;
this.finishedReplicas = Maps.newHashMap();
@ -563,14 +558,6 @@ public class LoadJob implements Writable {
}
}
public void setResourceInfo(TResourceInfo resourceInfo) {
this.resourceInfo = resourceInfo;
}
public TResourceInfo getResourceInfo() {
return resourceInfo;
}
public boolean addFinishedReplica(Replica replica) {
finishedReplicas.put(replica.getId(), replica);
return true;
@ -657,8 +644,6 @@ public class LoadJob implements Writable {
pushTasks.clear();
pushTasks = null;
}
resourceInfo = null;
}
public void write(DataOutput out) throws IOException {
@ -728,14 +713,7 @@ public class LoadJob implements Writable {
}
// resourceInfo
if (resourceInfo == null || Strings.isNullOrEmpty(resourceInfo.getGroup())
|| Strings.isNullOrEmpty(resourceInfo.getUser())) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
Text.writeString(out, resourceInfo.getUser());
Text.writeString(out, resourceInfo.getGroup());
}
out.writeBoolean(false);
Text.writeString(out, priority.name());
@ -877,9 +855,8 @@ public class LoadJob implements Writable {
}
if (in.readBoolean()) {
String user = Text.readString(in);
String group = Text.readString(in);
resourceInfo = new TResourceInfo(user, group);
Text.readString(in);
Text.readString(in);
}
this.priority = TPriority.valueOf(Text.readString(in));

View File

@ -51,7 +51,6 @@ import org.apache.doris.task.StorageMediaMigrationTask;
import org.apache.doris.task.UpdateTabletMetaInfoTask;
import org.apache.doris.task.UploadTask;
import org.apache.doris.thrift.TBackend;
import org.apache.doris.thrift.TFetchResourceResult;
import org.apache.doris.thrift.TFinishTaskRequest;
import org.apache.doris.thrift.TMasterResult;
import org.apache.doris.thrift.TPushType;
@ -567,10 +566,6 @@ public class MasterImpl {
return reportHandler.handleReport(request);
}
public TFetchResourceResult fetchResource() {
return Env.getCurrentEnv().getAuth().toResourceThrift();
}
private void finishAlterTask(AgentTask task) {
AlterReplicaTask alterTask = (AlterReplicaTask) task;
try {

View File

@ -29,7 +29,6 @@ import org.apache.doris.common.LdapConfig;
import org.apache.doris.datasource.CatalogIf;
import org.apache.doris.ldap.LdapAuthenticate;
import org.apache.doris.mysql.privilege.Auth;
import org.apache.doris.mysql.privilege.UserResource;
import org.apache.doris.qe.ConnectContext;
import org.apache.doris.system.SystemInfoService;
@ -109,9 +108,6 @@ public class MysqlProto {
strList = tmpUser.split("#", 2);
if (strList.length > 1) {
tmpUser = strList[0];
if (UserResource.isValidGroup(strList[1])) {
context.getSessionVariable().setResourceGroup(strList[1]);
}
}
LOG.debug("parse cluster: {}", clusterName);

View File

@ -58,7 +58,6 @@ import org.apache.doris.persist.PrivInfo;
import org.apache.doris.qe.ConnectContext;
import org.apache.doris.resource.Tag;
import org.apache.doris.system.SystemInfoService;
import org.apache.doris.thrift.TFetchResourceResult;
import org.apache.doris.thrift.TPrivilegeStatus;
import com.google.common.base.Joiner;
@ -457,7 +456,7 @@ public class Auth implements Writable {
userRoleManager.addUserRole(userIdent, roleName);
}
// other user properties
propertyMgr.addUserResource(userIdent.getQualifiedUser(), false);
propertyMgr.addUserResource(userIdent.getQualifiedUser());
// 5. update password policy
passwdPolicyManager.updatePolicy(userIdent, password, passwordOptions);
@ -1246,15 +1245,6 @@ public class Auth implements Writable {
}
}
public TFetchResourceResult toResourceThrift() {
readLock();
try {
return propertyMgr.toResourceThrift();
} finally {
readUnlock();
}
}
public List<List<String>> getRoleInfo() {
readLock();
try {

View File

@ -19,8 +19,6 @@ package org.apache.doris.mysql.privilege;
import org.apache.doris.analysis.SetUserPropertyVar;
import org.apache.doris.catalog.Env;
import org.apache.doris.catalog.ResourceGroup;
import org.apache.doris.catalog.ResourceType;
import org.apache.doris.common.AnalysisException;
import org.apache.doris.common.DdlException;
import org.apache.doris.common.FeMetaVersion;
@ -47,7 +45,6 @@ import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
/*
@ -81,8 +78,6 @@ public class UserProperty implements Writable {
private CommonUserProperties commonProperties = new CommonUserProperties();
// Resource belong to this user.
private UserResource resource = new UserResource(1000);
// load cluster
private String defaultLoadCluster = null;
private Map<String, DppConfig> clusterToDppConfig = Maps.newHashMap();
@ -180,7 +175,6 @@ public class UserProperty implements Writable {
int queryTimeout = this.commonProperties.getQueryTimeout();
int insertTimeout = this.commonProperties.getInsertTimeout();
UserResource newResource = resource.getCopiedUserResource();
String newDefaultLoadCluster = defaultLoadCluster;
Map<String, DppConfig> newDppConfigs = Maps.newHashMap(clusterToDppConfig);
@ -205,42 +199,6 @@ public class UserProperty implements Writable {
if (newMaxConn <= 0 || newMaxConn > 10000) {
throw new DdlException(PROP_MAX_USER_CONNECTIONS + " is not valid, must between 1 and 10000");
}
} else if (keyArr[0].equalsIgnoreCase(PROP_RESOURCE)) {
// set property "resource.cpu_share" = "100"
if (keyArr.length != 2) {
throw new DdlException(PROP_RESOURCE + " format error");
}
int resource = 0;
try {
resource = Integer.parseInt(value);
} catch (NumberFormatException e) {
throw new DdlException(key + " is not number");
}
if (resource <= 0) {
throw new DdlException(key + " is not valid");
}
newResource.updateResource(keyArr[1], resource);
} else if (keyArr[0].equalsIgnoreCase(PROP_QUOTA)) {
// set property "quota.normal" = "100"
if (keyArr.length != 2) {
throw new DdlException(PROP_QUOTA + " format error");
}
int quota = 0;
try {
quota = Integer.parseInt(value);
} catch (NumberFormatException e) {
throw new DdlException(key + " is not number");
}
if (quota <= 0) {
throw new DdlException(key + " is not valid");
}
newResource.updateGroupShare(keyArr[1], quota);
} else if (keyArr[0].equalsIgnoreCase(PROP_LOAD_CLUSTER)) {
updateLoadCluster(keyArr, value, newDppConfigs);
} else if (keyArr[0].equalsIgnoreCase(PROP_DEFAULT_LOAD_CLUSTER)) {
@ -349,7 +307,6 @@ public class UserProperty implements Writable {
this.commonProperties.setExecMemLimit(execMemLimit);
this.commonProperties.setQueryTimeout(queryTimeout);
this.commonProperties.setInsertTimeout(insertTimeout);
resource = newResource;
if (newDppConfigs.containsKey(newDefaultLoadCluster)) {
defaultLoadCluster = newDefaultLoadCluster;
} else {
@ -430,10 +387,6 @@ public class UserProperty implements Writable {
}
}
public UserResource getResource() {
return resource;
}
public String getDefaultLoadCluster() {
return defaultLoadCluster;
}
@ -484,19 +437,6 @@ public class UserProperty implements Writable {
// resource tag
result.add(Lists.newArrayList(PROP_RESOURCE_TAGS, Joiner.on(", ").join(commonProperties.getResourceTags())));
// resource
ResourceGroup group = resource.getResource();
for (Map.Entry<ResourceType, Integer> entry : group.getQuotaMap().entrySet()) {
result.add(Lists.newArrayList(PROP_RESOURCE + dot + entry.getKey().getDesc().toLowerCase(),
entry.getValue().toString()));
}
// quota
Map<String, AtomicInteger> groups = resource.getShareByGroup();
for (Map.Entry<String, AtomicInteger> entry : groups.entrySet()) {
result.add(Lists.newArrayList(PROP_QUOTA + dot + entry.getKey(), entry.getValue().toString()));
}
// load cluster
if (defaultLoadCluster != null) {
result.add(Lists.newArrayList(PROP_DEFAULT_LOAD_CLUSTER, defaultLoadCluster));
@ -555,8 +495,8 @@ public class UserProperty implements Writable {
// user name
Text.writeString(out, qualifiedUser);
// user resource
resource.write(out);
// call UserResource.write(out) to make sure that FE can rollback.
UserResource.write(out);
// load cluster
if (defaultLoadCluster == null) {
@ -583,8 +523,8 @@ public class UserProperty implements Writable {
this.commonProperties.setMaxConn(maxConn);
}
// user resource
resource = UserResource.readIn(in);
// call UserResource.readIn(in) to make sure that FE can rollback.
UserResource.readIn(in);
// load cluster
if (in.readBoolean()) {

View File

@ -27,8 +27,6 @@ import org.apache.doris.common.UserException;
import org.apache.doris.common.io.Writable;
import org.apache.doris.load.DppConfig;
import org.apache.doris.resource.Tag;
import org.apache.doris.thrift.TAgentServiceVersion;
import org.apache.doris.thrift.TFetchResourceResult;
import com.google.common.collect.Maps;
import org.apache.logging.log4j.LogManager;
@ -51,40 +49,18 @@ public class UserPropertyMgr implements Writable {
public static final String LDAP_RESOURCE_USER = "ldap";
private static final UserProperty LDAP_PROPERTY = new UserProperty(LDAP_RESOURCE_USER);
static {
try {
setNormalUserDefaultResource(LDAP_PROPERTY);
} catch (DdlException e) {
LOG.error("init DEFAULT_PROPERTY error.", e);
throw new RuntimeException(e);
}
}
private AtomicLong resourceVersion = new AtomicLong(0);
public UserPropertyMgr() {
}
public void addUserResource(String qualifiedUser, boolean isSystemUser) {
public void addUserResource(String qualifiedUser) {
UserProperty property = propertyMap.get(qualifiedUser);
if (property != null) {
return;
}
property = new UserProperty(qualifiedUser);
// set user properties
try {
if (isSystemUser) {
setSystemUserDefaultResource(property);
} else {
setNormalUserDefaultResource(property);
}
} catch (DdlException e) {
// this should not happen, because the value is set by us!!
}
propertyMap.put(qualifiedUser, property);
resourceVersion.incrementAndGet();
}
@ -149,42 +125,6 @@ public class UserPropertyMgr implements Writable {
return existProperty.getCopiedResourceTags();
}
public int getPropertyMapSize() {
return propertyMap.size();
}
private void setSystemUserDefaultResource(UserProperty user) throws DdlException {
UserResource userResource = user.getResource();
userResource.updateResource("CPU_SHARE", 100);
userResource.updateResource("IO_SHARE", 100);
userResource.updateResource("SSD_READ_MBPS", 30);
userResource.updateResource("SSD_WRITE_MBPS", 30);
userResource.updateResource("HDD_READ_MBPS", 30);
userResource.updateResource("HDD_WRITE_MBPS", 30);
}
private static void setNormalUserDefaultResource(UserProperty user) throws DdlException {
UserResource userResource = user.getResource();
userResource.updateResource("CPU_SHARE", 1000);
userResource.updateResource("IO_SHARE", 1000);
userResource.updateResource("SSD_READ_IOPS", 1000);
userResource.updateResource("HDD_READ_IOPS", 80);
userResource.updateResource("SSD_READ_MBPS", 30);
userResource.updateResource("HDD_READ_MBPS", 30);
}
public TFetchResourceResult toResourceThrift() {
TFetchResourceResult tResult = new TFetchResourceResult();
tResult.setProtocolVersion(TAgentServiceVersion.V1);
tResult.setResourceVersion(resourceVersion.get());
for (Map.Entry<String, UserProperty> entry : propertyMap.entrySet()) {
tResult.putToResourceByUser(entry.getKey(), entry.getValue().getResource().toThrift());
}
return tResult;
}
public Pair<String, DppConfig> getLoadClusterInfo(String qualifiedUser, String cluster) throws DdlException {
Pair<String, DppConfig> loadClusterInfo = null;

View File

@ -17,137 +17,33 @@
package org.apache.doris.mysql.privilege;
import org.apache.doris.catalog.ResourceGroup;
import org.apache.doris.common.DdlException;
import org.apache.doris.common.io.Text;
import org.apache.doris.common.io.Writable;
import org.apache.doris.thrift.TUserResource;
import com.google.common.collect.ImmutableSortedMap;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
// Resource belong to one user
public class UserResource implements Writable {
public static final String LOW = "low";
public static final String NORMAL = "normal";
public static final String HIGH = "high";
// TODO(zhaochun): move this to config
private static int defaultLowShare = 100;
private static int defaultNormalShare = 400;
private static int defaultHighShare = 800;
// This resource group is used to extend
private ResourceGroup resource;
// Use atomic integer for modify in place.
private ImmutableSortedMap<String, AtomicInteger> shareByGroup;
// Used to readIn
private UserResource() {
// This class is used for keeping backward compatible
@Deprecated
public class UserResource {
public static void write(DataOutput out) throws IOException {
// resouce count
out.writeInt(0);
// group count
out.writeInt(0);
}
public UserResource(int cpuShare) {
resource = ResourceGroup.builder().cpuShare(cpuShare).build();
ImmutableSortedMap.Builder<String, AtomicInteger> builder =
ImmutableSortedMap.orderedBy(String.CASE_INSENSITIVE_ORDER);
// Low, Normal and High.
builder.put(LOW, new AtomicInteger(defaultLowShare));
builder.put(NORMAL, new AtomicInteger(defaultNormalShare));
builder.put(HIGH, new AtomicInteger(defaultHighShare));
shareByGroup = builder.build();
}
public UserResource getCopiedUserResource() {
UserResource userResource = new UserResource();
userResource.resource = resource.getCopiedResourceGroup();
userResource.shareByGroup = ImmutableSortedMap.copyOf(shareByGroup);
return userResource;
}
public static boolean isValidGroup(String group) {
if (group.equalsIgnoreCase(LOW) || group.equalsIgnoreCase(NORMAL) || group.equalsIgnoreCase(HIGH)) {
return true;
public static void readIn(DataInput in) throws IOException {
int numResource = in.readInt();
for (int i = 0; i < numResource; ++i) {
in.readInt();
in.readInt();
}
return false;
}
public void updateResource(String desc, int quota) throws DdlException {
resource.updateByDesc(desc, quota);
}
public void updateGroupShare(String groupName, int newShare) throws DdlException {
AtomicInteger share = shareByGroup.get(groupName);
if (share == null) {
throw new DdlException("Unknown resource(" + groupName + ")");
}
share.set(newShare);
}
public ResourceGroup getResource() {
return resource;
}
public Map<String, AtomicInteger> getShareByGroup() {
return shareByGroup;
}
public TUserResource toThrift() {
TUserResource tResource = new TUserResource();
tResource.setResource(resource.toThrift());
for (Map.Entry<String, AtomicInteger> entry : shareByGroup.entrySet()) {
tResource.putToShareByGroup(entry.getKey(), entry.getValue().get());
}
return tResource;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("TotalQuota(").append(resource.toString()).append(")\n");
int idx = 0;
for (Map.Entry<String, AtomicInteger> entry : shareByGroup.entrySet()) {
if (idx++ != 0) {
sb.append(", ");
}
sb.append(entry.getKey()).append("(").append(entry.getValue()).append(")");
}
return sb.toString();
}
@Override
public void write(DataOutput out) throws IOException {
resource.write(out);
out.writeInt(shareByGroup.size());
for (Map.Entry<String, AtomicInteger> entry : shareByGroup.entrySet()) {
Text.writeString(out, entry.getKey());
out.writeInt(entry.getValue().get());
}
}
public void readFields(DataInput in) throws IOException {
resource = ResourceGroup.readIn(in);
ImmutableSortedMap.Builder<String, AtomicInteger> builder =
ImmutableSortedMap.orderedBy(String.CASE_INSENSITIVE_ORDER);
int numGroup = in.readInt();
for (int i = 0; i < numGroup; ++i) {
String name = Text.readString(in);
AtomicInteger value = new AtomicInteger(in.readInt());
builder.put(name, value);
Text.readString(in);
in.readInt();
}
shareByGroup = builder.build();
}
public static UserResource readIn(DataInput in) throws IOException {
UserResource userResource = new UserResource();
userResource.readFields(in);
return userResource;
}
}

View File

@ -1079,7 +1079,6 @@ public class EditLog {
}
long start = System.currentTimeMillis();
try {
journal.write(op, writable);
} catch (Throwable t) {

View File

@ -36,7 +36,6 @@ import org.apache.doris.nereids.StatementContext;
import org.apache.doris.nereids.stats.StatsErrorEstimator;
import org.apache.doris.plugin.AuditEvent.AuditEventBuilder;
import org.apache.doris.resource.Tag;
import org.apache.doris.thrift.TResourceInfo;
import org.apache.doris.thrift.TUniqueId;
import org.apache.doris.transaction.TransactionEntry;
import org.apache.doris.transaction.TransactionStatus;
@ -325,10 +324,6 @@ public class ConnectContext {
this.txnEntry = txnEntry;
}
public TResourceInfo toResourceCtx() {
return new TResourceInfo(qualifiedUser, sessionVariable.getResourceGroup());
}
public void setEnv(Env env) {
this.env = env;
defaultCatalog = env.getInternalCatalog().getName();

View File

@ -667,9 +667,6 @@ public class ConnectProcessor {
if (request.isSetCluster()) {
ctx.setCluster(request.cluster);
}
if (request.isSetResourceInfo()) {
ctx.getSessionVariable().setResourceGroup(request.getResourceInfo().getGroup());
}
if (request.isSetUserIp()) {
ctx.setRemoteIP(request.getUserIp());
}

View File

@ -93,7 +93,6 @@ import org.apache.doris.thrift.TQueryGlobals;
import org.apache.doris.thrift.TQueryOptions;
import org.apache.doris.thrift.TQueryType;
import org.apache.doris.thrift.TReportExecStatusParams;
import org.apache.doris.thrift.TResourceInfo;
import org.apache.doris.thrift.TResourceLimit;
import org.apache.doris.thrift.TRuntimeFilterParams;
import org.apache.doris.thrift.TRuntimeFilterTargetParams;
@ -237,7 +236,6 @@ public class Coordinator {
// Input parameter
private long jobId = -1; // job which this task belongs to
private TUniqueId queryId;
private final TResourceInfo tResourceInfo;
private final boolean needReport;
// parallel execute
@ -343,8 +341,6 @@ public class Coordinator {
} else {
this.queryGlobals.setTimeZone(context.getSessionVariable().getTimeZone());
}
this.tResourceInfo = new TResourceInfo(context.getQualifiedUser(),
context.getSessionVariable().getResourceGroup());
this.needReport = context.getSessionVariable().enableProfile();
this.nextInstanceId = new TUniqueId();
nextInstanceId.setHi(queryId.hi);
@ -369,7 +365,6 @@ public class Coordinator {
this.queryGlobals.setTimeZone(timezone);
this.queryGlobals.setLoadZeroTolerance(loadZeroTolerance);
this.queryOptions.setBeExecVersion(Config.be_exec_version);
this.tResourceInfo = new TResourceInfo("", "");
this.needReport = true;
this.nextInstanceId = new TUniqueId();
nextInstanceId.setHi(queryId.hi);
@ -3085,7 +3080,6 @@ public class Coordinator {
params.setFragment(fragment.toThrift());
params.setDescTbl(descTable);
params.setParams(new TPlanFragmentExecParams());
params.setResourceInfo(tResourceInfo);
params.setBuildHashTableForBroadcastJoin(instanceExecParam.buildHashTableForBroadcastJoin);
params.params.setQueryId(queryId);
params.params.setFragmentInstanceId(instanceExecParam.instanceId);
@ -3149,7 +3143,6 @@ public class Coordinator {
// Set global param
params.setProtocolVersion(PaloInternalServiceVersion.V1);
params.setDescTbl(descTable);
params.setResourceInfo(tResourceInfo);
params.setQueryId(queryId);
params.setPerExchNumSenders(perExchNumSenders);
params.setDestinations(destinations);

View File

@ -105,7 +105,6 @@ public class MasterOpExecutor {
params.setStmtIdx(originStmt.idx);
params.setUser(ctx.getQualifiedUser());
params.setDb(ctx.getDatabase());
params.setResourceInfo(ctx.toResourceCtx());
params.setUserIp(ctx.getRemoteIP());
params.setStmtId(ctx.getStmtId());
params.setCurrentUserIdent(ctx.getCurrentUserIdentity().toThrift());

View File

@ -843,9 +843,10 @@ public class FrontendServiceImpl implements FrontendService.Iface {
return masterImpl.report(request);
}
// This interface is used for keeping backward compatible
@Override
public TFetchResourceResult fetchResource() throws TException {
return masterImpl.fetchResource();
throw new TException("not supported");
}
@Override