Delete, update and simplify some FE code (#3125)

This commit is contained in:
kangkaisen
2020-03-19 12:27:08 +08:00
committed by GitHub
parent 41815ef176
commit 9059afcc80
27 changed files with 47 additions and 711 deletions

View File

@ -224,8 +224,6 @@ import org.apache.kudu.client.KuduClient;
import org.apache.kudu.client.KuduException;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.codehaus.jackson.JsonParseException;
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
import java.io.BufferedInputStream;
@ -1359,12 +1357,6 @@ public class Catalog {
connection.setConnectTimeout(HTTP_TIMEOUT_SECOND * 1000);
connection.setReadTimeout(HTTP_TIMEOUT_SECOND * 1000);
return mapper.readValue(connection.getInputStream(), StorageInfo.class);
} catch (JsonParseException e) {
throw new IOException(e);
} catch (JsonMappingException e) {
throw new IOException(e);
} catch (IOException e) {
throw e;
} finally {
if (connection != null) {
connection.disconnect();
@ -1831,8 +1823,7 @@ public class Catalog {
long checksum = 0;
long saveImageStartTime = System.currentTimeMillis();
DataOutputStream dos = new DataOutputStream(new FileOutputStream(curFile));
try {
try (DataOutputStream dos = new DataOutputStream(new FileOutputStream(curFile))) {
checksum = saveHeader(dos, replayedJournalId, checksum);
checksum = saveMasterInfo(dos, checksum);
checksum = saveFrontends(dos, checksum);
@ -1853,8 +1844,6 @@ public class Catalog {
checksum = saveLoadJobsV2(dos, checksum);
checksum = saveSmallFiles(dos, checksum);
dos.writeLong(checksum);
} finally {
dos.close();
}
long saveImageEndTime = System.currentTimeMillis();
@ -2202,7 +2191,7 @@ public class Catalog {
// but service may be continued when there is no log being replayed.
LOG.warn("meta out of date. current time: {}, synchronized time: {}, has log: {}, fe type: {}",
currentTimeMs, synchronizedTimeMs, hasLog, feType);
if (hasLog || (!hasLog && feType == FrontendNodeType.UNKNOWN)) {
if (hasLog || feType == FrontendNodeType.UNKNOWN) {
// 1. if we read log from BDB, which means master is still alive.
// So we need to set meta out of date.
// 2. if we didn't read any log from BDB and feType is UNKNOWN,
@ -4811,8 +4800,7 @@ public class Catalog {
public static short calcShortKeyColumnCount(List<Column> columns, Map<String, String> properties)
throws DdlException {
List<Column> indexColumns = new ArrayList<Column>();
for (int i = 0; i < columns.size(); i++) {
Column column = columns.get(i);
for (Column column : columns) {
if (column.isKey()) {
indexColumns.add(column);
}

View File

@ -124,19 +124,19 @@ public class ColocateTableIndex implements Writable {
}
private final void readLock() {
private void readLock() {
this.lock.readLock().lock();
}
private final void readUnlock() {
private void readUnlock() {
this.lock.readLock().unlock();
}
private final void writeLock() {
private void writeLock() {
this.lock.writeLock().lock();
}
private final void writeUnlock() {
private void writeUnlock() {
this.lock.writeLock().unlock();
}

View File

@ -1,65 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.doris.catalog;
import org.apache.doris.common.DdlException;
import org.apache.doris.common.ErrorCode;
import org.apache.doris.common.ErrorReport;
/**
 * Static helpers for looking up and validating tables that participate in
 * colocation groups. All database lookups are performed under the database's
 * read lock.
 */
public class ColocateTableUtils {

    /** Returns the table with the given name, resolved under {@code db}'s read lock (may be null). */
    static Table getColocateTable(Database db, String tableName) {
        db.readLock();
        try {
            return db.getTable(tableName);
        } finally {
            db.readUnlock();
        }
    }

    /** Returns the table with the given id, resolved under {@code db}'s read lock (may be null). */
    public static Table getTable(Database db, long tblId) {
        db.readLock();
        try {
            return db.getTable(tblId);
        } finally {
            db.readUnlock();
        }
    }

    /**
     * Fails with ERR_COLOCATE_TABLE_NOT_EXIST if the lookup above returned null.
     *
     * @throws DdlException when {@code colocateTable} is null
     */
    static void checkTableExist(Table colocateTable, String colocateTableName) throws DdlException {
        if (colocateTable == null) {
            ErrorReport.reportDdlException(ErrorCode.ERR_COLOCATE_TABLE_NOT_EXIST, colocateTableName);
        }
    }

    /**
     * Fails with ERR_COLOCATE_TABLE_MUST_BE_OLAP_TABLE unless the table is an OLAP table.
     *
     * @throws DdlException when the table type is anything other than OLAP
     */
    static void checkTableType(Table colocateTable) throws DdlException {
        if (Table.TableType.OLAP != colocateTable.type) {
            ErrorReport.reportDdlException(ErrorCode.ERR_COLOCATE_TABLE_MUST_BE_OLAP_TABLE, colocateTable.getName());
        }
    }

    /**
     * Fails with ERR_COLOCATE_NOT_COLOCATE_TABLE when the parent table IS registered
     * in the colocate index.
     *
     * NOTE(review): the condition reports "not a colocate table" when
     * isColocateTable(...) is true, which looks inverted — confirm intended
     * semantics against the callers before relying on this check.
     *
     * @throws DdlException when the colocate-index check above trips
     */
    public static void checkTableIsColocated(Table parentTable, String colocateTableName) throws DdlException {
        if (Catalog.getCurrentCatalog().getColocateTableIndex().isColocateTable(parentTable.getId())) {
            ErrorReport.reportDdlException(ErrorCode.ERR_COLOCATE_NOT_COLOCATE_TABLE, colocateTableName);
        }
    }
}

View File

@ -168,10 +168,7 @@ public class Column implements Writable {
}
public boolean isAggregated() {
if (aggregationType == null || aggregationType == AggregateType.NONE) {
return false;
}
return true;
return aggregationType != null && aggregationType != AggregateType.NONE;
}
public boolean isAggregationTypeImplicit() {

View File

@ -226,7 +226,7 @@ public class Database extends MetaObject implements Writable {
} // end for groups
long leftDataQuota = dataQuotaBytes - usedDataQuota;
return leftDataQuota > 0L ? leftDataQuota : 0L;
return Math.max(leftDataQuota, 0L);
} finally {
readUnlock();
}

View File

@ -141,11 +141,7 @@ public class DomainResolver extends MasterDaemon {
return false;
}
return true;
} catch (IOException e) {
LOG.warn("failed to revole domain with BNS", e);
resolvedIPs.clear();
return false;
} catch (InterruptedException e) {
} catch (IOException | InterruptedException e) {
LOG.warn("failed to revole domain with BNS", e);
resolvedIPs.clear();
return false;

View File

@ -1,24 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.doris.catalog;
/**
 * Version constants for the metadata signature format.
 *
 * VERSION_1 is the only version defined so far, and CURRENT_VERSION tracks it.
 */
public class MetaSignatureVersion {
    public static final int VERSION_1 = 1;
    public static final int CURRENT_VERSION = VERSION_1;

    // Constants holder: enforce noninstantiability with a private constructor.
    private MetaSignatureVersion() {
    }
}

View File

@ -333,8 +333,8 @@ public class PartitionKey implements Comparable<PartitionKey>, Writable {
@Override
public int hashCode() {
int ret = types.size() * 1000;
for (int i = 0; i < types.size(); i++) {
ret += types.get(i).ordinal();
for (PrimitiveType type : types) {
ret += type.ordinal();
}
return ret;
}

View File

@ -26,7 +26,6 @@ import com.google.common.collect.ImmutableSetMultimap;
import com.google.common.collect.Lists;
import java.util.ArrayList;
import java.util.List;
public enum PrimitiveType {
INVALID_TYPE("INVALID_TYPE", -1, TPrimitiveType.INVALID_TYPE),

View File

@ -71,6 +71,7 @@ public class Replica implements Writable {
private long id;
@SerializedName(value = "backendId")
private long backendId;
// the version could be queried
@SerializedName(value = "version")
private long version;
@SerializedName(value = "versionHash")
@ -82,13 +83,15 @@ public class Replica implements Writable {
private long rowCount = 0;
@SerializedName(value = "state")
private ReplicaState state;
// the last load failed version
@SerializedName(value = "lastFailedVersion")
private long lastFailedVersion = -1L;
@SerializedName(value = "lastFailedVersionHash")
private long lastFailedVersionHash = 0L;
// not serialized, not very important
private long lastFailedTimestamp = 0;
// the last load successful version
@SerializedName(value = "lastSuccessVersion")
private long lastSuccessVersion = -1L;
@SerializedName(value = "lastSuccessVersionHash")

View File

@ -42,352 +42,6 @@ public class SchemaTable extends Table {
private final static int MY_CS_NAME_SIZE = 32;
private SchemaTableType schemaTableType;
// static {
// tableMap = Maps.newHashMap();
// List<Column> columns;
// int pos = 0;
// // AUTHORS
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("Name", ScalarType.createCharType(50), pos++));
// columns.add(new Column("Location", ScalarType.createCharType(50), pos++));
// columns.add(new Column("Comment", ScalarType.createCharType(50), pos++));
// tableMap.put("authors", columns);
// // COLUMNS
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("TABLE_CATALOG", ScalarType.createCharType(FN_REFLEN), pos++));
// columns.add(new Column("TABLE_SCHEMA", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("TABLE_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("COLUMN_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("ORDINAL_POSITION", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// columns.add(
// new Column("COLUMN_DEFAULT", ScalarType.createCharType(MAX_FIELD_VARCHARLENGTH), pos++));
// columns.add(new Column("IS_NULLABLE", ScalarType.createCharType(3), pos++));
// columns.add(new Column("DATA_TYPE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("CHARACTER_MAXIMUM_LENGTH", ScalarType.createType(PrimitiveType.BIGINT),
// pos++));
// columns.add(
// new Column("CHARACTER_OCTET_LENGTH", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// columns.add(
// new Column("NUMERIC_PRECISION", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// columns.add(
// new Column("NUMERIC_SCALE", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// columns.add(
// new Column("CHARACTER_SET_NAME", ScalarType.createCharType(MY_CS_NAME_SIZE), pos++));
// columns.add(
// new Column("COLLATION_NAME", ScalarType.createCharType(MY_CS_NAME_SIZE), pos++));
// columns.add(new Column("COLUMN_TYPE", ScalarType.createCharType(65535), pos++));
// columns.add(new Column("COLUMN_KEY", ScalarType.createCharType(3), pos++));
// columns.add(new Column("EXTRA", ScalarType.createCharType(27), pos++));
// columns.add(new Column("PRIVILEGES", ScalarType.createCharType(80), pos++));
// columns.add(new Column("COLUMN_COMMENT", ScalarType.createCharType(255), pos++));
// tableMap.put("columns", columns);
// // create table
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("Catalog", ScalarType.createCharType(FN_REFLEN), pos++));
// columns.add(new Column("Schema", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("Table", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("Create Table", ScalarType.createCharType(65535), pos++));
// tableMap.put("create_table", columns);
// // ENGINES
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("ENGINE", ScalarType.createCharType(64), pos++));
// columns.add(new Column("SUPPORT", ScalarType.createCharType(8), pos++));
// columns.add(new Column("COMMENT", ScalarType.createCharType(80), pos++));
// columns.add(new Column("TRANSACTIONS", ScalarType.createCharType(3), pos++));
// columns.add(new Column("XA", ScalarType.createCharType(3), pos++));
// columns.add(new Column("SAVEPOINTS", ScalarType.createCharType(3), pos++));
// tableMap.put("engines", columns);
// // EVENTS
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("EVENT_CATALOG", ScalarType.createCharType(FN_REFLEN), pos++));
// columns.add(new Column("EVENT_SCHEMA", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("EVENT_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("DEFINER", ScalarType.createCharType(77), pos++));
// columns.add(new Column("TIME_ZONE", ScalarType.createCharType(64), pos++));
// columns.add(new Column("EVENT_BODY", ScalarType.createCharType(8), pos++));
// columns.add(new Column("EVENT_DEFINITION", ScalarType.createCharType(65535), pos++));
// columns.add(new Column("EVENT_TYPE", ScalarType.createCharType(9), pos++));
// columns.add(
// new Column("EXECUTE_AT", ScalarType.createType(PrimitiveType.TIMESTAMP), pos++));
// columns.add(new Column("INTERVAL_VALUE", ScalarType.createCharType(256), pos++));
// columns.add(new Column("INTERVAL_FIELD", ScalarType.createCharType(18), pos++));
// columns.add(new Column("SQL_MODE", ScalarType.createCharType(32 * 256), pos++));
// columns.add(new Column("STARTS", ScalarType.createType(PrimitiveType.TIMESTAMP), pos++));
// columns.add(new Column("ENDS", ScalarType.createType(PrimitiveType.TIMESTAMP), pos++));
// columns.add(new Column("STATUS", ScalarType.createCharType(18), pos++));
// columns.add(new Column("ON_COMPLETION", ScalarType.createCharType(12), pos++));
// columns.add(new Column("CREATED", ScalarType.createType(PrimitiveType.TIMESTAMP), pos++));
// columns.add(
// new Column("LAST_ALTERED", ScalarType.createType(PrimitiveType.TIMESTAMP), pos++));
// columns.add(
// new Column("LAST_EXECUTED", ScalarType.createType(PrimitiveType.TIMESTAMP), pos++));
// columns.add(new Column("EVENT_COMMENT", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("ORIGINATOR", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// columns.add(
// new Column("CHARACTER_SET_CLIENT", ScalarType.createCharType(MY_CS_NAME_SIZE), pos++));
// columns.add(
// new Column("COLLATION_CONNECTION", ScalarType.createCharType(MY_CS_NAME_SIZE), pos++));
// columns.add(
// new Column("DATABASE_COLLATION", ScalarType.createCharType(MY_CS_NAME_SIZE), pos++));
// tableMap.put("events", columns);
//
// // OPEN_TABLES
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("Database", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("Table", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("In_use", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// columns.add(new Column("Name_locked", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// tableMap.put("open_tables", columns);
// // TABLE_NAMES
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("TABLE_CATALOG", ScalarType.createCharType(FN_REFLEN), pos++));
// columns.add(new Column("TABLE_SCHEMA", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("TABLE_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("TABLE_TYPE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// tableMap.put("table_names", columns);
// // PLUGINS
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("PLUGIN_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("PLUGIN_VERSION", ScalarType.createCharType(20), pos++));
// columns.add(new Column("PLUGIN_STATUS", ScalarType.createCharType(10), pos++));
// columns.add(new Column("PLUGIN_TYPE", ScalarType.createCharType(80), pos++));
// columns.add(new Column("PLUGIN_TYPE_VERSION", ScalarType.createCharType(20), pos++));
// columns.add(new Column("PLUGIN_LIBRARY", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("PLUGIN_LIBRARY_VERSION", ScalarType.createCharType(20), pos++));
// columns.add(new Column("PLUGIN_AUTHOR", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("PLUGIN_DESCRIPTION", ScalarType.createCharType(65535), pos++));
// columns.add(new Column("PLUGIN_LICENSE", ScalarType.createCharType(80), pos++));
// tableMap.put("plugins", columns);
// // PROCESSLIST
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("ID", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// columns.add(new Column("USER", ScalarType.createCharType(16), pos++));
// columns.add(new Column("HOST", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("DB", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("COMMAND", ScalarType.createCharType(16), pos++));
// columns.add(new Column("TIME", ScalarType.createType(PrimitiveType.INT), pos++));
// columns.add(new Column("STATE", ScalarType.createCharType(64), pos++));
// columns.add(new Column("INFO", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// tableMap.put("processlist", columns);
// // PROFILING
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("QUERY_ID", ScalarType.createType(PrimitiveType.INT), pos++));
// columns.add(new Column("SEQ", ScalarType.createType(PrimitiveType.INT), pos++));
// columns.add(new Column("STATE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("DURATION", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("CPU_USER", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("CPU_SYSTEM", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("CONTEXT_VOLUNTARY", ScalarType.createType(PrimitiveType.INT), pos++));
// columns.add(
// new Column("CONTEXT_INVOLUNTARY", ScalarType.createType(PrimitiveType.INT), pos++));
// columns.add(new Column("BLOCK_OPS_IN", ScalarType.createType(PrimitiveType.INT), pos++));
// columns.add(new Column("BLOCK_OPS_OUT", ScalarType.createType(PrimitiveType.INT), pos++));
// columns.add(new Column("MESSAGES_SENT", ScalarType.createType(PrimitiveType.INT), pos++));
// columns.add(
// new Column("MESSAGES_RECEIVED", ScalarType.createType(PrimitiveType.INT), pos++));
// columns.add(
// new Column("PAGE_FAULTS_MAJOR", ScalarType.createType(PrimitiveType.INT), pos++));
// columns.add(
// new Column("PAGE_FAULTS_MINOR", ScalarType.createType(PrimitiveType.INT), pos++));
// columns.add(new Column("SWAPS", ScalarType.createType(PrimitiveType.INT), pos++));
// columns.add(new Column("SOURCE_FUNCTION", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("SOURCE_FILE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("SOURCE_LINE", ScalarType.createType(PrimitiveType.INT), pos++));
// tableMap.put("profiling", columns);
// // TABLE PRIVILEGES
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("GRANTEE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("TABLE_CATALOG", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("TABLE_SCHEMA", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("TABLE_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("PRIVILEGE_TYPE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("IS_GRANTABLE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// tableMap.put("table_privileges", columns);
//
// // TABLE CONSTRAINTS
// pos = 0;
// columns = new ArrayList();
// columns.add(
// new Column("CONSTRAINT_CATALOG", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("CONSTRAINT_SCHEMA", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("CONSTRAINT_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("TABLE_SCHEMA", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("TABLE_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("CONSTRAINT_TYPE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// tableMap.put("table_constraints", columns);
//
// // REFERENTIAL_CONSTRAINTS
// pos = 0;
// columns = new ArrayList();
// columns.add(
// new Column("CONSTRAINT_CATALOG", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("CONSTRAINT_SCHEMA", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("CONSTRAINT_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("UNIQUE_CONSTRAINT_CATALOG", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("UNIQUE_CONSTRAINT_SCHEMA", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("UNIQUE_CONSTRAINT_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("MATCH_OPTION", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("UPDATE_RULE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("DELETE_RULE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("TABLE_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("REFERENCED_TABLE_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// tableMap.put("referential_constraints", columns);
//
// // VARIABLES
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("VARIABLE_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("VARIABLE_VALUE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// tableMap.put("variables", columns);
//
// // ROUNTINE
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("SPECIFIC_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("ROUTINE_CATALOG", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("ROUTINE_SCHEMA", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("ROUTINE_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("ROUTINE_TYPE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("DTD_IDENTIFIER", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("ROUTINE_BODY", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("ROUTINE_DEFINITION", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("EXTERNAL_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("EXTERNAL_LANGUAGE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("PARAMETER_STYLE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("IS_DETERMINISTIC", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("SQL_DATA_ACCESS", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("SQL_PATH", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("SECURITY_TYPE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("CREATED", ScalarType.createType(PrimitiveType.TIMESTAMP), pos++));
// columns.add(
// new Column("LAST_ALTERED", ScalarType.createType(PrimitiveType.TIMESTAMP), pos++));
// columns.add(new Column("SQL_MODE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("ROUTINE_COMMENT", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("DEFINER", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("CHARACTER_SET_CLIENT", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("COLLATION_CONNECTION", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("DATABASE_COLLATION", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// tableMap.put("routines", columns);
//
// // STATISTICS
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("TABLE_CATALOG", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("TABLE_SCHEMA", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("TABLE_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("NON_UNIQUE", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// columns.add(new Column("INDEX_SCHEMA", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("INDEX_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("SEQ_IN_INDEX", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// columns.add(new Column("COLUMN_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("COLLATION", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("CARDINALITY", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// columns.add(new Column("SUB_PART", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// columns.add(new Column("PACKED", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("NULLABLE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("INDEX_TYPE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("COMMENT", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// tableMap.put("statistics", columns);
//
// // STATUS
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("VARIABLE_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("VARIABLE_VALUE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// tableMap.put("status", columns);
//
// // TRIGGERS
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("TRIGGER_CATALOG", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("TRIGGER_SCHEMA", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("TRIGGER_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("EVENT_MANIPULATION", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("EVENT_OBJECT_CATALOG", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("EVENT_OBJECT_SCHEMA", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("EVENT_OBJECT_TABLE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("ACTION_ORDER", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// columns.add(
// new Column("ACTION_CONDITION", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("ACTION_STATEMENT", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("ACTION_ORIENTATION", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("ACTION_TIMING", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("ACTION_REFERENCE_OLD_TABLE", ScalarType.createCharType(NAME_CHAR_LEN),
// pos++));
// columns.add(
// new Column("ACTION_REFERENCE_NEW_TABLE", ScalarType.createCharType(NAME_CHAR_LEN),
// pos++));
// columns.add(
// new Column("ACTION_REFERENCE_OLD_ROW", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("ACTION_REFERENCE_NEW_ROW", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("CREATED", ScalarType.createType(PrimitiveType.TIMESTAMP), pos++));
// columns.add(new Column("SQL_MODE", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("DEFINER", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("CHARACTER_SET_CLIENT", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("COLLATION_CONNECTION", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("DATABASE_COLLATION", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// tableMap.put("triggers", columns);
//
// /* COLLATION */
// pos = 0;
// columns = new ArrayList();
// columns.add(new Column("COLLATION_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("CHARACTER_SET_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("ID", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// columns.add(new Column("IS_DEFAULT", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("IS_COMPILED", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("SORTLEN", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// tableMap.put("collations", columns);
// /* CHARSETS */
// pos = 0;
// columns = new ArrayList();
// columns.add(
// new Column("CHARACTER_SET_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(
// new Column("DEFAULT_COLLATE_NAME", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("DESCRIPTION", ScalarType.createCharType(NAME_CHAR_LEN), pos++));
// columns.add(new Column("MAXLEN", ScalarType.createType(PrimitiveType.BIGINT), pos++));
// tableMap.put("character_sets", columns);
// }
// Constructs a schema table by forwarding all arguments unchanged to the
// Table base-class constructor; no additional state is initialized here.
protected SchemaTable(long id, String name, TableType type, List<Column> baseSchema) {
super(id, name, type, baseSchema);
}

View File

@ -86,19 +86,19 @@ public class TabletInvertedIndex {
public TabletInvertedIndex() {
}
private final void readLock() {
private void readLock() {
this.lock.readLock().lock();
}
private final void readUnlock() {
private void readUnlock() {
this.lock.readLock().unlock();
}
private final void writeLock() {
private void writeLock() {
this.lock.writeLock().lock();
}
private final void writeUnlock() {
private void writeUnlock() {
this.lock.writeLock().unlock();
}
@ -283,8 +283,7 @@ public class TabletInvertedIndex {
if (tabletId == null) {
return null;
}
TabletMeta tabletMeta = tabletMetaMap.get(tabletId);
return tabletMeta;
return tabletMetaMap.get(tabletId);
} finally {
readUnlock();
}
@ -293,8 +292,7 @@ public class TabletInvertedIndex {
public Long getTabletIdByReplica(long replicaId) {
readLock();
try {
Long tabletId = replicaToTabletMap.get(replicaId);
return tabletId;
return replicaToTabletMap.get(replicaId);
} finally {
readUnlock();
}

View File

@ -306,11 +306,11 @@ public class Util {
if (directory.isDirectory()) {
File[] files = directory.listFiles();
if (null != files) {
for (int i = 0; i < files.length; i++) {
if (files[i].isDirectory()) {
deleteDirectory(files[i]);
for (File file : files) {
if (file.isDirectory()) {
deleteDirectory(file);
} else {
files[i].delete();
file.delete();
}
}
}
@ -401,8 +401,7 @@ public class Util {
}
try {
boolean result = Boolean.valueOf(valStr);
return result;
return Boolean.valueOf(valStr);
} catch (NumberFormatException e) {
throw new AnalysisException(hintMsg);
}

View File

@ -67,7 +67,7 @@ public class BDBHA implements HAProtocol {
long count = epochDb.count();
long myEpoch = count + 1;
LOG.info("start fencing, epoch number is {}", myEpoch);
Long key = new Long(myEpoch);
Long key = myEpoch;
DatabaseEntry theKey = new DatabaseEntry();
TupleBinding<Long> idBinding = TupleBinding.getPrimitiveBinding(Long.class);
idBinding.objectToEntry(key, theKey);
@ -79,18 +79,14 @@ public class BDBHA implements HAProtocol {
} else if (status == OperationStatus.KEYEXIST) {
return false;
} else {
Exception e = new Exception(status.toString());
throw e;
throw new Exception(status.toString());
}
} catch (Exception e) {
LOG.error("fencing failed. tried {} times", i, e);
if (i < RETRY_TIME) {
try {
Thread.sleep(2000);
} catch (InterruptedException e1) {
e1.printStackTrace();
}
continue;
try {
Thread.sleep(2000);
} catch (InterruptedException e1) {
e1.printStackTrace();
}
}
}

View File

@ -185,7 +185,6 @@ public class BDBEnvironment {
// list as the argument to config.setLogProviders(), if the
// default selection of providers is not suitable.
restore.execute(insufficientLogEx, config);
continue;
} catch (DatabaseException e) {
if (i < RETRY_TIME - 1) {
try {
@ -193,7 +192,6 @@ public class BDBEnvironment {
} catch (InterruptedException e1) {
e1.printStackTrace();
}
continue;
} else {
LOG.error("error to open replicated environment. will exit.", e);
System.exit(-1);
@ -333,7 +331,6 @@ public class BDBEnvironment {
} catch (InterruptedException e1) {
e1.printStackTrace();
}
continue;
} catch (DatabaseException e) {
LOG.warn("catch an exception when calling getDatabaseNames", e);
return null;

View File

@ -247,8 +247,7 @@ public class BDBJEJournal implements Journal {
@Override
public JournalCursor read(long fromKey, long toKey) {
    // Delegate straight to the cursor factory; the visible span contained both the
    // old two-statement form and this single return (diff residue with unreachable
    // code after the first return) — keep only the direct-return form.
    // NOTE(review): range presumably [fromKey, toKey] inclusive — confirm against
    // BDBJournalCursor.getJournalCursor.
    return BDBJournalCursor.getJournalCursor(bdbEnvironment, fromKey, toKey);
}
@Override
@ -363,7 +362,6 @@ public class BDBJEJournal implements Journal {
bdbEnvironment.close();
bdbEnvironment.setup(new File(environmentPath), selfNodeName, selfNodeHostPort,
helperNode.first + ":" + helperNode.second, Catalog.getInstance().isElectable());
continue;
}
}
}

View File

@ -75,7 +75,6 @@ public class BDBJournalCursor implements JournalCursor {
if (fromKey >= db) {
dbName = Long.toString(db);
nextDbPositionIndex++;
continue;
} else {
break;
}
@ -94,7 +93,7 @@ public class BDBJournalCursor implements JournalCursor {
if (currentKey > toKey) {
return ret;
}
Long key = new Long(currentKey);
Long key = currentKey;
DatabaseEntry theKey = new DatabaseEntry();
TupleBinding<Long> myBinding = TupleBinding.getPrimitiveBinding(Long.class);
myBinding.objectToEntry(key, theKey);
@ -124,12 +123,10 @@ public class BDBJournalCursor implements JournalCursor {
database = environment.openDatabase(dbNames.get(nextDbPositionIndex).toString());
nextDbPositionIndex++;
tryTimes = 0;
continue;
} else if (tryTimes < maxTryTime) {
tryTimes++;
LOG.warn("fail to get journal {}, will try again. status: {}", currentKey, operationStatus);
Thread.sleep(3000);
continue;
} else if (operationStatus == OperationStatus.NOTFOUND) {
// In the case:
// On non-master FE, the replayer will first get the max journal id,
@ -143,7 +140,7 @@ public class BDBJournalCursor implements JournalCursor {
throw new Exception(
"Failed to find key " + currentKey + " in database " + database.getDatabaseName());
} else {
LOG.error("fail to get journal {}, status: {}, will exit", currentKey);
LOG.error("fail to get journal {}, status: {}, will exit", currentKey, operationStatus);
System.exit(-1);
}
}

View File

@ -76,7 +76,6 @@ public class LoadJobScheduler extends MasterDaemon {
.build(), e);
loadJob.cancelJobWithoutCheck(new FailMsg(FailMsg.CancelType.ETL_SUBMIT_FAIL, e.getMessage()),
false, true);
continue;
} catch (DuplicatedRequestException e) {
// should not happen in load job scheduler, there is no request id.
LOG.warn(new LogBuilder(LogKey.LOAD_JOB, loadJob.getId())

View File

@ -20,9 +20,7 @@ package org.apache.doris.load.loadv2;
import org.apache.doris.analysis.Analyzer;
import org.apache.doris.analysis.BrokerDesc;
import org.apache.doris.analysis.DescriptorTable;
import org.apache.doris.analysis.Expr;
import org.apache.doris.analysis.SlotDescriptor;
import org.apache.doris.analysis.SlotRef;
import org.apache.doris.analysis.TupleDescriptor;
import org.apache.doris.catalog.Catalog;
import org.apache.doris.catalog.Column;
@ -93,7 +91,6 @@ public class LoadingTaskPlanner {
public void plan(TUniqueId loadId, List<List<TBrokerFileStatus>> fileStatusesList, int filesAdded)
throws UserException {
// Generate tuple descriptor
List<Expr> slotRefs = Lists.newArrayList();
TupleDescriptor tupleDesc = descTable.createTupleDescriptor();
// use full schema to fill the descriptor table
for (Column col : table.getFullSchema()) {
@ -105,7 +102,6 @@ public class LoadingTaskPlanner {
} else {
slotDesc.setIsNullable(false);
}
slotRefs.add(new SlotRef(slotDesc));
}
// Generate plan trees
@ -135,7 +131,7 @@ public class LoadingTaskPlanner {
try {
fragment.finalize(analyzer, false);
} catch (NotImplementedException e) {
LOG.info("Fragment finalize failed.{}", e);
LOG.info("Fragment finalize failed.{}", e.getMessage());
throw new UserException("Fragment finalize failed.");
}
}

View File

@ -194,8 +194,7 @@ public class Checkpoint extends MasterDaemon {
}
}
}
deleteVersion = (minOtherNodesJournalId > checkPointVersion)
? checkPointVersion : minOtherNodesJournalId;
deleteVersion = Math.min(minOtherNodesJournalId, checkPointVersion);
}
editLog.deleteJournals(deleteVersion + 1);
if (MetricRepo.isInit.get()) {

View File

@ -94,10 +94,8 @@ public class ReportHandler extends Daemon {
private BlockingQueue<ReportTask> reportQueue = Queues.newLinkedBlockingQueue();
private GaugeMetric<Long> gaugeQueueSize;
public ReportHandler() {
gaugeQueueSize = (GaugeMetric<Long>) new GaugeMetric<Long>(
GaugeMetric<Long> gaugeQueueSize = new GaugeMetric<Long>(
"report_queue_size", "report queue size") {
@Override
public Long getValue() {

View File

@ -124,12 +124,10 @@ public class MysqlServer {
} catch (InterruptedException e1) {
// Do nothing
}
continue;
} catch (Throwable e) {
// NotYetBoundException
// SecurityException
LOG.warn("Query server failed when calling accept.", e);
continue;
}
}
}

View File

@ -1,161 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.doris.persist;
import org.apache.doris.common.io.Text;
import org.apache.doris.common.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
@Deprecated
// (cmy 2015-07-22)
// do not use anymore; use ReplicaPersistInfo instead.
// remove later
/**
 * Legacy edit-log / image payload describing a clone or delete of a single
 * tablet replica. Kept only so that old persisted metadata can still be
 * deserialized; new code writes ReplicaPersistInfo instead (see class note above).
 *
 * NOTE(review): the field order in write(DataOutput) and readFields(DataInput)
 * defines the on-disk format — do not reorder or remove fields.
 */
public class CloneInfo implements Writable {
    // CLONE: a replica was copied onto a backend; DELETE: a replica was removed.
    public enum CloneType {
        CLONE,
        DELETE
    }
    // Identifiers locating the affected replica in the catalog hierarchy.
    private long dbId;
    private long tableId;
    private long partitionId;
    private long indexId;
    private long tabletId;
    private long replicaId;
    // Replica state captured when the entry was written.
    private long version;
    private long versionHash;
    private long dataSize;
    private long rowCount;
    // Backend (BE node) hosting the cloned/deleted replica.
    private long backendId;
    private CloneType type;
    /** No-arg constructor for deserialization via {@link #read(DataInput)}; all ids zeroed. */
    public CloneInfo() {
        this(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, CloneType.CLONE);
    }
    /** Convenience constructor: replica state (version/size/rows/backend) left at 0. */
    public CloneInfo(long dbId, long tableId, long partitionId, long indexId, long tabletId,
                     long replicaId, CloneType type) {
        this(dbId, tableId, partitionId, indexId, tabletId, replicaId, 0, 0, 0, 0, 0, type);
    }
    /** Full constructor; simply stores every argument in the matching field. */
    public CloneInfo(long dbId, long tableId, long partitionId, long indexId, long tabletId,
                     long replicaId, long version, long versionHash, long dataSize, long rowCount,
                     long backendId, CloneType type) {
        this.dbId = dbId;
        this.tableId = tableId;
        this.partitionId = partitionId;
        this.indexId = indexId;
        this.tabletId = tabletId;
        this.replicaId = replicaId;
        this.version = version;
        this.versionHash = versionHash;
        this.dataSize = dataSize;
        this.rowCount = rowCount;
        this.backendId = backendId;
        this.type = type;
    }
    // ---- Plain accessors; no computation, no side effects. ----
    public long getDbId() {
        return this.dbId;
    }
    public long getTableId() {
        return this.tableId;
    }
    public long getPartitionId() {
        return this.partitionId;
    }
    public long getIndexId() {
        return this.indexId;
    }
    public long getTabletId() {
        return this.tabletId;
    }
    public long getReplicaId() {
        return this.replicaId;
    }
    public long getVersion() {
        return this.version;
    }
    public long getVersionHash() {
        return this.versionHash;
    }
    public long getDataSize() {
        return this.dataSize;
    }
    public long getRowCount() {
        return this.rowCount;
    }
    public long getBackendId() {
        return this.backendId;
    }
    public CloneType getType() {
        return this.type;
    }
    /**
     * Serializes all fields, in declaration order, followed by the enum name as a
     * string. Order must stay in lock-step with {@link #readFields(DataInput)}.
     */
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(dbId);
        out.writeLong(tableId);
        out.writeLong(partitionId);
        out.writeLong(indexId);
        out.writeLong(tabletId);
        out.writeLong(replicaId);
        out.writeLong(version);
        out.writeLong(versionHash);
        out.writeLong(dataSize);
        out.writeLong(rowCount);
        out.writeLong(backendId);
        Text.writeString(out, type.toString());
    }
    /**
     * Mirror of {@link #write(DataOutput)}: reads fields in the exact same order.
     * Throws IllegalArgumentException (via Enum.valueOf) on an unknown type name.
     */
    public void readFields(DataInput in) throws IOException {
        dbId = in.readLong();
        tableId = in.readLong();
        partitionId = in.readLong();
        indexId = in.readLong();
        tabletId = in.readLong();
        replicaId = in.readLong();
        version = in.readLong();
        versionHash = in.readLong();
        dataSize = in.readLong();
        rowCount = in.readLong();
        backendId = in.readLong();
        type = CloneType.valueOf(Text.readString(in));
    }
    /** Static factory: allocates a zeroed instance and populates it from the stream. */
    public static CloneInfo read(DataInput in) throws IOException {
        CloneInfo cloneInfo = new CloneInfo();
        cloneInfo.readFields(in);
        return cloneInfo;
    }
}

View File

@ -205,19 +205,6 @@ public class AggregationNode extends PlanNode {
private void updateplanNodeName() {
StringBuilder sb = new StringBuilder();
sb.append("AGGREGATE");
/*
if (aggInfo.isMerge() || needsFinalize) {
sb.append(" (");
if (aggInfo.isMerge() && needsFinalize) {
sb.append("merge finalize");
} else if (aggInfo.isMerge()) {
sb.append("merge");
} else {
sb.append("finalize");
}
sb.append(")");
}
*/
sb.append(" (");
if (aggInfo.isMerge()) {
sb.append("merge");

View File

@ -96,12 +96,10 @@ public class HashDistributionPruner implements DistributionPruner {
// return all SubPartition
return Lists.newArrayList(bucketsList);
}
if (null != inPredicate) {
if (!(inPredicate.getChild(0) instanceof SlotRef)) {
// return all SubPartition
return Lists.newArrayList(bucketsList);
}
if (!(inPredicate.getChild(0) instanceof SlotRef)) {
// return all SubPartition
return Lists.newArrayList(bucketsList);
}
Set<Long> resultSet = Sets.newHashSet();
int inElementNum = inPredicate.getInElementNum();
@ -111,9 +109,7 @@ public class HashDistributionPruner implements DistributionPruner {
LiteralExpr expr = (LiteralExpr) inPredicate.getChild(i);
hashKey.pushColumn(expr, keyColumn.getDataType());
Collection<Long> subList = prune(columnId + 1, hashKey, newComplex);
for (Long subPartitionId : subList) {
resultSet.add(subPartitionId);
}
resultSet.addAll(subList);
hashKey.popColumn();
if (resultSet.size() >= bucketsList.size()) {
break;

View File

@ -110,8 +110,7 @@ public class MaterializedViewSelector {
long bestIndexId = priorities(olapScanNode, candidateIndexIdToSchema);
LOG.info("The best materialized view is {} for scan node {} in query {}, cost {}",
bestIndexId, scanNode.getId(), selectStmt.toSql(), (System.currentTimeMillis() - start));
BestIndexInfo bestIndexInfo = new BestIndexInfo(bestIndexId, isPreAggregation, reasonOfDisable);
return bestIndexInfo;
return new BestIndexInfo(bestIndexId, isPreAggregation, reasonOfDisable);
}
private Map<Long, List<Column>> predicates(OlapScanNode scanNode) {
@ -482,11 +481,7 @@ public class MaterializedViewSelector {
/**
 * Records that the query applies aggregate function {@code functionName} to
 * column {@code columnName} of table {@code tableName}, grouping the
 * (column, function) pairs per table in {@code aggregateColumnsInQuery}.
 *
 * The visible span declared {@code aggregatedColumns} twice (an old null-check
 * lazy-init plus the new computeIfAbsent form — diff residue that cannot
 * compile); keep only the computeIfAbsent form.
 */
private void addAggregatedColumn(String columnName, String functionName, String tableName) {
    AggregatedColumn newAggregatedColumn = new AggregatedColumn(columnName, functionName);
    // Lazily create the per-table set on first use.
    Set<AggregatedColumn> aggregatedColumns =
            aggregateColumnsInQuery.computeIfAbsent(tableName, k -> Sets.newHashSet());
    aggregatedColumns.add(newAggregatedColumn);
}

View File

@ -1366,11 +1366,7 @@ public class SingleNodePlanner {
analyzer.materializeSlots(scanNode.getConjuncts());
scanNodes.add(scanNode);
List<ScanNode> scanNodeList = selectStmtToScanNodes.get(selectStmt.getId());
if (scanNodeList == null) {
scanNodeList = Lists.newArrayList();
selectStmtToScanNodes.put(selectStmt.getId(), scanNodeList);
}
List<ScanNode> scanNodeList = selectStmtToScanNodes.computeIfAbsent(selectStmt.getId(), k -> Lists.newArrayList());
scanNodeList.add(scanNode);
return scanNode;
}