Fix database quota check bug. Modify load help doc (#130)

morningman
2017-10-25 20:40:11 -05:00
committed by chenhao7253886
parent fe38773c3e
commit cc64875e6b
7 changed files with 265 additions and 270 deletions

View File

@@ -30,7 +30,6 @@
# CREATE TABLE
## description
This statement is used to create a table.
After successful execution, the table generates a base index; other rollup indexes can then be created from the base index.
Syntax:
CREATE [EXTERNAL] TABLE [IF NOT EXISTS] [database.]table_name
(column_definition1[, column_definition2, ...])
@@ -60,7 +59,7 @@
Range: 0 ~ 2^127 - 1
FLOAT (4 bytes)
DOUBLE (12 bytes)
DECIMAL[(precision, scale)]
DECIMAL[(precision, scale)] (40 bytes)
An exact-precision decimal type. Defaults to DECIMAL(10, 0).
precision: 1 ~ 27
scale: 0 ~ 9
@@ -108,7 +107,7 @@
"column_separator" = "value_separator"
"line_delimiter" = "value_delimiter"
)
In addition, the properties required by the Broker can be provided, passed via BROKER PROPERTIES; for example, HDFS requires:
In addition, the properties required by the Broker must be provided, passed via BROKER PROPERTIES; for example, HDFS requires (see the sketch below):
BROKER PROPERTIES(
"username" = "name",
"password" = "password"

View File

@@ -34,7 +34,7 @@
[SET (k1 = func(k2))]
Notes:
file_path: the file path on the broker; it may point to a single file, or use the /* wildcard to match every file under a directory.
file_path: the file path; it may point to a single file, or use the * wildcard to match every file under a directory.
PARTITION:
If this parameter is specified, only the listed partitions are loaded; data outside those partitions is filtered out.
@@ -102,6 +102,15 @@
load_delete_flag: specifies whether this load deletes data by loading only the key columns; it applies only to UNIQUE KEY tables,
and value columns may be omitted from the load. Defaults to false.
5. Sample data formats for loading
Integer types (TINYINT/SMALLINT/INT/BIGINT/LARGEINT): 1, 1000, 1234
Floating-point types (FLOAT/DOUBLE/DECIMAL): 1.1, 0.23, .356
Date types (DATE/DATETIME): 2017-10-03, 2017-06-13 12:34:03
(Note: for other date formats, the strftime or time_format functions can be used in the load command to convert them.)
String types (CHAR/VARCHAR): "I am a student", "a"
NULL value: \N
## example
1. Load a batch of data, specifying the timeout and the max filter ratio (see the sketch below)
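
As a hedged sketch of the example referenced above, the following submits a broker load over JDBC with a timeout and a max filter ratio. The label, table, HDFS path, broker name, and credentials are invented for illustration, and the same hypothetical FE endpoint as in the earlier sketch is assumed.

// Hedged sketch: submit a broker LOAD with a timeout and max filter ratio.
// Label, paths, broker name, and credentials are hypothetical.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class BrokerLoadSketch {
    public static void main(String[] args) throws Exception {
        String sql =
            "LOAD LABEL example_db.label_20171025 (\n"
            + "  DATA INFILE(\"hdfs://host:port/user/palo/data/input/*\")\n"
            + "  INTO TABLE my_table\n"
            + ") WITH BROKER hdfs_broker (\n"
            + "  \"username\" = \"name\",\n"
            + "  \"password\" = \"password\"\n"
            + ") PROPERTIES (\n"
            + "  \"timeout\" = \"3600\",\n"         // seconds before the job is cancelled
            + "  \"max_filter_ratio\" = \"0.1\"\n"  // tolerate up to 10% filtered rows
            + ")";
        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://fe_host:9030/example_db", "root", "");
             Statement stmt = conn.createStatement()) {
            stmt.execute(sql);
        }
    }
}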
@@ -476,8 +485,8 @@
[PROPERTIES ("key"="value", ...)]
The following parameters can be specified:
column_separator: specifies the column separator for the exported data; defaults to \t.
line_delimiter: specifies the line delimiter for the exported data; defaults to \n.
column_separator: specifies the column separator for the exported data; defaults to \t.
line_delimiter: specifies the line delimiter for the exported data; defaults to \n.
5. broker
Specifies the broker used for the export (see the sketch after this hunk).
Syntax:
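
The broker syntax itself falls outside this hunk. As a stand-in, here is a hedged JDBC sketch of an EXPORT that applies these properties and ships the result through a broker; the table, target path, broker name, and credentials are hypothetical.

// Hedged sketch: EXPORT with custom separators, shipped through a broker.
// Table, target path, broker name, and credentials are hypothetical.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class ExportSketch {
    public static void main(String[] args) throws Exception {
        String sql =
            "EXPORT TABLE example_db.my_table\n"
            + "TO \"hdfs://host:port/user/palo/export/\"\n"
            + "PROPERTIES (\n"
            + "  \"column_separator\" = \",\",\n"
            + "  \"line_delimiter\" = \"\\n\"\n"
            + ") WITH BROKER hdfs_broker (\n"
            + "  \"username\" = \"name\",\n"
            + "  \"password\" = \"password\"\n"
            + ")";
        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://fe_host:9030/example_db", "root", "");
             Statement stmt = conn.createStatement()) {
            stmt.execute(sql);
        }
    }
}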
@@ -770,4 +779,4 @@
SHOW BACKENDS
## keyword
SHOW, BACKENDS
SHOW, BACKENDS

View File

@@ -13,244 +13,242 @@
// specific language governing permissions and limitations
// under the License.
package com.baidu.palo.alter;
import com.baidu.palo.analysis.AddColumnClause;
import com.baidu.palo.analysis.AddColumnsClause;
import com.baidu.palo.analysis.AddRollupClause;
import com.baidu.palo.analysis.AddPartitionClause;
import com.baidu.palo.analysis.AlterClause;
import com.baidu.palo.analysis.AlterSystemStmt;
import com.baidu.palo.analysis.AlterTableStmt;
import com.baidu.palo.analysis.ColumnRenameClause;
import com.baidu.palo.analysis.DropColumnClause;
import com.baidu.palo.analysis.DropRollupClause;
import com.baidu.palo.analysis.DropPartitionClause;
import com.baidu.palo.analysis.ModifyColumnClause;
import com.baidu.palo.analysis.ModifyPartitionClause;
import com.baidu.palo.analysis.PartitionRenameClause;
import com.baidu.palo.analysis.ModifyTablePropertiesClause;
import com.baidu.palo.analysis.ReorderColumnsClause;
import com.baidu.palo.analysis.RollupRenameClause;
import com.baidu.palo.analysis.TableName;
import com.baidu.palo.analysis.TableRenameClause;
import com.baidu.palo.catalog.Catalog;
import com.baidu.palo.catalog.Database;
import com.baidu.palo.catalog.OlapTable;
import com.baidu.palo.catalog.OlapTable.OlapTableState;
import com.baidu.palo.catalog.Partition;
import com.baidu.palo.catalog.Table;
import com.baidu.palo.catalog.Table.TableType;
import com.baidu.palo.common.DdlException;
import com.baidu.palo.common.ErrorCode;
import com.baidu.palo.common.ErrorReport;
import com.baidu.palo.load.Load;
import com.google.common.base.Preconditions;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import java.util.Arrays;
import java.util.List;
public class Alter {
private static final Logger LOG = LogManager.getLogger(Alter.class);
private AlterHandler schemaChangeHandler;
private AlterHandler rollupHandler;
private SystemHandler clusterHandler;
public Alter() {
schemaChangeHandler = new SchemaChangeHandler();
rollupHandler = new RollupHandler();
clusterHandler = new SystemHandler();
}
public void start() {
schemaChangeHandler.start();
rollupHandler.start();
clusterHandler.start();
}
public void processAlterTable(AlterTableStmt stmt) throws DdlException {
TableName dbTableName = stmt.getTbl();
String dbName = dbTableName.getDb();
final String clusterName = stmt.getClusterName();
Database db = Catalog.getInstance().getDb(dbName);
if (db == null) {
ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
}
// check cluster capacity
Catalog.getCurrentSystemInfo().checkCapacity();
// check db quota
db.checkQuota();
// schema change ops may appear multiple times in one alter stmt, provided no other kind of alter op is present
boolean hasSchemaChange = false;
// rollup ops, if present, must appear in exactly one entry
boolean hasRollup = false;
// partition ops, if present, must appear in exactly one entry
boolean hasPartition = false;
// rename ops, if present, must appear in exactly one entry
boolean hasRename = false;
// check conflict alter ops first
List<AlterClause> alterClauses = stmt.getOps();
// if all alter clauses are DropPartitionClause, do not call checkQuota.
boolean allDropPartitionClause = true;
for (AlterClause alterClause : alterClauses) {
if (!(alterClause instanceof DropPartitionClause)) {
allDropPartitionClause = false;
break;
}
}
if (!allDropPartitionClause) {
// check db quota
db.checkQuota();
}
for (AlterClause alterClause : alterClauses) {
if ((alterClause instanceof AddColumnClause
|| alterClause instanceof AddColumnsClause
|| alterClause instanceof DropColumnClause
|| alterClause instanceof ModifyColumnClause
|| alterClause instanceof ReorderColumnsClause
|| alterClause instanceof ModifyTablePropertiesClause)
&& !hasRollup && !hasPartition && !hasRename) {
hasSchemaChange = true;
} else if (alterClause instanceof AddRollupClause && !hasSchemaChange && !hasRollup && !hasPartition
&& !hasRename) {
hasRollup = true;
} else if (alterClause instanceof DropRollupClause && !hasSchemaChange && !hasRollup && !hasPartition
&& !hasRename) {
hasRollup = true;
} else if (alterClause instanceof AddPartitionClause && !hasSchemaChange && !hasRollup && !hasPartition
&& !hasRename) {
hasPartition = true;
} else if (alterClause instanceof DropPartitionClause && !hasSchemaChange && !hasRollup && !hasPartition
&& !hasRename) {
hasPartition = true;
} else if (alterClause instanceof ModifyPartitionClause && !hasSchemaChange && !hasRollup
&& !hasPartition && !hasRename) {
hasPartition = true;
} else if ((alterClause instanceof TableRenameClause || alterClause instanceof RollupRenameClause
|| alterClause instanceof PartitionRenameClause || alterClause instanceof ColumnRenameClause)
&& !hasSchemaChange && !hasRollup && !hasPartition && !hasRename) {
hasRename = true;
} else {
throw new DdlException("Conflicting alter clauses. see help for more information");
}
} // end for alter clauses
boolean hasAddPartition = false;
String tableName = dbTableName.getTbl();
db.writeLock();
try {
Table table = db.getTable(tableName);
if (table == null) {
ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName);
}
if (table.getType() != TableType.OLAP) {
throw new DdlException("Donot support alter non-OLAP table[" + tableName + "]");
}
OlapTable olapTable = (OlapTable) table;
if (olapTable.getPartitions().size() == 0) {
throw new DdlException("table with empty parition cannot do schema change. [" + tableName + "]");
}
if (olapTable.getState() == OlapTableState.SCHEMA_CHANGE
|| olapTable.getState() == OlapTableState.BACKUP
|| olapTable.getState() == OlapTableState.RESTORE) {
throw new DdlException("Table[" + table.getName() + "]'s state[" + olapTable.getState()
+ "] does not allow doing ALTER ops");
// here we let NORMAL and ROLLUP pass
// NORMAL: ok to do any alter ops
// ROLLUP: we allow the user to DROP a rollup index while the table is in ROLLUP state
}
if (!hasPartition) {
// partition ops include add/drop/modify partition; these ops do not require checking for running delete jobs.
// NOTICE: if other partition ops are added, this code path may need to change.
Load load = Catalog.getInstance().getLoadInstance();
for (Partition partition : olapTable.getPartitions()) {
load.checkHashRunningDeleteJob(partition.getId(), partition.getName());
}
}
if (hasSchemaChange) {
schemaChangeHandler.process(alterClauses, clusterName, db, olapTable);
} else if (hasRollup) {
rollupHandler.process(alterClauses, clusterName, db, olapTable);
} else if (hasPartition) {
Preconditions.checkState(alterClauses.size() == 1);
AlterClause alterClause = alterClauses.get(0);
if (alterClause instanceof DropPartitionClause) {
Catalog.getInstance().dropPartition(db, olapTable, ((DropPartitionClause) alterClause));
} else if (alterClause instanceof ModifyPartitionClause) {
Catalog.getInstance().modifyPartition(db, olapTable, ((ModifyPartitionClause) alterClause));
} else {
hasAddPartition = true;
}
} else if (hasRename) {
processRename(db, olapTable, alterClauses);
}
} finally {
db.writeUnlock();
}
// the add partition op should be done outside the db lock, because it contains a synchronized create operation
if (hasAddPartition) {
Preconditions.checkState(alterClauses.size() == 1);
AlterClause alterClause = alterClauses.get(0);
if (alterClause instanceof AddPartitionClause) {
Catalog.getInstance().addPartition(db, tableName, (AddPartitionClause) alterClause);
} else {
Preconditions.checkState(false);
}
}
}
public void processAlterCluster(AlterSystemStmt stmt) throws DdlException {
clusterHandler.process(Arrays.asList(stmt.getAlterClause()), stmt.getClusterName(), null, null);
}
private void processRename(Database db, OlapTable table, List<AlterClause> alterClauses) throws DdlException {
for (AlterClause alterClause : alterClauses) {
if (alterClause instanceof TableRenameClause) {
Catalog.getInstance().renameTable(db, table, (TableRenameClause) alterClause);
break;
} else if (alterClause instanceof RollupRenameClause) {
Catalog.getInstance().renameRollup(db, table, (RollupRenameClause) alterClause);
break;
} else if (alterClause instanceof PartitionRenameClause) {
Catalog.getInstance().renamePartition(db, table, (PartitionRenameClause) alterClause);
break;
} else if (alterClause instanceof ColumnRenameClause) {
Catalog.getInstance().renameColumn(db, table, (ColumnRenameClause) alterClause);
break;
} else {
Preconditions.checkState(false);
}
}
}
public AlterHandler getSchemaChangeHandler() {
return this.schemaChangeHandler;
}
public AlterHandler getRollupHandler() {
return this.rollupHandler;
}
public AlterHandler getClusterHandler() {
return this.clusterHandler;
}
}
package com.baidu.palo.alter;
import com.baidu.palo.analysis.AddColumnClause;
import com.baidu.palo.analysis.AddColumnsClause;
import com.baidu.palo.analysis.AddPartitionClause;
import com.baidu.palo.analysis.AddRollupClause;
import com.baidu.palo.analysis.AlterClause;
import com.baidu.palo.analysis.AlterSystemStmt;
import com.baidu.palo.analysis.AlterTableStmt;
import com.baidu.palo.analysis.ColumnRenameClause;
import com.baidu.palo.analysis.DropColumnClause;
import com.baidu.palo.analysis.DropPartitionClause;
import com.baidu.palo.analysis.DropRollupClause;
import com.baidu.palo.analysis.ModifyColumnClause;
import com.baidu.palo.analysis.ModifyPartitionClause;
import com.baidu.palo.analysis.ModifyTablePropertiesClause;
import com.baidu.palo.analysis.PartitionRenameClause;
import com.baidu.palo.analysis.ReorderColumnsClause;
import com.baidu.palo.analysis.RollupRenameClause;
import com.baidu.palo.analysis.TableName;
import com.baidu.palo.analysis.TableRenameClause;
import com.baidu.palo.catalog.Catalog;
import com.baidu.palo.catalog.Database;
import com.baidu.palo.catalog.OlapTable;
import com.baidu.palo.catalog.OlapTable.OlapTableState;
import com.baidu.palo.catalog.Partition;
import com.baidu.palo.catalog.Table;
import com.baidu.palo.catalog.Table.TableType;
import com.baidu.palo.common.DdlException;
import com.baidu.palo.common.ErrorCode;
import com.baidu.palo.common.ErrorReport;
import com.baidu.palo.load.Load;
import com.google.common.base.Preconditions;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.Arrays;
import java.util.List;
public class Alter {
private static final Logger LOG = LogManager.getLogger(Alter.class);
private AlterHandler schemaChangeHandler;
private AlterHandler rollupHandler;
private SystemHandler clusterHandler;
public Alter() {
schemaChangeHandler = new SchemaChangeHandler();
rollupHandler = new RollupHandler();
clusterHandler = new SystemHandler();
}
public void start() {
schemaChangeHandler.start();
rollupHandler.start();
clusterHandler.start();
}
public void processAlterTable(AlterTableStmt stmt) throws DdlException {
TableName dbTableName = stmt.getTbl();
String dbName = dbTableName.getDb();
final String clusterName = stmt.getClusterName();
Database db = Catalog.getInstance().getDb(dbName);
if (db == null) {
ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
}
// check cluster capacity
Catalog.getCurrentSystemInfo().checkClusterCapacity(clusterName);
// schema change ops may appear multiple times in one alter stmt, provided no other kind of alter op is present
boolean hasSchemaChange = false;
// rollup ops, if present, must appear in exactly one entry
boolean hasRollup = false;
// partition ops, if present, must appear in exactly one entry
boolean hasPartition = false;
// rename ops, if present, must appear in exactly one entry
boolean hasRename = false;
// check conflict alter ops first
List<AlterClause> alterClauses = stmt.getOps();
// if all alter clauses are DropPartitionClause, do not call checkQuota.
boolean allDropPartitionClause = true;
for (AlterClause alterClause : alterClauses) {
if (!(alterClause instanceof DropPartitionClause)) {
allDropPartitionClause = false;
break;
}
}
if (!allDropPartitionClause) {
// check db quota
db.checkQuota();
}
for (AlterClause alterClause : alterClauses) {
if ((alterClause instanceof AddColumnClause
|| alterClause instanceof AddColumnsClause
|| alterClause instanceof DropColumnClause
|| alterClause instanceof ModifyColumnClause
|| alterClause instanceof ReorderColumnsClause
|| alterClause instanceof ModifyTablePropertiesClause)
&& !hasRollup && !hasPartition && !hasRename) {
hasSchemaChange = true;
} else if (alterClause instanceof AddRollupClause && !hasSchemaChange && !hasRollup && !hasPartition
&& !hasRename) {
hasRollup = true;
} else if (alterClause instanceof DropRollupClause && !hasSchemaChange && !hasRollup && !hasPartition
&& !hasRename) {
hasRollup = true;
} else if (alterClause instanceof AddPartitionClause && !hasSchemaChange && !hasRollup && !hasPartition
&& !hasRename) {
hasPartition = true;
} else if (alterClause instanceof DropPartitionClause && !hasSchemaChange && !hasRollup && !hasPartition
&& !hasRename) {
hasPartition = true;
} else if (alterClause instanceof ModifyPartitionClause && !hasSchemaChange && !hasRollup
&& !hasPartition && !hasRename) {
hasPartition = true;
} else if ((alterClause instanceof TableRenameClause || alterClause instanceof RollupRenameClause
|| alterClause instanceof PartitionRenameClause || alterClause instanceof ColumnRenameClause)
&& !hasSchemaChange && !hasRollup && !hasPartition && !hasRename) {
hasRename = true;
} else {
throw new DdlException("Conflicting alter clauses. see help for more information");
}
} // end for alter clauses
boolean hasAddPartition = false;
String tableName = dbTableName.getTbl();
db.writeLock();
try {
Table table = db.getTable(tableName);
if (table == null) {
ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName);
}
if (table.getType() != TableType.OLAP) {
throw new DdlException("Donot support alter non-OLAP table[" + tableName + "]");
}
OlapTable olapTable = (OlapTable) table;
if (olapTable.getPartitions().size() == 0) {
throw new DdlException("table with empty parition cannot do schema change. [" + tableName + "]");
}
if (olapTable.getState() == OlapTableState.SCHEMA_CHANGE
|| olapTable.getState() == OlapTableState.BACKUP
|| olapTable.getState() == OlapTableState.RESTORE) {
throw new DdlException("Table[" + table.getName() + "]'s state[" + olapTable.getState()
+ "] does not allow doing ALTER ops");
// here we let NORMAL and ROLLUP pass
// NORMAL: ok to do any alter ops
// ROLLUP: we allow the user to DROP a rollup index while the table is in ROLLUP state
}
if (!hasPartition) {
// partition ops include add/drop/modify partition; these ops do not require checking for running delete jobs.
// NOTICE: if other partition ops are added, this code path may need to change.
Load load = Catalog.getInstance().getLoadInstance();
for (Partition partition : olapTable.getPartitions()) {
load.checkHashRunningDeleteJob(partition.getId(), partition.getName());
}
}
if (hasSchemaChange) {
schemaChangeHandler.process(alterClauses, clusterName, db, olapTable);
} else if (hasRollup) {
rollupHandler.process(alterClauses, clusterName, db, olapTable);
} else if (hasPartition) {
Preconditions.checkState(alterClauses.size() == 1);
AlterClause alterClause = alterClauses.get(0);
if (alterClause instanceof DropPartitionClause) {
Catalog.getInstance().dropPartition(db, olapTable, ((DropPartitionClause) alterClause));
} else if (alterClause instanceof ModifyPartitionClause) {
Catalog.getInstance().modifyPartition(db, olapTable, ((ModifyPartitionClause) alterClause));
} else {
hasAddPartition = true;
}
} else if (hasRename) {
processRename(db, olapTable, alterClauses);
}
} finally {
db.writeUnlock();
}
// the add partition op should be done outside the db lock, because it contains a synchronized create operation
if (hasAddPartition) {
Preconditions.checkState(alterClauses.size() == 1);
AlterClause alterClause = alterClauses.get(0);
if (alterClause instanceof AddPartitionClause) {
Catalog.getInstance().addPartition(db, tableName, (AddPartitionClause) alterClause);
} else {
Preconditions.checkState(false);
}
}
}
public void processAlterCluster(AlterSystemStmt stmt) throws DdlException {
clusterHandler.process(Arrays.asList(stmt.getAlterClause()), stmt.getClusterName(), null, null);
}
private void processRename(Database db, OlapTable table, List<AlterClause> alterClauses) throws DdlException {
for (AlterClause alterClause : alterClauses) {
if (alterClause instanceof TableRenameClause) {
Catalog.getInstance().renameTable(db, table, (TableRenameClause) alterClause);
break;
} else if (alterClause instanceof RollupRenameClause) {
Catalog.getInstance().renameRollup(db, table, (RollupRenameClause) alterClause);
break;
} else if (alterClause instanceof PartitionRenameClause) {
Catalog.getInstance().renamePartition(db, table, (PartitionRenameClause) alterClause);
break;
} else if (alterClause instanceof ColumnRenameClause) {
Catalog.getInstance().renameColumn(db, table, (ColumnRenameClause) alterClause);
break;
} else {
Preconditions.checkState(false);
}
}
}
public AlterHandler getSchemaChangeHandler() {
return this.schemaChangeHandler;
}
public AlterHandler getRollupHandler() {
return this.rollupHandler;
}
public AlterHandler getClusterHandler() {
return this.clusterHandler;
}
}
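
The fix is easiest to see in isolation. In the old version above, db.checkQuota() ran unconditionally; in the new version it runs only when some clause other than DropPartitionClause is present, so a statement that merely drops partitions (and therefore frees space) can still execute against a database that is already over quota. A minimal self-contained sketch of that decision, with stand-in clause types rather than the real Palo classes:

// Self-contained illustration of the fixed quota-check decision.
// The clause types below are stand-ins for the real Palo analysis classes.
import java.util.Arrays;
import java.util.List;

public class QuotaCheckSketch {
    interface AlterClause {}
    static class DropPartitionClause implements AlterClause {}
    static class AddColumnClause implements AlterClause {}

    // Quota needs checking only if some clause could grow the database;
    // a statement made up purely of partition drops is exempt.
    static boolean needQuotaCheck(List<AlterClause> clauses) {
        for (AlterClause clause : clauses) {
            if (!(clause instanceof DropPartitionClause)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        // false: a drop-only statement skips the quota check
        System.out.println(needQuotaCheck(Arrays.asList(new DropPartitionClause())));
        // true: any other clause triggers it
        System.out.println(needQuotaCheck(
                Arrays.asList(new DropPartitionClause(), new AddColumnClause())));
    }
}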

View File

@@ -2310,7 +2310,7 @@ public class Catalog {
}
// check cluster capacity
Catalog.getCurrentSystemInfo().checkCapacity();
Catalog.getCurrentSystemInfo().checkClusterCapacity(stmt.getClusterName());
// check db quota
db.checkQuota();
@@ -3091,7 +3091,6 @@ public class Catalog {
LOG.info("successfully create table[{};{}] to restore", tableName, tableId);
} else {
if (!db.createTableWithLock(olapTable, false, stmt.isSetIfNotExists())) {
// TODO(cmy): add error code timeout;
ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists");
}
LOG.info("successfully create table[{};{}]", tableName, tableId);

View File

@@ -392,7 +392,7 @@ public class Load {
private void addLoadJob(LoadJob job, Database db) throws DdlException {
// check cluster capacity
Catalog.getCurrentSystemInfo().checkCapacity();
Catalog.getCurrentSystemInfo().checkClusterCapacity(db.getClusterName());
// check db quota
db.checkQuota();

View File

@@ -1068,11 +1068,15 @@ public class SystemInfoService extends Daemon {
memoryBe.setDecommissionType(be.getDecommissionType());
}
public long getAvailableCapacityB() {
private long getClusterAvailableCapacityB(String clusterName) {
List<Backend> clusterBackends = getClusterBackends(clusterName);
long capacity = 0L;
ImmutableMap<Long, Backend> idToBackend = idToBackendRef.get();
for (Backend backend : idToBackend.values()) {
for (Backend backend : clusterBackends) {
// Here we do not check whether the backend is alive;
// we assume dead backends will come back alive later.
if (backend.isDecommissioned()) {
// Data on a decommissioned backend will move to other backends,
// so we need to subtract the size of that data.
capacity -= backend.getTotalCapacityB() - backend.getAvailableCapacityB();
} else {
capacity += backend.getAvailableCapacityB();
@@ -1081,21 +1085,9 @@ public class SystemInfoService extends Daemon {
return capacity;
}
public void checkCapacity() throws DdlException {
if (getAvailableCapacityB() <= 0L) {
throw new DdlException("Cluster has no available capacity");
}
}
/**
* For now, we only check the capacity of the logical cluster when executing an operation.
*
* @param clusterName
* @throws DdlException
*/
public void checkClusterCapacity(String clusterName) throws DdlException {
if (getClusterBackends(clusterName).isEmpty()) {
throw new DdlException("Cluster has no available capacity");
if (getClusterAvailableCapacityB(clusterName) <= 0L) {
throw new DdlException("Cluster " + clusterName + " has no available capacity");
}
}
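
The capacity arithmetic above merits a worked example: a decommissioned backend contributes negatively, because its used bytes (total minus available) must be re-homed onto the remaining backends before it can leave. A self-contained sketch with a stand-in Backend type, not the real Palo class:

// Self-contained illustration of the cluster available-capacity arithmetic.
import java.util.Arrays;
import java.util.List;

public class CapacitySketch {
    static class Backend {
        final long totalB;
        final long availableB;
        final boolean decommissioned;

        Backend(long totalB, long availableB, boolean decommissioned) {
            this.totalB = totalB;
            this.availableB = availableB;
            this.decommissioned = decommissioned;
        }
    }

    static long availableCapacityB(List<Backend> backends) {
        long capacity = 0L;
        for (Backend be : backends) {
            if (be.decommissioned) {
                // Its data (total - available) must move to other backends,
                // so subtract that amount from the cluster's headroom.
                capacity -= be.totalB - be.availableB;
            } else {
                capacity += be.availableB;
            }
        }
        return capacity;
    }

    public static void main(String[] args) {
        List<Backend> cluster = Arrays.asList(
                new Backend(100L, 40L, false),  // healthy: 40 bytes free
                new Backend(100L, 70L, true));  // decommissioning: 30 bytes in use
        // 40 - 30 = 10 bytes of real headroom; <= 0 would fail checkClusterCapacity.
        System.out.println(availableCapacityB(cluster));
    }
}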

View File

@@ -49,8 +49,6 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.thrift.TException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.List;
import java.util.Set;